aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--CREDITS22
-rw-r--r--Documentation/DocBook/kernel-api.tmpl1
-rw-r--r--Documentation/DocBook/libata.tmpl2
-rw-r--r--Documentation/RCU/checklist.txt38
-rw-r--r--Documentation/RCU/rcu.txt3
-rw-r--r--Documentation/RCU/torture.txt33
-rw-r--r--Documentation/RCU/whatisRCU.txt3
-rw-r--r--Documentation/ecryptfs.txt77
-rw-r--r--Documentation/feature-removal-schedule.txt8
-rw-r--r--Documentation/filesystems/gfs2.txt43
-rw-r--r--Documentation/kbuild/kconfig-language.txt2
-rw-r--r--Documentation/kbuild/makefiles.txt2
-rw-r--r--Documentation/kernel-parameters.txt29
-rw-r--r--Documentation/powerpc/booting-without-of.txt252
-rw-r--r--Documentation/sound/oss/AWE3276
-rw-r--r--Documentation/sound/oss/CMI833885
-rw-r--r--Documentation/sound/oss/INSTALL.awe134
-rw-r--r--Documentation/sound/oss/MAD1656
-rw-r--r--Documentation/sound/oss/Maestro123
-rw-r--r--Documentation/sound/oss/Maestro392
-rw-r--r--Documentation/sound/oss/NEWS42
-rw-r--r--Documentation/sound/oss/OPL3-SA52
-rw-r--r--Documentation/sound/oss/README.awe218
-rw-r--r--Documentation/sound/oss/Wavefront339
-rw-r--r--Documentation/sound/oss/es137070
-rw-r--r--Documentation/sound/oss/rme96xx767
-rw-r--r--Documentation/sound/oss/solo170
-rw-r--r--Documentation/sound/oss/sonicvibes81
-rw-r--r--MAINTAINERS54
-rw-r--r--Makefile4
-rw-r--r--arch/i386/kernel/acpi/boot.c9
-rw-r--r--arch/i386/kernel/i8259.c45
-rw-r--r--arch/i386/kernel/io_apic.c495
-rw-r--r--arch/i386/kernel/irq.c19
-rw-r--r--arch/i386/pci/irq.c34
-rw-r--r--arch/ia64/kernel/Makefile1
-rw-r--r--arch/ia64/kernel/irq_ia64.c20
-rw-r--r--arch/ia64/kernel/msi_ia64.c143
-rw-r--r--arch/ia64/pci/pci.c9
-rw-r--r--arch/ia64/sn/kernel/Makefile1
-rw-r--r--arch/ia64/sn/kernel/msi_sn.c (renamed from drivers/pci/msi-altix.c)108
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/hpux/fs.c2
-rw-r--r--arch/parisc/kernel/binfmt_elf32.c24
-rw-r--r--arch/parisc/kernel/cache.c48
-rw-r--r--arch/parisc/kernel/entry.S21
-rw-r--r--arch/parisc/kernel/hardware.c3
-rw-r--r--arch/parisc/kernel/irq.c151
-rw-r--r--arch/parisc/kernel/processor.c5
-rw-r--r--arch/parisc/kernel/signal.c5
-rw-r--r--arch/parisc/kernel/smp.c7
-rw-r--r--arch/parisc/kernel/sys_parisc.c45
-rw-r--r--arch/parisc/kernel/syscall_table.S4
-rw-r--r--arch/parisc/kernel/time.c208
-rw-r--r--arch/parisc/kernel/traps.c10
-rw-r--r--arch/parisc/mm/init.c23
-rw-r--r--arch/parisc/mm/ioremap.c2
-rw-r--r--arch/powerpc/Kconfig21
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/boot/dts/mpc8272ads.dts223
-rw-r--r--arch/powerpc/boot/dts/mpc8360emds.dts375
-rw-r--r--arch/powerpc/boot/zImage.coff.lds.S1
-rw-r--r--arch/powerpc/configs/mpc8360emds_defconfig1018
-rw-r--r--arch/powerpc/kernel/cputable.c15
-rw-r--r--arch/powerpc/kernel/entry_64.S18
-rw-r--r--arch/powerpc/kernel/head_64.S28
-rw-r--r--arch/powerpc/kernel/misc_64.S46
-rw-r--r--arch/powerpc/kernel/pci_64.c58
-rw-r--r--arch/powerpc/kernel/setup-common.c25
-rw-r--r--arch/powerpc/kernel/setup_32.c8
-rw-r--r--arch/powerpc/kernel/setup_64.c12
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S8
-rw-r--r--arch/powerpc/mm/pgtable_64.c29
-rw-r--r--arch/powerpc/mm/slb_low.S3
-rw-r--r--arch/powerpc/platforms/82xx/Kconfig21
-rw-r--r--arch/powerpc/platforms/82xx/Makefile5
-rw-r--r--arch/powerpc/platforms/82xx/m82xx_pci.h19
-rw-r--r--arch/powerpc/platforms/82xx/mpc82xx.c111
-rw-r--r--arch/powerpc/platforms/82xx/mpc82xx_ads.c661
-rw-r--r--arch/powerpc/platforms/82xx/pq2ads.h67
-rw-r--r--arch/powerpc/platforms/83xx/Kconfig13
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c215
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.h19
-rw-r--r--arch/powerpc/platforms/83xx/mpc8360e_pb.c219
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c235
-rw-r--r--arch/powerpc/platforms/cell/interrupt.h97
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c9
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c19
-rw-r--r--arch/powerpc/platforms/iseries/pci.c8
-rw-r--r--arch/powerpc/platforms/iseries/setup.c16
-rw-r--r--arch/powerpc/platforms/powermac/udbg_scc.c14
-rw-r--r--arch/powerpc/platforms/pseries/setup.c2
-rw-r--r--arch/powerpc/sysdev/Makefile1
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.c2
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.h2
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c62
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/Kconfig30
-rw-r--r--arch/powerpc/sysdev/qe_lib/Makefile8
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c353
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c555
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.h106
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_io.c226
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc.c251
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_fast.c396
-rw-r--r--arch/powerpc/sysdev/qe_lib/ucc_slow.c404
-rw-r--r--arch/powerpc/xmon/xmon.c35
-rw-r--r--arch/x86_64/kernel/i8259.c108
-rw-r--r--arch/x86_64/kernel/io_apic.c694
-rw-r--r--arch/x86_64/kernel/irq.c14
-rw-r--r--arch/x86_64/kernel/mpparse.c42
-rw-r--r--drivers/ata/ahci.c90
-rw-r--r--drivers/ata/libata-core.c12
-rw-r--r--drivers/ata/libata-scsi.c14
-rw-r--r--drivers/ata/libata-sff.c44
-rw-r--r--drivers/ata/pata_ali.c13
-rw-r--r--drivers/ata/pata_amd.c38
-rw-r--r--drivers/ata/pata_artop.c17
-rw-r--r--drivers/ata/pata_atiixp.c14
-rw-r--r--drivers/ata/pata_cmd64x.c17
-rw-r--r--drivers/ata/pata_cs5520.c10
-rw-r--r--drivers/ata/pata_cs5530.c11
-rw-r--r--drivers/ata/pata_cs5535.c9
-rw-r--r--drivers/ata/pata_cypress.c13
-rw-r--r--drivers/ata/pata_efar.c4
-rw-r--r--drivers/ata/pata_hpt366.c9
-rw-r--r--drivers/ata/pata_hpt37x.c19
-rw-r--r--drivers/ata/pata_hpt3x2n.c17
-rw-r--r--drivers/ata/pata_hpt3x3.c9
-rw-r--r--drivers/ata/pata_it821x.c13
-rw-r--r--drivers/ata/pata_jmicron.c11
-rw-r--r--drivers/ata/pata_mpiix.c9
-rw-r--r--drivers/ata/pata_netcell.c3
-rw-r--r--drivers/ata/pata_ns87410.c9
-rw-r--r--drivers/ata/pata_oldpiix.c4
-rw-r--r--drivers/ata/pata_opti.c10
-rw-r--r--drivers/ata/pata_optidma.c9
-rw-r--r--drivers/ata/pata_pcmcia.c4
-rw-r--r--drivers/ata/pata_pdc2027x.c15
-rw-r--r--drivers/ata/pata_pdc202xx_old.c19
-rw-r--r--drivers/ata/pata_radisys.c4
-rw-r--r--drivers/ata/pata_rz1000.c12
-rw-r--r--drivers/ata/pata_sc1200.c11
-rw-r--r--drivers/ata/pata_serverworks.c17
-rw-r--r--drivers/ata/pata_sil680.c9
-rw-r--r--drivers/ata/pata_sis.c6
-rw-r--r--drivers/ata/pata_sl82c105.c9
-rw-r--r--drivers/ata/pata_triflex.c10
-rw-r--r--drivers/ata/pata_via.c15
-rw-r--r--drivers/ata/pdc_adma.c3
-rw-r--r--drivers/ata/sata_mv.c27
-rw-r--r--drivers/ata/sata_nv.c53
-rw-r--r--drivers/ata/sata_promise.c55
-rw-r--r--drivers/ata/sata_qstor.c3
-rw-r--r--drivers/ata/sata_sil.c15
-rw-r--r--drivers/ata/sata_sil24.c11
-rw-r--r--drivers/ata/sata_sis.c8
-rw-r--r--drivers/ata/sata_svw.c15
-rw-r--r--drivers/ata/sata_sx4.c5
-rw-r--r--drivers/ata/sata_uli.c8
-rw-r--r--drivers/ata/sata_via.c6
-rw-r--r--drivers/ata/sata_vsc.c6
-rw-r--r--drivers/atm/adummy.c6
-rw-r--r--drivers/atm/ambassador.c4
-rw-r--r--drivers/atm/firestream.c12
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/atm/horizon.c4
-rw-r--r--drivers/atm/idt77252.c23
-rw-r--r--drivers/atm/lanai.c8
-rw-r--r--drivers/atm/zatm.c7
-rw-r--r--drivers/block/pktcdvd.c132
-rw-r--r--drivers/block/swim3.c4
-rw-r--r--drivers/char/agp/Kconfig10
-rw-r--r--drivers/char/agp/Makefile1
-rw-r--r--drivers/char/agp/parisc-agp.c416
-rw-r--r--drivers/char/amiserial.c30
-rw-r--r--drivers/char/cyclades.c28
-rw-r--r--drivers/char/epca.c17
-rw-r--r--drivers/char/epca.h1
-rw-r--r--drivers/char/generic_serial.c19
-rw-r--r--drivers/char/hvc_iseries.c8
-rw-r--r--drivers/char/hvc_vio.c4
-rw-r--r--drivers/char/riscom8.c10
-rw-r--r--drivers/char/serial167.c22
-rw-r--r--drivers/clocksource/scx200_hrt.c4
-rw-r--r--drivers/cpufreq/cpufreq.c20
-rw-r--r--drivers/isdn/hisax/niccy.c223
-rw-r--r--drivers/misc/Kconfig32
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/misc/tifm_7xx1.c437
-rw-r--r--drivers/misc/tifm_core.c272
-rw-r--r--drivers/mmc/Kconfig16
-rw-r--r--drivers/mmc/Makefile1
-rw-r--r--drivers/mmc/mmc.c6
-rw-r--r--drivers/mmc/mmc.h4
-rw-r--r--drivers/mmc/mmc_block.c24
-rw-r--r--drivers/mmc/mmc_sysfs.c35
-rw-r--r--drivers/mmc/sdhci.c23
-rw-r--r--drivers/mmc/tifm_sd.c933
-rw-r--r--drivers/parisc/iosapic.c5
-rw-r--r--drivers/parisc/lba_pci.c122
-rw-r--r--drivers/parisc/sba_iommu.c267
-rw-r--r--drivers/pci/Kconfig8
-rw-r--r--drivers/pci/Makefile11
-rw-r--r--drivers/pci/htirq.c190
-rw-r--r--drivers/pci/msi-apic.c101
-rw-r--r--drivers/pci/msi.c941
-rw-r--r--drivers/pci/msi.h110
-rw-r--r--drivers/pci/setup-bus.c10
-rw-r--r--drivers/rtc/rtc-ds1307.c6
-rw-r--r--drivers/rtc/rtc-ds1672.c4
-rw-r--r--drivers/rtc/rtc-rs5c372.c4
-rw-r--r--drivers/serial/8250_gsc.c4
-rw-r--r--drivers/serial/Kconfig9
-rw-r--r--drivers/video/riva/fbdev.c2
-rw-r--r--fs/Kconfig14
-rw-r--r--fs/Makefile3
-rw-r--r--fs/binfmt_som.c18
-rw-r--r--fs/configfs/item.c2
-rw-r--r--fs/dcache.c9
-rw-r--r--fs/dlm/Kconfig21
-rw-r--r--fs/dlm/Makefile19
-rw-r--r--fs/dlm/ast.c173
-rw-r--r--fs/dlm/ast.h26
-rw-r--r--fs/dlm/config.c789
-rw-r--r--fs/dlm/config.h42
-rw-r--r--fs/dlm/debug_fs.c387
-rw-r--r--fs/dlm/dir.c423
-rw-r--r--fs/dlm/dir.h30
-rw-r--r--fs/dlm/dlm_internal.h543
-rw-r--r--fs/dlm/lock.c3871
-rw-r--r--fs/dlm/lock.h62
-rw-r--r--fs/dlm/lockspace.c717
-rw-r--r--fs/dlm/lockspace.h25
-rw-r--r--fs/dlm/lowcomms.c1238
-rw-r--r--fs/dlm/lowcomms.h26
-rw-r--r--fs/dlm/lvb_table.h18
-rw-r--r--fs/dlm/main.c97
-rw-r--r--fs/dlm/member.c327
-rw-r--r--fs/dlm/member.h24
-rw-r--r--fs/dlm/memory.c116
-rw-r--r--fs/dlm/memory.h29
-rw-r--r--fs/dlm/midcomms.c140
-rw-r--r--fs/dlm/midcomms.h21
-rw-r--r--fs/dlm/rcom.c472
-rw-r--r--fs/dlm/rcom.h24
-rw-r--r--fs/dlm/recover.c765
-rw-r--r--fs/dlm/recover.h34
-rw-r--r--fs/dlm/recoverd.c290
-rw-r--r--fs/dlm/recoverd.h24
-rw-r--r--fs/dlm/requestqueue.c184
-rw-r--r--fs/dlm/requestqueue.h22
-rw-r--r--fs/dlm/user.c788
-rw-r--r--fs/dlm/user.h16
-rw-r--r--fs/dlm/util.c161
-rw-r--r--fs/dlm/util.h22
-rw-r--r--fs/ecryptfs/Makefile7
-rw-r--r--fs/ecryptfs/crypto.c1659
-rw-r--r--fs/ecryptfs/debug.c123
-rw-r--r--fs/ecryptfs/dentry.c87
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h482
-rw-r--r--fs/ecryptfs/file.c440
-rw-r--r--fs/ecryptfs/inode.c1079
-rw-r--r--fs/ecryptfs/keystore.c1061
-rw-r--r--fs/ecryptfs/main.c831
-rw-r--r--fs/ecryptfs/mmap.c788
-rw-r--r--fs/ecryptfs/super.c198
-rw-r--r--fs/gfs2/Kconfig44
-rw-r--r--fs/gfs2/Makefile10
-rw-r--r--fs/gfs2/acl.c309
-rw-r--r--fs/gfs2/acl.h39
-rw-r--r--fs/gfs2/bmap.c1221
-rw-r--r--fs/gfs2/bmap.h31
-rw-r--r--fs/gfs2/daemon.c196
-rw-r--r--fs/gfs2/daemon.h19
-rw-r--r--fs/gfs2/dir.c1961
-rw-r--r--fs/gfs2/dir.h79
-rw-r--r--fs/gfs2/eaops.c230
-rw-r--r--fs/gfs2/eaops.h30
-rw-r--r--fs/gfs2/eattr.c1501
-rw-r--r--fs/gfs2/eattr.h100
-rw-r--r--fs/gfs2/gfs2.h31
-rw-r--r--fs/gfs2/glock.c2231
-rw-r--r--fs/gfs2/glock.h153
-rw-r--r--fs/gfs2/glops.c615
-rw-r--r--fs/gfs2/glops.h25
-rw-r--r--fs/gfs2/incore.h634
-rw-r--r--fs/gfs2/inode.c1379
-rw-r--r--fs/gfs2/inode.h56
-rw-r--r--fs/gfs2/lm.c217
-rw-r--r--fs/gfs2/lm.h42
-rw-r--r--fs/gfs2/locking.c184
-rw-r--r--fs/gfs2/locking/dlm/Makefile3
-rw-r--r--fs/gfs2/locking/dlm/lock.c524
-rw-r--r--fs/gfs2/locking/dlm/lock_dlm.h187
-rw-r--r--fs/gfs2/locking/dlm/main.c64
-rw-r--r--fs/gfs2/locking/dlm/mount.c255
-rw-r--r--fs/gfs2/locking/dlm/plock.c301
-rw-r--r--fs/gfs2/locking/dlm/sysfs.c226
-rw-r--r--fs/gfs2/locking/dlm/thread.c359
-rw-r--r--fs/gfs2/locking/nolock/Makefile3
-rw-r--r--fs/gfs2/locking/nolock/main.c246
-rw-r--r--fs/gfs2/log.c687
-rw-r--r--fs/gfs2/log.h65
-rw-r--r--fs/gfs2/lops.c809
-rw-r--r--fs/gfs2/lops.h99
-rw-r--r--fs/gfs2/main.c150
-rw-r--r--fs/gfs2/meta_io.c590
-rw-r--r--fs/gfs2/meta_io.h78
-rw-r--r--fs/gfs2/mount.c214
-rw-r--r--fs/gfs2/mount.h17
-rw-r--r--fs/gfs2/ondisk.c308
-rw-r--r--fs/gfs2/ops_address.c790
-rw-r--r--fs/gfs2/ops_address.h22
-rw-r--r--fs/gfs2/ops_dentry.c119
-rw-r--r--fs/gfs2/ops_dentry.h17
-rw-r--r--fs/gfs2/ops_export.c298
-rw-r--r--fs/gfs2/ops_export.h22
-rw-r--r--fs/gfs2/ops_file.c661
-rw-r--r--fs/gfs2/ops_file.h24
-rw-r--r--fs/gfs2/ops_fstype.c928
-rw-r--r--fs/gfs2/ops_fstype.h18
-rw-r--r--fs/gfs2/ops_inode.c1151
-rw-r--r--fs/gfs2/ops_inode.h20
-rw-r--r--fs/gfs2/ops_super.c468
-rw-r--r--fs/gfs2/ops_super.h17
-rw-r--r--fs/gfs2/ops_vm.c184
-rw-r--r--fs/gfs2/ops_vm.h18
-rw-r--r--fs/gfs2/quota.c1227
-rw-r--r--fs/gfs2/quota.h35
-rw-r--r--fs/gfs2/recovery.c570
-rw-r--r--fs/gfs2/recovery.h34
-rw-r--r--fs/gfs2/rgrp.c1513
-rw-r--r--fs/gfs2/rgrp.h69
-rw-r--r--fs/gfs2/super.c976
-rw-r--r--fs/gfs2/super.h55
-rw-r--r--fs/gfs2/sys.c583
-rw-r--r--fs/gfs2/sys.h27
-rw-r--r--fs/gfs2/trans.c184
-rw-r--r--fs/gfs2/trans.h39
-rw-r--r--fs/gfs2/util.c245
-rw-r--r--fs/gfs2/util.h170
-rw-r--r--fs/lockd/clntlock.c54
-rw-r--r--fs/lockd/clntproc.c17
-rw-r--r--fs/lockd/host.c325
-rw-r--r--fs/lockd/mon.c65
-rw-r--r--fs/lockd/svc.c19
-rw-r--r--fs/lockd/svc4proc.c29
-rw-r--r--fs/lockd/svclock.c197
-rw-r--r--fs/lockd/svcproc.c27
-rw-r--r--fs/lockd/svcshare.c20
-rw-r--r--fs/lockd/svcsubs.c164
-rw-r--r--fs/nfsd/export.c149
-rw-r--r--fs/nfsd/nfs2acl.c3
-rw-r--r--fs/nfsd/nfs3acl.c3
-rw-r--r--fs/nfsd/nfs3proc.c18
-rw-r--r--fs/nfsd/nfs3xdr.c56
-rw-r--r--fs/nfsd/nfs4acl.c711
-rw-r--r--fs/nfsd/nfs4proc.c32
-rw-r--r--fs/nfsd/nfs4xdr.c231
-rw-r--r--fs/nfsd/nfsctl.c49
-rw-r--r--fs/nfsd/nfsproc.c12
-rw-r--r--fs/nfsd/nfssvc.c19
-rw-r--r--fs/nfsd/nfsxdr.c43
-rw-r--r--fs/nfsd/vfs.c86
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--include/asm-i386/hw_irq.h3
-rw-r--r--include/asm-i386/hypertransport.h42
-rw-r--r--include/asm-i386/io_apic.h42
-rw-r--r--include/asm-i386/mach-default/irq_vectors_limits.h5
-rw-r--r--include/asm-i386/msi.h23
-rw-r--r--include/asm-i386/msidef.h47
-rw-r--r--include/asm-ia64/machvec.h21
-rw-r--r--include/asm-ia64/machvec_sn2.h9
-rw-r--r--include/asm-ia64/msi.h29
-rw-r--r--include/asm-parisc/agp.h25
-rw-r--r--include/asm-parisc/assembly.h6
-rw-r--r--include/asm-parisc/cacheflush.h30
-rw-r--r--include/asm-parisc/compat.h4
-rw-r--r--include/asm-parisc/dma.h7
-rw-r--r--include/asm-parisc/futex.h71
-rw-r--r--include/asm-parisc/io.h2
-rw-r--r--include/asm-parisc/iosapic.h53
-rw-r--r--include/asm-parisc/irq.h6
-rw-r--r--include/asm-parisc/mckinley.h9
-rw-r--r--include/asm-parisc/page.h22
-rw-r--r--include/asm-parisc/param.h10
-rw-r--r--include/asm-parisc/parisc-device.h5
-rw-r--r--include/asm-parisc/pci.h5
-rw-r--r--include/asm-parisc/prefetch.h39
-rw-r--r--include/asm-parisc/processor.h39
-rw-r--r--include/asm-parisc/ropes.h322
-rw-r--r--include/asm-parisc/serial.h16
-rw-r--r--include/asm-parisc/spinlock.h115
-rw-r--r--include/asm-powerpc/firmware.h67
-rw-r--r--include/asm-powerpc/immap_qe.h477
-rw-r--r--include/asm-powerpc/qe.h457
-rw-r--r--include/asm-powerpc/qe_ic.h64
-rw-r--r--include/asm-powerpc/system.h4
-rw-r--r--include/asm-powerpc/ucc.h84
-rw-r--r--include/asm-powerpc/ucc_fast.h243
-rw-r--r--include/asm-powerpc/ucc_slow.h289
-rw-r--r--include/asm-powerpc/xmon.h26
-rw-r--r--include/asm-x86_64/hardirq.h3
-rw-r--r--include/asm-x86_64/hw_irq.h8
-rw-r--r--include/asm-x86_64/hypertransport.h42
-rw-r--r--include/asm-x86_64/io_apic.h43
-rw-r--r--include/asm-x86_64/irq.h7
-rw-r--r--include/asm-x86_64/msi.h24
-rw-r--r--include/asm-x86_64/msidef.h47
-rw-r--r--include/linux/Kbuild7
-rw-r--r--include/linux/ac97_codec.h5
-rw-r--r--include/linux/audit.h3
-rw-r--r--include/linux/debug_locks.h2
-rw-r--r--include/linux/dlm.h302
-rw-r--r--include/linux/dlm_device.h86
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/fsl_devices.h65
-rw-r--r--include/linux/gfs2_ondisk.h443
-rw-r--r--include/linux/hardirq.h7
-rw-r--r--include/linux/htirq.h15
-rw-r--r--include/linux/in.h1
-rw-r--r--include/linux/ip.h9
-rw-r--r--include/linux/ipc.h3
-rw-r--r--include/linux/ipsec.h3
-rw-r--r--include/linux/irq.h57
-rw-r--r--include/linux/libata.h9
-rw-r--r--include/linux/lm_interface.h273
-rw-r--r--include/linux/lock_dlm_plock.h41
-rw-r--r--include/linux/lockd/lockd.h61
-rw-r--r--include/linux/lockd/share.h3
-rw-r--r--include/linux/lockd/sm_inter.h5
-rw-r--r--include/linux/msi.h49
-rw-r--r--include/linux/netfilter_bridge/ebt_mark_t.h12
-rw-r--r--include/linux/netfilter_ipv4.h2
-rw-r--r--include/linux/nfsd/const.h20
-rw-r--r--include/linux/nfsd/export.h21
-rw-r--r--include/linux/nfsd/nfsd.h5
-rw-r--r--include/linux/nfsd/xdr.h2
-rw-r--r--include/linux/nfsd/xdr3.h2
-rw-r--r--include/linux/nfsd/xdr4.h2
-rw-r--r--include/linux/notifier.h43
-rw-r--r--include/linux/pci.h7
-rw-r--r--include/linux/pci_regs.h21
-rw-r--r--include/linux/rcupdate.h7
-rw-r--r--include/linux/scx200.h2
-rw-r--r--include/linux/slab.h26
-rw-r--r--include/linux/sound.h2
-rw-r--r--include/linux/srcu.h53
-rw-r--r--include/linux/sunrpc/auth.h3
-rw-r--r--include/linux/sunrpc/cache.h11
-rw-r--r--include/linux/sunrpc/msg_prot.h40
-rw-r--r--include/linux/sunrpc/svc.h97
-rw-r--r--include/linux/sunrpc/svcauth.h1
-rw-r--r--include/linux/sunrpc/svcsock.h3
-rw-r--r--include/linux/sunrpc/xprt.h8
-rw-r--r--include/linux/tifm.h158
-rw-r--r--include/linux/utsname.h17
-rw-r--r--include/linux/wavefront.h675
-rw-r--r--include/linux/xfrm.h3
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/auditfilter.c9
-rw-r--r--kernel/auditsc.c28
-rw-r--r--kernel/irq/chip.c63
-rw-r--r--kernel/irq/migration.c34
-rw-r--r--kernel/rcupdate.c11
-rw-r--r--kernel/rcutorture.c317
-rw-r--r--kernel/srcu.c258
-rw-r--r--kernel/sys.c125
-rw-r--r--mm/filemap.c10
-rw-r--r--mm/hugetlb.c8
-rw-r--r--mm/page_alloc.c53
-rw-r--r--mm/readahead.c1
-rw-r--r--mm/slab.c13
-rw-r--r--mm/util.c6
-rw-r--r--net/bridge/netfilter/ebt_mark.c21
-rw-r--r--net/core/neighbour.c12
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/ipv4/Kconfig9
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/esp4.c26
-rw-r--r--net/ipv4/ipcomp.c5
-rw-r--r--net/ipv4/ipvs/ip_vs_core.c10
-rw-r--r--net/ipv4/netfilter.c9
-rw-r--r--net/ipv4/netfilter/ip_nat_standalone.c3
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c97
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c3
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv4/xfrm4_mode_beet.c139
-rw-r--r--net/ipv6/Kconfig10
-rw-r--r--net/ipv6/Makefile1
-rw-r--r--net/ipv6/ipcomp6.c5
-rw-r--r--net/ipv6/udp.c64
-rw-r--r--net/ipv6/xfrm6_mode_beet.c107
-rw-r--r--net/netfilter/Kconfig2
-rw-r--r--net/sched/estimator.c196
-rw-r--r--net/sched/sch_htb.c2
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c67
-rw-r--r--net/sunrpc/svc.c79
-rw-r--r--net/sunrpc/svcauth_unix.c47
-rw-r--r--net/sunrpc/svcsock.c51
-rw-r--r--net/tipc/link.c5
-rw-r--r--net/xfrm/xfrm_hash.h7
-rw-r--r--net/xfrm/xfrm_policy.c7
-rw-r--r--net/xfrm/xfrm_state.c16
-rw-r--r--net/xfrm/xfrm_user.c1
-rw-r--r--scripts/Makefile.headersinst2
-rw-r--r--sound/oss/Makefile57
-rw-r--r--sound/oss/ac97.c20
-rw-r--r--sound/oss/ac97.h3
-rw-r--r--sound/oss/ac97_codec.c89
-rw-r--r--sound/oss/ac97_plugin_ad1980.c125
-rw-r--r--sound/oss/ad1848.c4
-rw-r--r--sound/oss/ad1848.h1
-rw-r--r--sound/oss/ali5455.c3735
-rw-r--r--sound/oss/au1000.c2216
-rw-r--r--sound/oss/audio_syms.c16
-rw-r--r--sound/oss/awe_hw.h99
-rw-r--r--sound/oss/awe_wave.c6148
-rw-r--r--sound/oss/awe_wave.h77
-rw-r--r--sound/oss/cmpci.c3380
-rw-r--r--sound/oss/cs4281/Makefile6
-rw-r--r--sound/oss/cs4281/cs4281_hwdefs.h1234
-rw-r--r--sound/oss/cs4281/cs4281_wrapper-24.c41
-rw-r--r--sound/oss/cs4281/cs4281m.c4487
-rw-r--r--sound/oss/cs4281/cs4281pm-24.c45
-rw-r--r--sound/oss/cs4281/cs4281pm.h74
-rw-r--r--sound/oss/dev_table.c44
-rw-r--r--sound/oss/dev_table.h17
-rw-r--r--sound/oss/dm.h79
-rw-r--r--sound/oss/dmabuf.c40
-rw-r--r--sound/oss/es1370.c2819
-rw-r--r--sound/oss/esssolo1.c2516
-rw-r--r--sound/oss/forte.c2138
-rw-r--r--sound/oss/gus.h24
-rw-r--r--sound/oss/gus_card.c292
-rw-r--r--sound/oss/gus_hw.h50
-rw-r--r--sound/oss/gus_linearvol.h18
-rw-r--r--sound/oss/gus_midi.c256
-rw-r--r--sound/oss/gus_vol.c153
-rw-r--r--sound/oss/gus_wave.c3463
-rw-r--r--sound/oss/harmony.c1330
-rw-r--r--sound/oss/ics2101.c247
-rw-r--r--sound/oss/iwmem.h36
-rw-r--r--sound/oss/mad16.c1112
-rw-r--r--sound/oss/maestro.c3686
-rw-r--r--sound/oss/maestro.h60
-rw-r--r--sound/oss/maestro3.c2968
-rw-r--r--sound/oss/maestro3.h821
-rw-r--r--sound/oss/maui.c477
-rw-r--r--sound/oss/midi_syms.c29
-rw-r--r--sound/oss/midi_synth.c21
-rw-r--r--sound/oss/midibuf.c11
-rw-r--r--sound/oss/mpu401.c13
-rw-r--r--sound/oss/mpu401.h2
-rw-r--r--sound/oss/opl3sa.c329
-rw-r--r--sound/oss/rme96xx.c1857
-rw-r--r--sound/oss/rme96xx.h78
-rw-r--r--sound/oss/sequencer.c15
-rw-r--r--sound/oss/sequencer_syms.c29
-rw-r--r--sound/oss/sgalaxy.c207
-rw-r--r--sound/oss/sonicvibes.c2792
-rw-r--r--sound/oss/sound_calls.h3
-rw-r--r--sound/oss/sound_syms.c50
-rw-r--r--sound/oss/sound_timer.c4
-rw-r--r--sound/oss/soundcard.c16
-rw-r--r--sound/oss/tuning.h10
-rw-r--r--sound/oss/wavfront.c3553
-rw-r--r--sound/oss/wf_midi.c880
-rw-r--r--sound/oss/ymfpci.c2691
-rw-r--r--sound/oss/ymfpci.h360
-rw-r--r--sound/oss/ymfpci_image.h1565
-rw-r--r--sound/oss/yss225.c319
-rw-r--r--sound/oss/yss225.h24
-rw-r--r--sound/sound_core.c34
576 files changed, 64066 insertions, 67491 deletions
diff --git a/CREDITS b/CREDITS
index dba3e6334691..5329ead9c672 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2240,6 +2240,12 @@ D: tc: HFSC scheduler
2240S: Freiburg 2240S: Freiburg
2241S: Germany 2241S: Germany
2242 2242
2243N: Paul E. McKenney
2244E: paulmck@us.ibm.com
2245W: http://www.rdrop.com/users/paulmck/
2246D: RCU and variants
2247D: rcutorture module
2248
2243N: Mike McLagan 2249N: Mike McLagan
2244E: mike.mclagan@linux.org 2250E: mike.mclagan@linux.org
2245W: http://www.invlogic.com/~mmclagan 2251W: http://www.invlogic.com/~mmclagan
@@ -2981,6 +2987,10 @@ S: 69 rue Dunois
2981S: 75013 Paris 2987S: 75013 Paris
2982S: France 2988S: France
2983 2989
2990N: Dipankar Sarma
2991E: dipankar@in.ibm.com
2992D: RCU
2993
2984N: Hannu Savolainen 2994N: Hannu Savolainen
2985E: hannu@opensound.com 2995E: hannu@opensound.com
2986D: Maintainer of the sound drivers until 2.1.x days. 2996D: Maintainer of the sound drivers until 2.1.x days.
@@ -3293,6 +3303,12 @@ S: 3 Ballow Crescent
3293S: MacGregor A.C.T 2615 3303S: MacGregor A.C.T 2615
3294S: Australia 3304S: Australia
3295 3305
3306N: Josh Triplett
3307E: josh@freedesktop.org
3308P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87 CA26 189B 9946 D0FE 7AFB
3309D: rcutorture maintainer
3310D: lock annotations, finding and fixing lock bugs
3311
3296N: Winfried Trümper 3312N: Winfried Trümper
3297E: winni@xpilot.org 3313E: winni@xpilot.org
3298W: http://www.shop.de/~winni/ 3314W: http://www.shop.de/~winni/
@@ -3562,11 +3578,11 @@ S: Fargo, North Dakota 58122
3562S: USA 3578S: USA
3563 3579
3564N: Steven Whitehouse 3580N: Steven Whitehouse
3565E: SteveW@ACM.org 3581E: steve@chygwyn.com
3566W: http://www.chygwyn.com/~steve 3582W: http://www.chygwyn.com/~steve
3567D: Linux DECnet project: http://www.sucs.swan.ac.uk/~rohan/DECnet/index.html 3583D: Linux DECnet project
3568D: Minor debugging of other networking protocols. 3584D: Minor debugging of other networking protocols.
3569D: Misc bug fixes and filesystem development 3585D: Misc bug fixes and GFS2 filesystem development
3570 3586
3571N: Hans-Joachim Widmaier 3587N: Hans-Joachim Widmaier
3572E: hjw@zvw.de 3588E: hjw@zvw.de
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 49c745720f47..2b5ac604948c 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -158,6 +158,7 @@ X!Ilib/string.c
158!Emm/filemap.c 158!Emm/filemap.c
159!Emm/memory.c 159!Emm/memory.c
160!Emm/vmalloc.c 160!Emm/vmalloc.c
161!Imm/page_alloc.c
161!Emm/mempool.c 162!Emm/mempool.c
162!Emm/page-writeback.c 163!Emm/page-writeback.c
163!Emm/truncate.c 164!Emm/truncate.c
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
index c684abf0d3b2..07a635590b36 100644
--- a/Documentation/DocBook/libata.tmpl
+++ b/Documentation/DocBook/libata.tmpl
@@ -14,7 +14,7 @@
14 </authorgroup> 14 </authorgroup>
15 15
16 <copyright> 16 <copyright>
17 <year>2003-2005</year> 17 <year>2003-2006</year>
18 <holder>Jeff Garzik</holder> 18 <holder>Jeff Garzik</holder>
19 </copyright> 19 </copyright>
20 20
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 1d50cf0c905e..f4dffadbcb00 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -221,3 +221,41 @@ over a rather long period of time, but improvements are always welcome!
221 disable irq on a given acquisition of that lock will result in 221 disable irq on a given acquisition of that lock will result in
222 deadlock as soon as the RCU callback happens to interrupt that 222 deadlock as soon as the RCU callback happens to interrupt that
223 acquisition's critical section. 223 acquisition's critical section.
224
22513. SRCU (srcu_read_lock(), srcu_read_unlock(), and synchronize_srcu())
226 may only be invoked from process context. Unlike other forms of
227 RCU, it -is- permissible to block in an SRCU read-side critical
228	section (demarcated by srcu_read_lock() and srcu_read_unlock()),
229 hence the "SRCU": "sleepable RCU". Please note that if you
230 don't need to sleep in read-side critical sections, you should
231 be using RCU rather than SRCU, because RCU is almost always
232 faster and easier to use than is SRCU.
233
234 Also unlike other forms of RCU, explicit initialization
235 and cleanup is required via init_srcu_struct() and
236 cleanup_srcu_struct(). These are passed a "struct srcu_struct"
237 that defines the scope of a given SRCU domain. Once initialized,
238 the srcu_struct is passed to srcu_read_lock(), srcu_read_unlock()
239 and synchronize_srcu(). A given synchronize_srcu() waits only
240 for SRCU read-side critical sections governed by srcu_read_lock()
241	and srcu_read_unlock() calls that have been passed the same
242 srcu_struct. This property is what makes sleeping read-side
243 critical sections tolerable -- a given subsystem delays only
244 its own updates, not those of other subsystems using SRCU.
245 Therefore, SRCU is less prone to OOM the system than RCU would
246 be if RCU's read-side critical sections were permitted to
247 sleep.
248
249 The ability to sleep in read-side critical sections does not
250 come for free. First, corresponding srcu_read_lock() and
251 srcu_read_unlock() calls must be passed the same srcu_struct.
252 Second, grace-period-detection overhead is amortized only
253 over those updates sharing a given srcu_struct, rather than
254 being globally amortized as they are for other forms of RCU.
255 Therefore, SRCU should be used in preference to rw_semaphore
256 only in extremely read-intensive situations, or in situations
257 requiring SRCU's read-side deadlock immunity or low read-side
258 realtime latency.
259
260	Note that rcu_assign_pointer() and rcu_dereference() relate to
261 SRCU just as they do to other forms of RCU.
diff --git a/Documentation/RCU/rcu.txt b/Documentation/RCU/rcu.txt
index 02e27bf1d365..f84407cba816 100644
--- a/Documentation/RCU/rcu.txt
+++ b/Documentation/RCU/rcu.txt
@@ -45,7 +45,8 @@ o How can I see where RCU is currently used in the Linux kernel?
45 45
46 Search for "rcu_read_lock", "rcu_read_unlock", "call_rcu", 46 Search for "rcu_read_lock", "rcu_read_unlock", "call_rcu",
47 "rcu_read_lock_bh", "rcu_read_unlock_bh", "call_rcu_bh", 47 "rcu_read_lock_bh", "rcu_read_unlock_bh", "call_rcu_bh",
48 "synchronize_rcu", and "synchronize_net". 48 "srcu_read_lock", "srcu_read_unlock", "synchronize_rcu",
49 "synchronize_net", and "synchronize_srcu".
49 50
50o What guidelines should I follow when writing code that uses RCU? 51o What guidelines should I follow when writing code that uses RCU?
51 52
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index a4948591607d..25a3c3f7d378 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -28,6 +28,15 @@ nreaders This is the number of RCU reading threads supported.
28 To properly exercise RCU implementations with preemptible 28 To properly exercise RCU implementations with preemptible
29 read-side critical sections. 29 read-side critical sections.
30 30
31nfakewriters This is the number of RCU fake writer threads to run. Fake
32 writer threads repeatedly use the synchronous "wait for
33 current readers" function of the interface selected by
34 torture_type, with a delay between calls to allow for various
35 different numbers of writers running in parallel.
36 nfakewriters defaults to 4, which provides enough parallelism
37 to trigger special cases caused by multiple writers, such as
38 the synchronize_srcu() early return optimization.
39
31stat_interval The number of seconds between output of torture 40stat_interval The number of seconds between output of torture
32 statistics (via printk()). Regardless of the interval, 41 statistics (via printk()). Regardless of the interval,
33 statistics are printed when the module is unloaded. 42 statistics are printed when the module is unloaded.
@@ -44,9 +53,12 @@ test_no_idle_hz Whether or not to test the ability of RCU to operate in
44 a kernel that disables the scheduling-clock interrupt to 53 a kernel that disables the scheduling-clock interrupt to
45 idle CPUs. Boolean parameter, "1" to test, "0" otherwise. 54 idle CPUs. Boolean parameter, "1" to test, "0" otherwise.
46 55
47torture_type The type of RCU to test: "rcu" for the rcu_read_lock() 56torture_type The type of RCU to test: "rcu" for the rcu_read_lock() API,
48 API, "rcu_bh" for the rcu_read_lock_bh() API, and "srcu" 57 "rcu_sync" for rcu_read_lock() with synchronous reclamation,
49 for the "srcu_read_lock()" API. 58 "rcu_bh" for the rcu_read_lock_bh() API, "rcu_bh_sync" for
59 rcu_read_lock_bh() with synchronous reclamation, "srcu" for
60 the "srcu_read_lock()" API, and "sched" for the use of
61 preempt_disable() together with synchronize_sched().
50 62
51verbose Enable debug printk()s. Default is disabled. 63verbose Enable debug printk()s. Default is disabled.
52 64
@@ -118,6 +130,21 @@ o "Free-Block Circulation": Shows the number of torture structures
118 as it is only incremented if a torture structure's counter 130 as it is only incremented if a torture structure's counter
119 somehow gets incremented farther than it should. 131 somehow gets incremented farther than it should.
120 132
133Different implementations of RCU can provide implementation-specific
134additional information. For example, SRCU provides the following:
135
136 srcu-torture: rtc: f8cf46a8 ver: 355 tfle: 0 rta: 356 rtaf: 0 rtf: 346 rtmbe: 0
137 srcu-torture: Reader Pipe: 559738 939 0 0 0 0 0 0 0 0 0
138 srcu-torture: Reader Batch: 560434 243 0 0 0 0 0 0 0 0
139 srcu-torture: Free-Block Circulation: 355 354 353 352 351 350 349 348 347 346 0
140 srcu-torture: per-CPU(idx=1): 0(0,1) 1(0,1) 2(0,0) 3(0,1)
141
142The first four lines are similar to those for RCU. The last line shows
143the per-CPU counter state. The numbers in parentheses are the values
144of the "old" and "current" counters for the corresponding CPU. The
145"idx" value maps the "old" and "current" values to the underlying array,
146and is useful for debugging.
147
121 148
122USAGE 149USAGE
123 150
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 820fee236967..e0d6d99b8f9b 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -778,6 +778,8 @@ Markers for RCU read-side critical sections:
778 rcu_read_unlock 778 rcu_read_unlock
779 rcu_read_lock_bh 779 rcu_read_lock_bh
780 rcu_read_unlock_bh 780 rcu_read_unlock_bh
781 srcu_read_lock
782 srcu_read_unlock
781 783
782RCU pointer/list traversal: 784RCU pointer/list traversal:
783 785
@@ -804,6 +806,7 @@ RCU grace period:
804 synchronize_net 806 synchronize_net
805 synchronize_sched 807 synchronize_sched
806 synchronize_rcu 808 synchronize_rcu
809 synchronize_srcu
807 call_rcu 810 call_rcu
808 call_rcu_bh 811 call_rcu_bh
809 812
diff --git a/Documentation/ecryptfs.txt b/Documentation/ecryptfs.txt
new file mode 100644
index 000000000000..01d8a08351ac
--- /dev/null
+++ b/Documentation/ecryptfs.txt
@@ -0,0 +1,77 @@
1eCryptfs: A stacked cryptographic filesystem for Linux
2
3eCryptfs is free software. Please see the file COPYING for details.
4For documentation, please see the files in the doc/ subdirectory. For
5building and installation instructions please see the INSTALL file.
6
7Maintainer: Phillip Hellewell
8Lead developer: Michael A. Halcrow <mhalcrow@us.ibm.com>
9Developers: Michael C. Thompson
10 Kent Yoder
11Web Site: http://ecryptfs.sf.net
12
13This software is currently undergoing development. Make sure to
14maintain a backup copy of any data you write into eCryptfs.
15
16eCryptfs requires the userspace tools downloadable from the
17SourceForge site:
18
19http://sourceforge.net/projects/ecryptfs/
20
21Userspace requirements include:
22 - David Howells' userspace keyring headers and libraries (version
23 1.0 or higher), obtainable from
24 http://people.redhat.com/~dhowells/keyutils/
25 - Libgcrypt
26
27
28NOTES
29
30In the beta/experimental releases of eCryptfs, when you upgrade
31eCryptfs, you should copy the files to an unencrypted location and
32then copy the files back into the new eCryptfs mount to migrate the
33files.
34
35
36MOUNT-WIDE PASSPHRASE
37
38Create a new directory into which eCryptfs will write its encrypted
39files (i.e., /root/crypt). Then, create the mount point directory
40(i.e., /mnt/crypt). Now it's time to mount eCryptfs:
41
42mount -t ecryptfs /root/crypt /mnt/crypt
43
44You should be prompted for a passphrase and a salt (the salt may be
45blank).
46
47Try writing a new file:
48
49echo "Hello, World" > /mnt/crypt/hello.txt
50
51The operation will complete. Notice that there is a new file in
52/root/crypt that is at least 12288 bytes in size (depending on your
53host page size). This is the encrypted underlying file for what you
54just wrote. To test reading, from start to finish, you need to clear
55the user session keyring:
56
57keyctl clear @u
58
59Then umount /mnt/crypt and mount again per the instructions given
60above.
61
62cat /mnt/crypt/hello.txt
63
64
65NOTES
66
67eCryptfs version 0.1 should only be mounted on (1) empty directories
68or (2) directories containing files only created by eCryptfs. If you
69mount a directory that has pre-existing files not created by eCryptfs,
70then behavior is undefined. Do not run eCryptfs in higher verbosity
71levels unless you are doing so for the sole purpose of debugging or
72development, since secret values will be written out to the system log
73in that case.
74
75
76Mike Halcrow
77mhalcrow@us.ibm.com
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 42b95e0ad558..24f3c63b3017 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -29,14 +29,6 @@ Who: Adrian Bunk <bunk@stusta.de>
29 29
30--------------------------- 30---------------------------
31 31
32What: drivers that were depending on OBSOLETE_OSS_DRIVER
33 (config options already removed)
34When: before 2.6.19
35Why: OSS drivers with ALSA replacements
36Who: Adrian Bunk <bunk@stusta.de>
37
38---------------------------
39
40What: raw1394: requests of type RAW1394_REQ_ISO_SEND, RAW1394_REQ_ISO_LISTEN 32What: raw1394: requests of type RAW1394_REQ_ISO_SEND, RAW1394_REQ_ISO_LISTEN
41When: November 2006 33When: November 2006
42Why: Deprecated in favour of the new ioctl-based rawiso interface, which is 34Why: Deprecated in favour of the new ioctl-based rawiso interface, which is
diff --git a/Documentation/filesystems/gfs2.txt b/Documentation/filesystems/gfs2.txt
new file mode 100644
index 000000000000..593004b6bbab
--- /dev/null
+++ b/Documentation/filesystems/gfs2.txt
@@ -0,0 +1,43 @@
1Global File System
2------------------
3
4http://sources.redhat.com/cluster/
5
6GFS is a cluster file system. It allows a cluster of computers to
7simultaneously use a block device that is shared between them (with FC,
8iSCSI, NBD, etc). GFS reads and writes to the block device like a local
 9file system, but also uses a lock module to allow the computers to coordinate
10their I/O so file system consistency is maintained. One of the nifty
11features of GFS is perfect consistency -- changes made to the file system
12on one machine show up immediately on all other machines in the cluster.
13
 14GFS uses interchangeable inter-node locking mechanisms. Different lock
15modules can plug into GFS and each file system selects the appropriate
16lock module at mount time. Lock modules include:
17
18 lock_nolock -- allows gfs to be used as a local file system
19
20 lock_dlm -- uses a distributed lock manager (dlm) for inter-node locking
21 The dlm is found at linux/fs/dlm/
22
23In addition to interfacing with an external locking manager, a gfs lock
24module is responsible for interacting with external cluster management
25systems. Lock_dlm depends on user space cluster management systems found
26at the URL above.
27
28To use gfs as a local file system, no external clustering systems are
29needed, simply:
30
31 $ mkfs -t gfs2 -p lock_nolock -j 1 /dev/block_device
32 $ mount -t gfs2 /dev/block_device /dir
33
34GFS2 is not on-disk compatible with previous versions of GFS.
35
36The following man pages can be found at the URL above:
37 gfs2_fsck to repair a filesystem
38 gfs2_grow to expand a filesystem online
39 gfs2_jadd to add journals to a filesystem online
40 gfs2_tool to manipulate, examine and tune a filesystem
41 gfs2_quota to examine and change quota values in a filesystem
42 mount.gfs2 to help mount(8) mount a filesystem
43 mkfs.gfs2 to make a filesystem
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 7f34778dd23b..125093c3ef76 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -1,7 +1,7 @@
1Introduction 1Introduction
2------------ 2------------
3 3
4The configuration database is collection of configuration options 4The configuration database is a collection of configuration options
5organized in a tree structure: 5organized in a tree structure:
6 6
7 +- Code maturity level options 7 +- Code maturity level options
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index e2cbd59cf2d0..50f4eddf899c 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -390,7 +390,7 @@ more details, with real examples.
390 The kernel may be built with several different versions of 390 The kernel may be built with several different versions of
391 $(CC), each supporting a unique set of features and options. 391 $(CC), each supporting a unique set of features and options.
392 kbuild provide basic support to check for valid options for $(CC). 392 kbuild provide basic support to check for valid options for $(CC).
393 $(CC) is useally the gcc compiler, but other alternatives are 393 $(CC) is usually the gcc compiler, but other alternatives are
394 available. 394 available.
395 395
396 as-option 396 as-option
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 12b3b24bfd2f..ff571f9298e0 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -289,9 +289,6 @@ and is between 256 and 4096 characters. It is defined in the file
289 289
290 autotest [IA64] 290 autotest [IA64]
291 291
292 awe= [HW,OSS] AWE32/SB32/AWE64 wave table synth
293 Format: <io>,<memsize>,<isapnp>
294
295 aztcd= [HW,CD] Aztech CD268 CDROM driver 292 aztcd= [HW,CD] Aztech CD268 CDROM driver
296 Format: <io>,0x79 (?) 293 Format: <io>,0x79 (?)
297 294
@@ -536,10 +533,6 @@ and is between 256 and 4096 characters. It is defined in the file
536 Default value is 0. 533 Default value is 0.
537 Value can be changed at runtime via /selinux/enforce. 534 Value can be changed at runtime via /selinux/enforce.
538 535
539 es1370= [HW,OSS]
540 Format: <lineout>[,<micbias>]
541 See also header of sound/oss/es1370.c.
542
543 es1371= [HW,OSS] 536 es1371= [HW,OSS]
544 Format: <spdif>,[<nomix>,[<amplifier>]] 537 Format: <spdif>,[<nomix>,[<amplifier>]]
545 See also header of sound/oss/es1371.c. 538 See also header of sound/oss/es1371.c.
@@ -580,9 +573,6 @@ and is between 256 and 4096 characters. It is defined in the file
580 gscd= [HW,CD] 573 gscd= [HW,CD]
581 Format: <io> 574 Format: <io>
582 575
583 gus= [HW,OSS]
584 Format: <io>,<irq>,<dma>,<dma16>
585
586 gvp11= [HW,SCSI] 576 gvp11= [HW,SCSI]
587 577
588 hashdist= [KNL,NUMA] Large hashes allocated during boot 578 hashdist= [KNL,NUMA] Large hashes allocated during boot
@@ -841,12 +831,6 @@ and is between 256 and 4096 characters. It is defined in the file
841 (machvec) in a generic kernel. 831 (machvec) in a generic kernel.
842 Example: machvec=hpzx1_swiotlb 832 Example: machvec=hpzx1_swiotlb
843 833
844 mad16= [HW,OSS] Format:
845 <io>,<irq>,<dma>,<dma16>,<mpu_io>,<mpu_irq>,<joystick>
846
847 maui= [HW,OSS]
848 Format: <io>,<irq>
849
850 max_loop= [LOOP] Maximum number of loopback devices that can 834 max_loop= [LOOP] Maximum number of loopback devices that can
851 be mounted 835 be mounted
852 Format: <1-256> 836 Format: <1-256>
@@ -1114,9 +1098,6 @@ and is between 256 and 4096 characters. It is defined in the file
1114 opl3= [HW,OSS] 1098 opl3= [HW,OSS]
1115 Format: <io> 1099 Format: <io>
1116 1100
1117 opl3sa= [HW,OSS]
1118 Format: <io>,<irq>,<dma>,<dma2>,<mpu_io>,<mpu_irq>
1119
1120 opl3sa2= [HW,OSS] Format: 1101 opl3sa2= [HW,OSS] Format:
1121 <io>,<irq>,<dma>,<dma2>,<mss_io>,<mpu_io>,<ymode>,<loopback>[,<isapnp>,<multiple] 1102 <io>,<irq>,<dma>,<dma2>,<mss_io>,<mpu_io>,<ymode>,<loopback>[,<isapnp>,<multiple]
1122 1103
@@ -1357,10 +1338,6 @@ and is between 256 and 4096 characters. It is defined in the file
1357 rcu.qlowmark= [KNL,BOOT] Set threshold of queued 1338 rcu.qlowmark= [KNL,BOOT] Set threshold of queued
1358 RCU callbacks below which batch limiting is re-enabled. 1339 RCU callbacks below which batch limiting is re-enabled.
1359 1340
1360 rcu.rsinterval= [KNL,BOOT,SMP] Set the number of additional
1361 RCU callbacks to queued before forcing reschedule
1362 on all cpus.
1363
1364 rdinit= [KNL] 1341 rdinit= [KNL]
1365 Format: <full_path> 1342 Format: <full_path>
1366 Run specified binary instead of /init from the ramdisk, 1343 Run specified binary instead of /init from the ramdisk,
@@ -1455,9 +1432,6 @@ and is between 256 and 4096 characters. It is defined in the file
1455 1432
1456 sg_def_reserved_size= [SCSI] 1433 sg_def_reserved_size= [SCSI]
1457 1434
1458 sgalaxy= [HW,OSS]
1459 Format: <io>,<irq>,<dma>,<dma2>,<sgbase>
1460
1461 shapers= [NET] 1435 shapers= [NET]
1462 Maximal number of shapers. 1436 Maximal number of shapers.
1463 1437
@@ -1598,9 +1572,6 @@ and is between 256 and 4096 characters. It is defined in the file
1598 1572
1599 snd-ymfpci= [HW,ALSA] 1573 snd-ymfpci= [HW,ALSA]
1600 1574
1601 sonicvibes= [HW,OSS]
1602 Format: <reverb>
1603
1604 sonycd535= [HW,CD] 1575 sonycd535= [HW,CD]
1605 Format: <io>[,<irq>] 1576 Format: <io>[,<irq>]
1606 1577
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 1ccc8a515b44..27b457c09729 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1440,6 +1440,258 @@ platforms are moved over to use the flattened-device-tree model.
1440 descriptor-types-mask = <012b0ebf>; 1440 descriptor-types-mask = <012b0ebf>;
1441 }; 1441 };
1442 1442
1443 h) Board Control and Status (BCSR)
1444
1445 Required properties:
1446
1447 - device_type : Should be "board-control"
1448 - reg : Offset and length of the register set for the device
1449
1450 Example:
1451
1452 bcsr@f8000000 {
1453 device_type = "board-control";
1454 reg = <f8000000 8000>;
1455 };
1456
1457 i) Freescale QUICC Engine module (QE)
 1458 This represents the qe module that is installed on PowerQUICC II Pro.
1459 Hopefully it will merge backward compatibility with CPM/CPM2.
1460 Basically, it is a bus of devices, that could act more or less
1461 as a complete entity (UCC, USB etc ). All of them should be siblings on
1462 the "root" qe node, using the common properties from there.
 1463 The description below applies to the qe of MPC8360 and
1464 more nodes and properties would be extended in the future.
1465
1466 i) Root QE device
1467
1468 Required properties:
1469 - device_type : should be "qe";
1470 - model : precise model of the QE, Can be "QE", "CPM", or "CPM2"
1471 - reg : offset and length of the device registers.
1472 - bus-frequency : the clock frequency for QUICC Engine.
1473
1474 Recommended properties
1475 - brg-frequency : the internal clock source frequency for baud-rate
1476 generators in Hz.
1477
1478 Example:
1479 qe@e0100000 {
1480 #address-cells = <1>;
1481 #size-cells = <1>;
1482 #interrupt-cells = <2>;
1483 device_type = "qe";
1484 model = "QE";
1485 ranges = <0 e0100000 00100000>;
1486 reg = <e0100000 480>;
1487 brg-frequency = <0>;
1488 bus-frequency = <179A7B00>;
1489 }
1490
1491
1492 ii) SPI (Serial Peripheral Interface)
1493
1494 Required properties:
1495 - device_type : should be "spi".
1496 - compatible : should be "fsl_spi".
1497 - mode : the spi operation mode, it can be "cpu" or "qe".
1498 - reg : Offset and length of the register set for the device
1499 - interrupts : <a b> where a is the interrupt number and b is a
1500 field that represents an encoding of the sense and level
1501 information for the interrupt. This should be encoded based on
1502 the information in section 2) depending on the type of interrupt
1503 controller you have.
1504 - interrupt-parent : the phandle for the interrupt controller that
1505 services interrupts for this device.
1506
1507 Example:
1508 spi@4c0 {
1509 device_type = "spi";
1510 compatible = "fsl_spi";
1511 reg = <4c0 40>;
1512 interrupts = <82 0>;
1513 interrupt-parent = <700>;
1514 mode = "cpu";
1515 };
1516
1517
1518 iii) USB (Universal Serial Bus Controller)
1519
1520 Required properties:
1521 - device_type : should be "usb".
1522 - compatible : could be "qe_udc" or "fhci-hcd".
 1523 - mode : could be "host" or "slave".
1524 - reg : Offset and length of the register set for the device
1525 - interrupts : <a b> where a is the interrupt number and b is a
1526 field that represents an encoding of the sense and level
1527 information for the interrupt. This should be encoded based on
1528 the information in section 2) depending on the type of interrupt
1529 controller you have.
1530 - interrupt-parent : the phandle for the interrupt controller that
1531 services interrupts for this device.
1532
1533 Example(slave):
1534 usb@6c0 {
1535 device_type = "usb";
1536 compatible = "qe_udc";
1537 reg = <6c0 40>;
1538 interrupts = <8b 0>;
1539 interrupt-parent = <700>;
1540 mode = "slave";
1541 };
1542
1543
1544 iv) UCC (Unified Communications Controllers)
1545
1546 Required properties:
1547 - device_type : should be "network", "hldc", "uart", "transparent"
1548 "bisync" or "atm".
1549 - compatible : could be "ucc_geth" or "fsl_atm" and so on.
1550 - model : should be "UCC".
1551 - device-id : the ucc number(1-8), corresponding to UCCx in UM.
1552 - reg : Offset and length of the register set for the device
1553 - interrupts : <a b> where a is the interrupt number and b is a
1554 field that represents an encoding of the sense and level
1555 information for the interrupt. This should be encoded based on
1556 the information in section 2) depending on the type of interrupt
1557 controller you have.
1558 - interrupt-parent : the phandle for the interrupt controller that
1559 services interrupts for this device.
1560 - pio-handle : The phandle for the Parallel I/O port configuration.
1561 - rx-clock : represents the UCC receive clock source.
1562 0x00 : clock source is disabled;
1563 0x1~0x10 : clock source is BRG1~BRG16 respectively;
1564 0x11~0x28: clock source is QE_CLK1~QE_CLK24 respectively.
1565 - tx-clock: represents the UCC transmit clock source;
1566 0x00 : clock source is disabled;
1567 0x1~0x10 : clock source is BRG1~BRG16 respectively;
1568 0x11~0x28: clock source is QE_CLK1~QE_CLK24 respectively.
1569
1570 Required properties for network device_type:
1571 - mac-address : list of bytes representing the ethernet address.
1572 - phy-handle : The phandle for the PHY connected to this controller.
1573
1574 Example:
1575 ucc@2000 {
1576 device_type = "network";
1577 compatible = "ucc_geth";
1578 model = "UCC";
1579 device-id = <1>;
1580 reg = <2000 200>;
1581 interrupts = <a0 0>;
1582 interrupt-parent = <700>;
1583 mac-address = [ 00 04 9f 00 23 23 ];
1584 rx-clock = "none";
1585 tx-clock = "clk9";
1586 phy-handle = <212000>;
1587 pio-handle = <140001>;
1588 };
1589
1590
1591 v) Parallel I/O Ports
1592
1593 This node configures Parallel I/O ports for CPUs with QE support.
1594 The node should reside in the "soc" node of the tree. For each
 1595 device that uses parallel I/O ports, a child node should be created.
1596 See the definition of the Pin configuration nodes below for more
1597 information.
1598
1599 Required properties:
1600 - device_type : should be "par_io".
1601 - reg : offset to the register set and its length.
1602 - num-ports : number of Parallel I/O ports
1603
1604 Example:
1605 par_io@1400 {
1606 reg = <1400 100>;
1607 #address-cells = <1>;
1608 #size-cells = <0>;
1609 device_type = "par_io";
1610 num-ports = <7>;
1611 ucc_pin@01 {
1612 ......
1613 };
1614
1615
1616 vi) Pin configuration nodes
1617
1618 Required properties:
1619 - linux,phandle : phandle of this node; likely referenced by a QE
1620 device.
1621 - pio-map : array of pin configurations. Each pin is defined by 6
1622 integers. The six numbers are respectively: port, pin, dir,
1623 open_drain, assignment, has_irq.
1624 - port : port number of the pin; 0-6 represent port A-G in UM.
1625 - pin : pin number in the port.
1626 - dir : direction of the pin, should encode as follows:
1627
1628 0 = The pin is disabled
1629 1 = The pin is an output
1630 2 = The pin is an input
1631 3 = The pin is I/O
1632
1633 - open_drain : indicates the pin is normal or wired-OR:
1634
1635 0 = The pin is actively driven as an output
1636 1 = The pin is an open-drain driver. As an output, the pin is
1637 driven active-low, otherwise it is three-stated.
1638
1639 - assignment : function number of the pin according to the Pin Assignment
1640 tables in User Manual. Each pin can have up to 4 possible functions in
1641 QE and two options for CPM.
 1642 - has_irq : indicates if the pin is used as a source of external
1643 interrupts.
1644
1645 Example:
1646 ucc_pin@01 {
1647 linux,phandle = <140001>;
1648 pio-map = <
1649 /* port pin dir open_drain assignment has_irq */
1650 0 3 1 0 1 0 /* TxD0 */
1651 0 4 1 0 1 0 /* TxD1 */
1652 0 5 1 0 1 0 /* TxD2 */
1653 0 6 1 0 1 0 /* TxD3 */
1654 1 6 1 0 3 0 /* TxD4 */
1655 1 7 1 0 1 0 /* TxD5 */
1656 1 9 1 0 2 0 /* TxD6 */
1657 1 a 1 0 2 0 /* TxD7 */
1658 0 9 2 0 1 0 /* RxD0 */
1659 0 a 2 0 1 0 /* RxD1 */
1660 0 b 2 0 1 0 /* RxD2 */
1661 0 c 2 0 1 0 /* RxD3 */
1662 0 d 2 0 1 0 /* RxD4 */
1663 1 1 2 0 2 0 /* RxD5 */
1664 1 0 2 0 2 0 /* RxD6 */
1665 1 4 2 0 2 0 /* RxD7 */
1666 0 7 1 0 1 0 /* TX_EN */
1667 0 8 1 0 1 0 /* TX_ER */
1668 0 f 2 0 1 0 /* RX_DV */
1669 0 10 2 0 1 0 /* RX_ER */
1670 0 0 2 0 1 0 /* RX_CLK */
1671 2 9 1 0 3 0 /* GTX_CLK - CLK10 */
1672 2 8 2 0 1 0>; /* GTX125 - CLK9 */
1673 };
1674
1675 vii) Multi-User RAM (MURAM)
1676
1677 Required properties:
1678 - device_type : should be "muram".
 1679 - mode : could be "host" or "slave".
1680 - ranges : Should be defined as specified in 1) to describe the
1681 translation of MURAM addresses.
1682 - data-only : sub-node which defines the address area under MURAM
1683 bus that can be allocated as data/parameter
1684
1685 Example:
1686
1687 muram@10000 {
1688 device_type = "muram";
1689 ranges = <0 00010000 0000c000>;
1690
1691 data-only@0{
1692 reg = <0 c000>;
1693 };
1694 };
1443 1695
1444 More devices will be defined as this spec matures. 1696 More devices will be defined as this spec matures.
1445 1697
diff --git a/Documentation/sound/oss/AWE32 b/Documentation/sound/oss/AWE32
deleted file mode 100644
index b5908a66ff55..000000000000
--- a/Documentation/sound/oss/AWE32
+++ /dev/null
@@ -1,76 +0,0 @@
1 Installing and using Creative AWE midi sound under Linux.
2
3This documentation is devoted to the Creative Sound Blaster AWE32, AWE64 and
4SB32.
5
61) Make sure you have an ORIGINAL Creative SB32, AWE32 or AWE64 card. This
7 is important, because the driver works only with real Creative cards.
8
92) The first thing you need to do is re-compile your kernel with support for
10 your sound card. Run your favourite tool to configure the kernel and when
11 you get to the "Sound" menu you should enable support for the following:
12
13 Sound card support,
14 OSS sound modules,
15 100% Sound Blaster compatibles (SB16/32/64, ESS, Jazz16) support,
16 AWE32 synth
17
18 If your card is "Plug and Play" you will also need to enable these two
19 options, found under the "Plug and Play configuration" menu:
20
21 Plug and Play support
22 ISA Plug and Play support
23
24 Now compile and install the kernel in normal fashion. If you don't know
25 how to do this you can find instructions for this in the README file
26 located in the root directory of the kernel source.
27
283) Before you can start playing midi files you will have to load a sound
29 bank file. The utility needed for doing this is called "sfxload", and it
30 is one of the utilities found in a package called "awesfx". If this
31 package is not available in your distribution you can download the AWE
32 snapshot from Creative Labs Open Source website:
33
34 http://www.opensource.creative.com/snapshot.html
35
36 Once you have unpacked the AWE snapshot you will see a "awesfx"
37 directory. Follow the instructions in awesfx/docs/INSTALL to install the
38 utilities in this package. After doing this, sfxload should be installed
39 as:
40
41 /usr/local/bin/sfxload
42
43 To enable AWE general midi synthesis you should also get the sound bank
44 file for general midi from:
45
46 http://members.xoom.com/yar/synthgm.sbk.gz
47
48 Copy it to a directory of your choice, and unpack it there.
49
504) Edit /etc/modprobe.conf, and insert the following lines at the end of the
51 file:
52
53 alias sound-slot-0 sb
54 alias sound-service-0-1 awe_wave
55 install awe_wave /sbin/modprobe --first-time -i awe_wave && /usr/local/bin/sfxload PATH_TO_SOUND_BANK_FILE
56
57 You will of course have to change "PATH_TO_SOUND_BANK_FILE" to the full
58 path of the sound bank file. That will enable the Sound Blaster and AWE
59 wave synthesis. To play midi files you should get one of these programs if
60 you don't already have them:
61
62 Playmidi: http://playmidi.openprojects.net
63
64 AWEMidi Player (drvmidi) Included in the previously mentioned AWE
65 snapshot.
66
67 You will probably have to pass the "-e" switch to playmidi to have it use
68 your midi device. drvmidi should work without switches.
69
70 If something goes wrong please e-mail me. All comments and suggestions are
71 welcome.
72
73 Yaroslav Rosomakho (alons55@dialup.ptt.ru)
74 http://www.yar.opennet.ru
75
76Last Updated: Feb 3 2001
diff --git a/Documentation/sound/oss/CMI8338 b/Documentation/sound/oss/CMI8338
deleted file mode 100644
index 387d058c3f95..000000000000
--- a/Documentation/sound/oss/CMI8338
+++ /dev/null
@@ -1,85 +0,0 @@
1Audio driver for CM8338/CM8738 chips by Chen-Li Tien
2
3
4HARDWARE SUPPORTED
5================================================================================
6C-Media CMI8338
7C-Media CMI8738
8On-board C-Media chips
9
10
11STEPS TO BUILD DRIVER
12================================================================================
13
14 1. Backup the Config.in and Makefile in the sound driver directory
15 (/usr/src/linux/driver/sound).
16 The Configure.help provide help when you config driver in step
17 4, please backup the original one (/usr/src/linux/Document) and
18 copy this file.
19 The cmpci is document for the driver in detail, please copy it
20 to /usr/src/linux/Document/sound so you can refer it. Backup if
21 there is already one.
22
23 2. Extract the tar file by 'tar xvzf cmpci-xx.tar.gz' in the above
24 directory.
25
26 3. Change directory to /usr/src/linux
27
28 4. Config cm8338 driver by 'make menuconfig', 'make config' or
29 'make xconfig' command.
30
31 5. Please select Sound Card (CONFIG_SOUND=m) support and CMPCI
32 driver (CONFIG_SOUND_CMPCI=m) as modules. Resident mode not tested.
33 For driver option, please refer 'DRIVER PARAMETER'
34
35 6. Compile the kernel if necessary.
36
37 7. Compile the modules by 'make modules'.
38
39 8. Install the modules by 'make modules_install'
40
41
42INSTALL DRIVER
43================================================================================
44
45 1. Before first time to run the driver, create module dependency by
46 'depmod -a'
47
48 2. To install the driver manually, enter 'modprobe cmpci'.
49
50 3. Driver installation for various distributions:
51
52 a. Slackware 4.0
53 Add the 'modprobe cmpci' command in your /etc/rc.d/rc.modules
54 file.so you can start the driver automatically each time booting.
55
56 b. Caldera OpenLinux 2.2
57 Use LISA to load the cmpci module.
58
59 c. RedHat 6.0 and S.u.S.E. 6.1
60 Add following command in /etc/conf.modules:
61
62 alias sound cmpci
63
64 also visit http://www.cmedia.com.tw for installation instruction.
65
66DRIVER PARAMETER
67================================================================================
68
69 Some functions for the cm8738 can be configured in Kernel Configuration
70 or modules parameters. Set these parameters to 1 to enable.
71
72 mpuio: I/O ports base for MPU-401, 0 if disabled.
73 fmio: I/O ports base for OPL-3, 0 if disabled.
74 spdif_inverse:Inverse the S/PDIF-in signal, this depends on your
75 CD-ROM or DVD-ROM.
76 spdif_loop: Enable S/PDIF loop, this route S/PDIF-in to S/PDIF-out
77 directly.
78 speakers: Number of speakers used.
79 use_line_as_rear:Enable this if you want to use line-in as
80 rear-out.
81 use_line_as_bass:Enable this if you want to use line-in as
82 bass-out.
83 joystick: Enable joystick. You will need to install Linux joystick
84 driver.
85
diff --git a/Documentation/sound/oss/INSTALL.awe b/Documentation/sound/oss/INSTALL.awe
deleted file mode 100644
index 310f42ca1e83..000000000000
--- a/Documentation/sound/oss/INSTALL.awe
+++ /dev/null
@@ -1,134 +0,0 @@
1================================================================
2 INSTALLATION OF AWE32 SOUND DRIVER FOR LINUX
3 Takashi Iwai <iwai@ww.uni-erlangen.de>
4================================================================
5
6----------------------------------------------------------------
7* Attention to SB-PnP Card Users
8
9If you're using PnP cards, the initialization of PnP is required
10before loading this driver. You have now three options:
11 1. Use isapnptools.
12 2. Use in-kernel isapnp support.
13 3. Initialize PnP on DOS/Windows, then boot linux by loadlin.
14In this document, only the case 1 case is treated.
15
16----------------------------------------------------------------
17* Installation on Red Hat 5.0 Sound Driver
18
19Please use install-rh.sh under RedHat5.0 directory.
20DO NOT USE install.sh below.
21See INSTALL.RH for more details.
22
23----------------------------------------------------------------
24* Installation/Update by Shell Script
25
26 1. Become root
27
28 % su
29
30 2. If you have never configured the kernel tree yet, run make config
31 once (to make dependencies and symlinks).
32
33 # cd /usr/src/linux
34 # make xconfig
35
36 3. Run install.sh script
37
38 # sh ./install.sh
39
40 4. Configure your kernel
41
42 (for Linux 2.[01].x user)
43 # cd /usr/src/linux
44 # make xconfig (or make menuconfig)
45
46 (for Linux 1.2.x user)
47 # cd /usr/src/linux
48 # make config
49
50 Answer YES to both "lowlevel drivers" and "AWE32 wave synth" items
51 in Sound menu. ("lowlevel drivers" will appear only in 2.x
52 kernel.)
53
54 5. Make your kernel (and modules), and install them as usual.
55
56 5a. make kernel image
57 # make zImage
58
59 5b. make modules and install them
60 # make modules && make modules_install
61
62 5c. If you're using lilo, copy the kernel image and run lilo.
63 Otherwise, copy the kernel image to suitable directory or
64 media for your system.
65
66 6. Reboot the kernel if necessary.
67 - If you updated only the modules, you don't have to reboot
68 the system. Just remove the old sound modules here.
69 in
70 # rmmod sound.o (linux-2.0 or OSS/Free)
71 # rmmod awe_wave.o (linux-2.1)
72
73 7. If your AWE card is a PnP and not initialized yet, you'll have to
74 do it by isapnp tools. Otherwise, skip to 8.
75
76 This section described only a brief explanation. For more
77 details, please see the AWE64-Mini-HOWTO or isapnp tools FAQ.
78
79 7a. If you have no isapnp.conf file, generate it by pnpdump.
80 Otherwise, skip to 7d.
81 # pnpdump > /etc/isapnp.conf
82
83 7b. Edit isapnp.conf file. Comment out the appropriate
84 lines containing desirable I/O ports, DMA and IRQs.
85 Don't forget to enable (ACT Y) line.
86
87 7c. Add two i/o ports (0xA20 and 0xE20) in WaveTable part.
88 ex)
89 (CONFIGURE CTL0048/58128 (LD 2
90 # ANSI string -->WaveTable<--
91 (IO 0 (BASE 0x0620))
92 (IO 1 (BASE 0x0A20))
93 (IO 2 (BASE 0x0E20))
94 (ACT Y)
95 ))
96
97 7d. Load the config file.
98 CAUTION: This will reset all PnP cards!
99
100 # isapnp /etc/isapnp.conf
101
102 8. Load the sound module (if you configured it as a module):
103
104 for 2.0 kernel or OSS/Free monolithic module:
105
106 # modprobe sound.o
107
108 for 2.1 kernel:
109
110 # modprobe sound
111 # insmod uart401
112 # insmod sb io=0x220 irq=5 dma=1 dma16=5 mpu_io=0x330
113 (These values depend on your settings.)
114 # insmod awe_wave
115 (Be sure to load awe_wave after sb!)
116
117 See Documentation/sound/oss/AWE32 for
118 more details.
119
120 9. (only for obsolete systems) If you don't have /dev/sequencer
121 device file, make it according to Readme.linux file on
122 /usr/src/linux/drivers/sound. (Run a shell script included in
123 that file). <-- This file no longer exists in the recent kernels!
124
125 10. OK, load your own soundfont file, and enjoy MIDI!
126
127 % sfxload synthgm.sbk
128 % drvmidi foo.mid
129
130 11. For more advanced use (eg. dynamic loading, virtual bank and
131 etc.), please read the awedrv FAQ or the instructions in awesfx
132 and awemidi packages.
133
134Good luck!
diff --git a/Documentation/sound/oss/MAD16 b/Documentation/sound/oss/MAD16
deleted file mode 100644
index 865dbd848742..000000000000
--- a/Documentation/sound/oss/MAD16
+++ /dev/null
@@ -1,56 +0,0 @@
1(This recipe has been edited to update the configuration symbols,
2 and change over to modprobe.conf for 2.6)
3
4From: Shaw Carruthers <shaw@shawc.demon.co.uk>
5
6I have been using mad16 sound for some time now with no problems, current
7kernel 2.1.89
8
9lsmod shows:
10
11mad16 5176 0
12sb 22044 0 [mad16]
13uart401 5576 0 [mad16 sb]
14ad1848 14176 1 [mad16]
15sound 61928 0 [mad16 sb uart401 ad1848]
16
17.config has:
18
19CONFIG_SOUND=m
20CONFIG_SOUND_ADLIB=m
21CONFIG_SOUND_MAD16=m
22CONFIG_SOUND_YM3812=m
23
24modprobe.conf has:
25
26alias char-major-14-* mad16
27options sb mad16=1
28options mad16 io=0x530 irq=7 dma=0 dma16=1 && /usr/local/bin/aumix -w 15 -p 20 -m 0 -1 0 -2 0 -3 0 -i 0
29
30
31To get the built in mixer to work this needs to be:
32
33options adlib_card io=0x388 # FM synthesizer
34options sb mad16=1
35options mad16 io=0x530 irq=7 dma=0 dma16=1 mpu_io=816 mpu_irq=5 && /usr/local/bin/aumix -w 15 -p 20 -m 0 -1 0 -2 0 -3 0 -i 0
36
37The addition of the "mpu_io=816 mpu_irq=5" to the mad16 options line is
38
39------------------------------------------------------------------------
40The mad16 module in addition supports the following options:
41
42option: meaning: default:
43joystick=0,1 disabled, enabled disabled
44cdtype=0x00,0x02,0x04, disabled, Sony CDU31A, disabled
45 0x06,0x08,0x0a Mitsumi, Panasonic,
46 Secondary IDE, Primary IDE
47cdport=0x340,0x320, 0x340
48 0x330,0x360
49cdirq=0,3,5,7,9,10,11 disabled, IRQ3, ... disabled
50cddma=0,5,6,7 disabled, DMA5, ... DMA5 for Mitsumi or IDE
51cddma=0,1,2,3 disabled, DMA1, ... DMA3 for Sony or Panasonic
52opl4=0,1 OPL3, OPL4 OPL3
53
54for more details see linux/drivers/sound/mad16.c
55
56Rui Sousa
diff --git a/Documentation/sound/oss/Maestro b/Documentation/sound/oss/Maestro
deleted file mode 100644
index 4a80eb3f8e00..000000000000
--- a/Documentation/sound/oss/Maestro
+++ /dev/null
@@ -1,123 +0,0 @@
1 An OSS/Lite Driver for the ESS Maestro family of sound cards
2
3 Zach Brown, December 1999
4
5Driver Status and Availability
6------------------------------
7
8The most recent version of this driver will hopefully always be available at
9 http://www.zabbo.net/maestro/
10
11I will try and maintain the most recent stable version of the driver
12in both the stable and development kernel lines.
13
14ESS Maestro Chip Family
15-----------------------
16
17There are 3 main variants of the ESS Maestro PCI sound chip. The first
18is the Maestro 1. It was originally produced by Platform Tech as the
19'AGOGO'. It can be recognized by Platform Tech's PCI ID 0x1285 with
200x0100 as the device ID. It was put on some sound boards and a few laptops.
21ESS bought the design and cleaned it up as the Maestro 2. This starts
22their marking with the ESS vendor ID 0x125D and the 'year' device IDs.
23The Maestro 2 claims 0x1968 while the Maestro 2e has 0x1978.
24
25The various families of Maestro are mostly identical as far as this
26driver is concerned. It doesn't touch the DSP parts that differ (though
27it could for FM synthesis).
28
29Driver OSS Behavior
30--------------------
31
32This OSS driver exports /dev/mixer and /dev/dsp to applications, which
33mostly adhere to the OSS spec. This driver doesn't register itself
34with /dev/sndstat, so don't expect information to appear there.
35
36The /dev/dsp device exported behaves almost as expected. Playback is
37supported in all the various lovely formats. 8/16bit stereo/mono from
388khz to 48khz, and mmap()ing for playback behaves. Capture/recording
39is limited due to oddities with the Maestro hardware. One can only
40record in 16bit stereo. For recording the maestro uses non interleaved
41stereo buffers so that mmap()ing the incoming data does not result in
42a ring buffer of LRLR data. mmap()ing of the read buffers is therefore
43disallowed until this can be cleaned up.
44
45/dev/mixer is an interface to the AC'97 codec on the Maestro. It is
46worth noting that there are a variety of AC'97s that can be wired to
47the Maestro. Which is used is entirely up to the hardware implementor.
48This should only be visible to the user by the presence, or lack, of
49'Bass' and 'Treble' sliders in the mixer. Not all AC'97s have them.
50
51The driver doesn't support MIDI or FM playback at the moment. Typically
52the Maestro is wired to an MPU MIDI chip, but some hardware implementations
53don't. We need to assemble a white list of hardware implementations that
54have MIDI wired properly before we can claim to support it safely.
55
56Compiling and Installing
57------------------------
58
59With the drivers inclusion into the kernel, compiling and installing
60is the same as most OSS/Lite modular sound drivers. Compilation
61of the driver is enabled through the CONFIG_SOUND_MAESTRO variable
62in the config system.
63
64It may be modular or statically linked. If it is modular it should be
65installed with the rest of the modules for the kernel on the system.
66Typically this will be in /lib/modules/ somewhere. 'alias sound maestro'
67should also be added to your module configs (typically /etc/conf.modules)
68if you're using modular OSS/Lite sound and want to default to using a
69maestro chip.
70
71As this is a PCI device, the module does not need to be informed of
72any IO or IRQ resources it should use, it divines these from the
73system. Sometimes, on sucky PCs, the BIOS fails to allocated resources
74for the maestro. This will result in a message like:
75 maestro: PCI subsystem reports IRQ 0, this might not be correct.
76from the kernel. Should this happen the sound chip most likely will
77not operate correctly. To solve this one has to dig through their BIOS
78(typically entered by hitting a hot key at boot time) and figure out
79what magic needs to happen so that the BIOS will reward the maestro with
80an IRQ. This operation is incredibly system specific, so you're on your
81own. Sometimes the magic lies in 'PNP Capable Operating System' settings.
82
83There are very few options to the driver. One is 'debug' which will
84tell the driver to print minimal debugging information as it runs. This
85can be collected with 'dmesg' or through the klogd daemon.
86
87The other, more interesting option, is 'dsps_order'. Typically at
88install time the driver will only register one available /dev/dsp device
89for its use. The 'dsps_order' module parameter allows for more devices
90to be allocated, as a power of two. Up to 4 devices can be registered
91( dsps_order=2 ). These devices act as fully distinct units and use
92separate channels in the maestro.
93
94Power Management
95----------------
96
97As of version 0.14, this driver has a minimal understanding of PCI
98Power Management. If it finds a valid power management capability
99on the PCI device it will attempt to use the power management
100functions of the maestro. It will only do this on Maestro 2Es and
101only on machines that are known to function well. You can
102force the use of power management by setting the 'use_pm' module
103option to 1, or can disable it entirely by setting it to 0.
104
105When using power management, the driver does a few things
106differently. It will keep the chip in a lower power mode
107when the module is inserted but /dev/dsp is not open. This
108allows the mixer to function but turns off the clocks
109on other parts of the chip. When /dev/dsp is opened the chip
110is brought into full power mode, and brought back down
111when it is closed. It also powers down the chip entirely
112when the module is removed or the machine is shutdown. This
113can have nonobvious consequences. CD audio may not work
114after a power managing driver is removed. Also, software that
115doesn't understand power management may not be able to talk
116to the powered down chip until the machine goes through a hard
117reboot to bring it back.
118
119.. more details ..
120------------------
121
122drivers/sound/maestro.c contains comments that hopefully explain
123the maestro implementation.
diff --git a/Documentation/sound/oss/Maestro3 b/Documentation/sound/oss/Maestro3
deleted file mode 100644
index a113718e8034..000000000000
--- a/Documentation/sound/oss/Maestro3
+++ /dev/null
@@ -1,92 +0,0 @@
1 An OSS/Lite Driver for the ESS Maestro3 family of sound chips
2
3 Zach Brown, January 2001
4
5Driver Status and Availability
6------------------------------
7
8The most recent version of this driver will hopefully always be available at
9 http://www.zabbo.net/maestro3/
10
11I will try and maintain the most recent stable version of the driver
12in both the stable and development kernel lines.
13
14Historically I've sucked pretty hard at actually doing that, however.
15
16ESS Maestro3 Chip Family
17-----------------------
18
19The 'Maestro3' is much like the Maestro2 chip. The noted improvement
20is the removal of the silicon in the '2' that did PCM mixing. All that
21work is now done through a custom DSP called the ASSP, the Asynchronous
22Specific Signal Processor.
23
24The 'Allegro' is a baby version of the Maestro3. I'm not entirely clear
25on the extent of the differences, but the driver supports them both :)
26
27The 'Allegro' shows up as PCI ID 0x1988 and the Maestro3 as 0x1998,
28both under ESS's vendor ID of 0x125D. The Maestro3 can also show up as
290x199a when hardware strapping is used.
30
31The chip can also act as a multi function device. The modem IDs follow
32the audio multimedia device IDs. (so the modem part of an Allegro shows
33up as 0x1989)
34
35Driver OSS Behavior
36--------------------
37
38This OSS driver exports /dev/mixer and /dev/dsp to applications, which
39mostly adhere to the OSS spec. This driver doesn't register itself
40with /dev/sndstat, so don't expect information to appear there.
41
42The /dev/dsp device exported behaves as expected. Playback is
43supported in all the various lovely formats. 8/16bit stereo/mono from
448khz to 48khz, with both read()/write(), and mmap().
45
46/dev/mixer is an interface to the AC'97 codec on the Maestro3. It is
47worth noting that there are a variety of AC'97s that can be wired to
48the Maestro3. Which is used is entirely up to the hardware implementor.
49This should only be visible to the user by the presence, or lack, of
50'Bass' and 'Treble' sliders in the mixer. Not all AC'97s have them.
51The Allegro has an onchip AC'97.
52
53The driver doesn't support MIDI or FM playback at the moment.
54
55Compiling and Installing
56------------------------
57
58With the drivers inclusion into the kernel, compiling and installing
59is the same as most OSS/Lite modular sound drivers. Compilation
60of the driver is enabled through the CONFIG_SOUND_MAESTRO3 variable
61in the config system.
62
63It may be modular or statically linked. If it is modular it should be
64installed with the rest of the modules for the kernel on the system.
65Typically this will be in /lib/modules/ somewhere. 'alias sound-slot-0
66maestro3' should also be added to your module configs (typically
67/etc/modprobe.conf) if you're using modular OSS/Lite sound and want to
68default to using a maestro3 chip.
69
70There are very few options to the driver. One is 'debug' which will
71tell the driver to print minimal debugging information as it runs. This
72can be collected with 'dmesg' or through the klogd daemon.
73
74One is 'external_amp', which tells the driver to attempt to enable
75an external amplifier. This defaults to '1', you can tell the driver
76not to bother enabling such an amplifier by setting it to '0'.
77
78And the last is 'gpio_pin', which tells the driver which GPIO pin number
79the external amp uses (0-15), The Allegro uses 8 by default, all others 1.
80If everything loads correctly and seems to be working but you get no sound,
81try tweaking this value.
82
83Systems known to need a different value
84 Panasonic ToughBook CF-72: gpio_pin=13
85
86Power Management
87----------------
88
89This driver has a minimal understanding of PCI Power Management. It will
90try and power down the chip when the system is suspended, and power
91it up when it is resumed. It will also try and power down the chip
92when the machine is shut down.
diff --git a/Documentation/sound/oss/NEWS b/Documentation/sound/oss/NEWS
deleted file mode 100644
index a81e0ef72ae9..000000000000
--- a/Documentation/sound/oss/NEWS
+++ /dev/null
@@ -1,42 +0,0 @@
1Linux 2.4 Sound Changes
22000-September-25
3Christoph Hellwig, <hch@infradead.org>
4
5
6
7=== isapnp support
8
9The Linux 2.4 Kernel does have reliable in-kernel isapnp support.
10Some drivers (sb.o, ad1816.o awe_wave.o) do now support automatically
11detecting and configuring isapnp devices.
12If you have a not yet supported isapnp soundcard, mail me the content
13of '/proc/isapnp' on your system and some information about your card
14and its driver(s) so I can try to get isapnp working for it.
15
16
17
18=== soundcard resources on kernel commandline
19
20Before Linux 2.4 you had to specify the resources for sounddrivers
21statically linked into the kernel at compile time
22(in make config/menuconfig/xconfig). In Linux 2.4 the resources are
23now specified at the boot-time kernel commandline (e.g. the lilo
24'append=' line or everything that's after the kernel name in grub).
25Read the Configure.help entry for your card for the parameters.
26
27
28=== softoss is gone
29
30In Linux 2.4 the softoss in-kernel software synthesizer is no longer available.
31Use a user space software synthesizer like timidity instead.
32
33
34
35=== /dev/sndstat and /proc/sound are gone
36
37In older Linux versions those files exported some information about the
38OSS/Free configuration to userspace. In Linux 2.3 they were removed because
39they did not support the growing number of pci soundcards and there were
40some general problems with this interface.
41
42
diff --git a/Documentation/sound/oss/OPL3-SA b/Documentation/sound/oss/OPL3-SA
deleted file mode 100644
index 66a91835d918..000000000000
--- a/Documentation/sound/oss/OPL3-SA
+++ /dev/null
@@ -1,52 +0,0 @@
1OPL3-SA1 sound driver (opl3sa.o)
2
3---
4Note: This howto only describes how to setup the OPL3-SA1 chip; this info
5does not apply to the SA2, SA3, or SA4.
6---
7
8The Yamaha OPL3-SA1 sound chip is usually found built into motherboards, and
9it's a decent little chip offering a WSS mode, a SB Pro emulation mode, MPU401
10and OPL3 FM Synth capabilities.
11
12You can enable inclusion of the driver via CONFIG_SOUND_OPL3SA1=m, or
13CONFIG_SOUND_OPL3SA1=y through 'make config/xconfig/menuconfig'.
14
15You'll need to know all of the relevant info (irq, dma, and io port) for the
16chip's WSS mode, since that is the mode the kernel sound driver uses, and of
17course you'll also need to know about where the MPU401 and OPL3 ports and
18IRQs are if you want to use those.
19
20Here's the skinny on how to load it as a module:
21
22 modprobe opl3sa io=0x530 irq=11 dma=0 dma2=1 mpu_io=0x330 mpu_irq=5
23
24Module options in detail:
25
26 io: This is the WSS's port base.
27 irq: This is the WSS's IRQ.
28 dma: This is the WSS's DMA line. In my BIOS setup screen this was
29 listed as "WSS Play DMA"
30 dma2: This is the WSS's secondary DMA line. My BIOS calls it the
31 "WSS capture DMA"
32
33 mpu_io: This is the MPU401's port base.
34 mpu_irq: This is the MPU401's IRQ.
35
36If you'd like to use the OPL3 FM Synthesizer, make sure you enable
37CONFIG_SOUND_YM3812 (in 'make config'). That'll build the opl3.o module.
38
39Then a simple 'insmod opl3 io=0x388', and you now have FM Synth.
40
41You can also use the SoftOSS software synthesizer instead of the builtin OPL3.
42Here's how:
43
44Say 'y' or 'm' to "SoftOSS software wave table engine" in make config.
45
46If you said yes, the software synth is available once you boot your new
47kernel.
48
49If you chose to build it as a module, just insmod the resulting softoss2.o
50
51Questions? Comments?
52<stiker@northlink.com>
diff --git a/Documentation/sound/oss/README.awe b/Documentation/sound/oss/README.awe
deleted file mode 100644
index 80054cd8fcde..000000000000
--- a/Documentation/sound/oss/README.awe
+++ /dev/null
@@ -1,218 +0,0 @@
1================================================================
2 AWE32 Sound Driver for Linux / FreeBSD
3 version 0.4.3; Nov. 1, 1998
4
5 Takashi Iwai <iwai@ww.uni-erlangen.de>
6================================================================
7
8* GENERAL NOTES
9
10This is a sound driver extension for SoundBlaster AWE32 and other
11compatible cards (AWE32-PnP, SB32, SB32-PnP, AWE64 & etc) to enable
12the wave synth operations. The driver is provided for Linux 1.2.x
13and 2.[012].x kernels, as well as FreeBSD, on Intel x86 and DEC
14Alpha systems.
15
16This driver was written by Takashi Iwai <iwai@ww.uni-erlangen.de>,
17and provided "as is". The original source (awedrv-0.4.3.tar.gz) and
18binary packages are available on the following URL:
19 http://bahamut.mm.t.u-tokyo.ac.jp/~iwai/awedrv/
20Note that since the author is apart from this web site, the update is
21not frequent now.
22
23
24* NOTE TO LINUX USERS
25
26To enable this driver on linux-2.[01].x kernels, you need turn on
27"AWE32 synth" options in sound menu when configure your linux kernel
28and modules. The precise installation procedure is described in the
29AWE64-Mini-HOWTO and linux-kernel/Documentation/sound/AWE32.
30
31If you're using PnP cards, the card must be initialized before loading
32the sound driver. There're several options to do this:
33 - Initialize the card via ISA PnP tools, and load the sound module.
34 - Initialize the card on DOS, and load linux by loadlin.exe
35 - Use PnP kernel driver (for Linux-2.x.x)
36The detailed instruction for the solution using isapnp tools is found
37in many documents like above. A brief instruction is also included in
38the installation document of this package.
39For PnP driver project, please refer to the following URL:
40 http://www-jcr.lmh.ox.ac.uk/~pnp/
41
42
43* USING THE DRIVER
44
45The awedrv has several different playing modes to realize easy channel
46allocation for MIDI songs. To hear the exact sound quality, you need
47to obtain the extended sequencer program, drvmidi or playmidi-2.5.
48
49For playing MIDI files, you *MUST* load the soundfont file on the
50driver previously by sfxload utility. Otherwise you'll hear no sounds
51at all! All the utilities and driver source packages are found in the
52above URL. The sfxload program is included in the package
53awesfx-0.4.3.tgz. Binary packages are available there, too. See the
54instruction in each package for installation.
55
56Loading a soundfont file is very simple. Just execute the command
57
58 % sfxload synthgm.sbk
59
60Then, sfxload transfers the file "synthgm.sbk" to the driver.
61Both SF1 and SF2 formats are accepted.
62
63Now you can hear midi musics by a midi player.
64
65 % drvmidi foo.mid
66
67If you run MIDI player after MOD player, you need to load soundfont
68files again, since MOD player programs clear the previous loaded
69samples by their own data.
70
71If you have only 512kb on the sound card, I recommend to use dynamic
72sample loading via -L option of drvmidi. 2MB GM/GS soundfont file is
73available in most midi files.
74
75 % sfxload synthgm
76 % drvmidi -L 2mbgmgs foo.mid
77
78This makes a big difference (believe me)! For more details, please
79refer to the FAQ list which is available on the URL above.
80
81The current chorus, reverb and equalizer status can be changed by
82aweset utility program (included in awesfx package). Note that
83some awedrv-native programs (like drvmidi and xmp) will change the
84current settings by themselves. The aweset program is effective
85only for other programs like playmidi.
86
87Enjoy.
88
89
90* COMPILE FLAGS
91
92Compile conditions are defined in awe_config.h.
93
94[Compatibility Conditions]
95The following flags are defined automatically when using installation
96shell script.
97
98- AWE_MODULE_SUPPORT
99 indicates your Linux kernel supports module for each sound card
100 (in recent 2.1 or 2.2 kernels and unofficial patched 2.0 kernels
101 as distributed in the RH5.0 package).
102 This flag is automatically set when you're using 2.1.x kernels.
103 You can pass the base address and memory size via the following
104 module options,
105 io = base I/O port address (eg. 0x620)
106 memsize = DRAM size in kilobytes (eg. 512)
107 As default, AWE driver probes these values automatically.
108
109
110[Hardware Conditions]
111You DON'T have to define the following two values.
112Define them only when the driver couldn't detect the card properly.
113
114- AWE_DEFAULT_BASE_ADDR (default: not defined)
115 specifies the base port address of your AWE32 card.
116 0 means to autodetect the address.
117
118- AWE_DEFAULT_MEM_SIZE (default: not defined)
119 specifies the memory size of your AWE32 card in kilobytes.
120 -1 means to autodetect its size.
121
122
123[Sample Table Size]
124From ver.0.4.0, sample tables are allocated dynamically (except
125Linux-1.2.x system), so you need NOT to touch these parameters.
126Linux-1.2.x users may need to increase these values to appropriate size
127if the sound card is equipped with more DRAM.
128
129- AWE_MAX_SF_LISTS, AWE_MAX_SAMPLES, AWE_MAX_INFOS
130
131
132[Other Conditions]
133
134- AWE_ALWAYS_INIT_FM (default: not defined)
135 indicates the AWE driver always initialize FM passthrough even
136 without DRAM on board. Emu8000 chip has a restriction for playing
137 samples on DRAM that at least two channels must be occupied as
138 passthrough channels.
139
140- AWE_DEBUG_ON (default: defined)
141 turns on debugging messages if defined.
142
143- AWE_HAS_GUS_COMPATIBILITY (default: defined)
144 Enables GUS compatibility mode if defined, reading GUS patches and
145 GUS control commands. Define this option to use GMOD or other
146 GUS module players.
147
148- CONFIG_AWE32_MIDIEMU (default: defined)
149 Adds a MIDI emulation device by Emu8000 wavetable. The emulation
150 device can be accessed as an external MIDI, and sends the MIDI
151 control codes directly. XG and GS sysex/NRPN are accepted.
152 No MIDI input is supported.
153
154- CONFIG_AWE32_MIXER (default: not defined)
155 Adds a mixer device for AWE32 bass/treble equalizer control.
156 You can access this device using /dev/mixer?? (usually mixer01).
157
158- AWE_USE_NEW_VOLUME_CALC (default: defined)
159 Use the new method to calculate the volume change as compatible
160 with DOS/Win drivers. This option can be toggled via aweset
161 program, or drvmidi player.
162
163- AWE_CHECK_VTARGET (default: defined)
164 Check the current volume target value when searching for an
165 empty channel to allocate a new voice. This is experimentally
166 implemented in this version. (probably, this option doesn't
167 affect the sound quality severely...)
168
169- AWE_ALLOW_SAMPLE_SHARING (default: defined)
170 Allow sample sharing for differently loaded patches.
171 This function is available only together with awesfx-0.4.3p3.
172 Note that this is still an experimental option.
173
174- DEF_FM_CHORUS_DEPTH (default: 0x10)
175 The default strength to be sent to the chorus effect engine.
176 From 0 to 0xff. Larger numbers may often cause weird sounds.
177
178- DEF_FM_REVERB_DEPTH (default: 0x10)
179 The default strength to be sent to the reverb effect engine.
180 From 0 to 0xff. Larger numbers may often cause weird sounds.
181
182
183* ACKNOWLEDGMENTS
184
185Thanks to Witold Jachimczyk (witek@xfactor.wpi.edu) for much advice
186on programming of AWE32. Much code is brought from his AWE32-native
187MOD player, ALMP.
188The port of awedrv to FreeBSD is done by Randall Hopper
189(rhh@ct.picker.com).
190The new volume calculation routine was derived from Mark Weaver's
191ADIP compatible routines.
192I also thank linux-awe-ml members for their efforts
193to reboot their system many times :-)
194
195
196* TODO'S
197
198- Complete DOS/Win compatibility
199- DSP-like output
200
201
202* COPYRIGHT
203
204Copyright (C) 1996-1998 Takashi Iwai
205
206This program is free software; you can redistribute it and/or modify
207it under the terms of the GNU General Public License as published by
208the Free Software Foundation; either version 2 of the License, or
209(at your option) any later version.
210
211This program is distributed in the hope that it will be useful,
212but WITHOUT ANY WARRANTY; without even the implied warranty of
213MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
214GNU General Public License for more details.
215
216You should have received a copy of the GNU General Public License
217along with this program; if not, write to the Free Software
218Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
diff --git a/Documentation/sound/oss/Wavefront b/Documentation/sound/oss/Wavefront
deleted file mode 100644
index 16f57ea43052..000000000000
--- a/Documentation/sound/oss/Wavefront
+++ /dev/null
@@ -1,339 +0,0 @@
1 An OSS/Free Driver for WaveFront soundcards
2 (Turtle Beach Maui, Tropez, Tropez Plus)
3
4 Paul Barton-Davis, July 1998
5
6 VERSION 0.2.5
7
8Driver Status
9-------------
10
11Requires: Kernel 2.1.106 or later (the driver is included with kernels
122.1.109 and above)
13
14As of 7/22/1998, this driver is currently in *BETA* state. This means
15that it compiles and runs, and that I use it on my system (Linux
162.1.106) with some reasonably demanding applications and uses. I
17believe the code is approaching an initial "finished" state that
18provides bug-free support for the Tropez Plus.
19
20Please note that to date, the driver has ONLY been tested on a Tropez
21Plus. I would very much like to hear (and help out) people with Tropez
22and Maui cards, since I think the driver can support those cards as
23well.
24
25Finally, the driver has not been tested (or even compiled) as a static
26(non-modular) part of the kernel. Alan Cox's good work in modularizing
27OSS/Free for Linux makes this rather unnecessary.
28
29Some Questions
30--------------
31
32**********************************************************************
330) What does this driver do that the maui driver did not ?
34**********************************************************************
35
36* can fully initialize a WaveFront card from cold boot - no DOS
37 utilities needed
38* working patch/sample/program loading and unloading (the maui
39 driver didn't document how to make this work, and assumed
40 user-level preparation of the patch data for writing
41 to the board. ick.)
42* full user-level access to all WaveFront commands
43* for the Tropez Plus, (primitive) control of the YSS225 FX processor
44* Virtual MIDI mode supported - 2 MIDI devices accessible via the
45 WaveFront's MPU401/UART emulation. One
46 accesses the WaveFront synth, the other accesses the
47 external MIDI connector. Full MIDI read/write semantics
48 for both devices.
49* OSS-compliant /dev/sequencer interface for the WaveFront synth,
50 including native and GUS-format patch downloading.
51* semi-intelligent patch management (prototypical at this point)
52
53**********************************************************************
541) What to do about MIDI interfaces ?
55**********************************************************************
56
57The Tropez Plus (and perhaps other WF cards) can in theory support up
58to 2 physical MIDI interfaces. One of these is connected to the
59ICS2115 chip (the WaveFront synth itself) and is controlled by
60MPU/UART-401 emulation code running as part of the WaveFront OS. The
61other is controlled by the CS4232 chip present on the board. However,
62physical access to the CS4232 connector is difficult, and it is
63unlikely (though not impossible) that you will want to use it.
64
65An older version of this driver introduced an additional kernel config
66variable which controlled whether or not the CS4232 MIDI interface was
67configured. Because of Alan Cox's work on modularizing the sound
68drivers, and now backporting them to 2.0.34 kernels, there seems to be
69little reason to support "static" configuration variables, and so this
70has been abandoned in favor of *only* module parameters. Specifying
71"mpuio" and "mpuirq" for the cs4232 parameter will result in the
72CS4232 MIDI interface being configured; leaving them unspecified will
73leave it unconfigured (and thus unusable).
74
75BTW, I have heard from one Tropez+ user that the CS4232 interface is
76more reliable than the ICS2115 one. I have had no problems with the
77latter, and I don't have the right cable to test the former one
78out. Reports welcome.
79
80**********************************************************************
812) Why does line XXX of the code look like this .... ?
82**********************************************************************
83
84Either because it's not finished yet, or because you're a better coder
85than I am, or because you don't understand some aspect of how the card
86or the code works.
87
88I absolutely welcome comments, criticisms and suggestions about the
89design and implementation of the driver.
90
91**********************************************************************
923) What files are included ?
93**********************************************************************
94
95 drivers/sound/README.wavefront -- this file
96
97 drivers/sound/wavefront.patch -- patches for the 2.1.106 sound drivers
98 needed to make the rest of this work
99 DO NOT USE IF YOU'VE APPLIED THEM
100 BEFORE, OR HAVE 2.1.109 OR ABOVE
101
102 drivers/sound/wavfront.c -- the driver
103 drivers/sound/ys225.h -- data declarations for FX config
104 drivers/sound/ys225.c -- data definitions for FX config
105 drivers/sound/wf_midi.c -- the "uart401" driver
106 to support virtual MIDI mode.
107 include/wavefront.h -- the header file
108 Documentation/sound/oss/Tropez+ -- short docs on configuration
109
110**********************************************************************
1114) How do I compile/install/use it ?
112**********************************************************************
113
114PART ONE: install the source code into your sound driver directory
115
116 cd <top-of-your-2.1.106-code-base-e.g.-/usr/src/linux>
117 tar -zxvf <where-you-put/wavefront.tar.gz>
118
119PART TWO: apply the patches
120
121 DO THIS ONLY IF YOU HAVE A KERNEL VERSION BELOW 2.1.109
122 AND HAVE NOT ALREADY INSTALLED THE PATCH(ES).
123
124 cd drivers/sound
125 patch < wavefront.patch
126
127PART THREE: configure your kernel
128
129 cd <top of your kernel tree>
130 make xconfig (or whichever config option you use)
131
132 - choose YES for Sound Support
133 - choose MODULE (M) for OSS Sound Modules
134 - choose MODULE(M) to YM3812/OPL3 support
135 - choose MODULE(M) for WaveFront support
136 - choose MODULE(M) for CS4232 support
137
138 - choose "N" for everything else (unless you have other
139 soundcards you want support for)
140
141
142 make boot
143 .
144 .
145 .
146 <whatever you normally do for a kernel install>
147 make modules
148 .
149 .
150 .
151 make modules_install
152
153Here's my autoconf.h SOUND section:
154
155/*
156 * Sound
157 */
158#define CONFIG_SOUND 1
159#undef CONFIG_SOUND_OSS
160#define CONFIG_SOUND_OSS_MODULE 1
161#undef CONFIG_SOUND_PAS
162#undef CONFIG_SOUND_SB
163#undef CONFIG_SOUND_ADLIB
164#undef CONFIG_SOUND_GUS
165#undef CONFIG_SOUND_MPU401
166#undef CONFIG_SOUND_PSS
167#undef CONFIG_SOUND_MSS
168#undef CONFIG_SOUND_SSCAPE
169#undef CONFIG_SOUND_TRIX
170#undef CONFIG_SOUND_MAD16
171#undef CONFIG_SOUND_WAVEFRONT
172#define CONFIG_SOUND_WAVEFRONT_MODULE 1
173#undef CONFIG_SOUND_CS4232
174#define CONFIG_SOUND_CS4232_MODULE 1
175#undef CONFIG_SOUND_MAUI
176#undef CONFIG_SOUND_SGALAXY
177#undef CONFIG_SOUND_OPL3SA1
178#undef CONFIG_SOUND_SOFTOSS
179#undef CONFIG_SOUND_YM3812
180#define CONFIG_SOUND_YM3812_MODULE 1
181#undef CONFIG_SOUND_VMIDI
182#undef CONFIG_SOUND_UART6850
183/*
184 * Additional low level sound drivers
185 */
186#undef CONFIG_LOWLEVEL_SOUND
187
188************************************************************
1896) How do I configure my card ?
190************************************************************
191
192You need to edit /etc/modprobe.conf. Here's mine (edited to show the
193relevant details):
194
195 # Sound system
196 alias char-major-14-* wavefront
197 alias synth0 wavefront
198 alias mixer0 cs4232
199 alias audio0 cs4232
200 install wavefront /sbin/modprobe cs4232 && /sbin/modprobe -i wavefront && /sbin/modprobe opl3
201 options wavefront io=0x200 irq=9
202 options cs4232 synthirq=9 synthio=0x200 io=0x530 irq=5 dma=1 dma2=0
203 options opl3 io=0x388
204
205Things to note:
206
207 the wavefront options "io" and "irq" ***MUST*** match the "synthio"
208 and "synthirq" cs4232 options.
209
210 you can do without the opl3 module if you don't
211 want to use the OPL/[34] FM synth on the soundcard
212
213 the opl3 io parameter is conventionally not adjustable.
214 In theory, any not-in-use IO port address would work, but
215 just use 0x388 and stick with the crowd.
216
217**********************************************************************
2187) What about firmware ?
219**********************************************************************
220
221Turtle Beach have not given me permission to distribute their firmware
222for the ICS2115. However, if you have a WaveFront card, then you
223almost certainly have the firmware, and if not, it's freely available
224on their website, at:
225
226 http://www.tbeach.com/tbs/downloads/scardsdown.htm#tropezplus
227
228The file is called WFOS2001.MOT (for the Tropez+).
229
230This driver, however, doesn't use the pure firmware as distributed,
231but instead relies on a somewhat processed form of it. You can
232generate this very easily. Following an idea from Andrew Veliath's
233Pinnacle driver, the following flex program will generate the
234processed version:
235
236---- cut here -------------------------
237%option main
238%%
239^S[28].*\r$ printf ("%c%.*s", yyleng-1,yyleng-1,yytext);
240<<EOF>> { fputc ('\0', stdout); return; }
241\n {}
242. {}
243---- cut here -------------------------
244
245To use it, put the above in file (say, ws.l) compile it like this:
246
247 shell> flex -ows.c ws.l
248 shell> cc -o ws ws.c
249
250and then use it like this:
251
252 ws < my-copy-of-the-oswf.mot-file > /etc/sound/wavefront.os
253
254If you put it somewhere else, you'll always have to use the wf_ospath
255module parameter (see below) or alter the source code.
256
257**********************************************************************
2587) How do I get it working ?
259**********************************************************************
260
261Optionally, you can reboot with the "new" kernel (even though the only
262changes have really been made to a module).
263
264Then, as root do:
265
266 modprobe wavefront
267
268You should get something like this in /var/log/messages:
269
270 WaveFront: firmware 1.20 already loaded.
271
272or
273
274 WaveFront: no response to firmware probe, assume raw.
275
276then:
277
278 WaveFront: waiting for memory configuration ...
279 WaveFront: hardware version 1.64
280 WaveFront: available DRAM 8191k
281 WaveFront: 332 samples used (266 real, 13 aliases, 53 multi), 180 empty
282 WaveFront: 128 programs slots in use
283 WaveFront: 256 patch slots filled, 142 in use
284
285The whole process takes about 16 seconds, the longest waits being
286after reporting the hardware version (during the firmware download),
287and after reporting program status (during patch status inquiry). It's
288shorter (about 10 secs) if the firmware is already loaded (i.e. only
289warm reboots since the last firmware load).
290
291The "available DRAM" line will vary depending on how much added RAM
292your card has. Mine has 8MB.
293
294To check basic functionality, use play(1) or splay(1) to send a
295.WAV or other audio file through the audio portion. Then use playmidi
296to play a General MIDI file. Try the "-D 0" to hear the
297difference between sending MIDI to the WaveFront and using the OPL/3,
298which is the default (I think ...). If you have an external synth(s)
299hooked to the soundcard, you can use "-e" to route to the
300external synth(s) (in theory, -D 1 should work as well, but I think
301there is a bug in playmidi which prevents this from doing what it
302should).
303
304**********************************************************************
3058) What are the module parameters ?
306**********************************************************************
307
308It's best to read wavefront.c for this, but here is a summary:
309
310integers:
311 wf_raw - if set, ignore apparent presence of firmware
312 loaded onto the ICS2115, reset the whole
313 board, and initialize it from scratch. (default = 0)
314
315 fx_raw - if set, always initialize the YSS225 processor
316 on the Tropez plus. (default = 1)
317
318 < The next 4 are basically for kernel hackers to allow
319 tweaking the driver for testing purposes. >
320
321 wait_usecs - loop timer used when waiting for
322 status conditions on the board.
323 The default is 150.
324
325 debug_default - debugging flags. See sound/wavefront.h
326 for WF_DEBUG_* values. Default is zero.
327 Setting this allows you to debug the
328 driver during module installation.
329strings:
330 ospath - path to get to the pre-processed OS firmware.
331 (default: /etc/sound/wavefront.os)
332
333**********************************************************************
3349) Who should I contact if I have problems?
335**********************************************************************
336
337Just me: Paul Barton-Davis <pbd@op.net>
338
339
diff --git a/Documentation/sound/oss/es1370 b/Documentation/sound/oss/es1370
deleted file mode 100644
index 7b38b1a096a3..000000000000
--- a/Documentation/sound/oss/es1370
+++ /dev/null
@@ -1,70 +0,0 @@
1/proc/sound, /dev/sndstat
2-------------------------
3
4/proc/sound and /dev/sndstat is not supported by the
5driver. To find out whether the driver succeeded loading,
6check the kernel log (dmesg).
7
8
9ALaw/uLaw sample formats
10------------------------
11
12This driver does not support the ALaw/uLaw sample formats.
13ALaw is the default mode when opening a sound device
14using OSS/Free. The reason for the lack of support is
15that the hardware does not support these formats, and adding
16conversion routines to the kernel would lead to very ugly
17code in the presence of the mmap interface to the driver.
18And since xquake uses mmap, mmap is considered important :-)
19and no sane application uses ALaw/uLaw these days anyway.
20In short, playing a Sun .au file as follows:
21
22cat my_file.au > /dev/dsp
23
24does not work. Instead, you may use the play script from
25Chris Bagwell's sox-12.14 package (available from the URL
26below) to play many different audio file formats.
27The script automatically determines the audio format
28and does do audio conversions if necessary.
29http://home.sprynet.com/sprynet/cbagwell/projects.html
30
31
32Blocking vs. nonblocking IO
33---------------------------
34
35Unlike OSS/Free this driver honours the O_NONBLOCK file flag
36not only during open, but also during read and write.
37This is an effort to make the sound driver interface more
38regular. Timidity has problems with this; a patch
39is available from http://www.ife.ee.ethz.ch/~sailer/linux/pciaudio.html.
40(Timidity patched will also run on OSS/Free).
41
42
43MIDI UART
44---------
45
46The driver supports a simple MIDI UART interface, with
47no ioctl's supported.
48
49
50MIDI synthesizer
51----------------
52
53This soundcard does not have any hardware MIDI synthesizer;
54MIDI synthesis has to be done in software. To allow this
55the driver/soundcard supports two PCM (/dev/dsp) interfaces.
56The second one goes to the mixer "synth" setting and supports
57only a limited set of sampling rates (44100, 22050, 11025, 5512).
58By setting lineout to 1 on the driver command line
59(eg. insmod es1370 lineout=1) it is even possible on some
60cards to convert the LINEIN jack into a second LINEOUT jack, thus
61making it possible to output four independent audio channels!
62
63There is a freely available software package that allows
64MIDI file playback on this soundcard called Timidity.
65See http://www.cgs.fi/~tt/timidity/.
66
67
68
69Thomas Sailer
70t.sailer@alumni.ethz.ch
diff --git a/Documentation/sound/oss/rme96xx b/Documentation/sound/oss/rme96xx
deleted file mode 100644
index 87d7b7b65fa1..000000000000
--- a/Documentation/sound/oss/rme96xx
+++ /dev/null
@@ -1,767 +0,0 @@
1Beta release of the rme96xx (driver for RME 96XX cards like the
2"Hammerfall" and the "Hammerfall light")
3
4Important: The driver module has to be installed on a freshly rebooted system,
5otherwise the driver might not be able to acquire its buffers.
6
7features:
8
9 - OSS programming interface (i.e. runs with standard OSS soundsoftware)
10	- OSS/Multichannel interface (OSS multichannel is done by just acquiring
11 more than 2 channels). The driver does not use more than one device
12 ( yet .. this feature may be implemented later )
13 - more than one RME card supported
14
15The driver uses a specific multichannel interface, which I will document
16when the driver gets stable. (take a look at the defines in rme96xx.h,
17which adds blocked multichannel formats i.e. instead of
18lrlrlrlr --> llllrrrr etc.
19
20Use the "rmectrl" program to look at the status of the card ..
21or use xrmectrl, a GUI interface for the ctrl program.
22
23What you can do with the rmectrl program is to set the stereo device for
24OSS emulation (e.g. if you use SPDIF out).
25
26You do:
27
28./ctrl offset 24 24
29
30which makes the stereo device use channels 25 and 26.
31
32Guenter Geiger <geiger@epy.co.at>
33
34copy the first part of the attached source code into rmectrl.c
35and the second part into xrmectrl (or get the program from
36http://gige.xdv.org/pages/soft/pages/rme)
37
38to compile: gcc -o rmectrl rmectrl.c
39------------------------------ snip ------------------------------------
40
41#include <stdio.h>
42#include <sys/types.h>
43#include <sys/stat.h>
44#include <sys/ioctl.h>
45#include <fcntl.h>
46#include <linux/soundcard.h>
47#include <math.h>
48#include <unistd.h>
49#include <stdlib.h>
50#include "rme96xx.h"
51
52/*
53 remctrl.c
54 (C) 2000 Guenter Geiger <geiger@debian.org>
55 HP20020201 - Heiko Purnhagen <purnhage@tnt.uni-hannover.de>
56*/
57
58/* # define DEVICE_NAME "/dev/mixer" */
59# define DEVICE_NAME "/dev/mixer1"
60
61
62void usage(void)
63{
64 fprintf(stderr,"usage: rmectrl [/dev/mixer<n>] [command [options]]\n\n");
65 fprintf(stderr,"where command is one of:\n");
66 fprintf(stderr," help show this help\n");
67 fprintf(stderr," status show status bits\n");
68 fprintf(stderr," control show control bits\n");
69 fprintf(stderr," mix show mixer/offset status\n");
70 fprintf(stderr," master <n> set sync master\n");
71 fprintf(stderr," pro <n> set spdif out pro\n");
72 fprintf(stderr," emphasis <n> set spdif out emphasis\n");
73 fprintf(stderr," dolby <n> set spdif out no audio\n");
74 fprintf(stderr," optout <n> set spdif out optical\n");
75 fprintf(stderr," wordclock <n> set sync wordclock\n");
76 fprintf(stderr," spdifin <n> set spdif in (0=optical,1=coax,2=intern)\n");
77 fprintf(stderr," syncref <n> set sync source (0=ADAT1,1=ADAT2,2=ADAT3,3=SPDIF)\n");
78 fprintf(stderr," adat1cd <n> set ADAT1 on internal CD\n");
79 fprintf(stderr," offset <devnr> <in> <out> set dev (0..3) offset (0..25)\n");
80 exit(-1);
81}
82
83
84int main(int argc, char* argv[])
85{
86 int cards;
87 int ret;
88 int i;
89 double ft;
90 int fd, fdwr;
91 int param,orig;
92 rme_status_t stat;
93 rme_ctrl_t ctrl;
94 char *device;
95 int argidx;
96
97 if (argc < 2)
98 usage();
99
100 if (*argv[1]=='/') {
101 device = argv[1];
102 argidx = 2;
103 }
104 else {
105 device = DEVICE_NAME;
106 argidx = 1;
107 }
108
109 fprintf(stdout,"mixer device %s\n",device);
110 if ((fd = open(device,O_RDONLY)) < 0) {
111 fprintf(stdout,"opening device failed\n");
112 exit(-1);
113 }
114
115 if ((fdwr = open(device,O_WRONLY)) < 0) {
116 fprintf(stdout,"opening device failed\n");
117 exit(-1);
118 }
119
120 if (argc < argidx+1)
121 usage();
122
123 if (!strcmp(argv[argidx],"help"))
124 usage();
125 if (!strcmp(argv[argidx],"-h"))
126 usage();
127 if (!strcmp(argv[argidx],"--help"))
128 usage();
129
130 if (!strcmp(argv[argidx],"status")) {
131 ioctl(fd,SOUND_MIXER_PRIVATE2,&stat);
132 fprintf(stdout,"stat.irq %d\n",stat.irq);
133 fprintf(stdout,"stat.lockmask %d\n",stat.lockmask);
134 fprintf(stdout,"stat.sr48 %d\n",stat.sr48);
135 fprintf(stdout,"stat.wclock %d\n",stat.wclock);
136 fprintf(stdout,"stat.bufpoint %d\n",stat.bufpoint);
137 fprintf(stdout,"stat.syncmask %d\n",stat.syncmask);
138 fprintf(stdout,"stat.doublespeed %d\n",stat.doublespeed);
139 fprintf(stdout,"stat.tc_busy %d\n",stat.tc_busy);
140 fprintf(stdout,"stat.tc_out %d\n",stat.tc_out);
141 fprintf(stdout,"stat.crystalrate %d (0=64k 3=96k 4=88.2k 5=48k 6=44.1k 7=32k)\n",stat.crystalrate);
142 fprintf(stdout,"stat.spdif_error %d\n",stat.spdif_error);
143 fprintf(stdout,"stat.bufid %d\n",stat.bufid);
144 fprintf(stdout,"stat.tc_valid %d\n",stat.tc_valid);
145 exit (0);
146 }
147
148 if (!strcmp(argv[argidx],"control")) {
149 ioctl(fd,SOUND_MIXER_PRIVATE3,&ctrl);
150 fprintf(stdout,"ctrl.start %d\n",ctrl.start);
151 fprintf(stdout,"ctrl.latency %d (0=64 .. 7=8192)\n",ctrl.latency);
152 fprintf(stdout,"ctrl.master %d\n",ctrl.master);
153 fprintf(stdout,"ctrl.ie %d\n",ctrl.ie);
154 fprintf(stdout,"ctrl.sr48 %d\n",ctrl.sr48);
155 fprintf(stdout,"ctrl.spare %d\n",ctrl.spare);
156 fprintf(stdout,"ctrl.doublespeed %d\n",ctrl.doublespeed);
157 fprintf(stdout,"ctrl.pro %d\n",ctrl.pro);
158 fprintf(stdout,"ctrl.emphasis %d\n",ctrl.emphasis);
159 fprintf(stdout,"ctrl.dolby %d\n",ctrl.dolby);
160 fprintf(stdout,"ctrl.opt_out %d\n",ctrl.opt_out);
161 fprintf(stdout,"ctrl.wordclock %d\n",ctrl.wordclock);
162 fprintf(stdout,"ctrl.spdif_in %d (0=optical,1=coax,2=intern)\n",ctrl.spdif_in);
163 fprintf(stdout,"ctrl.sync_ref %d (0=ADAT1,1=ADAT2,2=ADAT3,3=SPDIF)\n",ctrl.sync_ref);
164 fprintf(stdout,"ctrl.spdif_reset %d\n",ctrl.spdif_reset);
165 fprintf(stdout,"ctrl.spdif_select %d\n",ctrl.spdif_select);
166 fprintf(stdout,"ctrl.spdif_clock %d\n",ctrl.spdif_clock);
167 fprintf(stdout,"ctrl.spdif_write %d\n",ctrl.spdif_write);
168 fprintf(stdout,"ctrl.adat1_cd %d\n",ctrl.adat1_cd);
169 exit (0);
170 }
171
172 if (!strcmp(argv[argidx],"mix")) {
173 rme_mixer mix;
174 int i;
175
176 for (i=0; i<4; i++) {
177 mix.devnr = i;
178 ioctl(fd,SOUND_MIXER_PRIVATE1,&mix);
179 if (mix.devnr == i) {
180 fprintf(stdout,"devnr %d\n",mix.devnr);
181 fprintf(stdout,"mix.i_offset %2d (0-25)\n",mix.i_offset);
182 fprintf(stdout,"mix.o_offset %2d (0-25)\n",mix.o_offset);
183 }
184 }
185 exit (0);
186 }
187
188/* the control flags */
189
190 if (argc < argidx+2)
191 usage();
192
193 if (!strcmp(argv[argidx],"master")) {
194 int val = atoi(argv[argidx+1]);
195 ioctl(fd,SOUND_MIXER_PRIVATE3,&ctrl);
196 printf("master = %d\n",val);
197 ctrl.master = val;
198 ioctl(fdwr,SOUND_MIXER_PRIVATE3,&ctrl);
199 exit (0);
200 }
201
202 if (!strcmp(argv[argidx],"pro")) {
203 int val = atoi(argv[argidx+1]);
204 ioctl(fd,SOUND_MIXER_PRIVATE3,&ctrl);
205 printf("pro = %d\n",val);
206 ctrl.pro = val;
207 ioctl(fdwr,SOUND_MIXER_PRIVATE3,&ctrl);
208 exit (0);
209 }
210
211 if (!strcmp(argv[argidx],"emphasis")) {
212 int val = atoi(argv[argidx+1]);
213 ioctl(fd,SOUND_MIXER_PRIVATE3,&ctrl);
214 printf("emphasis = %d\n",val);
215 ctrl.emphasis = val;
216 ioctl(fdwr,SOUND_MIXER_PRIVATE3,&ctrl);
217 exit (0);
218 }
219
220 if (!strcmp(argv[argidx],"dolby")) {
221 int val = atoi(argv[argidx+1]);
222 ioctl(fd,SOUND_MIXER_PRIVATE3,&ctrl);
223 printf("dolby = %d\n",val);
224 ctrl.dolby = val;
225 ioctl(fdwr,SOUND_MIXER_PRIVATE3,&ctrl);
226 exit (0);
227 }
228
229 if (!strcmp(argv[argidx],"optout")) {
230 int val = atoi(argv[argidx+1]);
231 ioctl(fd,SOUND_MIXER_PRIVATE3,&ctrl);
232 printf("optout = %d\n",val);
233 ctrl.opt_out = val;
234 ioctl(fdwr,SOUND_MIXER_PRIVATE3,&ctrl);
235 exit (0);
236 }
237
238 if (!strcmp(argv[argidx],"wordclock")) {
239 int val = atoi(argv[argidx+1]);
240 ioctl(fd,SOUND_MIXER_PRIVATE3,&ctrl);
241 printf("wordclock = %d\n",val);
242 ctrl.wordclock = val;
243 ioctl(fdwr,SOUND_MIXER_PRIVATE3,&ctrl);
244 exit (0);
245 }
246
247 if (!strcmp(argv[argidx],"spdifin")) {
248 int val = atoi(argv[argidx+1]);
249 ioctl(fd,SOUND_MIXER_PRIVATE3,&ctrl);
250 printf("spdifin = %d\n",val);
251 ctrl.spdif_in = val;
252 ioctl(fdwr,SOUND_MIXER_PRIVATE3,&ctrl);
253 exit (0);
254 }
255
256 if (!strcmp(argv[argidx],"syncref")) {
257 int val = atoi(argv[argidx+1]);
258 ioctl(fd,SOUND_MIXER_PRIVATE3,&ctrl);
259 printf("syncref = %d\n",val);
260 ctrl.sync_ref = val;
261 ioctl(fdwr,SOUND_MIXER_PRIVATE3,&ctrl);
262 exit (0);
263 }
264
265 if (!strcmp(argv[argidx],"adat1cd")) {
266 int val = atoi(argv[argidx+1]);
267 ioctl(fd,SOUND_MIXER_PRIVATE3,&ctrl);
268 printf("adat1cd = %d\n",val);
269 ctrl.adat1_cd = val;
270 ioctl(fdwr,SOUND_MIXER_PRIVATE3,&ctrl);
271 exit (0);
272 }
273
274/* setting offset */
275
276 if (argc < argidx+4)
277 usage();
278
279 if (!strcmp(argv[argidx],"offset")) {
280 rme_mixer mix;
281
282 mix.devnr = atoi(argv[argidx+1]);
283
284 mix.i_offset = atoi(argv[argidx+2]);
285 mix.o_offset = atoi(argv[argidx+3]);
286 ioctl(fdwr,SOUND_MIXER_PRIVATE1,&mix);
287 fprintf(stdout,"devnr %d\n",mix.devnr);
288 fprintf(stdout,"mix.i_offset to %d\n",mix.i_offset);
289 fprintf(stdout,"mix.o_offset to %d\n",mix.o_offset);
290 exit (0);
291 }
292
293 usage();
294 exit (0); /* to avoid warning */
295}
296
297
298---------------------------- <snip> --------------------------------
299#!/usr/bin/wish
300
301# xrmectrl
302# (C) 2000 Guenter Geiger <geiger@debian.org>
303# HP20020201 - Heiko Purnhagen <purnhage@tnt.uni-hannover.de>
304
305#set defaults "-relief ridged"
306set CTRLPROG "./rmectrl"
307if {$argc} {
308 set CTRLPROG "$CTRLPROG $argv"
309}
310puts "CTRLPROG $CTRLPROG"
311
312frame .butts
313button .butts.exit -text "Exit" -command "exit" -relief ridge
314#button .butts.state -text "State" -command "get_all"
315
316pack .butts.exit -side left
317pack .butts -side bottom
318
319
320#
321# STATUS
322#
323
324frame .status
325
326# Sampling Rate
327
328frame .status.sr
329label .status.sr.text -text "Sampling Rate" -justify left
330radiobutton .status.sr.441 -selectcolor red -text "44.1 kHz" -width 10 -anchor nw -variable srate -value 44100 -font times
331radiobutton .status.sr.480 -selectcolor red -text "48 kHz" -width 10 -anchor nw -variable srate -value 48000 -font times
332radiobutton .status.sr.882 -selectcolor red -text "88.2 kHz" -width 10 -anchor nw -variable srate -value 88200 -font times
333radiobutton .status.sr.960 -selectcolor red -text "96 kHz" -width 10 -anchor nw -variable srate -value 96000 -font times
334
335pack .status.sr.text .status.sr.441 .status.sr.480 .status.sr.882 .status.sr.960 -side top -padx 3
336
337# Lock
338
339frame .status.lock
340label .status.lock.text -text "Lock" -justify left
341checkbutton .status.lock.adat1 -selectcolor red -text "ADAT1" -anchor nw -width 10 -variable adatlock1 -font times
342checkbutton .status.lock.adat2 -selectcolor red -text "ADAT2" -anchor nw -width 10 -variable adatlock2 -font times
343checkbutton .status.lock.adat3 -selectcolor red -text "ADAT3" -anchor nw -width 10 -variable adatlock3 -font times
344
345pack .status.lock.text .status.lock.adat1 .status.lock.adat2 .status.lock.adat3 -side top -padx 3
346
347# Sync
348
349frame .status.sync
350label .status.sync.text -text "Sync" -justify left
351checkbutton .status.sync.adat1 -selectcolor red -text "ADAT1" -anchor nw -width 10 -variable adatsync1 -font times
352checkbutton .status.sync.adat2 -selectcolor red -text "ADAT2" -anchor nw -width 10 -variable adatsync2 -font times
353checkbutton .status.sync.adat3 -selectcolor red -text "ADAT3" -anchor nw -width 10 -variable adatsync3 -font times
354
355pack .status.sync.text .status.sync.adat1 .status.sync.adat2 .status.sync.adat3 -side top -padx 3
356
357# Timecode
358
359frame .status.tc
360label .status.tc.text -text "Timecode" -justify left
361checkbutton .status.tc.busy -selectcolor red -text "busy" -anchor nw -width 10 -variable tcbusy -font times
362checkbutton .status.tc.out -selectcolor red -text "out" -anchor nw -width 10 -variable tcout -font times
363checkbutton .status.tc.valid -selectcolor red -text "valid" -anchor nw -width 10 -variable tcvalid -font times
364
365pack .status.tc.text .status.tc.busy .status.tc.out .status.tc.valid -side top -padx 3
366
367# SPDIF In
368
369frame .status.spdif
370label .status.spdif.text -text "SPDIF In" -justify left
371label .status.spdif.sr -text "--.- kHz" -anchor n -width 10 -font times
372checkbutton .status.spdif.error -selectcolor red -text "Input Lock" -anchor nw -width 10 -variable spdiferr -font times
373
374pack .status.spdif.text .status.spdif.sr .status.spdif.error -side top -padx 3
375
376pack .status.sr .status.lock .status.sync .status.tc .status.spdif -side left -fill x -anchor n -expand 1
377
378
379#
380# CONTROL
381#
382
383proc setprof {} {
384 global CTRLPROG
385 global spprof
386 exec $CTRLPROG pro $spprof
387}
388
389proc setemph {} {
390 global CTRLPROG
391 global spemph
392 exec $CTRLPROG emphasis $spemph
393}
394
395proc setnoaud {} {
396 global CTRLPROG
397 global spnoaud
398 exec $CTRLPROG dolby $spnoaud
399}
400
401proc setoptical {} {
402 global CTRLPROG
403 global spoptical
404 exec $CTRLPROG optout $spoptical
405}
406
407proc setspdifin {} {
408 global CTRLPROG
409 global spdifin
410 exec $CTRLPROG spdifin [expr $spdifin - 1]
411}
412
413proc setsyncsource {} {
414 global CTRLPROG
415 global syncsource
416 exec $CTRLPROG syncref [expr $syncsource -1]
417}
418
419
420proc setmaster {} {
421 global CTRLPROG
422 global master
423 exec $CTRLPROG master $master
424}
425
426proc setwordclock {} {
427 global CTRLPROG
428 global wordclock
429 exec $CTRLPROG wordclock $wordclock
430}
431
432proc setadat1cd {} {
433 global CTRLPROG
434 global adat1cd
435 exec $CTRLPROG adat1cd $adat1cd
436}
437
438
439frame .control
440
441# SPDIF In & SPDIF Out
442
443
444frame .control.spdif
445
446frame .control.spdif.in
447label .control.spdif.in.text -text "SPDIF In" -justify left
448radiobutton .control.spdif.in.input1 -text "Optical" -anchor nw -width 13 -variable spdifin -value 1 -command setspdifin -selectcolor blue -font times
449radiobutton .control.spdif.in.input2 -text "Coaxial" -anchor nw -width 13 -variable spdifin -value 2 -command setspdifin -selectcolor blue -font times
450radiobutton .control.spdif.in.input3 -text "Intern " -anchor nw -width 13 -variable spdifin -command setspdifin -value 3 -selectcolor blue -font times
451
452checkbutton .control.spdif.in.adat1cd -text "ADAT1 Intern" -anchor nw -width 13 -variable adat1cd -command setadat1cd -selectcolor blue -font times
453
454pack .control.spdif.in.text .control.spdif.in.input1 .control.spdif.in.input2 .control.spdif.in.input3 .control.spdif.in.adat1cd
455
456label .control.spdif.space
457
458frame .control.spdif.out
459label .control.spdif.out.text -text "SPDIF Out" -justify left
460checkbutton .control.spdif.out.pro -text "Professional" -anchor nw -width 13 -variable spprof -command setprof -selectcolor blue -font times
461checkbutton .control.spdif.out.emphasis -text "Emphasis" -anchor nw -width 13 -variable spemph -command setemph -selectcolor blue -font times
462checkbutton .control.spdif.out.dolby -text "NoAudio" -anchor nw -width 13 -variable spnoaud -command setnoaud -selectcolor blue -font times
463checkbutton .control.spdif.out.optout -text "Optical Out" -anchor nw -width 13 -variable spoptical -command setoptical -selectcolor blue -font times
464
465pack .control.spdif.out.optout .control.spdif.out.dolby .control.spdif.out.emphasis .control.spdif.out.pro .control.spdif.out.text -side bottom
466
467pack .control.spdif.in .control.spdif.space .control.spdif.out -side top -fill y -padx 3 -expand 1
468
469# Sync Mode & Sync Source
470
471frame .control.sync
472frame .control.sync.mode
473label .control.sync.mode.text -text "Sync Mode" -justify left
474checkbutton .control.sync.mode.master -text "Master" -anchor nw -width 13 -variable master -command setmaster -selectcolor blue -font times
475checkbutton .control.sync.mode.wc -text "Wordclock" -anchor nw -width 13 -variable wordclock -command setwordclock -selectcolor blue -font times
476
477pack .control.sync.mode.text .control.sync.mode.master .control.sync.mode.wc
478
479label .control.sync.space
480
481frame .control.sync.src
482label .control.sync.src.text -text "Sync Source" -justify left
483radiobutton .control.sync.src.input1 -text "ADAT1" -anchor nw -width 13 -variable syncsource -value 1 -command setsyncsource -selectcolor blue -font times
484radiobutton .control.sync.src.input2 -text "ADAT2" -anchor nw -width 13 -variable syncsource -value 2 -command setsyncsource -selectcolor blue -font times
485radiobutton .control.sync.src.input3 -text "ADAT3" -anchor nw -width 13 -variable syncsource -command setsyncsource -value 3 -selectcolor blue -font times
486radiobutton .control.sync.src.input4 -text "SPDIF" -anchor nw -width 13 -variable syncsource -command setsyncsource -value 4 -selectcolor blue -font times
487
488pack .control.sync.src.input4 .control.sync.src.input3 .control.sync.src.input2 .control.sync.src.input1 .control.sync.src.text -side bottom
489
490pack .control.sync.mode .control.sync.space .control.sync.src -side top -fill y -padx 3 -expand 1
491
492label .control.space -text "" -width 10
493
494# Buffer Size
495
496frame .control.buf
497label .control.buf.text -text "Buffer Size (Latency)" -justify left
498radiobutton .control.buf.b1 -selectcolor red -text "64 (1.5 ms)" -width 13 -anchor nw -variable ssrate -value 1 -font times
499radiobutton .control.buf.b2 -selectcolor red -text "128 (3 ms)" -width 13 -anchor nw -variable ssrate -value 2 -font times
500radiobutton .control.buf.b3 -selectcolor red -text "256 (6 ms)" -width 13 -anchor nw -variable ssrate -value 3 -font times
501radiobutton .control.buf.b4 -selectcolor red -text "512 (12 ms)" -width 13 -anchor nw -variable ssrate -value 4 -font times
502radiobutton .control.buf.b5 -selectcolor red -text "1024 (23 ms)" -width 13 -anchor nw -variable ssrate -value 5 -font times
503radiobutton .control.buf.b6 -selectcolor red -text "2048 (46 ms)" -width 13 -anchor nw -variable ssrate -value 6 -font times
504radiobutton .control.buf.b7 -selectcolor red -text "4096 (93 ms)" -width 13 -anchor nw -variable ssrate -value 7 -font times
505radiobutton .control.buf.b8 -selectcolor red -text "8192 (186 ms)" -width 13 -anchor nw -variable ssrate -value 8 -font times
506
507pack .control.buf.text .control.buf.b1 .control.buf.b2 .control.buf.b3 .control.buf.b4 .control.buf.b5 .control.buf.b6 .control.buf.b7 .control.buf.b8 -side top -padx 3
508
509# Offset
510
511frame .control.offset
512
513frame .control.offset.in
514label .control.offset.in.text -text "Offset In" -justify left
515label .control.offset.in.off0 -text "dev\#0: -" -anchor nw -width 10 -font times
516label .control.offset.in.off1 -text "dev\#1: -" -anchor nw -width 10 -font times
517label .control.offset.in.off2 -text "dev\#2: -" -anchor nw -width 10 -font times
518label .control.offset.in.off3 -text "dev\#3: -" -anchor nw -width 10 -font times
519
520pack .control.offset.in.text .control.offset.in.off0 .control.offset.in.off1 .control.offset.in.off2 .control.offset.in.off3
521
522label .control.offset.space
523
524frame .control.offset.out
525label .control.offset.out.text -text "Offset Out" -justify left
526label .control.offset.out.off0 -text "dev\#0: -" -anchor nw -width 10 -font times
527label .control.offset.out.off1 -text "dev\#1: -" -anchor nw -width 10 -font times
528label .control.offset.out.off2 -text "dev\#2: -" -anchor nw -width 10 -font times
529label .control.offset.out.off3 -text "dev\#3: -" -anchor nw -width 10 -font times
530
531pack .control.offset.out.off3 .control.offset.out.off2 .control.offset.out.off1 .control.offset.out.off0 .control.offset.out.text -side bottom
532
533pack .control.offset.in .control.offset.space .control.offset.out -side top -fill y -padx 3 -expand 1
534
535
536pack .control.spdif .control.sync .control.space .control.buf .control.offset -side left -fill both -anchor n -expand 1
537
538
539label .statustext -text Status -justify center -relief ridge
540label .controltext -text Control -justify center -relief ridge
541
542label .statusspace
543label .controlspace
544
545pack .statustext .status .statusspace .controltext .control .controlspace -side top -anchor nw -fill both -expand 1
546
547
548proc get_bit {output sstr} {
549 set idx1 [string last [concat $sstr 1] $output]
550 set idx1 [expr $idx1 != -1]
551 return $idx1
552}
553
554proc get_val {output sstr} {
555 set val [string wordend $output [string last $sstr $output]]
556 set val [string range $output $val [expr $val+1]]
557 return $val
558}
559
560proc get_val2 {output sstr} {
561 set val [string wordend $output [string first $sstr $output]]
562 set val [string range $output $val [expr $val+2]]
563 return $val
564}
565
566proc get_control {} {
567 global spprof
568 global spemph
569 global spnoaud
570 global spoptical
571 global spdifin
572 global ssrate
573 global master
574 global wordclock
575 global syncsource
576 global CTRLPROG
577
578 set f [open "| $CTRLPROG control" r+]
579 set ooo [read $f 1000]
580 close $f
581# puts $ooo
582
583 set spprof [ get_bit $ooo "pro"]
584 set spemph [ get_bit $ooo "emphasis"]
585 set spnoaud [ get_bit $ooo "dolby"]
586 set spoptical [ get_bit $ooo "opt_out"]
587 set spdifin [ expr [ get_val $ooo "spdif_in"] + 1]
588 set ssrate [ expr [ get_val $ooo "latency"] + 1]
589 set master [ expr [ get_val $ooo "master"]]
590 set wordclock [ expr [ get_val $ooo "wordclock"]]
591 set syncsource [ expr [ get_val $ooo "sync_ref"] + 1]
592}
593
594proc get_status {} {
595 global srate
596 global ctrlcom
597
598 global adatlock1
599 global adatlock2
600 global adatlock3
601
602 global adatsync1
603 global adatsync2
604 global adatsync3
605
606 global tcbusy
607 global tcout
608 global tcvalid
609
610 global spdiferr
611 global crystal
612 global .status.spdif.text
613 global CTRLPROG
614
615
616 set f [open "| $CTRLPROG status" r+]
617 set ooo [read $f 1000]
618 close $f
619# puts $ooo
620
621# samplerate
622
623 set idx1 [string last "sr48 1" $ooo]
624 set idx2 [string last "doublespeed 1" $ooo]
625 if {$idx1 >= 0} {
626 set fact1 48000
627 } else {
628 set fact1 44100
629 }
630
631 if {$idx2 >= 0} {
632 set fact2 2
633 } else {
634 set fact2 1
635 }
636 set srate [expr $fact1 * $fact2]
637# ADAT lock
638
639 set val [get_val $ooo lockmask]
640 set adatlock1 0
641 set adatlock2 0
642 set adatlock3 0
643 if {[expr $val & 1]} {
644 set adatlock3 1
645 }
646 if {[expr $val & 2]} {
647 set adatlock2 1
648 }
649 if {[expr $val & 4]} {
650 set adatlock1 1
651 }
652
653# ADAT sync
654 set val [get_val $ooo syncmask]
655 set adatsync1 0
656 set adatsync2 0
657 set adatsync3 0
658
659 if {[expr $val & 1]} {
660 set adatsync3 1
661 }
662 if {[expr $val & 2]} {
663 set adatsync2 1
664 }
665 if {[expr $val & 4]} {
666 set adatsync1 1
667 }
668
669# TC busy
670
671 set tcbusy [get_bit $ooo "busy"]
672 set tcout [get_bit $ooo "out"]
673 set tcvalid [get_bit $ooo "valid"]
674 set spdiferr [expr [get_bit $ooo "spdif_error"] == 0]
675
676# 000=64kHz, 100=88.2kHz, 011=96kHz
677# 111=32kHz, 110=44.1kHz, 101=48kHz
678
679 set val [get_val $ooo crystalrate]
680
681 set crystal "--.- kHz"
682 if {$val == 0} {
683 set crystal "64 kHz"
684 }
685 if {$val == 4} {
686 set crystal "88.2 kHz"
687 }
688 if {$val == 3} {
689 set crystal "96 kHz"
690 }
691 if {$val == 7} {
692 set crystal "32 kHz"
693 }
694 if {$val == 6} {
695 set crystal "44.1 kHz"
696 }
697 if {$val == 5} {
698 set crystal "48 kHz"
699 }
700 .status.spdif.sr configure -text $crystal
701}
702
703proc get_offset {} {
704 global inoffset
705 global outoffset
706 global CTRLPROG
707
708 set f [open "| $CTRLPROG mix" r+]
709 set ooo [read $f 1000]
710 close $f
711# puts $ooo
712
713 if { [string match "*devnr*" $ooo] } {
714 set ooo [string range $ooo [string wordend $ooo [string first devnr $ooo]] end]
715 set val [get_val2 $ooo i_offset]
716 .control.offset.in.off0 configure -text "dev\#0: $val"
717 set val [get_val2 $ooo o_offset]
718 .control.offset.out.off0 configure -text "dev\#0: $val"
719 } else {
720 .control.offset.in.off0 configure -text "dev\#0: -"
721 .control.offset.out.off0 configure -text "dev\#0: -"
722 }
723 if { [string match "*devnr*" $ooo] } {
724 set ooo [string range $ooo [string wordend $ooo [string first devnr $ooo]] end]
725 set val [get_val2 $ooo i_offset]
726 .control.offset.in.off1 configure -text "dev\#1: $val"
727 set val [get_val2 $ooo o_offset]
728 .control.offset.out.off1 configure -text "dev\#1: $val"
729 } else {
730 .control.offset.in.off1 configure -text "dev\#1: -"
731 .control.offset.out.off1 configure -text "dev\#1: -"
732 }
733 if { [string match "*devnr*" $ooo] } {
734 set ooo [string range $ooo [string wordend $ooo [string first devnr $ooo]] end]
735 set val [get_val2 $ooo i_offset]
736 .control.offset.in.off2 configure -text "dev\#2: $val"
737 set val [get_val2 $ooo o_offset]
738 .control.offset.out.off2 configure -text "dev\#2: $val"
739 } else {
740 .control.offset.in.off2 configure -text "dev\#2: -"
741 .control.offset.out.off2 configure -text "dev\#2: -"
742 }
743 if { [string match "*devnr*" $ooo] } {
744 set ooo [string range $ooo [string wordend $ooo [string first devnr $ooo]] end]
745 set val [get_val2 $ooo i_offset]
746 .control.offset.in.off3 configure -text "dev\#3: $val"
747 set val [get_val2 $ooo o_offset]
748 .control.offset.out.off3 configure -text "dev\#3: $val"
749 } else {
750 .control.offset.in.off3 configure -text "dev\#3: -"
751 .control.offset.out.off3 configure -text "dev\#3: -"
752 }
753}
754
755
756proc get_all {} {
757get_status
758get_control
759get_offset
760}
761
762# main
763while {1} {
764 after 200
765 get_all
766 update
767}
diff --git a/Documentation/sound/oss/solo1 b/Documentation/sound/oss/solo1
deleted file mode 100644
index 95c4c83422b3..000000000000
--- a/Documentation/sound/oss/solo1
+++ /dev/null
@@ -1,70 +0,0 @@
1Recording
2---------
3
4Recording does not work on the author's card, but there
5is at least one report of it working on later silicon.
6The chip behaves differently than described in the data sheet,
7likely due to a chip bug. Working around this would require
8the help of ESS (for example by publishing an errata sheet),
9but ESS has not done so thus far.
10
11Also, the chip only supports 24 bit addresses for recording,
12which means it cannot work on some Alpha mainboards.
13
14
15/proc/sound, /dev/sndstat
16-------------------------
17
18/proc/sound and /dev/sndstat is not supported by the
19driver. To find out whether the driver succeeded loading,
20check the kernel log (dmesg).
21
22
23ALaw/uLaw sample formats
24------------------------
25
26This driver does not support the ALaw/uLaw sample formats.
27ALaw is the default mode when opening a sound device
28using OSS/Free. The reason for the lack of support is
29that the hardware does not support these formats, and adding
30conversion routines to the kernel would lead to very ugly
31code in the presence of the mmap interface to the driver.
32And since xquake uses mmap, mmap is considered important :-)
33and no sane application uses ALaw/uLaw these days anyway.
34In short, playing a Sun .au file as follows:
35
36cat my_file.au > /dev/dsp
37
38does not work. Instead, you may use the play script from
39Chris Bagwell's sox-12.14 package (or later, available from the URL
40below) to play many different audio file formats.
41The script automatically determines the audio format
42and does do audio conversions if necessary.
43http://home.sprynet.com/sprynet/cbagwell/projects.html
44
45
46Blocking vs. nonblocking IO
47---------------------------
48
49Unlike OSS/Free this driver honours the O_NONBLOCK file flag
50not only during open, but also during read and write.
51This is an effort to make the sound driver interface more
52regular. Timidity has problems with this; a patch
53is available from http://www.ife.ee.ethz.ch/~sailer/linux/pciaudio.html.
54(Timidity patched will also run on OSS/Free).
55
56
57MIDI UART
58---------
59
60The driver supports a simple MIDI UART interface, with
61no ioctl's supported.
62
63
64MIDI synthesizer
65----------------
66
67The card has an OPL compatible FM synthesizer.
68
69Thomas Sailer
70t.sailer@alumni.ethz.ch
diff --git a/Documentation/sound/oss/sonicvibes b/Documentation/sound/oss/sonicvibes
deleted file mode 100644
index 84dee2e0b37d..000000000000
--- a/Documentation/sound/oss/sonicvibes
+++ /dev/null
@@ -1,81 +0,0 @@
1/proc/sound, /dev/sndstat
2-------------------------
3
4/proc/sound and /dev/sndstat is not supported by the
5driver. To find out whether the driver succeeded loading,
6check the kernel log (dmesg).
7
8
9ALaw/uLaw sample formats
10------------------------
11
12This driver does not support the ALaw/uLaw sample formats.
13ALaw is the default mode when opening a sound device
14using OSS/Free. The reason for the lack of support is
15that the hardware does not support these formats, and adding
16conversion routines to the kernel would lead to very ugly
17code in the presence of the mmap interface to the driver.
18And since xquake uses mmap, mmap is considered important :-)
19and no sane application uses ALaw/uLaw these days anyway.
20In short, playing a Sun .au file as follows:
21
22cat my_file.au > /dev/dsp
23
24does not work. Instead, you may use the play script from
25Chris Bagwell's sox-12.14 package (available from the URL
26below) to play many different audio file formats.
27The script automatically determines the audio format
28and does do audio conversions if necessary.
29http://home.sprynet.com/sprynet/cbagwell/projects.html
30
31
32Blocking vs. nonblocking IO
33---------------------------
34
35Unlike OSS/Free this driver honours the O_NONBLOCK file flag
36not only during open, but also during read and write.
37This is an effort to make the sound driver interface more
38regular. Timidity has problems with this; a patch
39is available from http://www.ife.ee.ethz.ch/~sailer/linux/pciaudio.html.
40(Timidity patched will also run on OSS/Free).
41
42
43MIDI UART
44---------
45
46The driver supports a simple MIDI UART interface, with
47no ioctl's supported.
48
49
50MIDI synthesizer
51----------------
52
53The card both has an OPL compatible FM synthesizer as well as
54a wavetable synthesizer.
55
56I haven't managed so far to get the OPL synth running.
57
58Using the wavetable synthesizer requires allocating
591-4MB of physically contiguous memory, which isn't possible
60currently on Linux without ugly hacks like the bigphysarea
61patch. Therefore, the driver doesn't support wavetable
62synthesis.
63
64
65No support from S3
66------------------
67
68I do not get any support from S3. Therefore, the driver
69still has many problems. For example, although the manual
70states that the chip should be able to access the sample
71buffer anywhere in 32bit address space, I haven't managed to
72get it working with buffers above 16M. Therefore, the card
73has the same disadvantages as ISA soundcards.
74
75Given that the card is also very noisy, and if you haven't
76already bought it, you should strongly opt for one of the
77comparatively priced Ensoniq products.
78
79
80Thomas Sailer
81t.sailer@alumni.ethz.ch
diff --git a/MAINTAINERS b/MAINTAINERS
index 1c6223d3ce70..17becb9b1a96 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -898,6 +898,16 @@ M: jack@suse.cz
898L: linux-kernel@vger.kernel.org 898L: linux-kernel@vger.kernel.org
899S: Maintained 899S: Maintained
900 900
901DISTRIBUTED LOCK MANAGER
902P: Patrick Caulfield
903M: pcaulfie@redhat.com
904P: David Teigland
905M: teigland@redhat.com
906L: cluster-devel@redhat.com
907W: http://sources.redhat.com/cluster/
908T: git kernel.org:/pub/scm/linux/kernel/git/steve/gfs-2.6.git
909S: Supported
910
901DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER 911DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
902P: Tobias Ringstrom 912P: Tobias Ringstrom
903M: tori@unhappy.mine.nu 913M: tori@unhappy.mine.nu
@@ -977,6 +987,13 @@ L: ebtables-devel@lists.sourceforge.net
977W: http://ebtables.sourceforge.net/ 987W: http://ebtables.sourceforge.net/
978S: Maintained 988S: Maintained
979 989
990ECRYPT FILE SYSTEM
991P: Mike Halcrow, Phillip Hellewell
992M: mhalcrow@us.ibm.com, phillip@hellewell.homeip.net
993L: ecryptfs-devel@lists.sourceforge.net
994W: http://ecryptfs.sourceforge.net/
995S: Supported
996
980EDAC-CORE 997EDAC-CORE
981P: Doug Thompson 998P: Doug Thompson
982M: norsk5@xmission.com 999M: norsk5@xmission.com
@@ -1166,6 +1183,14 @@ M: khc@pm.waw.pl
1166W: http://www.kernel.org/pub/linux/utils/net/hdlc/ 1183W: http://www.kernel.org/pub/linux/utils/net/hdlc/
1167S: Maintained 1184S: Maintained
1168 1185
1186GFS2 FILE SYSTEM
1187P: Steven Whitehouse
1188M: swhiteho@redhat.com
1189L: cluster-devel@redhat.com
1190W: http://sources.redhat.com/cluster/
1191T: git kernel.org:/pub/scm/linux/kernel/git/steve/gfs-2.6.git
1192S: Supported
1193
1169GIGASET ISDN DRIVERS 1194GIGASET ISDN DRIVERS
1170P: Hansjoerg Lipp 1195P: Hansjoerg Lipp
1171M: hjlipp@web.de 1196M: hjlipp@web.de
@@ -1893,11 +1918,6 @@ M: rroesler@syskonnect.de
1893W: http://www.syskonnect.com 1918W: http://www.syskonnect.com
1894S: Supported 1919S: Supported
1895 1920
1896MAESTRO PCI SOUND DRIVERS
1897P: Zach Brown
1898M: zab@zabbo.net
1899S: Odd Fixes
1900
1901MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 1921MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
1902P: Michael Kerrisk 1922P: Michael Kerrisk
1903M: mtk-manpages@gmx.net 1923M: mtk-manpages@gmx.net
@@ -2434,6 +2454,19 @@ M: mporter@kernel.crashing.org
2434L: linux-kernel@vger.kernel.org 2454L: linux-kernel@vger.kernel.org
2435S: Maintained 2455S: Maintained
2436 2456
2457READ-COPY UPDATE (RCU)
2458P: Dipankar Sarma
2459M: dipankar@in.ibm.com
2460W: http://www.rdrop.com/users/paulmck/rclock/
2461L: linux-kernel@vger.kernel.org
2462S: Supported
2463
2464RCUTORTURE MODULE
2465P: Josh Triplett
2466M: josh@freedesktop.org
2467L: linux-kernel@vger.kernel.org
2468S: Maintained
2469
2437REAL TIME CLOCK DRIVER 2470REAL TIME CLOCK DRIVER
2438P: Paul Gortmaker 2471P: Paul Gortmaker
2439M: p_gortmaker@yahoo.com 2472M: p_gortmaker@yahoo.com
@@ -2854,6 +2887,11 @@ M: hlhung3i@gmail.com
2854W: http://tcp-lp-mod.sourceforge.net/ 2887W: http://tcp-lp-mod.sourceforge.net/
2855S: Maintained 2888S: Maintained
2856 2889
2890TI FLASH MEDIA INTERFACE DRIVER
2891P: Alex Dubov
2892M: oakad@yahoo.com
2893S: Maintained
2894
2857TI OMAP RANDOM NUMBER GENERATOR SUPPORT 2895TI OMAP RANDOM NUMBER GENERATOR SUPPORT
2858P: Deepak Saxena 2896P: Deepak Saxena
2859M: dsaxena@plexity.net 2897M: dsaxena@plexity.net
@@ -3377,12 +3415,6 @@ M: Henk.Vergonet@gmail.com
3377L: usbb2k-api-dev@nongnu.org 3415L: usbb2k-api-dev@nongnu.org
3378S: Maintained 3416S: Maintained
3379 3417
3380YMFPCI YAMAHA PCI SOUND (Use ALSA instead)
3381P: Pete Zaitcev
3382M: zaitcev@yahoo.com
3383L: linux-kernel@vger.kernel.org
3384S: Obsolete
3385
3386Z8530 DRIVER FOR AX.25 3418Z8530 DRIVER FOR AX.25
3387P: Joerg Reuter 3419P: Joerg Reuter
3388M: jreuter@yaina.de 3420M: jreuter@yaina.de
diff --git a/Makefile b/Makefile
index 4c6c5e32ef96..adb2c748e105 100644
--- a/Makefile
+++ b/Makefile
@@ -1321,7 +1321,7 @@ define xtags
1321 --langdef=kconfig \ 1321 --langdef=kconfig \
1322 --language-force=kconfig \ 1322 --language-force=kconfig \
1323 --regex-kconfig='/^[[:blank:]]*config[[:blank:]]+([[:alnum:]_]+)/\1/'; \ 1323 --regex-kconfig='/^[[:blank:]]*config[[:blank:]]+([[:alnum:]_]+)/\1/'; \
1324 $(all-defconfigs) | xargs $1 -a \ 1324 $(all-defconfigs) | xargs -r $1 -a \
1325 --langdef=dotconfig \ 1325 --langdef=dotconfig \
1326 --language-force=dotconfig \ 1326 --language-force=dotconfig \
1327 --regex-dotconfig='/^#?[[:blank:]]*(CONFIG_[[:alnum:]_]+)/\1/'; \ 1327 --regex-dotconfig='/^#?[[:blank:]]*(CONFIG_[[:alnum:]_]+)/\1/'; \
@@ -1329,7 +1329,7 @@ define xtags
1329 $(all-sources) | xargs $1 -a; \ 1329 $(all-sources) | xargs $1 -a; \
1330 $(all-kconfigs) | xargs $1 -a \ 1330 $(all-kconfigs) | xargs $1 -a \
1331 --regex='/^[ \t]*config[ \t]+\([a-zA-Z0-9_]+\)/\1/'; \ 1331 --regex='/^[ \t]*config[ \t]+\([a-zA-Z0-9_]+\)/\1/'; \
1332 $(all-defconfigs) | xargs $1 -a \ 1332 $(all-defconfigs) | xargs -r $1 -a \
1333 --regex='/^#?[ \t]?\(CONFIG_[a-zA-Z0-9_]+\)/\1/'; \ 1333 --regex='/^#?[ \t]?\(CONFIG_[a-zA-Z0-9_]+\)/\1/'; \
1334 else \ 1334 else \
1335 $(all-sources) | xargs $1 -a; \ 1335 $(all-sources) | xargs $1 -a; \
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index 1aaea6ab8c46..92f79cdd9a48 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -62,8 +62,6 @@ static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return
62#include <mach_mpparse.h> 62#include <mach_mpparse.h>
63#endif /* CONFIG_X86_LOCAL_APIC */ 63#endif /* CONFIG_X86_LOCAL_APIC */
64 64
65static inline int gsi_irq_sharing(int gsi) { return gsi; }
66
67#endif /* X86 */ 65#endif /* X86 */
68 66
69#define BAD_MADT_ENTRY(entry, end) ( \ 67#define BAD_MADT_ENTRY(entry, end) ( \
@@ -468,12 +466,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
468 466
469int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) 467int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
470{ 468{
471#ifdef CONFIG_X86_IO_APIC 469 *irq = gsi;
472 if (use_pci_vector() && !platform_legacy_irq(gsi))
473 *irq = IO_APIC_VECTOR(gsi);
474 else
475#endif
476 *irq = gsi_irq_sharing(gsi);
477 return 0; 470 return 0;
478} 471}
479 472
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index ea5f4e7958d8..d07ed31f11e3 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -34,35 +34,15 @@
34 * moves to arch independent land 34 * moves to arch independent land
35 */ 35 */
36 36
37DEFINE_SPINLOCK(i8259A_lock);
38
39static void end_8259A_irq (unsigned int irq)
40{
41 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
42 irq_desc[irq].action)
43 enable_8259A_irq(irq);
44}
45
46#define shutdown_8259A_irq disable_8259A_irq
47
48static int i8259A_auto_eoi; 37static int i8259A_auto_eoi;
49 38DEFINE_SPINLOCK(i8259A_lock);
50static void mask_and_ack_8259A(unsigned int); 39static void mask_and_ack_8259A(unsigned int);
51 40
52unsigned int startup_8259A_irq(unsigned int irq) 41static struct irq_chip i8259A_chip = {
53{ 42 .name = "XT-PIC",
54 enable_8259A_irq(irq); 43 .mask = disable_8259A_irq,
55 return 0; /* never anything pending */ 44 .unmask = enable_8259A_irq,
56} 45 .mask_ack = mask_and_ack_8259A,
57
58static struct hw_interrupt_type i8259A_irq_type = {
59 .typename = "XT-PIC",
60 .startup = startup_8259A_irq,
61 .shutdown = shutdown_8259A_irq,
62 .enable = enable_8259A_irq,
63 .disable = disable_8259A_irq,
64 .ack = mask_and_ack_8259A,
65 .end = end_8259A_irq,
66}; 46};
67 47
68/* 48/*
@@ -133,7 +113,7 @@ void make_8259A_irq(unsigned int irq)
133{ 113{
134 disable_irq_nosync(irq); 114 disable_irq_nosync(irq);
135 io_apic_irqs &= ~(1<<irq); 115 io_apic_irqs &= ~(1<<irq);
136 irq_desc[irq].chip = &i8259A_irq_type; 116 set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
137 enable_irq(irq); 117 enable_irq(irq);
138} 118}
139 119
@@ -327,12 +307,12 @@ void init_8259A(int auto_eoi)
327 outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */ 307 outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
328 if (auto_eoi) 308 if (auto_eoi)
329 /* 309 /*
330 * in AEOI mode we just have to mask the interrupt 310 * In AEOI mode we just have to mask the interrupt
331 * when acking. 311 * when acking.
332 */ 312 */
333 i8259A_irq_type.ack = disable_8259A_irq; 313 i8259A_chip.mask_ack = disable_8259A_irq;
334 else 314 else
335 i8259A_irq_type.ack = mask_and_ack_8259A; 315 i8259A_chip.mask_ack = mask_and_ack_8259A;
336 316
337 udelay(100); /* wait for 8259A to initialize */ 317 udelay(100); /* wait for 8259A to initialize */
338 318
@@ -389,12 +369,13 @@ void __init init_ISA_irqs (void)
389 /* 369 /*
390 * 16 old-style INTA-cycle interrupts: 370 * 16 old-style INTA-cycle interrupts:
391 */ 371 */
392 irq_desc[i].chip = &i8259A_irq_type; 372 set_irq_chip_and_handler(i, &i8259A_chip,
373 handle_level_irq);
393 } else { 374 } else {
394 /* 375 /*
395 * 'high' PCI IRQs filled in on demand 376 * 'high' PCI IRQs filled in on demand
396 */ 377 */
397 irq_desc[i].chip = &no_irq_type; 378 irq_desc[i].chip = &no_irq_chip;
398 } 379 }
399 } 380 }
400} 381}
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index fd0df75cfbda..b7287fb499f3 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -31,6 +31,9 @@
31#include <linux/acpi.h> 31#include <linux/acpi.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/sysdev.h> 33#include <linux/sysdev.h>
34#include <linux/pci.h>
35#include <linux/msi.h>
36#include <linux/htirq.h>
34 37
35#include <asm/io.h> 38#include <asm/io.h>
36#include <asm/smp.h> 39#include <asm/smp.h>
@@ -38,6 +41,8 @@
38#include <asm/timer.h> 41#include <asm/timer.h>
39#include <asm/i8259.h> 42#include <asm/i8259.h>
40#include <asm/nmi.h> 43#include <asm/nmi.h>
44#include <asm/msidef.h>
45#include <asm/hypertransport.h>
41 46
42#include <mach_apic.h> 47#include <mach_apic.h>
43#include <mach_apicdef.h> 48#include <mach_apicdef.h>
@@ -86,15 +91,6 @@ static struct irq_pin_list {
86 int apic, pin, next; 91 int apic, pin, next;
87} irq_2_pin[PIN_MAP_SIZE]; 92} irq_2_pin[PIN_MAP_SIZE];
88 93
89int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
90#ifdef CONFIG_PCI_MSI
91#define vector_to_irq(vector) \
92 (platform_legacy_irq(vector) ? vector : vector_irq[vector])
93#else
94#define vector_to_irq(vector) (vector)
95#endif
96
97
98union entry_union { 94union entry_union {
99 struct { u32 w1, w2; }; 95 struct { u32 w1, w2; };
100 struct IO_APIC_route_entry entry; 96 struct IO_APIC_route_entry entry;
@@ -280,7 +276,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
280 break; 276 break;
281 entry = irq_2_pin + entry->next; 277 entry = irq_2_pin + entry->next;
282 } 278 }
283 set_irq_info(irq, cpumask); 279 set_native_irq_info(irq, cpumask);
284 spin_unlock_irqrestore(&ioapic_lock, flags); 280 spin_unlock_irqrestore(&ioapic_lock, flags);
285} 281}
286 282
@@ -1181,46 +1177,45 @@ static inline int IO_APIC_irq_trigger(int irq)
1181/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */ 1177/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
1182u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }; 1178u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
1183 1179
1184int assign_irq_vector(int irq) 1180static int __assign_irq_vector(int irq)
1185{ 1181{
1186 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; 1182 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
1187 unsigned long flags;
1188 int vector; 1183 int vector;
1189 1184
1190 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS); 1185 BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
1191 1186
1192 spin_lock_irqsave(&vector_lock, flags); 1187 if (IO_APIC_VECTOR(irq) > 0)
1193
1194 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
1195 spin_unlock_irqrestore(&vector_lock, flags);
1196 return IO_APIC_VECTOR(irq); 1188 return IO_APIC_VECTOR(irq);
1197 } 1189
1198next:
1199 current_vector += 8; 1190 current_vector += 8;
1200 if (current_vector == SYSCALL_VECTOR) 1191 if (current_vector == SYSCALL_VECTOR)
1201 goto next; 1192 current_vector += 8;
1202 1193
1203 if (current_vector >= FIRST_SYSTEM_VECTOR) { 1194 if (current_vector >= FIRST_SYSTEM_VECTOR) {
1204 offset++; 1195 offset++;
1205 if (!(offset%8)) { 1196 if (!(offset % 8))
1206 spin_unlock_irqrestore(&vector_lock, flags);
1207 return -ENOSPC; 1197 return -ENOSPC;
1208 }
1209 current_vector = FIRST_DEVICE_VECTOR + offset; 1198 current_vector = FIRST_DEVICE_VECTOR + offset;
1210 } 1199 }
1211 1200
1212 vector = current_vector; 1201 vector = current_vector;
1213 vector_irq[vector] = irq; 1202 IO_APIC_VECTOR(irq) = vector;
1214 if (irq != AUTO_ASSIGN) 1203
1215 IO_APIC_VECTOR(irq) = vector; 1204 return vector;
1205}
1206
1207static int assign_irq_vector(int irq)
1208{
1209 unsigned long flags;
1210 int vector;
1216 1211
1212 spin_lock_irqsave(&vector_lock, flags);
1213 vector = __assign_irq_vector(irq);
1217 spin_unlock_irqrestore(&vector_lock, flags); 1214 spin_unlock_irqrestore(&vector_lock, flags);
1218 1215
1219 return vector; 1216 return vector;
1220} 1217}
1221 1218static struct irq_chip ioapic_chip;
1222static struct hw_interrupt_type ioapic_level_type;
1223static struct hw_interrupt_type ioapic_edge_type;
1224 1219
1225#define IOAPIC_AUTO -1 1220#define IOAPIC_AUTO -1
1226#define IOAPIC_EDGE 0 1221#define IOAPIC_EDGE 0
@@ -1228,16 +1223,14 @@ static struct hw_interrupt_type ioapic_edge_type;
1228 1223
1229static void ioapic_register_intr(int irq, int vector, unsigned long trigger) 1224static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
1230{ 1225{
1231 unsigned idx;
1232
1233 idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
1234
1235 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 1226 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1236 trigger == IOAPIC_LEVEL) 1227 trigger == IOAPIC_LEVEL)
1237 irq_desc[idx].chip = &ioapic_level_type; 1228 set_irq_chip_and_handler(irq, &ioapic_chip,
1229 handle_fasteoi_irq);
1238 else 1230 else
1239 irq_desc[idx].chip = &ioapic_edge_type; 1231 set_irq_chip_and_handler(irq, &ioapic_chip,
1240 set_intr_gate(vector, interrupt[idx]); 1232 handle_edge_irq);
1233 set_intr_gate(vector, interrupt[irq]);
1241} 1234}
1242 1235
1243static void __init setup_IO_APIC_irqs(void) 1236static void __init setup_IO_APIC_irqs(void)
@@ -1346,7 +1339,8 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
1346 * The timer IRQ doesn't have to know that behind the 1339 * The timer IRQ doesn't have to know that behind the
1347 * scene we have a 8259A-master in AEOI mode ... 1340 * scene we have a 8259A-master in AEOI mode ...
1348 */ 1341 */
1349 irq_desc[0].chip = &ioapic_edge_type; 1342 irq_desc[0].chip = &ioapic_chip;
1343 set_irq_handler(0, handle_edge_irq);
1350 1344
1351 /* 1345 /*
1352 * Add it to the IO-APIC irq-routing table: 1346 * Add it to the IO-APIC irq-routing table:
@@ -1481,17 +1475,12 @@ void __init print_IO_APIC(void)
1481 ); 1475 );
1482 } 1476 }
1483 } 1477 }
1484 if (use_pci_vector())
1485 printk(KERN_INFO "Using vector-based indexing\n");
1486 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1478 printk(KERN_DEBUG "IRQ to pin mappings:\n");
1487 for (i = 0; i < NR_IRQS; i++) { 1479 for (i = 0; i < NR_IRQS; i++) {
1488 struct irq_pin_list *entry = irq_2_pin + i; 1480 struct irq_pin_list *entry = irq_2_pin + i;
1489 if (entry->pin < 0) 1481 if (entry->pin < 0)
1490 continue; 1482 continue;
1491 if (use_pci_vector() && !platform_legacy_irq(i)) 1483 printk(KERN_DEBUG "IRQ%d ", i);
1492 printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
1493 else
1494 printk(KERN_DEBUG "IRQ%d ", i);
1495 for (;;) { 1484 for (;;) {
1496 printk("-> %d:%d", entry->apic, entry->pin); 1485 printk("-> %d:%d", entry->apic, entry->pin);
1497 if (!entry->next) 1486 if (!entry->next)
@@ -1918,6 +1907,8 @@ static int __init timer_irq_works(void)
1918 */ 1907 */
1919 1908
1920/* 1909/*
1910 * Startup quirk:
1911 *
1921 * Starting up a edge-triggered IO-APIC interrupt is 1912 * Starting up a edge-triggered IO-APIC interrupt is
1922 * nasty - we need to make sure that we get the edge. 1913 * nasty - we need to make sure that we get the edge.
1923 * If it is already asserted for some reason, we need 1914 * If it is already asserted for some reason, we need
@@ -1925,8 +1916,10 @@ static int __init timer_irq_works(void)
1925 * 1916 *
1926 * This is not complete - we should be able to fake 1917 * This is not complete - we should be able to fake
1927 * an edge even if it isn't on the 8259A... 1918 * an edge even if it isn't on the 8259A...
1919 *
1920 * (We do this for level-triggered IRQs too - it cannot hurt.)
1928 */ 1921 */
1929static unsigned int startup_edge_ioapic_irq(unsigned int irq) 1922static unsigned int startup_ioapic_irq(unsigned int irq)
1930{ 1923{
1931 int was_pending = 0; 1924 int was_pending = 0;
1932 unsigned long flags; 1925 unsigned long flags;
@@ -1943,47 +1936,18 @@ static unsigned int startup_edge_ioapic_irq(unsigned int irq)
1943 return was_pending; 1936 return was_pending;
1944} 1937}
1945 1938
1946/* 1939static void ack_ioapic_irq(unsigned int irq)
1947 * Once we have recorded IRQ_PENDING already, we can mask the
1948 * interrupt for real. This prevents IRQ storms from unhandled
1949 * devices.
1950 */
1951static void ack_edge_ioapic_irq(unsigned int irq)
1952{ 1940{
1953 move_irq(irq); 1941 move_native_irq(irq);
1954 if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
1955 == (IRQ_PENDING | IRQ_DISABLED))
1956 mask_IO_APIC_irq(irq);
1957 ack_APIC_irq(); 1942 ack_APIC_irq();
1958} 1943}
1959 1944
1960/* 1945static void ack_ioapic_quirk_irq(unsigned int irq)
1961 * Level triggered interrupts can just be masked,
1962 * and shutting down and starting up the interrupt
1963 * is the same as enabling and disabling them -- except
1964 * with a startup need to return a "was pending" value.
1965 *
1966 * Level triggered interrupts are special because we
1967 * do not touch any IO-APIC register while handling
1968 * them. We ack the APIC in the end-IRQ handler, not
1969 * in the start-IRQ-handler. Protection against reentrance
1970 * from the same interrupt is still provided, both by the
1971 * generic IRQ layer and by the fact that an unacked local
1972 * APIC does not accept IRQs.
1973 */
1974static unsigned int startup_level_ioapic_irq (unsigned int irq)
1975{
1976 unmask_IO_APIC_irq(irq);
1977
1978 return 0; /* don't check for pending */
1979}
1980
1981static void end_level_ioapic_irq (unsigned int irq)
1982{ 1946{
1983 unsigned long v; 1947 unsigned long v;
1984 int i; 1948 int i;
1985 1949
1986 move_irq(irq); 1950 move_native_irq(irq);
1987/* 1951/*
1988 * It appears there is an erratum which affects at least version 0x11 1952 * It appears there is an erratum which affects at least version 0x11
1989 * of I/O APIC (that's the 82093AA and cores integrated into various 1953 * of I/O APIC (that's the 82093AA and cores integrated into various
@@ -2018,105 +1982,26 @@ static void end_level_ioapic_irq (unsigned int irq)
2018 } 1982 }
2019} 1983}
2020 1984
2021#ifdef CONFIG_PCI_MSI 1985static int ioapic_retrigger_irq(unsigned int irq)
2022static unsigned int startup_edge_ioapic_vector(unsigned int vector)
2023{
2024 int irq = vector_to_irq(vector);
2025
2026 return startup_edge_ioapic_irq(irq);
2027}
2028
2029static void ack_edge_ioapic_vector(unsigned int vector)
2030{
2031 int irq = vector_to_irq(vector);
2032
2033 move_native_irq(vector);
2034 ack_edge_ioapic_irq(irq);
2035}
2036
2037static unsigned int startup_level_ioapic_vector (unsigned int vector)
2038{
2039 int irq = vector_to_irq(vector);
2040
2041 return startup_level_ioapic_irq (irq);
2042}
2043
2044static void end_level_ioapic_vector (unsigned int vector)
2045{
2046 int irq = vector_to_irq(vector);
2047
2048 move_native_irq(vector);
2049 end_level_ioapic_irq(irq);
2050}
2051
2052static void mask_IO_APIC_vector (unsigned int vector)
2053{
2054 int irq = vector_to_irq(vector);
2055
2056 mask_IO_APIC_irq(irq);
2057}
2058
2059static void unmask_IO_APIC_vector (unsigned int vector)
2060{
2061 int irq = vector_to_irq(vector);
2062
2063 unmask_IO_APIC_irq(irq);
2064}
2065
2066#ifdef CONFIG_SMP
2067static void set_ioapic_affinity_vector (unsigned int vector,
2068 cpumask_t cpu_mask)
2069{
2070 int irq = vector_to_irq(vector);
2071
2072 set_native_irq_info(vector, cpu_mask);
2073 set_ioapic_affinity_irq(irq, cpu_mask);
2074}
2075#endif
2076#endif
2077
2078static int ioapic_retrigger(unsigned int irq)
2079{ 1986{
2080 send_IPI_self(IO_APIC_VECTOR(irq)); 1987 send_IPI_self(IO_APIC_VECTOR(irq));
2081 1988
2082 return 1; 1989 return 1;
2083} 1990}
2084 1991
2085/* 1992static struct irq_chip ioapic_chip __read_mostly = {
2086 * Level and edge triggered IO-APIC interrupts need different handling, 1993 .name = "IO-APIC",
2087 * so we use two separate IRQ descriptors. Edge triggered IRQs can be 1994 .startup = startup_ioapic_irq,
2088 * handled with the level-triggered descriptor, but that one has slightly 1995 .mask = mask_IO_APIC_irq,
2089 * more overhead. Level-triggered interrupts cannot be handled with the 1996 .unmask = unmask_IO_APIC_irq,
2090 * edge-triggered handler, without risking IRQ storms and other ugly 1997 .ack = ack_ioapic_irq,
2091 * races. 1998 .eoi = ack_ioapic_quirk_irq,
2092 */
2093static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
2094 .typename = "IO-APIC-edge",
2095 .startup = startup_edge_ioapic,
2096 .shutdown = shutdown_edge_ioapic,
2097 .enable = enable_edge_ioapic,
2098 .disable = disable_edge_ioapic,
2099 .ack = ack_edge_ioapic,
2100 .end = end_edge_ioapic,
2101#ifdef CONFIG_SMP 1999#ifdef CONFIG_SMP
2102 .set_affinity = set_ioapic_affinity, 2000 .set_affinity = set_ioapic_affinity_irq,
2103#endif 2001#endif
2104 .retrigger = ioapic_retrigger, 2002 .retrigger = ioapic_retrigger_irq,
2105}; 2003};
2106 2004
2107static struct hw_interrupt_type ioapic_level_type __read_mostly = {
2108 .typename = "IO-APIC-level",
2109 .startup = startup_level_ioapic,
2110 .shutdown = shutdown_level_ioapic,
2111 .enable = enable_level_ioapic,
2112 .disable = disable_level_ioapic,
2113 .ack = mask_and_ack_level_ioapic,
2114 .end = end_level_ioapic,
2115#ifdef CONFIG_SMP
2116 .set_affinity = set_ioapic_affinity,
2117#endif
2118 .retrigger = ioapic_retrigger,
2119};
2120 2005
2121static inline void init_IO_APIC_traps(void) 2006static inline void init_IO_APIC_traps(void)
2122{ 2007{
@@ -2135,11 +2020,6 @@ static inline void init_IO_APIC_traps(void)
2135 */ 2020 */
2136 for (irq = 0; irq < NR_IRQS ; irq++) { 2021 for (irq = 0; irq < NR_IRQS ; irq++) {
2137 int tmp = irq; 2022 int tmp = irq;
2138 if (use_pci_vector()) {
2139 if (!platform_legacy_irq(tmp))
2140 if ((tmp = vector_to_irq(tmp)) == -1)
2141 continue;
2142 }
2143 if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) { 2023 if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
2144 /* 2024 /*
2145 * Hmm.. We don't have an entry for this, 2025 * Hmm.. We don't have an entry for this,
@@ -2150,20 +2030,21 @@ static inline void init_IO_APIC_traps(void)
2150 make_8259A_irq(irq); 2030 make_8259A_irq(irq);
2151 else 2031 else
2152 /* Strange. Oh, well.. */ 2032 /* Strange. Oh, well.. */
2153 irq_desc[irq].chip = &no_irq_type; 2033 irq_desc[irq].chip = &no_irq_chip;
2154 } 2034 }
2155 } 2035 }
2156} 2036}
2157 2037
2158static void enable_lapic_irq (unsigned int irq) 2038/*
2159{ 2039 * The local APIC irq-chip implementation:
2160 unsigned long v; 2040 */
2161 2041
2162 v = apic_read(APIC_LVT0); 2042static void ack_apic(unsigned int irq)
2163 apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED); 2043{
2044 ack_APIC_irq();
2164} 2045}
2165 2046
2166static void disable_lapic_irq (unsigned int irq) 2047static void mask_lapic_irq (unsigned int irq)
2167{ 2048{
2168 unsigned long v; 2049 unsigned long v;
2169 2050
@@ -2171,21 +2052,19 @@ static void disable_lapic_irq (unsigned int irq)
2171 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED); 2052 apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
2172} 2053}
2173 2054
2174static void ack_lapic_irq (unsigned int irq) 2055static void unmask_lapic_irq (unsigned int irq)
2175{ 2056{
2176 ack_APIC_irq(); 2057 unsigned long v;
2177}
2178 2058
2179static void end_lapic_irq (unsigned int i) { /* nothing */ } 2059 v = apic_read(APIC_LVT0);
2060 apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
2061}
2180 2062
2181static struct hw_interrupt_type lapic_irq_type __read_mostly = { 2063static struct irq_chip lapic_chip __read_mostly = {
2182 .typename = "local-APIC-edge", 2064 .name = "local-APIC-edge",
2183 .startup = NULL, /* startup_irq() not used for IRQ0 */ 2065 .mask = mask_lapic_irq,
2184 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */ 2066 .unmask = unmask_lapic_irq,
2185 .enable = enable_lapic_irq, 2067 .eoi = ack_apic,
2186 .disable = disable_lapic_irq,
2187 .ack = ack_lapic_irq,
2188 .end = end_lapic_irq
2189}; 2068};
2190 2069
2191static void setup_nmi (void) 2070static void setup_nmi (void)
@@ -2356,7 +2235,7 @@ static inline void check_timer(void)
2356 printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ..."); 2235 printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
2357 2236
2358 disable_8259A_irq(0); 2237 disable_8259A_irq(0);
2359 irq_desc[0].chip = &lapic_irq_type; 2238 set_irq_chip_and_handler(0, &lapic_chip, handle_fasteoi_irq);
2360 apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */ 2239 apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
2361 enable_8259A_irq(0); 2240 enable_8259A_irq(0);
2362 2241
@@ -2531,6 +2410,238 @@ static int __init ioapic_init_sysfs(void)
2531 2410
2532device_initcall(ioapic_init_sysfs); 2411device_initcall(ioapic_init_sysfs);
2533 2412
2413/*
2414 * Dynamic irq allocate and deallocation
2415 */
2416int create_irq(void)
2417{
2418 /* Allocate an unused irq */
2419 int irq, new, vector;
2420 unsigned long flags;
2421
2422 irq = -ENOSPC;
2423 spin_lock_irqsave(&vector_lock, flags);
2424 for (new = (NR_IRQS - 1); new >= 0; new--) {
2425 if (platform_legacy_irq(new))
2426 continue;
2427 if (irq_vector[new] != 0)
2428 continue;
2429 vector = __assign_irq_vector(new);
2430 if (likely(vector > 0))
2431 irq = new;
2432 break;
2433 }
2434 spin_unlock_irqrestore(&vector_lock, flags);
2435
2436 if (irq >= 0) {
2437 set_intr_gate(vector, interrupt[irq]);
2438 dynamic_irq_init(irq);
2439 }
2440 return irq;
2441}
2442
2443void destroy_irq(unsigned int irq)
2444{
2445 unsigned long flags;
2446
2447 dynamic_irq_cleanup(irq);
2448
2449 spin_lock_irqsave(&vector_lock, flags);
2450 irq_vector[irq] = 0;
2451 spin_unlock_irqrestore(&vector_lock, flags);
2452}
2453
2454/*
2455 * MSI mesage composition
2456 */
2457#ifdef CONFIG_PCI_MSI
2458static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
2459{
2460 int vector;
2461 unsigned dest;
2462
2463 vector = assign_irq_vector(irq);
2464 if (vector >= 0) {
2465 dest = cpu_mask_to_apicid(TARGET_CPUS);
2466
2467 msg->address_hi = MSI_ADDR_BASE_HI;
2468 msg->address_lo =
2469 MSI_ADDR_BASE_LO |
2470 ((INT_DEST_MODE == 0) ?
2471 MSI_ADDR_DEST_MODE_PHYSICAL:
2472 MSI_ADDR_DEST_MODE_LOGICAL) |
2473 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2474 MSI_ADDR_REDIRECTION_CPU:
2475 MSI_ADDR_REDIRECTION_LOWPRI) |
2476 MSI_ADDR_DEST_ID(dest);
2477
2478 msg->data =
2479 MSI_DATA_TRIGGER_EDGE |
2480 MSI_DATA_LEVEL_ASSERT |
2481 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2482 MSI_DATA_DELIVERY_FIXED:
2483 MSI_DATA_DELIVERY_LOWPRI) |
2484 MSI_DATA_VECTOR(vector);
2485 }
2486 return vector;
2487}
2488
2489#ifdef CONFIG_SMP
2490static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2491{
2492 struct msi_msg msg;
2493 unsigned int dest;
2494 cpumask_t tmp;
2495 int vector;
2496
2497 cpus_and(tmp, mask, cpu_online_map);
2498 if (cpus_empty(tmp))
2499 tmp = TARGET_CPUS;
2500
2501 vector = assign_irq_vector(irq);
2502 if (vector < 0)
2503 return;
2504
2505 dest = cpu_mask_to_apicid(mask);
2506
2507 read_msi_msg(irq, &msg);
2508
2509 msg.data &= ~MSI_DATA_VECTOR_MASK;
2510 msg.data |= MSI_DATA_VECTOR(vector);
2511 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
2512 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
2513
2514 write_msi_msg(irq, &msg);
2515 set_native_irq_info(irq, mask);
2516}
2517#endif /* CONFIG_SMP */
2518
2519/*
2520 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
2521 * which implement the MSI or MSI-X Capability Structure.
2522 */
2523static struct irq_chip msi_chip = {
2524 .name = "PCI-MSI",
2525 .unmask = unmask_msi_irq,
2526 .mask = mask_msi_irq,
2527 .ack = ack_ioapic_irq,
2528#ifdef CONFIG_SMP
2529 .set_affinity = set_msi_irq_affinity,
2530#endif
2531 .retrigger = ioapic_retrigger_irq,
2532};
2533
2534int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
2535{
2536 struct msi_msg msg;
2537 int ret;
2538 ret = msi_compose_msg(dev, irq, &msg);
2539 if (ret < 0)
2540 return ret;
2541
2542 write_msi_msg(irq, &msg);
2543
2544 set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq);
2545
2546 return 0;
2547}
2548
2549void arch_teardown_msi_irq(unsigned int irq)
2550{
2551 return;
2552}
2553
2554#endif /* CONFIG_PCI_MSI */
2555
2556/*
2557 * Hypertransport interrupt support
2558 */
2559#ifdef CONFIG_HT_IRQ
2560
2561#ifdef CONFIG_SMP
2562
2563static void target_ht_irq(unsigned int irq, unsigned int dest)
2564{
2565 u32 low, high;
2566 low = read_ht_irq_low(irq);
2567 high = read_ht_irq_high(irq);
2568
2569 low &= ~(HT_IRQ_LOW_DEST_ID_MASK);
2570 high &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
2571
2572 low |= HT_IRQ_LOW_DEST_ID(dest);
2573 high |= HT_IRQ_HIGH_DEST_ID(dest);
2574
2575 write_ht_irq_low(irq, low);
2576 write_ht_irq_high(irq, high);
2577}
2578
2579static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
2580{
2581 unsigned int dest;
2582 cpumask_t tmp;
2583
2584 cpus_and(tmp, mask, cpu_online_map);
2585 if (cpus_empty(tmp))
2586 tmp = TARGET_CPUS;
2587
2588 cpus_and(mask, tmp, CPU_MASK_ALL);
2589
2590 dest = cpu_mask_to_apicid(mask);
2591
2592 target_ht_irq(irq, dest);
2593 set_native_irq_info(irq, mask);
2594}
2595#endif
2596
2597static struct hw_interrupt_type ht_irq_chip = {
2598 .name = "PCI-HT",
2599 .mask = mask_ht_irq,
2600 .unmask = unmask_ht_irq,
2601 .ack = ack_ioapic_irq,
2602#ifdef CONFIG_SMP
2603 .set_affinity = set_ht_irq_affinity,
2604#endif
2605 .retrigger = ioapic_retrigger_irq,
2606};
2607
2608int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
2609{
2610 int vector;
2611
2612 vector = assign_irq_vector(irq);
2613 if (vector >= 0) {
2614 u32 low, high;
2615 unsigned dest;
2616 cpumask_t tmp;
2617
2618 cpus_clear(tmp);
2619 cpu_set(vector >> 8, tmp);
2620 dest = cpu_mask_to_apicid(tmp);
2621
2622 high = HT_IRQ_HIGH_DEST_ID(dest);
2623
2624 low = HT_IRQ_LOW_BASE |
2625 HT_IRQ_LOW_DEST_ID(dest) |
2626 HT_IRQ_LOW_VECTOR(vector) |
2627 ((INT_DEST_MODE == 0) ?
2628 HT_IRQ_LOW_DM_PHYSICAL :
2629 HT_IRQ_LOW_DM_LOGICAL) |
2630 HT_IRQ_LOW_RQEOI_EDGE |
2631 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
2632 HT_IRQ_LOW_MT_FIXED :
2633 HT_IRQ_LOW_MT_ARBITRATED) |
2634 HT_IRQ_LOW_IRQ_MASKED;
2635
2636 write_ht_irq_low(irq, low);
2637 write_ht_irq_high(irq, high);
2638
2639 set_irq_chip_and_handler(irq, &ht_irq_chip, handle_edge_irq);
2640 }
2641 return vector;
2642}
2643#endif /* CONFIG_HT_IRQ */
2644
2534/* -------------------------------------------------------------------------- 2645/* --------------------------------------------------------------------------
2535 ACPI-based IOAPIC Configuration 2646 ACPI-based IOAPIC Configuration
2536 -------------------------------------------------------------------------- */ 2647 -------------------------------------------------------------------------- */
@@ -2684,7 +2795,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
2684 2795
2685 ioapic_write_entry(ioapic, pin, entry); 2796 ioapic_write_entry(ioapic, pin, entry);
2686 spin_lock_irqsave(&ioapic_lock, flags); 2797 spin_lock_irqsave(&ioapic_lock, flags);
2687 set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS); 2798 set_native_irq_info(irq, TARGET_CPUS);
2688 spin_unlock_irqrestore(&ioapic_lock, flags); 2799 spin_unlock_irqrestore(&ioapic_lock, flags);
2689 2800
2690 return 0; 2801 return 0;
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 5fe547cd8f9f..3dd2e180151b 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -55,6 +55,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
55{ 55{
56 /* high bit used in ret_from_ code */ 56 /* high bit used in ret_from_ code */
57 int irq = ~regs->orig_eax; 57 int irq = ~regs->orig_eax;
58 struct irq_desc *desc = irq_desc + irq;
58#ifdef CONFIG_4KSTACKS 59#ifdef CONFIG_4KSTACKS
59 union irq_ctx *curctx, *irqctx; 60 union irq_ctx *curctx, *irqctx;
60 u32 *isp; 61 u32 *isp;
@@ -94,7 +95,7 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
94 * current stack (which is the irq stack already after all) 95 * current stack (which is the irq stack already after all)
95 */ 96 */
96 if (curctx != irqctx) { 97 if (curctx != irqctx) {
97 int arg1, arg2, ebx; 98 int arg1, arg2, arg3, ebx;
98 99
99 /* build the stack frame on the IRQ stack */ 100 /* build the stack frame on the IRQ stack */
100 isp = (u32*) ((char*)irqctx + sizeof(*irqctx)); 101 isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
@@ -110,16 +111,17 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
110 (curctx->tinfo.preempt_count & SOFTIRQ_MASK); 111 (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
111 112
112 asm volatile( 113 asm volatile(
113 " xchgl %%ebx,%%esp \n" 114 " xchgl %%ebx,%%esp \n"
114 " call __do_IRQ \n" 115 " call *%%edi \n"
115 " movl %%ebx,%%esp \n" 116 " movl %%ebx,%%esp \n"
116 : "=a" (arg1), "=d" (arg2), "=b" (ebx) 117 : "=a" (arg1), "=d" (arg2), "=c" (arg3), "=b" (ebx)
117 : "0" (irq), "1" (regs), "2" (isp) 118 : "0" (irq), "1" (desc), "2" (regs), "3" (isp),
118 : "memory", "cc", "ecx" 119 "D" (desc->handle_irq)
120 : "memory", "cc"
119 ); 121 );
120 } else 122 } else
121#endif 123#endif
122 __do_IRQ(irq, regs); 124 desc->handle_irq(irq, desc, regs);
123 125
124 irq_exit(); 126 irq_exit();
125 127
@@ -253,7 +255,8 @@ int show_interrupts(struct seq_file *p, void *v)
253 for_each_online_cpu(j) 255 for_each_online_cpu(j)
254 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 256 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
255#endif 257#endif
256 seq_printf(p, " %14s", irq_desc[i].chip->typename); 258 seq_printf(p, " %8s", irq_desc[i].chip->name);
259 seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
257 seq_printf(p, " %s", action->name); 260 seq_printf(p, " %s", action->name);
258 261
259 for (action=action->next; action; action = action->next) 262 for (action=action->next; action; action = action->next)
diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c
index 4a8995c9c762..47f02af74be3 100644
--- a/arch/i386/pci/irq.c
+++ b/arch/i386/pci/irq.c
@@ -981,10 +981,6 @@ static void __init pcibios_fixup_irqs(void)
981 pci_name(bridge), 'A' + pin, irq); 981 pci_name(bridge), 'A' + pin, irq);
982 } 982 }
983 if (irq >= 0) { 983 if (irq >= 0) {
984 if (use_pci_vector() &&
985 !platform_legacy_irq(irq))
986 irq = IO_APIC_VECTOR(irq);
987
988 printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n", 984 printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
989 pci_name(dev), 'A' + pin, irq); 985 pci_name(dev), 'A' + pin, irq);
990 dev->irq = irq; 986 dev->irq = irq;
@@ -1169,33 +1165,3 @@ static int pirq_enable_irq(struct pci_dev *dev)
1169 } 1165 }
1170 return 0; 1166 return 0;
1171} 1167}
1172
1173int pci_vector_resources(int last, int nr_released)
1174{
1175 int count = nr_released;
1176
1177 int next = last;
1178 int offset = (last % 8);
1179
1180 while (next < FIRST_SYSTEM_VECTOR) {
1181 next += 8;
1182#ifdef CONFIG_X86_64
1183 if (next == IA32_SYSCALL_VECTOR)
1184 continue;
1185#else
1186 if (next == SYSCALL_VECTOR)
1187 continue;
1188#endif
1189 count++;
1190 if (next >= FIRST_SYSTEM_VECTOR) {
1191 if (offset%8) {
1192 next = FIRST_DEVICE_VECTOR + offset;
1193 offset++;
1194 continue;
1195 }
1196 count--;
1197 }
1198 }
1199
1200 return count;
1201}
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 31497496eb4b..cfa099b04cda 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
30obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o 30obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
31obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o 31obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
32obj-$(CONFIG_AUDIT) += audit.o 32obj-$(CONFIG_AUDIT) += audit.o
33obj-$(CONFIG_PCI_MSI) += msi_ia64.o
33mca_recovery-y += mca_drv.o mca_drv_asm.o 34mca_recovery-y += mca_drv.o mca_drv_asm.o
34 35
35obj-$(CONFIG_IA64_ESI) += esi.o 36obj-$(CONFIG_IA64_ESI) += esi.o
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index aafca18ab33b..ab2d19c3661f 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -30,6 +30,7 @@
30#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
31#include <linux/threads.h> 31#include <linux/threads.h>
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33#include <linux/irq.h>
33 34
34#include <asm/delay.h> 35#include <asm/delay.h>
35#include <asm/intrinsics.h> 36#include <asm/intrinsics.h>
@@ -105,6 +106,25 @@ reserve_irq_vector (int vector)
105 return test_and_set_bit(pos, ia64_vector_mask); 106 return test_and_set_bit(pos, ia64_vector_mask);
106} 107}
107 108
109/*
110 * Dynamic irq allocate and deallocation for MSI
111 */
112int create_irq(void)
113{
114 int vector = assign_irq_vector(AUTO_ASSIGN);
115
116 if (vector >= 0)
117 dynamic_irq_init(vector);
118
119 return vector;
120}
121
122void destroy_irq(unsigned int irq)
123{
124 dynamic_irq_cleanup(irq);
125 free_irq_vector(irq);
126}
127
108#ifdef CONFIG_SMP 128#ifdef CONFIG_SMP
109# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE) 129# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
110#else 130#else
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
new file mode 100644
index 000000000000..822e59a1b822
--- /dev/null
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -0,0 +1,143 @@
1/*
2 * MSI hooks for standard x86 apic
3 */
4
5#include <linux/pci.h>
6#include <linux/irq.h>
7#include <linux/msi.h>
8#include <asm/smp.h>
9
10/*
11 * Shifts for APIC-based data
12 */
13
14#define MSI_DATA_VECTOR_SHIFT 0
15#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
16
17#define MSI_DATA_DELIVERY_SHIFT 8
18#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
19#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_SHIFT)
20
21#define MSI_DATA_LEVEL_SHIFT 14
22#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
23#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
24
25#define MSI_DATA_TRIGGER_SHIFT 15
26#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
27#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
28
29/*
30 * Shift/mask fields for APIC-based bus address
31 */
32
33#define MSI_TARGET_CPU_SHIFT 4
34#define MSI_ADDR_HEADER 0xfee00000
35
36#define MSI_ADDR_DESTID_MASK 0xfff0000f
37#define MSI_ADDR_DESTID_CPU(cpu) ((cpu) << MSI_TARGET_CPU_SHIFT)
38
39#define MSI_ADDR_DESTMODE_SHIFT 2
40#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT)
41#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT)
42
43#define MSI_ADDR_REDIRECTION_SHIFT 3
44#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
45#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
46
47static struct irq_chip ia64_msi_chip;
48
49#ifdef CONFIG_SMP
50static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
51{
52 struct msi_msg msg;
53 u32 addr;
54
55 read_msi_msg(irq, &msg);
56
57 addr = msg.address_lo;
58 addr &= MSI_ADDR_DESTID_MASK;
59 addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
60 msg.address_lo = addr;
61
62 write_msi_msg(irq, &msg);
63 set_native_irq_info(irq, cpu_mask);
64}
65#endif /* CONFIG_SMP */
66
67int ia64_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
68{
69 struct msi_msg msg;
70 unsigned long dest_phys_id;
71 unsigned int vector;
72
73 dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
74 vector = irq;
75
76 msg.address_hi = 0;
77 msg.address_lo =
78 MSI_ADDR_HEADER |
79 MSI_ADDR_DESTMODE_PHYS |
80 MSI_ADDR_REDIRECTION_CPU |
81 MSI_ADDR_DESTID_CPU(dest_phys_id);
82
83 msg.data =
84 MSI_DATA_TRIGGER_EDGE |
85 MSI_DATA_LEVEL_ASSERT |
86 MSI_DATA_DELIVERY_FIXED |
87 MSI_DATA_VECTOR(vector);
88
89 write_msi_msg(irq, &msg);
90 set_irq_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
91
92 return 0;
93}
94
95void ia64_teardown_msi_irq(unsigned int irq)
96{
97 return; /* no-op */
98}
99
100static void ia64_ack_msi_irq(unsigned int irq)
101{
102 move_native_irq(irq);
103 ia64_eoi();
104}
105
106static int ia64_msi_retrigger_irq(unsigned int irq)
107{
108 unsigned int vector = irq;
109 ia64_resend_irq(vector);
110
111 return 1;
112}
113
114/*
115 * Generic ops used on most IA64 platforms.
116 */
117static struct irq_chip ia64_msi_chip = {
118 .name = "PCI-MSI",
119 .mask = mask_msi_irq,
120 .unmask = unmask_msi_irq,
121 .ack = ia64_ack_msi_irq,
122#ifdef CONFIG_SMP
123 .set_affinity = ia64_set_msi_irq_affinity,
124#endif
125 .retrigger = ia64_msi_retrigger_irq,
126};
127
128
129int arch_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
130{
131 if (platform_setup_msi_irq)
132 return platform_setup_msi_irq(irq, pdev);
133
134 return ia64_setup_msi_irq(irq, pdev);
135}
136
137void arch_teardown_msi_irq(unsigned int irq)
138{
139 if (platform_teardown_msi_irq)
140 return platform_teardown_msi_irq(irq);
141
142 return ia64_teardown_msi_irq(irq);
143}
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 15c7c670da39..b30be7c48ba8 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -810,12 +810,3 @@ pcibios_prep_mwi (struct pci_dev *dev)
810 } 810 }
811 return rc; 811 return rc;
812} 812}
813
814int pci_vector_resources(int last, int nr_released)
815{
816 int count = nr_released;
817
818 count += (IA64_LAST_DEVICE_VECTOR - last);
819
820 return count;
821}
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
index ab9c48c88012..2d78f34dd763 100644
--- a/arch/ia64/sn/kernel/Makefile
+++ b/arch/ia64/sn/kernel/Makefile
@@ -19,3 +19,4 @@ xp-y := xp_main.o xp_nofault.o
19obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o 19obj-$(CONFIG_IA64_SGI_SN_XP) += xpc.o
20xpc-y := xpc_main.o xpc_channel.o xpc_partition.o 20xpc-y := xpc_main.o xpc_channel.o xpc_partition.o
21obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o 21obj-$(CONFIG_IA64_SGI_SN_XP) += xpnet.o
22obj-$(CONFIG_PCI_MSI) += msi_sn.o
diff --git a/drivers/pci/msi-altix.c b/arch/ia64/sn/kernel/msi_sn.c
index bed4183a5e39..6ffd1f850d41 100644
--- a/drivers/pci/msi-altix.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -7,8 +7,10 @@
7 */ 7 */
8 8
9#include <linux/types.h> 9#include <linux/types.h>
10#include <linux/irq.h>
10#include <linux/pci.h> 11#include <linux/pci.h>
11#include <linux/cpumask.h> 12#include <linux/cpumask.h>
13#include <linux/msi.h>
12 14
13#include <asm/sn/addrs.h> 15#include <asm/sn/addrs.h>
14#include <asm/sn/intr.h> 16#include <asm/sn/intr.h>
@@ -16,17 +18,16 @@
16#include <asm/sn/pcidev.h> 18#include <asm/sn/pcidev.h>
17#include <asm/sn/nodepda.h> 19#include <asm/sn/nodepda.h>
18 20
19#include "msi.h"
20
21struct sn_msi_info { 21struct sn_msi_info {
22 u64 pci_addr; 22 u64 pci_addr;
23 struct sn_irq_info *sn_irq_info; 23 struct sn_irq_info *sn_irq_info;
24}; 24};
25 25
26static struct sn_msi_info *sn_msi_info; 26static struct sn_msi_info sn_msi_info[NR_IRQS];
27
28static struct irq_chip sn_msi_chip;
27 29
28static void 30void sn_teardown_msi_irq(unsigned int irq)
29sn_msi_teardown(unsigned int vector)
30{ 31{
31 nasid_t nasid; 32 nasid_t nasid;
32 int widget; 33 int widget;
@@ -36,7 +37,7 @@ sn_msi_teardown(unsigned int vector)
36 struct pcibus_bussoft *bussoft; 37 struct pcibus_bussoft *bussoft;
37 struct sn_pcibus_provider *provider; 38 struct sn_pcibus_provider *provider;
38 39
39 sn_irq_info = sn_msi_info[vector].sn_irq_info; 40 sn_irq_info = sn_msi_info[irq].sn_irq_info;
40 if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) 41 if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
41 return; 42 return;
42 43
@@ -45,9 +46,9 @@ sn_msi_teardown(unsigned int vector)
45 provider = SN_PCIDEV_BUSPROVIDER(pdev); 46 provider = SN_PCIDEV_BUSPROVIDER(pdev);
46 47
47 (*provider->dma_unmap)(pdev, 48 (*provider->dma_unmap)(pdev,
48 sn_msi_info[vector].pci_addr, 49 sn_msi_info[irq].pci_addr,
49 PCI_DMA_FROMDEVICE); 50 PCI_DMA_FROMDEVICE);
50 sn_msi_info[vector].pci_addr = 0; 51 sn_msi_info[irq].pci_addr = 0;
51 52
52 bussoft = SN_PCIDEV_BUSSOFT(pdev); 53 bussoft = SN_PCIDEV_BUSSOFT(pdev);
53 nasid = NASID_GET(bussoft->bs_base); 54 nasid = NASID_GET(bussoft->bs_base);
@@ -56,15 +57,15 @@ sn_msi_teardown(unsigned int vector)
56 SWIN_WIDGETNUM(bussoft->bs_base); 57 SWIN_WIDGETNUM(bussoft->bs_base);
57 58
58 sn_intr_free(nasid, widget, sn_irq_info); 59 sn_intr_free(nasid, widget, sn_irq_info);
59 sn_msi_info[vector].sn_irq_info = NULL; 60 sn_msi_info[irq].sn_irq_info = NULL;
60 61
61 return; 62 return;
62} 63}
63 64
64int 65int sn_setup_msi_irq(unsigned int irq, struct pci_dev *pdev)
65sn_msi_setup(struct pci_dev *pdev, unsigned int vector,
66 u32 *addr_hi, u32 *addr_lo, u32 *data)
67{ 66{
67 struct msi_msg msg;
68 struct msi_desc *entry;
68 int widget; 69 int widget;
69 int status; 70 int status;
70 nasid_t nasid; 71 nasid_t nasid;
@@ -73,6 +74,10 @@ sn_msi_setup(struct pci_dev *pdev, unsigned int vector,
73 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev); 74 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
74 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 75 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
75 76
77 entry = get_irq_data(irq);
78 if (!entry->msi_attrib.is_64)
79 return -EINVAL;
80
76 if (bussoft == NULL) 81 if (bussoft == NULL)
77 return -EINVAL; 82 return -EINVAL;
78 83
@@ -93,7 +98,7 @@ sn_msi_setup(struct pci_dev *pdev, unsigned int vector,
93 if (! sn_irq_info) 98 if (! sn_irq_info)
94 return -ENOMEM; 99 return -ENOMEM;
95 100
96 status = sn_intr_alloc(nasid, widget, sn_irq_info, vector, -1, -1); 101 status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
97 if (status) { 102 if (status) {
98 kfree(sn_irq_info); 103 kfree(sn_irq_info);
99 return -ENOMEM; 104 return -ENOMEM;
@@ -119,29 +124,32 @@ sn_msi_setup(struct pci_dev *pdev, unsigned int vector,
119 return -ENOMEM; 124 return -ENOMEM;
120 } 125 }
121 126
122 sn_msi_info[vector].sn_irq_info = sn_irq_info; 127 sn_msi_info[irq].sn_irq_info = sn_irq_info;
123 sn_msi_info[vector].pci_addr = bus_addr; 128 sn_msi_info[irq].pci_addr = bus_addr;
124 129
125 *addr_hi = (u32)(bus_addr >> 32); 130 msg.address_hi = (u32)(bus_addr >> 32);
126 *addr_lo = (u32)(bus_addr & 0x00000000ffffffff); 131 msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
127 132
128 /* 133 /*
129 * In the SN platform, bit 16 is a "send vector" bit which 134 * In the SN platform, bit 16 is a "send vector" bit which
130 * must be present in order to move the vector through the system. 135 * must be present in order to move the vector through the system.
131 */ 136 */
132 *data = 0x100 + (unsigned int)vector; 137 msg.data = 0x100 + irq;
133 138
134#ifdef CONFIG_SMP 139#ifdef CONFIG_SMP
135 set_irq_affinity_info((vector & 0xff), sn_irq_info->irq_cpuid, 0); 140 set_irq_affinity_info(irq, sn_irq_info->irq_cpuid, 0);
136#endif 141#endif
137 142
143 write_msi_msg(irq, &msg);
144 set_irq_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);
145
138 return 0; 146 return 0;
139} 147}
140 148
141static void 149#ifdef CONFIG_SMP
142sn_msi_target(unsigned int vector, unsigned int cpu, 150static void sn_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
143 u32 *addr_hi, u32 *addr_lo)
144{ 151{
152 struct msi_msg msg;
145 int slice; 153 int slice;
146 nasid_t nasid; 154 nasid_t nasid;
147 u64 bus_addr; 155 u64 bus_addr;
@@ -150,8 +158,10 @@ sn_msi_target(unsigned int vector, unsigned int cpu,
150 struct sn_irq_info *sn_irq_info; 158 struct sn_irq_info *sn_irq_info;
151 struct sn_irq_info *new_irq_info; 159 struct sn_irq_info *new_irq_info;
152 struct sn_pcibus_provider *provider; 160 struct sn_pcibus_provider *provider;
161 unsigned int cpu;
153 162
154 sn_irq_info = sn_msi_info[vector].sn_irq_info; 163 cpu = first_cpu(cpu_mask);
164 sn_irq_info = sn_msi_info[irq].sn_irq_info;
155 if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0) 165 if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
156 return; 166 return;
157 167
@@ -159,19 +169,20 @@ sn_msi_target(unsigned int vector, unsigned int cpu,
159 * Release XIO resources for the old MSI PCI address 169 * Release XIO resources for the old MSI PCI address
160 */ 170 */
161 171
172 read_msi_msg(irq, &msg);
162 sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; 173 sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
163 pdev = sn_pdev->pdi_linux_pcidev; 174 pdev = sn_pdev->pdi_linux_pcidev;
164 provider = SN_PCIDEV_BUSPROVIDER(pdev); 175 provider = SN_PCIDEV_BUSPROVIDER(pdev);
165 176
166 bus_addr = (u64)(*addr_hi) << 32 | (u64)(*addr_lo); 177 bus_addr = (u64)(msg.address_hi) << 32 | (u64)(msg.address_lo);
167 (*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE); 178 (*provider->dma_unmap)(pdev, bus_addr, PCI_DMA_FROMDEVICE);
168 sn_msi_info[vector].pci_addr = 0; 179 sn_msi_info[irq].pci_addr = 0;
169 180
170 nasid = cpuid_to_nasid(cpu); 181 nasid = cpuid_to_nasid(cpu);
171 slice = cpuid_to_slice(cpu); 182 slice = cpuid_to_slice(cpu);
172 183
173 new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice); 184 new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
174 sn_msi_info[vector].sn_irq_info = new_irq_info; 185 sn_msi_info[irq].sn_irq_info = new_irq_info;
175 if (new_irq_info == NULL) 186 if (new_irq_info == NULL)
176 return; 187 return;
177 188
@@ -184,27 +195,36 @@ sn_msi_target(unsigned int vector, unsigned int cpu,
184 sizeof(new_irq_info->irq_xtalkaddr), 195 sizeof(new_irq_info->irq_xtalkaddr),
185 SN_DMA_MSI|SN_DMA_ADDR_XIO); 196 SN_DMA_MSI|SN_DMA_ADDR_XIO);
186 197
187 sn_msi_info[vector].pci_addr = bus_addr; 198 sn_msi_info[irq].pci_addr = bus_addr;
188 *addr_hi = (u32)(bus_addr >> 32); 199 msg.address_hi = (u32)(bus_addr >> 32);
189 *addr_lo = (u32)(bus_addr & 0x00000000ffffffff); 200 msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
201
202 write_msi_msg(irq, &msg);
203 set_native_irq_info(irq, cpu_mask);
190} 204}
205#endif /* CONFIG_SMP */
191 206
192struct msi_ops sn_msi_ops = { 207static void sn_ack_msi_irq(unsigned int irq)
193 .setup = sn_msi_setup, 208{
194 .teardown = sn_msi_teardown, 209 move_native_irq(irq);
195#ifdef CONFIG_SMP 210 ia64_eoi();
196 .target = sn_msi_target, 211}
197#endif
198};
199 212
200int 213static int sn_msi_retrigger_irq(unsigned int irq)
201sn_msi_init(void)
202{ 214{
203 sn_msi_info = 215 unsigned int vector = irq;
204 kzalloc(sizeof(struct sn_msi_info) * NR_VECTORS, GFP_KERNEL); 216 ia64_resend_irq(vector);
205 if (! sn_msi_info)
206 return -ENOMEM;
207 217
208 msi_register(&sn_msi_ops); 218 return 1;
209 return 0;
210} 219}
220
221static struct irq_chip sn_msi_chip = {
222 .name = "PCI-MSI",
223 .mask = mask_msi_irq,
224 .unmask = unmask_msi_irq,
225 .ack = sn_ack_msi_irq,
226#ifdef CONFIG_SMP
227 .set_affinity = sn_set_msi_irq_affinity,
228#endif
229 .retrigger = sn_msi_retrigger_irq,
230};
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 6dd0ea8f88e0..d2101237442e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -127,7 +127,7 @@ config PA11
127 127
128config PREFETCH 128config PREFETCH
129 def_bool y 129 def_bool y
130 depends on PA8X00 130 depends on PA8X00 || PA7200
131 131
132config 64BIT 132config 64BIT
133 bool "64-bit kernel" 133 bool "64-bit kernel"
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 6e79dbf3f6bd..2d58b92b57e3 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -96,7 +96,7 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
96 put_user(namlen, &dirent->d_namlen); 96 put_user(namlen, &dirent->d_namlen);
97 copy_to_user(dirent->d_name, name, namlen); 97 copy_to_user(dirent->d_name, name, namlen);
98 put_user(0, dirent->d_name + namlen); 98 put_user(0, dirent->d_name + namlen);
99 ((char *) dirent) += reclen; 99 dirent = (void __user *)dirent + reclen;
100 buf->current_dir = dirent; 100 buf->current_dir = dirent;
101 buf->count -= reclen; 101 buf->count -= reclen;
102 return 0; 102 return 0;
diff --git a/arch/parisc/kernel/binfmt_elf32.c b/arch/parisc/kernel/binfmt_elf32.c
index d1833f164bbe..1e64e7b88110 100644
--- a/arch/parisc/kernel/binfmt_elf32.c
+++ b/arch/parisc/kernel/binfmt_elf32.c
@@ -87,7 +87,7 @@ struct elf_prpsinfo32
87 */ 87 */
88 88
89#define SET_PERSONALITY(ex, ibcs2) \ 89#define SET_PERSONALITY(ex, ibcs2) \
90 current->personality = PER_LINUX32; \ 90 set_thread_flag(TIF_32BIT); \
91 current->thread.map_base = DEFAULT_MAP_BASE32; \ 91 current->thread.map_base = DEFAULT_MAP_BASE32; \
92 current->thread.task_size = DEFAULT_TASK_SIZE32 \ 92 current->thread.task_size = DEFAULT_TASK_SIZE32 \
93 93
@@ -102,25 +102,3 @@ cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
102} 102}
103 103
104#include "../../../fs/binfmt_elf.c" 104#include "../../../fs/binfmt_elf.c"
105
106/* Set up a separate execution domain for ELF32 binaries running
107 * on an ELF64 kernel */
108
109static struct exec_domain parisc32_exec_domain = {
110 .name = "Linux/ELF32",
111 .pers_low = PER_LINUX32,
112 .pers_high = PER_LINUX32,
113};
114
115static int __init parisc32_exec_init(void)
116{
117 /* steal the identity signal mappings from the default domain */
118 parisc32_exec_domain.signal_map = default_exec_domain.signal_map;
119 parisc32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
120
121 register_exec_domain(&parisc32_exec_domain);
122
123 return 0;
124}
125
126__initcall(parisc32_exec_init);
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index bc7c4a4e26a1..0be51e92a2fc 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -35,15 +35,12 @@ int icache_stride __read_mostly;
35EXPORT_SYMBOL(dcache_stride); 35EXPORT_SYMBOL(dcache_stride);
36 36
37 37
38#if defined(CONFIG_SMP)
39/* On some machines (e.g. ones with the Merced bus), there can be 38/* On some machines (e.g. ones with the Merced bus), there can be
40 * only a single PxTLB broadcast at a time; this must be guaranteed 39 * only a single PxTLB broadcast at a time; this must be guaranteed
41 * by software. We put a spinlock around all TLB flushes to 40 * by software. We put a spinlock around all TLB flushes to
42 * ensure this. 41 * ensure this.
43 */ 42 */
44DEFINE_SPINLOCK(pa_tlb_lock); 43DEFINE_SPINLOCK(pa_tlb_lock);
45EXPORT_SYMBOL(pa_tlb_lock);
46#endif
47 44
48struct pdc_cache_info cache_info __read_mostly; 45struct pdc_cache_info cache_info __read_mostly;
49#ifndef CONFIG_PA20 46#ifndef CONFIG_PA20
@@ -91,7 +88,8 @@ update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
91 88
92 flush_kernel_dcache_page(page); 89 flush_kernel_dcache_page(page);
93 clear_bit(PG_dcache_dirty, &page->flags); 90 clear_bit(PG_dcache_dirty, &page->flags);
94 } 91 } else if (parisc_requires_coherency())
92 flush_kernel_dcache_page(page);
95} 93}
96 94
97void 95void
@@ -370,3 +368,45 @@ void parisc_setup_cache_timing(void)
370 368
371 printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus()); 369 printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
372} 370}
371
372extern void purge_kernel_dcache_page(unsigned long);
373extern void clear_user_page_asm(void *page, unsigned long vaddr);
374
375void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
376{
377 purge_kernel_dcache_page((unsigned long)page);
378 purge_tlb_start();
379 pdtlb_kernel(page);
380 purge_tlb_end();
381 clear_user_page_asm(page, vaddr);
382}
383EXPORT_SYMBOL(clear_user_page);
384
385void flush_kernel_dcache_page_addr(void *addr)
386{
387 flush_kernel_dcache_page_asm(addr);
388 purge_tlb_start();
389 pdtlb_kernel(addr);
390 purge_tlb_end();
391}
392EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
393
394void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
395 struct page *pg)
396{
397 /* no coherency needed (all in kmap/kunmap) */
398 copy_user_page_asm(vto, vfrom);
399 if (!parisc_requires_coherency())
400 flush_kernel_dcache_page_asm(vto);
401}
402EXPORT_SYMBOL(copy_user_page);
403
404#ifdef CONFIG_PA8X00
405
406void kunmap_parisc(void *addr)
407{
408 if (parisc_requires_coherency())
409 flush_kernel_dcache_page_addr(addr);
410}
411EXPORT_SYMBOL(kunmap_parisc);
412#endif
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 192357a3b9fe..340b5e8d67ba 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -30,6 +30,7 @@
30 30
31 31
32#include <asm/psw.h> 32#include <asm/psw.h>
33#include <asm/cache.h> /* for L1_CACHE_SHIFT */
33#include <asm/assembly.h> /* for LDREG/STREG defines */ 34#include <asm/assembly.h> /* for LDREG/STREG defines */
34#include <asm/pgtable.h> 35#include <asm/pgtable.h>
35#include <asm/signal.h> 36#include <asm/signal.h>
@@ -478,11 +479,7 @@
478 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault 479 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
479 DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ 480 DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
480 copy \pmd,%r9 481 copy \pmd,%r9
481#ifdef CONFIG_64BIT 482 SHLREG %r9,PxD_VALUE_SHIFT,\pmd
482 shld %r9,PxD_VALUE_SHIFT,\pmd
483#else
484 shlw %r9,PxD_VALUE_SHIFT,\pmd
485#endif
486 EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index 483 EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
487 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 484 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
488 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd 485 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
@@ -970,11 +967,7 @@ intr_return:
970 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount 967 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
971 ** irq_stat[] is defined using ____cacheline_aligned. 968 ** irq_stat[] is defined using ____cacheline_aligned.
972 */ 969 */
973#ifdef CONFIG_64BIT 970 SHLREG %r1,L1_CACHE_SHIFT,%r20
974 shld %r1, 6, %r20
975#else
976 shlw %r1, 5, %r20
977#endif
978 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */ 971 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
979#endif /* CONFIG_SMP */ 972#endif /* CONFIG_SMP */
980 973
@@ -1076,7 +1069,7 @@ intr_do_preempt:
1076 BL preempt_schedule_irq, %r2 1069 BL preempt_schedule_irq, %r2
1077 nop 1070 nop
1078 1071
1079 b intr_restore /* ssm PSW_SM_I done by intr_restore */ 1072 b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
1080#endif /* CONFIG_PREEMPT */ 1073#endif /* CONFIG_PREEMPT */
1081 1074
1082 .import do_signal,code 1075 .import do_signal,code
@@ -2115,11 +2108,7 @@ syscall_check_bh:
2115 ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */ 2108 ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
2116 2109
2117 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */ 2110 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
2118#ifdef CONFIG_64BIT 2111 SHLREG %r26,L1_CACHE_SHIFT,%r20
2119 shld %r26, 6, %r20
2120#else
2121 shlw %r26, 5, %r20
2122#endif
2123 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */ 2112 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
2124#endif /* CONFIG_SMP */ 2113#endif /* CONFIG_SMP */
2125 2114
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 3058bffd8a2c..18ba4cb9159b 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -231,6 +231,7 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
231 {HPHW_NPROC,0x5E6,0x4,0x91,"Keystone/Matterhorn W2 650"}, 231 {HPHW_NPROC,0x5E6,0x4,0x91,"Keystone/Matterhorn W2 650"},
232 {HPHW_NPROC,0x5E7,0x4,0x91,"Caribe W2 800"}, 232 {HPHW_NPROC,0x5E7,0x4,0x91,"Caribe W2 800"},
233 {HPHW_NPROC,0x5E8,0x4,0x91,"Pikes Peak W2"}, 233 {HPHW_NPROC,0x5E8,0x4,0x91,"Pikes Peak W2"},
234 {HPHW_NPROC,0x5EB,0x4,0x91,"Perf/Leone 875 W2+"},
234 {HPHW_NPROC,0x5FF,0x4,0x91,"Hitachi W"}, 235 {HPHW_NPROC,0x5FF,0x4,0x91,"Hitachi W"},
235 {HPHW_NPROC,0x600,0x4,0x81,"Gecko (712/60)"}, 236 {HPHW_NPROC,0x600,0x4,0x81,"Gecko (712/60)"},
236 {HPHW_NPROC,0x601,0x4,0x81,"Gecko 80 (712/80)"}, 237 {HPHW_NPROC,0x601,0x4,0x81,"Gecko 80 (712/80)"},
@@ -584,8 +585,10 @@ static struct hp_hardware hp_hardware_list[] __initdata = {
584 {HPHW_CONSOLE, 0x01A, 0x0001F, 0x00, "Jason/Anole 64 Null Console"}, 585 {HPHW_CONSOLE, 0x01A, 0x0001F, 0x00, "Jason/Anole 64 Null Console"},
585 {HPHW_CONSOLE, 0x01B, 0x0001F, 0x00, "Jason/Anole 100 Null Console"}, 586 {HPHW_CONSOLE, 0x01B, 0x0001F, 0x00, "Jason/Anole 100 Null Console"},
586 {HPHW_FABRIC, 0x004, 0x000AA, 0x80, "Halfdome DNA Central Agent"}, 587 {HPHW_FABRIC, 0x004, 0x000AA, 0x80, "Halfdome DNA Central Agent"},
588 {HPHW_FABRIC, 0x005, 0x000AA, 0x80, "Keystone DNA Central Agent"},
587 {HPHW_FABRIC, 0x007, 0x000AA, 0x80, "Caribe DNA Central Agent"}, 589 {HPHW_FABRIC, 0x007, 0x000AA, 0x80, "Caribe DNA Central Agent"},
588 {HPHW_FABRIC, 0x004, 0x000AB, 0x00, "Halfdome TOGO Fabric Crossbar"}, 590 {HPHW_FABRIC, 0x004, 0x000AB, 0x00, "Halfdome TOGO Fabric Crossbar"},
591 {HPHW_FABRIC, 0x005, 0x000AB, 0x00, "Keystone TOGO Fabric Crossbar"},
589 {HPHW_FABRIC, 0x004, 0x000AC, 0x00, "Halfdome Sakura Fabric Router"}, 592 {HPHW_FABRIC, 0x004, 0x000AC, 0x00, "Halfdome Sakura Fabric Router"},
590 {HPHW_FIO, 0x025, 0x0002E, 0x80, "Armyknife Optional X.25"}, 593 {HPHW_FIO, 0x025, 0x0002E, 0x80, "Armyknife Optional X.25"},
591 {HPHW_FIO, 0x004, 0x0004F, 0x0, "8-Port X.25 EISA-ACC (AMSO)"}, 594 {HPHW_FIO, 0x004, 0x0004F, 0x0, "8-Port X.25 EISA-ACC (AMSO)"},
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 5b8803cc3d69..9bdd0197ceb7 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -45,6 +45,17 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
45*/ 45*/
46static volatile unsigned long cpu_eiem = 0; 46static volatile unsigned long cpu_eiem = 0;
47 47
48/*
49** ack bitmap ... habitually set to 1, but reset to zero
50** between ->ack() and ->end() of the interrupt to prevent
51** re-interruption of a processing interrupt.
52*/
53static volatile unsigned long global_ack_eiem = ~0UL;
54/*
55** Local bitmap, same as above but for per-cpu interrupts
56*/
57static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
58
48static void cpu_disable_irq(unsigned int irq) 59static void cpu_disable_irq(unsigned int irq)
49{ 60{
50 unsigned long eirr_bit = EIEM_MASK(irq); 61 unsigned long eirr_bit = EIEM_MASK(irq);
@@ -62,13 +73,6 @@ static void cpu_enable_irq(unsigned int irq)
62 73
63 cpu_eiem |= eirr_bit; 74 cpu_eiem |= eirr_bit;
64 75
65 /* FIXME: while our interrupts aren't nested, we cannot reset
66 * the eiem mask if we're already in an interrupt. Once we
67 * implement nested interrupts, this can go away
68 */
69 if (!in_interrupt())
70 set_eiem(cpu_eiem);
71
72 /* This is just a simple NOP IPI. But what it does is cause 76 /* This is just a simple NOP IPI. But what it does is cause
73 * all the other CPUs to do a set_eiem(cpu_eiem) at the end 77 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
74 * of the interrupt handler */ 78 * of the interrupt handler */
@@ -84,13 +88,45 @@ static unsigned int cpu_startup_irq(unsigned int irq)
84void no_ack_irq(unsigned int irq) { } 88void no_ack_irq(unsigned int irq) { }
85void no_end_irq(unsigned int irq) { } 89void no_end_irq(unsigned int irq) { }
86 90
91void cpu_ack_irq(unsigned int irq)
92{
93 unsigned long mask = EIEM_MASK(irq);
94 int cpu = smp_processor_id();
95
96 /* Clear in EIEM so we can no longer process */
97 if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
98 per_cpu(local_ack_eiem, cpu) &= ~mask;
99 else
100 global_ack_eiem &= ~mask;
101
102 /* disable the interrupt */
103 set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
104 /* and now ack it */
105 mtctl(mask, 23);
106}
107
108void cpu_end_irq(unsigned int irq)
109{
110 unsigned long mask = EIEM_MASK(irq);
111 int cpu = smp_processor_id();
112
113 /* set it in the eiems---it's no longer in process */
114 if (CHECK_IRQ_PER_CPU(irq_desc[irq].status))
115 per_cpu(local_ack_eiem, cpu) |= mask;
116 else
117 global_ack_eiem |= mask;
118
119 /* enable the interrupt */
120 set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
121}
122
87#ifdef CONFIG_SMP 123#ifdef CONFIG_SMP
88int cpu_check_affinity(unsigned int irq, cpumask_t *dest) 124int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
89{ 125{
90 int cpu_dest; 126 int cpu_dest;
91 127
92 /* timer and ipi have to always be received on all CPUs */ 128 /* timer and ipi have to always be received on all CPUs */
93 if (irq == TIMER_IRQ || irq == IPI_IRQ) { 129 if (CHECK_IRQ_PER_CPU(irq)) {
94 /* Bad linux design decision. The mask has already 130 /* Bad linux design decision. The mask has already
95 * been set; we must reset it */ 131 * been set; we must reset it */
96 irq_desc[irq].affinity = CPU_MASK_ALL; 132 irq_desc[irq].affinity = CPU_MASK_ALL;
@@ -119,8 +155,8 @@ static struct hw_interrupt_type cpu_interrupt_type = {
119 .shutdown = cpu_disable_irq, 155 .shutdown = cpu_disable_irq,
120 .enable = cpu_enable_irq, 156 .enable = cpu_enable_irq,
121 .disable = cpu_disable_irq, 157 .disable = cpu_disable_irq,
122 .ack = no_ack_irq, 158 .ack = cpu_ack_irq,
123 .end = no_end_irq, 159 .end = cpu_end_irq,
124#ifdef CONFIG_SMP 160#ifdef CONFIG_SMP
125 .set_affinity = cpu_set_affinity_irq, 161 .set_affinity = cpu_set_affinity_irq,
126#endif 162#endif
@@ -209,7 +245,7 @@ int show_interrupts(struct seq_file *p, void *v)
209** Then use that to get the Transaction address and data. 245** Then use that to get the Transaction address and data.
210*/ 246*/
211 247
212int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *type, void *data) 248int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
213{ 249{
214 if (irq_desc[irq].action) 250 if (irq_desc[irq].action)
215 return -EBUSY; 251 return -EBUSY;
@@ -298,82 +334,69 @@ unsigned int txn_alloc_data(unsigned int virt_irq)
298 return virt_irq - CPU_IRQ_BASE; 334 return virt_irq - CPU_IRQ_BASE;
299} 335}
300 336
337static inline int eirr_to_irq(unsigned long eirr)
338{
339#ifdef CONFIG_64BIT
340 int bit = fls64(eirr);
341#else
342 int bit = fls(eirr);
343#endif
344 return (BITS_PER_LONG - bit) + TIMER_IRQ;
345}
346
301/* ONLY called from entry.S:intr_extint() */ 347/* ONLY called from entry.S:intr_extint() */
302void do_cpu_irq_mask(struct pt_regs *regs) 348void do_cpu_irq_mask(struct pt_regs *regs)
303{ 349{
304 unsigned long eirr_val; 350 unsigned long eirr_val;
305 351 int irq, cpu = smp_processor_id();
306 irq_enter();
307
308 /*
309 * Don't allow TIMER or IPI nested interrupts.
310 * Allowing any single interrupt to nest can lead to that CPU
311 * handling interrupts with all enabled interrupts unmasked.
312 */
313 set_eiem(0UL);
314
315 /* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
316 * 2) We loop here on EIRR contents in order to avoid
317 * nested interrupts or having to take another interrupt
318 * when we could have just handled it right away.
319 */
320 for (;;) {
321 unsigned long bit = (1UL << (BITS_PER_LONG - 1));
322 unsigned int irq;
323 eirr_val = mfctl(23) & cpu_eiem;
324 if (!eirr_val)
325 break;
326
327 mtctl(eirr_val, 23); /* reset bits we are going to process */
328
329 /* Work our way from MSb to LSb...same order we alloc EIRs */
330 for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
331#ifdef CONFIG_SMP 352#ifdef CONFIG_SMP
332 cpumask_t dest = irq_desc[irq].affinity; 353 cpumask_t dest;
333#endif 354#endif
334 if (!(bit & eirr_val))
335 continue;
336 355
337 /* clear bit in mask - can exit loop sooner */ 356 local_irq_disable();
338 eirr_val &= ~bit; 357 irq_enter();
339 358
340#ifdef CONFIG_SMP 359 eirr_val = mfctl(23) & cpu_eiem & global_ack_eiem &
341 /* FIXME: because generic set affinity mucks 360 per_cpu(local_ack_eiem, cpu);
342 * with the affinity before sending it to us 361 if (!eirr_val)
343 * we can get the situation where the affinity is 362 goto set_out;
344 * wrong for our CPU type interrupts */ 363 irq = eirr_to_irq(eirr_val);
345 if (irq != TIMER_IRQ && irq != IPI_IRQ &&
346 !cpu_isset(smp_processor_id(), dest)) {
347 int cpu = first_cpu(dest);
348
349 printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
350 irq, smp_processor_id(), cpu);
351 gsc_writel(irq + CPU_IRQ_BASE,
352 cpu_data[cpu].hpa);
353 continue;
354 }
355#endif
356 364
357 __do_IRQ(irq, regs); 365#ifdef CONFIG_SMP
358 } 366 dest = irq_desc[irq].affinity;
367 if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
368 !cpu_isset(smp_processor_id(), dest)) {
369 int cpu = first_cpu(dest);
370
371 printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
372 irq, smp_processor_id(), cpu);
373 gsc_writel(irq + CPU_IRQ_BASE,
374 cpu_data[cpu].hpa);
375 goto set_out;
359 } 376 }
377#endif
378 __do_IRQ(irq, regs);
360 379
361 set_eiem(cpu_eiem); /* restore original mask */ 380 out:
362 irq_exit(); 381 irq_exit();
363} 382 return;
364 383
384 set_out:
385 set_eiem(cpu_eiem & global_ack_eiem & per_cpu(local_ack_eiem, cpu));
386 goto out;
387}
365 388
366static struct irqaction timer_action = { 389static struct irqaction timer_action = {
367 .handler = timer_interrupt, 390 .handler = timer_interrupt,
368 .name = "timer", 391 .name = "timer",
369 .flags = IRQF_DISABLED, 392 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU,
370}; 393};
371 394
372#ifdef CONFIG_SMP 395#ifdef CONFIG_SMP
373static struct irqaction ipi_action = { 396static struct irqaction ipi_action = {
374 .handler = ipi_interrupt, 397 .handler = ipi_interrupt,
375 .name = "IPI", 398 .name = "IPI",
376 .flags = IRQF_DISABLED, 399 .flags = IRQF_DISABLED | IRQF_PERCPU,
377}; 400};
378#endif 401#endif
379 402
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 99d7fca93104..fb81e5687e7c 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -143,8 +143,9 @@ static int __init processor_probe(struct parisc_device *dev)
143 p = &cpu_data[cpuid]; 143 p = &cpu_data[cpuid];
144 boot_cpu_data.cpu_count++; 144 boot_cpu_data.cpu_count++;
145 145
146 /* initialize counters */ 146 /* initialize counters - CPU 0 gets it_value set in time_init() */
147 memset(p, 0, sizeof(struct cpuinfo_parisc)); 147 if (cpuid)
148 memset(p, 0, sizeof(struct cpuinfo_parisc));
148 149
149 p->loops_per_jiffy = loops_per_jiffy; 150 p->loops_per_jiffy = loops_per_jiffy;
150 p->dev = dev; /* Save IODC data in case we need it */ 151 p->dev = dev; /* Save IODC data in case we need it */
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index bb83880c5ee3..ee6653edeb7a 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -26,7 +26,6 @@
26#include <linux/stddef.h> 26#include <linux/stddef.h>
27#include <linux/compat.h> 27#include <linux/compat.h>
28#include <linux/elf.h> 28#include <linux/elf.h>
29#include <linux/personality.h>
30#include <asm/ucontext.h> 29#include <asm/ucontext.h>
31#include <asm/rt_sigframe.h> 30#include <asm/rt_sigframe.h>
32#include <asm/uaccess.h> 31#include <asm/uaccess.h>
@@ -433,13 +432,13 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
433 if (in_syscall) { 432 if (in_syscall) {
434 regs->gr[31] = haddr; 433 regs->gr[31] = haddr;
435#ifdef __LP64__ 434#ifdef __LP64__
436 if (personality(current->personality) == PER_LINUX) 435 if (!test_thread_flag(TIF_32BIT))
437 sigframe_size |= 1; 436 sigframe_size |= 1;
438#endif 437#endif
439 } else { 438 } else {
440 unsigned long psw = USER_PSW; 439 unsigned long psw = USER_PSW;
441#ifdef __LP64__ 440#ifdef __LP64__
442 if (personality(current->personality) == PER_LINUX) 441 if (!test_thread_flag(TIF_32BIT))
443 psw |= PSW_W; 442 psw |= PSW_W;
444#endif 443#endif
445 444
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 98e40959a564..faad338f310e 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -262,6 +262,9 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
262 this_cpu, which); 262 this_cpu, which);
263 return IRQ_NONE; 263 return IRQ_NONE;
264 } /* Switch */ 264 } /* Switch */
265 /* let in any pending interrupts */
266 local_irq_enable();
267 local_irq_disable();
265 } /* while (ops) */ 268 } /* while (ops) */
266 } 269 }
267 return IRQ_HANDLED; 270 return IRQ_HANDLED;
@@ -430,8 +433,9 @@ smp_do_timer(struct pt_regs *regs)
430static void __init 433static void __init
431smp_cpu_init(int cpunum) 434smp_cpu_init(int cpunum)
432{ 435{
433 extern int init_per_cpu(int); /* arch/parisc/kernel/setup.c */ 436 extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */
434 extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ 437 extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
438 extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
435 439
436 /* Set modes and Enable floating point coprocessor */ 440 /* Set modes and Enable floating point coprocessor */
437 (void) init_per_cpu(cpunum); 441 (void) init_per_cpu(cpunum);
@@ -457,6 +461,7 @@ smp_cpu_init(int cpunum)
457 enter_lazy_tlb(&init_mm, current); 461 enter_lazy_tlb(&init_mm, current);
458 462
459 init_IRQ(); /* make sure no IRQ's are enabled or pending */ 463 init_IRQ(); /* make sure no IRQ's are enabled or pending */
464 start_cpu_itimer();
460} 465}
461 466
462 467
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 8b5df98e2b31..1db5588ceacf 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -31,6 +31,8 @@
31#include <linux/shm.h> 31#include <linux/shm.h>
32#include <linux/smp_lock.h> 32#include <linux/smp_lock.h>
33#include <linux/syscalls.h> 33#include <linux/syscalls.h>
34#include <linux/utsname.h>
35#include <linux/personality.h>
34 36
35int sys_pipe(int __user *fildes) 37int sys_pipe(int __user *fildes)
36{ 38{
@@ -248,3 +250,46 @@ asmlinkage int sys_free_hugepages(unsigned long addr)
248{ 250{
249 return -EINVAL; 251 return -EINVAL;
250} 252}
253
254long parisc_personality(unsigned long personality)
255{
256 long err;
257
258 if (personality(current->personality) == PER_LINUX32
259 && personality == PER_LINUX)
260 personality = PER_LINUX32;
261
262 err = sys_personality(personality);
263 if (err == PER_LINUX32)
264 err = PER_LINUX;
265
266 return err;
267}
268
269static inline int override_machine(char __user *mach) {
270#ifdef CONFIG_COMPAT
271 if (personality(current->personality) == PER_LINUX32) {
272 if (__put_user(0, mach + 6) ||
273 __put_user(0, mach + 7))
274 return -EFAULT;
275 }
276
277 return 0;
278#else /*!CONFIG_COMPAT*/
279 return 0;
280#endif /*CONFIG_COMPAT*/
281}
282
283long parisc_newuname(struct new_utsname __user *utsname)
284{
285 int err = 0;
286
287 down_read(&uts_sem);
288 if (copy_to_user(utsname, &system_utsname, sizeof(*utsname)))
289 err = -EFAULT;
290 up_read(&uts_sem);
291
292 err = override_machine(utsname->machine);
293
294 return (long)err;
295}
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index e27b432f90a8..701d66a596e8 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -132,7 +132,7 @@
132 ENTRY_SAME(socketpair) 132 ENTRY_SAME(socketpair)
133 ENTRY_SAME(setpgid) 133 ENTRY_SAME(setpgid)
134 ENTRY_SAME(send) 134 ENTRY_SAME(send)
135 ENTRY_SAME(newuname) 135 ENTRY_OURS(newuname)
136 ENTRY_SAME(umask) /* 60 */ 136 ENTRY_SAME(umask) /* 60 */
137 ENTRY_SAME(chroot) 137 ENTRY_SAME(chroot)
138 ENTRY_SAME(ustat) 138 ENTRY_SAME(ustat)
@@ -221,7 +221,7 @@
221 ENTRY_SAME(fchdir) 221 ENTRY_SAME(fchdir)
222 ENTRY_SAME(bdflush) 222 ENTRY_SAME(bdflush)
223 ENTRY_SAME(sysfs) /* 135 */ 223 ENTRY_SAME(sysfs) /* 135 */
224 ENTRY_SAME(personality) 224 ENTRY_OURS(personality)
225 ENTRY_SAME(ni_syscall) /* for afs_syscall */ 225 ENTRY_SAME(ni_syscall) /* for afs_syscall */
226 ENTRY_SAME(setfsuid) 226 ENTRY_SAME(setfsuid)
227 ENTRY_SAME(setfsgid) 227 ENTRY_SAME(setfsgid)
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index ab641d67f551..b3496b592a2d 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -32,8 +32,7 @@
32 32
33#include <linux/timex.h> 33#include <linux/timex.h>
34 34
35static long clocktick __read_mostly; /* timer cycles per tick */ 35static unsigned long clocktick __read_mostly; /* timer cycles per tick */
36static long halftick __read_mostly;
37 36
38#ifdef CONFIG_SMP 37#ifdef CONFIG_SMP
39extern void smp_do_timer(struct pt_regs *regs); 38extern void smp_do_timer(struct pt_regs *regs);
@@ -41,46 +40,106 @@ extern void smp_do_timer(struct pt_regs *regs);
41 40
42irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) 41irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
43{ 42{
44 long now; 43 unsigned long now;
45 long next_tick; 44 unsigned long next_tick;
46 int nticks; 45 unsigned long cycles_elapsed;
47 int cpu = smp_processor_id(); 46 unsigned long cycles_remainder;
47 unsigned int cpu = smp_processor_id();
48
49 /* gcc can optimize for "read-only" case with a local clocktick */
50 unsigned long cpt = clocktick;
48 51
49 profile_tick(CPU_PROFILING, regs); 52 profile_tick(CPU_PROFILING, regs);
50 53
51 now = mfctl(16); 54 /* Initialize next_tick to the expected tick time. */
52 /* initialize next_tick to time at last clocktick */
53 next_tick = cpu_data[cpu].it_value; 55 next_tick = cpu_data[cpu].it_value;
54 56
55 /* since time passes between the interrupt and the mfctl() 57 /* Get current interval timer.
56 * above, it is never true that last_tick + clocktick == now. If we 58 * CR16 reads as 64 bits in CPU wide mode.
57 * never miss a clocktick, we could set next_tick = last_tick + clocktick 59 * CR16 reads as 32 bits in CPU narrow mode.
58 * but maybe we'll miss ticks, hence the loop.
59 *
60 * Variables are *signed*.
61 */ 60 */
61 now = mfctl(16);
62
63 cycles_elapsed = now - next_tick;
62 64
63 nticks = 0; 65 if ((cycles_elapsed >> 5) < cpt) {
64 while((next_tick - now) < halftick) { 66 /* use "cheap" math (add/subtract) instead
65 next_tick += clocktick; 67 * of the more expensive div/mul method
66 nticks++; 68 */
69 cycles_remainder = cycles_elapsed;
70 while (cycles_remainder > cpt) {
71 cycles_remainder -= cpt;
72 }
73 } else {
74 cycles_remainder = cycles_elapsed % cpt;
67 } 75 }
68 mtctl(next_tick, 16); 76
77 /* Can we differentiate between "early CR16" (aka Scenario 1) and
78 * "long delay" (aka Scenario 3)? I don't think so.
79 *
80 * We expected timer_interrupt to be delivered at least a few hundred
81 * cycles after the IT fires. But it's arbitrary how much time passes
82 * before we call it "late". I've picked one second.
83 */
84/* aproximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
85#if HZ == 1000
86 if (cycles_elapsed > (cpt << 10) )
87#elif HZ == 250
88 if (cycles_elapsed > (cpt << 8) )
89#elif HZ == 100
90 if (cycles_elapsed > (cpt << 7) )
91#else
92#warn WTF is HZ set to anyway?
93 if (cycles_elapsed > (HZ * cpt) )
94#endif
95 {
96 /* Scenario 3: very long delay? bad in any case */
97 printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
98 " cycles %lX rem %lX "
99 " next/now %lX/%lX\n",
100 cpu,
101 cycles_elapsed, cycles_remainder,
102 next_tick, now );
103 }
104
105 /* convert from "division remainder" to "remainder of clock tick" */
106 cycles_remainder = cpt - cycles_remainder;
107
108 /* Determine when (in CR16 cycles) next IT interrupt will fire.
109 * We want IT to fire modulo clocktick even if we miss/skip some.
110 * But those interrupts don't in fact get delivered that regularly.
111 */
112 next_tick = now + cycles_remainder;
113
69 cpu_data[cpu].it_value = next_tick; 114 cpu_data[cpu].it_value = next_tick;
70 115
71 while (nticks--) { 116 /* Skip one clocktick on purpose if we are likely to miss next_tick.
117 * We want to avoid the new next_tick being less than CR16.
118 * If that happened, itimer wouldn't fire until CR16 wrapped.
119 * We'll catch the tick we missed on the tick after that.
120 */
121 if (!(cycles_remainder >> 13))
122 next_tick += cpt;
123
124 /* Program the IT when to deliver the next interrupt. */
125 /* Only bottom 32-bits of next_tick are written to cr16. */
126 mtctl(next_tick, 16);
127
128
129 /* Done mucking with unreliable delivery of interrupts.
130 * Go do system house keeping.
131 */
72#ifdef CONFIG_SMP 132#ifdef CONFIG_SMP
73 smp_do_timer(regs); 133 smp_do_timer(regs);
74#else 134#else
75 update_process_times(user_mode(regs)); 135 update_process_times(user_mode(regs));
76#endif 136#endif
77 if (cpu == 0) { 137 if (cpu == 0) {
78 write_seqlock(&xtime_lock); 138 write_seqlock(&xtime_lock);
79 do_timer(1); 139 do_timer(regs);
80 write_sequnlock(&xtime_lock); 140 write_sequnlock(&xtime_lock);
81 }
82 } 141 }
83 142
84 /* check soft power switch status */ 143 /* check soft power switch status */
85 if (cpu == 0 && !atomic_read(&power_tasklet.count)) 144 if (cpu == 0 && !atomic_read(&power_tasklet.count))
86 tasklet_schedule(&power_tasklet); 145 tasklet_schedule(&power_tasklet);
@@ -106,14 +165,12 @@ unsigned long profile_pc(struct pt_regs *regs)
106EXPORT_SYMBOL(profile_pc); 165EXPORT_SYMBOL(profile_pc);
107 166
108 167
109/*** converted from ia64 ***/
110/* 168/*
111 * Return the number of micro-seconds that elapsed since the last 169 * Return the number of micro-seconds that elapsed since the last
112 * update to wall time (aka xtime). The xtime_lock 170 * update to wall time (aka xtime). The xtime_lock
113 * must be at least read-locked when calling this routine. 171 * must be at least read-locked when calling this routine.
114 */ 172 */
115static inline unsigned long 173static inline unsigned long gettimeoffset (void)
116gettimeoffset (void)
117{ 174{
118#ifndef CONFIG_SMP 175#ifndef CONFIG_SMP
119 /* 176 /*
@@ -121,21 +178,44 @@ gettimeoffset (void)
121 * Once parisc-linux learns the cr16 difference between processors, 178 * Once parisc-linux learns the cr16 difference between processors,
122 * this could be made to work. 179 * this could be made to work.
123 */ 180 */
124 long last_tick; 181 unsigned long now;
125 long elapsed_cycles; 182 unsigned long prev_tick;
126 183 unsigned long next_tick;
127 /* it_value is the intended time of the next tick */ 184 unsigned long elapsed_cycles;
128 last_tick = cpu_data[smp_processor_id()].it_value; 185 unsigned long usec;
129 186 unsigned long cpuid = smp_processor_id();
130 /* Subtract one tick and account for possible difference between 187 unsigned long cpt = clocktick;
131 * when we expected the tick and when it actually arrived. 188
132 * (aka wall vs real) 189 next_tick = cpu_data[cpuid].it_value;
133 */ 190 now = mfctl(16); /* Read the hardware interval timer. */
134 last_tick -= clocktick * (jiffies - wall_jiffies + 1); 191
135 elapsed_cycles = mfctl(16) - last_tick; 192 prev_tick = next_tick - cpt;
193
194 /* Assume Scenario 1: "now" is later than prev_tick. */
195 elapsed_cycles = now - prev_tick;
196
197/* aproximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
198#if HZ == 1000
199 if (elapsed_cycles > (cpt << 10) )
200#elif HZ == 250
201 if (elapsed_cycles > (cpt << 8) )
202#elif HZ == 100
203 if (elapsed_cycles > (cpt << 7) )
204#else
205#warn WTF is HZ set to anyway?
206 if (elapsed_cycles > (HZ * cpt) )
207#endif
208 {
209 /* Scenario 3: clock ticks are missing. */
210 printk (KERN_CRIT "gettimeoffset(CPU %ld): missing %ld ticks!"
211 " cycles %lX prev/now/next %lX/%lX/%lX clock %lX\n",
212 cpuid, elapsed_cycles / cpt,
213 elapsed_cycles, prev_tick, now, next_tick, cpt);
214 }
136 215
137 /* the precision of this math could be improved */ 216 /* FIXME: Can we improve the precision? Not with PAGE0. */
138 return elapsed_cycles / (PAGE0->mem_10msec / 10000); 217 usec = (elapsed_cycles * 10000) / PAGE0->mem_10msec;
218 return usec;
139#else 219#else
140 return 0; 220 return 0;
141#endif 221#endif
@@ -146,6 +226,7 @@ do_gettimeofday (struct timeval *tv)
146{ 226{
147 unsigned long flags, seq, usec, sec; 227 unsigned long flags, seq, usec, sec;
148 228
229 /* Hold xtime_lock and adjust timeval. */
149 do { 230 do {
150 seq = read_seqbegin_irqsave(&xtime_lock, flags); 231 seq = read_seqbegin_irqsave(&xtime_lock, flags);
151 usec = gettimeoffset(); 232 usec = gettimeoffset();
@@ -153,25 +234,13 @@ do_gettimeofday (struct timeval *tv)
153 usec += (xtime.tv_nsec / 1000); 234 usec += (xtime.tv_nsec / 1000);
154 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); 235 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
155 236
156 if (unlikely(usec > LONG_MAX)) { 237 /* Move adjusted usec's into sec's. */
157 /* This can happen if the gettimeoffset adjustment is
158 * negative and xtime.tv_nsec is smaller than the
159 * adjustment */
160 printk(KERN_ERR "do_gettimeofday() spurious xtime.tv_nsec of %ld\n", usec);
161 usec += USEC_PER_SEC;
162 --sec;
163 /* This should never happen, it means the negative
164 * time adjustment was more than a second, so there's
165 * something seriously wrong */
166 BUG_ON(usec > LONG_MAX);
167 }
168
169
170 while (usec >= USEC_PER_SEC) { 238 while (usec >= USEC_PER_SEC) {
171 usec -= USEC_PER_SEC; 239 usec -= USEC_PER_SEC;
172 ++sec; 240 ++sec;
173 } 241 }
174 242
243 /* Return adjusted result. */
175 tv->tv_sec = sec; 244 tv->tv_sec = sec;
176 tv->tv_usec = usec; 245 tv->tv_usec = usec;
177} 246}
@@ -223,22 +292,23 @@ unsigned long long sched_clock(void)
223} 292}
224 293
225 294
295void __init start_cpu_itimer(void)
296{
297 unsigned int cpu = smp_processor_id();
298 unsigned long next_tick = mfctl(16) + clocktick;
299
300 mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
301
302 cpu_data[cpu].it_value = next_tick;
303}
304
226void __init time_init(void) 305void __init time_init(void)
227{ 306{
228 unsigned long next_tick;
229 static struct pdc_tod tod_data; 307 static struct pdc_tod tod_data;
230 308
231 clocktick = (100 * PAGE0->mem_10msec) / HZ; 309 clocktick = (100 * PAGE0->mem_10msec) / HZ;
232 halftick = clocktick / 2;
233 310
234 /* Setup clock interrupt timing */ 311 start_cpu_itimer(); /* get CPU 0 started */
235
236 next_tick = mfctl(16);
237 next_tick += clocktick;
238 cpu_data[smp_processor_id()].it_value = next_tick;
239
240 /* kick off Itimer (CR16) */
241 mtctl(next_tick, 16);
242 312
243 if(pdc_tod_read(&tod_data) == 0) { 313 if(pdc_tod_read(&tod_data) == 0) {
244 write_seqlock_irq(&xtime_lock); 314 write_seqlock_irq(&xtime_lock);
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 77b28cb8aca6..65cd6ca32fed 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -16,6 +16,7 @@
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/ptrace.h> 17#include <linux/ptrace.h>
18#include <linux/timer.h> 18#include <linux/timer.h>
19#include <linux/delay.h>
19#include <linux/mm.h> 20#include <linux/mm.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/smp.h> 22#include <linux/smp.h>
@@ -245,6 +246,15 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
245 current->comm, current->pid, str, err); 246 current->comm, current->pid, str, err);
246 show_regs(regs); 247 show_regs(regs);
247 248
249 if (in_interrupt())
250 panic("Fatal exception in interrupt");
251
252 if (panic_on_oops) {
253 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
254 ssleep(5);
255 panic("Fatal exception");
256 }
257
248 /* Wot's wrong wif bein' racy? */ 258 /* Wot's wrong wif bein' racy? */
249 if (current->thread.flags & PARISC_KERNEL_DEATH) { 259 if (current->thread.flags & PARISC_KERNEL_DEATH) {
250 printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__); 260 printk(KERN_CRIT "%s() recursion detected.\n", __FUNCTION__);
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 25ad28d63e88..0667f2b4f977 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -31,10 +31,7 @@
31 31
32DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 32DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
33 33
34extern char _text; /* start of kernel code, defined by linker */
35extern int data_start; 34extern int data_start;
36extern char _end; /* end of BSS, defined by linker */
37extern char __init_begin, __init_end;
38 35
39#ifdef CONFIG_DISCONTIGMEM 36#ifdef CONFIG_DISCONTIGMEM
40struct node_map_data node_data[MAX_NUMNODES] __read_mostly; 37struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
@@ -319,8 +316,8 @@ static void __init setup_bootmem(void)
319 316
320 reserve_bootmem_node(NODE_DATA(0), 0UL, 317 reserve_bootmem_node(NODE_DATA(0), 0UL,
321 (unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE)); 318 (unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
322 reserve_bootmem_node(NODE_DATA(0),__pa((unsigned long)&_text), 319 reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
323 (unsigned long)(&_end - &_text)); 320 (unsigned long)(_end - _text));
324 reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT), 321 reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
325 ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT)); 322 ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
326 323
@@ -355,8 +352,8 @@ static void __init setup_bootmem(void)
355#endif 352#endif
356 353
357 data_resource.start = virt_to_phys(&data_start); 354 data_resource.start = virt_to_phys(&data_start);
358 data_resource.end = virt_to_phys(&_end)-1; 355 data_resource.end = virt_to_phys(_end) - 1;
359 code_resource.start = virt_to_phys(&_text); 356 code_resource.start = virt_to_phys(_text);
360 code_resource.end = virt_to_phys(&data_start)-1; 357 code_resource.end = virt_to_phys(&data_start)-1;
361 358
362 /* We don't know which region the kernel will be in, so try 359 /* We don't know which region the kernel will be in, so try
@@ -385,12 +382,12 @@ void free_initmem(void)
385 */ 382 */
386 local_irq_disable(); 383 local_irq_disable();
387 384
388 memset(&__init_begin, 0x00, 385 memset(__init_begin, 0x00,
389 (unsigned long)&__init_end - (unsigned long)&__init_begin); 386 (unsigned long)__init_end - (unsigned long)__init_begin);
390 387
391 flush_data_cache(); 388 flush_data_cache();
392 asm volatile("sync" : : ); 389 asm volatile("sync" : : );
393 flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end); 390 flush_icache_range((unsigned long)__init_begin, (unsigned long)__init_end);
394 asm volatile("sync" : : ); 391 asm volatile("sync" : : );
395 392
396 local_irq_enable(); 393 local_irq_enable();
@@ -398,8 +395,8 @@ void free_initmem(void)
398 395
399 /* align __init_begin and __init_end to page size, 396 /* align __init_begin and __init_end to page size,
400 ignoring linker script where we might have tried to save RAM */ 397 ignoring linker script where we might have tried to save RAM */
401 init_begin = PAGE_ALIGN((unsigned long)(&__init_begin)); 398 init_begin = PAGE_ALIGN((unsigned long)(__init_begin));
402 init_end = PAGE_ALIGN((unsigned long)(&__init_end)); 399 init_end = PAGE_ALIGN((unsigned long)(__init_end));
403 for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) { 400 for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
404 ClearPageReserved(virt_to_page(addr)); 401 ClearPageReserved(virt_to_page(addr));
405 init_page_count(virt_to_page(addr)); 402 init_page_count(virt_to_page(addr));
@@ -578,7 +575,7 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
578 extern const unsigned long fault_vector_20; 575 extern const unsigned long fault_vector_20;
579 extern void * const linux_gateway_page; 576 extern void * const linux_gateway_page;
580 577
581 ro_start = __pa((unsigned long)&_text); 578 ro_start = __pa((unsigned long)_text);
582 ro_end = __pa((unsigned long)&data_start); 579 ro_end = __pa((unsigned long)&data_start);
583 fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK; 580 fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
584 gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK; 581 gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index 27384567a1d0..47a1d2ac9419 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -188,7 +188,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
188} 188}
189EXPORT_SYMBOL(__ioremap); 189EXPORT_SYMBOL(__ioremap);
190 190
191void iounmap(void __iomem *addr) 191void iounmap(const volatile void __iomem *addr)
192{ 192{
193 if (addr > high_memory) 193 if (addr > high_memory)
194 return vfree((void *) (PAGE_MASK & (unsigned long __force) addr)); 194 return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 96ef656e4669..8b6910465578 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -338,10 +338,6 @@ config PPC_MULTIPLATFORM
338 RS/6000 machine, an Apple machine, or a PReP, CHRP, 338 RS/6000 machine, an Apple machine, or a PReP, CHRP,
339 Maple or Cell-based machine. 339 Maple or Cell-based machine.
340 340
341config PPC_ISERIES
342 bool "IBM Legacy iSeries"
343 depends on PPC64
344
345config EMBEDDED6xx 341config EMBEDDED6xx
346 bool "Embedded 6xx/7xx/7xxx-based board" 342 bool "Embedded 6xx/7xx/7xxx-based board"
347 depends on PPC32 && (BROKEN||BROKEN_ON_SMP) 343 depends on PPC32 && (BROKEN||BROKEN_ON_SMP)
@@ -355,6 +351,16 @@ config APUS
355 <http://linux-apus.sourceforge.net/>. 351 <http://linux-apus.sourceforge.net/>.
356endchoice 352endchoice
357 353
354config QUICC_ENGINE
355 bool
356 depends on PPC_MPC836x || PPC_MPC832x
357 default y
358 help
359 The QUICC Engine (QE) is a new generation of communications
360 coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
361 Selecting this option means that you wish to build a kernel
362 for a machine with a QE coprocessor.
363
358config PPC_PSERIES 364config PPC_PSERIES
359 depends on PPC_MULTIPLATFORM && PPC64 365 depends on PPC_MULTIPLATFORM && PPC64
360 bool "IBM pSeries & new (POWER5-based) iSeries" 366 bool "IBM pSeries & new (POWER5-based) iSeries"
@@ -365,6 +371,10 @@ config PPC_PSERIES
365 select PPC_UDBG_16550 371 select PPC_UDBG_16550
366 default y 372 default y
367 373
374config PPC_ISERIES
375 bool "IBM Legacy iSeries"
376 depends on PPC_MULTIPLATFORM && PPC64
377
368config PPC_CHRP 378config PPC_CHRP
369 bool "Common Hardware Reference Platform (CHRP) based machines" 379 bool "Common Hardware Reference Platform (CHRP) based machines"
370 depends on PPC_MULTIPLATFORM && PPC32 380 depends on PPC_MULTIPLATFORM && PPC32
@@ -594,6 +604,7 @@ endmenu
594 604
595source arch/powerpc/platforms/embedded6xx/Kconfig 605source arch/powerpc/platforms/embedded6xx/Kconfig
596source arch/powerpc/platforms/4xx/Kconfig 606source arch/powerpc/platforms/4xx/Kconfig
607source arch/powerpc/platforms/82xx/Kconfig
597source arch/powerpc/platforms/83xx/Kconfig 608source arch/powerpc/platforms/83xx/Kconfig
598source arch/powerpc/platforms/85xx/Kconfig 609source arch/powerpc/platforms/85xx/Kconfig
599source arch/powerpc/platforms/86xx/Kconfig 610source arch/powerpc/platforms/86xx/Kconfig
@@ -1058,6 +1069,8 @@ source "fs/Kconfig"
1058 1069
1059# XXX source "arch/ppc/8260_io/Kconfig" 1070# XXX source "arch/ppc/8260_io/Kconfig"
1060 1071
1072source "arch/powerpc/sysdev/qe_lib/Kconfig"
1073
1061source "arch/powerpc/platforms/iseries/Kconfig" 1074source "arch/powerpc/platforms/iseries/Kconfig"
1062 1075
1063source "lib/Kconfig" 1076source "lib/Kconfig"
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index c383d56bbe18..003520b56303 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -113,7 +113,7 @@ endif
113endif 113endif
114 114
115quiet_cmd_wrap = WRAP $@ 115quiet_cmd_wrap = WRAP $@
116 cmd_wrap =$(wrapper) -c -o $@ -p $2 $(CROSSWRAP) vmlinux 116 cmd_wrap =$(CONFIG_SHELL) $(wrapper) -c -o $@ -p $2 $(CROSSWRAP) vmlinux
117quiet_cmd_wrap_initrd = WRAP $@ 117quiet_cmd_wrap_initrd = WRAP $@
118 cmd_wrap_initrd =$(wrapper) -c -o $@ -p $2 $(CROSSWRAP) \ 118 cmd_wrap_initrd =$(wrapper) -c -o $@ -p $2 $(CROSSWRAP) \
119 -i $(obj)/ramdisk.image.gz vmlinux 119 -i $(obj)/ramdisk.image.gz vmlinux
diff --git a/arch/powerpc/boot/dts/mpc8272ads.dts b/arch/powerpc/boot/dts/mpc8272ads.dts
new file mode 100644
index 000000000000..34efdd028c4f
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8272ads.dts
@@ -0,0 +1,223 @@
1/*
2 * MPC8272 ADS Device Tree Source
3 *
4 * Copyright 2005 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/ {
13 model = "MPC8272ADS";
14 compatible = "MPC8260ADS";
15 #address-cells = <1>;
16 #size-cells = <1>;
17 linux,phandle = <100>;
18
19 cpus {
20 #cpus = <1>;
21 #address-cells = <1>;
22 #size-cells = <0>;
23 linux,phandle = <200>;
24
25 PowerPC,8272@0 {
26 device_type = "cpu";
27 reg = <0>;
28 d-cache-line-size = <20>; // 32 bytes
29 i-cache-line-size = <20>; // 32 bytes
30 d-cache-size = <4000>; // L1, 16K
31 i-cache-size = <4000>; // L1, 16K
32 timebase-frequency = <0>;
33 bus-frequency = <0>;
34 clock-frequency = <0>;
35 32-bit;
36 linux,phandle = <201>;
37 linux,boot-cpu;
38 };
39 };
40
41 interrupt-controller@f8200000 {
42 linux,phandle = <f8200000>;
43 #address-cells = <0>;
44 #interrupt-cells = <2>;
45 interrupt-controller;
46 reg = <f8200000 f8200004>;
47 built-in;
48 device_type = "pci-pic";
49 };
50 memory {
51 device_type = "memory";
52 linux,phandle = <300>;
53 reg = <00000000 4000000 f4500000 00000020>;
54 };
55
56 soc8272@f0000000 {
57 #address-cells = <1>;
58 #size-cells = <1>;
59 #interrupt-cells = <2>;
60 device_type = "soc";
61 ranges = < 0 0 2 00000000 f0000000 00053000>;
62 reg = <f0000000 0>;
63
64 mdio@0 {
65 device_type = "mdio";
66 compatible = "fs_enet";
67 reg = <0 0>;
68 linux,phandle = <24520>;
69 #address-cells = <1>;
70 #size-cells = <0>;
71 ethernet-phy@0 {
72 linux,phandle = <2452000>;
73 interrupt-parent = <10c00>;
74 interrupts = <19 1>;
75 reg = <0>;
76 bitbang = [ 12 12 13 02 02 01 ];
77 device_type = "ethernet-phy";
78 };
79 ethernet-phy@1 {
80 linux,phandle = <2452001>;
81 interrupt-parent = <10c00>;
82 interrupts = <19 1>;
83 bitbang = [ 12 12 13 02 02 01 ];
84 reg = <3>;
85 device_type = "ethernet-phy";
86 };
87 };
88
89 ethernet@24000 {
90 #address-cells = <1>;
91 #size-cells = <0>;
92 device_type = "network";
93 device-id = <2>;
94 compatible = "fs_enet";
95 model = "FCC";
96 reg = <11300 20 8400 100 11380 30>;
97 mac-address = [ 00 11 2F 99 43 54 ];
98 interrupts = <20 2>;
99 interrupt-parent = <10c00>;
100 phy-handle = <2452000>;
101 rx-clock = <13>;
102 tx-clock = <12>;
103 };
104
105 ethernet@25000 {
106 device_type = "network";
107 device-id = <3>;
108 compatible = "fs_enet";
109 model = "FCC";
110 reg = <11320 20 8500 100 113b0 30>;
111 mac-address = [ 00 11 2F 99 44 54 ];
112 interrupts = <21 2>;
113 interrupt-parent = <10c00>;
114 phy-handle = <2452001>;
115 rx-clock = <17>;
116 tx-clock = <18>;
117 };
118
119 cpm@f0000000 {
120 linux,phandle = <f0000000>;
121 #address-cells = <1>;
122 #size-cells = <1>;
123 #interrupt-cells = <2>;
124 device_type = "cpm";
125 model = "CPM2";
126 ranges = <00000000 00000000 3ffff>;
127 reg = <10d80 3280>;
128 command-proc = <119c0>;
129 brg-frequency = <17D7840>;
130 cpm_clk = <BEBC200>;
131
132 scc@11a00 {
133 device_type = "serial";
134 compatible = "cpm_uart";
135 model = "SCC";
136 device-id = <2>;
137 reg = <11a00 20 8000 100>;
138 current-speed = <1c200>;
139 interrupts = <28 2>;
140 interrupt-parent = <10c00>;
141 clock-setup = <0 00ffffff>;
142 rx-clock = <1>;
143 tx-clock = <1>;
144 };
145
146 scc@11a60 {
147 device_type = "serial";
148 compatible = "cpm_uart";
149 model = "SCC";
150 device-id = <5>;
151 reg = <11a60 20 8300 100>;
152 current-speed = <1c200>;
153 interrupts = <2b 2>;
154 interrupt-parent = <10c00>;
155 clock-setup = <1b ffffff00>;
156 rx-clock = <4>;
157 tx-clock = <4>;
158 };
159
160 };
161 interrupt-controller@10c00 {
162 linux,phandle = <10c00>;
163 #address-cells = <0>;
164 #interrupt-cells = <2>;
165 interrupt-controller;
166 reg = <10c00 80>;
167 built-in;
168 device_type = "cpm-pic";
169 compatible = "CPM2";
170 };
171 pci@0500 {
172 linux,phandle = <0500>;
173 #interrupt-cells = <1>;
174 #size-cells = <2>;
175 #address-cells = <3>;
176 compatible = "8272";
177 device_type = "pci";
178 reg = <10430 4dc>;
179 clock-frequency = <3f940aa>;
180 interrupt-map-mask = <f800 0 0 7>;
181 interrupt-map = <
182
183 /* IDSEL 0x16 */
184 b000 0 0 1 f8200000 40 0
185 b000 0 0 2 f8200000 41 0
186 b000 0 0 3 f8200000 42 0
187 b000 0 0 4 f8200000 43 0
188
189 /* IDSEL 0x17 */
190 b800 0 0 1 f8200000 43 0
191 b800 0 0 2 f8200000 40 0
192 b800 0 0 3 f8200000 41 0
193 b800 0 0 4 f8200000 42 0
194
195 /* IDSEL 0x18 */
196 c000 0 0 1 f8200000 42 0
197 c000 0 0 2 f8200000 43 0
198 c000 0 0 3 f8200000 40 0
199 c000 0 0 4 f8200000 41 0>;
200 interrupt-parent = <10c00>;
201 interrupts = <14 3>;
202 bus-range = <0 0>;
203 ranges = <02000000 0 80000000 80000000 0 40000000
204 01000000 0 00000000 f6000000 0 02000000>;
205 };
206
207/* May need to remove if on a part without crypto engine */
208 crypto@30000 {
209 device_type = "crypto";
210 model = "SEC2";
211 compatible = "talitos";
212 reg = <30000 10000>;
213 interrupts = <b 0>;
214 interrupt-parent = <10c00>;
215 num-channels = <4>;
216 channel-fifo-len = <18>;
217 exec-units-mask = <0000007e>;
218/* desc mask is for rev1.x, we need runtime fixup for >=2.x */
219 descriptor-types-mask = <01010ebf>;
220 };
221
222 };
223};
diff --git a/arch/powerpc/boot/dts/mpc8360emds.dts b/arch/powerpc/boot/dts/mpc8360emds.dts
new file mode 100644
index 000000000000..9022192155b9
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8360emds.dts
@@ -0,0 +1,375 @@
1/*
2 * MPC8360E EMDS Device Tree Source
3 *
4 * Copyright 2006 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12
13/*
14/memreserve/ 00000000 1000000;
15*/
16
17/ {
18 model = "MPC8360EPB";
19 compatible = "MPC83xx";
20 #address-cells = <1>;
21 #size-cells = <1>;
22 linux,phandle = <100>;
23
24 cpus {
25 #cpus = <1>;
26 #address-cells = <1>;
27 #size-cells = <0>;
28 linux,phandle = <200>;
29
30 PowerPC,8360@0 {
31 device_type = "cpu";
32 reg = <0>;
33 d-cache-line-size = <20>; // 32 bytes
34 i-cache-line-size = <20>; // 32 bytes
35 d-cache-size = <8000>; // L1, 32K
36 i-cache-size = <8000>; // L1, 32K
37 timebase-frequency = <3EF1480>;
38 bus-frequency = <FBC5200>;
39 clock-frequency = <1F78A400>;
40 32-bit;
41 linux,phandle = <201>;
42 linux,boot-cpu;
43 };
44 };
45
46 memory {
47 device_type = "memory";
48 linux,phandle = <300>;
49 reg = <00000000 10000000>;
50 };
51
52 bcsr@f8000000 {
53 device_type = "board-control";
54 reg = <f8000000 8000>;
55 };
56
57 soc8360@e0000000 {
58 #address-cells = <1>;
59 #size-cells = <1>;
60 #interrupt-cells = <2>;
61 device_type = "soc";
62 ranges = <0 e0000000 00100000>;
63 reg = <e0000000 00000200>;
64 bus-frequency = <FBC5200>;
65
66 wdt@200 {
67 device_type = "watchdog";
68 compatible = "mpc83xx_wdt";
69 reg = <200 100>;
70 };
71
72 i2c@3000 {
73 device_type = "i2c";
74 compatible = "fsl-i2c";
75 reg = <3000 100>;
76 interrupts = <e 8>;
77 interrupt-parent = <700>;
78 dfsrr;
79 };
80
81 i2c@3100 {
82 device_type = "i2c";
83 compatible = "fsl-i2c";
84 reg = <3100 100>;
85 interrupts = <f 8>;
86 interrupt-parent = <700>;
87 dfsrr;
88 };
89
90 serial@4500 {
91 device_type = "serial";
92 compatible = "ns16550";
93 reg = <4500 100>;
94 clock-frequency = <FBC5200>;
95 interrupts = <9 8>;
96 interrupt-parent = <700>;
97 };
98
99 serial@4600 {
100 device_type = "serial";
101 compatible = "ns16550";
102 reg = <4600 100>;
103 clock-frequency = <FBC5200>;
104 interrupts = <a 8>;
105 interrupt-parent = <700>;
106 };
107
108 crypto@30000 {
109 device_type = "crypto";
110 model = "SEC2";
111 compatible = "talitos";
112 reg = <30000 10000>;
113 interrupts = <b 8>;
114 interrupt-parent = <700>;
115 num-channels = <4>;
116 channel-fifo-len = <18>;
117 exec-units-mask = <0000007e>;
118 /* desc mask is for rev1.x, we need runtime fixup for >=2.x */
119 descriptor-types-mask = <01010ebf>;
120 };
121
122 pci@8500 {
123 linux,phandle = <8500>;
124 interrupt-map-mask = <f800 0 0 7>;
125 interrupt-map = <
126
127 /* IDSEL 0x11 AD17 */
128 8800 0 0 1 700 14 8
129 8800 0 0 2 700 15 8
130 8800 0 0 3 700 16 8
131 8800 0 0 4 700 17 8
132
133 /* IDSEL 0x12 AD18 */
134 9000 0 0 1 700 16 8
135 9000 0 0 2 700 17 8
136 9000 0 0 3 700 14 8
137 9000 0 0 4 700 15 8
138
139 /* IDSEL 0x13 AD19 */
140 9800 0 0 1 700 17 8
141 9800 0 0 2 700 14 8
142 9800 0 0 3 700 15 8
143 9800 0 0 4 700 16 8
144
145 /* IDSEL 0x15 AD21*/
146 a800 0 0 1 700 14 8
147 a800 0 0 2 700 15 8
148 a800 0 0 3 700 16 8
149 a800 0 0 4 700 17 8
150
151 /* IDSEL 0x16 AD22*/
152 b000 0 0 1 700 17 8
153 b000 0 0 2 700 14 8
154 b000 0 0 3 700 15 8
155 b000 0 0 4 700 16 8
156
157 /* IDSEL 0x17 AD23*/
158 b800 0 0 1 700 16 8
159 b800 0 0 2 700 17 8
160 b800 0 0 3 700 14 8
161 b800 0 0 4 700 15 8
162
163 /* IDSEL 0x18 AD24*/
164 c000 0 0 1 700 15 8
165 c000 0 0 2 700 16 8
166 c000 0 0 3 700 17 8
167 c000 0 0 4 700 14 8>;
168 interrupt-parent = <700>;
169 interrupts = <42 8>;
170 bus-range = <0 0>;
171 ranges = <02000000 0 a0000000 a0000000 0 10000000
172 42000000 0 80000000 80000000 0 10000000
173 01000000 0 00000000 e2000000 0 00100000>;
174 clock-frequency = <3f940aa>;
175 #interrupt-cells = <1>;
176 #size-cells = <2>;
177 #address-cells = <3>;
178 reg = <8500 100>;
179 compatible = "83xx";
180 device_type = "pci";
181 };
182
183 pic@700 {
184 linux,phandle = <700>;
185 interrupt-controller;
186 #address-cells = <0>;
187 #interrupt-cells = <2>;
188 reg = <700 100>;
189 built-in;
190 device_type = "ipic";
191 };
192
193 par_io@1400 {
194 reg = <1400 100>;
195 device_type = "par_io";
196 num-ports = <7>;
197
198 ucc_pin@01 {
199 linux,phandle = <140001>;
200 pio-map = <
201 /* port pin dir open_drain assignment has_irq */
202 0 3 1 0 1 0 /* TxD0 */
203 0 4 1 0 1 0 /* TxD1 */
204 0 5 1 0 1 0 /* TxD2 */
205 0 6 1 0 1 0 /* TxD3 */
206 1 6 1 0 3 0 /* TxD4 */
207 1 7 1 0 1 0 /* TxD5 */
208 1 9 1 0 2 0 /* TxD6 */
209 1 a 1 0 2 0 /* TxD7 */
210 0 9 2 0 1 0 /* RxD0 */
211 0 a 2 0 1 0 /* RxD1 */
212 0 b 2 0 1 0 /* RxD2 */
213 0 c 2 0 1 0 /* RxD3 */
214 0 d 2 0 1 0 /* RxD4 */
215 1 1 2 0 2 0 /* RxD5 */
216 1 0 2 0 2 0 /* RxD6 */
217 1 4 2 0 2 0 /* RxD7 */
218 0 7 1 0 1 0 /* TX_EN */
219 0 8 1 0 1 0 /* TX_ER */
220 0 f 2 0 1 0 /* RX_DV */
221 0 10 2 0 1 0 /* RX_ER */
222 0 0 2 0 1 0 /* RX_CLK */
223 2 9 1 0 3 0 /* GTX_CLK - CLK10 */
224 2 8 2 0 1 0>; /* GTX125 - CLK9 */
225 };
226 ucc_pin@02 {
227 linux,phandle = <140002>;
228 pio-map = <
229 /* port pin dir open_drain assignment has_irq */
230 0 11 1 0 1 0 /* TxD0 */
231 0 12 1 0 1 0 /* TxD1 */
232 0 13 1 0 1 0 /* TxD2 */
233 0 14 1 0 1 0 /* TxD3 */
234 1 2 1 0 1 0 /* TxD4 */
235 1 3 1 0 2 0 /* TxD5 */
236 1 5 1 0 3 0 /* TxD6 */
237 1 8 1 0 3 0 /* TxD7 */
238 0 17 2 0 1 0 /* RxD0 */
239 0 18 2 0 1 0 /* RxD1 */
240 0 19 2 0 1 0 /* RxD2 */
241 0 1a 2 0 1 0 /* RxD3 */
242 0 1b 2 0 1 0 /* RxD4 */
243 1 c 2 0 2 0 /* RxD5 */
244 1 d 2 0 3 0 /* RxD6 */
245 1 b 2 0 2 0 /* RxD7 */
246 0 15 1 0 1 0 /* TX_EN */
247 0 16 1 0 1 0 /* TX_ER */
248 0 1d 2 0 1 0 /* RX_DV */
249 0 1e 2 0 1 0 /* RX_ER */
250 0 1f 2 0 1 0 /* RX_CLK */
251 2 2 1 0 2 0 /* GTX_CLK - CLK10 */
252 2 3 2 0 1 0 /* GTX125 - CLK4 */
253 0 1 3 0 2 0 /* MDIO */
254 0 2 1 0 1 0>; /* MDC */
255 };
256
257 };
258 };
259
260 qe@e0100000 {
261 #address-cells = <1>;
262 #size-cells = <1>;
263 device_type = "qe";
264 model = "QE";
265 ranges = <0 e0100000 00100000>;
266 reg = <e0100000 480>;
267 brg-frequency = <0>;
268 bus-frequency = <179A7B00>;
269
270 muram@10000 {
271 device_type = "muram";
272 ranges = <0 00010000 0000c000>;
273
274 data-only@0{
275 reg = <0 c000>;
276 };
277 };
278
279 spi@4c0 {
280 device_type = "spi";
281 compatible = "fsl_spi";
282 reg = <4c0 40>;
283 interrupts = <2>;
284 interrupt-parent = <80>;
285 mode = "cpu";
286 };
287
288 spi@500 {
289 device_type = "spi";
290 compatible = "fsl_spi";
291 reg = <500 40>;
292 interrupts = <1>;
293 interrupt-parent = <80>;
294 mode = "cpu";
295 };
296
297 usb@6c0 {
298 device_type = "usb";
299 compatible = "qe_udc";
300 reg = <6c0 40 8B00 100>;
301 interrupts = <b>;
302 interrupt-parent = <80>;
303 mode = "slave";
304 };
305
306 ucc@2000 {
307 device_type = "network";
308 compatible = "ucc_geth";
309 model = "UCC";
310 device-id = <1>;
311 reg = <2000 200>;
312 interrupts = <20>;
313 interrupt-parent = <80>;
314 mac-address = [ 00 04 9f 00 23 23 ];
315 rx-clock = <0>;
316 tx-clock = <19>;
317 phy-handle = <212000>;
318 pio-handle = <140001>;
319 };
320
321 ucc@3000 {
322 device_type = "network";
323 compatible = "ucc_geth";
324 model = "UCC";
325 device-id = <2>;
326 reg = <3000 200>;
327 interrupts = <21>;
328 interrupt-parent = <80>;
329 mac-address = [ 00 11 22 33 44 55 ];
330 rx-clock = <0>;
331 tx-clock = <14>;
332 phy-handle = <212001>;
333 pio-handle = <140002>;
334 };
335
336 mdio@2120 {
337 #address-cells = <1>;
338 #size-cells = <0>;
339 reg = <2120 18>;
340 device_type = "mdio";
341 compatible = "ucc_geth_phy";
342
343 ethernet-phy@00 {
344 linux,phandle = <212000>;
345 interrupt-parent = <700>;
346 interrupts = <11 2>;
347 reg = <0>;
348 device_type = "ethernet-phy";
349 interface = <6>; //ENET_1000_GMII
350 };
351 ethernet-phy@01 {
352 linux,phandle = <212001>;
353 interrupt-parent = <700>;
354 interrupts = <12 2>;
355 reg = <1>;
356 device_type = "ethernet-phy";
357 interface = <6>;
358 };
359 };
360
361 qeic@80 {
362 linux,phandle = <80>;
363 interrupt-controller;
364 device_type = "qeic";
365 #address-cells = <0>;
366 #interrupt-cells = <1>;
367 reg = <80 80>;
368 built-in;
369 big-endian;
370 interrupts = <20 8 21 8>; //high:32 low:33
371 interrupt-parent = <700>;
372 };
373
374 };
375};
diff --git a/arch/powerpc/boot/zImage.coff.lds.S b/arch/powerpc/boot/zImage.coff.lds.S
index 6016251a1a2c..05f32388b953 100644
--- a/arch/powerpc/boot/zImage.coff.lds.S
+++ b/arch/powerpc/boot/zImage.coff.lds.S
@@ -15,6 +15,7 @@ SECTIONS
15 { 15 {
16 *(.rodata*) 16 *(.rodata*)
17 *(.data*) 17 *(.data*)
18 *(__builtin_*)
18 *(.sdata*) 19 *(.sdata*)
19 __got2_start = .; 20 __got2_start = .;
20 *(.got2) 21 *(.got2)
diff --git a/arch/powerpc/configs/mpc8360emds_defconfig b/arch/powerpc/configs/mpc8360emds_defconfig
new file mode 100644
index 000000000000..c0703415d608
--- /dev/null
+++ b/arch/powerpc/configs/mpc8360emds_defconfig
@@ -0,0 +1,1018 @@
1#
2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.18
4# Thu Sep 21 18:14:27 2006
5#
6# CONFIG_PPC64 is not set
7CONFIG_PPC32=y
8CONFIG_PPC_MERGE=y
9CONFIG_MMU=y
10CONFIG_GENERIC_HARDIRQS=y
11CONFIG_IRQ_PER_CPU=y
12CONFIG_RWSEM_XCHGADD_ALGORITHM=y
13CONFIG_GENERIC_HWEIGHT=y
14CONFIG_GENERIC_CALIBRATE_DELAY=y
15CONFIG_GENERIC_FIND_NEXT_BIT=y
16CONFIG_PPC=y
17CONFIG_EARLY_PRINTK=y
18CONFIG_GENERIC_NVRAM=y
19CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
20CONFIG_ARCH_MAY_HAVE_PC_FDC=y
21CONFIG_PPC_OF=y
22CONFIG_PPC_UDBG_16550=y
23# CONFIG_GENERIC_TBSYNC is not set
24CONFIG_AUDIT_ARCH=y
25CONFIG_DEFAULT_UIMAGE=y
26
27#
28# Processor support
29#
30# CONFIG_CLASSIC32 is not set
31# CONFIG_PPC_52xx is not set
32# CONFIG_PPC_82xx is not set
33CONFIG_PPC_83xx=y
34# CONFIG_PPC_85xx is not set
35# CONFIG_PPC_86xx is not set
36# CONFIG_40x is not set
37# CONFIG_44x is not set
38# CONFIG_8xx is not set
39# CONFIG_E200 is not set
40CONFIG_6xx=y
41CONFIG_83xx=y
42CONFIG_PPC_FPU=y
43CONFIG_PPC_STD_MMU=y
44CONFIG_PPC_STD_MMU_32=y
45# CONFIG_SMP is not set
46CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
47
48#
49# Code maturity level options
50#
51CONFIG_EXPERIMENTAL=y
52CONFIG_BROKEN_ON_SMP=y
53CONFIG_INIT_ENV_ARG_LIMIT=32
54
55#
56# General setup
57#
58CONFIG_LOCALVERSION=""
59CONFIG_LOCALVERSION_AUTO=y
60CONFIG_SWAP=y
61CONFIG_SYSVIPC=y
62# CONFIG_POSIX_MQUEUE is not set
63# CONFIG_BSD_PROCESS_ACCT is not set
64# CONFIG_TASKSTATS is not set
65# CONFIG_AUDIT is not set
66# CONFIG_IKCONFIG is not set
67# CONFIG_RELAY is not set
68CONFIG_INITRAMFS_SOURCE=""
69# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
70CONFIG_EMBEDDED=y
71CONFIG_SYSCTL=y
72# CONFIG_KALLSYMS is not set
73CONFIG_HOTPLUG=y
74CONFIG_PRINTK=y
75CONFIG_BUG=y
76CONFIG_ELF_CORE=y
77CONFIG_BASE_FULL=y
78CONFIG_FUTEX=y
79# CONFIG_EPOLL is not set
80CONFIG_SHMEM=y
81CONFIG_SLAB=y
82CONFIG_VM_EVENT_COUNTERS=y
83CONFIG_RT_MUTEXES=y
84# CONFIG_TINY_SHMEM is not set
85CONFIG_BASE_SMALL=0
86# CONFIG_SLOB is not set
87
88#
89# Loadable module support
90#
91CONFIG_MODULES=y
92CONFIG_MODULE_UNLOAD=y
93# CONFIG_MODULE_FORCE_UNLOAD is not set
94# CONFIG_MODVERSIONS is not set
95# CONFIG_MODULE_SRCVERSION_ALL is not set
96# CONFIG_KMOD is not set
97
98#
99# Block layer
100#
101# CONFIG_LBD is not set
102# CONFIG_BLK_DEV_IO_TRACE is not set
103# CONFIG_LSF is not set
104
105#
106# IO Schedulers
107#
108CONFIG_IOSCHED_NOOP=y
109CONFIG_IOSCHED_AS=y
110CONFIG_IOSCHED_DEADLINE=y
111CONFIG_IOSCHED_CFQ=y
112CONFIG_DEFAULT_AS=y
113# CONFIG_DEFAULT_DEADLINE is not set
114# CONFIG_DEFAULT_CFQ is not set
115# CONFIG_DEFAULT_NOOP is not set
116CONFIG_DEFAULT_IOSCHED="anticipatory"
117CONFIG_QUICC_ENGINE=y
118CONFIG_PPC_GEN550=y
119# CONFIG_WANT_EARLY_SERIAL is not set
120
121#
122# Platform support
123#
124# CONFIG_MPC834x_SYS is not set
125# CONFIG_MPC834x_ITX is not set
126CONFIG_MPC8360E_PB=y
127CONFIG_PPC_MPC836x=y
128# CONFIG_MPIC is not set
129
130#
131# Kernel options
132#
133# CONFIG_HIGHMEM is not set
134# CONFIG_HZ_100 is not set
135CONFIG_HZ_250=y
136# CONFIG_HZ_1000 is not set
137CONFIG_HZ=250
138CONFIG_PREEMPT_NONE=y
139# CONFIG_PREEMPT_VOLUNTARY is not set
140# CONFIG_PREEMPT is not set
141CONFIG_BINFMT_ELF=y
142# CONFIG_BINFMT_MISC is not set
143CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
144CONFIG_ARCH_FLATMEM_ENABLE=y
145CONFIG_SELECT_MEMORY_MODEL=y
146CONFIG_FLATMEM_MANUAL=y
147# CONFIG_DISCONTIGMEM_MANUAL is not set
148# CONFIG_SPARSEMEM_MANUAL is not set
149CONFIG_FLATMEM=y
150CONFIG_FLAT_NODE_MEM_MAP=y
151# CONFIG_SPARSEMEM_STATIC is not set
152CONFIG_SPLIT_PTLOCK_CPUS=4
153# CONFIG_RESOURCES_64BIT is not set
154CONFIG_PROC_DEVICETREE=y
155# CONFIG_CMDLINE_BOOL is not set
156# CONFIG_PM is not set
157CONFIG_SECCOMP=y
158CONFIG_ISA_DMA_API=y
159
160#
161# Bus options
162#
163CONFIG_GENERIC_ISA_DMA=y
164# CONFIG_MPIC_WEIRD is not set
165# CONFIG_PPC_I8259 is not set
166CONFIG_PPC_INDIRECT_PCI=y
167CONFIG_FSL_SOC=y
168CONFIG_PCI=y
169CONFIG_PCI_DOMAINS=y
170# CONFIG_PCIEPORTBUS is not set
171
172#
173# PCCARD (PCMCIA/CardBus) support
174#
175# CONFIG_PCCARD is not set
176
177#
178# PCI Hotplug Support
179#
180# CONFIG_HOTPLUG_PCI is not set
181
182#
183# Advanced setup
184#
185# CONFIG_ADVANCED_OPTIONS is not set
186
187#
188# Default settings for advanced configuration options are used
189#
190CONFIG_HIGHMEM_START=0xfe000000
191CONFIG_LOWMEM_SIZE=0x30000000
192CONFIG_KERNEL_START=0xc0000000
193CONFIG_TASK_SIZE=0x80000000
194CONFIG_BOOT_LOAD=0x00800000
195
196#
197# Networking
198#
199CONFIG_NET=y
200
201#
202# Networking options
203#
204# CONFIG_NETDEBUG is not set
205CONFIG_PACKET=y
206# CONFIG_PACKET_MMAP is not set
207CONFIG_UNIX=y
208CONFIG_XFRM=y
209# CONFIG_XFRM_USER is not set
210# CONFIG_NET_KEY is not set
211CONFIG_INET=y
212CONFIG_IP_MULTICAST=y
213# CONFIG_IP_ADVANCED_ROUTER is not set
214CONFIG_IP_FIB_HASH=y
215CONFIG_IP_PNP=y
216CONFIG_IP_PNP_DHCP=y
217CONFIG_IP_PNP_BOOTP=y
218# CONFIG_IP_PNP_RARP is not set
219# CONFIG_NET_IPIP is not set
220# CONFIG_NET_IPGRE is not set
221# CONFIG_IP_MROUTE is not set
222# CONFIG_ARPD is not set
223CONFIG_SYN_COOKIES=y
224# CONFIG_INET_AH is not set
225# CONFIG_INET_ESP is not set
226# CONFIG_INET_IPCOMP is not set
227# CONFIG_INET_XFRM_TUNNEL is not set
228# CONFIG_INET_TUNNEL is not set
229CONFIG_INET_XFRM_MODE_TRANSPORT=y
230CONFIG_INET_XFRM_MODE_TUNNEL=y
231CONFIG_INET_DIAG=y
232CONFIG_INET_TCP_DIAG=y
233# CONFIG_TCP_CONG_ADVANCED is not set
234CONFIG_TCP_CONG_BIC=y
235# CONFIG_IPV6 is not set
236# CONFIG_INET6_XFRM_TUNNEL is not set
237# CONFIG_INET6_TUNNEL is not set
238# CONFIG_NETWORK_SECMARK is not set
239# CONFIG_NETFILTER is not set
240
241#
242# DCCP Configuration (EXPERIMENTAL)
243#
244# CONFIG_IP_DCCP is not set
245
246#
247# SCTP Configuration (EXPERIMENTAL)
248#
249# CONFIG_IP_SCTP is not set
250
251#
252# TIPC Configuration (EXPERIMENTAL)
253#
254# CONFIG_TIPC is not set
255# CONFIG_ATM is not set
256# CONFIG_BRIDGE is not set
257# CONFIG_VLAN_8021Q is not set
258# CONFIG_DECNET is not set
259# CONFIG_LLC2 is not set
260# CONFIG_IPX is not set
261# CONFIG_ATALK is not set
262# CONFIG_X25 is not set
263# CONFIG_LAPB is not set
264# CONFIG_ECONET is not set
265# CONFIG_WAN_ROUTER is not set
266
267#
268# QoS and/or fair queueing
269#
270# CONFIG_NET_SCHED is not set
271
272#
273# Network testing
274#
275# CONFIG_NET_PKTGEN is not set
276# CONFIG_HAMRADIO is not set
277# CONFIG_IRDA is not set
278# CONFIG_BT is not set
279# CONFIG_IEEE80211 is not set
280
281#
282# Device Drivers
283#
284
285#
286# Generic Driver Options
287#
288CONFIG_STANDALONE=y
289CONFIG_PREVENT_FIRMWARE_BUILD=y
290# CONFIG_FW_LOADER is not set
291# CONFIG_SYS_HYPERVISOR is not set
292
293#
294# Connector - unified userspace <-> kernelspace linker
295#
296# CONFIG_CONNECTOR is not set
297
298#
299# Memory Technology Devices (MTD)
300#
301# CONFIG_MTD is not set
302
303#
304# Parallel port support
305#
306# CONFIG_PARPORT is not set
307
308#
309# Plug and Play support
310#
311
312#
313# Block devices
314#
315# CONFIG_BLK_DEV_FD is not set
316# CONFIG_BLK_CPQ_DA is not set
317# CONFIG_BLK_CPQ_CISS_DA is not set
318# CONFIG_BLK_DEV_DAC960 is not set
319# CONFIG_BLK_DEV_UMEM is not set
320# CONFIG_BLK_DEV_COW_COMMON is not set
321CONFIG_BLK_DEV_LOOP=y
322# CONFIG_BLK_DEV_CRYPTOLOOP is not set
323# CONFIG_BLK_DEV_NBD is not set
324# CONFIG_BLK_DEV_SX8 is not set
325CONFIG_BLK_DEV_RAM=y
326CONFIG_BLK_DEV_RAM_COUNT=16
327CONFIG_BLK_DEV_RAM_SIZE=32768
328CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
329CONFIG_BLK_DEV_INITRD=y
330# CONFIG_CDROM_PKTCDVD is not set
331# CONFIG_ATA_OVER_ETH is not set
332
333#
334# ATA/ATAPI/MFM/RLL support
335#
336# CONFIG_IDE is not set
337
338#
339# SCSI device support
340#
341# CONFIG_RAID_ATTRS is not set
342CONFIG_SCSI=y
343CONFIG_SCSI_PROC_FS=y
344
345#
346# SCSI support type (disk, tape, CD-ROM)
347#
348# CONFIG_BLK_DEV_SD is not set
349# CONFIG_CHR_DEV_ST is not set
350# CONFIG_CHR_DEV_OSST is not set
351# CONFIG_BLK_DEV_SR is not set
352# CONFIG_CHR_DEV_SG is not set
353# CONFIG_CHR_DEV_SCH is not set
354
355#
356# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
357#
358# CONFIG_SCSI_MULTI_LUN is not set
359# CONFIG_SCSI_CONSTANTS is not set
360# CONFIG_SCSI_LOGGING is not set
361
362#
363# SCSI Transport Attributes
364#
365# CONFIG_SCSI_SPI_ATTRS is not set
366# CONFIG_SCSI_FC_ATTRS is not set
367# CONFIG_SCSI_ISCSI_ATTRS is not set
368# CONFIG_SCSI_SAS_ATTRS is not set
369
370#
371# SCSI low-level drivers
372#
373# CONFIG_ISCSI_TCP is not set
374# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
375# CONFIG_SCSI_3W_9XXX is not set
376# CONFIG_SCSI_ACARD is not set
377# CONFIG_SCSI_AACRAID is not set
378# CONFIG_SCSI_AIC7XXX is not set
379# CONFIG_SCSI_AIC7XXX_OLD is not set
380# CONFIG_SCSI_AIC79XX is not set
381# CONFIG_SCSI_DPT_I2O is not set
382# CONFIG_MEGARAID_NEWGEN is not set
383# CONFIG_MEGARAID_LEGACY is not set
384# CONFIG_MEGARAID_SAS is not set
385# CONFIG_SCSI_SATA is not set
386# CONFIG_SCSI_HPTIOP is not set
387# CONFIG_SCSI_BUSLOGIC is not set
388# CONFIG_SCSI_DMX3191D is not set
389# CONFIG_SCSI_EATA is not set
390# CONFIG_SCSI_FUTURE_DOMAIN is not set
391# CONFIG_SCSI_GDTH is not set
392# CONFIG_SCSI_IPS is not set
393# CONFIG_SCSI_INITIO is not set
394# CONFIG_SCSI_INIA100 is not set
395# CONFIG_SCSI_SYM53C8XX_2 is not set
396# CONFIG_SCSI_IPR is not set
397# CONFIG_SCSI_QLOGIC_1280 is not set
398# CONFIG_SCSI_QLA_FC is not set
399# CONFIG_SCSI_LPFC is not set
400# CONFIG_SCSI_DC395x is not set
401# CONFIG_SCSI_DC390T is not set
402# CONFIG_SCSI_NSP32 is not set
403# CONFIG_SCSI_DEBUG is not set
404
405#
406# Multi-device support (RAID and LVM)
407#
408# CONFIG_MD is not set
409
410#
411# Fusion MPT device support
412#
413# CONFIG_FUSION is not set
414# CONFIG_FUSION_SPI is not set
415# CONFIG_FUSION_FC is not set
416# CONFIG_FUSION_SAS is not set
417
418#
419# IEEE 1394 (FireWire) support
420#
421# CONFIG_IEEE1394 is not set
422
423#
424# I2O device support
425#
426# CONFIG_I2O is not set
427
428#
429# Macintosh device drivers
430#
431# CONFIG_WINDFARM is not set
432
433#
434# Network device support
435#
436CONFIG_NETDEVICES=y
437# CONFIG_DUMMY is not set
438# CONFIG_BONDING is not set
439# CONFIG_EQUALIZER is not set
440# CONFIG_TUN is not set
441
442#
443# ARCnet devices
444#
445# CONFIG_ARCNET is not set
446
447#
448# PHY device support
449#
450# CONFIG_PHYLIB is not set
451
452#
453# Ethernet (10 or 100Mbit)
454#
455CONFIG_NET_ETHERNET=y
456CONFIG_MII=y
457# CONFIG_HAPPYMEAL is not set
458# CONFIG_SUNGEM is not set
459# CONFIG_CASSINI is not set
460# CONFIG_NET_VENDOR_3COM is not set
461
462#
463# Tulip family network device support
464#
465# CONFIG_NET_TULIP is not set
466# CONFIG_HP100 is not set
467# CONFIG_NET_PCI is not set
468
469#
470# Ethernet (1000 Mbit)
471#
472# CONFIG_ACENIC is not set
473# CONFIG_DL2K is not set
474# CONFIG_E1000 is not set
475# CONFIG_NS83820 is not set
476# CONFIG_HAMACHI is not set
477# CONFIG_YELLOWFIN is not set
478# CONFIG_R8169 is not set
479# CONFIG_SIS190 is not set
480# CONFIG_SKGE is not set
481# CONFIG_SKY2 is not set
482# CONFIG_SK98LIN is not set
483# CONFIG_TIGON3 is not set
484# CONFIG_BNX2 is not set
485# CONFIG_GIANFAR is not set
486CONFIG_UCC_GETH=y
487# CONFIG_UGETH_NAPI is not set
488# CONFIG_UGETH_MAGIC_PACKET is not set
489# CONFIG_UGETH_FILTERING is not set
490# CONFIG_UGETH_TX_ON_DEMOND is not set
491
492#
493# Ethernet (10000 Mbit)
494#
495# CONFIG_CHELSIO_T1 is not set
496# CONFIG_IXGB is not set
497# CONFIG_S2IO is not set
498# CONFIG_MYRI10GE is not set
499
500#
501# Token Ring devices
502#
503# CONFIG_TR is not set
504
505#
506# Wireless LAN (non-hamradio)
507#
508# CONFIG_NET_RADIO is not set
509
510#
511# Wan interfaces
512#
513# CONFIG_WAN is not set
514# CONFIG_FDDI is not set
515# CONFIG_HIPPI is not set
516# CONFIG_PPP is not set
517# CONFIG_SLIP is not set
518# CONFIG_NET_FC is not set
519# CONFIG_SHAPER is not set
520# CONFIG_NETCONSOLE is not set
521# CONFIG_NETPOLL is not set
522# CONFIG_NET_POLL_CONTROLLER is not set
523
524#
525# ISDN subsystem
526#
527# CONFIG_ISDN is not set
528
529#
530# Telephony Support
531#
532# CONFIG_PHONE is not set
533
534#
535# Input device support
536#
537CONFIG_INPUT=y
538
539#
540# Userland interfaces
541#
542# CONFIG_INPUT_MOUSEDEV is not set
543# CONFIG_INPUT_JOYDEV is not set
544# CONFIG_INPUT_TSDEV is not set
545# CONFIG_INPUT_EVDEV is not set
546# CONFIG_INPUT_EVBUG is not set
547
548#
549# Input Device Drivers
550#
551# CONFIG_INPUT_KEYBOARD is not set
552# CONFIG_INPUT_MOUSE is not set
553# CONFIG_INPUT_JOYSTICK is not set
554# CONFIG_INPUT_TOUCHSCREEN is not set
555# CONFIG_INPUT_MISC is not set
556
557#
558# Hardware I/O ports
559#
560# CONFIG_SERIO is not set
561# CONFIG_GAMEPORT is not set
562
563#
564# Character devices
565#
566# CONFIG_VT is not set
567# CONFIG_SERIAL_NONSTANDARD is not set
568
569#
570# Serial drivers
571#
572CONFIG_SERIAL_8250=y
573CONFIG_SERIAL_8250_CONSOLE=y
574CONFIG_SERIAL_8250_PCI=y
575CONFIG_SERIAL_8250_NR_UARTS=4
576CONFIG_SERIAL_8250_RUNTIME_UARTS=4
577# CONFIG_SERIAL_8250_EXTENDED is not set
578
579#
580# Non-8250 serial port support
581#
582CONFIG_SERIAL_CORE=y
583CONFIG_SERIAL_CORE_CONSOLE=y
584# CONFIG_SERIAL_JSM is not set
585CONFIG_UNIX98_PTYS=y
586CONFIG_LEGACY_PTYS=y
587CONFIG_LEGACY_PTY_COUNT=256
588
589#
590# IPMI
591#
592# CONFIG_IPMI_HANDLER is not set
593
594#
595# Watchdog Cards
596#
597CONFIG_WATCHDOG=y
598# CONFIG_WATCHDOG_NOWAYOUT is not set
599
600#
601# Watchdog Device Drivers
602#
603# CONFIG_SOFT_WATCHDOG is not set
604CONFIG_83xx_WDT=y
605
606#
607# PCI-based Watchdog Cards
608#
609# CONFIG_PCIPCWATCHDOG is not set
610# CONFIG_WDTPCI is not set
611CONFIG_HW_RANDOM=y
612# CONFIG_NVRAM is not set
613CONFIG_GEN_RTC=y
614# CONFIG_GEN_RTC_X is not set
615# CONFIG_DTLK is not set
616# CONFIG_R3964 is not set
617# CONFIG_APPLICOM is not set
618
619#
620# Ftape, the floppy tape device driver
621#
622# CONFIG_AGP is not set
623# CONFIG_DRM is not set
624# CONFIG_RAW_DRIVER is not set
625
626#
627# TPM devices
628#
629# CONFIG_TCG_TPM is not set
630# CONFIG_TELCLOCK is not set
631
632#
633# I2C support
634#
635CONFIG_I2C=y
636CONFIG_I2C_CHARDEV=y
637
638#
639# I2C Algorithms
640#
641# CONFIG_I2C_ALGOBIT is not set
642# CONFIG_I2C_ALGOPCF is not set
643# CONFIG_I2C_ALGOPCA is not set
644
645#
646# I2C Hardware Bus support
647#
648# CONFIG_I2C_ALI1535 is not set
649# CONFIG_I2C_ALI1563 is not set
650# CONFIG_I2C_ALI15X3 is not set
651# CONFIG_I2C_AMD756 is not set
652# CONFIG_I2C_AMD8111 is not set
653# CONFIG_I2C_I801 is not set
654# CONFIG_I2C_I810 is not set
655# CONFIG_I2C_PIIX4 is not set
656CONFIG_I2C_MPC=y
657# CONFIG_I2C_NFORCE2 is not set
658# CONFIG_I2C_OCORES is not set
659# CONFIG_I2C_PARPORT_LIGHT is not set
660# CONFIG_I2C_PROSAVAGE is not set
661# CONFIG_I2C_SAVAGE4 is not set
662# CONFIG_I2C_SIS5595 is not set
663# CONFIG_I2C_SIS630 is not set
664# CONFIG_I2C_SIS96X is not set
665# CONFIG_I2C_STUB is not set
666# CONFIG_I2C_VIA is not set
667# CONFIG_I2C_VIAPRO is not set
668# CONFIG_I2C_VOODOO3 is not set
669# CONFIG_I2C_PCA_ISA is not set
670
671#
672# Miscellaneous I2C Chip support
673#
674# CONFIG_SENSORS_DS1337 is not set
675# CONFIG_SENSORS_DS1374 is not set
676# CONFIG_SENSORS_EEPROM is not set
677# CONFIG_SENSORS_PCF8574 is not set
678# CONFIG_SENSORS_PCA9539 is not set
679# CONFIG_SENSORS_PCF8591 is not set
680# CONFIG_SENSORS_M41T00 is not set
681# CONFIG_SENSORS_MAX6875 is not set
682# CONFIG_I2C_DEBUG_CORE is not set
683# CONFIG_I2C_DEBUG_ALGO is not set
684# CONFIG_I2C_DEBUG_BUS is not set
685# CONFIG_I2C_DEBUG_CHIP is not set
686
687#
688# SPI support
689#
690# CONFIG_SPI is not set
691# CONFIG_SPI_MASTER is not set
692
693#
694# Dallas's 1-wire bus
695#
696
697#
698# Hardware Monitoring support
699#
700CONFIG_HWMON=y
701# CONFIG_HWMON_VID is not set
702# CONFIG_SENSORS_ABITUGURU is not set
703# CONFIG_SENSORS_ADM1021 is not set
704# CONFIG_SENSORS_ADM1025 is not set
705# CONFIG_SENSORS_ADM1026 is not set
706# CONFIG_SENSORS_ADM1031 is not set
707# CONFIG_SENSORS_ADM9240 is not set
708# CONFIG_SENSORS_ASB100 is not set
709# CONFIG_SENSORS_ATXP1 is not set
710# CONFIG_SENSORS_DS1621 is not set
711# CONFIG_SENSORS_F71805F is not set
712# CONFIG_SENSORS_FSCHER is not set
713# CONFIG_SENSORS_FSCPOS is not set
714# CONFIG_SENSORS_GL518SM is not set
715# CONFIG_SENSORS_GL520SM is not set
716# CONFIG_SENSORS_IT87 is not set
717# CONFIG_SENSORS_LM63 is not set
718# CONFIG_SENSORS_LM75 is not set
719# CONFIG_SENSORS_LM77 is not set
720# CONFIG_SENSORS_LM78 is not set
721# CONFIG_SENSORS_LM80 is not set
722# CONFIG_SENSORS_LM83 is not set
723# CONFIG_SENSORS_LM85 is not set
724# CONFIG_SENSORS_LM87 is not set
725# CONFIG_SENSORS_LM90 is not set
726# CONFIG_SENSORS_LM92 is not set
727# CONFIG_SENSORS_MAX1619 is not set
728# CONFIG_SENSORS_PC87360 is not set
729# CONFIG_SENSORS_SIS5595 is not set
730# CONFIG_SENSORS_SMSC47M1 is not set
731# CONFIG_SENSORS_SMSC47M192 is not set
732# CONFIG_SENSORS_SMSC47B397 is not set
733# CONFIG_SENSORS_VIA686A is not set
734# CONFIG_SENSORS_VT8231 is not set
735# CONFIG_SENSORS_W83781D is not set
736# CONFIG_SENSORS_W83791D is not set
737# CONFIG_SENSORS_W83792D is not set
738# CONFIG_SENSORS_W83L785TS is not set
739# CONFIG_SENSORS_W83627HF is not set
740# CONFIG_SENSORS_W83627EHF is not set
741# CONFIG_HWMON_DEBUG_CHIP is not set
742
743#
744# Misc devices
745#
746
747#
748# Multimedia devices
749#
750# CONFIG_VIDEO_DEV is not set
751CONFIG_VIDEO_V4L2=y
752
753#
754# Digital Video Broadcasting Devices
755#
756# CONFIG_DVB is not set
757
758#
759# Graphics support
760#
761CONFIG_FIRMWARE_EDID=y
762# CONFIG_FB is not set
763# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
764
765#
766# Sound
767#
768# CONFIG_SOUND is not set
769
770#
771# USB support
772#
773CONFIG_USB_ARCH_HAS_HCD=y
774CONFIG_USB_ARCH_HAS_OHCI=y
775CONFIG_USB_ARCH_HAS_EHCI=y
776# CONFIG_USB is not set
777
778#
779# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
780#
781
782#
783# USB Gadget Support
784#
785# CONFIG_USB_GADGET is not set
786
787#
788# MMC/SD Card support
789#
790# CONFIG_MMC is not set
791
792#
793# LED devices
794#
795# CONFIG_NEW_LEDS is not set
796
797#
798# LED drivers
799#
800
801#
802# LED Triggers
803#
804
805#
806# InfiniBand support
807#
808# CONFIG_INFINIBAND is not set
809
810#
811# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
812#
813
814#
815# Real Time Clock
816#
817# CONFIG_RTC_CLASS is not set
818
819#
820# DMA Engine support
821#
822# CONFIG_DMA_ENGINE is not set
823
824#
825# DMA Clients
826#
827
828#
829# DMA Devices
830#
831
832#
833# File systems
834#
835CONFIG_EXT2_FS=y
836# CONFIG_EXT2_FS_XATTR is not set
837# CONFIG_EXT2_FS_XIP is not set
838CONFIG_EXT3_FS=y
839CONFIG_EXT3_FS_XATTR=y
840# CONFIG_EXT3_FS_POSIX_ACL is not set
841# CONFIG_EXT3_FS_SECURITY is not set
842CONFIG_JBD=y
843# CONFIG_JBD_DEBUG is not set
844CONFIG_FS_MBCACHE=y
845# CONFIG_REISERFS_FS is not set
846# CONFIG_JFS_FS is not set
847# CONFIG_FS_POSIX_ACL is not set
848# CONFIG_XFS_FS is not set
849# CONFIG_OCFS2_FS is not set
850# CONFIG_MINIX_FS is not set
851# CONFIG_ROMFS_FS is not set
852CONFIG_INOTIFY=y
853CONFIG_INOTIFY_USER=y
854# CONFIG_QUOTA is not set
855CONFIG_DNOTIFY=y
856# CONFIG_AUTOFS_FS is not set
857# CONFIG_AUTOFS4_FS is not set
858# CONFIG_FUSE_FS is not set
859
860#
861# CD-ROM/DVD Filesystems
862#
863# CONFIG_ISO9660_FS is not set
864# CONFIG_UDF_FS is not set
865
866#
867# DOS/FAT/NT Filesystems
868#
869# CONFIG_MSDOS_FS is not set
870# CONFIG_VFAT_FS is not set
871# CONFIG_NTFS_FS is not set
872
873#
874# Pseudo filesystems
875#
876CONFIG_PROC_FS=y
877CONFIG_PROC_KCORE=y
878CONFIG_SYSFS=y
879CONFIG_TMPFS=y
880# CONFIG_HUGETLB_PAGE is not set
881CONFIG_RAMFS=y
882# CONFIG_CONFIGFS_FS is not set
883
884#
885# Miscellaneous filesystems
886#
887# CONFIG_ADFS_FS is not set
888# CONFIG_AFFS_FS is not set
889# CONFIG_HFS_FS is not set
890# CONFIG_HFSPLUS_FS is not set
891# CONFIG_BEFS_FS is not set
892# CONFIG_BFS_FS is not set
893# CONFIG_EFS_FS is not set
894# CONFIG_CRAMFS is not set
895# CONFIG_VXFS_FS is not set
896# CONFIG_HPFS_FS is not set
897# CONFIG_QNX4FS_FS is not set
898# CONFIG_SYSV_FS is not set
899# CONFIG_UFS_FS is not set
900
901#
902# Network File Systems
903#
904CONFIG_NFS_FS=y
905CONFIG_NFS_V3=y
906# CONFIG_NFS_V3_ACL is not set
907CONFIG_NFS_V4=y
908# CONFIG_NFS_DIRECTIO is not set
909# CONFIG_NFSD is not set
910CONFIG_ROOT_NFS=y
911CONFIG_LOCKD=y
912CONFIG_LOCKD_V4=y
913CONFIG_NFS_COMMON=y
914CONFIG_SUNRPC=y
915CONFIG_SUNRPC_GSS=y
916CONFIG_RPCSEC_GSS_KRB5=y
917# CONFIG_RPCSEC_GSS_SPKM3 is not set
918# CONFIG_SMB_FS is not set
919# CONFIG_CIFS is not set
920# CONFIG_NCP_FS is not set
921# CONFIG_CODA_FS is not set
922# CONFIG_AFS_FS is not set
923# CONFIG_9P_FS is not set
924
925#
926# Partition Types
927#
928CONFIG_PARTITION_ADVANCED=y
929# CONFIG_ACORN_PARTITION is not set
930# CONFIG_OSF_PARTITION is not set
931# CONFIG_AMIGA_PARTITION is not set
932# CONFIG_ATARI_PARTITION is not set
933# CONFIG_MAC_PARTITION is not set
934# CONFIG_MSDOS_PARTITION is not set
935# CONFIG_LDM_PARTITION is not set
936# CONFIG_SGI_PARTITION is not set
937# CONFIG_ULTRIX_PARTITION is not set
938# CONFIG_SUN_PARTITION is not set
939# CONFIG_KARMA_PARTITION is not set
940# CONFIG_EFI_PARTITION is not set
941
942#
943# Native Language Support
944#
945# CONFIG_NLS is not set
946
947#
948# QE Options
949#
950# CONFIG_UCC_SLOW is not set
951CONFIG_UCC_FAST=y
952CONFIG_UCC=y
953
954#
955# Library routines
956#
957# CONFIG_CRC_CCITT is not set
958# CONFIG_CRC16 is not set
959CONFIG_CRC32=y
960# CONFIG_LIBCRC32C is not set
961CONFIG_PLIST=y
962
963#
964# Instrumentation Support
965#
966# CONFIG_PROFILING is not set
967
968#
969# Kernel hacking
970#
971# CONFIG_PRINTK_TIME is not set
972# CONFIG_MAGIC_SYSRQ is not set
973# CONFIG_UNUSED_SYMBOLS is not set
974# CONFIG_DEBUG_KERNEL is not set
975CONFIG_LOG_BUF_SHIFT=14
976# CONFIG_DEBUG_FS is not set
977# CONFIG_BOOTX_TEXT is not set
978# CONFIG_SERIAL_TEXT_DEBUG is not set
979# CONFIG_PPC_EARLY_DEBUG is not set
980
981#
982# Security options
983#
984# CONFIG_KEYS is not set
985# CONFIG_SECURITY is not set
986
987#
988# Cryptographic options
989#
990CONFIG_CRYPTO=y
991# CONFIG_CRYPTO_HMAC is not set
992# CONFIG_CRYPTO_NULL is not set
993# CONFIG_CRYPTO_MD4 is not set
994CONFIG_CRYPTO_MD5=y
995# CONFIG_CRYPTO_SHA1 is not set
996# CONFIG_CRYPTO_SHA256 is not set
997# CONFIG_CRYPTO_SHA512 is not set
998# CONFIG_CRYPTO_WP512 is not set
999# CONFIG_CRYPTO_TGR192 is not set
1000CONFIG_CRYPTO_DES=y
1001# CONFIG_CRYPTO_BLOWFISH is not set
1002# CONFIG_CRYPTO_TWOFISH is not set
1003# CONFIG_CRYPTO_SERPENT is not set
1004# CONFIG_CRYPTO_AES is not set
1005# CONFIG_CRYPTO_CAST5 is not set
1006# CONFIG_CRYPTO_CAST6 is not set
1007# CONFIG_CRYPTO_TEA is not set
1008# CONFIG_CRYPTO_ARC4 is not set
1009# CONFIG_CRYPTO_KHAZAD is not set
1010# CONFIG_CRYPTO_ANUBIS is not set
1011# CONFIG_CRYPTO_DEFLATE is not set
1012# CONFIG_CRYPTO_MICHAEL_MIC is not set
1013# CONFIG_CRYPTO_CRC32C is not set
1014# CONFIG_CRYPTO_TEST is not set
1015
1016#
1017# Hardware crypto devices
1018#
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 190a57e20765..47a613cdd775 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -763,10 +763,10 @@ struct cpu_spec cpu_specs[] = {
763 .cpu_setup = __setup_cpu_603, 763 .cpu_setup = __setup_cpu_603,
764 .platform = "ppc603", 764 .platform = "ppc603",
765 }, 765 },
766 { /* e300 (a 603e core, plus some) on 83xx */ 766 { /* e300c1 (a 603e core, plus some) on 83xx */
767 .pvr_mask = 0x7fff0000, 767 .pvr_mask = 0x7fff0000,
768 .pvr_value = 0x00830000, 768 .pvr_value = 0x00830000,
769 .cpu_name = "e300", 769 .cpu_name = "e300c1",
770 .cpu_features = CPU_FTRS_E300, 770 .cpu_features = CPU_FTRS_E300,
771 .cpu_user_features = COMMON_USER, 771 .cpu_user_features = COMMON_USER,
772 .icache_bsize = 32, 772 .icache_bsize = 32,
@@ -774,6 +774,17 @@ struct cpu_spec cpu_specs[] = {
774 .cpu_setup = __setup_cpu_603, 774 .cpu_setup = __setup_cpu_603,
775 .platform = "ppc603", 775 .platform = "ppc603",
776 }, 776 },
777 { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */
778 .pvr_mask = 0x7fff0000,
779 .pvr_value = 0x00840000,
780 .cpu_name = "e300c2",
781 .cpu_features = CPU_FTRS_E300,
782 .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
783 .icache_bsize = 32,
784 .dcache_bsize = 32,
785 .cpu_setup = __setup_cpu_603,
786 .platform = "ppc603",
787 },
777 { /* default match, we assume split I/D cache & TB (non-601)... */ 788 { /* default match, we assume split I/D cache & TB (non-601)... */
778 .pvr_mask = 0x00000000, 789 .pvr_mask = 0x00000000,
779 .pvr_value = 0x00000000, 790 .pvr_value = 0x00000000,
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2cd872b5283b..748e74fcf541 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -27,10 +27,7 @@
27#include <asm/ppc_asm.h> 27#include <asm/ppc_asm.h>
28#include <asm/asm-offsets.h> 28#include <asm/asm-offsets.h>
29#include <asm/cputable.h> 29#include <asm/cputable.h>
30 30#include <asm/firmware.h>
31#ifdef CONFIG_PPC_ISERIES
32#define DO_SOFT_DISABLE
33#endif
34 31
35/* 32/*
36 * System calls. 33 * System calls.
@@ -91,6 +88,7 @@ system_call_common:
91 ld r11,exception_marker@toc(r2) 88 ld r11,exception_marker@toc(r2)
92 std r11,-16(r9) /* "regshere" marker */ 89 std r11,-16(r9) /* "regshere" marker */
93#ifdef CONFIG_PPC_ISERIES 90#ifdef CONFIG_PPC_ISERIES
91BEGIN_FW_FTR_SECTION
94 /* Hack for handling interrupts when soft-enabling on iSeries */ 92 /* Hack for handling interrupts when soft-enabling on iSeries */
95 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */ 93 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
96 andi. r10,r12,MSR_PR /* from kernel */ 94 andi. r10,r12,MSR_PR /* from kernel */
@@ -98,6 +96,7 @@ system_call_common:
98 beq hardware_interrupt_entry 96 beq hardware_interrupt_entry
99 lbz r10,PACAPROCENABLED(r13) 97 lbz r10,PACAPROCENABLED(r13)
100 std r10,SOFTE(r1) 98 std r10,SOFTE(r1)
99END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
101#endif 100#endif
102 mfmsr r11 101 mfmsr r11
103 ori r11,r11,MSR_EE 102 ori r11,r11,MSR_EE
@@ -462,6 +461,7 @@ _GLOBAL(ret_from_except_lite)
462 461
463restore: 462restore:
464#ifdef CONFIG_PPC_ISERIES 463#ifdef CONFIG_PPC_ISERIES
464BEGIN_FW_FTR_SECTION
465 ld r5,SOFTE(r1) 465 ld r5,SOFTE(r1)
466 cmpdi 0,r5,0 466 cmpdi 0,r5,0
467 beq 4f 467 beq 4f
@@ -480,6 +480,7 @@ restore:
480 b .ret_from_except_lite /* loop back and handle more */ 480 b .ret_from_except_lite /* loop back and handle more */
481 481
4824: stb r5,PACAPROCENABLED(r13) 4824: stb r5,PACAPROCENABLED(r13)
483END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
483#endif 484#endif
484 485
485 ld r3,_MSR(r1) 486 ld r3,_MSR(r1)
@@ -538,18 +539,23 @@ do_work:
538 lwz r8,TI_PREEMPT(r9) 539 lwz r8,TI_PREEMPT(r9)
539 cmpwi cr1,r8,0 540 cmpwi cr1,r8,0
540#ifdef CONFIG_PPC_ISERIES 541#ifdef CONFIG_PPC_ISERIES
542BEGIN_FW_FTR_SECTION
541 ld r0,SOFTE(r1) 543 ld r0,SOFTE(r1)
542 cmpdi r0,0 544 cmpdi r0,0
543#else 545END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
544 andi. r0,r3,MSR_EE
545#endif 546#endif
547BEGIN_FW_FTR_SECTION
548 andi. r0,r3,MSR_EE
549END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
546 crandc eq,cr1*4+eq,eq 550 crandc eq,cr1*4+eq,eq
547 bne restore 551 bne restore
548 /* here we are preempting the current task */ 552 /* here we are preempting the current task */
5491: 5531:
550#ifdef CONFIG_PPC_ISERIES 554#ifdef CONFIG_PPC_ISERIES
555BEGIN_FW_FTR_SECTION
551 li r0,1 556 li r0,1
552 stb r0,PACAPROCENABLED(r13) 557 stb r0,PACAPROCENABLED(r13)
558END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
553#endif 559#endif
554 ori r10,r10,MSR_EE 560 ori r10,r10,MSR_EE
555 mtmsrd r10,1 /* reenable interrupts */ 561 mtmsrd r10,1 /* reenable interrupts */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 3065b472b95d..645c7f10fb28 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -33,6 +33,7 @@
33#include <asm/hvcall.h> 33#include <asm/hvcall.h>
34#include <asm/iseries/lpar_map.h> 34#include <asm/iseries/lpar_map.h>
35#include <asm/thread_info.h> 35#include <asm/thread_info.h>
36#include <asm/firmware.h>
36 37
37#ifdef CONFIG_PPC_ISERIES 38#ifdef CONFIG_PPC_ISERIES
38#define DO_SOFT_DISABLE 39#define DO_SOFT_DISABLE
@@ -365,19 +366,28 @@ label##_iSeries: \
365 366
366#ifdef DO_SOFT_DISABLE 367#ifdef DO_SOFT_DISABLE
367#define DISABLE_INTS \ 368#define DISABLE_INTS \
369BEGIN_FW_FTR_SECTION; \
368 lbz r10,PACAPROCENABLED(r13); \ 370 lbz r10,PACAPROCENABLED(r13); \
369 li r11,0; \ 371 li r11,0; \
370 std r10,SOFTE(r1); \ 372 std r10,SOFTE(r1); \
371 mfmsr r10; \ 373 mfmsr r10; \
372 stb r11,PACAPROCENABLED(r13); \ 374 stb r11,PACAPROCENABLED(r13); \
373 ori r10,r10,MSR_EE; \ 375 ori r10,r10,MSR_EE; \
374 mtmsrd r10,1 376 mtmsrd r10,1; \
377END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
375 378
376#define ENABLE_INTS \ 379#define ENABLE_INTS \
380BEGIN_FW_FTR_SECTION; \
377 lbz r10,PACAPROCENABLED(r13); \ 381 lbz r10,PACAPROCENABLED(r13); \
378 mfmsr r11; \ 382 mfmsr r11; \
379 std r10,SOFTE(r1); \ 383 std r10,SOFTE(r1); \
380 ori r11,r11,MSR_EE; \ 384 ori r11,r11,MSR_EE; \
385END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES); \
386BEGIN_FW_FTR_SECTION; \
387 ld r12,_MSR(r1); \
388 mfmsr r11; \
389 rlwimi r11,r12,0,MSR_EE; \
390END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \
381 mtmsrd r11,1 391 mtmsrd r11,1
382 392
383#else /* hard enable/disable interrupts */ 393#else /* hard enable/disable interrupts */
@@ -1071,8 +1081,10 @@ _GLOBAL(slb_miss_realmode)
1071 ld r3,PACA_EXSLB+EX_R3(r13) 1081 ld r3,PACA_EXSLB+EX_R3(r13)
1072 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 1082 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1073#ifdef CONFIG_PPC_ISERIES 1083#ifdef CONFIG_PPC_ISERIES
1084BEGIN_FW_FTR_SECTION
1074 ld r11,PACALPPACAPTR(r13) 1085 ld r11,PACALPPACAPTR(r13)
1075 ld r11,LPPACASRR0(r11) /* get SRR0 value */ 1086 ld r11,LPPACASRR0(r11) /* get SRR0 value */
1087END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1076#endif /* CONFIG_PPC_ISERIES */ 1088#endif /* CONFIG_PPC_ISERIES */
1077 1089
1078 mtlr r10 1090 mtlr r10
@@ -1087,8 +1099,10 @@ _GLOBAL(slb_miss_realmode)
1087.machine pop 1099.machine pop
1088 1100
1089#ifdef CONFIG_PPC_ISERIES 1101#ifdef CONFIG_PPC_ISERIES
1102BEGIN_FW_FTR_SECTION
1090 mtspr SPRN_SRR0,r11 1103 mtspr SPRN_SRR0,r11
1091 mtspr SPRN_SRR1,r12 1104 mtspr SPRN_SRR1,r12
1105END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1092#endif /* CONFIG_PPC_ISERIES */ 1106#endif /* CONFIG_PPC_ISERIES */
1093 ld r9,PACA_EXSLB+EX_R9(r13) 1107 ld r9,PACA_EXSLB+EX_R9(r13)
1094 ld r10,PACA_EXSLB+EX_R10(r13) 1108 ld r10,PACA_EXSLB+EX_R10(r13)
@@ -1301,6 +1315,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1301 cmpdi r3,0 /* see if hash_page succeeded */ 1315 cmpdi r3,0 /* see if hash_page succeeded */
1302 1316
1303#ifdef DO_SOFT_DISABLE 1317#ifdef DO_SOFT_DISABLE
1318BEGIN_FW_FTR_SECTION
1304 /* 1319 /*
1305 * If we had interrupts soft-enabled at the point where the 1320 * If we had interrupts soft-enabled at the point where the
1306 * DSI/ISI occurred, and an interrupt came in during hash_page, 1321 * DSI/ISI occurred, and an interrupt came in during hash_page,
@@ -1321,12 +1336,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
1321 ld r3,SOFTE(r1) 1336 ld r3,SOFTE(r1)
1322 bl .local_irq_restore 1337 bl .local_irq_restore
1323 b 11f 1338 b 11f
1324#else 1339END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1340#endif
1341BEGIN_FW_FTR_SECTION
1325 beq fast_exception_return /* Return from exception on success */ 1342 beq fast_exception_return /* Return from exception on success */
1326 ble- 12f /* Failure return from hash_page */ 1343 ble- 12f /* Failure return from hash_page */
1327 1344
1328 /* fall through */ 1345 /* fall through */
1329#endif 1346END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
1330 1347
1331/* Here we have a page fault that hash_page can't handle. */ 1348/* Here we have a page fault that hash_page can't handle. */
1332_GLOBAL(handle_page_fault) 1349_GLOBAL(handle_page_fault)
@@ -1861,7 +1878,9 @@ _GLOBAL(__secondary_start)
1861 LOAD_REG_ADDR(r3, .start_secondary_prolog) 1878 LOAD_REG_ADDR(r3, .start_secondary_prolog)
1862 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) 1879 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
1863#ifdef DO_SOFT_DISABLE 1880#ifdef DO_SOFT_DISABLE
1881BEGIN_FW_FTR_SECTION
1864 ori r4,r4,MSR_EE 1882 ori r4,r4,MSR_EE
1883END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
1865#endif 1884#endif
1866 mtspr SPRN_SRR0,r3 1885 mtspr SPRN_SRR0,r3
1867 mtspr SPRN_SRR1,r4 1886 mtspr SPRN_SRR1,r4
@@ -1986,6 +2005,7 @@ _STATIC(start_here_common)
1986 */ 2005 */
1987 li r3,0 2006 li r3,0
1988 bl .do_cpu_ftr_fixups 2007 bl .do_cpu_ftr_fixups
2008 bl .do_fw_ftr_fixups
1989 2009
1990 /* ptr to current */ 2010 /* ptr to current */
1991 LOAD_REG_IMMEDIATE(r4, init_task) 2011 LOAD_REG_IMMEDIATE(r4, init_task)
@@ -2000,11 +2020,13 @@ _STATIC(start_here_common)
2000 /* Load up the kernel context */ 2020 /* Load up the kernel context */
20015: 20215:
2002#ifdef DO_SOFT_DISABLE 2022#ifdef DO_SOFT_DISABLE
2023BEGIN_FW_FTR_SECTION
2003 li r5,0 2024 li r5,0
2004 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */ 2025 stb r5,PACAPROCENABLED(r13) /* Soft Disabled */
2005 mfmsr r5 2026 mfmsr r5
2006 ori r5,r5,MSR_EE /* Hard Enabled */ 2027 ori r5,r5,MSR_EE /* Hard Enabled */
2007 mtmsrd r5 2028 mtmsrd r5
2029END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
2008#endif 2030#endif
2009 2031
2010 bl .start_kernel 2032 bl .start_kernel
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 9c54eccad993..41521b30c3cd 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -325,6 +325,52 @@ _GLOBAL(do_cpu_ftr_fixups)
325 isync 325 isync
326 b 1b 326 b 1b
327 327
328/*
329 * do_fw_ftr_fixups - goes through the list of firmware feature fixups
330 * and writes nop's over sections of code that don't apply for this firmware.
331 * r3 = data offset (not changed)
332 */
333_GLOBAL(do_fw_ftr_fixups)
334 /* Get firmware features */
335 LOAD_REG_IMMEDIATE(r6,powerpc_firmware_features)
336 sub r6,r6,r3
337 ld r4,0(r6)
338 /* Get the fixup table */
339 LOAD_REG_IMMEDIATE(r6,__start___fw_ftr_fixup)
340 sub r6,r6,r3
341 LOAD_REG_IMMEDIATE(r7,__stop___fw_ftr_fixup)
342 sub r7,r7,r3
343 /* Do the fixup */
3441: cmpld r6,r7
345 bgelr
346 addi r6,r6,32
347 ld r8,-32(r6) /* mask */
348 and r8,r8,r4
349 ld r9,-24(r6) /* value */
350 cmpld r8,r9
351 beq 1b
352 ld r8,-16(r6) /* section begin */
353 ld r9,-8(r6) /* section end */
354 subf. r9,r8,r9
355 beq 1b
356 /* write nops over the section of code */
357 /* todo: if large section, add a branch at the start of it */
358 srwi r9,r9,2
359 mtctr r9
360 sub r8,r8,r3
361 lis r0,0x60000000@h /* nop */
3623: stw r0,0(r8)
363BEGIN_FTR_SECTION
364 dcbst 0,r8 /* suboptimal, but simpler */
365 sync
366 icbi 0,r8
367END_FTR_SECTION_IFSET(CPU_FTR_SPLIT_ID_CACHE)
368 addi r8,r8,4
369 bdnz 3b
370 sync /* additional sync needed on g4 */
371 isync
372 b 1b
373
328#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) 374#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
329/* 375/*
330 * Do an IO access in real mode 376 * Do an IO access in real mode
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index c1b1e14775e4..78d3c0fc8dfb 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -30,6 +30,7 @@
30#include <asm/byteorder.h> 30#include <asm/byteorder.h>
31#include <asm/machdep.h> 31#include <asm/machdep.h>
32#include <asm/ppc-pci.h> 32#include <asm/ppc-pci.h>
33#include <asm/firmware.h>
33 34
34#ifdef DEBUG 35#ifdef DEBUG
35#include <asm/udbg.h> 36#include <asm/udbg.h>
@@ -209,7 +210,6 @@ void pcibios_free_controller(struct pci_controller *phb)
209 kfree(phb); 210 kfree(phb);
210} 211}
211 212
212#ifndef CONFIG_PPC_ISERIES
213void __devinit pcibios_claim_one_bus(struct pci_bus *b) 213void __devinit pcibios_claim_one_bus(struct pci_bus *b)
214{ 214{
215 struct pci_dev *dev; 215 struct pci_dev *dev;
@@ -238,10 +238,12 @@ static void __init pcibios_claim_of_setup(void)
238{ 238{
239 struct pci_bus *b; 239 struct pci_bus *b;
240 240
241 if (firmware_has_feature(FW_FEATURE_ISERIES))
242 return;
243
241 list_for_each_entry(b, &pci_root_buses, node) 244 list_for_each_entry(b, &pci_root_buses, node)
242 pcibios_claim_one_bus(b); 245 pcibios_claim_one_bus(b);
243} 246}
244#endif
245 247
246#ifdef CONFIG_PPC_MULTIPLATFORM 248#ifdef CONFIG_PPC_MULTIPLATFORM
247static u32 get_int_prop(struct device_node *np, const char *name, u32 def) 249static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
@@ -554,9 +556,8 @@ static int __init pcibios_init(void)
554 */ 556 */
555 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; 557 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
556 558
557#ifdef CONFIG_PPC_ISERIES 559 if (firmware_has_feature(FW_FEATURE_ISERIES))
558 iSeries_pcibios_init(); 560 iSeries_pcibios_init();
559#endif
560 561
561 printk(KERN_DEBUG "PCI: Probing PCI hardware\n"); 562 printk(KERN_DEBUG "PCI: Probing PCI hardware\n");
562 563
@@ -566,15 +567,15 @@ static int __init pcibios_init(void)
566 pci_bus_add_devices(hose->bus); 567 pci_bus_add_devices(hose->bus);
567 } 568 }
568 569
569#ifndef CONFIG_PPC_ISERIES 570 if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
570 if (pci_probe_only) 571 if (pci_probe_only)
571 pcibios_claim_of_setup(); 572 pcibios_claim_of_setup();
572 else 573 else
573 /* FIXME: `else' will be removed when 574 /* FIXME: `else' will be removed when
574 pci_assign_unassigned_resources() is able to work 575 pci_assign_unassigned_resources() is able to work
575 correctly with [partially] allocated PCI tree. */ 576 correctly with [partially] allocated PCI tree. */
576 pci_assign_unassigned_resources(); 577 pci_assign_unassigned_resources();
577#endif /* !CONFIG_PPC_ISERIES */ 578 }
578 579
579 /* Call machine dependent final fixup */ 580 /* Call machine dependent final fixup */
580 if (ppc_md.pcibios_fixup) 581 if (ppc_md.pcibios_fixup)
@@ -586,8 +587,9 @@ static int __init pcibios_init(void)
586 printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev)); 587 printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
587 588
588#ifdef CONFIG_PPC_MULTIPLATFORM 589#ifdef CONFIG_PPC_MULTIPLATFORM
589 /* map in PCI I/O space */ 590 if (!firmware_has_feature(FW_FEATURE_ISERIES))
590 phbs_remap_io(); 591 /* map in PCI I/O space */
592 phbs_remap_io();
591#endif 593#endif
592 594
593 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); 595 printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");
@@ -637,13 +639,13 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
637 */ 639 */
638int pci_domain_nr(struct pci_bus *bus) 640int pci_domain_nr(struct pci_bus *bus)
639{ 641{
640#ifdef CONFIG_PPC_ISERIES 642 if (firmware_has_feature(FW_FEATURE_ISERIES))
641 return 0; 643 return 0;
642#else 644 else {
643 struct pci_controller *hose = pci_bus_to_host(bus); 645 struct pci_controller *hose = pci_bus_to_host(bus);
644 646
645 return hose->global_number; 647 return hose->global_number;
646#endif 648 }
647} 649}
648 650
649EXPORT_SYMBOL(pci_domain_nr); 651EXPORT_SYMBOL(pci_domain_nr);
@@ -651,12 +653,12 @@ EXPORT_SYMBOL(pci_domain_nr);
651/* Decide whether to display the domain number in /proc */ 653/* Decide whether to display the domain number in /proc */
652int pci_proc_domain(struct pci_bus *bus) 654int pci_proc_domain(struct pci_bus *bus)
653{ 655{
654#ifdef CONFIG_PPC_ISERIES 656 if (firmware_has_feature(FW_FEATURE_ISERIES))
655 return 0; 657 return 0;
656#else 658 else {
657 struct pci_controller *hose = pci_bus_to_host(bus); 659 struct pci_controller *hose = pci_bus_to_host(bus);
658 return hose->buid; 660 return hose->buid;
659#endif 661 }
660} 662}
661 663
662/* 664/*
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 0af3fc1bdcc9..89cfaf49d3de 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -442,31 +442,6 @@ void __init smp_setup_cpu_maps(void)
442} 442}
443#endif /* CONFIG_SMP */ 443#endif /* CONFIG_SMP */
444 444
445int __initdata do_early_xmon;
446#ifdef CONFIG_XMON
447extern int xmon_no_auto_backtrace;
448
449static int __init early_xmon(char *p)
450{
451 /* ensure xmon is enabled */
452 if (p) {
453 if (strncmp(p, "on", 2) == 0)
454 xmon_init(1);
455 if (strncmp(p, "off", 3) == 0)
456 xmon_init(0);
457 if (strncmp(p, "nobt", 4) == 0)
458 xmon_no_auto_backtrace = 1;
459 if (strncmp(p, "early", 5) != 0)
460 return 0;
461 }
462 xmon_init(1);
463 do_early_xmon = 1;
464
465 return 0;
466}
467early_param("xmon", early_xmon);
468#endif
469
470static __init int add_pcspkr(void) 445static __init int add_pcspkr(void)
471{ 446{
472 struct device_node *np; 447 struct device_node *np;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 79a17795d17b..191d0ab09222 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -238,12 +238,11 @@ void __init setup_arch(char **cmdline_p)
238 238
239 smp_setup_cpu_maps(); 239 smp_setup_cpu_maps();
240 240
241#ifdef CONFIG_XMON_DEFAULT
242 xmon_init(1);
243#endif
244 /* Register early console */ 241 /* Register early console */
245 register_early_udbg_console(); 242 register_early_udbg_console();
246 243
244 xmon_setup();
245
247#if defined(CONFIG_KGDB) 246#if defined(CONFIG_KGDB)
248 if (ppc_md.kgdb_map_scc) 247 if (ppc_md.kgdb_map_scc)
249 ppc_md.kgdb_map_scc(); 248 ppc_md.kgdb_map_scc();
@@ -280,9 +279,6 @@ void __init setup_arch(char **cmdline_p)
280 init_mm.end_data = (unsigned long) _edata; 279 init_mm.end_data = (unsigned long) _edata;
281 init_mm.brk = klimit; 280 init_mm.brk = klimit;
282 281
283 if (do_early_xmon)
284 debugger(NULL);
285
286 /* set up the bootmem stuff with available memory */ 282 /* set up the bootmem stuff with available memory */
287 do_init_bootmem(); 283 do_init_bootmem();
288 if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab); 284 if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index cda2dbe70a76..4b2e32eab9dc 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -391,18 +391,14 @@ void __init setup_system(void)
391 find_legacy_serial_ports(); 391 find_legacy_serial_ports();
392 392
393 /* 393 /*
394 * Initialize xmon
395 */
396#ifdef CONFIG_XMON_DEFAULT
397 xmon_init(1);
398#endif
399 /*
400 * Register early console 394 * Register early console
401 */ 395 */
402 register_early_udbg_console(); 396 register_early_udbg_console();
403 397
404 if (do_early_xmon) 398 /*
405 debugger(NULL); 399 * Initialize xmon
400 */
401 xmon_setup();
406 402
407 check_smt_enabled(); 403 check_smt_enabled();
408 smp_setup_cpu_maps(); 404 smp_setup_cpu_maps();
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 02665a02130d..cb0e8d46c3e8 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -132,6 +132,14 @@ SECTIONS
132 *(__ftr_fixup) 132 *(__ftr_fixup)
133 __stop___ftr_fixup = .; 133 __stop___ftr_fixup = .;
134 } 134 }
135#ifdef CONFIG_PPC64
136 . = ALIGN(8);
137 __fw_ftr_fixup : {
138 __start___fw_ftr_fixup = .;
139 *(__fw_ftr_fixup)
140 __stop___fw_ftr_fixup = .;
141 }
142#endif
135 143
136 . = ALIGN(PAGE_SIZE); 144 . = ALIGN(PAGE_SIZE);
137 .init.ramfs : { 145 .init.ramfs : {
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index b1da03165496..ac64f4aaa509 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -63,32 +63,13 @@
63#include <asm/iommu.h> 63#include <asm/iommu.h>
64#include <asm/abs_addr.h> 64#include <asm/abs_addr.h>
65#include <asm/vdso.h> 65#include <asm/vdso.h>
66#include <asm/firmware.h>
66 67
67#include "mmu_decl.h" 68#include "mmu_decl.h"
68 69
69unsigned long ioremap_bot = IMALLOC_BASE; 70unsigned long ioremap_bot = IMALLOC_BASE;
70static unsigned long phbs_io_bot = PHBS_IO_BASE; 71static unsigned long phbs_io_bot = PHBS_IO_BASE;
71 72
72#ifdef CONFIG_PPC_ISERIES
73
74void __iomem *ioremap(unsigned long addr, unsigned long size)
75{
76 return (void __iomem *)addr;
77}
78
79extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
80 unsigned long flags)
81{
82 return (void __iomem *)addr;
83}
84
85void iounmap(volatile void __iomem *addr)
86{
87 return;
88}
89
90#else
91
92/* 73/*
93 * map_io_page currently only called by __ioremap 74 * map_io_page currently only called by __ioremap
94 * map_io_page adds an entry to the ioremap page table 75 * map_io_page adds an entry to the ioremap page table
@@ -161,6 +142,9 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
161 unsigned long pa, ea; 142 unsigned long pa, ea;
162 void __iomem *ret; 143 void __iomem *ret;
163 144
145 if (firmware_has_feature(FW_FEATURE_ISERIES))
146 return (void __iomem *)addr;
147
164 /* 148 /*
165 * Choose an address to map it to. 149 * Choose an address to map it to.
166 * Once the imalloc system is running, we use it. 150 * Once the imalloc system is running, we use it.
@@ -255,6 +239,9 @@ void iounmap(volatile void __iomem *token)
255{ 239{
256 void *addr; 240 void *addr;
257 241
242 if (firmware_has_feature(FW_FEATURE_ISERIES))
243 return;
244
258 if (!mem_init_done) 245 if (!mem_init_done)
259 return; 246 return;
260 247
@@ -315,8 +302,6 @@ int iounmap_explicit(volatile void __iomem *start, unsigned long size)
315 return 0; 302 return 0;
316} 303}
317 304
318#endif
319
320EXPORT_SYMBOL(ioremap); 305EXPORT_SYMBOL(ioremap);
321EXPORT_SYMBOL(__ioremap); 306EXPORT_SYMBOL(__ioremap);
322EXPORT_SYMBOL(iounmap); 307EXPORT_SYMBOL(iounmap);
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index dbc1abbde038..b10e4707d7c1 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -21,6 +21,7 @@
21#include <asm/page.h> 21#include <asm/page.h>
22#include <asm/mmu.h> 22#include <asm/mmu.h>
23#include <asm/pgtable.h> 23#include <asm/pgtable.h>
24#include <asm/firmware.h>
24 25
25/* void slb_allocate_realmode(unsigned long ea); 26/* void slb_allocate_realmode(unsigned long ea);
26 * 27 *
@@ -183,6 +184,7 @@ slb_finish_load:
183 * dont have any LRU information to help us choose a slot. 184 * dont have any LRU information to help us choose a slot.
184 */ 185 */
185#ifdef CONFIG_PPC_ISERIES 186#ifdef CONFIG_PPC_ISERIES
187BEGIN_FW_FTR_SECTION
186 /* 188 /*
187 * On iSeries, the "bolted" stack segment can be cast out on 189 * On iSeries, the "bolted" stack segment can be cast out on
188 * shared processor switch so we need to check for a miss on 190 * shared processor switch so we need to check for a miss on
@@ -194,6 +196,7 @@ slb_finish_load:
194 li r10,SLB_NUM_BOLTED-1 /* Stack goes in last bolted slot */ 196 li r10,SLB_NUM_BOLTED-1 /* Stack goes in last bolted slot */
195 cmpld r9,r3 197 cmpld r9,r3
196 beq 3f 198 beq 3f
199END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
197#endif /* CONFIG_PPC_ISERIES */ 200#endif /* CONFIG_PPC_ISERIES */
198 201
199 ld r10,PACASTABRR(r13) 202 ld r10,PACASTABRR(r13)
diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig
new file mode 100644
index 000000000000..47d841ecf2e2
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/Kconfig
@@ -0,0 +1,21 @@
1menu "Platform support"
2 depends on PPC_82xx
3
4choice
5 prompt "Machine Type"
6 default MPC82xx_ADS
7
8config MPC82xx_ADS
9 bool "Freescale MPC82xx ADS"
10 select DEFAULT_UIMAGE
11 select PQ2ADS
12 select 8272
13 select 8260
14 select CPM2
15 select FSL_SOC
16 help
17 This option enables support for the MPC8272 ADS board
18
19endchoice
20
21endmenu
diff --git a/arch/powerpc/platforms/82xx/Makefile b/arch/powerpc/platforms/82xx/Makefile
new file mode 100644
index 000000000000..d9fd4c84d2e0
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/Makefile
@@ -0,0 +1,5 @@
1#
2# Makefile for the PowerPC 82xx linux kernel.
3#
4obj-$(CONFIG_PPC_82xx) += mpc82xx.o
5obj-$(CONFIG_MPC82xx_ADS) += mpc82xx_ads.o
diff --git a/arch/powerpc/platforms/82xx/m82xx_pci.h b/arch/powerpc/platforms/82xx/m82xx_pci.h
new file mode 100644
index 000000000000..9cd8893b5a32
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/m82xx_pci.h
@@ -0,0 +1,19 @@
1#ifndef _PPC_KERNEL_M82XX_PCI_H
2#define _PPC_KERNEL_M82XX_PCI_H
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <asm/m8260_pci.h>
12
13#define SIU_INT_IRQ1 ((uint)0x13 + CPM_IRQ_OFFSET)
14
15#ifndef _IO_BASE
16#define _IO_BASE isa_io_base
17#endif
18
19#endif /* _PPC_KERNEL_M8260_PCI_H */
diff --git a/arch/powerpc/platforms/82xx/mpc82xx.c b/arch/powerpc/platforms/82xx/mpc82xx.c
new file mode 100644
index 000000000000..89d702de4863
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/mpc82xx.c
@@ -0,0 +1,111 @@
1/*
2 * MPC82xx setup and early boot code plus other random bits.
3 *
4 * Author: Vitaly Bordug <vbordug@ru.mvista.com>
5 *
6 * Copyright (c) 2006 MontaVista Software, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/config.h>
15#include <linux/stddef.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/errno.h>
19#include <linux/reboot.h>
20#include <linux/pci.h>
21#include <linux/interrupt.h>
22#include <linux/kdev_t.h>
23#include <linux/major.h>
24#include <linux/console.h>
25#include <linux/delay.h>
26#include <linux/seq_file.h>
27#include <linux/root_dev.h>
28#include <linux/initrd.h>
29#include <linux/module.h>
30#include <linux/fsl_devices.h>
31#include <linux/fs_uart_pd.h>
32
33#include <asm/system.h>
34#include <asm/pgtable.h>
35#include <asm/page.h>
36#include <asm/atomic.h>
37#include <asm/time.h>
38#include <asm/io.h>
39#include <asm/machdep.h>
40#include <asm/bootinfo.h>
41#include <asm/pci-bridge.h>
42#include <asm/mpc8260.h>
43#include <asm/irq.h>
44#include <mm/mmu_decl.h>
45#include <asm/prom.h>
46#include <asm/cpm2.h>
47#include <asm/udbg.h>
48#include <asm/i8259.h>
49#include <linux/fs_enet_pd.h>
50
51#include <sysdev/fsl_soc.h>
52#include <sysdev/cpm2_pic.h>
53
54#include "pq2ads_pd.h"
55
56static int __init get_freq(char *name, unsigned long *val)
57{
58 struct device_node *cpu;
59 unsigned int *fp;
60 int found = 0;
61
62 /* The cpu node should have timebase and clock frequency properties */
63 cpu = of_find_node_by_type(NULL, "cpu");
64
65 if (cpu) {
66 fp = (unsigned int *)get_property(cpu, name, NULL);
67 if (fp) {
68 found = 1;
69 *val = *fp++;
70 }
71
72 of_node_put(cpu);
73 }
74
75 return found;
76}
77
78void __init m82xx_calibrate_decr(void)
79{
80 ppc_tb_freq = 125000000;
81 if (!get_freq("bus-frequency", &ppc_tb_freq)) {
82 printk(KERN_ERR "WARNING: Estimating decrementer frequency "
83 "(not found)\n");
84 }
85 ppc_tb_freq /= 4;
86 ppc_proc_freq = 1000000000;
87 if (!get_freq("clock-frequency", &ppc_proc_freq))
88 printk(KERN_ERR "WARNING: Estimating processor frequency"
89 "(not found)\n");
90}
91
92void mpc82xx_ads_show_cpuinfo(struct seq_file *m)
93{
94 uint pvid, svid, phid1;
95 uint memsize = total_memory;
96
97 pvid = mfspr(SPRN_PVR);
98 svid = mfspr(SPRN_SVR);
99
100 seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
101 seq_printf(m, "Machine\t\t: %s\n", CPUINFO_MACHINE);
102 seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
103 seq_printf(m, "SVR\t\t: 0x%x\n", svid);
104
105 /* Display cpu Pll setting */
106 phid1 = mfspr(SPRN_HID1);
107 seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
108
109 /* Display the amount of memory */
110 seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024));
111}
diff --git a/arch/powerpc/platforms/82xx/mpc82xx_ads.c b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
new file mode 100644
index 000000000000..4276f087f26e
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/mpc82xx_ads.c
@@ -0,0 +1,661 @@
1/*
2 * MPC82xx_ads setup and early boot code plus other random bits.
3 *
4 * Author: Vitaly Bordug <vbordug@ru.mvista.com>
5 * m82xx_restart fix by Wade Farnsworth <wfarnsworth@mvista.com>
6 *
7 * Copyright (c) 2006 MontaVista Software, Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15
16#include <linux/config.h>
17#include <linux/stddef.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/errno.h>
21#include <linux/reboot.h>
22#include <linux/pci.h>
23#include <linux/interrupt.h>
24#include <linux/kdev_t.h>
25#include <linux/major.h>
26#include <linux/console.h>
27#include <linux/delay.h>
28#include <linux/seq_file.h>
29#include <linux/root_dev.h>
30#include <linux/initrd.h>
31#include <linux/module.h>
32#include <linux/fsl_devices.h>
33#include <linux/fs_uart_pd.h>
34
35#include <asm/system.h>
36#include <asm/pgtable.h>
37#include <asm/page.h>
38#include <asm/atomic.h>
39#include <asm/time.h>
40#include <asm/io.h>
41#include <asm/machdep.h>
42#include <asm/bootinfo.h>
43#include <asm/pci-bridge.h>
44#include <asm/mpc8260.h>
45#include <asm/irq.h>
46#include <mm/mmu_decl.h>
47#include <asm/prom.h>
48#include <asm/cpm2.h>
49#include <asm/udbg.h>
50#include <asm/i8259.h>
51#include <linux/fs_enet_pd.h>
52
53#include <sysdev/fsl_soc.h>
54#include <../sysdev/cpm2_pic.h>
55
56#include "pq2ads_pd.h"
57
58#ifdef CONFIG_PCI
59static uint pci_clk_frq;
60static struct {
61 unsigned long *pci_int_stat_reg;
62 unsigned long *pci_int_mask_reg;
63} pci_regs;
64
65static unsigned long pci_int_base;
66static struct irq_host *pci_pic_host;
67static struct device_node *pci_pic_node;
68#endif
69
70static void __init mpc82xx_ads_pic_init(void)
71{
72 struct device_node *np = of_find_compatible_node(NULL, "cpm-pic", "CPM2");
73 struct resource r;
74 cpm2_map_t *cpm_reg;
75
76 if (np == NULL) {
77 printk(KERN_ERR "PIC init: can not find cpm-pic node\n");
78 return;
79 }
80 if (of_address_to_resource(np, 0, &r)) {
81 printk(KERN_ERR "PIC init: invalid resource\n");
82 of_node_put(np);
83 return;
84 }
85 cpm2_pic_init(np);
86 of_node_put(np);
87
88 /* Initialize the default interrupt mapping priorities,
89 * in case the boot rom changed something on us.
90 */
91 cpm_reg = (cpm2_map_t *) ioremap(get_immrbase(), sizeof(cpm2_map_t));
92 cpm_reg->im_intctl.ic_siprr = 0x05309770;
93 iounmap(cpm_reg);
94#ifdef CONFIG_PCI
95 /* Initialize stuff for the 82xx CPLD IC and install demux */
96 m82xx_pci_init_irq();
97#endif
98}
99
100static void init_fcc1_ioports(struct fs_platform_info *fpi)
101{
102 struct io_port *io;
103 u32 tempval;
104 cpm2_map_t *immap = ioremap(get_immrbase(), sizeof(cpm2_map_t));
105 struct device_node *np;
106 struct resource r;
107 u32 *bcsr;
108
109 np = of_find_node_by_type(NULL, "memory");
110 if (!np) {
111 printk(KERN_INFO "No memory node in device tree\n");
112 return;
113 }
114 if (of_address_to_resource(np, 1, &r)) {
115 printk(KERN_INFO "No memory reg property [1] in devicetree\n");
116 return;
117 }
118 of_node_put(np);
119 bcsr = ioremap(r.start + 4, sizeof(u32));
120 io = &immap->im_ioport;
121
122 /* Enable the PHY */
123 clrbits32(bcsr, BCSR1_FETHIEN);
124 setbits32(bcsr, BCSR1_FETH_RST);
125
126 /* FCC1 pins are on port A/C. */
127 /* Configure port A and C pins for FCC1 Ethernet. */
128
129 tempval = in_be32(&io->iop_pdira);
130 tempval &= ~PA1_DIRA0;
131 tempval |= PA1_DIRA1;
132 out_be32(&io->iop_pdira, tempval);
133
134 tempval = in_be32(&io->iop_psora);
135 tempval &= ~PA1_PSORA0;
136 tempval |= PA1_PSORA1;
137 out_be32(&io->iop_psora, tempval);
138
139 setbits32(&io->iop_ppara, PA1_DIRA0 | PA1_DIRA1);
140
141 /* Alter clocks */
142 tempval = PC_CLK(fpi->clk_tx - 8) | PC_CLK(fpi->clk_rx - 8);
143
144 clrbits32(&io->iop_psorc, tempval);
145 clrbits32(&io->iop_pdirc, tempval);
146 setbits32(&io->iop_pparc, tempval);
147
148 cpm2_clk_setup(CPM_CLK_FCC1, fpi->clk_rx, CPM_CLK_RX);
149 cpm2_clk_setup(CPM_CLK_FCC1, fpi->clk_tx, CPM_CLK_TX);
150
151 iounmap(bcsr);
152 iounmap(immap);
153}
154
155static void init_fcc2_ioports(struct fs_platform_info *fpi)
156{
157 cpm2_map_t *immap = ioremap(get_immrbase(), sizeof(cpm2_map_t));
158 struct device_node *np;
159 struct resource r;
160 u32 *bcsr;
161
162 struct io_port *io;
163 u32 tempval;
164
165 np = of_find_node_by_type(NULL, "memory");
166 if (!np) {
167 printk(KERN_INFO "No memory node in device tree\n");
168 return;
169 }
170 if (of_address_to_resource(np, 1, &r)) {
171 printk(KERN_INFO "No memory reg property [1] in devicetree\n");
172 return;
173 }
174 of_node_put(np);
175 io = &immap->im_ioport;
176 bcsr = ioremap(r.start + 12, sizeof(u32));
177
178 /* Enable the PHY */
179 clrbits32(bcsr, BCSR3_FETHIEN2);
180 setbits32(bcsr, BCSR3_FETH2_RST);
181
182 /* FCC2 are port B/C. */
183 /* Configure port A and C pins for FCC2 Ethernet. */
184
185 tempval = in_be32(&io->iop_pdirb);
186 tempval &= ~PB2_DIRB0;
187 tempval |= PB2_DIRB1;
188 out_be32(&io->iop_pdirb, tempval);
189
190 tempval = in_be32(&io->iop_psorb);
191 tempval &= ~PB2_PSORB0;
192 tempval |= PB2_PSORB1;
193 out_be32(&io->iop_psorb, tempval);
194
195 setbits32(&io->iop_pparb, PB2_DIRB0 | PB2_DIRB1);
196
197 tempval = PC_CLK(fpi->clk_tx - 8) | PC_CLK(fpi->clk_rx - 8);
198
199 /* Alter clocks */
200 clrbits32(&io->iop_psorc, tempval);
201 clrbits32(&io->iop_pdirc, tempval);
202 setbits32(&io->iop_pparc, tempval);
203
204 cpm2_clk_setup(CPM_CLK_FCC2, fpi->clk_rx, CPM_CLK_RX);
205 cpm2_clk_setup(CPM_CLK_FCC2, fpi->clk_tx, CPM_CLK_TX);
206
207 iounmap(bcsr);
208 iounmap(immap);
209}
210
211void init_fcc_ioports(struct fs_platform_info *fpi)
212{
213 int fcc_no = fs_get_fcc_index(fpi->fs_no);
214
215 switch (fcc_no) {
216 case 0:
217 init_fcc1_ioports(fpi);
218 break;
219 case 1:
220 init_fcc2_ioports(fpi);
221 break;
222 default:
223 printk(KERN_ERR "init_fcc_ioports: invalid FCC number\n");
224 return;
225 }
226}
227
228static void init_scc1_uart_ioports(struct fs_uart_platform_info *data)
229{
230 cpm2_map_t *immap = ioremap(get_immrbase(), sizeof(cpm2_map_t));
231
232 /* SCC1 is only on port D */
233 setbits32(&immap->im_ioport.iop_ppard, 0x00000003);
234 clrbits32(&immap->im_ioport.iop_psord, 0x00000001);
235 setbits32(&immap->im_ioport.iop_psord, 0x00000002);
236 clrbits32(&immap->im_ioport.iop_pdird, 0x00000001);
237 setbits32(&immap->im_ioport.iop_pdird, 0x00000002);
238
239 clrbits32(&immap->im_cpmux.cmx_scr, (0x00000007 << (4 - data->clk_tx)));
240 clrbits32(&immap->im_cpmux.cmx_scr, (0x00000038 << (4 - data->clk_rx)));
241 setbits32(&immap->im_cpmux.cmx_scr,
242 ((data->clk_tx - 1) << (4 - data->clk_tx)));
243 setbits32(&immap->im_cpmux.cmx_scr,
244 ((data->clk_rx - 1) << (4 - data->clk_rx)));
245
246 iounmap(immap);
247}
248
249static void init_scc4_uart_ioports(struct fs_uart_platform_info *data)
250{
251 cpm2_map_t *immap = ioremap(get_immrbase(), sizeof(cpm2_map_t));
252
253 setbits32(&immap->im_ioport.iop_ppard, 0x00000600);
254 clrbits32(&immap->im_ioport.iop_psord, 0x00000600);
255 clrbits32(&immap->im_ioport.iop_pdird, 0x00000200);
256 setbits32(&immap->im_ioport.iop_pdird, 0x00000400);
257
258 clrbits32(&immap->im_cpmux.cmx_scr, (0x00000007 << (4 - data->clk_tx)));
259 clrbits32(&immap->im_cpmux.cmx_scr, (0x00000038 << (4 - data->clk_rx)));
260 setbits32(&immap->im_cpmux.cmx_scr,
261 ((data->clk_tx - 1) << (4 - data->clk_tx)));
262 setbits32(&immap->im_cpmux.cmx_scr,
263 ((data->clk_rx - 1) << (4 - data->clk_rx)));
264
265 iounmap(immap);
266}
267
268void init_scc_ioports(struct fs_uart_platform_info *data)
269{
270 int scc_no = fs_get_scc_index(data->fs_no);
271
272 switch (scc_no) {
273 case 0:
274 init_scc1_uart_ioports(data);
275 data->brg = data->clk_rx;
276 break;
277 case 3:
278 init_scc4_uart_ioports(data);
279 data->brg = data->clk_rx;
280 break;
281 default:
282 printk(KERN_ERR "init_scc_ioports: invalid SCC number\n");
283 return;
284 }
285}
286
287void __init m82xx_board_setup(void)
288{
289 cpm2_map_t *immap = ioremap(get_immrbase(), sizeof(cpm2_map_t));
290 struct device_node *np;
291 struct resource r;
292 u32 *bcsr;
293
294 np = of_find_node_by_type(NULL, "memory");
295 if (!np) {
296 printk(KERN_INFO "No memory node in device tree\n");
297 return;
298 }
299 if (of_address_to_resource(np, 1, &r)) {
300 printk(KERN_INFO "No memory reg property [1] in devicetree\n");
301 return;
302 }
303 of_node_put(np);
304 bcsr = ioremap(r.start + 4, sizeof(u32));
305 /* Enable the 2nd UART port */
306 clrbits32(bcsr, BCSR1_RS232_EN2);
307
308#ifdef CONFIG_SERIAL_CPM_SCC1
309 clrbits32((u32 *) & immap->im_scc[0].scc_sccm,
310 UART_SCCM_TX | UART_SCCM_RX);
311 clrbits32((u32 *) & immap->im_scc[0].scc_gsmrl,
312 SCC_GSMRL_ENR | SCC_GSMRL_ENT);
313#endif
314
315#ifdef CONFIG_SERIAL_CPM_SCC2
316 clrbits32((u32 *) & immap->im_scc[1].scc_sccm,
317 UART_SCCM_TX | UART_SCCM_RX);
318 clrbits32((u32 *) & immap->im_scc[1].scc_gsmrl,
319 SCC_GSMRL_ENR | SCC_GSMRL_ENT);
320#endif
321
322#ifdef CONFIG_SERIAL_CPM_SCC3
323 clrbits32((u32 *) & immap->im_scc[2].scc_sccm,
324 UART_SCCM_TX | UART_SCCM_RX);
325 clrbits32((u32 *) & immap->im_scc[2].scc_gsmrl,
326 SCC_GSMRL_ENR | SCC_GSMRL_ENT);
327#endif
328
329#ifdef CONFIG_SERIAL_CPM_SCC4
330 clrbits32((u32 *) & immap->im_scc[3].scc_sccm,
331 UART_SCCM_TX | UART_SCCM_RX);
332 clrbits32((u32 *) & immap->im_scc[3].scc_gsmrl,
333 SCC_GSMRL_ENR | SCC_GSMRL_ENT);
334#endif
335
336 iounmap(bcsr);
337 iounmap(immap);
338}
339
340#ifdef CONFIG_PCI
341static void m82xx_pci_mask_irq(unsigned int irq)
342{
343 int bit = irq - pci_int_base;
344
345 *pci_regs.pci_int_mask_reg |= (1 << (31 - bit));
346 return;
347}
348
349static void m82xx_pci_unmask_irq(unsigned int irq)
350{
351 int bit = irq - pci_int_base;
352
353 *pci_regs.pci_int_mask_reg &= ~(1 << (31 - bit));
354 return;
355}
356
357static void m82xx_pci_mask_and_ack(unsigned int irq)
358{
359 int bit = irq - pci_int_base;
360
361 *pci_regs.pci_int_mask_reg |= (1 << (31 - bit));
362 return;
363}
364
365static void m82xx_pci_end_irq(unsigned int irq)
366{
367 int bit = irq - pci_int_base;
368
369 *pci_regs.pci_int_mask_reg &= ~(1 << (31 - bit));
370 return;
371}
372
373struct hw_interrupt_type m82xx_pci_ic = {
374 .typename = "MPC82xx ADS PCI",
375 .name = "MPC82xx ADS PCI",
376 .enable = m82xx_pci_unmask_irq,
377 .disable = m82xx_pci_mask_irq,
378 .ack = m82xx_pci_mask_and_ack,
379 .end = m82xx_pci_end_irq,
380 .mask = m82xx_pci_mask_irq,
381 .mask_ack = m82xx_pci_mask_and_ack,
382 .unmask = m82xx_pci_unmask_irq,
383 .eoi = m82xx_pci_end_irq,
384};
385
386static void
387m82xx_pci_irq_demux(unsigned int irq, struct irq_desc *desc,
388 struct pt_regs *regs)
389{
390 unsigned long stat, mask, pend;
391 int bit;
392
393 for (;;) {
394 stat = *pci_regs.pci_int_stat_reg;
395 mask = *pci_regs.pci_int_mask_reg;
396 pend = stat & ~mask & 0xf0000000;
397 if (!pend)
398 break;
399 for (bit = 0; pend != 0; ++bit, pend <<= 1) {
400 if (pend & 0x80000000)
401 __do_IRQ(pci_int_base + bit, regs);
402 }
403 }
404}
405
406static int pci_pic_host_match(struct irq_host *h, struct device_node *node)
407{
408 return node == pci_pic_node;
409}
410
411static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
412 irq_hw_number_t hw)
413{
414 get_irq_desc(virq)->status |= IRQ_LEVEL;
415 set_irq_chip(virq, &m82xx_pci_ic);
416 return 0;
417}
418
419static void pci_host_unmap(struct irq_host *h, unsigned int virq)
420{
421 /* remove chip and handler */
422 set_irq_chip(virq, NULL);
423}
424
425static struct irq_host_ops pci_pic_host_ops = {
426 .match = pci_pic_host_match,
427 .map = pci_pic_host_map,
428 .unmap = pci_host_unmap,
429};
430
431void m82xx_pci_init_irq(void)
432{
433 int irq;
434 cpm2_map_t *immap;
435 struct device_node *np;
436 struct resource r;
437 const u32 *regs;
438 unsigned int size;
439 const u32 *irq_map;
440 int i;
441 unsigned int irq_max, irq_min;
442
443 if ((np = of_find_node_by_type(NULL, "soc")) == NULL) {
444 printk(KERN_INFO "No SOC node in device tree\n");
445 return;
446 }
447 memset(&r, 0, sizeof(r));
448 if (of_address_to_resource(np, 0, &r)) {
449 printk(KERN_INFO "No SOC reg property in device tree\n");
450 return;
451 }
452 immap = ioremap(r.start, sizeof(*immap));
453 of_node_put(np);
454
455 /* install the demultiplexer for the PCI cascade interrupt */
456 np = of_find_node_by_type(NULL, "pci");
457 if (!np) {
458 printk(KERN_INFO "No pci node on device tree\n");
459 iounmap(immap);
460 return;
461 }
462 irq_map = get_property(np, "interrupt-map", &size);
463 if ((!irq_map) || (size <= 7)) {
464 printk(KERN_INFO "No interrupt-map property of pci node\n");
465 iounmap(immap);
466 return;
467 }
468 size /= sizeof(irq_map[0]);
469 for (i = 0, irq_max = 0, irq_min = 512; i < size; i += 7, irq_map += 7) {
470 if (irq_map[5] < irq_min)
471 irq_min = irq_map[5];
472 if (irq_map[5] > irq_max)
473 irq_max = irq_map[5];
474 }
475 pci_int_base = irq_min;
476 irq = irq_of_parse_and_map(np, 0);
477 set_irq_chained_handler(irq, m82xx_pci_irq_demux);
478 of_node_put(np);
479 np = of_find_node_by_type(NULL, "pci-pic");
480 if (!np) {
481 printk(KERN_INFO "No pci pic node on device tree\n");
482 iounmap(immap);
483 return;
484 }
485 pci_pic_node = of_node_get(np);
486 /* PCI interrupt controller registers: status and mask */
487 regs = get_property(np, "reg", &size);
488 if ((!regs) || (size <= 2)) {
489 printk(KERN_INFO "No reg property in pci pic node\n");
490 iounmap(immap);
491 return;
492 }
493 pci_regs.pci_int_stat_reg =
494 ioremap(regs[0], sizeof(*pci_regs.pci_int_stat_reg));
495 pci_regs.pci_int_mask_reg =
496 ioremap(regs[1], sizeof(*pci_regs.pci_int_mask_reg));
497 of_node_put(np);
498 /* configure chip select for PCI interrupt controller */
499 immap->im_memctl.memc_br3 = regs[0] | 0x00001801;
500 immap->im_memctl.memc_or3 = 0xffff8010;
501 /* make PCI IRQ level sensitive */
502 immap->im_intctl.ic_siexr &= ~(1 << (14 - (irq - SIU_INT_IRQ1)));
503
504 /* mask all PCI interrupts */
505 *pci_regs.pci_int_mask_reg |= 0xfff00000;
506 iounmap(immap);
507 pci_pic_host =
508 irq_alloc_host(IRQ_HOST_MAP_LINEAR, irq_max - irq_min + 1,
509 &pci_pic_host_ops, irq_max + 1);
510 return;
511}
512
513static int m82xx_pci_exclude_device(u_char bus, u_char devfn)
514{
515 if (bus == 0 && PCI_SLOT(devfn) == 0)
516 return PCIBIOS_DEVICE_NOT_FOUND;
517 else
518 return PCIBIOS_SUCCESSFUL;
519}
520
521static void
522__init mpc82xx_pcibios_fixup(void)
523{
524 struct pci_dev *dev = NULL;
525
526 for_each_pci_dev(dev) {
527 pci_read_irq_line(dev);
528 }
529}
530
531void __init add_bridge(struct device_node *np)
532{
533 int len;
534 struct pci_controller *hose;
535 struct resource r;
536 const int *bus_range;
537 const void *ptr;
538
539 memset(&r, 0, sizeof(r));
540 if (of_address_to_resource(np, 0, &r)) {
541 printk(KERN_INFO "No PCI reg property in device tree\n");
542 return;
543 }
544 if (!(ptr = get_property(np, "clock-frequency", NULL))) {
545 printk(KERN_INFO "No clock-frequency property in PCI node");
546 return;
547 }
548 pci_clk_frq = *(uint *) ptr;
549 of_node_put(np);
550 bus_range = get_property(np, "bus-range", &len);
551 if (bus_range == NULL || len < 2 * sizeof(int)) {
552 printk(KERN_WARNING "Can't get bus-range for %s, assume"
553 " bus 0\n", np->full_name);
554 }
555
556 pci_assign_all_buses = 1;
557
558 hose = pcibios_alloc_controller();
559
560 if (!hose)
561 return;
562
563 hose->arch_data = np;
564 hose->set_cfg_type = 1;
565
566 hose->first_busno = bus_range ? bus_range[0] : 0;
567 hose->last_busno = bus_range ? bus_range[1] : 0xff;
568 hose->bus_offset = 0;
569
570 hose->set_cfg_type = 1;
571
572 setup_indirect_pci(hose,
573 r.start + offsetof(pci_cpm2_t, pci_cfg_addr),
574 r.start + offsetof(pci_cpm2_t, pci_cfg_data));
575
576 pci_process_bridge_OF_ranges(hose, np, 1);
577}
578#endif
579
580/*
581 * Setup the architecture
582 */
583static void __init mpc82xx_ads_setup_arch(void)
584{
585#ifdef CONFIG_PCI
586 struct device_node *np;
587#endif
588
589 if (ppc_md.progress)
590 ppc_md.progress("mpc82xx_ads_setup_arch()", 0);
591 cpm2_reset();
592
593 /* Map I/O region to a 256MB BAT */
594
595 m82xx_board_setup();
596
597#ifdef CONFIG_PCI
598 ppc_md.pci_exclude_device = m82xx_pci_exclude_device;
599 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
600 add_bridge(np);
601
602 of_node_put(np);
603 ppc_md.pci_map_irq = NULL;
604 ppc_md.pcibios_fixup = mpc82xx_pcibios_fixup;
605 ppc_md.pcibios_fixup_bus = NULL;
606#endif
607
608#ifdef CONFIG_ROOT_NFS
609 ROOT_DEV = Root_NFS;
610#else
611 ROOT_DEV = Root_HDA1;
612#endif
613
614 if (ppc_md.progress)
615 ppc_md.progress("mpc82xx_ads_setup_arch(), finish", 0);
616}
617
618/*
619 * Called very early, device-tree isn't unflattened
620 */
621static int __init mpc82xx_ads_probe(void)
622{
623 /* We always match for now, eventually we should look at
624 * the flat dev tree to ensure this is the board we are
625 * supposed to run on
626 */
627 return 1;
628}
629
630#define RMR_CSRE 0x00000001
631static void m82xx_restart(char *cmd)
632{
633 __volatile__ unsigned char dummy;
634
635 local_irq_disable();
636 ((cpm2_map_t *) cpm2_immr)->im_clkrst.car_rmr |= RMR_CSRE;
637
638 /* Clear the ME,EE,IR & DR bits in MSR to cause checkstop */
639 mtmsr(mfmsr() & ~(MSR_ME | MSR_EE | MSR_IR | MSR_DR));
640 dummy = ((cpm2_map_t *) cpm2_immr)->im_clkrst.res[0];
641 printk("Restart failed\n");
642 while (1) ;
643}
644
645static void m82xx_halt(void)
646{
647 local_irq_disable();
648 while (1) ;
649}
650
651define_machine(mpc82xx_ads)
652{
653 .name = "MPC82xx ADS",
654 .probe = mpc82xx_ads_probe,
655 .setup_arch = mpc82xx_ads_setup_arch,
656 .init_IRQ = mpc82xx_ads_pic_init,
657 .show_cpuinfo = mpc82xx_ads_show_cpuinfo,
658 .get_irq = cpm2_get_irq,
659 .calibrate_decr = m82xx_calibrate_decr,
660 .restart = m82xx_restart,.halt = m82xx_halt,
661};
diff --git a/arch/powerpc/platforms/82xx/pq2ads.h b/arch/powerpc/platforms/82xx/pq2ads.h
new file mode 100644
index 000000000000..a7348213508f
--- /dev/null
+++ b/arch/powerpc/platforms/82xx/pq2ads.h
@@ -0,0 +1,67 @@
1/*
2 * PQ2/mpc8260 board-specific stuff
3 *
4 * A collection of structures, addresses, and values associated with
5 * the Freescale MPC8260ADS/MPC8266ADS-PCI boards.
6 * Copied from the RPX-Classic and SBS8260 stuff.
7 *
8 * Author: Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * Originally written by Dan Malek for Motorola MPC8260 family
11 *
12 * Copyright (c) 2001 Dan Malek <dan@embeddedalley.com>
13 * Copyright (c) 2006 MontaVista Software, Inc.
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 */
20
21#ifdef __KERNEL__
22#ifndef __MACH_ADS8260_DEFS
23#define __MACH_ADS8260_DEFS
24
25#include <linux/config.h>
26
27#include <asm/ppcboot.h>
28
29/* For our show_cpuinfo hooks. */
30#define CPUINFO_VENDOR "Freescale Semiconductor"
31#define CPUINFO_MACHINE "PQ2 ADS PowerPC"
32
33/* Backword-compatibility stuff for the drivers */
34#define CPM_MAP_ADDR ((uint)0xf0000000)
35#define CPM_IRQ_OFFSET 0
36
37/* The ADS8260 has 16, 32-bit wide control/status registers, accessed
38 * only on word boundaries.
39 * Not all are used (yet), or are interesting to us (yet).
40 */
41
42/* Things of interest in the CSR.
43 */
44#define BCSR0_LED0 ((uint)0x02000000) /* 0 == on */
45#define BCSR0_LED1 ((uint)0x01000000) /* 0 == on */
46#define BCSR1_FETHIEN ((uint)0x08000000) /* 0 == enable*/
47#define BCSR1_FETH_RST ((uint)0x04000000) /* 0 == reset */
48#define BCSR1_RS232_EN1 ((uint)0x02000000) /* 0 ==enable */
49#define BCSR1_RS232_EN2 ((uint)0x01000000) /* 0 ==enable */
50#define BCSR3_FETHIEN2 ((uint)0x10000000) /* 0 == enable*/
51#define BCSR3_FETH2_RS ((uint)0x80000000) /* 0 == reset */
52
53/* cpm serial driver works with constants below */
54
55#define SIU_INT_SMC1 ((uint)0x04+CPM_IRQ_OFFSET)
56#define SIU_INT_SMC2i ((uint)0x05+CPM_IRQ_OFFSET)
57#define SIU_INT_SCC1 ((uint)0x28+CPM_IRQ_OFFSET)
58#define SIU_INT_SCC2 ((uint)0x29+CPM_IRQ_OFFSET)
59#define SIU_INT_SCC3 ((uint)0x2a+CPM_IRQ_OFFSET)
60#define SIU_INT_SCC4 ((uint)0x2b+CPM_IRQ_OFFSET)
61
62void m82xx_pci_init_irq(void);
63void mpc82xx_ads_show_cpuinfo(struct seq_file*);
64void m82xx_calibrate_decr(void);
65
66#endif /* __MACH_ADS8260_DEFS */
67#endif /* __KERNEL__ */
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 5fe7b7faf45f..0975e94ac7c4 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -5,6 +5,13 @@ choice
5 prompt "Machine Type" 5 prompt "Machine Type"
6 default MPC834x_SYS 6 default MPC834x_SYS
7 7
8config MPC832x_MDS
9 bool "Freescale MPC832x MDS"
10 select DEFAULT_UIMAGE
11 select QUICC_ENGINE
12 help
13 This option enables support for the MPC832x MDS evaluation board.
14
8config MPC834x_SYS 15config MPC834x_SYS
9 bool "Freescale MPC834x SYS" 16 bool "Freescale MPC834x SYS"
10 select DEFAULT_UIMAGE 17 select DEFAULT_UIMAGE
@@ -27,6 +34,12 @@ config MPC834x_ITX
27 34
28endchoice 35endchoice
29 36
37config PPC_MPC832x
38 bool
39 select PPC_UDBG_16550
40 select PPC_INDIRECT_PCI
41 default y if MPC832x_MDS
42
30config MPC834x 43config MPC834x
31 bool 44 bool
32 select PPC_UDBG_16550 45 select PPC_UDBG_16550
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
new file mode 100644
index 000000000000..54dea9d42dc9
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -0,0 +1,215 @@
1/*
2 * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
3 *
4 * Description:
5 * MPC832xE MDS board specific routines.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/stddef.h>
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <linux/reboot.h>
18#include <linux/pci.h>
19#include <linux/kdev_t.h>
20#include <linux/major.h>
21#include <linux/console.h>
22#include <linux/delay.h>
23#include <linux/seq_file.h>
24#include <linux/root_dev.h>
25#include <linux/initrd.h>
26
27#include <asm/system.h>
28#include <asm/atomic.h>
29#include <asm/time.h>
30#include <asm/io.h>
31#include <asm/machdep.h>
32#include <asm/ipic.h>
33#include <asm/bootinfo.h>
34#include <asm/irq.h>
35#include <asm/prom.h>
36#include <asm/udbg.h>
37#include <sysdev/fsl_soc.h>
38#include <asm/qe.h>
39#include <asm/qe_ic.h>
40
41#include "mpc83xx.h"
42#include "mpc832x_mds.h"
43
44#undef DEBUG
45#ifdef DEBUG
46#define DBG(fmt...) udbg_printf(fmt)
47#else
48#define DBG(fmt...)
49#endif
50
51#ifndef CONFIG_PCI
52unsigned long isa_io_base = 0;
53unsigned long isa_mem_base = 0;
54#endif
55
56static u8 *bcsr_regs = NULL;
57
58u8 *get_bcsr(void)
59{
60 return bcsr_regs;
61}
62
63/* ************************************************************************
64 *
65 * Setup the architecture
66 *
67 */
68static void __init mpc832x_sys_setup_arch(void)
69{
70 struct device_node *np;
71
72 if (ppc_md.progress)
73 ppc_md.progress("mpc832x_sys_setup_arch()", 0);
74
75 np = of_find_node_by_type(NULL, "cpu");
76 if (np != 0) {
77 unsigned int *fp =
78 (int *)get_property(np, "clock-frequency", NULL);
79 if (fp != 0)
80 loops_per_jiffy = *fp / HZ;
81 else
82 loops_per_jiffy = 50000000 / HZ;
83 of_node_put(np);
84 }
85
86 /* Map BCSR area */
87 np = of_find_node_by_name(NULL, "bcsr");
88 if (np != 0) {
89 struct resource res;
90
91 of_address_to_resource(np, 0, &res);
92 bcsr_regs = ioremap(res.start, res.end - res.start +1);
93 of_node_put(np);
94 }
95
96#ifdef CONFIG_PCI
97 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
98 add_bridge(np);
99
100 ppc_md.pci_swizzle = common_swizzle;
101 ppc_md.pci_exclude_device = mpc83xx_exclude_device;
102#endif
103
104#ifdef CONFIG_QUICC_ENGINE
105 qe_reset();
106
107 if ((np = of_find_node_by_name(np, "par_io")) != NULL) {
108 par_io_init(np);
109 of_node_put(np);
110
111 for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
112 par_io_of_config(np);
113 }
114
115 if ((np = of_find_compatible_node(NULL, "network", "ucc_geth"))
116 != NULL){
117 /* Reset the Ethernet PHY */
118 bcsr_regs[9] &= ~0x20;
119 udelay(1000);
120 bcsr_regs[9] |= 0x20;
121 iounmap(bcsr_regs);
122 of_node_put(np);
123 }
124
125#endif /* CONFIG_QUICC_ENGINE */
126
127#ifdef CONFIG_BLK_DEV_INITRD
128 if (initrd_start)
129 ROOT_DEV = Root_RAM0;
130 else
131#endif
132#ifdef CONFIG_ROOT_NFS
133 ROOT_DEV = Root_NFS;
134#else
135 ROOT_DEV = Root_HDA1;
136#endif
137}
138
139void __init mpc832x_sys_init_IRQ(void)
140{
141
142 struct device_node *np;
143
144 np = of_find_node_by_type(NULL, "ipic");
145 if (!np)
146 return;
147
148 ipic_init(np, 0);
149
150 /* Initialize the default interrupt mapping priorities,
151 * in case the boot rom changed something on us.
152 */
153 ipic_set_default_priority();
154 of_node_put(np);
155
156#ifdef CONFIG_QUICC_ENGINE
157 np = of_find_node_by_type(NULL, "qeic");
158 if (!np)
159 return;
160
161 qe_ic_init(np, 0);
162 of_node_put(np);
163#endif /* CONFIG_QUICC_ENGINE */
164}
165
166#if defined(CONFIG_I2C_MPC) && defined(CONFIG_SENSORS_DS1374)
167extern ulong ds1374_get_rtc_time(void);
168extern int ds1374_set_rtc_time(ulong);
169
170static int __init mpc832x_rtc_hookup(void)
171{
172 struct timespec tv;
173
174 ppc_md.get_rtc_time = ds1374_get_rtc_time;
175 ppc_md.set_rtc_time = ds1374_set_rtc_time;
176
177 tv.tv_nsec = 0;
178 tv.tv_sec = (ppc_md.get_rtc_time) ();
179 do_settimeofday(&tv);
180
181 return 0;
182}
183
184late_initcall(mpc832x_rtc_hookup);
185#endif
186
187/*
188 * Called very early, MMU is off, device-tree isn't unflattened
189 */
190static int __init mpc832x_sys_probe(void)
191{
192 char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
193 "model", NULL);
194
195 if (model == NULL)
196 return 0;
197 if (strcmp(model, "MPC8323EMDS"))
198 return 0;
199
200 DBG("%s found\n", model);
201
202 return 1;
203}
204
205define_machine(mpc832x_mds) {
206 .name = "MPC832x MDS",
207 .probe = mpc832x_sys_probe,
208 .setup_arch = mpc832x_sys_setup_arch,
209 .init_IRQ = mpc832x_sys_init_IRQ,
210 .get_irq = ipic_get_irq,
211 .restart = mpc83xx_restart,
212 .time_init = mpc83xx_time_init,
213 .calibrate_decr = generic_calibrate_decr,
214 .progress = udbg_progress,
215};
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.h b/arch/powerpc/platforms/83xx/mpc832x_mds.h
new file mode 100644
index 000000000000..a49588904f8a
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.h
@@ -0,0 +1,19 @@
1/*
2 * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
3 *
4 * Description:
5 * MPC832x MDS board specific header.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 */
13
14#ifndef __MACH_MPC832x_MDS_H__
15#define __MACH_MPC832x_MDS_H__
16
17extern u8 *get_bcsr(void);
18
19#endif /* __MACH_MPC832x_MDS_H__ */
diff --git a/arch/powerpc/platforms/83xx/mpc8360e_pb.c b/arch/powerpc/platforms/83xx/mpc8360e_pb.c
new file mode 100644
index 000000000000..c0191900fc25
--- /dev/null
+++ b/arch/powerpc/platforms/83xx/mpc8360e_pb.c
@@ -0,0 +1,219 @@
1/*
2 * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
3 *
4 * Author: Li Yang <LeoLi@freescale.com>
5 * Yin Olivia <Hong-hua.Yin@freescale.com>
6 *
7 * Description:
8 * MPC8360E MDS PB board specific routines.
9 *
10 * Changelog:
11 * Jun 21, 2006 Initial version
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18
19#include <linux/stddef.h>
20#include <linux/kernel.h>
21#include <linux/init.h>
22#include <linux/errno.h>
23#include <linux/reboot.h>
24#include <linux/pci.h>
25#include <linux/kdev_t.h>
26#include <linux/major.h>
27#include <linux/console.h>
28#include <linux/delay.h>
29#include <linux/seq_file.h>
30#include <linux/root_dev.h>
31#include <linux/initrd.h>
32
33#include <asm/system.h>
34#include <asm/atomic.h>
35#include <asm/time.h>
36#include <asm/io.h>
37#include <asm/machdep.h>
38#include <asm/ipic.h>
39#include <asm/bootinfo.h>
40#include <asm/irq.h>
41#include <asm/prom.h>
42#include <asm/udbg.h>
43#include <sysdev/fsl_soc.h>
44#include <asm/qe.h>
45#include <asm/qe_ic.h>
46
47#include "mpc83xx.h"
48
49#undef DEBUG
50#ifdef DEBUG
51#define DBG(fmt...) udbg_printf(fmt)
52#else
53#define DBG(fmt...)
54#endif
55
56#ifndef CONFIG_PCI
57unsigned long isa_io_base = 0;
58unsigned long isa_mem_base = 0;
59#endif
60
61static u8 *bcsr_regs = NULL;
62
63u8 *get_bcsr(void)
64{
65 return bcsr_regs;
66}
67
68/* ************************************************************************
69 *
70 * Setup the architecture
71 *
72 */
73static void __init mpc8360_sys_setup_arch(void)
74{
75 struct device_node *np;
76
77 if (ppc_md.progress)
78 ppc_md.progress("mpc8360_sys_setup_arch()", 0);
79
80 np = of_find_node_by_type(NULL, "cpu");
81 if (np != 0) {
82 const unsigned int *fp =
83 get_property(np, "clock-frequency", NULL);
84 if (fp != 0)
85 loops_per_jiffy = *fp / HZ;
86 else
87 loops_per_jiffy = 50000000 / HZ;
88 of_node_put(np);
89 }
90
91 /* Map BCSR area */
92 np = of_find_node_by_name(NULL, "bcsr");
93 if (np != 0) {
94 struct resource res;
95
96 of_address_to_resource(np, 0, &res);
97 bcsr_regs = ioremap(res.start, res.end - res.start +1);
98 of_node_put(np);
99 }
100
101#ifdef CONFIG_PCI
102 for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
103 add_bridge(np);
104
105 ppc_md.pci_swizzle = common_swizzle;
106 ppc_md.pci_exclude_device = mpc83xx_exclude_device;
107#endif
108
109#ifdef CONFIG_QUICC_ENGINE
110 qe_reset();
111
112 if ((np = of_find_node_by_name(np, "par_io")) != NULL) {
113 par_io_init(np);
114 of_node_put(np);
115
116 for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
117 par_io_of_config(np);
118 }
119
120 if ((np = of_find_compatible_node(NULL, "network", "ucc_geth"))
121 != NULL){
122 /* Reset the Ethernet PHY */
123 bcsr_regs[9] &= ~0x20;
124 udelay(1000);
125 bcsr_regs[9] |= 0x20;
126 iounmap(bcsr_regs);
127 of_node_put(np);
128 }
129
130#endif /* CONFIG_QUICC_ENGINE */
131
132#ifdef CONFIG_BLK_DEV_INITRD
133 if (initrd_start)
134 ROOT_DEV = Root_RAM0;
135 else
136#endif
137#ifdef CONFIG_ROOT_NFS
138 ROOT_DEV = Root_NFS;
139#else
140 ROOT_DEV = Root_HDA1;
141#endif
142}
143
144void __init mpc8360_sys_init_IRQ(void)
145{
146
147 struct device_node *np;
148
149 np = of_find_node_by_type(NULL, "ipic");
150 if (!np)
151 return;
152
153 ipic_init(np, 0);
154
155 /* Initialize the default interrupt mapping priorities,
156 * in case the boot rom changed something on us.
157 */
158 ipic_set_default_priority();
159 of_node_put(np);
160
161#ifdef CONFIG_QUICC_ENGINE
162 np = of_find_node_by_type(NULL, "qeic");
163 if (!np)
164 return;
165
166 qe_ic_init(np, 0);
167 of_node_put(np);
168#endif /* CONFIG_QUICC_ENGINE */
169}
170
171#if defined(CONFIG_I2C_MPC) && defined(CONFIG_SENSORS_DS1374)
172extern ulong ds1374_get_rtc_time(void);
173extern int ds1374_set_rtc_time(ulong);
174
175static int __init mpc8360_rtc_hookup(void)
176{
177 struct timespec tv;
178
179 ppc_md.get_rtc_time = ds1374_get_rtc_time;
180 ppc_md.set_rtc_time = ds1374_set_rtc_time;
181
182 tv.tv_nsec = 0;
183 tv.tv_sec = (ppc_md.get_rtc_time) ();
184 do_settimeofday(&tv);
185
186 return 0;
187}
188
189late_initcall(mpc8360_rtc_hookup);
190#endif
191
192/*
193 * Called very early, MMU is off, device-tree isn't unflattened
194 */
195static int __init mpc8360_sys_probe(void)
196{
197 char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
198 "model", NULL);
199 if (model == NULL)
200 return 0;
201 if (strcmp(model, "MPC8360EPB"))
202 return 0;
203
204 DBG("MPC8360EMDS-PB found\n");
205
206 return 1;
207}
208
209define_machine(mpc8360_sys) {
210 .name = "MPC8360E PB",
211 .probe = mpc8360_sys_probe,
212 .setup_arch = mpc8360_sys_setup_arch,
213 .init_IRQ = mpc8360_sys_init_IRQ,
214 .get_irq = ipic_get_irq,
215 .restart = mpc83xx_restart,
216 .time_init = mpc83xx_time_init,
217 .calibrate_decr = generic_calibrate_decr,
218 .progress = udbg_progress,
219};
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 6b57a47c5d37..6cc59e0b4582 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -21,6 +21,12 @@
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 * TODO:
26 * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
27 * vs node numbers in the setup code
28 * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
29 * a non-active node to the active node)
24 */ 30 */
25 31
26#include <linux/interrupt.h> 32#include <linux/interrupt.h>
@@ -44,24 +50,25 @@ struct iic {
44 u8 target_id; 50 u8 target_id;
45 u8 eoi_stack[16]; 51 u8 eoi_stack[16];
46 int eoi_ptr; 52 int eoi_ptr;
47 struct irq_host *host; 53 struct device_node *node;
48}; 54};
49 55
50static DEFINE_PER_CPU(struct iic, iic); 56static DEFINE_PER_CPU(struct iic, iic);
51#define IIC_NODE_COUNT 2 57#define IIC_NODE_COUNT 2
52static struct irq_host *iic_hosts[IIC_NODE_COUNT]; 58static struct irq_host *iic_host;
53 59
54/* Convert between "pending" bits and hw irq number */ 60/* Convert between "pending" bits and hw irq number */
55static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) 61static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
56{ 62{
57 unsigned char unit = bits.source & 0xf; 63 unsigned char unit = bits.source & 0xf;
64 unsigned char node = bits.source >> 4;
65 unsigned char class = bits.class & 3;
58 66
67 /* Decode IPIs */
59 if (bits.flags & CBE_IIC_IRQ_IPI) 68 if (bits.flags & CBE_IIC_IRQ_IPI)
60 return IIC_IRQ_IPI0 | (bits.prio >> 4); 69 return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
61 else if (bits.class <= 3)
62 return (bits.class << 4) | unit;
63 else 70 else
64 return IIC_IRQ_INVALID; 71 return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
65} 72}
66 73
67static void iic_mask(unsigned int irq) 74static void iic_mask(unsigned int irq)
@@ -86,21 +93,70 @@ static struct irq_chip iic_chip = {
86 .eoi = iic_eoi, 93 .eoi = iic_eoi,
87}; 94};
88 95
96
97static void iic_ioexc_eoi(unsigned int irq)
98{
99}
100
101static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc,
102 struct pt_regs *regs)
103{
104 struct cbe_iic_regs *node_iic = desc->handler_data;
105 unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
106 unsigned long bits, ack;
107 int cascade;
108
109 for (;;) {
110 bits = in_be64(&node_iic->iic_is);
111 if (bits == 0)
112 break;
113 /* pre-ack edge interrupts */
114 ack = bits & IIC_ISR_EDGE_MASK;
115 if (ack)
116 out_be64(&node_iic->iic_is, ack);
117 /* handle them */
118 for (cascade = 63; cascade >= 0; cascade--)
119 if (bits & (0x8000000000000000UL >> cascade)) {
120 unsigned int cirq =
121 irq_linear_revmap(iic_host,
122 base | cascade);
123 if (cirq != NO_IRQ)
124 generic_handle_irq(cirq, regs);
125 }
126 /* post-ack level interrupts */
127 ack = bits & ~IIC_ISR_EDGE_MASK;
128 if (ack)
129 out_be64(&node_iic->iic_is, ack);
130 }
131 desc->chip->eoi(irq);
132}
133
134
135static struct irq_chip iic_ioexc_chip = {
136 .typename = " CELL-IOEX",
137 .mask = iic_mask,
138 .unmask = iic_unmask,
139 .eoi = iic_ioexc_eoi,
140};
141
89/* Get an IRQ number from the pending state register of the IIC */ 142/* Get an IRQ number from the pending state register of the IIC */
90static unsigned int iic_get_irq(struct pt_regs *regs) 143static unsigned int iic_get_irq(struct pt_regs *regs)
91{ 144{
92 struct cbe_iic_pending_bits pending; 145 struct cbe_iic_pending_bits pending;
93 struct iic *iic; 146 struct iic *iic;
147 unsigned int virq;
94 148
95 iic = &__get_cpu_var(iic); 149 iic = &__get_cpu_var(iic);
96 *(unsigned long *) &pending = 150 *(unsigned long *) &pending =
97 in_be64((unsigned long __iomem *) &iic->regs->pending_destr); 151 in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
152 if (!(pending.flags & CBE_IIC_IRQ_VALID))
153 return NO_IRQ;
154 virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
155 if (virq == NO_IRQ)
156 return NO_IRQ;
98 iic->eoi_stack[++iic->eoi_ptr] = pending.prio; 157 iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
99 BUG_ON(iic->eoi_ptr > 15); 158 BUG_ON(iic->eoi_ptr > 15);
100 if (pending.flags & CBE_IIC_IRQ_VALID) 159 return virq;
101 return irq_linear_revmap(iic->host,
102 iic_pending_to_hwnum(pending));
103 return NO_IRQ;
104} 160}
105 161
106#ifdef CONFIG_SMP 162#ifdef CONFIG_SMP
@@ -108,12 +164,7 @@ static unsigned int iic_get_irq(struct pt_regs *regs)
108/* Use the highest interrupt priorities for IPI */ 164/* Use the highest interrupt priorities for IPI */
109static inline int iic_ipi_to_irq(int ipi) 165static inline int iic_ipi_to_irq(int ipi)
110{ 166{
111 return IIC_IRQ_IPI0 + IIC_NUM_IPIS - 1 - ipi; 167 return IIC_IRQ_TYPE_IPI + 0xf - ipi;
112}
113
114static inline int iic_irq_to_ipi(int irq)
115{
116 return IIC_NUM_IPIS - 1 - (irq - IIC_IRQ_IPI0);
117} 168}
118 169
119void iic_setup_cpu(void) 170void iic_setup_cpu(void)
@@ -123,7 +174,7 @@ void iic_setup_cpu(void)
123 174
124void iic_cause_IPI(int cpu, int mesg) 175void iic_cause_IPI(int cpu, int mesg)
125{ 176{
126 out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4); 177 out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4);
127} 178}
128 179
129u8 iic_get_target_id(int cpu) 180u8 iic_get_target_id(int cpu)
@@ -134,9 +185,7 @@ EXPORT_SYMBOL_GPL(iic_get_target_id);
134 185
135struct irq_host *iic_get_irq_host(int node) 186struct irq_host *iic_get_irq_host(int node)
136{ 187{
137 if (node < 0 || node >= IIC_NODE_COUNT) 188 return iic_host;
138 return NULL;
139 return iic_hosts[node];
140} 189}
141EXPORT_SYMBOL_GPL(iic_get_irq_host); 190EXPORT_SYMBOL_GPL(iic_get_irq_host);
142 191
@@ -149,34 +198,20 @@ static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
149 198
150 return IRQ_HANDLED; 199 return IRQ_HANDLED;
151} 200}
152
153static void iic_request_ipi(int ipi, const char *name) 201static void iic_request_ipi(int ipi, const char *name)
154{ 202{
155 int node, virq; 203 int virq;
156 204
157 for (node = 0; node < IIC_NODE_COUNT; node++) { 205 virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi));
158 char *rname; 206 if (virq == NO_IRQ) {
159 if (iic_hosts[node] == NULL) 207 printk(KERN_ERR
160 continue; 208 "iic: failed to map IPI %s\n", name);
161 virq = irq_create_mapping(iic_hosts[node], 209 return;
162 iic_ipi_to_irq(ipi));
163 if (virq == NO_IRQ) {
164 printk(KERN_ERR
165 "iic: failed to map IPI %s on node %d\n",
166 name, node);
167 continue;
168 }
169 rname = kzalloc(strlen(name) + 16, GFP_KERNEL);
170 if (rname)
171 sprintf(rname, "%s node %d", name, node);
172 else
173 rname = (char *)name;
174 if (request_irq(virq, iic_ipi_action, IRQF_DISABLED,
175 rname, (void *)(long)ipi))
176 printk(KERN_ERR
177 "iic: failed to request IPI %s on node %d\n",
178 name, node);
179 } 210 }
211 if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name,
212 (void *)(long)ipi))
213 printk(KERN_ERR
214 "iic: failed to request IPI %s\n", name);
180} 215}
181 216
182void iic_request_IPIs(void) 217void iic_request_IPIs(void)
@@ -193,16 +228,24 @@ void iic_request_IPIs(void)
193 228
194static int iic_host_match(struct irq_host *h, struct device_node *node) 229static int iic_host_match(struct irq_host *h, struct device_node *node)
195{ 230{
196 return h->host_data != NULL && node == h->host_data; 231 return device_is_compatible(node,
232 "IBM,CBEA-Internal-Interrupt-Controller");
197} 233}
198 234
199static int iic_host_map(struct irq_host *h, unsigned int virq, 235static int iic_host_map(struct irq_host *h, unsigned int virq,
200 irq_hw_number_t hw) 236 irq_hw_number_t hw)
201{ 237{
202 if (hw < IIC_IRQ_IPI0) 238 switch (hw & IIC_IRQ_TYPE_MASK) {
203 set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq); 239 case IIC_IRQ_TYPE_IPI:
204 else
205 set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq); 240 set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
241 break;
242 case IIC_IRQ_TYPE_IOEXC:
243 set_irq_chip_and_handler(virq, &iic_ioexc_chip,
244 handle_fasteoi_irq);
245 break;
246 default:
247 set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq);
248 }
206 return 0; 249 return 0;
207} 250}
208 251
@@ -211,11 +254,39 @@ static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
211 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 254 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
212 255
213{ 256{
214 /* Currently, we don't translate anything. That needs to be fixed as 257 unsigned int node, ext, unit, class;
215 * we get better defined device-trees. iic interrupts have to be 258 const u32 *val;
216 * explicitely mapped by whoever needs them 259
217 */ 260 if (!device_is_compatible(ct,
218 return -ENODEV; 261 "IBM,CBEA-Internal-Interrupt-Controller"))
262 return -ENODEV;
263 if (intsize != 1)
264 return -ENODEV;
265 val = get_property(ct, "#interrupt-cells", NULL);
266 if (val == NULL || *val != 1)
267 return -ENODEV;
268
269 node = intspec[0] >> 24;
270 ext = (intspec[0] >> 16) & 0xff;
271 class = (intspec[0] >> 8) & 0xff;
272 unit = intspec[0] & 0xff;
273
274 /* Check if node is in supported range */
275 if (node > 1)
276 return -EINVAL;
277
278 /* Build up interrupt number, special case for IO exceptions */
279 *out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
280 if (unit == IIC_UNIT_IIC && class == 1)
281 *out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
282 else
283 *out_hwirq |= IIC_IRQ_TYPE_NORMAL |
284 (class << IIC_IRQ_CLASS_SHIFT) | unit;
285
286 /* Dummy flags, ignored by iic code */
287 *out_flags = IRQ_TYPE_EDGE_RISING;
288
289 return 0;
219} 290}
220 291
221static struct irq_host_ops iic_host_ops = { 292static struct irq_host_ops iic_host_ops = {
@@ -225,7 +296,7 @@ static struct irq_host_ops iic_host_ops = {
225}; 296};
226 297
227static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, 298static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
228 struct irq_host *host) 299 struct device_node *node)
229{ 300{
230 /* XXX FIXME: should locate the linux CPU number from the HW cpu 301 /* XXX FIXME: should locate the linux CPU number from the HW cpu
231 * number properly. We are lucky for now 302 * number properly. We are lucky for now
@@ -237,19 +308,19 @@ static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
237 308
238 iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe); 309 iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
239 iic->eoi_stack[0] = 0xff; 310 iic->eoi_stack[0] = 0xff;
240 iic->host = host; 311 iic->node = of_node_get(node);
241 out_be64(&iic->regs->prio, 0); 312 out_be64(&iic->regs->prio, 0);
242 313
243 printk(KERN_INFO "IIC for CPU %d at %lx mapped to %p, target id 0x%x\n", 314 printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
244 hw_cpu, addr, iic->regs, iic->target_id); 315 hw_cpu, iic->target_id, node->full_name);
245} 316}
246 317
247static int __init setup_iic(void) 318static int __init setup_iic(void)
248{ 319{
249 struct device_node *dn; 320 struct device_node *dn;
250 struct resource r0, r1; 321 struct resource r0, r1;
251 struct irq_host *host; 322 unsigned int node, cascade, found = 0;
252 int found = 0; 323 struct cbe_iic_regs *node_iic;
253 const u32 *np; 324 const u32 *np;
254 325
255 for (dn = NULL; 326 for (dn = NULL;
@@ -269,19 +340,33 @@ static int __init setup_iic(void)
269 of_node_put(dn); 340 of_node_put(dn);
270 return -ENODEV; 341 return -ENODEV;
271 } 342 }
272 host = NULL; 343 found++;
273 if (found < IIC_NODE_COUNT) { 344 init_one_iic(np[0], r0.start, dn);
274 host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, 345 init_one_iic(np[1], r1.start, dn);
275 IIC_SOURCE_COUNT, 346
276 &iic_host_ops, 347 /* Setup cascade for IO exceptions. XXX cleanup tricks to get
277 IIC_IRQ_INVALID); 348 * node vs CPU etc...
278 iic_hosts[found] = host; 349 * Note that we configure the IIC_IRR here with a hard coded
279 BUG_ON(iic_hosts[found] == NULL); 350 * priority of 1. We might want to improve that later.
280 iic_hosts[found]->host_data = of_node_get(dn); 351 */
281 found++; 352 node = np[0] >> 1;
282 } 353 node_iic = cbe_get_cpu_iic_regs(np[0]);
283 init_one_iic(np[0], r0.start, host); 354 cascade = node << IIC_IRQ_NODE_SHIFT;
284 init_one_iic(np[1], r1.start, host); 355 cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
356 cascade |= IIC_UNIT_IIC;
357 cascade = irq_create_mapping(iic_host, cascade);
358 if (cascade == NO_IRQ)
359 continue;
360 set_irq_data(cascade, node_iic);
361 set_irq_chained_handler(cascade , iic_ioexc_cascade);
362 out_be64(&node_iic->iic_ir,
363 (1 << 12) /* priority */ |
364 (node << 4) /* dest node */ |
365 IIC_UNIT_THREAD_0 /* route them to thread 0 */);
366 /* Flush pending (make sure it triggers if there is
367 * anything pending
368 */
369 out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
285 } 370 }
286 371
287 if (found) 372 if (found)
@@ -292,6 +377,12 @@ static int __init setup_iic(void)
292 377
293void __init iic_init_IRQ(void) 378void __init iic_init_IRQ(void)
294{ 379{
380 /* Setup an irq host data structure */
381 iic_host = irq_alloc_host(IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
382 &iic_host_ops, IIC_IRQ_INVALID);
383 BUG_ON(iic_host == NULL);
384 irq_set_default_host(iic_host);
385
295 /* Discover and initialize iics */ 386 /* Discover and initialize iics */
296 if (setup_iic() < 0) 387 if (setup_iic() < 0)
297 panic("IIC: Failed to initialize !\n"); 388 panic("IIC: Failed to initialize !\n");
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h
index 5560a92ec3ab..9ba1d3c17b4b 100644
--- a/arch/powerpc/platforms/cell/interrupt.h
+++ b/arch/powerpc/platforms/cell/interrupt.h
@@ -2,48 +2,76 @@
2#define ASM_CELL_PIC_H 2#define ASM_CELL_PIC_H
3#ifdef __KERNEL__ 3#ifdef __KERNEL__
4/* 4/*
5 * Mapping of IIC pending bits into per-node 5 * Mapping of IIC pending bits into per-node interrupt numbers.
6 * interrupt numbers.
7 * 6 *
8 * IRQ FF CC SS PP FF CC SS PP Description 7 * Interrupt numbers are in the range 0...0x1ff where the top bit
8 * (0x100) represent the source node. Only 2 nodes are supported with
9 * the current code though it's trivial to extend that if necessary using
10 * higher level bits
9 * 11 *
10 * 00-3f 80 02 +0 00 - 80 02 +0 3f South Bridge 12 * The bottom 8 bits are split into 2 type bits and 6 data bits that
11 * 00-3f 80 02 +b 00 - 80 02 +b 3f South Bridge 13 * depend on the type:
12 * 41-4a 80 00 +1 ** - 80 00 +a ** SPU Class 0
13 * 51-5a 80 01 +1 ** - 80 01 +a ** SPU Class 1
14 * 61-6a 80 02 +1 ** - 80 02 +a ** SPU Class 2
15 * 70-7f C0 ** ** 00 - C0 ** ** 0f IPI
16 * 14 *
17 * F flags 15 * 00 (0x00 | data) : normal interrupt. data is (class << 4) | source
18 * C class 16 * 01 (0x40 | data) : IO exception. data is the exception number as
19 * S source 17 * defined by bit numbers in IIC_SR
20 * P Priority 18 * 10 (0x80 | data) : IPI. data is the IPI number (obtained from the priority)
21 * + node number 19 * and node is always 0 (IPIs are per-cpu, their source is
22 * * don't care 20 * not relevant)
21 * 11 (0xc0 | data) : reserved
23 * 22 *
24 * A node consists of a Cell Broadband Engine and an optional 23 * In addition, interrupt number 0x80000000 is defined as always invalid
25 * south bridge device providing a maximum of 64 IRQs. 24 * (that is the node field is expected to never extend to move than 23 bits)
26 * The south bridge may be connected to either IOIF0
27 * or IOIF1.
28 * Each SPE is represented as three IRQ lines, one per
29 * interrupt class.
30 * 16 IRQ numbers are reserved for inter processor
31 * interruptions, although these are only used in the
32 * range of the first node.
33 * 25 *
34 * This scheme needs 128 IRQ numbers per BIF node ID,
35 * which means that with the total of 512 lines
36 * available, we can have a maximum of four nodes.
37 */ 26 */
38 27
39enum { 28enum {
40 IIC_IRQ_INVALID = 0xff, 29 IIC_IRQ_INVALID = 0x80000000u,
41 IIC_IRQ_MAX = 0x3f, 30 IIC_IRQ_NODE_MASK = 0x100,
42 IIC_IRQ_EXT_IOIF0 = 0x20, 31 IIC_IRQ_NODE_SHIFT = 8,
43 IIC_IRQ_EXT_IOIF1 = 0x2b, 32 IIC_IRQ_MAX = 0x1ff,
44 IIC_IRQ_IPI0 = 0x40, 33 IIC_IRQ_TYPE_MASK = 0xc0,
45 IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ 34 IIC_IRQ_TYPE_NORMAL = 0x00,
46 IIC_SOURCE_COUNT = 0x50, 35 IIC_IRQ_TYPE_IOEXC = 0x40,
36 IIC_IRQ_TYPE_IPI = 0x80,
37 IIC_IRQ_CLASS_SHIFT = 4,
38 IIC_IRQ_CLASS_0 = 0x00,
39 IIC_IRQ_CLASS_1 = 0x10,
40 IIC_IRQ_CLASS_2 = 0x20,
41 IIC_SOURCE_COUNT = 0x200,
42
43 /* Here are defined the various source/dest units. Avoid using those
44 * definitions if you can, they are mostly here for reference
45 */
46 IIC_UNIT_SPU_0 = 0x4,
47 IIC_UNIT_SPU_1 = 0x7,
48 IIC_UNIT_SPU_2 = 0x3,
49 IIC_UNIT_SPU_3 = 0x8,
50 IIC_UNIT_SPU_4 = 0x2,
51 IIC_UNIT_SPU_5 = 0x9,
52 IIC_UNIT_SPU_6 = 0x1,
53 IIC_UNIT_SPU_7 = 0xa,
54 IIC_UNIT_IOC_0 = 0x0,
55 IIC_UNIT_IOC_1 = 0xb,
56 IIC_UNIT_THREAD_0 = 0xe, /* target only */
57 IIC_UNIT_THREAD_1 = 0xf, /* target only */
58 IIC_UNIT_IIC = 0xe, /* source only (IO exceptions) */
59
60 /* Base numbers for the external interrupts */
61 IIC_IRQ_EXT_IOIF0 =
62 IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_0,
63 IIC_IRQ_EXT_IOIF1 =
64 IIC_IRQ_TYPE_NORMAL | IIC_IRQ_CLASS_2 | IIC_UNIT_IOC_1,
65
66 /* Base numbers for the IIC_ISR interrupts */
67 IIC_IRQ_IOEX_TMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 63,
68 IIC_IRQ_IOEX_PMI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 62,
69 IIC_IRQ_IOEX_ATI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 61,
70 IIC_IRQ_IOEX_MATBFI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 60,
71 IIC_IRQ_IOEX_ELDI = IIC_IRQ_TYPE_IOEXC | IIC_IRQ_CLASS_1 | 59,
72
73 /* Which bits in IIC_ISR are edge sensitive */
74 IIC_ISR_EDGE_MASK = 0x4ul,
47}; 75};
48 76
49extern void iic_init_IRQ(void); 77extern void iic_init_IRQ(void);
@@ -52,7 +80,6 @@ extern void iic_request_IPIs(void);
52extern void iic_setup_cpu(void); 80extern void iic_setup_cpu(void);
53 81
54extern u8 iic_get_target_id(int cpu); 82extern u8 iic_get_target_id(int cpu);
55extern struct irq_host *iic_get_irq_host(int node);
56 83
57extern void spider_init_IRQ(void); 84extern void spider_init_IRQ(void);
58 85
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 742a03282b44..608b1ebc56b2 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -243,7 +243,6 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
243 const u32 *imap, *tmp; 243 const u32 *imap, *tmp;
244 int imaplen, intsize, unit; 244 int imaplen, intsize, unit;
245 struct device_node *iic; 245 struct device_node *iic;
246 struct irq_host *iic_host;
247 246
248#if 0 /* Enable that when we have a way to retreive the node as well */ 247#if 0 /* Enable that when we have a way to retreive the node as well */
249 /* First, we check wether we have a real "interrupts" in the device 248 /* First, we check wether we have a real "interrupts" in the device
@@ -289,11 +288,11 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
289 * the iic host from the iic OF node, but that way I'm still compatible 288 * the iic host from the iic OF node, but that way I'm still compatible
290 * with really really old old firmwares for which we don't have a node 289 * with really really old old firmwares for which we don't have a node
291 */ 290 */
292 iic_host = iic_get_irq_host(pic->node_id);
293 if (iic_host == NULL)
294 return NO_IRQ;
295 /* Manufacture an IIC interrupt number of class 2 */ 291 /* Manufacture an IIC interrupt number of class 2 */
296 virq = irq_create_mapping(iic_host, 0x20 | unit); 292 virq = irq_create_mapping(NULL,
293 (pic->node_id << IIC_IRQ_NODE_SHIFT) |
294 (2 << IIC_IRQ_CLASS_SHIFT) |
295 unit);
297 if (virq == NO_IRQ) 296 if (virq == NO_IRQ)
298 printk(KERN_ERR "spider_pic: failed to map cascade !"); 297 printk(KERN_ERR "spider_pic: failed to map cascade !");
299 return virq; 298 return virq;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 0f5c8ebc7fc3..f78680346e5f 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -568,24 +568,23 @@ static void spu_unmap(struct spu *spu)
568/* This function shall be abstracted for HV platforms */ 568/* This function shall be abstracted for HV platforms */
569static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) 569static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
570{ 570{
571 struct irq_host *host;
572 unsigned int isrc; 571 unsigned int isrc;
573 const u32 *tmp; 572 const u32 *tmp;
574 573
575 host = iic_get_irq_host(spu->node); 574 /* Get the interrupt source unit from the device-tree */
576 if (host == NULL)
577 return -ENODEV;
578
579 /* Get the interrupt source from the device-tree */
580 tmp = get_property(np, "isrc", NULL); 575 tmp = get_property(np, "isrc", NULL);
581 if (!tmp) 576 if (!tmp)
582 return -ENODEV; 577 return -ENODEV;
583 spu->isrc = isrc = tmp[0]; 578 isrc = tmp[0];
579
580 /* Add the node number */
581 isrc |= spu->node << IIC_IRQ_NODE_SHIFT;
582 spu->isrc = isrc;
584 583
585 /* Now map interrupts of all 3 classes */ 584 /* Now map interrupts of all 3 classes */
586 spu->irqs[0] = irq_create_mapping(host, 0x00 | isrc); 585 spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
587 spu->irqs[1] = irq_create_mapping(host, 0x10 | isrc); 586 spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
588 spu->irqs[2] = irq_create_mapping(host, 0x20 | isrc); 587 spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);
589 588
590 /* Right now, we only fail if class 2 failed */ 589 /* Right now, we only fail if class 2 failed */
591 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0; 590 return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index 3eb12065df23..4aa165e010d9 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -262,14 +262,6 @@ void __init iSeries_pci_final_fixup(void)
262 mf_display_src(0xC9000200); 262 mf_display_src(0xC9000200);
263} 263}
264 264
265void pcibios_fixup_bus(struct pci_bus *PciBus)
266{
267}
268
269void pcibios_fixup_resources(struct pci_dev *pdev)
270{
271}
272
273/* 265/*
274 * Look down the chain to find the matching Device Device 266 * Look down the chain to find the matching Device Device
275 */ 267 */
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 7f1953066ff8..a0ff7ba7d666 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -649,15 +649,21 @@ static void iseries_dedicated_idle(void)
649void __init iSeries_init_IRQ(void) { } 649void __init iSeries_init_IRQ(void) { }
650#endif 650#endif
651 651
652/*
653 * iSeries has no legacy IO, anything calling this function has to
654 * fail or bad things will happen
655 */
656static int iseries_check_legacy_ioport(unsigned int baseport)
657{
658 return -ENODEV;
659}
660
652static int __init iseries_probe(void) 661static int __init iseries_probe(void)
653{ 662{
654 unsigned long root = of_get_flat_dt_root(); 663 unsigned long root = of_get_flat_dt_root();
655 if (!of_flat_dt_is_compatible(root, "IBM,iSeries")) 664 if (!of_flat_dt_is_compatible(root, "IBM,iSeries"))
656 return 0; 665 return 0;
657 666
658 powerpc_firmware_features |= FW_FEATURE_ISERIES;
659 powerpc_firmware_features |= FW_FEATURE_LPAR;
660
661 hpte_init_iSeries(); 667 hpte_init_iSeries();
662 668
663 return 1; 669 return 1;
@@ -680,6 +686,7 @@ define_machine(iseries) {
680 .calibrate_decr = generic_calibrate_decr, 686 .calibrate_decr = generic_calibrate_decr,
681 .progress = iSeries_progress, 687 .progress = iSeries_progress,
682 .probe = iseries_probe, 688 .probe = iseries_probe,
689 .check_legacy_ioport = iseries_check_legacy_ioport,
683 /* XXX Implement enable_pmcs for iSeries */ 690 /* XXX Implement enable_pmcs for iSeries */
684}; 691};
685 692
@@ -687,6 +694,9 @@ void * __init iSeries_early_setup(void)
687{ 694{
688 unsigned long phys_mem_size; 695 unsigned long phys_mem_size;
689 696
697 powerpc_firmware_features |= FW_FEATURE_ISERIES;
698 powerpc_firmware_features |= FW_FEATURE_LPAR;
699
690 iSeries_fixup_klimit(); 700 iSeries_fixup_klimit();
691 701
692 /* 702 /*
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index ce1a235855f7..379db05b0082 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -111,8 +111,6 @@ void udbg_scc_init(int force_scc)
111 pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch, 111 pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch,
112 PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1); 112 PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1);
113 113
114
115 /* Setup for 57600 8N1 */
116 if (ch == ch_a) 114 if (ch == ch_a)
117 addr += 0x20; 115 addr += 0x20;
118 sccc = ioremap(addr & PAGE_MASK, PAGE_SIZE) ; 116 sccc = ioremap(addr & PAGE_MASK, PAGE_SIZE) ;
@@ -125,9 +123,21 @@ void udbg_scc_init(int force_scc)
125 x = in_8(sccc); 123 x = in_8(sccc);
126 out_8(sccc, 0x09); /* reset A or B side */ 124 out_8(sccc, 0x09); /* reset A or B side */
127 out_8(sccc, 0xc0); 125 out_8(sccc, 0xc0);
126
127 /* If SCC was the OF output port, read the BRG value, else
128 * Setup for 57600 8N1
129 */
130 if (ch_def != NULL) {
131 out_8(sccc, 13);
132 scc_inittab[1] = in_8(sccc);
133 out_8(sccc, 12);
134 scc_inittab[3] = in_8(sccc);
135 }
136
128 for (i = 0; i < sizeof(scc_inittab); ++i) 137 for (i = 0; i < sizeof(scc_inittab); ++i)
129 out_8(sccc, scc_inittab[i]); 138 out_8(sccc, scc_inittab[i]);
130 139
140
131 udbg_putc = udbg_scc_putc; 141 udbg_putc = udbg_scc_putc;
132 udbg_getc = udbg_scc_getc; 142 udbg_getc = udbg_scc_getc;
133 udbg_getc_poll = udbg_scc_getc_poll; 143 udbg_getc_poll = udbg_scc_getc_poll;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 43dbf737698c..f82b13e531a3 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -180,7 +180,7 @@ static void __init pseries_mpic_init_IRQ(void)
180 180
181 cascade_irq = irq_of_parse_and_map(cascade, 0); 181 cascade_irq = irq_of_parse_and_map(cascade, 0);
182 if (cascade == NO_IRQ) { 182 if (cascade == NO_IRQ) {
183 printk(KERN_ERR "xics: failed to map cascade interrupt"); 183 printk(KERN_ERR "mpic: failed to map cascade interrupt");
184 return; 184 return;
185 } 185 }
186 186
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index f15f4d78aee9..91f052d8cce0 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
12obj-$(CONFIG_FSL_SOC) += fsl_soc.o 12obj-$(CONFIG_FSL_SOC) += fsl_soc.o
13obj-$(CONFIG_PPC_TODC) += todc.o 13obj-$(CONFIG_PPC_TODC) += todc.o
14obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o 14obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
15obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
15 16
16ifeq ($(CONFIG_PPC_MERGE),y) 17ifeq ($(CONFIG_PPC_MERGE),y)
17obj-$(CONFIG_PPC_I8259) += i8259.o 18obj-$(CONFIG_PPC_I8259) += i8259.o
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index 51752990f7b9..28b018994746 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -147,7 +147,7 @@ static struct irq_chip cpm2_pic = {
147 .end = cpm2_end_irq, 147 .end = cpm2_end_irq,
148}; 148};
149 149
150int cpm2_get_irq(struct pt_regs *regs) 150unsigned int cpm2_get_irq(struct pt_regs *regs)
151{ 151{
152 int irq; 152 int irq;
153 unsigned long bits; 153 unsigned long bits;
diff --git a/arch/powerpc/sysdev/cpm2_pic.h b/arch/powerpc/sysdev/cpm2_pic.h
index d63e45d4df58..3c513e5a688e 100644
--- a/arch/powerpc/sysdev/cpm2_pic.h
+++ b/arch/powerpc/sysdev/cpm2_pic.h
@@ -3,7 +3,7 @@
3 3
4extern intctl_cpm2_t *cpm2_intctl; 4extern intctl_cpm2_t *cpm2_intctl;
5 5
6extern int cpm2_get_irq(struct pt_regs *regs); 6extern unsigned int cpm2_get_irq(struct pt_regs *regs);
7 7
8extern void cpm2_pic_init(struct device_node*); 8extern void cpm2_pic_init(struct device_node*);
9 9
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 022ed275ea68..7d759f1c26b1 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -37,6 +37,7 @@
37#include <asm/cpm2.h> 37#include <asm/cpm2.h>
38 38
39extern void init_fcc_ioports(struct fs_platform_info*); 39extern void init_fcc_ioports(struct fs_platform_info*);
40extern void init_scc_ioports(struct fs_uart_platform_info*);
40static phys_addr_t immrbase = -1; 41static phys_addr_t immrbase = -1;
41 42
42phys_addr_t get_immrbase(void) 43phys_addr_t get_immrbase(void)
@@ -566,7 +567,7 @@ static int __init fs_enet_of_init(void)
566 struct resource r[4]; 567 struct resource r[4];
567 struct device_node *phy, *mdio; 568 struct device_node *phy, *mdio;
568 struct fs_platform_info fs_enet_data; 569 struct fs_platform_info fs_enet_data;
569 const unsigned int *id, *phy_addr; 570 const unsigned int *id, *phy_addr, phy_irq;
570 const void *mac_addr; 571 const void *mac_addr;
571 const phandle *ph; 572 const phandle *ph;
572 const char *model; 573 const char *model;
@@ -588,6 +589,7 @@ static int __init fs_enet_of_init(void)
588 if (ret) 589 if (ret)
589 goto err; 590 goto err;
590 r[2].name = fcc_regs_c; 591 r[2].name = fcc_regs_c;
592 fs_enet_data.fcc_regs_c = r[2].start;
591 593
592 r[3].start = r[3].end = irq_of_parse_and_map(np, 0); 594 r[3].start = r[3].end = irq_of_parse_and_map(np, 0);
593 r[3].flags = IORESOURCE_IRQ; 595 r[3].flags = IORESOURCE_IRQ;
@@ -620,6 +622,8 @@ static int __init fs_enet_of_init(void)
620 phy_addr = get_property(phy, "reg", NULL); 622 phy_addr = get_property(phy, "reg", NULL);
621 fs_enet_data.phy_addr = *phy_addr; 623 fs_enet_data.phy_addr = *phy_addr;
622 624
625 phy_irq = get_property(phy, "interrupts", NULL);
626
623 id = get_property(np, "device-id", NULL); 627 id = get_property(np, "device-id", NULL);
624 fs_enet_data.fs_no = *id; 628 fs_enet_data.fs_no = *id;
625 strcpy(fs_enet_data.fs_type, model); 629 strcpy(fs_enet_data.fs_type, model);
@@ -637,6 +641,7 @@ static int __init fs_enet_of_init(void)
637 641
638 if (strstr(model, "FCC")) { 642 if (strstr(model, "FCC")) {
639 int fcc_index = *id - 1; 643 int fcc_index = *id - 1;
644 unsigned char* mdio_bb_prop;
640 645
641 fs_enet_data.dpram_offset = (u32)cpm_dpram_addr(0); 646 fs_enet_data.dpram_offset = (u32)cpm_dpram_addr(0);
642 fs_enet_data.rx_ring = 32; 647 fs_enet_data.rx_ring = 32;
@@ -652,14 +657,57 @@ static int __init fs_enet_of_init(void)
652 (u32)res.start, fs_enet_data.phy_addr); 657 (u32)res.start, fs_enet_data.phy_addr);
653 fs_enet_data.bus_id = (char*)&bus_id[(*id)]; 658 fs_enet_data.bus_id = (char*)&bus_id[(*id)];
654 fs_enet_data.init_ioports = init_fcc_ioports; 659 fs_enet_data.init_ioports = init_fcc_ioports;
655 }
656 660
657 of_node_put(phy); 661 mdio_bb_prop = get_property(phy, "bitbang", NULL);
658 of_node_put(mdio); 662 if (mdio_bb_prop) {
663 struct platform_device *fs_enet_mdio_bb_dev;
664 struct fs_mii_bb_platform_info fs_enet_mdio_bb_data;
665
666 fs_enet_mdio_bb_dev =
667 platform_device_register_simple("fsl-bb-mdio",
668 i, NULL, 0);
669 memset(&fs_enet_mdio_bb_data, 0,
670 sizeof(struct fs_mii_bb_platform_info));
671 fs_enet_mdio_bb_data.mdio_dat.bit =
672 mdio_bb_prop[0];
673 fs_enet_mdio_bb_data.mdio_dir.bit =
674 mdio_bb_prop[1];
675 fs_enet_mdio_bb_data.mdc_dat.bit =
676 mdio_bb_prop[2];
677 fs_enet_mdio_bb_data.mdio_port =
678 mdio_bb_prop[3];
679 fs_enet_mdio_bb_data.mdc_port =
680 mdio_bb_prop[4];
681 fs_enet_mdio_bb_data.delay =
682 mdio_bb_prop[5];
683
684 fs_enet_mdio_bb_data.irq[0] = phy_irq[0];
685 fs_enet_mdio_bb_data.irq[1] = -1;
686 fs_enet_mdio_bb_data.irq[2] = -1;
687 fs_enet_mdio_bb_data.irq[3] = phy_irq[0];
688 fs_enet_mdio_bb_data.irq[31] = -1;
689
690 fs_enet_mdio_bb_data.mdio_dat.offset =
691 (u32)&cpm2_immr->im_ioport.iop_pdatc;
692 fs_enet_mdio_bb_data.mdio_dir.offset =
693 (u32)&cpm2_immr->im_ioport.iop_pdirc;
694 fs_enet_mdio_bb_data.mdc_dat.offset =
695 (u32)&cpm2_immr->im_ioport.iop_pdatc;
696
697 ret = platform_device_add_data(
698 fs_enet_mdio_bb_dev,
699 &fs_enet_mdio_bb_data,
700 sizeof(struct fs_mii_bb_platform_info));
701 if (ret)
702 goto unreg;
703 }
704
705 of_node_put(phy);
706 of_node_put(mdio);
659 707
660 ret = platform_device_add_data(fs_enet_dev, &fs_enet_data, 708 ret = platform_device_add_data(fs_enet_dev, &fs_enet_data,
661 sizeof(struct 709 sizeof(struct
662 fs_platform_info)); 710 fs_platform_info));
663 if (ret) 711 if (ret)
664 goto unreg; 712 goto unreg;
665 } 713 }
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 723972bb5bd9..3ee03a9a98fa 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -341,7 +341,7 @@ static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
341 u8 id = readb(devbase + pos + PCI_CAP_LIST_ID); 341 u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
342 if (id == PCI_CAP_ID_HT) { 342 if (id == PCI_CAP_ID_HT) {
343 id = readb(devbase + pos + 3); 343 id = readb(devbase + pos + 3);
344 if (id == 0x80) 344 if (id == HT_CAPTYPE_IRQ)
345 break; 345 break;
346 } 346 }
347 } 347 }
diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig
new file mode 100644
index 000000000000..a725e80befa8
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/Kconfig
@@ -0,0 +1,30 @@
1#
2# QE Communication options
3#
4
5menu "QE Options"
6 depends on QUICC_ENGINE
7
8config UCC_SLOW
9 bool "UCC Slow Protocols Support"
10 default n
11 select UCC
12 help
13 This option provides qe_lib support to UCC slow
14 protocols: UART, BISYNC, QMC
15
16config UCC_FAST
17 bool "UCC Fast Protocols Support"
18 default n
19 select UCC
20 select UCC_SLOW
21 help
22 This option provides qe_lib support to UCC fast
23 protocols: HDLC, Ethernet, ATM, transparent
24
25config UCC
26 bool
27 default y if UCC_FAST || UCC_SLOW
28
29endmenu
30
diff --git a/arch/powerpc/sysdev/qe_lib/Makefile b/arch/powerpc/sysdev/qe_lib/Makefile
new file mode 100644
index 000000000000..874fe1a5b1cf
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the linux ppc-specific parts of QE
3#
4obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_ic.o qe_io.o
5
6obj-$(CONFIG_UCC) += ucc.o
7obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
8obj-$(CONFIG_UCC_FAST) += ucc_fast.o
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
new file mode 100644
index 000000000000..2bae632d3ad7
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -0,0 +1,353 @@
1/*
2 * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
3 *
4 * Authors: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
6 * Based on cpm2_common.c from Dan Malek (dmalek@jlc.net)
7 *
8 * Description:
9 * General Purpose functions for the global management of the
10 * QUICC Engine (QE).
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/param.h>
21#include <linux/string.h>
22#include <linux/mm.h>
23#include <linux/interrupt.h>
24#include <linux/bootmem.h>
25#include <linux/module.h>
26#include <linux/delay.h>
27#include <linux/ioport.h>
28#include <asm/irq.h>
29#include <asm/page.h>
30#include <asm/pgtable.h>
31#include <asm/immap_qe.h>
32#include <asm/qe.h>
33#include <asm/prom.h>
34#include <asm/rheap.h>
35
36static void qe_snums_init(void);
37static void qe_muram_init(void);
38static int qe_sdma_init(void);
39
40static DEFINE_SPINLOCK(qe_lock);
41
42/* QE snum state */
43enum qe_snum_state {
44 QE_SNUM_STATE_USED,
45 QE_SNUM_STATE_FREE
46};
47
48/* QE snum */
49struct qe_snum {
50 u8 num;
51 enum qe_snum_state state;
52};
53
54/* We allocate this here because it is used almost exclusively for
55 * the communication processor devices.
56 */
57struct qe_immap *qe_immr = NULL;
58EXPORT_SYMBOL(qe_immr);
59
60static struct qe_snum snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
61
62static phys_addr_t qebase = -1;
63
64phys_addr_t get_qe_base(void)
65{
66 struct device_node *qe;
67
68 if (qebase != -1)
69 return qebase;
70
71 qe = of_find_node_by_type(NULL, "qe");
72 if (qe) {
73 unsigned int size;
74 const void *prop = get_property(qe, "reg", &size);
75 qebase = of_translate_address(qe, prop);
76 of_node_put(qe);
77 };
78
79 return qebase;
80}
81
82EXPORT_SYMBOL(get_qe_base);
83
84void qe_reset(void)
85{
86 if (qe_immr == NULL)
87 qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
88
89 qe_snums_init();
90
91 qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
92 QE_CR_PROTOCOL_UNSPECIFIED, 0);
93
94 /* Reclaim the MURAM memory for our use. */
95 qe_muram_init();
96
97 if (qe_sdma_init())
98 panic("sdma init failed!");
99}
100
101int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
102{
103 unsigned long flags;
104 u8 mcn_shift = 0, dev_shift = 0;
105
106 spin_lock_irqsave(&qe_lock, flags);
107 if (cmd == QE_RESET) {
108 out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
109 } else {
110 if (cmd == QE_ASSIGN_PAGE) {
111 /* Here device is the SNUM, not sub-block */
112 dev_shift = QE_CR_SNUM_SHIFT;
113 } else if (cmd == QE_ASSIGN_RISC) {
114 /* Here device is the SNUM, and mcnProtocol is
115 * e_QeCmdRiscAssignment value */
116 dev_shift = QE_CR_SNUM_SHIFT;
117 mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
118 } else {
119 if (device == QE_CR_SUBBLOCK_USB)
120 mcn_shift = QE_CR_MCN_USB_SHIFT;
121 else
122 mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
123 }
124
125 out_be32(&qe_immr->cp.cecdr,
126 immrbar_virt_to_phys((void *)cmd_input));
127 out_be32(&qe_immr->cp.cecr,
128 (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
129 mcn_protocol << mcn_shift));
130 }
131
132 /* wait for the QE_CR_FLG to clear */
133 while(in_be32(&qe_immr->cp.cecr) & QE_CR_FLG)
134 cpu_relax();
135 spin_unlock_irqrestore(&qe_lock, flags);
136
137 return 0;
138}
139EXPORT_SYMBOL(qe_issue_cmd);
140
141/* Set a baud rate generator. This needs lots of work. There are
142 * 16 BRGs, which can be connected to the QE channels or output
143 * as clocks. The BRGs are in two different block of internal
144 * memory mapped space.
145 * The baud rate clock is the system clock divided by something.
146 * It was set up long ago during the initial boot phase and is
147 * is given to us.
148 * Baud rate clocks are zero-based in the driver code (as that maps
149 * to port numbers). Documentation uses 1-based numbering.
150 */
151static unsigned int brg_clk = 0;
152
153unsigned int get_brg_clk(void)
154{
155 struct device_node *qe;
156 if (brg_clk)
157 return brg_clk;
158
159 qe = of_find_node_by_type(NULL, "qe");
160 if (qe) {
161 unsigned int size;
162 const u32 *prop = get_property(qe, "brg-frequency", &size);
163 brg_clk = *prop;
164 of_node_put(qe);
165 };
166 return brg_clk;
167}
168
169/* This function is used by UARTS, or anything else that uses a 16x
170 * oversampled clock.
171 */
172void qe_setbrg(u32 brg, u32 rate)
173{
174 volatile u32 *bp;
175 u32 divisor, tempval;
176 int div16 = 0;
177
178 bp = &qe_immr->brg.brgc1;
179 bp += brg;
180
181 divisor = (get_brg_clk() / rate);
182 if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
183 div16 = 1;
184 divisor /= 16;
185 }
186
187 tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE;
188 if (div16)
189 tempval |= QE_BRGC_DIV16;
190
191 out_be32(bp, tempval);
192}
193
194/* Initialize SNUMs (thread serial numbers) according to
195 * QE Module Control chapter, SNUM table
196 */
197static void qe_snums_init(void)
198{
199 int i;
200 static const u8 snum_init[] = {
201 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
202 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
203 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
204 0xD8, 0xD9, 0xE8, 0xE9,
205 };
206
207 for (i = 0; i < QE_NUM_OF_SNUM; i++) {
208 snums[i].num = snum_init[i];
209 snums[i].state = QE_SNUM_STATE_FREE;
210 }
211}
212
213int qe_get_snum(void)
214{
215 unsigned long flags;
216 int snum = -EBUSY;
217 int i;
218
219 spin_lock_irqsave(&qe_lock, flags);
220 for (i = 0; i < QE_NUM_OF_SNUM; i++) {
221 if (snums[i].state == QE_SNUM_STATE_FREE) {
222 snums[i].state = QE_SNUM_STATE_USED;
223 snum = snums[i].num;
224 break;
225 }
226 }
227 spin_unlock_irqrestore(&qe_lock, flags);
228
229 return snum;
230}
231EXPORT_SYMBOL(qe_get_snum);
232
233void qe_put_snum(u8 snum)
234{
235 int i;
236
237 for (i = 0; i < QE_NUM_OF_SNUM; i++) {
238 if (snums[i].num == snum) {
239 snums[i].state = QE_SNUM_STATE_FREE;
240 break;
241 }
242 }
243}
244EXPORT_SYMBOL(qe_put_snum);
245
246static int qe_sdma_init(void)
247{
248 struct sdma *sdma = &qe_immr->sdma;
249 u32 sdma_buf_offset;
250
251 if (!sdma)
252 return -ENODEV;
253
254 /* allocate 2 internal temporary buffers (512 bytes size each) for
255 * the SDMA */
256 sdma_buf_offset = qe_muram_alloc(512 * 2, 64);
257 if (IS_MURAM_ERR(sdma_buf_offset))
258 return -ENOMEM;
259
260 out_be32(&sdma->sdebcr, sdma_buf_offset & QE_SDEBCR_BA_MASK);
261 out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | (0x1 >>
262 QE_SDMR_CEN_SHIFT)));
263
264 return 0;
265}
266
267/*
268 * muram_alloc / muram_free bits.
269 */
270static DEFINE_SPINLOCK(qe_muram_lock);
271
272/* 16 blocks should be enough to satisfy all requests
273 * until the memory subsystem goes up... */
274static rh_block_t qe_boot_muram_rh_block[16];
275static rh_info_t qe_muram_info;
276
277static void qe_muram_init(void)
278{
279 struct device_node *np;
280 u32 address;
281 u64 size;
282 unsigned int flags;
283
284 /* initialize the info header */
285 rh_init(&qe_muram_info, 1,
286 sizeof(qe_boot_muram_rh_block) /
287 sizeof(qe_boot_muram_rh_block[0]), qe_boot_muram_rh_block);
288
289 /* Attach the usable muram area */
290 /* XXX: This is a subset of the available muram. It
291 * varies with the processor and the microcode patches activated.
292 */
293 if ((np = of_find_node_by_name(NULL, "data-only")) != NULL) {
294 address = *of_get_address(np, 0, &size, &flags);
295 of_node_put(np);
296 rh_attach_region(&qe_muram_info,
297 (void *)address, (int)size);
298 }
299}
300
301/* This function returns an index into the MURAM area.
302 */
303u32 qe_muram_alloc(u32 size, u32 align)
304{
305 void *start;
306 unsigned long flags;
307
308 spin_lock_irqsave(&qe_muram_lock, flags);
309 start = rh_alloc_align(&qe_muram_info, size, align, "QE");
310 spin_unlock_irqrestore(&qe_muram_lock, flags);
311
312 return (u32) start;
313}
314EXPORT_SYMBOL(qe_muram_alloc);
315
316int qe_muram_free(u32 offset)
317{
318 int ret;
319 unsigned long flags;
320
321 spin_lock_irqsave(&qe_muram_lock, flags);
322 ret = rh_free(&qe_muram_info, (void *)offset);
323 spin_unlock_irqrestore(&qe_muram_lock, flags);
324
325 return ret;
326}
327EXPORT_SYMBOL(qe_muram_free);
328
329/* not sure if this is ever needed */
330u32 qe_muram_alloc_fixed(u32 offset, u32 size)
331{
332 void *start;
333 unsigned long flags;
334
335 spin_lock_irqsave(&qe_muram_lock, flags);
336 start = rh_alloc_fixed(&qe_muram_info, (void *)offset, size, "commproc");
337 spin_unlock_irqrestore(&qe_muram_lock, flags);
338
339 return (u32) start;
340}
341EXPORT_SYMBOL(qe_muram_alloc_fixed);
342
343void qe_muram_dump(void)
344{
345 rh_dump(&qe_muram_info);
346}
347EXPORT_SYMBOL(qe_muram_dump);
348
349void *qe_muram_addr(u32 offset)
350{
351 return (void *)&qe_immr->muram[offset];
352}
353EXPORT_SYMBOL(qe_muram_addr);
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
new file mode 100644
index 000000000000..c229d07d4957
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -0,0 +1,555 @@
1/*
2 * arch/powerpc/sysdev/qe_lib/qe_ic.c
3 *
4 * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
5 *
6 * Author: Li Yang <leoli@freescale.com>
7 * Based on code from Shlomi Gridish <gridish@freescale.com>
8 *
9 * QUICC ENGINE Interrupt Controller
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/reboot.h>
21#include <linux/slab.h>
22#include <linux/stddef.h>
23#include <linux/sched.h>
24#include <linux/signal.h>
25#include <linux/sysdev.h>
26#include <linux/device.h>
27#include <linux/bootmem.h>
28#include <linux/spinlock.h>
29#include <asm/irq.h>
30#include <asm/io.h>
31#include <asm/prom.h>
32#include <asm/qe_ic.h>
33
34#include "qe_ic.h"
35
36static DEFINE_SPINLOCK(qe_ic_lock);
37
38static struct qe_ic_info qe_ic_info[] = {
39 [1] = {
40 .mask = 0x00008000,
41 .mask_reg = QEIC_CIMR,
42 .pri_code = 0,
43 .pri_reg = QEIC_CIPWCC,
44 },
45 [2] = {
46 .mask = 0x00004000,
47 .mask_reg = QEIC_CIMR,
48 .pri_code = 1,
49 .pri_reg = QEIC_CIPWCC,
50 },
51 [3] = {
52 .mask = 0x00002000,
53 .mask_reg = QEIC_CIMR,
54 .pri_code = 2,
55 .pri_reg = QEIC_CIPWCC,
56 },
57 [10] = {
58 .mask = 0x00000040,
59 .mask_reg = QEIC_CIMR,
60 .pri_code = 1,
61 .pri_reg = QEIC_CIPZCC,
62 },
63 [11] = {
64 .mask = 0x00000020,
65 .mask_reg = QEIC_CIMR,
66 .pri_code = 2,
67 .pri_reg = QEIC_CIPZCC,
68 },
69 [12] = {
70 .mask = 0x00000010,
71 .mask_reg = QEIC_CIMR,
72 .pri_code = 3,
73 .pri_reg = QEIC_CIPZCC,
74 },
75 [13] = {
76 .mask = 0x00000008,
77 .mask_reg = QEIC_CIMR,
78 .pri_code = 4,
79 .pri_reg = QEIC_CIPZCC,
80 },
81 [14] = {
82 .mask = 0x00000004,
83 .mask_reg = QEIC_CIMR,
84 .pri_code = 5,
85 .pri_reg = QEIC_CIPZCC,
86 },
87 [15] = {
88 .mask = 0x00000002,
89 .mask_reg = QEIC_CIMR,
90 .pri_code = 6,
91 .pri_reg = QEIC_CIPZCC,
92 },
93 [20] = {
94 .mask = 0x10000000,
95 .mask_reg = QEIC_CRIMR,
96 .pri_code = 3,
97 .pri_reg = QEIC_CIPRTA,
98 },
99 [25] = {
100 .mask = 0x00800000,
101 .mask_reg = QEIC_CRIMR,
102 .pri_code = 0,
103 .pri_reg = QEIC_CIPRTB,
104 },
105 [26] = {
106 .mask = 0x00400000,
107 .mask_reg = QEIC_CRIMR,
108 .pri_code = 1,
109 .pri_reg = QEIC_CIPRTB,
110 },
111 [27] = {
112 .mask = 0x00200000,
113 .mask_reg = QEIC_CRIMR,
114 .pri_code = 2,
115 .pri_reg = QEIC_CIPRTB,
116 },
117 [28] = {
118 .mask = 0x00100000,
119 .mask_reg = QEIC_CRIMR,
120 .pri_code = 3,
121 .pri_reg = QEIC_CIPRTB,
122 },
123 [32] = {
124 .mask = 0x80000000,
125 .mask_reg = QEIC_CIMR,
126 .pri_code = 0,
127 .pri_reg = QEIC_CIPXCC,
128 },
129 [33] = {
130 .mask = 0x40000000,
131 .mask_reg = QEIC_CIMR,
132 .pri_code = 1,
133 .pri_reg = QEIC_CIPXCC,
134 },
135 [34] = {
136 .mask = 0x20000000,
137 .mask_reg = QEIC_CIMR,
138 .pri_code = 2,
139 .pri_reg = QEIC_CIPXCC,
140 },
141 [35] = {
142 .mask = 0x10000000,
143 .mask_reg = QEIC_CIMR,
144 .pri_code = 3,
145 .pri_reg = QEIC_CIPXCC,
146 },
147 [36] = {
148 .mask = 0x08000000,
149 .mask_reg = QEIC_CIMR,
150 .pri_code = 4,
151 .pri_reg = QEIC_CIPXCC,
152 },
153 [40] = {
154 .mask = 0x00800000,
155 .mask_reg = QEIC_CIMR,
156 .pri_code = 0,
157 .pri_reg = QEIC_CIPYCC,
158 },
159 [41] = {
160 .mask = 0x00400000,
161 .mask_reg = QEIC_CIMR,
162 .pri_code = 1,
163 .pri_reg = QEIC_CIPYCC,
164 },
165 [42] = {
166 .mask = 0x00200000,
167 .mask_reg = QEIC_CIMR,
168 .pri_code = 2,
169 .pri_reg = QEIC_CIPYCC,
170 },
171 [43] = {
172 .mask = 0x00100000,
173 .mask_reg = QEIC_CIMR,
174 .pri_code = 3,
175 .pri_reg = QEIC_CIPYCC,
176 },
177};
178
179static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
180{
181 return in_be32(base + (reg >> 2));
182}
183
184static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
185 u32 value)
186{
187 out_be32(base + (reg >> 2), value);
188}
189
190static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
191{
192 return irq_desc[virq].chip_data;
193}
194
195#define virq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
196
197static void qe_ic_unmask_irq(unsigned int virq)
198{
199 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
200 unsigned int src = virq_to_hw(virq);
201 unsigned long flags;
202 u32 temp;
203
204 spin_lock_irqsave(&qe_ic_lock, flags);
205
206 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
207 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
208 temp | qe_ic_info[src].mask);
209
210 spin_unlock_irqrestore(&qe_ic_lock, flags);
211}
212
213static void qe_ic_mask_irq(unsigned int virq)
214{
215 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
216 unsigned int src = virq_to_hw(virq);
217 unsigned long flags;
218 u32 temp;
219
220 spin_lock_irqsave(&qe_ic_lock, flags);
221
222 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
223 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
224 temp & ~qe_ic_info[src].mask);
225
226 spin_unlock_irqrestore(&qe_ic_lock, flags);
227}
228
229static void qe_ic_mask_irq_and_ack(unsigned int virq)
230{
231 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
232 unsigned int src = virq_to_hw(virq);
233 unsigned long flags;
234 u32 temp;
235
236 spin_lock_irqsave(&qe_ic_lock, flags);
237
238 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
239 qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
240 temp & ~qe_ic_info[src].mask);
241
242 /* There is nothing to do for ack here, ack is handled in ISR */
243
244 spin_unlock_irqrestore(&qe_ic_lock, flags);
245}
246
247static struct irq_chip qe_ic_irq_chip = {
248 .typename = " QEIC ",
249 .unmask = qe_ic_unmask_irq,
250 .mask = qe_ic_mask_irq,
251 .mask_ack = qe_ic_mask_irq_and_ack,
252};
253
254static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
255{
256 struct qe_ic *qe_ic = h->host_data;
257
258 /* Exact match, unless qe_ic node is NULL */
259 return qe_ic->of_node == NULL || qe_ic->of_node == node;
260}
261
262static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
263 irq_hw_number_t hw)
264{
265 struct qe_ic *qe_ic = h->host_data;
266 struct irq_chip *chip;
267
268 if (qe_ic_info[hw].mask == 0) {
269 printk(KERN_ERR "Can't map reserved IRQ \n");
270 return -EINVAL;
271 }
272 /* Default chip */
273 chip = &qe_ic->hc_irq;
274
275 set_irq_chip_data(virq, qe_ic);
276 get_irq_desc(virq)->status |= IRQ_LEVEL;
277
278 set_irq_chip_and_handler(virq, chip, handle_level_irq);
279
280 return 0;
281}
282
283static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct,
284 u32 * intspec, unsigned int intsize,
285 irq_hw_number_t * out_hwirq,
286 unsigned int *out_flags)
287{
288 *out_hwirq = intspec[0];
289 if (intsize > 1)
290 *out_flags = intspec[1];
291 else
292 *out_flags = IRQ_TYPE_NONE;
293 return 0;
294}
295
296static struct irq_host_ops qe_ic_host_ops = {
297 .match = qe_ic_host_match,
298 .map = qe_ic_host_map,
299 .xlate = qe_ic_host_xlate,
300};
301
302/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
303unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic, struct pt_regs *regs)
304{
305 int irq;
306
307 BUG_ON(qe_ic == NULL);
308
309 /* get the interrupt source vector. */
310 irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
311
312 if (irq == 0)
313 return NO_IRQ;
314
315 return irq_linear_revmap(qe_ic->irqhost, irq);
316}
317
318/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
319unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic, struct pt_regs *regs)
320{
321 int irq;
322
323 BUG_ON(qe_ic == NULL);
324
325 /* get the interrupt source vector. */
326 irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
327
328 if (irq == 0)
329 return NO_IRQ;
330
331 return irq_linear_revmap(qe_ic->irqhost, irq);
332}
333
334/* FIXME: We mask all the QE Low interrupts while handling. We should
335 * let other interrupt come in, but BAD interrupts are generated */
336void fastcall qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc,
337 struct pt_regs *regs)
338{
339 struct qe_ic *qe_ic = desc->handler_data;
340 struct irq_chip *chip = irq_desc[irq].chip;
341
342 unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic, regs);
343
344 chip->mask_ack(irq);
345 if (cascade_irq != NO_IRQ)
346 generic_handle_irq(cascade_irq, regs);
347 chip->unmask(irq);
348}
349
350/* FIXME: We mask all the QE High interrupts while handling. We should
351 * let other interrupt come in, but BAD interrupts are generated */
352void fastcall qe_ic_cascade_high(unsigned int irq, struct irq_desc *desc,
353 struct pt_regs *regs)
354{
355 struct qe_ic *qe_ic = desc->handler_data;
356 struct irq_chip *chip = irq_desc[irq].chip;
357
358 unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic, regs);
359
360 chip->mask_ack(irq);
361 if (cascade_irq != NO_IRQ)
362 generic_handle_irq(cascade_irq, regs);
363 chip->unmask(irq);
364}
365
366void __init qe_ic_init(struct device_node *node, unsigned int flags)
367{
368 struct qe_ic *qe_ic;
369 struct resource res;
370 u32 temp = 0, ret, high_active = 0;
371
372 qe_ic = alloc_bootmem(sizeof(struct qe_ic));
373 if (qe_ic == NULL)
374 return;
375
376 memset(qe_ic, 0, sizeof(struct qe_ic));
377 qe_ic->of_node = node ? of_node_get(node) : NULL;
378
379 qe_ic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
380 NR_QE_IC_INTS, &qe_ic_host_ops, 0);
381 if (qe_ic->irqhost == NULL) {
382 of_node_put(node);
383 return;
384 }
385
386 ret = of_address_to_resource(node, 0, &res);
387 if (ret)
388 return;
389
390 qe_ic->regs = ioremap(res.start, res.end - res.start + 1);
391
392 qe_ic->irqhost->host_data = qe_ic;
393 qe_ic->hc_irq = qe_ic_irq_chip;
394
395 qe_ic->virq_high = irq_of_parse_and_map(node, 0);
396 qe_ic->virq_low = irq_of_parse_and_map(node, 1);
397
398 if (qe_ic->virq_low == NO_IRQ) {
399 printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
400 return;
401 }
402
403 /* default priority scheme is grouped. If spread mode is */
404 /* required, configure cicr accordingly. */
405 if (flags & QE_IC_SPREADMODE_GRP_W)
406 temp |= CICR_GWCC;
407 if (flags & QE_IC_SPREADMODE_GRP_X)
408 temp |= CICR_GXCC;
409 if (flags & QE_IC_SPREADMODE_GRP_Y)
410 temp |= CICR_GYCC;
411 if (flags & QE_IC_SPREADMODE_GRP_Z)
412 temp |= CICR_GZCC;
413 if (flags & QE_IC_SPREADMODE_GRP_RISCA)
414 temp |= CICR_GRTA;
415 if (flags & QE_IC_SPREADMODE_GRP_RISCB)
416 temp |= CICR_GRTB;
417
418 /* choose destination signal for highest priority interrupt */
419 if (flags & QE_IC_HIGH_SIGNAL) {
420 temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
421 high_active = 1;
422 }
423
424 qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
425
426 set_irq_data(qe_ic->virq_low, qe_ic);
427 set_irq_chained_handler(qe_ic->virq_low, qe_ic_cascade_low);
428
429 if (qe_ic->virq_high != NO_IRQ) {
430 set_irq_data(qe_ic->virq_high, qe_ic);
431 set_irq_chained_handler(qe_ic->virq_high, qe_ic_cascade_high);
432 }
433
434 printk("QEIC (%d IRQ sources) at %p\n", NR_QE_IC_INTS, qe_ic->regs);
435}
436
437void qe_ic_set_highest_priority(unsigned int virq, int high)
438{
439 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
440 unsigned int src = virq_to_hw(virq);
441 u32 temp = 0;
442
443 temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
444
445 temp &= ~CICR_HP_MASK;
446 temp |= src << CICR_HP_SHIFT;
447
448 temp &= ~CICR_HPIT_MASK;
449 temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
450
451 qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
452}
453
454/* Set Priority level within its group, from 1 to 8 */
455int qe_ic_set_priority(unsigned int virq, unsigned int priority)
456{
457 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
458 unsigned int src = virq_to_hw(virq);
459 u32 temp;
460
461 if (priority > 8 || priority == 0)
462 return -EINVAL;
463 if (src > 127)
464 return -EINVAL;
465 if (qe_ic_info[src].pri_reg == 0)
466 return -EINVAL;
467
468 temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
469
470 if (priority < 4) {
471 temp &= ~(0x7 << (32 - priority * 3));
472 temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
473 } else {
474 temp &= ~(0x7 << (24 - priority * 3));
475 temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
476 }
477
478 qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
479
480 return 0;
481}
482
483/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
484int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
485{
486 struct qe_ic *qe_ic = qe_ic_from_irq(virq);
487 unsigned int src = virq_to_hw(virq);
488 u32 temp, control_reg = QEIC_CICNR, shift = 0;
489
490 if (priority > 2 || priority == 0)
491 return -EINVAL;
492
493 switch (qe_ic_info[src].pri_reg) {
494 case QEIC_CIPZCC:
495 shift = CICNR_ZCC1T_SHIFT;
496 break;
497 case QEIC_CIPWCC:
498 shift = CICNR_WCC1T_SHIFT;
499 break;
500 case QEIC_CIPYCC:
501 shift = CICNR_YCC1T_SHIFT;
502 break;
503 case QEIC_CIPXCC:
504 shift = CICNR_XCC1T_SHIFT;
505 break;
506 case QEIC_CIPRTA:
507 shift = CRICR_RTA1T_SHIFT;
508 control_reg = QEIC_CRICR;
509 break;
510 case QEIC_CIPRTB:
511 shift = CRICR_RTB1T_SHIFT;
512 control_reg = QEIC_CRICR;
513 break;
514 default:
515 return -EINVAL;
516 }
517
518 shift += (2 - priority) * 2;
519 temp = qe_ic_read(qe_ic->regs, control_reg);
520 temp &= ~(SIGNAL_MASK << shift);
521 temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
522 qe_ic_write(qe_ic->regs, control_reg, temp);
523
524 return 0;
525}
526
527static struct sysdev_class qe_ic_sysclass = {
528 set_kset_name("qe_ic"),
529};
530
531static struct sys_device device_qe_ic = {
532 .id = 0,
533 .cls = &qe_ic_sysclass,
534};
535
536static int __init init_qe_ic_sysfs(void)
537{
538 int rc;
539
540 printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
541
542 rc = sysdev_class_register(&qe_ic_sysclass);
543 if (rc) {
544 printk(KERN_ERR "Failed registering qe_ic sys class\n");
545 return -ENODEV;
546 }
547 rc = sysdev_register(&device_qe_ic);
548 if (rc) {
549 printk(KERN_ERR "Failed registering qe_ic sys device\n");
550 return -ENODEV;
551 }
552 return 0;
553}
554
555subsys_initcall(init_qe_ic_sysfs);
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h
new file mode 100644
index 000000000000..9a631adb189d
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.h
@@ -0,0 +1,106 @@
1/*
2 * arch/powerpc/sysdev/qe_lib/qe_ic.h
3 *
4 * QUICC ENGINE Interrupt Controller Header
5 *
6 * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
7 *
8 * Author: Li Yang <leoli@freescale.com>
9 * Based on code from Shlomi Gridish <gridish@freescale.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16#ifndef _POWERPC_SYSDEV_QE_IC_H
17#define _POWERPC_SYSDEV_QE_IC_H
18
19#include <asm/qe_ic.h>
20
21#define NR_QE_IC_INTS 64
22
23/* QE IC registers offset */
24#define QEIC_CICR 0x00
25#define QEIC_CIVEC 0x04
26#define QEIC_CRIPNR 0x08
27#define QEIC_CIPNR 0x0c
28#define QEIC_CIPXCC 0x10
29#define QEIC_CIPYCC 0x14
30#define QEIC_CIPWCC 0x18
31#define QEIC_CIPZCC 0x1c
32#define QEIC_CIMR 0x20
33#define QEIC_CRIMR 0x24
34#define QEIC_CICNR 0x28
35#define QEIC_CIPRTA 0x30
36#define QEIC_CIPRTB 0x34
37#define QEIC_CRICR 0x3c
38#define QEIC_CHIVEC 0x60
39
40/* Interrupt priority registers */
41#define CIPCC_SHIFT_PRI0 29
42#define CIPCC_SHIFT_PRI1 26
43#define CIPCC_SHIFT_PRI2 23
44#define CIPCC_SHIFT_PRI3 20
45#define CIPCC_SHIFT_PRI4 13
46#define CIPCC_SHIFT_PRI5 10
47#define CIPCC_SHIFT_PRI6 7
48#define CIPCC_SHIFT_PRI7 4
49
50/* CICR priority modes */
51#define CICR_GWCC 0x00040000
52#define CICR_GXCC 0x00020000
53#define CICR_GYCC 0x00010000
54#define CICR_GZCC 0x00080000
55#define CICR_GRTA 0x00200000
56#define CICR_GRTB 0x00400000
57#define CICR_HPIT_SHIFT 8
58#define CICR_HPIT_MASK 0x00000300
59#define CICR_HP_SHIFT 24
60#define CICR_HP_MASK 0x3f000000
61
62/* CICNR */
63#define CICNR_WCC1T_SHIFT 20
64#define CICNR_ZCC1T_SHIFT 28
65#define CICNR_YCC1T_SHIFT 12
66#define CICNR_XCC1T_SHIFT 4
67
68/* CRICR */
69#define CRICR_RTA1T_SHIFT 20
70#define CRICR_RTB1T_SHIFT 28
71
72/* Signal indicator */
73#define SIGNAL_MASK 3
74#define SIGNAL_HIGH 2
75#define SIGNAL_LOW 0
76
/* Per-instance state for one QE interrupt controller */
struct qe_ic {
	/* Control registers offset */
	volatile u32 __iomem *regs;

	/* The remapper for this QEIC */
	struct irq_host *irqhost;

	/* The "linux" controller struct */
	struct irq_chip hc_irq;

	/* The device node of the interrupt controller */
	struct device_node *of_node;

	/* VIRQ numbers of QE high/low irqs */
	unsigned int virq_high;
	unsigned int virq_low;
};

/*
 * QE interrupt controller internal structure
 * (one entry per hardware interrupt source)
 */
struct qe_ic_info {
	u32 mask;	/* location of this source at the QIMR register. */
	u32 mask_reg;	/* Mask register offset */
	u8 pri_code;	/* for grouped interrupts sources - the interrupt
			   code as appears at the group priority register */
	u32 pri_reg;	/* Group priority register offset */
};
105
106#endif /* _POWERPC_SYSDEV_QE_IC_H */
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
new file mode 100644
index 000000000000..aea435970389
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -0,0 +1,226 @@
1/*
2 * arch/powerpc/sysdev/qe_lib/qe_io.c
3 *
4 * QE Parallel I/O ports configuration routines
5 *
6 * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
7 *
8 * Author: Li Yang <LeoLi@freescale.com>
9 * Based on code from Shlomi Gridish <gridish@freescale.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16
17#include <linux/config.h>
18#include <linux/stddef.h>
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/errno.h>
22#include <linux/module.h>
23#include <linux/ioport.h>
24
25#include <asm/io.h>
26#include <asm/prom.h>
27#include <sysdev/fsl_soc.h>
28
29#undef DEBUG
30
/* Number of pins in each parallel I/O port */
#define NUM_OF_PINS 32

/* Register layout of one QE parallel I/O port (big-endian MMIO) */
struct port_regs {
	__be32 cpodr;		/* Open drain register */
	__be32 cpdata;		/* Data register */
	__be32 cpdir1;		/* Direction register */
	__be32 cpdir2;		/* Direction register */
	__be32 cppar1;		/* Pin assignment register */
	__be32 cppar2;		/* Pin assignment register */
};

/* Mapped base of the port register banks and the number of ports;
 * both are set once by par_io_init() from the device tree */
static struct port_regs *par_io = NULL;
static int num_par_io_ports = 0;
44
45int par_io_init(struct device_node *np)
46{
47 struct resource res;
48 int ret;
49 const u32 *num_ports;
50
51 /* Map Parallel I/O ports registers */
52 ret = of_address_to_resource(np, 0, &res);
53 if (ret)
54 return ret;
55 par_io = ioremap(res.start, res.end - res.start + 1);
56
57 num_ports = get_property(np, "num-ports", NULL);
58 if (num_ports)
59 num_par_io_ports = *num_ports;
60
61 return 0;
62}
63
/*
 * Configure a single parallel I/O pin: its open-drain mode (1 bit per
 * pin in CPODR), its direction (2 bits per pin, split across CPDIR1 for
 * pins 0-15 and CPDIR2 for pins 16-31) and its function assignment
 * (likewise split across CPPAR1/CPPAR2).
 *
 * NOTE(review): unlike par_io_data_set(), 'port' and 'pin' are not
 * range-checked here -- confirm all callers pass validated values.
 * NOTE(review): 'has_irq' is accepted but never used in this body.
 *
 * Returns 0 on success, -1 if par_io_init() has not run yet.
 */
int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
		      int assignment, int has_irq)
{
	u32 pin_mask1bit, pin_mask2bits, new_mask2bits, tmp_val;

	if (!par_io)
		return -1;

	/* calculate pin location for single and 2 bits information */
	/* pin 0 is the most-significant bit of the 32-bit register */
	pin_mask1bit = (u32) (1 << (NUM_OF_PINS - (pin + 1)));

	/* Set open drain, if required */
	tmp_val = in_be32(&par_io[port].cpodr);
	if (open_drain)
		out_be32(&par_io[port].cpodr, pin_mask1bit | tmp_val);
	else
		out_be32(&par_io[port].cpodr, ~pin_mask1bit & tmp_val);

	/* define direction */
	/* pins 16-31 live in the "2" register, pins 0-15 in the "1" one */
	tmp_val = (pin > (NUM_OF_PINS / 2) - 1) ?
		in_be32(&par_io[port].cpdir2) :
		in_be32(&par_io[port].cpdir1);

	/* get all bits mask for 2 bit per port */
	pin_mask2bits = (u32) (0x3 << (NUM_OF_PINS -
				(pin % (NUM_OF_PINS / 2) + 1) * 2));

	/* Get the final mask we need for the right definition */
	new_mask2bits = (u32) (dir << (NUM_OF_PINS -
				(pin % (NUM_OF_PINS / 2) + 1) * 2));

	/* clear and set 2 bits mask */
	/* the field is first cleared with a separate write, then the new
	 * value is written -- two MMIO stores per update */
	if (pin > (NUM_OF_PINS / 2) - 1) {
		out_be32(&par_io[port].cpdir2,
			 ~pin_mask2bits & tmp_val);
		tmp_val &= ~pin_mask2bits;
		out_be32(&par_io[port].cpdir2, new_mask2bits | tmp_val);
	} else {
		out_be32(&par_io[port].cpdir1,
			 ~pin_mask2bits & tmp_val);
		tmp_val &= ~pin_mask2bits;
		out_be32(&par_io[port].cpdir1, new_mask2bits | tmp_val);
	}
	/* define pin assignment */
	tmp_val = (pin > (NUM_OF_PINS / 2) - 1) ?
		in_be32(&par_io[port].cppar2) :
		in_be32(&par_io[port].cppar1);

	new_mask2bits = (u32) (assignment << (NUM_OF_PINS -
			(pin % (NUM_OF_PINS / 2) + 1) * 2));
	/* clear and set 2 bits mask */
	if (pin > (NUM_OF_PINS / 2) - 1) {
		out_be32(&par_io[port].cppar2,
			 ~pin_mask2bits & tmp_val);
		tmp_val &= ~pin_mask2bits;
		out_be32(&par_io[port].cppar2, new_mask2bits | tmp_val);
	} else {
		out_be32(&par_io[port].cppar1,
			 ~pin_mask2bits & tmp_val);
		tmp_val &= ~pin_mask2bits;
		out_be32(&par_io[port].cppar1, new_mask2bits | tmp_val);
	}

	return 0;
}
EXPORT_SYMBOL(par_io_config_pin);
130
131int par_io_data_set(u8 port, u8 pin, u8 val)
132{
133 u32 pin_mask, tmp_val;
134
135 if (port >= num_par_io_ports)
136 return -EINVAL;
137 if (pin >= NUM_OF_PINS)
138 return -EINVAL;
139 /* calculate pin location */
140 pin_mask = (u32) (1 << (NUM_OF_PINS - 1 - pin));
141
142 tmp_val = in_be32(&par_io[port].cpdata);
143
144 if (val == 0) /* clear */
145 out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
146 else /* set */
147 out_be32(&par_io[port].cpdata, pin_mask | tmp_val);
148
149 return 0;
150}
151EXPORT_SYMBOL(par_io_data_set);
152
153int par_io_of_config(struct device_node *np)
154{
155 struct device_node *pio;
156 const phandle *ph;
157 int pio_map_len;
158 const unsigned int *pio_map;
159
160 if (par_io == NULL) {
161 printk(KERN_ERR "par_io not initialized \n");
162 return -1;
163 }
164
165 ph = get_property(np, "pio-handle", NULL);
166 if (ph == 0) {
167 printk(KERN_ERR "pio-handle not available \n");
168 return -1;
169 }
170
171 pio = of_find_node_by_phandle(*ph);
172
173 pio_map = get_property(pio, "pio-map", &pio_map_len);
174 if (pio_map == NULL) {
175 printk(KERN_ERR "pio-map is not set! \n");
176 return -1;
177 }
178 pio_map_len /= sizeof(unsigned int);
179 if ((pio_map_len % 6) != 0) {
180 printk(KERN_ERR "pio-map format wrong! \n");
181 return -1;
182 }
183
184 while (pio_map_len > 0) {
185 par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
186 (int) pio_map[2], (int) pio_map[3],
187 (int) pio_map[4], (int) pio_map[5]);
188 pio_map += 6;
189 pio_map_len -= 6;
190 }
191 of_node_put(pio);
192 return 0;
193}
194EXPORT_SYMBOL(par_io_of_config);
195
196#ifdef DEBUG
197static void dump_par_io(void)
198{
199 int i;
200
201 printk(KERN_INFO "PAR IO registars:\n");
202 printk(KERN_INFO "Base address: 0x%08x\n", (u32) par_io);
203 for (i = 0; i < num_par_io_ports; i++) {
204 printk(KERN_INFO "cpodr[%d] : addr - 0x%08x, val - 0x%08x\n",
205 i, (u32) & par_io[i].cpodr,
206 in_be32(&par_io[i].cpodr));
207 printk(KERN_INFO "cpdata[%d]: addr - 0x%08x, val - 0x%08x\n",
208 i, (u32) & par_io[i].cpdata,
209 in_be32(&par_io[i].cpdata));
210 printk(KERN_INFO "cpdir1[%d]: addr - 0x%08x, val - 0x%08x\n",
211 i, (u32) & par_io[i].cpdir1,
212 in_be32(&par_io[i].cpdir1));
213 printk(KERN_INFO "cpdir2[%d]: addr - 0x%08x, val - 0x%08x\n",
214 i, (u32) & par_io[i].cpdir2,
215 in_be32(&par_io[i].cpdir2));
216 printk(KERN_INFO "cppar1[%d]: addr - 0x%08x, val - 0x%08x\n",
217 i, (u32) & par_io[i].cppar1,
218 in_be32(&par_io[i].cppar1));
219 printk(KERN_INFO "cppar2[%d]: addr - 0x%08x, val - 0x%08x\n",
220 i, (u32) & par_io[i].cppar2,
221 in_be32(&par_io[i].cppar2));
222 }
223
224}
225EXPORT_SYMBOL(dump_par_io);
226#endif /* DEBUG */
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
new file mode 100644
index 000000000000..916c9e5df57f
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -0,0 +1,251 @@
1/*
2 * arch/powerpc/sysdev/qe_lib/ucc.c
3 *
4 * QE UCC API Set - UCC specific routines implementations.
5 *
6 * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
7 *
8 * Authors: Shlomi Gridish <gridish@freescale.com>
9 * Li Yang <leoli@freescale.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/errno.h>
19#include <linux/slab.h>
20#include <linux/stddef.h>
21
22#include <asm/irq.h>
23#include <asm/io.h>
24#include <asm/immap_qe.h>
25#include <asm/qe.h>
26#include <asm/ucc.h>
27
28static DEFINE_SPINLOCK(ucc_lock);
29
/*
 * Select which UCC drives the QE MII management (MDIO) interface by
 * rewriting the MII_ENET_MNG field of the CMXGCR mux register.
 *
 * NOTE(review): ucc_num is not range-checked here, unlike the other
 * ucc_* entry points -- confirm callers pass 0..UCC_MAX_NUM-1.
 * Always returns 0.
 */
int ucc_set_qe_mux_mii_mng(int ucc_num)
{
	unsigned long flags;

	/* read-modify-write of the shared CMXGCR register must be
	 * serialized against other mux updates */
	spin_lock_irqsave(&ucc_lock, flags);
	out_be32(&qe_immr->qmx.cmxgcr,
		 ((in_be32(&qe_immr->qmx.cmxgcr) &
		   ~QE_CMXGCR_MII_ENET_MNG) |
		  (ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT)));
	spin_unlock_irqrestore(&ucc_lock, flags);

	return 0;
}
43
44int ucc_set_type(int ucc_num, struct ucc_common *regs,
45 enum ucc_speed_type speed)
46{
47 u8 guemr = 0;
48
49 /* check if the UCC number is in range. */
50 if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
51 return -EINVAL;
52
53 guemr = regs->guemr;
54 guemr &= ~(UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX);
55 switch (speed) {
56 case UCC_SPEED_TYPE_SLOW:
57 guemr |= (UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX);
58 break;
59 case UCC_SPEED_TYPE_FAST:
60 guemr |= (UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX);
61 break;
62 default:
63 return -EINVAL;
64 }
65 regs->guemr = guemr;
66
67 return 0;
68}
69
70int ucc_init_guemr(struct ucc_common *regs)
71{
72 u8 guemr = 0;
73
74 if (!regs)
75 return -EINVAL;
76
77 /* Set bit 3 (which is reserved in the GUEMR register) to 1 */
78 guemr = UCC_GUEMR_SET_RESERVED3;
79
80 regs->guemr = guemr;
81
82 return 0;
83}
84
85static void get_cmxucr_reg(int ucc_num, volatile u32 ** p_cmxucr, u8 * reg_num,
86 u8 * shift)
87{
88 switch (ucc_num) {
89 case 0: *p_cmxucr = &(qe_immr->qmx.cmxucr1);
90 *reg_num = 1;
91 *shift = 16;
92 break;
93 case 2: *p_cmxucr = &(qe_immr->qmx.cmxucr1);
94 *reg_num = 1;
95 *shift = 0;
96 break;
97 case 4: *p_cmxucr = &(qe_immr->qmx.cmxucr2);
98 *reg_num = 2;
99 *shift = 16;
100 break;
101 case 6: *p_cmxucr = &(qe_immr->qmx.cmxucr2);
102 *reg_num = 2;
103 *shift = 0;
104 break;
105 case 1: *p_cmxucr = &(qe_immr->qmx.cmxucr3);
106 *reg_num = 3;
107 *shift = 16;
108 break;
109 case 3: *p_cmxucr = &(qe_immr->qmx.cmxucr3);
110 *reg_num = 3;
111 *shift = 0;
112 break;
113 case 5: *p_cmxucr = &(qe_immr->qmx.cmxucr4);
114 *reg_num = 4;
115 *shift = 16;
116 break;
117 case 7: *p_cmxucr = &(qe_immr->qmx.cmxucr4);
118 *reg_num = 4;
119 *shift = 0;
120 break;
121 default:
122 break;
123 }
124}
125
126int ucc_mux_set_grant_tsa_bkpt(int ucc_num, int set, u32 mask)
127{
128 volatile u32 *p_cmxucr;
129 u8 reg_num;
130 u8 shift;
131
132 /* check if the UCC number is in range. */
133 if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
134 return -EINVAL;
135
136 get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift);
137
138 if (set)
139 out_be32(p_cmxucr, in_be32(p_cmxucr) | (mask << shift));
140 else
141 out_be32(p_cmxucr, in_be32(p_cmxucr) & ~(mask << shift));
142
143 return 0;
144}
145
/*
 * Route a BRG or external clock to a UCC's Rx or Tx side through the
 * CMXUCR mux.  Each CMXUCR register serves a fixed pair of UCCs and
 * accepts only a fixed set of clock sources, encoded below as 4-bit
 * source codes (see the per-register switch tables).
 *
 * Returns 0 on success, -EINVAL for a bad UCC number or mode, -ENOENT
 * when the requested clock cannot be routed to that UCC.
 */
int ucc_set_qe_mux_rxtx(int ucc_num, enum qe_clock clock, enum comm_dir mode)
{
	volatile u32 *p_cmxucr;
	u8 reg_num;
	u8 shift;
	u32 clock_bits;
	u32 clock_mask;
	int source = -1;

	/* check if the UCC number is in range. */
	if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
		return -EINVAL;

	/* exactly one direction must be requested (not a COMM_DIR mask) */
	if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) {
		printk(KERN_ERR
		       "ucc_set_qe_mux_rxtx: bad comm mode type passed.");
		return -EINVAL;
	}

	get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift);

	/* Map the requested clock to the 4-bit source code understood by
	 * this particular CMXUCR register; -1 means "not routable". */
	switch (reg_num) {
	case 1:
		switch (clock) {
		case QE_BRG1: source = 1; break;
		case QE_BRG2: source = 2; break;
		case QE_BRG7: source = 3; break;
		case QE_BRG8: source = 4; break;
		case QE_CLK9: source = 5; break;
		case QE_CLK10: source = 6; break;
		case QE_CLK11: source = 7; break;
		case QE_CLK12: source = 8; break;
		case QE_CLK15: source = 9; break;
		case QE_CLK16: source = 10; break;
		default: source = -1; break;
		}
		break;
	case 2:
		switch (clock) {
		case QE_BRG5: source = 1; break;
		case QE_BRG6: source = 2; break;
		case QE_BRG7: source = 3; break;
		case QE_BRG8: source = 4; break;
		case QE_CLK13: source = 5; break;
		case QE_CLK14: source = 6; break;
		case QE_CLK19: source = 7; break;
		case QE_CLK20: source = 8; break;
		case QE_CLK15: source = 9; break;
		case QE_CLK16: source = 10; break;
		default: source = -1; break;
		}
		break;
	case 3:
		switch (clock) {
		case QE_BRG9: source = 1; break;
		case QE_BRG10: source = 2; break;
		case QE_BRG15: source = 3; break;
		case QE_BRG16: source = 4; break;
		case QE_CLK3: source = 5; break;
		case QE_CLK4: source = 6; break;
		case QE_CLK17: source = 7; break;
		case QE_CLK18: source = 8; break;
		case QE_CLK7: source = 9; break;
		case QE_CLK8: source = 10; break;
		default: source = -1; break;
		}
		break;
	case 4:
		switch (clock) {
		case QE_BRG13: source = 1; break;
		case QE_BRG14: source = 2; break;
		case QE_BRG15: source = 3; break;
		case QE_BRG16: source = 4; break;
		case QE_CLK5: source = 5; break;
		case QE_CLK6: source = 6; break;
		case QE_CLK21: source = 7; break;
		case QE_CLK22: source = 8; break;
		case QE_CLK7: source = 9; break;
		case QE_CLK8: source = 10; break;
		default: source = -1; break;
		}
		break;
	default:
		source = -1;
		break;
	}

	if (source == -1) {
		printk(KERN_ERR
		       "ucc_set_qe_mux_rxtx: Bad combination of clock and UCC.");
		return -ENOENT;
	}

	/* Position the source code in the Rx or Tx field of the UCC's
	 * half-word and write it with a read-modify-write. */
	clock_bits = (u32) source;
	clock_mask = QE_CMXUCR_TX_CLK_SRC_MASK;
	if (mode == COMM_DIR_RX) {
		clock_bits <<= 4;	/* Rx field is 4 bits to left of Tx field */
		clock_mask <<= 4;	/* Rx field is 4 bits to left of Tx field */
	}
	clock_bits <<= shift;
	clock_mask <<= shift;

	out_be32(p_cmxucr, (in_be32(p_cmxucr) & ~clock_mask) | clock_bits);

	return 0;
}
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
new file mode 100644
index 000000000000..c2be7348fcbd
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -0,0 +1,396 @@
1/*
2 * arch/powerpc/sysdev/qe_lib/ucc_fast.c
3 *
4 * QE UCC Fast API Set - UCC Fast specific routines implementations.
5 *
6 * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
7 *
8 * Authors: Shlomi Gridish <gridish@freescale.com>
9 * Li Yang <leoli@freescale.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/errno.h>
19#include <linux/slab.h>
20#include <linux/stddef.h>
21#include <linux/interrupt.h>
22
23#include <asm/io.h>
24#include <asm/immap_qe.h>
25#include <asm/qe.h>
26
27#include <asm/ucc.h>
28#include <asm/ucc_fast.h>
29
30#define uccf_printk(level, format, arg...) \
31 printk(level format "\n", ## arg)
32
33#define uccf_dbg(format, arg...) \
34 uccf_printk(KERN_DEBUG , format , ## arg)
35#define uccf_err(format, arg...) \
36 uccf_printk(KERN_ERR , format , ## arg)
37#define uccf_info(format, arg...) \
38 uccf_printk(KERN_INFO , format , ## arg)
39#define uccf_warn(format, arg...) \
40 uccf_printk(KERN_WARNING , format , ## arg)
41
42#ifdef UCCF_VERBOSE_DEBUG
43#define uccf_vdbg uccf_dbg
44#else
45#define uccf_vdbg(fmt, args...) do { } while (0)
46#endif /* UCCF_VERBOSE_DEBUG */
47
/* Debug helper: print address and current value of every register of a
 * fast UCC (uccs and guemr are plain 8-bit reads, the rest go through
 * in_be16/in_be32). */
void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
{
	uccf_info("UCC%d Fast registers:", uccf->uf_info->ucc_num);
	uccf_info("Base address: 0x%08x", (u32) uccf->uf_regs);

	uccf_info("gumr  : addr - 0x%08x, val - 0x%08x",
		  (u32) & uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
	uccf_info("upsmr : addr - 0x%08x, val - 0x%08x",
		  (u32) & uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
	uccf_info("utodr : addr - 0x%08x, val - 0x%04x",
		  (u32) & uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
	uccf_info("udsr  : addr - 0x%08x, val - 0x%04x",
		  (u32) & uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
	uccf_info("ucce  : addr - 0x%08x, val - 0x%08x",
		  (u32) & uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
	uccf_info("uccm  : addr - 0x%08x, val - 0x%08x",
		  (u32) & uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
	uccf_info("uccs  : addr - 0x%08x, val - 0x%02x",
		  (u32) & uccf->uf_regs->uccs, uccf->uf_regs->uccs);
	uccf_info("urfb  : addr - 0x%08x, val - 0x%08x",
		  (u32) & uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
	uccf_info("urfs  : addr - 0x%08x, val - 0x%04x",
		  (u32) & uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
	uccf_info("urfet : addr - 0x%08x, val - 0x%04x",
		  (u32) & uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
	uccf_info("urfset: addr - 0x%08x, val - 0x%04x",
		  (u32) & uccf->uf_regs->urfset,
		  in_be16(&uccf->uf_regs->urfset));
	uccf_info("utfb  : addr - 0x%08x, val - 0x%08x",
		  (u32) & uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
	uccf_info("utfs  : addr - 0x%08x, val - 0x%04x",
		  (u32) & uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
	uccf_info("utfet : addr - 0x%08x, val - 0x%04x",
		  (u32) & uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
	uccf_info("utftt : addr - 0x%08x, val - 0x%04x",
		  (u32) & uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
	uccf_info("utpt  : addr - 0x%08x, val - 0x%04x",
		  (u32) & uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
	uccf_info("urtry : addr - 0x%08x, val - 0x%08x",
		  (u32) & uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
	uccf_info("guemr : addr - 0x%08x, val - 0x%02x",
		  (u32) & uccf->uf_regs->guemr, uccf->uf_regs->guemr);
}
91
92u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
93{
94 switch (uccf_num) {
95 case 0: return QE_CR_SUBBLOCK_UCCFAST1;
96 case 1: return QE_CR_SUBBLOCK_UCCFAST2;
97 case 2: return QE_CR_SUBBLOCK_UCCFAST3;
98 case 3: return QE_CR_SUBBLOCK_UCCFAST4;
99 case 4: return QE_CR_SUBBLOCK_UCCFAST5;
100 case 5: return QE_CR_SUBBLOCK_UCCFAST6;
101 case 6: return QE_CR_SUBBLOCK_UCCFAST7;
102 case 7: return QE_CR_SUBBLOCK_UCCFAST8;
103 default: return QE_CR_SUBBLOCK_INVALID;
104 }
105}
106
/* Write the transmit-on-demand bit so the UCC starts transmitting now */
void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
{
	out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
}
111
112void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
113{
114 struct ucc_fast *uf_regs;
115 u32 gumr;
116
117 uf_regs = uccf->uf_regs;
118
119 /* Enable reception and/or transmission on this UCC. */
120 gumr = in_be32(&uf_regs->gumr);
121 if (mode & COMM_DIR_TX) {
122 gumr |= UCC_FAST_GUMR_ENT;
123 uccf->enabled_tx = 1;
124 }
125 if (mode & COMM_DIR_RX) {
126 gumr |= UCC_FAST_GUMR_ENR;
127 uccf->enabled_rx = 1;
128 }
129 out_be32(&uf_regs->gumr, gumr);
130}
131
132void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
133{
134 struct ucc_fast *uf_regs;
135 u32 gumr;
136
137 uf_regs = uccf->uf_regs;
138
139 /* Disable reception and/or transmission on this UCC. */
140 gumr = in_be32(&uf_regs->gumr);
141 if (mode & COMM_DIR_TX) {
142 gumr &= ~UCC_FAST_GUMR_ENT;
143 uccf->enabled_tx = 0;
144 }
145 if (mode & COMM_DIR_RX) {
146 gumr &= ~UCC_FAST_GUMR_ENR;
147 uccf->enabled_rx = 0;
148 }
149 out_be32(&uf_regs->gumr, gumr);
150}
151
152int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
153{
154 struct ucc_fast_private *uccf;
155 struct ucc_fast *uf_regs;
156 u32 gumr = 0;
157 int ret;
158
159 uccf_vdbg("%s: IN", __FUNCTION__);
160
161 if (!uf_info)
162 return -EINVAL;
163
164 /* check if the UCC port number is in range. */
165 if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
166 uccf_err("ucc_fast_init: Illagal UCC number!");
167 return -EINVAL;
168 }
169
170 /* Check that 'max_rx_buf_length' is properly aligned (4). */
171 if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
172 uccf_err("ucc_fast_init: max_rx_buf_length not aligned.");
173 return -EINVAL;
174 }
175
176 /* Validate Virtual Fifo register values */
177 if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
178 uccf_err
179 ("ucc_fast_init: Virtual Fifo register urfs too small.");
180 return -EINVAL;
181 }
182
183 if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
184 uccf_err
185 ("ucc_fast_init: Virtual Fifo register urfs not aligned.");
186 return -EINVAL;
187 }
188
189 if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
190 uccf_err
191 ("ucc_fast_init: Virtual Fifo register urfet not aligned.");
192 return -EINVAL;
193 }
194
195 if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
196 uccf_err
197 ("ucc_fast_init: Virtual Fifo register urfset not aligned.");
198 return -EINVAL;
199 }
200
201 if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
202 uccf_err
203 ("ucc_fast_init: Virtual Fifo register utfs not aligned.");
204 return -EINVAL;
205 }
206
207 if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
208 uccf_err
209 ("ucc_fast_init: Virtual Fifo register utfet not aligned.");
210 return -EINVAL;
211 }
212
213 if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
214 uccf_err
215 ("ucc_fast_init: Virtual Fifo register utftt not aligned.");
216 return -EINVAL;
217 }
218
219 uccf = (struct ucc_fast_private *)
220 kmalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
221 if (!uccf) {
222 uccf_err
223 ("ucc_fast_init: No memory for UCC slow data structure!");
224 return -ENOMEM;
225 }
226 memset(uccf, 0, sizeof(struct ucc_fast_private));
227
228 /* Fill fast UCC structure */
229 uccf->uf_info = uf_info;
230 /* Set the PHY base address */
231 uccf->uf_regs =
232 (struct ucc_fast *) ioremap(uf_info->regs, sizeof(struct ucc_fast));
233 if (uccf->uf_regs == NULL) {
234 uccf_err
235 ("ucc_fast_init: No memory map for UCC slow controller!");
236 return -ENOMEM;
237 }
238
239 uccf->enabled_tx = 0;
240 uccf->enabled_rx = 0;
241 uccf->stopped_tx = 0;
242 uccf->stopped_rx = 0;
243 uf_regs = uccf->uf_regs;
244 uccf->p_ucce = (u32 *) & (uf_regs->ucce);
245 uccf->p_uccm = (u32 *) & (uf_regs->uccm);
246#ifdef STATISTICS
247 uccf->tx_frames = 0;
248 uccf->rx_frames = 0;
249 uccf->rx_discarded = 0;
250#endif /* STATISTICS */
251
252 /* Init Guemr register */
253 if ((ret = ucc_init_guemr((struct ucc_common *) (uf_regs)))) {
254 uccf_err("ucc_fast_init: Could not init the guemr register.");
255 ucc_fast_free(uccf);
256 return ret;
257 }
258
259 /* Set UCC to fast type */
260 if ((ret = ucc_set_type(uf_info->ucc_num,
261 (struct ucc_common *) (uf_regs),
262 UCC_SPEED_TYPE_FAST))) {
263 uccf_err("ucc_fast_init: Could not set type to fast.");
264 ucc_fast_free(uccf);
265 return ret;
266 }
267
268 uccf->mrblr = uf_info->max_rx_buf_length;
269
270 /* Set GUMR */
271 /* For more details see the hardware spec. */
272 /* gumr starts as zero. */
273 if (uf_info->tci)
274 gumr |= UCC_FAST_GUMR_TCI;
275 gumr |= uf_info->ttx_trx;
276 if (uf_info->cdp)
277 gumr |= UCC_FAST_GUMR_CDP;
278 if (uf_info->ctsp)
279 gumr |= UCC_FAST_GUMR_CTSP;
280 if (uf_info->cds)
281 gumr |= UCC_FAST_GUMR_CDS;
282 if (uf_info->ctss)
283 gumr |= UCC_FAST_GUMR_CTSS;
284 if (uf_info->txsy)
285 gumr |= UCC_FAST_GUMR_TXSY;
286 if (uf_info->rsyn)
287 gumr |= UCC_FAST_GUMR_RSYN;
288 gumr |= uf_info->synl;
289 if (uf_info->rtsm)
290 gumr |= UCC_FAST_GUMR_RTSM;
291 gumr |= uf_info->renc;
292 if (uf_info->revd)
293 gumr |= UCC_FAST_GUMR_REVD;
294 gumr |= uf_info->tenc;
295 gumr |= uf_info->tcrc;
296 gumr |= uf_info->mode;
297 out_be32(&uf_regs->gumr, gumr);
298
299 /* Allocate memory for Tx Virtual Fifo */
300 uccf->ucc_fast_tx_virtual_fifo_base_offset =
301 qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
302 if (IS_MURAM_ERR(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
303 uccf_err
304 ("ucc_fast_init: Can not allocate MURAM memory for "
305 "struct ucc_fastx_virtual_fifo_base_offset.");
306 uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
307 ucc_fast_free(uccf);
308 return -ENOMEM;
309 }
310
311 /* Allocate memory for Rx Virtual Fifo */
312 uccf->ucc_fast_rx_virtual_fifo_base_offset =
313 qe_muram_alloc(uf_info->urfs +
314 (u32)
315 UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
316 UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
317 if (IS_MURAM_ERR(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
318 uccf_err
319 ("ucc_fast_init: Can not allocate MURAM memory for "
320 "ucc_fast_rx_virtual_fifo_base_offset.");
321 uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
322 ucc_fast_free(uccf);
323 return -ENOMEM;
324 }
325
326 /* Set Virtual Fifo registers */
327 out_be16(&uf_regs->urfs, uf_info->urfs);
328 out_be16(&uf_regs->urfet, uf_info->urfet);
329 out_be16(&uf_regs->urfset, uf_info->urfset);
330 out_be16(&uf_regs->utfs, uf_info->utfs);
331 out_be16(&uf_regs->utfet, uf_info->utfet);
332 out_be16(&uf_regs->utftt, uf_info->utftt);
333 /* utfb, urfb are offsets from MURAM base */
334 out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
335 out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
336
337 /* Mux clocking */
338 /* Grant Support */
339 ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
340 /* Breakpoint Support */
341 ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
342 /* Set Tsa or NMSI mode. */
343 ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
344 /* If NMSI (not Tsa), set Tx and Rx clock. */
345 if (!uf_info->tsa) {
346 /* Rx clock routing */
347 if (uf_info->rx_clock != QE_CLK_NONE) {
348 if (ucc_set_qe_mux_rxtx
349 (uf_info->ucc_num, uf_info->rx_clock,
350 COMM_DIR_RX)) {
351 uccf_err
352 ("ucc_fast_init: Illegal value for parameter 'RxClock'.");
353 ucc_fast_free(uccf);
354 return -EINVAL;
355 }
356 }
357 /* Tx clock routing */
358 if (uf_info->tx_clock != QE_CLK_NONE) {
359 if (ucc_set_qe_mux_rxtx
360 (uf_info->ucc_num, uf_info->tx_clock,
361 COMM_DIR_TX)) {
362 uccf_err
363 ("ucc_fast_init: Illegal value for parameter 'TxClock'.");
364 ucc_fast_free(uccf);
365 return -EINVAL;
366 }
367 }
368 }
369
370 /* Set interrupt mask register at UCC level. */
371 out_be32(&uf_regs->uccm, uf_info->uccm_mask);
372
373 /* First, clear anything pending at UCC level,
374 * otherwise, old garbage may come through
375 * as soon as the dam is opened
376 * Writing '1' clears
377 */
378 out_be32(&uf_regs->ucce, 0xffffffff);
379
380 *uccf_ret = uccf;
381 return 0;
382}
383
384void ucc_fast_free(struct ucc_fast_private * uccf)
385{
386 if (!uccf)
387 return;
388
389 if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
390 qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
391
392 if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
393 qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
394
395 kfree(uccf);
396}
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
new file mode 100644
index 000000000000..1fb88ef7cf06
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -0,0 +1,404 @@
1/*
2 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Authors: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
6 *
7 * Description:
8 * QE UCC Slow API Set - UCC Slow specific routines implementations.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/errno.h>
18#include <linux/slab.h>
19#include <linux/stddef.h>
20#include <linux/interrupt.h>
21
22#include <asm/irq.h>
23#include <asm/io.h>
24#include <asm/immap_qe.h>
25#include <asm/qe.h>
26
27#include <asm/ucc.h>
28#include <asm/ucc_slow.h>
29
30#define uccs_printk(level, format, arg...) \
31 printk(level format "\n", ## arg)
32
33#define uccs_dbg(format, arg...) \
34 uccs_printk(KERN_DEBUG , format , ## arg)
35#define uccs_err(format, arg...) \
36 uccs_printk(KERN_ERR , format , ## arg)
37#define uccs_info(format, arg...) \
38 uccs_printk(KERN_INFO , format , ## arg)
39#define uccs_warn(format, arg...) \
40 uccs_printk(KERN_WARNING , format , ## arg)
41
42#ifdef UCCS_VERBOSE_DEBUG
43#define uccs_vdbg uccs_dbg
44#else
45#define uccs_vdbg(fmt, args...) do { } while (0)
46#endif /* UCCS_VERBOSE_DEBUG */
47
48u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
49{
50 switch (uccs_num) {
51 case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
52 case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
53 case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
54 case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
55 case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
56 case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
57 case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
58 case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
59 default: return QE_CR_SUBBLOCK_INVALID;
60 }
61}
62
63void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs)
64{
65 out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD);
66}
67
68void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
69{
70 struct ucc_slow_info *us_info = uccs->us_info;
71 u32 id;
72
73 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
74 qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
75 QE_CR_PROTOCOL_UNSPECIFIED, 0);
76}
77
78void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
79{
80 struct ucc_slow_info *us_info = uccs->us_info;
81 u32 id;
82
83 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
84 qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
85}
86
87void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
88{
89 struct ucc_slow_info *us_info = uccs->us_info;
90 u32 id;
91
92 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
93 qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
94}
95
96void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
97{
98 struct ucc_slow *us_regs;
99 u32 gumr_l;
100
101 us_regs = uccs->us_regs;
102
103 /* Enable reception and/or transmission on this UCC. */
104 gumr_l = in_be32(&us_regs->gumr_l);
105 if (mode & COMM_DIR_TX) {
106 gumr_l |= UCC_SLOW_GUMR_L_ENT;
107 uccs->enabled_tx = 1;
108 }
109 if (mode & COMM_DIR_RX) {
110 gumr_l |= UCC_SLOW_GUMR_L_ENR;
111 uccs->enabled_rx = 1;
112 }
113 out_be32(&us_regs->gumr_l, gumr_l);
114}
115
116void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
117{
118 struct ucc_slow *us_regs;
119 u32 gumr_l;
120
121 us_regs = uccs->us_regs;
122
123 /* Disable reception and/or transmission on this UCC. */
124 gumr_l = in_be32(&us_regs->gumr_l);
125 if (mode & COMM_DIR_TX) {
126 gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
127 uccs->enabled_tx = 0;
128 }
129 if (mode & COMM_DIR_RX) {
130 gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
131 uccs->enabled_rx = 0;
132 }
133 out_be32(&us_regs->gumr_l, gumr_l);
134}
135
136int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
137{
138 u32 i;
139 struct ucc_slow *us_regs;
140 u32 gumr;
141 u8 function_code = 0;
142 u8 *bd;
143 struct ucc_slow_private *uccs;
144 u32 id;
145 u32 command;
146 int ret;
147
148 uccs_vdbg("%s: IN", __FUNCTION__);
149
150 if (!us_info)
151 return -EINVAL;
152
153 /* check if the UCC port number is in range. */
154 if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
155 uccs_err("ucc_slow_init: Illagal UCC number!");
156 return -EINVAL;
157 }
158
159 /*
160 * Set mrblr
161 * Check that 'max_rx_buf_length' is properly aligned (4), unless
162 * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
163 * case when QE accepts 32 bits at a time.
164 */
165 if ((!us_info->rfw) &&
166 (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
167 uccs_err("max_rx_buf_length not aligned.");
168 return -EINVAL;
169 }
170
171 uccs = (struct ucc_slow_private *)
172 kmalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
173 if (!uccs) {
174 uccs_err
175 ("ucc_slow_init: No memory for UCC slow data structure!");
176 return -ENOMEM;
177 }
178 memset(uccs, 0, sizeof(struct ucc_slow_private));
179
180 /* Fill slow UCC structure */
181 uccs->us_info = us_info;
182 uccs->saved_uccm = 0;
183 uccs->p_rx_frame = 0;
184 uccs->us_regs = us_info->us_regs;
185 us_regs = uccs->us_regs;
186 uccs->p_ucce = (u16 *) & (us_regs->ucce);
187 uccs->p_uccm = (u16 *) & (us_regs->uccm);
188#ifdef STATISTICS
189 uccs->rx_frames = 0;
190 uccs->tx_frames = 0;
191 uccs->rx_discarded = 0;
192#endif /* STATISTICS */
193
194 /* Get PRAM base */
195 uccs->us_pram_offset = qe_muram_alloc(UCC_SLOW_PRAM_SIZE,
196 ALIGNMENT_OF_UCC_SLOW_PRAM);
197 if (IS_MURAM_ERR(uccs->us_pram_offset)) {
198 uccs_err
199 ("ucc_slow_init: Can not allocate MURAM memory "
200 "for Slow UCC.");
201 ucc_slow_free(uccs);
202 return -ENOMEM;
203 }
204 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
205 qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, QE_CR_PROTOCOL_UNSPECIFIED,
206 (u32) uccs->us_pram_offset);
207
208 uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
209
210 /* Init Guemr register */
211 if ((ret = ucc_init_guemr((struct ucc_common *) (us_info->us_regs)))) {
212 uccs_err("ucc_slow_init: Could not init the guemr register.");
213 ucc_slow_free(uccs);
214 return ret;
215 }
216
217 /* Set UCC to slow type */
218 if ((ret = ucc_set_type(us_info->ucc_num,
219 (struct ucc_common *) (us_info->us_regs),
220 UCC_SPEED_TYPE_SLOW))) {
221 uccs_err("ucc_slow_init: Could not init the guemr register.");
222 ucc_slow_free(uccs);
223 return ret;
224 }
225
226 out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
227
228 INIT_LIST_HEAD(&uccs->confQ);
229
230 /* Allocate BDs. */
231 uccs->rx_base_offset =
232 qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
233 QE_ALIGNMENT_OF_BD);
234 if (IS_MURAM_ERR(uccs->rx_base_offset)) {
235 uccs_err("ucc_slow_init: No memory for Rx BD's.");
236 uccs->rx_base_offset = 0;
237 ucc_slow_free(uccs);
238 return -ENOMEM;
239 }
240
241 uccs->tx_base_offset =
242 qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
243 QE_ALIGNMENT_OF_BD);
244 if (IS_MURAM_ERR(uccs->tx_base_offset)) {
245 uccs_err("ucc_slow_init: No memory for Tx BD's.");
246 uccs->tx_base_offset = 0;
247 ucc_slow_free(uccs);
248 return -ENOMEM;
249 }
250
251 /* Init Tx bds */
252 bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
253 for (i = 0; i < us_info->tx_bd_ring_len; i++) {
254 /* clear bd buffer */
255 out_be32(&(((struct qe_bd *)bd)->buf), 0);
256 /* set bd status and length */
257 out_be32((u32*)bd, 0);
258 bd += sizeof(struct qe_bd);
259 }
260 bd -= sizeof(struct qe_bd);
261 /* set bd status and length */
262 out_be32((u32*)bd, T_W); /* for last BD set Wrap bit */
263
264 /* Init Rx bds */
265 bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
266 for (i = 0; i < us_info->rx_bd_ring_len; i++) {
267 /* set bd status and length */
268 out_be32((u32*)bd, 0);
269 /* clear bd buffer */
270 out_be32(&(((struct qe_bd *)bd)->buf), 0);
271 bd += sizeof(struct qe_bd);
272 }
273 bd -= sizeof(struct qe_bd);
274 /* set bd status and length */
275 out_be32((u32*)bd, R_W); /* for last BD set Wrap bit */
276
277 /* Set GUMR (For more details see the hardware spec.). */
278 /* gumr_h */
279 gumr = 0;
280 gumr |= us_info->tcrc;
281 if (us_info->cdp)
282 gumr |= UCC_SLOW_GUMR_H_CDP;
283 if (us_info->ctsp)
284 gumr |= UCC_SLOW_GUMR_H_CTSP;
285 if (us_info->cds)
286 gumr |= UCC_SLOW_GUMR_H_CDS;
287 if (us_info->ctss)
288 gumr |= UCC_SLOW_GUMR_H_CTSS;
289 if (us_info->tfl)
290 gumr |= UCC_SLOW_GUMR_H_TFL;
291 if (us_info->rfw)
292 gumr |= UCC_SLOW_GUMR_H_RFW;
293 if (us_info->txsy)
294 gumr |= UCC_SLOW_GUMR_H_TXSY;
295 if (us_info->rtsm)
296 gumr |= UCC_SLOW_GUMR_H_RTSM;
297 out_be32(&us_regs->gumr_h, gumr);
298
299 /* gumr_l */
300 gumr = 0;
301 if (us_info->tci)
302 gumr |= UCC_SLOW_GUMR_L_TCI;
303 if (us_info->rinv)
304 gumr |= UCC_SLOW_GUMR_L_RINV;
305 if (us_info->tinv)
306 gumr |= UCC_SLOW_GUMR_L_TINV;
307 if (us_info->tend)
308 gumr |= UCC_SLOW_GUMR_L_TEND;
309 gumr |= us_info->tdcr;
310 gumr |= us_info->rdcr;
311 gumr |= us_info->tenc;
312 gumr |= us_info->renc;
313 gumr |= us_info->diag;
314 gumr |= us_info->mode;
315 out_be32(&us_regs->gumr_l, gumr);
316
317 /* Function code registers */
318 /* function_code has initial value 0 */
319
320 /* if the data is in cachable memory, the 'global' */
321 /* in the function code should be set. */
322 function_code |= us_info->data_mem_part;
323 function_code |= QE_BMR_BYTE_ORDER_BO_MOT; /* Required for QE */
324 uccs->us_pram->tfcr = function_code;
325 uccs->us_pram->rfcr = function_code;
326
327 /* rbase, tbase are offsets from MURAM base */
328 out_be16(&uccs->us_pram->rbase, uccs->us_pram_offset);
329 out_be16(&uccs->us_pram->tbase, uccs->us_pram_offset);
330
331 /* Mux clocking */
332 /* Grant Support */
333 ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
334 /* Breakpoint Support */
335 ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
336 /* Set Tsa or NMSI mode. */
337 ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
338 /* If NMSI (not Tsa), set Tx and Rx clock. */
339 if (!us_info->tsa) {
340 /* Rx clock routing */
341 if (ucc_set_qe_mux_rxtx
342 (us_info->ucc_num, us_info->rx_clock, COMM_DIR_RX)) {
343 uccs_err
344 ("ucc_slow_init: Illegal value for parameter"
345 " 'RxClock'.");
346 ucc_slow_free(uccs);
347 return -EINVAL;
348 }
349 /* Tx clock routing */
350 if (ucc_set_qe_mux_rxtx(us_info->ucc_num,
351 us_info->tx_clock, COMM_DIR_TX)) {
352 uccs_err
353 ("ucc_slow_init: Illegal value for parameter "
354 "'TxClock'.");
355 ucc_slow_free(uccs);
356 return -EINVAL;
357 }
358 }
359
360 /*
361 * INTERRUPTS
362 */
363 /* Set interrupt mask register at UCC level. */
364 out_be16(&us_regs->uccm, us_info->uccm_mask);
365
366 /* First, clear anything pending at UCC level, */
367 /* otherwise, old garbage may come through */
368 /* as soon as the dam is opened. */
369
370 /* Writing '1' clears */
371 out_be16(&us_regs->ucce, 0xffff);
372
373 /* Issue QE Init command */
374 if (us_info->init_tx && us_info->init_rx)
375 command = QE_INIT_TX_RX;
376 else if (us_info->init_tx)
377 command = QE_INIT_TX;
378 else
379 command = QE_INIT_RX; /* We know at least one is TRUE */
380 id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
381 qe_issue_cmd(command, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
382
383 *uccs_ret = uccs;
384 return 0;
385}
386
387void ucc_slow_free(struct ucc_slow_private * uccs)
388{
389 if (!uccs)
390 return;
391
392 if (uccs->rx_base_offset)
393 qe_muram_free(uccs->rx_base_offset);
394
395 if (uccs->tx_base_offset)
396 qe_muram_free(uccs->tx_base_offset);
397
398 if (uccs->us_pram) {
399 qe_muram_free(uccs->us_pram_offset);
400 uccs->us_pram = NULL;
401 }
402
403 kfree(uccs);
404}
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 8adad1444a51..708236f34746 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2,6 +2,8 @@
2 * Routines providing a simple monitor for use on the PowerMac. 2 * Routines providing a simple monitor for use on the PowerMac.
3 * 3 *
4 * Copyright (C) 1996-2005 Paul Mackerras. 4 * Copyright (C) 1996-2005 Paul Mackerras.
5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 * Copyright (C) 2006 Michael Ellerman, IBM Corp
5 * 7 *
6 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
@@ -503,7 +505,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
503 505
504 mtmsr(msr); /* restore interrupt enable */ 506 mtmsr(msr); /* restore interrupt enable */
505 507
506 return cmd != 'X'; 508 return cmd != 'X' && cmd != EOF;
507} 509}
508 510
509int xmon(struct pt_regs *excp) 511int xmon(struct pt_regs *excp)
@@ -2597,3 +2599,34 @@ static int __init setup_xmon_sysrq(void)
2597} 2599}
2598__initcall(setup_xmon_sysrq); 2600__initcall(setup_xmon_sysrq);
2599#endif /* CONFIG_MAGIC_SYSRQ */ 2601#endif /* CONFIG_MAGIC_SYSRQ */
2602
2603int __initdata xmon_early, xmon_off;
2604
2605static int __init early_parse_xmon(char *p)
2606{
2607 if (!p || strncmp(p, "early", 5) == 0) {
2608 /* just "xmon" is equivalent to "xmon=early" */
2609 xmon_init(1);
2610 xmon_early = 1;
2611 } else if (strncmp(p, "on", 2) == 0)
2612 xmon_init(1);
2613 else if (strncmp(p, "off", 3) == 0)
2614 xmon_off = 1;
2615 else if (strncmp(p, "nobt", 4) == 0)
2616 xmon_no_auto_backtrace = 1;
2617 else
2618 return 1;
2619
2620 return 0;
2621}
2622early_param("xmon", early_parse_xmon);
2623
/*
 * Apply the xmon state chosen by early_parse_xmon(): enable xmon when
 * it is on by default (and not explicitly disabled), then enter the
 * debugger immediately if "xmon=early" was requested.
 */
void __init xmon_setup(void)
{
#ifdef CONFIG_XMON_DEFAULT
	/* default-on kernels: honour "xmon=off" */
	if (!xmon_off)
		xmon_init(1);
#endif
	/* "xmon=early" (or bare "xmon"): drop into the debugger now */
	if (xmon_early)
		debugger(NULL);
}
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 2dd51f364ea2..0612a33bb896 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -43,17 +43,10 @@
43 BI(x,8) BI(x,9) BI(x,a) BI(x,b) \ 43 BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
44 BI(x,c) BI(x,d) BI(x,e) BI(x,f) 44 BI(x,c) BI(x,d) BI(x,e) BI(x,f)
45 45
46#define BUILD_15_IRQS(x) \
47 BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
48 BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
49 BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
50 BI(x,c) BI(x,d) BI(x,e)
51
52/* 46/*
53 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: 47 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
54 * (these are usually mapped to vectors 0x20-0x2f) 48 * (these are usually mapped to vectors 0x20-0x2f)
55 */ 49 */
56BUILD_16_IRQS(0x0)
57 50
58/* 51/*
59 * The IO-APIC gives us many more interrupt sources. Most of these 52 * The IO-APIC gives us many more interrupt sources. Most of these
@@ -65,17 +58,12 @@ BUILD_16_IRQS(0x0)
65 * 58 *
66 * (these are usually mapped into the 0x30-0xff vector range) 59 * (these are usually mapped into the 0x30-0xff vector range)
67 */ 60 */
68 BUILD_16_IRQS(0x1) BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3) 61 BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
69BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7) 62BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
70BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb) 63BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
71BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) 64BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd) BUILD_16_IRQS(0xe) BUILD_16_IRQS(0xf)
72
73#ifdef CONFIG_PCI_MSI
74 BUILD_15_IRQS(0xe)
75#endif
76 65
77#undef BUILD_16_IRQS 66#undef BUILD_16_IRQS
78#undef BUILD_15_IRQS
79#undef BI 67#undef BI
80 68
81 69
@@ -88,29 +76,15 @@ BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
88 IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \ 76 IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
89 IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f) 77 IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
90 78
91#define IRQLIST_15(x) \
92 IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
93 IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
94 IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
95 IRQ(x,c), IRQ(x,d), IRQ(x,e)
96
97void (*interrupt[NR_IRQS])(void) = { 79void (*interrupt[NR_IRQS])(void) = {
98 IRQLIST_16(0x0), 80 IRQLIST_16(0x2), IRQLIST_16(0x3),
99
100 IRQLIST_16(0x1), IRQLIST_16(0x2), IRQLIST_16(0x3),
101 IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7), 81 IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
102 IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb), 82 IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
103 IRQLIST_16(0xc), IRQLIST_16(0xd) 83 IRQLIST_16(0xc), IRQLIST_16(0xd), IRQLIST_16(0xe), IRQLIST_16(0xf)
104
105#ifdef CONFIG_PCI_MSI
106 , IRQLIST_15(0xe)
107#endif
108
109}; 84};
110 85
111#undef IRQ 86#undef IRQ
112#undef IRQLIST_16 87#undef IRQLIST_16
113#undef IRQLIST_14
114 88
115/* 89/*
116 * This is the 'legacy' 8259A Programmable Interrupt Controller, 90 * This is the 'legacy' 8259A Programmable Interrupt Controller,
@@ -121,42 +95,15 @@ void (*interrupt[NR_IRQS])(void) = {
121 * moves to arch independent land 95 * moves to arch independent land
122 */ 96 */
123 97
124DEFINE_SPINLOCK(i8259A_lock);
125
126static int i8259A_auto_eoi; 98static int i8259A_auto_eoi;
127 99DEFINE_SPINLOCK(i8259A_lock);
128static void end_8259A_irq (unsigned int irq)
129{
130 if (irq > 256) {
131 char var;
132 printk("return %p stack %p ti %p\n", __builtin_return_address(0), &var, task_thread_info(current));
133
134 BUG();
135 }
136
137 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
138 irq_desc[irq].action)
139 enable_8259A_irq(irq);
140}
141
142#define shutdown_8259A_irq disable_8259A_irq
143
144static void mask_and_ack_8259A(unsigned int); 100static void mask_and_ack_8259A(unsigned int);
145 101
146static unsigned int startup_8259A_irq(unsigned int irq) 102static struct irq_chip i8259A_chip = {
147{ 103 .name = "XT-PIC",
148 enable_8259A_irq(irq); 104 .mask = disable_8259A_irq,
149 return 0; /* never anything pending */ 105 .unmask = enable_8259A_irq,
150} 106 .mask_ack = mask_and_ack_8259A,
151
152static struct hw_interrupt_type i8259A_irq_type = {
153 .typename = "XT-PIC",
154 .startup = startup_8259A_irq,
155 .shutdown = shutdown_8259A_irq,
156 .enable = enable_8259A_irq,
157 .disable = disable_8259A_irq,
158 .ack = mask_and_ack_8259A,
159 .end = end_8259A_irq,
160}; 107};
161 108
162/* 109/*
@@ -231,7 +178,7 @@ void make_8259A_irq(unsigned int irq)
231{ 178{
232 disable_irq_nosync(irq); 179 disable_irq_nosync(irq);
233 io_apic_irqs &= ~(1<<irq); 180 io_apic_irqs &= ~(1<<irq);
234 irq_desc[irq].chip = &i8259A_irq_type; 181 set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
235 enable_irq(irq); 182 enable_irq(irq);
236} 183}
237 184
@@ -367,9 +314,9 @@ void init_8259A(int auto_eoi)
367 * in AEOI mode we just have to mask the interrupt 314 * in AEOI mode we just have to mask the interrupt
368 * when acking. 315 * when acking.
369 */ 316 */
370 i8259A_irq_type.ack = disable_8259A_irq; 317 i8259A_chip.mask_ack = disable_8259A_irq;
371 else 318 else
372 i8259A_irq_type.ack = mask_and_ack_8259A; 319 i8259A_chip.mask_ack = mask_and_ack_8259A;
373 320
374 udelay(100); /* wait for 8259A to initialize */ 321 udelay(100); /* wait for 8259A to initialize */
375 322
@@ -447,6 +394,26 @@ device_initcall(i8259A_init_sysfs);
447 */ 394 */
448 395
449static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL}; 396static struct irqaction irq2 = { no_action, 0, CPU_MASK_NONE, "cascade", NULL, NULL};
397DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
398 [0 ... FIRST_EXTERNAL_VECTOR - 1] = -1,
399 [FIRST_EXTERNAL_VECTOR + 0] = 0,
400 [FIRST_EXTERNAL_VECTOR + 1] = 1,
401 [FIRST_EXTERNAL_VECTOR + 2] = 2,
402 [FIRST_EXTERNAL_VECTOR + 3] = 3,
403 [FIRST_EXTERNAL_VECTOR + 4] = 4,
404 [FIRST_EXTERNAL_VECTOR + 5] = 5,
405 [FIRST_EXTERNAL_VECTOR + 6] = 6,
406 [FIRST_EXTERNAL_VECTOR + 7] = 7,
407 [FIRST_EXTERNAL_VECTOR + 8] = 8,
408 [FIRST_EXTERNAL_VECTOR + 9] = 9,
409 [FIRST_EXTERNAL_VECTOR + 10] = 10,
410 [FIRST_EXTERNAL_VECTOR + 11] = 11,
411 [FIRST_EXTERNAL_VECTOR + 12] = 12,
412 [FIRST_EXTERNAL_VECTOR + 13] = 13,
413 [FIRST_EXTERNAL_VECTOR + 14] = 14,
414 [FIRST_EXTERNAL_VECTOR + 15] = 15,
415 [FIRST_EXTERNAL_VECTOR + 16 ... NR_VECTORS - 1] = -1
416};
450 417
451void __init init_ISA_irqs (void) 418void __init init_ISA_irqs (void)
452{ 419{
@@ -464,12 +431,13 @@ void __init init_ISA_irqs (void)
464 /* 431 /*
465 * 16 old-style INTA-cycle interrupts: 432 * 16 old-style INTA-cycle interrupts:
466 */ 433 */
467 irq_desc[i].chip = &i8259A_irq_type; 434 set_irq_chip_and_handler(i, &i8259A_chip,
435 handle_level_irq);
468 } else { 436 } else {
469 /* 437 /*
470 * 'high' PCI IRQs filled in on demand 438 * 'high' PCI IRQs filled in on demand
471 */ 439 */
472 irq_desc[i].chip = &no_irq_type; 440 irq_desc[i].chip = &no_irq_chip;
473 } 441 }
474 } 442 }
475} 443}
@@ -543,8 +511,6 @@ void __init init_IRQ(void)
543 */ 511 */
544 for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) { 512 for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
545 int vector = FIRST_EXTERNAL_VECTOR + i; 513 int vector = FIRST_EXTERNAL_VECTOR + i;
546 if (i >= NR_IRQS)
547 break;
548 if (vector != IA32_SYSCALL_VECTOR) 514 if (vector != IA32_SYSCALL_VECTOR)
549 set_intr_gate(vector, interrupt[i]); 515 set_intr_gate(vector, interrupt[i]);
550 } 516 }
@@ -554,7 +520,7 @@ void __init init_IRQ(void)
554 * IRQ0 must be given a fixed assignment and initialized, 520 * IRQ0 must be given a fixed assignment and initialized,
555 * because it's used before the IO-APIC is set up. 521 * because it's used before the IO-APIC is set up.
556 */ 522 */
557 set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]); 523 __get_cpu_var(vector_irq)[FIRST_DEVICE_VECTOR] = 0;
558 524
559 /* 525 /*
560 * The reschedule interrupt is a CPU-to-CPU reschedule-helper 526 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 0491019d4c8d..91728d9d3472 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -26,9 +26,12 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/smp_lock.h> 28#include <linux/smp_lock.h>
29#include <linux/pci.h>
29#include <linux/mc146818rtc.h> 30#include <linux/mc146818rtc.h>
30#include <linux/acpi.h> 31#include <linux/acpi.h>
31#include <linux/sysdev.h> 32#include <linux/sysdev.h>
33#include <linux/msi.h>
34#include <linux/htirq.h>
32#ifdef CONFIG_ACPI 35#ifdef CONFIG_ACPI
33#include <acpi/acpi_bus.h> 36#include <acpi/acpi_bus.h>
34#endif 37#endif
@@ -41,6 +44,10 @@
41#include <asm/acpi.h> 44#include <asm/acpi.h>
42#include <asm/dma.h> 45#include <asm/dma.h>
43#include <asm/nmi.h> 46#include <asm/nmi.h>
47#include <asm/msidef.h>
48#include <asm/hypertransport.h>
49
50static int assign_irq_vector(int irq, cpumask_t mask);
44 51
45#define __apicdebuginit __init 52#define __apicdebuginit __init
46 53
@@ -81,14 +88,6 @@ static struct irq_pin_list {
81 short apic, pin, next; 88 short apic, pin, next;
82} irq_2_pin[PIN_MAP_SIZE]; 89} irq_2_pin[PIN_MAP_SIZE];
83 90
84int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
85#ifdef CONFIG_PCI_MSI
86#define vector_to_irq(vector) \
87 (platform_legacy_irq(vector) ? vector : vector_irq[vector])
88#else
89#define vector_to_irq(vector) (vector)
90#endif
91
92#define __DO_ACTION(R, ACTION, FINAL) \ 91#define __DO_ACTION(R, ACTION, FINAL) \
93 \ 92 \
94{ \ 93{ \
@@ -139,11 +138,35 @@ static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
139} 138}
140 139
141#ifdef CONFIG_SMP 140#ifdef CONFIG_SMP
141static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
142{
143 int apic, pin;
144 struct irq_pin_list *entry = irq_2_pin + irq;
145
146 BUG_ON(irq >= NR_IRQS);
147 for (;;) {
148 unsigned int reg;
149 apic = entry->apic;
150 pin = entry->pin;
151 if (pin == -1)
152 break;
153 io_apic_write(apic, 0x11 + pin*2, dest);
154 reg = io_apic_read(apic, 0x10 + pin*2);
155 reg &= ~0x000000ff;
156 reg |= vector;
157 io_apic_modify(apic, reg);
158 if (!entry->next)
159 break;
160 entry = irq_2_pin + entry->next;
161 }
162}
163
142static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) 164static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
143{ 165{
144 unsigned long flags; 166 unsigned long flags;
145 unsigned int dest; 167 unsigned int dest;
146 cpumask_t tmp; 168 cpumask_t tmp;
169 int vector;
147 170
148 cpus_and(tmp, mask, cpu_online_map); 171 cpus_and(tmp, mask, cpu_online_map);
149 if (cpus_empty(tmp)) 172 if (cpus_empty(tmp))
@@ -151,7 +174,13 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
151 174
152 cpus_and(mask, tmp, CPU_MASK_ALL); 175 cpus_and(mask, tmp, CPU_MASK_ALL);
153 176
154 dest = cpu_mask_to_apicid(mask); 177 vector = assign_irq_vector(irq, mask);
178 if (vector < 0)
179 return;
180
181 cpus_clear(tmp);
182 cpu_set(vector >> 8, tmp);
183 dest = cpu_mask_to_apicid(tmp);
155 184
156 /* 185 /*
157 * Only the high 8 bits are valid. 186 * Only the high 8 bits are valid.
@@ -159,14 +188,12 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
159 dest = SET_APIC_LOGICAL_ID(dest); 188 dest = SET_APIC_LOGICAL_ID(dest);
160 189
161 spin_lock_irqsave(&ioapic_lock, flags); 190 spin_lock_irqsave(&ioapic_lock, flags);
162 __DO_ACTION(1, = dest, ) 191 __target_IO_APIC_irq(irq, dest, vector & 0xff);
163 set_irq_info(irq, mask); 192 set_native_irq_info(irq, mask);
164 spin_unlock_irqrestore(&ioapic_lock, flags); 193 spin_unlock_irqrestore(&ioapic_lock, flags);
165} 194}
166#endif 195#endif
167 196
168static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
169
170/* 197/*
171 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are 198 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
172 * shared ISA-space IRQs, so we have to support them. We are super 199 * shared ISA-space IRQs, so we have to support them. We are super
@@ -492,64 +519,6 @@ static inline int irq_trigger(int idx)
492 return MPBIOS_trigger(idx); 519 return MPBIOS_trigger(idx);
493} 520}
494 521
495static int next_irq = 16;
496
497/*
498 * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
499 * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
500 * from ACPI, which can reach 800 in large boxen.
501 *
502 * Compact the sparse GSI space into a sequential IRQ series and reuse
503 * vectors if possible.
504 */
505int gsi_irq_sharing(int gsi)
506{
507 int i, tries, vector;
508
509 BUG_ON(gsi >= NR_IRQ_VECTORS);
510
511 if (platform_legacy_irq(gsi))
512 return gsi;
513
514 if (gsi_2_irq[gsi] != 0xFF)
515 return (int)gsi_2_irq[gsi];
516
517 tries = NR_IRQS;
518 try_again:
519 vector = assign_irq_vector(gsi);
520
521 /*
522 * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
523 * use of vector and if found, return that IRQ. However, we never want
524 * to share legacy IRQs, which usually have a different trigger mode
525 * than PCI.
526 */
527 for (i = 0; i < NR_IRQS; i++)
528 if (IO_APIC_VECTOR(i) == vector)
529 break;
530 if (platform_legacy_irq(i)) {
531 if (--tries >= 0) {
532 IO_APIC_VECTOR(i) = 0;
533 goto try_again;
534 }
535 panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
536 }
537 if (i < NR_IRQS) {
538 gsi_2_irq[gsi] = i;
539 printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
540 gsi, vector, i);
541 return i;
542 }
543
544 i = next_irq++;
545 BUG_ON(i >= NR_IRQS);
546 gsi_2_irq[gsi] = i;
547 IO_APIC_VECTOR(i) = vector;
548 printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
549 gsi, vector, i);
550 return i;
551}
552
553static int pin_2_irq(int idx, int apic, int pin) 522static int pin_2_irq(int idx, int apic, int pin)
554{ 523{
555 int irq, i; 524 int irq, i;
@@ -571,7 +540,6 @@ static int pin_2_irq(int idx, int apic, int pin)
571 while (i < apic) 540 while (i < apic)
572 irq += nr_ioapic_registers[i++]; 541 irq += nr_ioapic_registers[i++];
573 irq += pin; 542 irq += pin;
574 irq = gsi_irq_sharing(irq);
575 } 543 }
576 BUG_ON(irq >= NR_IRQS); 544 BUG_ON(irq >= NR_IRQS);
577 return irq; 545 return irq;
@@ -595,46 +563,83 @@ static inline int IO_APIC_irq_trigger(int irq)
595} 563}
596 564
597/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */ 565/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
598u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }; 566unsigned int irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_EXTERNAL_VECTOR, 0 };
599 567
600int assign_irq_vector(int irq) 568static int __assign_irq_vector(int irq, cpumask_t mask)
601{ 569{
602 static int current_vector = FIRST_DEVICE_VECTOR, offset = 0; 570 /*
603 unsigned long flags; 571 * NOTE! The local APIC isn't very good at handling
604 int vector; 572 * multiple interrupts at the same interrupt level.
605 573 * As the interrupt level is determined by taking the
606 BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS); 574 * vector number and shifting that right by 4, we
607 575 * want to spread these out a bit so that they don't
608 spin_lock_irqsave(&vector_lock, flags); 576 * all fall in the same interrupt level.
609 577 *
610 if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) { 578 * Also, we've got to be careful not to trash gate
611 spin_unlock_irqrestore(&vector_lock, flags); 579 * 0x80, because int 0x80 is hm, kind of importantish. ;)
612 return IO_APIC_VECTOR(irq); 580 */
581 static struct {
582 int vector;
583 int offset;
584 } pos[NR_CPUS] = { [ 0 ... NR_CPUS - 1] = {FIRST_DEVICE_VECTOR, 0} };
585 int old_vector = -1;
586 int cpu;
587
588 BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
589
590 if (IO_APIC_VECTOR(irq) > 0)
591 old_vector = IO_APIC_VECTOR(irq);
592 if ((old_vector > 0) && cpu_isset(old_vector >> 8, mask)) {
593 return old_vector;
613 } 594 }
595
596 for_each_cpu_mask(cpu, mask) {
597 int vector, offset;
598 vector = pos[cpu].vector;
599 offset = pos[cpu].offset;
614next: 600next:
615 current_vector += 8; 601 vector += 8;
616 if (current_vector == IA32_SYSCALL_VECTOR) 602 if (vector >= FIRST_SYSTEM_VECTOR) {
617 goto next; 603 /* If we run out of vectors on large boxen, must share them. */
618 604 offset = (offset + 1) % 8;
619 if (current_vector >= FIRST_SYSTEM_VECTOR) { 605 vector = FIRST_DEVICE_VECTOR + offset;
620 /* If we run out of vectors on large boxen, must share them. */ 606 }
621 offset = (offset + 1) % 8; 607 if (unlikely(pos[cpu].vector == vector))
622 current_vector = FIRST_DEVICE_VECTOR + offset; 608 continue;
609 if (vector == IA32_SYSCALL_VECTOR)
610 goto next;
611 if (per_cpu(vector_irq, cpu)[vector] != -1)
612 goto next;
613 /* Found one! */
614 pos[cpu].vector = vector;
615 pos[cpu].offset = offset;
616 if (old_vector >= 0) {
617 int old_cpu = old_vector >> 8;
618 old_vector &= 0xff;
619 per_cpu(vector_irq, old_cpu)[old_vector] = -1;
620 }
621 per_cpu(vector_irq, cpu)[vector] = irq;
622 vector |= cpu << 8;
623 IO_APIC_VECTOR(irq) = vector;
624 return vector;
623 } 625 }
626 return -ENOSPC;
627}
624 628
625 vector = current_vector; 629static int assign_irq_vector(int irq, cpumask_t mask)
626 vector_irq[vector] = irq; 630{
627 if (irq != AUTO_ASSIGN) 631 int vector;
628 IO_APIC_VECTOR(irq) = vector; 632 unsigned long flags;
629 633
634 spin_lock_irqsave(&vector_lock, flags);
635 vector = __assign_irq_vector(irq, mask);
630 spin_unlock_irqrestore(&vector_lock, flags); 636 spin_unlock_irqrestore(&vector_lock, flags);
631
632 return vector; 637 return vector;
633} 638}
634 639
635extern void (*interrupt[NR_IRQS])(void); 640extern void (*interrupt[NR_IRQS])(void);
636static struct hw_interrupt_type ioapic_level_type; 641
637static struct hw_interrupt_type ioapic_edge_type; 642static struct irq_chip ioapic_chip;
638 643
639#define IOAPIC_AUTO -1 644#define IOAPIC_AUTO -1
640#define IOAPIC_EDGE 0 645#define IOAPIC_EDGE 0
@@ -642,16 +647,13 @@ static struct hw_interrupt_type ioapic_edge_type;
642 647
643static void ioapic_register_intr(int irq, int vector, unsigned long trigger) 648static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
644{ 649{
645 unsigned idx;
646
647 idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
648
649 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 650 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
650 trigger == IOAPIC_LEVEL) 651 trigger == IOAPIC_LEVEL)
651 irq_desc[idx].chip = &ioapic_level_type; 652 set_irq_chip_and_handler(irq, &ioapic_chip,
653 handle_fasteoi_irq);
652 else 654 else
653 irq_desc[idx].chip = &ioapic_edge_type; 655 set_irq_chip_and_handler(irq, &ioapic_chip,
654 set_intr_gate(vector, interrupt[idx]); 656 handle_edge_irq);
655} 657}
656 658
657static void __init setup_IO_APIC_irqs(void) 659static void __init setup_IO_APIC_irqs(void)
@@ -701,8 +703,15 @@ static void __init setup_IO_APIC_irqs(void)
701 continue; 703 continue;
702 704
703 if (IO_APIC_IRQ(irq)) { 705 if (IO_APIC_IRQ(irq)) {
704 vector = assign_irq_vector(irq); 706 cpumask_t mask;
705 entry.vector = vector; 707 vector = assign_irq_vector(irq, TARGET_CPUS);
708 if (vector < 0)
709 continue;
710
711 cpus_clear(mask);
712 cpu_set(vector >> 8, mask);
713 entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
714 entry.vector = vector & 0xff;
706 715
707 ioapic_register_intr(irq, vector, IOAPIC_AUTO); 716 ioapic_register_intr(irq, vector, IOAPIC_AUTO);
708 if (!apic && (irq < 16)) 717 if (!apic && (irq < 16))
@@ -752,7 +761,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, in
752 * The timer IRQ doesn't have to know that behind the 761 * The timer IRQ doesn't have to know that behind the
753 * scene we have a 8259A-master in AEOI mode ... 762 * scene we have a 8259A-master in AEOI mode ...
754 */ 763 */
755 irq_desc[0].chip = &ioapic_edge_type; 764 set_irq_chip_and_handler(0, &ioapic_chip, handle_edge_irq);
756 765
757 /* 766 /*
758 * Add it to the IO-APIC irq-routing table: 767 * Add it to the IO-APIC irq-routing table:
@@ -868,17 +877,12 @@ void __apicdebuginit print_IO_APIC(void)
868 ); 877 );
869 } 878 }
870 } 879 }
871 if (use_pci_vector())
872 printk(KERN_INFO "Using vector-based indexing\n");
873 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 880 printk(KERN_DEBUG "IRQ to pin mappings:\n");
874 for (i = 0; i < NR_IRQS; i++) { 881 for (i = 0; i < NR_IRQS; i++) {
875 struct irq_pin_list *entry = irq_2_pin + i; 882 struct irq_pin_list *entry = irq_2_pin + i;
876 if (entry->pin < 0) 883 if (entry->pin < 0)
877 continue; 884 continue;
878 if (use_pci_vector() && !platform_legacy_irq(i)) 885 printk(KERN_DEBUG "IRQ%d ", i);
879 printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
880 else
881 printk(KERN_DEBUG "IRQ%d ", i);
882 for (;;) { 886 for (;;) {
883 printk("-> %d:%d", entry->apic, entry->pin); 887 printk("-> %d:%d", entry->apic, entry->pin);
884 if (!entry->next) 888 if (!entry->next)
@@ -1185,7 +1189,7 @@ static int __init timer_irq_works(void)
1185 * an edge even if it isn't on the 8259A... 1189 * an edge even if it isn't on the 8259A...
1186 */ 1190 */
1187 1191
1188static unsigned int startup_edge_ioapic_irq(unsigned int irq) 1192static unsigned int startup_ioapic_irq(unsigned int irq)
1189{ 1193{
1190 int was_pending = 0; 1194 int was_pending = 0;
1191 unsigned long flags; 1195 unsigned long flags;
@@ -1202,107 +1206,16 @@ static unsigned int startup_edge_ioapic_irq(unsigned int irq)
1202 return was_pending; 1206 return was_pending;
1203} 1207}
1204 1208
1205/* 1209static int ioapic_retrigger_irq(unsigned int irq)
1206 * Once we have recorded IRQ_PENDING already, we can mask the
1207 * interrupt for real. This prevents IRQ storms from unhandled
1208 * devices.
1209 */
1210static void ack_edge_ioapic_irq(unsigned int irq)
1211{
1212 move_irq(irq);
1213 if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
1214 == (IRQ_PENDING | IRQ_DISABLED))
1215 mask_IO_APIC_irq(irq);
1216 ack_APIC_irq();
1217}
1218
1219/*
1220 * Level triggered interrupts can just be masked,
1221 * and shutting down and starting up the interrupt
1222 * is the same as enabling and disabling them -- except
1223 * with a startup need to return a "was pending" value.
1224 *
1225 * Level triggered interrupts are special because we
1226 * do not touch any IO-APIC register while handling
1227 * them. We ack the APIC in the end-IRQ handler, not
1228 * in the start-IRQ-handler. Protection against reentrance
1229 * from the same interrupt is still provided, both by the
1230 * generic IRQ layer and by the fact that an unacked local
1231 * APIC does not accept IRQs.
1232 */
1233static unsigned int startup_level_ioapic_irq (unsigned int irq)
1234{
1235 unmask_IO_APIC_irq(irq);
1236
1237 return 0; /* don't check for pending */
1238}
1239
1240static void end_level_ioapic_irq (unsigned int irq)
1241{
1242 move_irq(irq);
1243 ack_APIC_irq();
1244}
1245
1246#ifdef CONFIG_PCI_MSI
1247static unsigned int startup_edge_ioapic_vector(unsigned int vector)
1248{
1249 int irq = vector_to_irq(vector);
1250
1251 return startup_edge_ioapic_irq(irq);
1252}
1253
1254static void ack_edge_ioapic_vector(unsigned int vector)
1255{
1256 int irq = vector_to_irq(vector);
1257
1258 move_native_irq(vector);
1259 ack_edge_ioapic_irq(irq);
1260}
1261
1262static unsigned int startup_level_ioapic_vector (unsigned int vector)
1263{ 1210{
1264 int irq = vector_to_irq(vector); 1211 cpumask_t mask;
1212 unsigned vector;
1265 1213
1266 return startup_level_ioapic_irq (irq); 1214 vector = irq_vector[irq];
1267} 1215 cpus_clear(mask);
1268 1216 cpu_set(vector >> 8, mask);
1269static void end_level_ioapic_vector (unsigned int vector)
1270{
1271 int irq = vector_to_irq(vector);
1272
1273 move_native_irq(vector);
1274 end_level_ioapic_irq(irq);
1275}
1276
1277static void mask_IO_APIC_vector (unsigned int vector)
1278{
1279 int irq = vector_to_irq(vector);
1280
1281 mask_IO_APIC_irq(irq);
1282}
1283 1217
1284static void unmask_IO_APIC_vector (unsigned int vector) 1218 send_IPI_mask(mask, vector & 0xff);
1285{
1286 int irq = vector_to_irq(vector);
1287
1288 unmask_IO_APIC_irq(irq);
1289}
1290
1291#ifdef CONFIG_SMP
1292static void set_ioapic_affinity_vector (unsigned int vector,
1293 cpumask_t cpu_mask)
1294{
1295 int irq = vector_to_irq(vector);
1296
1297 set_native_irq_info(vector, cpu_mask);
1298 set_ioapic_affinity_irq(irq, cpu_mask);
1299}
1300#endif // CONFIG_SMP
1301#endif // CONFIG_PCI_MSI
1302
1303static int ioapic_retrigger(unsigned int irq)
1304{
1305 send_IPI_self(IO_APIC_VECTOR(irq));
1306 1219
1307 return 1; 1220 return 1;
1308} 1221}
@@ -1316,32 +1229,47 @@ static int ioapic_retrigger(unsigned int irq)
1316 * races. 1229 * races.
1317 */ 1230 */
1318 1231
1319static struct hw_interrupt_type ioapic_edge_type __read_mostly = { 1232static void ack_apic_edge(unsigned int irq)
1320 .typename = "IO-APIC-edge", 1233{
1321 .startup = startup_edge_ioapic, 1234 move_native_irq(irq);
1322 .shutdown = shutdown_edge_ioapic, 1235 ack_APIC_irq();
1323 .enable = enable_edge_ioapic, 1236}
1324 .disable = disable_edge_ioapic, 1237
1325 .ack = ack_edge_ioapic, 1238static void ack_apic_level(unsigned int irq)
1326 .end = end_edge_ioapic, 1239{
1327#ifdef CONFIG_SMP 1240 int do_unmask_irq = 0;
1328 .set_affinity = set_ioapic_affinity, 1241
1242#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
1243 /* If we are moving the irq we need to mask it */
1244 if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
1245 do_unmask_irq = 1;
1246 mask_IO_APIC_irq(irq);
1247 }
1329#endif 1248#endif
1330 .retrigger = ioapic_retrigger,
1331};
1332 1249
1333static struct hw_interrupt_type ioapic_level_type __read_mostly = { 1250 /*
1334 .typename = "IO-APIC-level", 1251 * We must acknowledge the irq before we move it or the acknowledge will
1335 .startup = startup_level_ioapic, 1252 * not propogate properly.
1336 .shutdown = shutdown_level_ioapic, 1253 */
1337 .enable = enable_level_ioapic, 1254 ack_APIC_irq();
1338 .disable = disable_level_ioapic, 1255
1339 .ack = mask_and_ack_level_ioapic, 1256 /* Now we can move and renable the irq */
1340 .end = end_level_ioapic, 1257 move_masked_irq(irq);
1258 if (unlikely(do_unmask_irq))
1259 unmask_IO_APIC_irq(irq);
1260}
1261
1262static struct irq_chip ioapic_chip __read_mostly = {
1263 .name = "IO-APIC",
1264 .startup = startup_ioapic_irq,
1265 .mask = mask_IO_APIC_irq,
1266 .unmask = unmask_IO_APIC_irq,
1267 .ack = ack_apic_edge,
1268 .eoi = ack_apic_level,
1341#ifdef CONFIG_SMP 1269#ifdef CONFIG_SMP
1342 .set_affinity = set_ioapic_affinity, 1270 .set_affinity = set_ioapic_affinity_irq,
1343#endif 1271#endif
1344 .retrigger = ioapic_retrigger, 1272 .retrigger = ioapic_retrigger_irq,
1345}; 1273};
1346 1274
1347static inline void init_IO_APIC_traps(void) 1275static inline void init_IO_APIC_traps(void)
@@ -1361,11 +1289,6 @@ static inline void init_IO_APIC_traps(void)
1361 */ 1289 */
1362 for (irq = 0; irq < NR_IRQS ; irq++) { 1290 for (irq = 0; irq < NR_IRQS ; irq++) {
1363 int tmp = irq; 1291 int tmp = irq;
1364 if (use_pci_vector()) {
1365 if (!platform_legacy_irq(tmp))
1366 if ((tmp = vector_to_irq(tmp)) == -1)
1367 continue;
1368 }
1369 if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) { 1292 if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
1370 /* 1293 /*
1371 * Hmm.. We don't have an entry for this, 1294 * Hmm.. We don't have an entry for this,
@@ -1376,7 +1299,7 @@ static inline void init_IO_APIC_traps(void)
1376 make_8259A_irq(irq); 1299 make_8259A_irq(irq);
1377 else 1300 else
1378 /* Strange. Oh, well.. */ 1301 /* Strange. Oh, well.. */
1379 irq_desc[irq].chip = &no_irq_type; 1302 irq_desc[irq].chip = &no_irq_chip;
1380 } 1303 }
1381 } 1304 }
1382} 1305}
@@ -1495,8 +1418,6 @@ static inline void unlock_ExtINT_logic(void)
1495 spin_unlock_irqrestore(&ioapic_lock, flags); 1418 spin_unlock_irqrestore(&ioapic_lock, flags);
1496} 1419}
1497 1420
1498int timer_uses_ioapic_pin_0;
1499
1500/* 1421/*
1501 * This code may look a bit paranoid, but it's supposed to cooperate with 1422 * This code may look a bit paranoid, but it's supposed to cooperate with
1502 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ 1423 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
@@ -1514,8 +1435,7 @@ static inline void check_timer(void)
1514 * get/set the timer IRQ vector: 1435 * get/set the timer IRQ vector:
1515 */ 1436 */
1516 disable_8259A_irq(0); 1437 disable_8259A_irq(0);
1517 vector = assign_irq_vector(0); 1438 vector = assign_irq_vector(0, TARGET_CPUS);
1518 set_intr_gate(vector, interrupt[0]);
1519 1439
1520 /* 1440 /*
1521 * Subtle, code in do_timer_interrupt() expects an AEOI 1441 * Subtle, code in do_timer_interrupt() expects an AEOI
@@ -1534,9 +1454,6 @@ static inline void check_timer(void)
1534 pin2 = ioapic_i8259.pin; 1454 pin2 = ioapic_i8259.pin;
1535 apic2 = ioapic_i8259.apic; 1455 apic2 = ioapic_i8259.apic;
1536 1456
1537 if (pin1 == 0)
1538 timer_uses_ioapic_pin_0 = 1;
1539
1540 apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n", 1457 apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
1541 vector, apic1, pin1, apic2, pin2); 1458 vector, apic1, pin1, apic2, pin2);
1542 1459
@@ -1740,6 +1657,253 @@ static int __init ioapic_init_sysfs(void)
1740 1657
1741device_initcall(ioapic_init_sysfs); 1658device_initcall(ioapic_init_sysfs);
1742 1659
1660/*
1661 * Dynamic irq allocate and deallocation
1662 */
1663int create_irq(void)
1664{
1665 /* Allocate an unused irq */
1666 int irq;
1667 int new;
1668 int vector = 0;
1669 unsigned long flags;
1670
1671 irq = -ENOSPC;
1672 spin_lock_irqsave(&vector_lock, flags);
1673 for (new = (NR_IRQS - 1); new >= 0; new--) {
1674 if (platform_legacy_irq(new))
1675 continue;
1676 if (irq_vector[new] != 0)
1677 continue;
1678 vector = __assign_irq_vector(new, TARGET_CPUS);
1679 if (likely(vector > 0))
1680 irq = new;
1681 break;
1682 }
1683 spin_unlock_irqrestore(&vector_lock, flags);
1684
1685 if (irq >= 0) {
1686 dynamic_irq_init(irq);
1687 }
1688 return irq;
1689}
1690
1691void destroy_irq(unsigned int irq)
1692{
1693 unsigned long flags;
1694
1695 dynamic_irq_cleanup(irq);
1696
1697 spin_lock_irqsave(&vector_lock, flags);
1698 irq_vector[irq] = 0;
1699 spin_unlock_irqrestore(&vector_lock, flags);
1700}
1701
1702/*
1703 * MSI mesage composition
1704 */
1705#ifdef CONFIG_PCI_MSI
1706static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
1707{
1708 int vector;
1709 unsigned dest;
1710
1711 vector = assign_irq_vector(irq, TARGET_CPUS);
1712 if (vector >= 0) {
1713 cpumask_t tmp;
1714
1715 cpus_clear(tmp);
1716 cpu_set(vector >> 8, tmp);
1717 dest = cpu_mask_to_apicid(tmp);
1718
1719 msg->address_hi = MSI_ADDR_BASE_HI;
1720 msg->address_lo =
1721 MSI_ADDR_BASE_LO |
1722 ((INT_DEST_MODE == 0) ?
1723 MSI_ADDR_DEST_MODE_PHYSICAL:
1724 MSI_ADDR_DEST_MODE_LOGICAL) |
1725 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
1726 MSI_ADDR_REDIRECTION_CPU:
1727 MSI_ADDR_REDIRECTION_LOWPRI) |
1728 MSI_ADDR_DEST_ID(dest);
1729
1730 msg->data =
1731 MSI_DATA_TRIGGER_EDGE |
1732 MSI_DATA_LEVEL_ASSERT |
1733 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
1734 MSI_DATA_DELIVERY_FIXED:
1735 MSI_DATA_DELIVERY_LOWPRI) |
1736 MSI_DATA_VECTOR(vector);
1737 }
1738 return vector;
1739}
1740
1741#ifdef CONFIG_SMP
1742static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
1743{
1744 struct msi_msg msg;
1745 unsigned int dest;
1746 cpumask_t tmp;
1747 int vector;
1748
1749 cpus_and(tmp, mask, cpu_online_map);
1750 if (cpus_empty(tmp))
1751 tmp = TARGET_CPUS;
1752
1753 cpus_and(mask, tmp, CPU_MASK_ALL);
1754
1755 vector = assign_irq_vector(irq, mask);
1756 if (vector < 0)
1757 return;
1758
1759 cpus_clear(tmp);
1760 cpu_set(vector >> 8, tmp);
1761 dest = cpu_mask_to_apicid(tmp);
1762
1763 read_msi_msg(irq, &msg);
1764
1765 msg.data &= ~MSI_DATA_VECTOR_MASK;
1766 msg.data |= MSI_DATA_VECTOR(vector);
1767 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
1768 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
1769
1770 write_msi_msg(irq, &msg);
1771 set_native_irq_info(irq, mask);
1772}
1773#endif /* CONFIG_SMP */
1774
1775/*
1776 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
1777 * which implement the MSI or MSI-X Capability Structure.
1778 */
1779static struct irq_chip msi_chip = {
1780 .name = "PCI-MSI",
1781 .unmask = unmask_msi_irq,
1782 .mask = mask_msi_irq,
1783 .ack = ack_apic_edge,
1784#ifdef CONFIG_SMP
1785 .set_affinity = set_msi_irq_affinity,
1786#endif
1787 .retrigger = ioapic_retrigger_irq,
1788};
1789
1790int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
1791{
1792 struct msi_msg msg;
1793 int ret;
1794 ret = msi_compose_msg(dev, irq, &msg);
1795 if (ret < 0)
1796 return ret;
1797
1798 write_msi_msg(irq, &msg);
1799
1800 set_irq_chip_and_handler(irq, &msi_chip, handle_edge_irq);
1801
1802 return 0;
1803}
1804
1805void arch_teardown_msi_irq(unsigned int irq)
1806{
1807 return;
1808}
1809
1810#endif /* CONFIG_PCI_MSI */
1811
1812/*
1813 * Hypertransport interrupt support
1814 */
1815#ifdef CONFIG_HT_IRQ
1816
1817#ifdef CONFIG_SMP
1818
1819static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
1820{
1821 u32 low, high;
1822 low = read_ht_irq_low(irq);
1823 high = read_ht_irq_high(irq);
1824
1825 low &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
1826 high &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
1827
1828 low |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
1829 high |= HT_IRQ_HIGH_DEST_ID(dest);
1830
1831 write_ht_irq_low(irq, low);
1832 write_ht_irq_high(irq, high);
1833}
1834
1835static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
1836{
1837 unsigned int dest;
1838 cpumask_t tmp;
1839 int vector;
1840
1841 cpus_and(tmp, mask, cpu_online_map);
1842 if (cpus_empty(tmp))
1843 tmp = TARGET_CPUS;
1844
1845 cpus_and(mask, tmp, CPU_MASK_ALL);
1846
1847 vector = assign_irq_vector(irq, mask);
1848 if (vector < 0)
1849 return;
1850
1851 cpus_clear(tmp);
1852 cpu_set(vector >> 8, tmp);
1853 dest = cpu_mask_to_apicid(tmp);
1854
1855 target_ht_irq(irq, dest, vector & 0xff);
1856 set_native_irq_info(irq, mask);
1857}
1858#endif
1859
1860static struct hw_interrupt_type ht_irq_chip = {
1861 .name = "PCI-HT",
1862 .mask = mask_ht_irq,
1863 .unmask = unmask_ht_irq,
1864 .ack = ack_apic_edge,
1865#ifdef CONFIG_SMP
1866 .set_affinity = set_ht_irq_affinity,
1867#endif
1868 .retrigger = ioapic_retrigger_irq,
1869};
1870
1871int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
1872{
1873 int vector;
1874
1875 vector = assign_irq_vector(irq, TARGET_CPUS);
1876 if (vector >= 0) {
1877 u32 low, high;
1878 unsigned dest;
1879 cpumask_t tmp;
1880
1881 cpus_clear(tmp);
1882 cpu_set(vector >> 8, tmp);
1883 dest = cpu_mask_to_apicid(tmp);
1884
1885 high = HT_IRQ_HIGH_DEST_ID(dest);
1886
1887 low = HT_IRQ_LOW_BASE |
1888 HT_IRQ_LOW_DEST_ID(dest) |
1889 HT_IRQ_LOW_VECTOR(vector) |
1890 ((INT_DEST_MODE == 0) ?
1891 HT_IRQ_LOW_DM_PHYSICAL :
1892 HT_IRQ_LOW_DM_LOGICAL) |
1893 HT_IRQ_LOW_RQEOI_EDGE |
1894 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
1895 HT_IRQ_LOW_MT_FIXED :
1896 HT_IRQ_LOW_MT_ARBITRATED);
1897
1898 write_ht_irq_low(irq, low);
1899 write_ht_irq_high(irq, high);
1900
1901 set_irq_chip_and_handler(irq, &ht_irq_chip, handle_edge_irq);
1902 }
1903 return vector;
1904}
1905#endif /* CONFIG_HT_IRQ */
1906
1743/* -------------------------------------------------------------------------- 1907/* --------------------------------------------------------------------------
1744 ACPI-based IOAPIC Configuration 1908 ACPI-based IOAPIC Configuration
1745 -------------------------------------------------------------------------- */ 1909 -------------------------------------------------------------------------- */
@@ -1765,6 +1929,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
1765{ 1929{
1766 struct IO_APIC_route_entry entry; 1930 struct IO_APIC_route_entry entry;
1767 unsigned long flags; 1931 unsigned long flags;
1932 int vector;
1933 cpumask_t mask;
1768 1934
1769 if (!IO_APIC_IRQ(irq)) { 1935 if (!IO_APIC_IRQ(irq)) {
1770 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 1936 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
@@ -1773,6 +1939,20 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
1773 } 1939 }
1774 1940
1775 /* 1941 /*
1942 * IRQs < 16 are already in the irq_2_pin[] map
1943 */
1944 if (irq >= 16)
1945 add_pin_to_irq(irq, ioapic, pin);
1946
1947
1948 vector = assign_irq_vector(irq, TARGET_CPUS);
1949 if (vector < 0)
1950 return vector;
1951
1952 cpus_clear(mask);
1953 cpu_set(vector >> 8, mask);
1954
1955 /*
1776 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly. 1956 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
1777 * Note that we mask (disable) IRQs now -- these get enabled when the 1957 * Note that we mask (disable) IRQs now -- these get enabled when the
1778 * corresponding device driver registers for this IRQ. 1958 * corresponding device driver registers for this IRQ.
@@ -1782,19 +1962,11 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
1782 1962
1783 entry.delivery_mode = INT_DELIVERY_MODE; 1963 entry.delivery_mode = INT_DELIVERY_MODE;
1784 entry.dest_mode = INT_DEST_MODE; 1964 entry.dest_mode = INT_DEST_MODE;
1785 entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS); 1965 entry.dest.logical.logical_dest = cpu_mask_to_apicid(mask);
1786 entry.trigger = triggering; 1966 entry.trigger = triggering;
1787 entry.polarity = polarity; 1967 entry.polarity = polarity;
1788 entry.mask = 1; /* Disabled (masked) */ 1968 entry.mask = 1; /* Disabled (masked) */
1789 1969 entry.vector = vector & 0xff;
1790 irq = gsi_irq_sharing(irq);
1791 /*
1792 * IRQs < 16 are already in the irq_2_pin[] map
1793 */
1794 if (irq >= 16)
1795 add_pin_to_irq(irq, ioapic, pin);
1796
1797 entry.vector = assign_irq_vector(irq);
1798 1970
1799 apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> " 1971 apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
1800 "IRQ %d Mode:%i Active:%i)\n", ioapic, 1972 "IRQ %d Mode:%i Active:%i)\n", ioapic,
@@ -1809,7 +1981,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int p
1809 ioapic_write_entry(ioapic, pin, entry); 1981 ioapic_write_entry(ioapic, pin, entry);
1810 1982
1811 spin_lock_irqsave(&ioapic_lock, flags); 1983 spin_lock_irqsave(&ioapic_lock, flags);
1812 set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS); 1984 set_native_irq_info(irq, TARGET_CPUS);
1813 spin_unlock_irqrestore(&ioapic_lock, flags); 1985 spin_unlock_irqrestore(&ioapic_lock, flags);
1814 1986
1815 return 0; 1987 return 0;
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index b3677e6ccc6e..506f27c85ca5 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -74,7 +74,8 @@ int show_interrupts(struct seq_file *p, void *v)
74 for_each_online_cpu(j) 74 for_each_online_cpu(j)
75 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); 75 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
76#endif 76#endif
77 seq_printf(p, " %14s", irq_desc[i].chip->typename); 77 seq_printf(p, " %8s", irq_desc[i].chip->name);
78 seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
78 79
79 seq_printf(p, " %s", action->name); 80 seq_printf(p, " %s", action->name);
80 for (action=action->next; action; action = action->next) 81 for (action=action->next; action; action = action->next)
@@ -104,7 +105,12 @@ skip:
104asmlinkage unsigned int do_IRQ(struct pt_regs *regs) 105asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
105{ 106{
106 /* high bit used in ret_from_ code */ 107 /* high bit used in ret_from_ code */
107 unsigned irq = ~regs->orig_rax; 108 unsigned vector = ~regs->orig_rax;
109 unsigned irq;
110
111 exit_idle();
112 irq_enter();
113 irq = __get_cpu_var(vector_irq)[vector];
108 114
109 if (unlikely(irq >= NR_IRQS)) { 115 if (unlikely(irq >= NR_IRQS)) {
110 printk(KERN_EMERG "%s: cannot handle IRQ %d\n", 116 printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
@@ -112,12 +118,10 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
112 BUG(); 118 BUG();
113 } 119 }
114 120
115 exit_idle();
116 irq_enter();
117#ifdef CONFIG_DEBUG_STACKOVERFLOW 121#ifdef CONFIG_DEBUG_STACKOVERFLOW
118 stack_overflow_check(regs); 122 stack_overflow_check(regs);
119#endif 123#endif
120 __do_IRQ(irq, regs); 124 generic_handle_irq(irq, regs);
121 irq_exit(); 125 irq_exit();
122 126
123 return 1; 127 return 1;
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index b8d53dfa9931..b147ab19fbd4 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -790,20 +790,11 @@ void __init mp_config_acpi_legacy_irqs(void)
790 } 790 }
791} 791}
792 792
793#define MAX_GSI_NUM 4096
794
795int mp_register_gsi(u32 gsi, int triggering, int polarity) 793int mp_register_gsi(u32 gsi, int triggering, int polarity)
796{ 794{
797 int ioapic = -1; 795 int ioapic = -1;
798 int ioapic_pin = 0; 796 int ioapic_pin = 0;
799 int idx, bit = 0; 797 int idx, bit = 0;
800 static int pci_irq = 16;
801 /*
802 * Mapping between Global System Interrupts, which
803 * represent all possible interrupts, to the IRQs
804 * assigned to actual devices.
805 */
806 static int gsi_to_irq[MAX_GSI_NUM];
807 798
808 if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) 799 if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
809 return gsi; 800 return gsi;
@@ -836,42 +827,11 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
836 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) { 827 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
837 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n", 828 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
838 mp_ioapic_routing[ioapic].apic_id, ioapic_pin); 829 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
839 return gsi_to_irq[gsi]; 830 return gsi;
840 } 831 }
841 832
842 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit); 833 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
843 834
844 if (triggering == ACPI_LEVEL_SENSITIVE) {
845 /*
846 * For PCI devices assign IRQs in order, avoiding gaps
847 * due to unused I/O APIC pins.
848 */
849 int irq = gsi;
850 if (gsi < MAX_GSI_NUM) {
851 /*
852 * Retain the VIA chipset work-around (gsi > 15), but
853 * avoid a problem where the 8254 timer (IRQ0) is setup
854 * via an override (so it's not on pin 0 of the ioapic),
855 * and at the same time, the pin 0 interrupt is a PCI
856 * type. The gsi > 15 test could cause these two pins
857 * to be shared as IRQ0, and they are not shareable.
858 * So test for this condition, and if necessary, avoid
859 * the pin collision.
860 */
861 if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
862 gsi = pci_irq++;
863 /*
864 * Don't assign IRQ used by ACPI SCI
865 */
866 if (gsi == acpi_fadt.sci_int)
867 gsi = pci_irq++;
868 gsi_to_irq[irq] = gsi;
869 } else {
870 printk(KERN_ERR "GSI %u is too high\n", gsi);
871 return gsi;
872 }
873 }
874
875 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi, 835 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
876 triggering == ACPI_EDGE_SENSITIVE ? 0 : 1, 836 triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
877 polarity == ACPI_ACTIVE_HIGH ? 0 : 1); 837 polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 1aabc81d82f1..54e1f38ce301 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -299,76 +299,46 @@ static const struct ata_port_info ahci_port_info[] = {
299 299
300static const struct pci_device_id ahci_pci_tbl[] = { 300static const struct pci_device_id ahci_pci_tbl[] = {
301 /* Intel */ 301 /* Intel */
302 { PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 302 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
303 board_ahci }, /* ICH6 */ 303 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
304 { PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 304 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
305 board_ahci }, /* ICH6M */ 305 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
306 { PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 306 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
307 board_ahci }, /* ICH7 */ 307 { PCI_VDEVICE(AL, 0x5288), board_ahci }, /* ULi M5288 */
308 { PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 308 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
309 board_ahci }, /* ICH7M */ 309 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
310 { PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 310 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
311 board_ahci }, /* ICH7R */ 311 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
312 { PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 312 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
313 board_ahci }, /* ULi M5288 */ 313 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
314 { PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 314 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
315 board_ahci }, /* ESB2 */ 315 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
316 { PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 316 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
317 board_ahci }, /* ESB2 */
318 { PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
319 board_ahci }, /* ESB2 */
320 { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
321 board_ahci }, /* ICH7-M DH */
322 { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
323 board_ahci }, /* ICH8 */
324 { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
325 board_ahci }, /* ICH8 */
326 { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
327 board_ahci }, /* ICH8 */
328 { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
329 board_ahci }, /* ICH8M */
330 { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
331 board_ahci }, /* ICH8M */
332 317
333 /* JMicron */ 318 /* JMicron */
334 { 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 319 { PCI_VDEVICE(JMICRON, 0x2360), board_ahci }, /* JMicron JMB360 */
335 board_ahci }, /* JMicron JMB360 */ 320 { PCI_VDEVICE(JMICRON, 0x2361), board_ahci }, /* JMicron JMB361 */
336 { 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 321 { PCI_VDEVICE(JMICRON, 0x2363), board_ahci }, /* JMicron JMB363 */
337 board_ahci }, /* JMicron JMB361 */ 322 { PCI_VDEVICE(JMICRON, 0x2365), board_ahci }, /* JMicron JMB365 */
338 { 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 323 { PCI_VDEVICE(JMICRON, 0x2366), board_ahci }, /* JMicron JMB366 */
339 board_ahci }, /* JMicron JMB363 */
340 { 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
341 board_ahci }, /* JMicron JMB365 */
342 { 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
343 board_ahci }, /* JMicron JMB366 */
344 324
345 /* ATI */ 325 /* ATI */
346 { PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 326 { PCI_VDEVICE(ATI, 0x4380), board_ahci }, /* ATI SB600 non-raid */
347 board_ahci }, /* ATI SB600 non-raid */ 327 { PCI_VDEVICE(ATI, 0x4381), board_ahci }, /* ATI SB600 raid */
348 { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
349 board_ahci }, /* ATI SB600 raid */
350 328
351 /* VIA */ 329 /* VIA */
352 { PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 330 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
353 board_ahci_vt8251 }, /* VIA VT8251 */
354 331
355 /* NVIDIA */ 332 /* NVIDIA */
356 { PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 333 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
357 board_ahci }, /* MCP65 */ 334 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
358 { PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 335 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
359 board_ahci }, /* MCP65 */ 336 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
360 { PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
361 board_ahci }, /* MCP65 */
362 { PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
363 board_ahci }, /* MCP65 */
364 337
365 /* SiS */ 338 /* SiS */
366 { PCI_VENDOR_ID_SI, 0x1184, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 339 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
367 board_ahci }, /* SiS 966 */ 340 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
368 { PCI_VENDOR_ID_SI, 0x1185, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 341 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
369 board_ahci }, /* SiS 966 */
370 { PCI_VENDOR_ID_SI, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
371 board_ahci }, /* SiS 968 */
372 342
373 { } /* terminate list */ 343 { } /* terminate list */
374}; 344};
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b4abd6850367..dce65651d858 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2340,7 +2340,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
2340 2340
2341 if (status & ATA_BUSY) 2341 if (status & ATA_BUSY)
2342 ata_port_printk(ap, KERN_WARNING, 2342 ata_port_printk(ap, KERN_WARNING,
2343 "port is slow to respond, please be patient\n"); 2343 "port is slow to respond, please be patient "
2344 "(Status 0x%x)\n", status);
2344 2345
2345 timeout = timer_start + tmout; 2346 timeout = timer_start + tmout;
2346 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 2347 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
@@ -2350,7 +2351,8 @@ unsigned int ata_busy_sleep (struct ata_port *ap,
2350 2351
2351 if (status & ATA_BUSY) { 2352 if (status & ATA_BUSY) {
2352 ata_port_printk(ap, KERN_ERR, "port failed to respond " 2353 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2353 "(%lu secs)\n", tmout / HZ); 2354 "(%lu secs, Status 0x%x)\n",
2355 tmout / HZ, status);
2354 return 1; 2356 return 1;
2355 } 2357 }
2356 2358
@@ -5478,11 +5480,10 @@ int ata_device_add(const struct ata_probe_ent *ent)
5478 int irq_line = ent->irq; 5480 int irq_line = ent->irq;
5479 5481
5480 ap = ata_port_add(ent, host, i); 5482 ap = ata_port_add(ent, host, i);
5483 host->ports[i] = ap;
5481 if (!ap) 5484 if (!ap)
5482 goto err_out; 5485 goto err_out;
5483 5486
5484 host->ports[i] = ap;
5485
5486 /* dummy? */ 5487 /* dummy? */
5487 if (ent->dummy_port_mask & (1 << i)) { 5488 if (ent->dummy_port_mask & (1 << i)) {
5488 ata_port_printk(ap, KERN_INFO, "DUMMY\n"); 5489 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
@@ -5740,7 +5741,7 @@ void ata_host_remove(struct ata_host *host)
5740 5741
5741/** 5742/**
5742 * ata_scsi_release - SCSI layer callback hook for host unload 5743 * ata_scsi_release - SCSI layer callback hook for host unload
5743 * @host: libata host to be unloaded 5744 * @shost: libata host to be unloaded
5744 * 5745 *
5745 * Performs all duties necessary to shut down a libata port... 5746 * Performs all duties necessary to shut down a libata port...
5746 * Kill port kthread, disable port, and release resources. 5747 * Kill port kthread, disable port, and release resources.
@@ -5786,6 +5787,7 @@ ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5786 probe_ent->mwdma_mask = port->mwdma_mask; 5787 probe_ent->mwdma_mask = port->mwdma_mask;
5787 probe_ent->udma_mask = port->udma_mask; 5788 probe_ent->udma_mask = port->udma_mask;
5788 probe_ent->port_ops = port->port_ops; 5789 probe_ent->port_ops = port->port_ops;
5790 probe_ent->private_data = port->private_data;
5789 5791
5790 return probe_ent; 5792 return probe_ent;
5791} 5793}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3986ec8741b4..b0d0cc41f3e8 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -889,6 +889,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
889{ 889{
890 struct ata_port *ap = ata_shost_to_port(sdev->host); 890 struct ata_port *ap = ata_shost_to_port(sdev->host);
891 struct ata_device *dev; 891 struct ata_device *dev;
892 unsigned long flags;
892 int max_depth; 893 int max_depth;
893 894
894 if (queue_depth < 1) 895 if (queue_depth < 1)
@@ -904,6 +905,14 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
904 queue_depth = max_depth; 905 queue_depth = max_depth;
905 906
906 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth); 907 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
908
909 spin_lock_irqsave(ap->lock, flags);
910 if (queue_depth > 1)
911 dev->flags &= ~ATA_DFLAG_NCQ_OFF;
912 else
913 dev->flags |= ATA_DFLAG_NCQ_OFF;
914 spin_unlock_irqrestore(ap->lock, flags);
915
907 return queue_depth; 916 return queue_depth;
908} 917}
909 918
@@ -1293,7 +1302,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
1293 */ 1302 */
1294 goto nothing_to_do; 1303 goto nothing_to_do;
1295 1304
1296 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) { 1305 if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
1306 ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ) {
1297 /* yay, NCQ */ 1307 /* yay, NCQ */
1298 if (!lba_48_ok(block, n_block)) 1308 if (!lba_48_ok(block, n_block))
1299 goto out_of_range; 1309 goto out_of_range;
@@ -3174,7 +3184,7 @@ void ata_scsi_dev_rescan(void *data)
3174 3184
3175/** 3185/**
3176 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device 3186 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device
3177 * @pdev: PCI device that the scsi device is attached to 3187 * @host: ATA host container for all SAS ports
3178 * @port_info: Information from low-level host driver 3188 * @port_info: Information from low-level host driver
3179 * @shost: SCSI host that the scsi device is attached to 3189 * @shost: SCSI host that the scsi device is attached to
3180 * 3190 *
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 08b3a407473e..06daaa3736a2 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -828,7 +828,6 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
828 828
829 probe_ent->irq = pdev->irq; 829 probe_ent->irq = pdev->irq;
830 probe_ent->irq_flags = IRQF_SHARED; 830 probe_ent->irq_flags = IRQF_SHARED;
831 probe_ent->private_data = port[0]->private_data;
832 831
833 if (ports & ATA_PORT_PRIMARY) { 832 if (ports & ATA_PORT_PRIMARY) {
834 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0); 833 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
@@ -878,7 +877,6 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
878 return NULL; 877 return NULL;
879 878
880 probe_ent->n_ports = 2; 879 probe_ent->n_ports = 2;
881 probe_ent->private_data = port[0]->private_data;
882 880
883 if (port_mask & ATA_PORT_PRIMARY) { 881 if (port_mask & ATA_PORT_PRIMARY) {
884 probe_ent->irq = ATA_PRIMARY_IRQ; 882 probe_ent->irq = ATA_PRIMARY_IRQ;
@@ -908,6 +906,8 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
908 probe_ent->_host_flags |= ATA_HOST_SIMPLEX; 906 probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
909 } 907 }
910 ata_std_ports(&probe_ent->port[1]); 908 ata_std_ports(&probe_ent->port[1]);
909
910 /* FIXME: could be pointing to stack area; must copy */
911 probe_ent->pinfo2 = port[1]; 911 probe_ent->pinfo2 = port[1];
912 } else 912 } else
913 probe_ent->dummy_port_mask |= ATA_PORT_SECONDARY; 913 probe_ent->dummy_port_mask |= ATA_PORT_SECONDARY;
@@ -946,35 +946,21 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
946{ 946{
947 struct ata_probe_ent *probe_ent = NULL; 947 struct ata_probe_ent *probe_ent = NULL;
948 struct ata_port_info *port[2]; 948 struct ata_port_info *port[2];
949 u8 tmp8, mask; 949 u8 mask;
950 unsigned int legacy_mode = 0; 950 unsigned int legacy_mode = 0;
951 int disable_dev_on_err = 1; 951 int disable_dev_on_err = 1;
952 int rc; 952 int rc;
953 953
954 DPRINTK("ENTER\n"); 954 DPRINTK("ENTER\n");
955 955
956 BUG_ON(n_ports < 1 || n_ports > 2);
957
956 port[0] = port_info[0]; 958 port[0] = port_info[0];
957 if (n_ports > 1) 959 if (n_ports > 1)
958 port[1] = port_info[1]; 960 port[1] = port_info[1];
959 else 961 else
960 port[1] = port[0]; 962 port[1] = port[0];
961 963
962 if ((port[0]->flags & ATA_FLAG_NO_LEGACY) == 0
963 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
964 /* TODO: What if one channel is in native mode ... */
965 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
966 mask = (1 << 2) | (1 << 0);
967 if ((tmp8 & mask) != mask)
968 legacy_mode = (1 << 3);
969 }
970
971 /* FIXME... */
972 if ((!legacy_mode) && (n_ports > 2)) {
973 printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
974 n_ports = 2;
975 /* For now */
976 }
977
978 /* FIXME: Really for ATA it isn't safe because the device may be 964 /* FIXME: Really for ATA it isn't safe because the device may be
979 multi-purpose and we want to leave it alone if it was already 965 multi-purpose and we want to leave it alone if it was already
980 enabled. Secondly for shared use as Arjan says we want refcounting 966 enabled. Secondly for shared use as Arjan says we want refcounting
@@ -987,6 +973,16 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
987 if (rc) 973 if (rc)
988 return rc; 974 return rc;
989 975
976 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
977 u8 tmp8;
978
979 /* TODO: What if one channel is in native mode ... */
980 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
981 mask = (1 << 2) | (1 << 0);
982 if ((tmp8 & mask) != mask)
983 legacy_mode = (1 << 3);
984 }
985
990 rc = pci_request_regions(pdev, DRV_NAME); 986 rc = pci_request_regions(pdev, DRV_NAME);
991 if (rc) { 987 if (rc) {
992 disable_dev_on_err = 0; 988 disable_dev_on_err = 0;
@@ -1039,7 +1035,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1039 goto err_out_regions; 1035 goto err_out_regions;
1040 } 1036 }
1041 1037
1042 /* FIXME: If we get no DMA mask we should fall back to PIO */ 1038 /* TODO: If we get no DMA mask we should fall back to PIO */
1043 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); 1039 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1044 if (rc) 1040 if (rc)
1045 goto err_out_regions; 1041 goto err_out_regions;
@@ -1062,13 +1058,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
1062 1058
1063 pci_set_master(pdev); 1059 pci_set_master(pdev);
1064 1060
1065 /* FIXME: check ata_device_add return */ 1061 if (!ata_device_add(probe_ent)) {
1066 ata_device_add(probe_ent); 1062 rc = -ENODEV;
1063 goto err_out_ent;
1064 }
1067 1065
1068 kfree(probe_ent); 1066 kfree(probe_ent);
1069 1067
1070 return 0; 1068 return 0;
1071 1069
1070err_out_ent:
1071 kfree(probe_ent);
1072err_out_regions: 1072err_out_regions:
1073 if (legacy_mode & ATA_PORT_PRIMARY) 1073 if (legacy_mode & ATA_PORT_PRIMARY)
1074 release_region(ATA_PRIMARY_CMD, 8); 1074 release_region(ATA_PRIMARY_CMD, 8);
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 87af3b5861ab..1d695df5860a 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -34,7 +34,7 @@
34#include <linux/dmi.h> 34#include <linux/dmi.h>
35 35
36#define DRV_NAME "pata_ali" 36#define DRV_NAME "pata_ali"
37#define DRV_VERSION "0.6.5" 37#define DRV_VERSION "0.6.6"
38 38
39/* 39/*
40 * Cable special cases 40 * Cable special cases
@@ -630,7 +630,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
630 pci_read_config_byte(pdev, 0x53, &tmp); 630 pci_read_config_byte(pdev, 0x53, &tmp);
631 if (rev <= 0x20) 631 if (rev <= 0x20)
632 tmp &= ~0x02; 632 tmp &= ~0x02;
633 if (rev == 0xc7) 633 if (rev >= 0xc7)
634 tmp |= 0x03; 634 tmp |= 0x03;
635 else 635 else
636 tmp |= 0x01; /* CD_ROM enable for DMA */ 636 tmp |= 0x01; /* CD_ROM enable for DMA */
@@ -644,10 +644,11 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
644 return ata_pci_init_one(pdev, port_info, 2); 644 return ata_pci_init_one(pdev, port_info, 2);
645} 645}
646 646
647static struct pci_device_id ali[] = { 647static const struct pci_device_id ali[] = {
648 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5228), }, 648 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), },
649 { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5229), }, 649 { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), },
650 { 0, }, 650
651 { },
651}; 652};
652 653
653static struct pci_driver ali_pci_driver = { 654static struct pci_driver ali_pci_driver = {
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 599ee266722c..29234c897118 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -662,27 +662,28 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
662} 662}
663 663
664static const struct pci_device_id amd[] = { 664static const struct pci_device_id amd[] = {
665 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_COBRA_7401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 665 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_COBRA_7401), 0 },
666 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7409, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, 666 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7409), 1 },
667 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, 667 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_VIPER_7411), 3 },
668 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_OPUS_7441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, 668 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_OPUS_7441), 4 },
669 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, 669 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_8111_IDE), 5 },
670 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, 670 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE), 7 },
671 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 671 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE), 8 },
672 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 672 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE), 8 },
673 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 673 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE), 8 },
674 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 674 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE), 8 },
675 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 675 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE), 8 },
676 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 676 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE), 8 },
677 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 677 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE), 8 },
678 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 678 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE), 8 },
679 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 679 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE), 8 },
680 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, 680 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
681 { 0, }, 681
682 { },
682}; 683};
683 684
684static struct pci_driver amd_pci_driver = { 685static struct pci_driver amd_pci_driver = {
685 .name = DRV_NAME, 686 .name = DRV_NAME,
686 .id_table = amd, 687 .id_table = amd,
687 .probe = amd_init_one, 688 .probe = amd_init_one,
688 .remove = ata_pci_remove_one 689 .remove = ata_pci_remove_one
@@ -698,7 +699,6 @@ static void __exit amd_exit(void)
698 pci_unregister_driver(&amd_pci_driver); 699 pci_unregister_driver(&amd_pci_driver);
699} 700}
700 701
701
702MODULE_AUTHOR("Alan Cox"); 702MODULE_AUTHOR("Alan Cox");
703MODULE_DESCRIPTION("low-level driver for AMD PATA IDE"); 703MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
704MODULE_LICENSE("GPL"); 704MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index c4ccb75a4f1d..690828eb5226 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -426,7 +426,7 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
426 .port_ops = &artop6260_ops, 426 .port_ops = &artop6260_ops,
427 }; 427 };
428 struct ata_port_info *port_info[2]; 428 struct ata_port_info *port_info[2];
429 struct ata_port_info *info; 429 struct ata_port_info *info = NULL;
430 int ports = 2; 430 int ports = 2;
431 431
432 if (!printed_version++) 432 if (!printed_version++)
@@ -470,16 +470,20 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
470 pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80); 470 pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
471 471
472 } 472 }
473
474 BUG_ON(info == NULL);
475
473 port_info[0] = port_info[1] = info; 476 port_info[0] = port_info[1] = info;
474 return ata_pci_init_one(pdev, port_info, ports); 477 return ata_pci_init_one(pdev, port_info, ports);
475} 478}
476 479
477static const struct pci_device_id artop_pci_tbl[] = { 480static const struct pci_device_id artop_pci_tbl[] = {
478 { 0x1191, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 481 { PCI_VDEVICE(ARTOP, 0x0005), 0 },
479 { 0x1191, 0x0006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, 482 { PCI_VDEVICE(ARTOP, 0x0006), 1 },
480 { 0x1191, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, 483 { PCI_VDEVICE(ARTOP, 0x0007), 1 },
481 { 0x1191, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, 484 { PCI_VDEVICE(ARTOP, 0x0008), 2 },
482 { 0x1191, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2}, 485 { PCI_VDEVICE(ARTOP, 0x0009), 2 },
486
483 { } /* terminate list */ 487 { } /* terminate list */
484}; 488};
485 489
@@ -500,7 +504,6 @@ static void __exit artop_exit(void)
500 pci_unregister_driver(&artop_pci_driver); 504 pci_unregister_driver(&artop_pci_driver);
501} 505}
502 506
503
504module_init(artop_init); 507module_init(artop_init);
505module_exit(artop_exit); 508module_exit(artop_exit);
506 509
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 6c2269b6bd3c..1ce28d2125f4 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -267,12 +267,13 @@ static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
267 return ata_pci_init_one(dev, port_info, 2); 267 return ata_pci_init_one(dev, port_info, 2);
268} 268}
269 269
270static struct pci_device_id atiixp[] = { 270static const struct pci_device_id atiixp[] = {
271 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), }, 271 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
272 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), }, 272 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
273 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), }, 273 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
274 { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), }, 274 { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
275 { 0, }, 275
276 { },
276}; 277};
277 278
278static struct pci_driver atiixp_pci_driver = { 279static struct pci_driver atiixp_pci_driver = {
@@ -293,7 +294,6 @@ static void __exit atiixp_exit(void)
293 pci_unregister_driver(&atiixp_pci_driver); 294 pci_unregister_driver(&atiixp_pci_driver);
294} 295}
295 296
296
297MODULE_AUTHOR("Alan Cox"); 297MODULE_AUTHOR("Alan Cox");
298MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400"); 298MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
299MODULE_LICENSE("GPL"); 299MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index e92b0ef43ec5..b9bbd1d454bf 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -468,16 +468,17 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
468 return ata_pci_init_one(pdev, port_info, 2); 468 return ata_pci_init_one(pdev, port_info, 2);
469} 469}
470 470
471static struct pci_device_id cmd64x[] = { 471static const struct pci_device_id cmd64x[] = {
472 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 472 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_643), 0 },
473 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, 473 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_646), 1 },
474 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4}, 474 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_648), 4 },
475 { PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5}, 475 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_CMD_649), 5 },
476 { 0, }, 476
477 { },
477}; 478};
478 479
479static struct pci_driver cmd64x_pci_driver = { 480static struct pci_driver cmd64x_pci_driver = {
480 .name = DRV_NAME, 481 .name = DRV_NAME,
481 .id_table = cmd64x, 482 .id_table = cmd64x,
482 .probe = cmd64x_init_one, 483 .probe = cmd64x_init_one,
483 .remove = ata_pci_remove_one 484 .remove = ata_pci_remove_one
@@ -488,13 +489,11 @@ static int __init cmd64x_init(void)
488 return pci_register_driver(&cmd64x_pci_driver); 489 return pci_register_driver(&cmd64x_pci_driver);
489} 490}
490 491
491
492static void __exit cmd64x_exit(void) 492static void __exit cmd64x_exit(void)
493{ 493{
494 pci_unregister_driver(&cmd64x_pci_driver); 494 pci_unregister_driver(&cmd64x_pci_driver);
495} 495}
496 496
497
498MODULE_AUTHOR("Alan Cox"); 497MODULE_AUTHOR("Alan Cox");
499MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers"); 498MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers");
500MODULE_LICENSE("GPL"); 499MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index a6c6cebd0dae..2cd3c0ff76df 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -299,10 +299,11 @@ static void __devexit cs5520_remove_one(struct pci_dev *pdev)
299/* For now keep DMA off. We can set it for all but A rev CS5510 once the 299/* For now keep DMA off. We can set it for all but A rev CS5510 once the
300 core ATA code can handle it */ 300 core ATA code can handle it */
301 301
302static struct pci_device_id pata_cs5520[] = { 302static const struct pci_device_id pata_cs5520[] = {
303 { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510), }, 303 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
304 { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520), }, 304 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
305 { 0, }, 305
306 { },
306}; 307};
307 308
308static struct pci_driver cs5520_pci_driver = { 309static struct pci_driver cs5520_pci_driver = {
@@ -312,7 +313,6 @@ static struct pci_driver cs5520_pci_driver = {
312 .remove = cs5520_remove_one 313 .remove = cs5520_remove_one
313}; 314};
314 315
315
316static int __init cs5520_init(void) 316static int __init cs5520_init(void)
317{ 317{
318 return pci_register_driver(&cs5520_pci_driver); 318 return pci_register_driver(&cs5520_pci_driver);
diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
index 7bba4d954e9c..a07cc81ef791 100644
--- a/drivers/ata/pata_cs5530.c
+++ b/drivers/ata/pata_cs5530.c
@@ -353,13 +353,14 @@ fail_put:
353 return -ENODEV; 353 return -ENODEV;
354} 354}
355 355
356static struct pci_device_id cs5530[] = { 356static const struct pci_device_id cs5530[] = {
357 { PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), }, 357 { PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
358 { 0, }, 358
359 { },
359}; 360};
360 361
361static struct pci_driver cs5530_pci_driver = { 362static struct pci_driver cs5530_pci_driver = {
362 .name = DRV_NAME, 363 .name = DRV_NAME,
363 .id_table = cs5530, 364 .id_table = cs5530,
364 .probe = cs5530_init_one, 365 .probe = cs5530_init_one,
365 .remove = ata_pci_remove_one 366 .remove = ata_pci_remove_one
@@ -370,13 +371,11 @@ static int __init cs5530_init(void)
370 return pci_register_driver(&cs5530_pci_driver); 371 return pci_register_driver(&cs5530_pci_driver);
371} 372}
372 373
373
374static void __exit cs5530_exit(void) 374static void __exit cs5530_exit(void)
375{ 375{
376 pci_unregister_driver(&cs5530_pci_driver); 376 pci_unregister_driver(&cs5530_pci_driver);
377} 377}
378 378
379
380MODULE_AUTHOR("Alan Cox"); 379MODULE_AUTHOR("Alan Cox");
381MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530"); 380MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530");
382MODULE_LICENSE("GPL"); 381MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index d64fcdceaf01..f8def3f9c618 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -257,9 +257,10 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
257 return ata_pci_init_one(dev, ports, 1); 257 return ata_pci_init_one(dev, ports, 1);
258} 258}
259 259
260static struct pci_device_id cs5535[] = { 260static const struct pci_device_id cs5535[] = {
261 { PCI_DEVICE(PCI_VENDOR_ID_NS, 0x002D), }, 261 { PCI_VDEVICE(NS, 0x002D), },
262 { 0, }, 262
263 { },
263}; 264};
264 265
265static struct pci_driver cs5535_pci_driver = { 266static struct pci_driver cs5535_pci_driver = {
@@ -274,13 +275,11 @@ static int __init cs5535_init(void)
274 return pci_register_driver(&cs5535_pci_driver); 275 return pci_register_driver(&cs5535_pci_driver);
275} 276}
276 277
277
278static void __exit cs5535_exit(void) 278static void __exit cs5535_exit(void)
279{ 279{
280 pci_unregister_driver(&cs5535_pci_driver); 280 pci_unregister_driver(&cs5535_pci_driver);
281} 281}
282 282
283
284MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgan Zuleger, Alexander Kiausch"); 283MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgan Zuleger, Alexander Kiausch");
285MODULE_DESCRIPTION("low-level driver for the NS/AMD 5530"); 284MODULE_DESCRIPTION("low-level driver for the NS/AMD 5530");
286MODULE_LICENSE("GPL"); 285MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index dfa5ac539048..247b43608b14 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -184,8 +184,8 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i
184 }; 184 };
185 static struct ata_port_info *port_info[1] = { &info }; 185 static struct ata_port_info *port_info[1] = { &info };
186 186
187 /* Devfn 1 is the ATA primary. The secondary is magic and on devfn2. For the 187 /* Devfn 1 is the ATA primary. The secondary is magic and on devfn2.
188 moment we don't handle the secondary. FIXME */ 188 For the moment we don't handle the secondary. FIXME */
189 189
190 if (PCI_FUNC(pdev->devfn) != 1) 190 if (PCI_FUNC(pdev->devfn) != 1)
191 return -ENODEV; 191 return -ENODEV;
@@ -193,13 +193,14 @@ static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *i
193 return ata_pci_init_one(pdev, port_info, 1); 193 return ata_pci_init_one(pdev, port_info, 1);
194} 194}
195 195
196static struct pci_device_id cy82c693[] = { 196static const struct pci_device_id cy82c693[] = {
197 { PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 197 { PCI_VDEVICE(CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693), },
198 { 0, }, 198
199 { },
199}; 200};
200 201
201static struct pci_driver cy82c693_pci_driver = { 202static struct pci_driver cy82c693_pci_driver = {
202 .name = DRV_NAME, 203 .name = DRV_NAME,
203 .id_table = cy82c693, 204 .id_table = cy82c693,
204 .probe = cy82c693_init_one, 205 .probe = cy82c693_init_one,
205 .remove = ata_pci_remove_one 206 .remove = ata_pci_remove_one
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 95cd1ca181f5..ef18c60fe140 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -305,7 +305,8 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
305} 305}
306 306
307static const struct pci_device_id efar_pci_tbl[] = { 307static const struct pci_device_id efar_pci_tbl[] = {
308 { 0x1055, 0x9130, PCI_ANY_ID, PCI_ANY_ID, }, 308 { PCI_VDEVICE(EFAR, 0x9130), },
309
309 { } /* terminate list */ 310 { } /* terminate list */
310}; 311};
311 312
@@ -326,7 +327,6 @@ static void __exit efar_exit(void)
326 pci_unregister_driver(&efar_pci_driver); 327 pci_unregister_driver(&efar_pci_driver);
327} 328}
328 329
329
330module_init(efar_init); 330module_init(efar_init);
331module_exit(efar_exit); 331module_exit(efar_exit);
332 332
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 8c757438f350..6d3e4c0f15fe 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -444,13 +444,14 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
444 return ata_pci_init_one(dev, port_info, 2); 444 return ata_pci_init_one(dev, port_info, 2);
445} 445}
446 446
447static struct pci_device_id hpt36x[] = { 447static const struct pci_device_id hpt36x[] = {
448 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), }, 448 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
449 { 0, }, 449
450 { },
450}; 451};
451 452
452static struct pci_driver hpt36x_pci_driver = { 453static struct pci_driver hpt36x_pci_driver = {
453 .name = DRV_NAME, 454 .name = DRV_NAME,
454 .id_table = hpt36x, 455 .id_table = hpt36x,
455 .probe = hpt36x_init_one, 456 .probe = hpt36x_init_one,
456 .remove = ata_pci_remove_one 457 .remove = ata_pci_remove_one
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 10318c0012ef..7350443948c1 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -1219,17 +1219,18 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
1219 return ata_pci_init_one(dev, port_info, 2); 1219 return ata_pci_init_one(dev, port_info, 2);
1220} 1220}
1221 1221
1222static struct pci_device_id hpt37x[] = { 1222static const struct pci_device_id hpt37x[] = {
1223 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), }, 1223 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
1224 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT371), }, 1224 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT371), },
1225 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372), }, 1225 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), },
1226 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT374), }, 1226 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT374), },
1227 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302), }, 1227 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), },
1228 { 0, }, 1228
1229 { },
1229}; 1230};
1230 1231
1231static struct pci_driver hpt37x_pci_driver = { 1232static struct pci_driver hpt37x_pci_driver = {
1232 .name = DRV_NAME, 1233 .name = DRV_NAME,
1233 .id_table = hpt37x, 1234 .id_table = hpt37x,
1234 .probe = hpt37x_init_one, 1235 .probe = hpt37x_init_one,
1235 .remove = ata_pci_remove_one 1236 .remove = ata_pci_remove_one
@@ -1240,13 +1241,11 @@ static int __init hpt37x_init(void)
1240 return pci_register_driver(&hpt37x_pci_driver); 1241 return pci_register_driver(&hpt37x_pci_driver);
1241} 1242}
1242 1243
1243
1244static void __exit hpt37x_exit(void) 1244static void __exit hpt37x_exit(void)
1245{ 1245{
1246 pci_unregister_driver(&hpt37x_pci_driver); 1246 pci_unregister_driver(&hpt37x_pci_driver);
1247} 1247}
1248 1248
1249
1250MODULE_AUTHOR("Alan Cox"); 1249MODULE_AUTHOR("Alan Cox");
1251MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x"); 1250MODULE_DESCRIPTION("low-level driver for the Highpoint HPT37x/30x");
1252MODULE_LICENSE("GPL"); 1251MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 5c5d4f6ab901..58cfb2bc8098 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -560,16 +560,17 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
560 return ata_pci_init_one(dev, port_info, 2); 560 return ata_pci_init_one(dev, port_info, 2);
561} 561}
562 562
563static struct pci_device_id hpt3x2n[] = { 563static const struct pci_device_id hpt3x2n[] = {
564 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), }, 564 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT366), },
565 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372), }, 565 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372), },
566 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302), }, 566 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT302), },
567 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372N), }, 567 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT372N), },
568 { 0, }, 568
569 { },
569}; 570};
570 571
571static struct pci_driver hpt3x2n_pci_driver = { 572static struct pci_driver hpt3x2n_pci_driver = {
572 .name = DRV_NAME, 573 .name = DRV_NAME,
573 .id_table = hpt3x2n, 574 .id_table = hpt3x2n,
574 .probe = hpt3x2n_init_one, 575 .probe = hpt3x2n_init_one,
575 .remove = ata_pci_remove_one 576 .remove = ata_pci_remove_one
@@ -580,13 +581,11 @@ static int __init hpt3x2n_init(void)
580 return pci_register_driver(&hpt3x2n_pci_driver); 581 return pci_register_driver(&hpt3x2n_pci_driver);
581} 582}
582 583
583
584static void __exit hpt3x2n_exit(void) 584static void __exit hpt3x2n_exit(void)
585{ 585{
586 pci_unregister_driver(&hpt3x2n_pci_driver); 586 pci_unregister_driver(&hpt3x2n_pci_driver);
587} 587}
588 588
589
590MODULE_AUTHOR("Alan Cox"); 589MODULE_AUTHOR("Alan Cox");
591MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3x2n/30x"); 590MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3x2n/30x");
592MODULE_LICENSE("GPL"); 591MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 1f084ab1ccc6..3334d72e251b 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -192,13 +192,14 @@ static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
192 return ata_pci_init_one(dev, port_info, 2); 192 return ata_pci_init_one(dev, port_info, 2);
193} 193}
194 194
195static struct pci_device_id hpt3x3[] = { 195static const struct pci_device_id hpt3x3[] = {
196 { PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT343), }, 196 { PCI_VDEVICE(TTI, PCI_DEVICE_ID_TTI_HPT343), },
197 { 0, }, 197
198 { },
198}; 199};
199 200
200static struct pci_driver hpt3x3_pci_driver = { 201static struct pci_driver hpt3x3_pci_driver = {
201 .name = DRV_NAME, 202 .name = DRV_NAME,
202 .id_table = hpt3x3, 203 .id_table = hpt3x3,
203 .probe = hpt3x3_init_one, 204 .probe = hpt3x3_init_one,
204 .remove = ata_pci_remove_one 205 .remove = ata_pci_remove_one
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 82a46ff40000..18ff3e59a89b 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -808,14 +808,15 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
808 return ata_pci_init_one(pdev, port_info, 2); 808 return ata_pci_init_one(pdev, port_info, 2);
809} 809}
810 810
811static struct pci_device_id it821x[] = { 811static const struct pci_device_id it821x[] = {
812 { PCI_DEVICE(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8211), }, 812 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8211), },
813 { PCI_DEVICE(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8212), }, 813 { PCI_VDEVICE(ITE, PCI_DEVICE_ID_ITE_8212), },
814 { 0, }, 814
815 { },
815}; 816};
816 817
817static struct pci_driver it821x_pci_driver = { 818static struct pci_driver it821x_pci_driver = {
818 .name = DRV_NAME, 819 .name = DRV_NAME,
819 .id_table = it821x, 820 .id_table = it821x,
820 .probe = it821x_init_one, 821 .probe = it821x_init_one,
821 .remove = ata_pci_remove_one 822 .remove = ata_pci_remove_one
@@ -826,13 +827,11 @@ static int __init it821x_init(void)
826 return pci_register_driver(&it821x_pci_driver); 827 return pci_register_driver(&it821x_pci_driver);
827} 828}
828 829
829
830static void __exit it821x_exit(void) 830static void __exit it821x_exit(void)
831{ 831{
832 pci_unregister_driver(&it821x_pci_driver); 832 pci_unregister_driver(&it821x_pci_driver);
833} 833}
834 834
835
836MODULE_AUTHOR("Alan Cox"); 835MODULE_AUTHOR("Alan Cox");
837MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller"); 836MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller");
838MODULE_LICENSE("GPL"); 837MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index be3a866b111f..52a2bdf3c38d 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -229,11 +229,12 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
229} 229}
230 230
231static const struct pci_device_id jmicron_pci_tbl[] = { 231static const struct pci_device_id jmicron_pci_tbl[] = {
232 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361}, 232 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361},
233 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363}, 233 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363},
234 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365), 365}, 234 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB365), 365},
235 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366), 366}, 235 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB366), 366},
236 { PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368), 368}, 236 { PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB368), 368},
237
237 { } /* terminate list */ 238 { } /* terminate list */
238}; 239};
239 240
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 3c65393c1f01..9dfe3e9abea3 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -274,11 +274,10 @@ static void __devexit mpiix_remove_one(struct pci_dev *pdev)
274 dev_set_drvdata(dev, NULL); 274 dev_set_drvdata(dev, NULL);
275} 275}
276 276
277
278
279static const struct pci_device_id mpiix[] = { 277static const struct pci_device_id mpiix[] = {
280 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371MX), }, 278 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), },
281 { 0, }, 279
280 { },
282}; 281};
283 282
284static struct pci_driver mpiix_pci_driver = { 283static struct pci_driver mpiix_pci_driver = {
@@ -293,13 +292,11 @@ static int __init mpiix_init(void)
293 return pci_register_driver(&mpiix_pci_driver); 292 return pci_register_driver(&mpiix_pci_driver);
294} 293}
295 294
296
297static void __exit mpiix_exit(void) 295static void __exit mpiix_exit(void)
298{ 296{
299 pci_unregister_driver(&mpiix_pci_driver); 297 pci_unregister_driver(&mpiix_pci_driver);
300} 298}
301 299
302
303MODULE_AUTHOR("Alan Cox"); 300MODULE_AUTHOR("Alan Cox");
304MODULE_DESCRIPTION("low-level driver for Intel MPIIX"); 301MODULE_DESCRIPTION("low-level driver for Intel MPIIX");
305MODULE_LICENSE("GPL"); 302MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 76eb9c90bee1..f5672de99c22 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -142,7 +142,8 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e
142} 142}
143 143
144static const struct pci_device_id netcell_pci_tbl[] = { 144static const struct pci_device_id netcell_pci_tbl[] = {
145 { PCI_DEVICE(PCI_VENDOR_ID_NETCELL, PCI_DEVICE_ID_REVOLUTION), }, 145 { PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), },
146
146 { } /* terminate list */ 147 { } /* terminate list */
147}; 148};
148 149
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 2005a95f48f6..2a3dbeed89b4 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -200,12 +200,13 @@ static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
200} 200}
201 201
202static const struct pci_device_id ns87410[] = { 202static const struct pci_device_id ns87410[] = {
203 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87410), }, 203 { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), },
204 { 0, }, 204
205 { },
205}; 206};
206 207
207static struct pci_driver ns87410_pci_driver = { 208static struct pci_driver ns87410_pci_driver = {
208 .name = DRV_NAME, 209 .name = DRV_NAME,
209 .id_table = ns87410, 210 .id_table = ns87410,
210 .probe = ns87410_init_one, 211 .probe = ns87410_init_one,
211 .remove = ata_pci_remove_one 212 .remove = ata_pci_remove_one
@@ -216,13 +217,11 @@ static int __init ns87410_init(void)
216 return pci_register_driver(&ns87410_pci_driver); 217 return pci_register_driver(&ns87410_pci_driver);
217} 218}
218 219
219
220static void __exit ns87410_exit(void) 220static void __exit ns87410_exit(void)
221{ 221{
222 pci_unregister_driver(&ns87410_pci_driver); 222 pci_unregister_driver(&ns87410_pci_driver);
223} 223}
224 224
225
226MODULE_AUTHOR("Alan Cox"); 225MODULE_AUTHOR("Alan Cox");
227MODULE_DESCRIPTION("low-level driver for Nat Semi 87410"); 226MODULE_DESCRIPTION("low-level driver for Nat Semi 87410");
228MODULE_LICENSE("GPL"); 227MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 31a285ca88dc..fc947dfecd73 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -303,7 +303,8 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e
303} 303}
304 304
305static const struct pci_device_id oldpiix_pci_tbl[] = { 305static const struct pci_device_id oldpiix_pci_tbl[] = {
306 { PCI_DEVICE(0x8086, 0x1230), }, 306 { PCI_VDEVICE(INTEL, 0x1230), },
307
307 { } /* terminate list */ 308 { } /* terminate list */
308}; 309};
309 310
@@ -324,7 +325,6 @@ static void __exit oldpiix_exit(void)
324 pci_unregister_driver(&oldpiix_pci_driver); 325 pci_unregister_driver(&oldpiix_pci_driver);
325} 326}
326 327
327
328module_init(oldpiix_init); 328module_init(oldpiix_init);
329module_exit(oldpiix_exit); 329module_exit(oldpiix_exit);
330 330
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 57fe21f3a975..a7320ba15575 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -256,13 +256,14 @@ static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
256} 256}
257 257
258static const struct pci_device_id opti[] = { 258static const struct pci_device_id opti[] = {
259 { PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 259 { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 },
260 { PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1}, 260 { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 1 },
261 { 0, }, 261
262 { },
262}; 263};
263 264
264static struct pci_driver opti_pci_driver = { 265static struct pci_driver opti_pci_driver = {
265 .name = DRV_NAME, 266 .name = DRV_NAME,
266 .id_table = opti, 267 .id_table = opti,
267 .probe = opti_init_one, 268 .probe = opti_init_one,
268 .remove = ata_pci_remove_one 269 .remove = ata_pci_remove_one
@@ -273,7 +274,6 @@ static int __init opti_init(void)
273 return pci_register_driver(&opti_pci_driver); 274 return pci_register_driver(&opti_pci_driver);
274} 275}
275 276
276
277static void __exit opti_exit(void) 277static void __exit opti_exit(void)
278{ 278{
279 pci_unregister_driver(&opti_pci_driver); 279 pci_unregister_driver(&opti_pci_driver);
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 7296a20cd107..c6906b4215de 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -512,12 +512,13 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
512} 512}
513 513
514static const struct pci_device_id optidma[] = { 514static const struct pci_device_id optidma[] = {
515 { PCI_DEVICE(0x1045, 0xD568), }, /* Opti 82C700 */ 515 { PCI_VDEVICE(OPTI, 0xD568), }, /* Opti 82C700 */
516 { 0, }, 516
517 { },
517}; 518};
518 519
519static struct pci_driver optidma_pci_driver = { 520static struct pci_driver optidma_pci_driver = {
520 .name = DRV_NAME, 521 .name = DRV_NAME,
521 .id_table = optidma, 522 .id_table = optidma,
522 .probe = optidma_init_one, 523 .probe = optidma_init_one,
523 .remove = ata_pci_remove_one 524 .remove = ata_pci_remove_one
@@ -528,13 +529,11 @@ static int __init optidma_init(void)
528 return pci_register_driver(&optidma_pci_driver); 529 return pci_register_driver(&optidma_pci_driver);
529} 530}
530 531
531
532static void __exit optidma_exit(void) 532static void __exit optidma_exit(void)
533{ 533{
534 pci_unregister_driver(&optidma_pci_driver); 534 pci_unregister_driver(&optidma_pci_driver);
535} 535}
536 536
537
538MODULE_AUTHOR("Alan Cox"); 537MODULE_AUTHOR("Alan Cox");
539MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus"); 538MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus");
540MODULE_LICENSE("GPL"); 539MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index cb501e145a42..e93ea2702c73 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -42,7 +42,7 @@
42 42
43 43
44#define DRV_NAME "pata_pcmcia" 44#define DRV_NAME "pata_pcmcia"
45#define DRV_VERSION "0.2.9" 45#define DRV_VERSION "0.2.11"
46 46
47/* 47/*
48 * Private data structure to glue stuff together 48 * Private data structure to glue stuff together
@@ -355,6 +355,8 @@ static struct pcmcia_device_id pcmcia_devices[] = {
355 PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d), 355 PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
356 PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6), 356 PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
357 PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003), 357 PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
358 PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443),
359 PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
358 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852), 360 PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
359 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209), 361 PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
360 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e), 362 PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index bd4ed6734edc..d894d9918b1d 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -108,13 +108,14 @@ static struct pdc2027x_udma_timing {
108}; 108};
109 109
110static const struct pci_device_id pdc2027x_pci_tbl[] = { 110static const struct pci_device_id pdc2027x_pci_tbl[] = {
111 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20268, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_100 }, 111 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), PDC_UDMA_100 },
112 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20269, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 }, 112 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), PDC_UDMA_133 },
113 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20270, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_100 }, 113 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), PDC_UDMA_100 },
114 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20271, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 }, 114 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), PDC_UDMA_133 },
115 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20275, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 }, 115 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), PDC_UDMA_133 },
116 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20276, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 }, 116 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), PDC_UDMA_133 },
117 { PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20277, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 }, 117 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), PDC_UDMA_133 },
118
118 { } /* terminate list */ 119 { } /* terminate list */
119}; 120};
120 121
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 48f43432764e..5ba9eb20a6c2 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -385,17 +385,18 @@ static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
385 return ata_pci_init_one(dev, port_info, 2); 385 return ata_pci_init_one(dev, port_info, 2);
386} 386}
387 387
388static struct pci_device_id pdc[] = { 388static const struct pci_device_id pdc[] = {
389 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0}, 389 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
390 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1}, 390 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
391 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1}, 391 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
392 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2}, 392 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
393 { PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2}, 393 { PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },
394 { 0, }, 394
395 { },
395}; 396};
396 397
397static struct pci_driver pdc_pci_driver = { 398static struct pci_driver pdc_pci_driver = {
398 .name = DRV_NAME, 399 .name = DRV_NAME,
399 .id_table = pdc, 400 .id_table = pdc,
400 .probe = pdc_init_one, 401 .probe = pdc_init_one,
401 .remove = ata_pci_remove_one 402 .remove = ata_pci_remove_one
@@ -406,13 +407,11 @@ static int __init pdc_init(void)
406 return pci_register_driver(&pdc_pci_driver); 407 return pci_register_driver(&pdc_pci_driver);
407} 408}
408 409
409
410static void __exit pdc_exit(void) 410static void __exit pdc_exit(void)
411{ 411{
412 pci_unregister_driver(&pdc_pci_driver); 412 pci_unregister_driver(&pdc_pci_driver);
413} 413}
414 414
415
416MODULE_AUTHOR("Alan Cox"); 415MODULE_AUTHOR("Alan Cox");
417MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267"); 416MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
418MODULE_LICENSE("GPL"); 417MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index c20bcf43ed6d..1af83d7694d5 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -300,7 +300,8 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e
300} 300}
301 301
302static const struct pci_device_id radisys_pci_tbl[] = { 302static const struct pci_device_id radisys_pci_tbl[] = {
303 { 0x1331, 0x8201, PCI_ANY_ID, PCI_ANY_ID, }, 303 { PCI_VDEVICE(RADISYS, 0x8201), },
304
304 { } /* terminate list */ 305 { } /* terminate list */
305}; 306};
306 307
@@ -321,7 +322,6 @@ static void __exit radisys_exit(void)
321 pci_unregister_driver(&radisys_pci_driver); 322 pci_unregister_driver(&radisys_pci_driver);
322} 323}
323 324
324
325module_init(radisys_init); 325module_init(radisys_init);
326module_exit(radisys_exit); 326module_exit(radisys_exit);
327 327
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index eccc6fd45032..4533b6357d99 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -170,20 +170,20 @@ fail:
170 return -ENODEV; 170 return -ENODEV;
171} 171}
172 172
173static struct pci_device_id pata_rz1000[] = { 173static const struct pci_device_id pata_rz1000[] = {
174 { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), }, 174 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
175 { PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), }, 175 { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), },
176 { 0, }, 176
177 { },
177}; 178};
178 179
179static struct pci_driver rz1000_pci_driver = { 180static struct pci_driver rz1000_pci_driver = {
180 .name = DRV_NAME, 181 .name = DRV_NAME,
181 .id_table = pata_rz1000, 182 .id_table = pata_rz1000,
182 .probe = rz1000_init_one, 183 .probe = rz1000_init_one,
183 .remove = ata_pci_remove_one 184 .remove = ata_pci_remove_one
184}; 185};
185 186
186
187static int __init rz1000_init(void) 187static int __init rz1000_init(void)
188{ 188{
189 return pci_register_driver(&rz1000_pci_driver); 189 return pci_register_driver(&rz1000_pci_driver);
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index 107e6cd3dc0d..067d9d223e35 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -253,13 +253,14 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
253 return ata_pci_init_one(dev, port_info, 1); 253 return ata_pci_init_one(dev, port_info, 1);
254} 254}
255 255
256static struct pci_device_id sc1200[] = { 256static const struct pci_device_id sc1200[] = {
257 { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_IDE), }, 257 { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_IDE), },
258 { 0, }, 258
259 { },
259}; 260};
260 261
261static struct pci_driver sc1200_pci_driver = { 262static struct pci_driver sc1200_pci_driver = {
262 .name = DRV_NAME, 263 .name = DRV_NAME,
263 .id_table = sc1200, 264 .id_table = sc1200,
264 .probe = sc1200_init_one, 265 .probe = sc1200_init_one,
265 .remove = ata_pci_remove_one 266 .remove = ata_pci_remove_one
@@ -270,13 +271,11 @@ static int __init sc1200_init(void)
270 return pci_register_driver(&sc1200_pci_driver); 271 return pci_register_driver(&sc1200_pci_driver);
271} 272}
272 273
273
274static void __exit sc1200_exit(void) 274static void __exit sc1200_exit(void)
275{ 275{
276 pci_unregister_driver(&sc1200_pci_driver); 276 pci_unregister_driver(&sc1200_pci_driver);
277} 277}
278 278
279
280MODULE_AUTHOR("Alan Cox, Mark Lord"); 279MODULE_AUTHOR("Alan Cox, Mark Lord");
281MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200"); 280MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200");
282MODULE_LICENSE("GPL"); 281MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index a5c8d7e121d1..5bbf76ec14a4 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -553,13 +553,14 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
553 return ata_pci_init_one(pdev, port_info, ports); 553 return ata_pci_init_one(pdev, port_info, ports);
554} 554}
555 555
556static struct pci_device_id serverworks[] = { 556static const struct pci_device_id serverworks[] = {
557 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0}, 557 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
558 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2}, 558 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
559 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2}, 559 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
560 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2}, 560 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2},
561 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2}, 561 { PCI_VDEVICE(SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2},
562 { 0, }, 562
563 { },
563}; 564};
564 565
565static struct pci_driver serverworks_pci_driver = { 566static struct pci_driver serverworks_pci_driver = {
@@ -574,13 +575,11 @@ static int __init serverworks_init(void)
574 return pci_register_driver(&serverworks_pci_driver); 575 return pci_register_driver(&serverworks_pci_driver);
575} 576}
576 577
577
578static void __exit serverworks_exit(void) 578static void __exit serverworks_exit(void)
579{ 579{
580 pci_unregister_driver(&serverworks_pci_driver); 580 pci_unregister_driver(&serverworks_pci_driver);
581} 581}
582 582
583
584MODULE_AUTHOR("Alan Cox"); 583MODULE_AUTHOR("Alan Cox");
585MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6"); 584MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
586MODULE_LICENSE("GPL"); 585MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index c8b2e26db70d..4a2b72b4be8a 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -348,12 +348,13 @@ static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
348} 348}
349 349
350static const struct pci_device_id sil680[] = { 350static const struct pci_device_id sil680[] = {
351 { PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_680), }, 351 { PCI_VDEVICE(CMD, PCI_DEVICE_ID_SII_680), },
352 { 0, }, 352
353 { },
353}; 354};
354 355
355static struct pci_driver sil680_pci_driver = { 356static struct pci_driver sil680_pci_driver = {
356 .name = DRV_NAME, 357 .name = DRV_NAME,
357 .id_table = sil680, 358 .id_table = sil680,
358 .probe = sil680_init_one, 359 .probe = sil680_init_one,
359 .remove = ata_pci_remove_one 360 .remove = ata_pci_remove_one
@@ -364,13 +365,11 @@ static int __init sil680_init(void)
364 return pci_register_driver(&sil680_pci_driver); 365 return pci_register_driver(&sil680_pci_driver);
365} 366}
366 367
367
368static void __exit sil680_exit(void) 368static void __exit sil680_exit(void)
369{ 369{
370 pci_unregister_driver(&sil680_pci_driver); 370 pci_unregister_driver(&sil680_pci_driver);
371} 371}
372 372
373
374MODULE_AUTHOR("Alan Cox"); 373MODULE_AUTHOR("Alan Cox");
375MODULE_DESCRIPTION("low-level driver for SI680 PATA"); 374MODULE_DESCRIPTION("low-level driver for SI680 PATA");
376MODULE_LICENSE("GPL"); 375MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 17791e2785f9..b9ffafb4198c 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -988,8 +988,9 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
988} 988}
989 989
990static const struct pci_device_id sis_pci_tbl[] = { 990static const struct pci_device_id sis_pci_tbl[] = {
991 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x5513), }, /* SiS 5513 */ 991 { PCI_VDEVICE(SI, 0x5513), }, /* SiS 5513 */
992 { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x5518), }, /* SiS 5518 */ 992 { PCI_VDEVICE(SI, 0x5518), }, /* SiS 5518 */
993
993 { } 994 { }
994}; 995};
995 996
@@ -1010,7 +1011,6 @@ static void __exit sis_exit(void)
1010 pci_unregister_driver(&sis_pci_driver); 1011 pci_unregister_driver(&sis_pci_driver);
1011} 1012}
1012 1013
1013
1014module_init(sis_init); 1014module_init(sis_init);
1015module_exit(sis_exit); 1015module_exit(sis_exit);
1016 1016
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 5b762acc5687..08a6dc88676f 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -351,9 +351,10 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
351 return ata_pci_init_one(dev, port_info, 1); /* For now */ 351 return ata_pci_init_one(dev, port_info, 1); /* For now */
352} 352}
353 353
354static struct pci_device_id sl82c105[] = { 354static const struct pci_device_id sl82c105[] = {
355 { PCI_DEVICE(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105), }, 355 { PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), },
356 { 0, }, 356
357 { },
357}; 358};
358 359
359static struct pci_driver sl82c105_pci_driver = { 360static struct pci_driver sl82c105_pci_driver = {
@@ -368,13 +369,11 @@ static int __init sl82c105_init(void)
368 return pci_register_driver(&sl82c105_pci_driver); 369 return pci_register_driver(&sl82c105_pci_driver);
369} 370}
370 371
371
372static void __exit sl82c105_exit(void) 372static void __exit sl82c105_exit(void)
373{ 373{
374 pci_unregister_driver(&sl82c105_pci_driver); 374 pci_unregister_driver(&sl82c105_pci_driver);
375} 375}
376 376
377
378MODULE_AUTHOR("Alan Cox"); 377MODULE_AUTHOR("Alan Cox");
379MODULE_DESCRIPTION("low-level driver for Sl82c105"); 378MODULE_DESCRIPTION("low-level driver for Sl82c105");
380MODULE_LICENSE("GPL"); 379MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index a954ed93a40c..9640f80e8b0d 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -248,13 +248,13 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
248} 248}
249 249
250static const struct pci_device_id triflex[] = { 250static const struct pci_device_id triflex[] = {
251 { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE, 251 { PCI_VDEVICE(COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE), },
252 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 252
253 { 0, }, 253 { },
254}; 254};
255 255
256static struct pci_driver triflex_pci_driver = { 256static struct pci_driver triflex_pci_driver = {
257 .name = DRV_NAME, 257 .name = DRV_NAME,
258 .id_table = triflex, 258 .id_table = triflex,
259 .probe = triflex_init_one, 259 .probe = triflex_init_one,
260 .remove = ata_pci_remove_one 260 .remove = ata_pci_remove_one
@@ -265,13 +265,11 @@ static int __init triflex_init(void)
265 return pci_register_driver(&triflex_pci_driver); 265 return pci_register_driver(&triflex_pci_driver);
266} 266}
267 267
268
269static void __exit triflex_exit(void) 268static void __exit triflex_exit(void)
270{ 269{
271 pci_unregister_driver(&triflex_pci_driver); 270 pci_unregister_driver(&triflex_pci_driver);
272} 271}
273 272
274
275MODULE_AUTHOR("Alan Cox"); 273MODULE_AUTHOR("Alan Cox");
276MODULE_DESCRIPTION("low-level driver for Compaq Triflex"); 274MODULE_DESCRIPTION("low-level driver for Compaq Triflex");
277MODULE_LICENSE("GPL"); 275MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 7b5dd2343b9a..1e7be9eee9c3 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -529,15 +529,16 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
529} 529}
530 530
531static const struct pci_device_id via[] = { 531static const struct pci_device_id via[] = {
532 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576_1), }, 532 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), },
533 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1), }, 533 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), },
534 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_6410), }, 534 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), },
535 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), }, 535 { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), },
536 { 0, }, 536
537 { },
537}; 538};
538 539
539static struct pci_driver via_pci_driver = { 540static struct pci_driver via_pci_driver = {
540 .name = DRV_NAME, 541 .name = DRV_NAME,
541 .id_table = via, 542 .id_table = via,
542 .probe = via_init_one, 543 .probe = via_init_one,
543 .remove = ata_pci_remove_one 544 .remove = ata_pci_remove_one
@@ -548,13 +549,11 @@ static int __init via_init(void)
548 return pci_register_driver(&via_pci_driver); 549 return pci_register_driver(&via_pci_driver);
549} 550}
550 551
551
552static void __exit via_exit(void) 552static void __exit via_exit(void)
553{ 553{
554 pci_unregister_driver(&via_pci_driver); 554 pci_unregister_driver(&via_pci_driver);
555} 555}
556 556
557
558MODULE_AUTHOR("Alan Cox"); 557MODULE_AUTHOR("Alan Cox");
559MODULE_DESCRIPTION("low-level driver for VIA PATA"); 558MODULE_DESCRIPTION("low-level driver for VIA PATA");
560MODULE_LICENSE("GPL"); 559MODULE_LICENSE("GPL");
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 0e23ecb77bc2..81f3d219e70e 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -192,8 +192,7 @@ static struct ata_port_info adma_port_info[] = {
192}; 192};
193 193
194static const struct pci_device_id adma_ata_pci_tbl[] = { 194static const struct pci_device_id adma_ata_pci_tbl[] = {
195 { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 195 { PCI_VDEVICE(PDC, 0x1841), board_1841_idx },
196 board_1841_idx },
197 196
198 { } /* terminate list */ 197 { } /* terminate list */
199}; 198};
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index c01496df4a99..e6aa1a86d5cf 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -533,19 +533,20 @@ static const struct ata_port_info mv_port_info[] = {
533}; 533};
534 534
535static const struct pci_device_id mv_pci_tbl[] = { 535static const struct pci_device_id mv_pci_tbl[] = {
536 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x}, 536 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
537 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x}, 537 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
538 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080}, 538 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
539 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x}, 539 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
540 540
541 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x}, 541 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
542 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x}, 542 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
543 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042}, 543 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
544 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x}, 544 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
545 {PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x}, 545 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
546 546
547 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x}, 547 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
548 {} /* terminate list */ 548
549 { } /* terminate list */
549}; 550};
550 551
551static struct pci_driver mv_pci_driver = { 552static struct pci_driver mv_pci_driver = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 8cd730fe5dd3..d09d20a17790 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -106,45 +106,32 @@ enum nv_host_type
106}; 106};
107 107
108static const struct pci_device_id nv_pci_tbl[] = { 108static const struct pci_device_id nv_pci_tbl[] = {
109 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA, 109 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE2 }, 110 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
111 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA, 111 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 }, 112 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
113 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2, 113 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
114 PCI_ANY_ID, PCI_ANY_ID, 0, 0, NFORCE3 }, 114 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
115 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA, 115 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, 116 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
117 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2, 117 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, 118 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
119 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA, 119 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, 120 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
121 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2, 121 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, 122 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
123 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA, 123 { PCI_VDEVICE(NVIDIA, 0x045c), GENERIC },
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, 124 { PCI_VDEVICE(NVIDIA, 0x045d), GENERIC },
125 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2, 125 { PCI_VDEVICE(NVIDIA, 0x045e), GENERIC },
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, 126 { PCI_VDEVICE(NVIDIA, 0x045f), GENERIC },
127 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
129 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
131 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
133 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
135 { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
137 { PCI_VENDOR_ID_NVIDIA, 0x045c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
138 { PCI_VENDOR_ID_NVIDIA, 0x045d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
139 { PCI_VENDOR_ID_NVIDIA, 0x045e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
140 { PCI_VENDOR_ID_NVIDIA, 0x045f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
141 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 127 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
142 PCI_ANY_ID, PCI_ANY_ID, 128 PCI_ANY_ID, PCI_ANY_ID,
143 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, 129 PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
144 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, 130 { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
145 PCI_ANY_ID, PCI_ANY_ID, 131 PCI_ANY_ID, PCI_ANY_ID,
146 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC }, 132 PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },
147 { 0, } /* terminate list */ 133
134 { } /* terminate list */
148}; 135};
149 136
150static struct pci_driver nv_pci_driver = { 137static struct pci_driver nv_pci_driver = {
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index d627812ea73d..15c9437710fc 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -234,48 +234,31 @@ static const struct ata_port_info pdc_port_info[] = {
234}; 234};
235 235
236static const struct pci_device_id pdc_ata_pci_tbl[] = { 236static const struct pci_device_id pdc_ata_pci_tbl[] = {
237 { PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 237 { PCI_VDEVICE(PROMISE, 0x3371), board_2037x },
238 board_2037x }, 238 { PCI_VDEVICE(PROMISE, 0x3570), board_2037x },
239 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 239 { PCI_VDEVICE(PROMISE, 0x3571), board_2037x },
240 board_2037x }, 240 { PCI_VDEVICE(PROMISE, 0x3373), board_2037x },
241 { PCI_VENDOR_ID_PROMISE, 0x3571, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 241 { PCI_VDEVICE(PROMISE, 0x3375), board_2037x },
242 board_2037x }, 242 { PCI_VDEVICE(PROMISE, 0x3376), board_2037x },
243 { PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 243 { PCI_VDEVICE(PROMISE, 0x3574), board_2057x },
244 board_2037x }, 244 { PCI_VDEVICE(PROMISE, 0x3d75), board_2057x },
245 { PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 245 { PCI_VDEVICE(PROMISE, 0x3d73), board_2037x },
246 board_2037x }, 246
247 { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 247 { PCI_VDEVICE(PROMISE, 0x3318), board_20319 },
248 board_2037x }, 248 { PCI_VDEVICE(PROMISE, 0x3319), board_20319 },
249 { PCI_VENDOR_ID_PROMISE, 0x3574, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 249 { PCI_VDEVICE(PROMISE, 0x3515), board_20319 },
250 board_2057x }, 250 { PCI_VDEVICE(PROMISE, 0x3519), board_20319 },
251 { PCI_VENDOR_ID_PROMISE, 0x3d75, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 251 { PCI_VDEVICE(PROMISE, 0x3d17), board_20319 },
252 board_2057x }, 252 { PCI_VDEVICE(PROMISE, 0x3d18), board_40518 },
253 { PCI_VENDOR_ID_PROMISE, 0x3d73, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 253
254 board_2037x }, 254 { PCI_VDEVICE(PROMISE, 0x6629), board_20619 },
255
256 { PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
257 board_20319 },
258 { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
259 board_20319 },
260 { PCI_VENDOR_ID_PROMISE, 0x3515, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
261 board_20319 },
262 { PCI_VENDOR_ID_PROMISE, 0x3519, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
263 board_20319 },
264 { PCI_VENDOR_ID_PROMISE, 0x3d17, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
265 board_20319 },
266 { PCI_VENDOR_ID_PROMISE, 0x3d18, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
267 board_40518 },
268
269 { PCI_VENDOR_ID_PROMISE, 0x6629, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
270 board_20619 },
271 255
272/* TODO: remove all associated board_20771 code, as it completely 256/* TODO: remove all associated board_20771 code, as it completely
273 * duplicates board_2037x code, unless reason for separation can be 257 * duplicates board_2037x code, unless reason for separation can be
274 * divined. 258 * divined.
275 */ 259 */
276#if 0 260#if 0
277 { PCI_VENDOR_ID_PROMISE, 0x3570, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 261 { PCI_VDEVICE(PROMISE, 0x3570), board_20771 },
278 board_20771 },
279#endif 262#endif
280 263
281 { } /* terminate list */ 264 { } /* terminate list */
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index fa29dfe2a7b5..7f6cc3c07de5 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -185,8 +185,7 @@ static const struct ata_port_info qs_port_info[] = {
185}; 185};
186 186
187static const struct pci_device_id qs_ata_pci_tbl[] = { 187static const struct pci_device_id qs_ata_pci_tbl[] = {
188 { PCI_VENDOR_ID_PDC, 0x2068, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 188 { PCI_VDEVICE(PDC, 0x2068), board_2068_idx },
189 board_2068_idx },
190 189
191 { } /* terminate list */ 190 { } /* terminate list */
192}; 191};
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index c63dbabc0cd9..3d9fa1cc834d 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -123,13 +123,14 @@ static void sil_thaw(struct ata_port *ap);
123 123
124 124
125static const struct pci_device_id sil_pci_tbl[] = { 125static const struct pci_device_id sil_pci_tbl[] = {
126 { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 126 { PCI_VDEVICE(CMD, 0x3112), sil_3112 },
127 { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 127 { PCI_VDEVICE(CMD, 0x0240), sil_3112 },
128 { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 }, 128 { PCI_VDEVICE(CMD, 0x3512), sil_3512 },
129 { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, 129 { PCI_VDEVICE(CMD, 0x3114), sil_3114 },
130 { 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, 130 { PCI_VDEVICE(ATI, 0x436e), sil_3112 },
131 { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq }, 131 { PCI_VDEVICE(ATI, 0x4379), sil_3112_no_sata_irq },
132 { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq }, 132 { PCI_VDEVICE(ATI, 0x437a), sil_3112_no_sata_irq },
133
133 { } /* terminate list */ 134 { } /* terminate list */
134}; 135};
135 136
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 39cb07baebae..a951f40c2f21 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -344,11 +344,12 @@ static int sil24_pci_device_resume(struct pci_dev *pdev);
344#endif 344#endif
345 345
346static const struct pci_device_id sil24_pci_tbl[] = { 346static const struct pci_device_id sil24_pci_tbl[] = {
347 { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, 347 { PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 },
348 { 0x8086, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 }, 348 { PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 },
349 { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 }, 349 { PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 },
350 { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, 350 { PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 },
351 { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 }, 351 { PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 },
352
352 { } /* terminate list */ 353 { } /* terminate list */
353}; 354};
354 355
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 18d49fff8dc4..0738f52463a9 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -67,13 +67,13 @@ static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg);
67static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 67static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
68 68
69static const struct pci_device_id sis_pci_tbl[] = { 69static const struct pci_device_id sis_pci_tbl[] = {
70 { PCI_VENDOR_ID_SI, 0x180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 }, 70 { PCI_VDEVICE(SI, 0x180), sis_180 },
71 { PCI_VENDOR_ID_SI, 0x181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 }, 71 { PCI_VDEVICE(SI, 0x181), sis_180 },
72 { PCI_VENDOR_ID_SI, 0x182, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 }, 72 { PCI_VDEVICE(SI, 0x182), sis_180 },
73
73 { } /* terminate list */ 74 { } /* terminate list */
74}; 75};
75 76
76
77static struct pci_driver sis_pci_driver = { 77static struct pci_driver sis_pci_driver = {
78 .name = DRV_NAME, 78 .name = DRV_NAME,
79 .id_table = sis_pci_tbl, 79 .id_table = sis_pci_tbl,
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index d6d6658d8328..84025a2fd5be 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -469,15 +469,15 @@ err_out:
469 * controller 469 * controller
470 * */ 470 * */
471static const struct pci_device_id k2_sata_pci_tbl[] = { 471static const struct pci_device_id k2_sata_pci_tbl[] = {
472 { 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, 472 { PCI_VDEVICE(SERVERWORKS, 0x0240), 4 },
473 { 0x1166, 0x0241, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, 473 { PCI_VDEVICE(SERVERWORKS, 0x0241), 4 },
474 { 0x1166, 0x0242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, 474 { PCI_VDEVICE(SERVERWORKS, 0x0242), 8 },
475 { 0x1166, 0x024a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, 475 { PCI_VDEVICE(SERVERWORKS, 0x024a), 4 },
476 { 0x1166, 0x024b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, 476 { PCI_VDEVICE(SERVERWORKS, 0x024b), 4 },
477
477 { } 478 { }
478}; 479};
479 480
480
481static struct pci_driver k2_sata_pci_driver = { 481static struct pci_driver k2_sata_pci_driver = {
482 .name = DRV_NAME, 482 .name = DRV_NAME,
483 .id_table = k2_sata_pci_tbl, 483 .id_table = k2_sata_pci_tbl,
@@ -485,19 +485,16 @@ static struct pci_driver k2_sata_pci_driver = {
485 .remove = ata_pci_remove_one, 485 .remove = ata_pci_remove_one,
486}; 486};
487 487
488
489static int __init k2_sata_init(void) 488static int __init k2_sata_init(void)
490{ 489{
491 return pci_register_driver(&k2_sata_pci_driver); 490 return pci_register_driver(&k2_sata_pci_driver);
492} 491}
493 492
494
495static void __exit k2_sata_exit(void) 493static void __exit k2_sata_exit(void)
496{ 494{
497 pci_unregister_driver(&k2_sata_pci_driver); 495 pci_unregister_driver(&k2_sata_pci_driver);
498} 496}
499 497
500
501MODULE_AUTHOR("Benjamin Herrenschmidt"); 498MODULE_AUTHOR("Benjamin Herrenschmidt");
502MODULE_DESCRIPTION("low-level driver for K2 SATA controller"); 499MODULE_DESCRIPTION("low-level driver for K2 SATA controller");
503MODULE_LICENSE("GPL"); 500MODULE_LICENSE("GPL");
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 091867e10ea3..8c74f2ff4344 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -230,12 +230,11 @@ static const struct ata_port_info pdc_port_info[] = {
230}; 230};
231 231
232static const struct pci_device_id pdc_sata_pci_tbl[] = { 232static const struct pci_device_id pdc_sata_pci_tbl[] = {
233 { PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 233 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
234 board_20621 }, 234
235 { } /* terminate list */ 235 { } /* terminate list */
236}; 236};
237 237
238
239static struct pci_driver pdc_sata_pci_driver = { 238static struct pci_driver pdc_sata_pci_driver = {
240 .name = DRV_NAME, 239 .name = DRV_NAME,
241 .id_table = pdc_sata_pci_tbl, 240 .id_table = pdc_sata_pci_tbl,
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index dd76f37be182..5c603ca3a50a 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -61,13 +61,13 @@ static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg);
61static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 61static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
62 62
63static const struct pci_device_id uli_pci_tbl[] = { 63static const struct pci_device_id uli_pci_tbl[] = {
64 { PCI_VENDOR_ID_AL, 0x5289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5289 }, 64 { PCI_VDEVICE(AL, 0x5289), uli_5289 },
65 { PCI_VENDOR_ID_AL, 0x5287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5287 }, 65 { PCI_VDEVICE(AL, 0x5287), uli_5287 },
66 { PCI_VENDOR_ID_AL, 0x5281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, uli_5281 }, 66 { PCI_VDEVICE(AL, 0x5281), uli_5281 },
67
67 { } /* terminate list */ 68 { } /* terminate list */
68}; 69};
69 70
70
71static struct pci_driver uli_pci_driver = { 71static struct pci_driver uli_pci_driver = {
72 .name = DRV_NAME, 72 .name = DRV_NAME,
73 .id_table = uli_pci_tbl, 73 .id_table = uli_pci_tbl,
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index a72a2389a11c..f4455a1efe2d 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -77,9 +77,9 @@ static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
77static void vt6420_error_handler(struct ata_port *ap); 77static void vt6420_error_handler(struct ata_port *ap);
78 78
79static const struct pci_device_id svia_pci_tbl[] = { 79static const struct pci_device_id svia_pci_tbl[] = {
80 { 0x1106, 0x0591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, 80 { PCI_VDEVICE(VIA, 0x0591), vt6420 },
81 { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6420 }, 81 { PCI_VDEVICE(VIA, 0x3149), vt6420 },
82 { 0x1106, 0x3249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, vt6421 }, 82 { PCI_VDEVICE(VIA, 0x3249), vt6421 },
83 83
84 { } /* terminate list */ 84 { } /* terminate list */
85}; 85};
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index d0d92f33de54..273d88fcf980 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -442,16 +442,15 @@ err_out:
442 return rc; 442 return rc;
443} 443}
444 444
445
446static const struct pci_device_id vsc_sata_pci_tbl[] = { 445static const struct pci_device_id vsc_sata_pci_tbl[] = {
447 { PCI_VENDOR_ID_VITESSE, 0x7174, 446 { PCI_VENDOR_ID_VITESSE, 0x7174,
448 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, 447 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
449 { PCI_VENDOR_ID_INTEL, 0x3200, 448 { PCI_VENDOR_ID_INTEL, 0x3200,
450 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, 449 PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
450
451 { } /* terminate list */ 451 { } /* terminate list */
452}; 452};
453 453
454
455static struct pci_driver vsc_sata_pci_driver = { 454static struct pci_driver vsc_sata_pci_driver = {
456 .name = DRV_NAME, 455 .name = DRV_NAME,
457 .id_table = vsc_sata_pci_tbl, 456 .id_table = vsc_sata_pci_tbl,
@@ -459,19 +458,16 @@ static struct pci_driver vsc_sata_pci_driver = {
459 .remove = ata_pci_remove_one, 458 .remove = ata_pci_remove_one,
460}; 459};
461 460
462
463static int __init vsc_sata_init(void) 461static int __init vsc_sata_init(void)
464{ 462{
465 return pci_register_driver(&vsc_sata_pci_driver); 463 return pci_register_driver(&vsc_sata_pci_driver);
466} 464}
467 465
468
469static void __exit vsc_sata_exit(void) 466static void __exit vsc_sata_exit(void)
470{ 467{
471 pci_unregister_driver(&vsc_sata_pci_driver); 468 pci_unregister_driver(&vsc_sata_pci_driver);
472} 469}
473 470
474
475MODULE_AUTHOR("Jeremy Higdon"); 471MODULE_AUTHOR("Jeremy Higdon");
476MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller"); 472MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
477MODULE_LICENSE("GPL"); 473MODULE_LICENSE("GPL");
diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
index 6cc93de0b71d..ac2c10822be0 100644
--- a/drivers/atm/adummy.c
+++ b/drivers/atm/adummy.c
@@ -113,15 +113,13 @@ static int __init adummy_init(void)
113 113
114 printk(KERN_ERR "adummy: version %s\n", DRV_VERSION); 114 printk(KERN_ERR "adummy: version %s\n", DRV_VERSION);
115 115
116 adummy_dev = (struct adummy_dev *) kmalloc(sizeof(struct adummy_dev), 116 adummy_dev = kzalloc(sizeof(struct adummy_dev),
117 GFP_KERNEL); 117 GFP_KERNEL);
118 if (!adummy_dev) { 118 if (!adummy_dev) {
119 printk(KERN_ERR DEV_LABEL ": kmalloc() failed\n"); 119 printk(KERN_ERR DEV_LABEL ": kzalloc() failed\n");
120 err = -ENOMEM; 120 err = -ENOMEM;
121 goto out; 121 goto out;
122 } 122 }
123 memset(adummy_dev, 0, sizeof(struct adummy_dev));
124
125 atm_dev = atm_dev_register(DEV_LABEL, &adummy_ops, -1, NULL); 123 atm_dev = atm_dev_register(DEV_LABEL, &adummy_ops, -1, NULL);
126 if (!atm_dev) { 124 if (!atm_dev) {
127 printk(KERN_ERR DEV_LABEL ": atm_dev_register() failed\n"); 125 printk(KERN_ERR DEV_LABEL ": atm_dev_register() failed\n");
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 4521a249dd56..da599e6e9d34 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -915,8 +915,8 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id,
915 915
916/********** make rate (not quite as much fun as Horizon) **********/ 916/********** make rate (not quite as much fun as Horizon) **********/
917 917
918static unsigned int make_rate (unsigned int rate, rounding r, 918static int make_rate (unsigned int rate, rounding r,
919 u16 * bits, unsigned int * actual) { 919 u16 * bits, unsigned int * actual) {
920 unsigned char exp = -1; // hush gcc 920 unsigned char exp = -1; // hush gcc
921 unsigned int man = -1; // hush gcc 921 unsigned int man = -1; // hush gcc
922 922
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 38fc054bd671..5f25e5efefcd 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1784,7 +1784,7 @@ static int __devinit fs_init (struct fs_dev *dev)
1784 write_fs (dev, RAM, (1 << (28 - FS155_VPI_BITS - FS155_VCI_BITS)) - 1); 1784 write_fs (dev, RAM, (1 << (28 - FS155_VPI_BITS - FS155_VCI_BITS)) - 1);
1785 dev->nchannels = FS155_NR_CHANNELS; 1785 dev->nchannels = FS155_NR_CHANNELS;
1786 } 1786 }
1787 dev->atm_vccs = kmalloc (dev->nchannels * sizeof (struct atm_vcc *), 1787 dev->atm_vccs = kcalloc (dev->nchannels, sizeof (struct atm_vcc *),
1788 GFP_KERNEL); 1788 GFP_KERNEL);
1789 fs_dprintk (FS_DEBUG_ALLOC, "Alloc atmvccs: %p(%Zd)\n", 1789 fs_dprintk (FS_DEBUG_ALLOC, "Alloc atmvccs: %p(%Zd)\n",
1790 dev->atm_vccs, dev->nchannels * sizeof (struct atm_vcc *)); 1790 dev->atm_vccs, dev->nchannels * sizeof (struct atm_vcc *));
@@ -1794,9 +1794,8 @@ static int __devinit fs_init (struct fs_dev *dev)
1794 /* XXX Clean up..... */ 1794 /* XXX Clean up..... */
1795 return 1; 1795 return 1;
1796 } 1796 }
1797 memset (dev->atm_vccs, 0, dev->nchannels * sizeof (struct atm_vcc *));
1798 1797
1799 dev->tx_inuse = kmalloc (dev->nchannels / 8 /* bits/byte */ , GFP_KERNEL); 1798 dev->tx_inuse = kzalloc (dev->nchannels / 8 /* bits/byte */ , GFP_KERNEL);
1800 fs_dprintk (FS_DEBUG_ALLOC, "Alloc tx_inuse: %p(%d)\n", 1799 fs_dprintk (FS_DEBUG_ALLOC, "Alloc tx_inuse: %p(%d)\n",
1801 dev->atm_vccs, dev->nchannels / 8); 1800 dev->atm_vccs, dev->nchannels / 8);
1802 1801
@@ -1805,8 +1804,6 @@ static int __devinit fs_init (struct fs_dev *dev)
1805 /* XXX Clean up..... */ 1804 /* XXX Clean up..... */
1806 return 1; 1805 return 1;
1807 } 1806 }
1808 memset (dev->tx_inuse, 0, dev->nchannels / 8);
1809
1810 /* -- RAS1 : FS155 and 50 differ. Default (0) should be OK for both */ 1807 /* -- RAS1 : FS155 and 50 differ. Default (0) should be OK for both */
1811 /* -- RAS2 : FS50 only: Default is OK. */ 1808 /* -- RAS2 : FS50 only: Default is OK. */
1812 1809
@@ -1893,14 +1890,11 @@ static int __devinit firestream_init_one (struct pci_dev *pci_dev,
1893 if (pci_enable_device(pci_dev)) 1890 if (pci_enable_device(pci_dev))
1894 goto err_out; 1891 goto err_out;
1895 1892
1896 fs_dev = kmalloc (sizeof (struct fs_dev), GFP_KERNEL); 1893 fs_dev = kzalloc (sizeof (struct fs_dev), GFP_KERNEL);
1897 fs_dprintk (FS_DEBUG_ALLOC, "Alloc fs-dev: %p(%Zd)\n", 1894 fs_dprintk (FS_DEBUG_ALLOC, "Alloc fs-dev: %p(%Zd)\n",
1898 fs_dev, sizeof (struct fs_dev)); 1895 fs_dev, sizeof (struct fs_dev));
1899 if (!fs_dev) 1896 if (!fs_dev)
1900 goto err_out; 1897 goto err_out;
1901
1902 memset (fs_dev, 0, sizeof (struct fs_dev));
1903
1904 atm_dev = atm_dev_register("fs", &ops, -1, NULL); 1898 atm_dev = atm_dev_register("fs", &ops, -1, NULL);
1905 if (!atm_dev) 1899 if (!atm_dev)
1906 goto err_out_free_fs_dev; 1900 goto err_out_free_fs_dev;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index f2511b42dba2..b22a9142b240 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -383,14 +383,12 @@ he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
383 } 383 }
384 pci_set_drvdata(pci_dev, atm_dev); 384 pci_set_drvdata(pci_dev, atm_dev);
385 385
386 he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev), 386 he_dev = kzalloc(sizeof(struct he_dev),
387 GFP_KERNEL); 387 GFP_KERNEL);
388 if (!he_dev) { 388 if (!he_dev) {
389 err = -ENOMEM; 389 err = -ENOMEM;
390 goto init_one_failure; 390 goto init_one_failure;
391 } 391 }
392 memset(he_dev, 0, sizeof(struct he_dev));
393
394 he_dev->pci_dev = pci_dev; 392 he_dev->pci_dev = pci_dev;
395 he_dev->atm_dev = atm_dev; 393 he_dev->atm_dev = atm_dev;
396 he_dev->atm_dev->dev_data = he_dev; 394 he_dev->atm_dev->dev_data = he_dev;
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index d1113e845f95..209dba1c70da 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2719,7 +2719,7 @@ static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_
2719 goto out_disable; 2719 goto out_disable;
2720 } 2720 }
2721 2721
2722 dev = kmalloc(sizeof(hrz_dev), GFP_KERNEL); 2722 dev = kzalloc(sizeof(hrz_dev), GFP_KERNEL);
2723 if (!dev) { 2723 if (!dev) {
2724 // perhaps we should be nice: deregister all adapters and abort? 2724 // perhaps we should be nice: deregister all adapters and abort?
2725 PRINTD(DBG_ERR, "out of memory"); 2725 PRINTD(DBG_ERR, "out of memory");
@@ -2727,8 +2727,6 @@ static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_
2727 goto out_release; 2727 goto out_release;
2728 } 2728 }
2729 2729
2730 memset(dev, 0, sizeof(hrz_dev));
2731
2732 pci_set_drvdata(pci_dev, dev); 2730 pci_set_drvdata(pci_dev, dev);
2733 2731
2734 // grab IRQ and install handler - move this someplace more sensible 2732 // grab IRQ and install handler - move this someplace more sensible
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index b0369bb20f08..7487f0ad68e9 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -642,11 +642,9 @@ alloc_scq(struct idt77252_dev *card, int class)
642{ 642{
643 struct scq_info *scq; 643 struct scq_info *scq;
644 644
645 scq = (struct scq_info *) kmalloc(sizeof(struct scq_info), GFP_KERNEL); 645 scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
646 if (!scq) 646 if (!scq)
647 return NULL; 647 return NULL;
648 memset(scq, 0, sizeof(struct scq_info));
649
650 scq->base = pci_alloc_consistent(card->pcidev, SCQ_SIZE, 648 scq->base = pci_alloc_consistent(card->pcidev, SCQ_SIZE,
651 &scq->paddr); 649 &scq->paddr);
652 if (scq->base == NULL) { 650 if (scq->base == NULL) {
@@ -2142,11 +2140,9 @@ idt77252_init_est(struct vc_map *vc, int pcr)
2142{ 2140{
2143 struct rate_estimator *est; 2141 struct rate_estimator *est;
2144 2142
2145 est = kmalloc(sizeof(struct rate_estimator), GFP_KERNEL); 2143 est = kzalloc(sizeof(struct rate_estimator), GFP_KERNEL);
2146 if (!est) 2144 if (!est)
2147 return NULL; 2145 return NULL;
2148 memset(est, 0, sizeof(*est));
2149
2150 est->maxcps = pcr < 0 ? -pcr : pcr; 2146 est->maxcps = pcr < 0 ? -pcr : pcr;
2151 est->cps = est->maxcps; 2147 est->cps = est->maxcps;
2152 est->avcps = est->cps << 5; 2148 est->avcps = est->cps << 5;
@@ -2451,14 +2447,12 @@ idt77252_open(struct atm_vcc *vcc)
2451 2447
2452 index = VPCI2VC(card, vpi, vci); 2448 index = VPCI2VC(card, vpi, vci);
2453 if (!card->vcs[index]) { 2449 if (!card->vcs[index]) {
2454 card->vcs[index] = kmalloc(sizeof(struct vc_map), GFP_KERNEL); 2450 card->vcs[index] = kzalloc(sizeof(struct vc_map), GFP_KERNEL);
2455 if (!card->vcs[index]) { 2451 if (!card->vcs[index]) {
2456 printk("%s: can't alloc vc in open()\n", card->name); 2452 printk("%s: can't alloc vc in open()\n", card->name);
2457 up(&card->mutex); 2453 up(&card->mutex);
2458 return -ENOMEM; 2454 return -ENOMEM;
2459 } 2455 }
2460 memset(card->vcs[index], 0, sizeof(struct vc_map));
2461
2462 card->vcs[index]->card = card; 2456 card->vcs[index]->card = card;
2463 card->vcs[index]->index = index; 2457 card->vcs[index]->index = index;
2464 2458
@@ -2926,13 +2920,11 @@ open_card_oam(struct idt77252_dev *card)
2926 for (vci = 3; vci < 5; vci++) { 2920 for (vci = 3; vci < 5; vci++) {
2927 index = VPCI2VC(card, vpi, vci); 2921 index = VPCI2VC(card, vpi, vci);
2928 2922
2929 vc = kmalloc(sizeof(struct vc_map), GFP_KERNEL); 2923 vc = kzalloc(sizeof(struct vc_map), GFP_KERNEL);
2930 if (!vc) { 2924 if (!vc) {
2931 printk("%s: can't alloc vc\n", card->name); 2925 printk("%s: can't alloc vc\n", card->name);
2932 return -ENOMEM; 2926 return -ENOMEM;
2933 } 2927 }
2934 memset(vc, 0, sizeof(struct vc_map));
2935
2936 vc->index = index; 2928 vc->index = index;
2937 card->vcs[index] = vc; 2929 card->vcs[index] = vc;
2938 2930
@@ -2995,12 +2987,11 @@ open_card_ubr0(struct idt77252_dev *card)
2995{ 2987{
2996 struct vc_map *vc; 2988 struct vc_map *vc;
2997 2989
2998 vc = kmalloc(sizeof(struct vc_map), GFP_KERNEL); 2990 vc = kzalloc(sizeof(struct vc_map), GFP_KERNEL);
2999 if (!vc) { 2991 if (!vc) {
3000 printk("%s: can't alloc vc\n", card->name); 2992 printk("%s: can't alloc vc\n", card->name);
3001 return -ENOMEM; 2993 return -ENOMEM;
3002 } 2994 }
3003 memset(vc, 0, sizeof(struct vc_map));
3004 card->vcs[0] = vc; 2995 card->vcs[0] = vc;
3005 vc->class = SCHED_UBR0; 2996 vc->class = SCHED_UBR0;
3006 2997
@@ -3695,14 +3686,12 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
3695 goto err_out_disable_pdev; 3686 goto err_out_disable_pdev;
3696 } 3687 }
3697 3688
3698 card = kmalloc(sizeof(struct idt77252_dev), GFP_KERNEL); 3689 card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
3699 if (!card) { 3690 if (!card) {
3700 printk("idt77252-%d: can't allocate private data\n", index); 3691 printk("idt77252-%d: can't allocate private data\n", index);
3701 err = -ENOMEM; 3692 err = -ENOMEM;
3702 goto err_out_disable_pdev; 3693 goto err_out_disable_pdev;
3703 } 3694 }
3704 memset(card, 0, sizeof(struct idt77252_dev));
3705
3706 card->revision = revision; 3695 card->revision = revision;
3707 card->index = index; 3696 card->index = index;
3708 card->pcidev = pcidev; 3697 card->pcidev = pcidev;
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index fe60a59b7fc0..b9568e10965a 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1482,16 +1482,10 @@ static inline void vcc_table_deallocate(const struct lanai_dev *lanai)
1482static inline struct lanai_vcc *new_lanai_vcc(void) 1482static inline struct lanai_vcc *new_lanai_vcc(void)
1483{ 1483{
1484 struct lanai_vcc *lvcc; 1484 struct lanai_vcc *lvcc;
1485 lvcc = (struct lanai_vcc *) kmalloc(sizeof(*lvcc), GFP_KERNEL); 1485 lvcc = kzalloc(sizeof(*lvcc), GFP_KERNEL);
1486 if (likely(lvcc != NULL)) { 1486 if (likely(lvcc != NULL)) {
1487 lvcc->vbase = NULL;
1488 lvcc->rx.atmvcc = lvcc->tx.atmvcc = NULL;
1489 lvcc->nref = 0;
1490 memset(&lvcc->stats, 0, sizeof lvcc->stats);
1491 lvcc->rx.buf.start = lvcc->tx.buf.start = NULL;
1492 skb_queue_head_init(&lvcc->tx.backlog); 1487 skb_queue_head_init(&lvcc->tx.backlog);
1493#ifdef DEBUG 1488#ifdef DEBUG
1494 lvcc->tx.unqueue = NULL;
1495 lvcc->vci = -1; 1489 lvcc->vci = -1;
1496#endif 1490#endif
1497 } 1491 }
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 2c65e82f0d6b..083c5d3f2e18 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -603,9 +603,8 @@ static int start_rx(struct atm_dev *dev)
603DPRINTK("start_rx\n"); 603DPRINTK("start_rx\n");
604 zatm_dev = ZATM_DEV(dev); 604 zatm_dev = ZATM_DEV(dev);
605 size = sizeof(struct atm_vcc *)*zatm_dev->chans; 605 size = sizeof(struct atm_vcc *)*zatm_dev->chans;
606 zatm_dev->rx_map = (struct atm_vcc **) kmalloc(size,GFP_KERNEL); 606 zatm_dev->rx_map = kzalloc(size,GFP_KERNEL);
607 if (!zatm_dev->rx_map) return -ENOMEM; 607 if (!zatm_dev->rx_map) return -ENOMEM;
608 memset(zatm_dev->rx_map,0,size);
609 /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */ 608 /* set VPI/VCI split (use all VCIs and give what's left to VPIs) */
610 zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR); 609 zpokel(zatm_dev,(1 << dev->ci_range.vci_bits)-1,uPD98401_VRR);
611 /* prepare free buffer pools */ 610 /* prepare free buffer pools */
@@ -801,6 +800,7 @@ static int alloc_shaper(struct atm_dev *dev,int *pcr,int min,int max,int ubr)
801 i = m = 1; 800 i = m = 1;
802 zatm_dev->ubr_ref_cnt++; 801 zatm_dev->ubr_ref_cnt++;
803 zatm_dev->ubr = shaper; 802 zatm_dev->ubr = shaper;
803 *pcr = 0;
804 } 804 }
805 else { 805 else {
806 if (min) { 806 if (min) {
@@ -951,9 +951,8 @@ static int open_tx_first(struct atm_vcc *vcc)
951 skb_queue_head_init(&zatm_vcc->tx_queue); 951 skb_queue_head_init(&zatm_vcc->tx_queue);
952 init_waitqueue_head(&zatm_vcc->tx_wait); 952 init_waitqueue_head(&zatm_vcc->tx_wait);
953 /* initialize ring */ 953 /* initialize ring */
954 zatm_vcc->ring = kmalloc(RING_SIZE,GFP_KERNEL); 954 zatm_vcc->ring = kzalloc(RING_SIZE,GFP_KERNEL);
955 if (!zatm_vcc->ring) return -ENOMEM; 955 if (!zatm_vcc->ring) return -ENOMEM;
956 memset(zatm_vcc->ring,0,RING_SIZE);
957 loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS; 956 loop = zatm_vcc->ring+RING_ENTRIES*RING_WORDS;
958 loop[0] = uPD98401_TXPD_V; 957 loop[0] = uPD98401_TXPD_V;
959 loop[1] = loop[2] = 0; 958 loop[1] = loop[2] = 0;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a6b2aa67c9b2..f2904f67af47 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -62,6 +62,8 @@
62 62
63#include <asm/uaccess.h> 63#include <asm/uaccess.h>
64 64
65#define DRIVER_NAME "pktcdvd"
66
65#if PACKET_DEBUG 67#if PACKET_DEBUG
66#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args) 68#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
67#else 69#else
@@ -80,7 +82,7 @@
80 82
81static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; 83static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
82static struct proc_dir_entry *pkt_proc; 84static struct proc_dir_entry *pkt_proc;
83static int pkt_major; 85static int pktdev_major;
84static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */ 86static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
85static mempool_t *psd_pool; 87static mempool_t *psd_pool;
86 88
@@ -89,7 +91,7 @@ static void pkt_bio_finished(struct pktcdvd_device *pd)
89{ 91{
90 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0); 92 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
91 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) { 93 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
92 VPRINTK("pktcdvd: queue empty\n"); 94 VPRINTK(DRIVER_NAME": queue empty\n");
93 atomic_set(&pd->iosched.attention, 1); 95 atomic_set(&pd->iosched.attention, 1);
94 wake_up(&pd->wqueue); 96 wake_up(&pd->wqueue);
95 } 97 }
@@ -400,7 +402,7 @@ static void pkt_dump_sense(struct packet_command *cgc)
400 int i; 402 int i;
401 struct request_sense *sense = cgc->sense; 403 struct request_sense *sense = cgc->sense;
402 404
403 printk("pktcdvd:"); 405 printk(DRIVER_NAME":");
404 for (i = 0; i < CDROM_PACKET_SIZE; i++) 406 for (i = 0; i < CDROM_PACKET_SIZE; i++)
405 printk(" %02x", cgc->cmd[i]); 407 printk(" %02x", cgc->cmd[i]);
406 printk(" - "); 408 printk(" - ");
@@ -528,7 +530,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
528 need_write_seek = 0; 530 need_write_seek = 0;
529 if (need_write_seek && reads_queued) { 531 if (need_write_seek && reads_queued) {
530 if (atomic_read(&pd->cdrw.pending_bios) > 0) { 532 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
531 VPRINTK("pktcdvd: write, waiting\n"); 533 VPRINTK(DRIVER_NAME": write, waiting\n");
532 break; 534 break;
533 } 535 }
534 pkt_flush_cache(pd); 536 pkt_flush_cache(pd);
@@ -537,7 +539,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
537 } else { 539 } else {
538 if (!reads_queued && writes_queued) { 540 if (!reads_queued && writes_queued) {
539 if (atomic_read(&pd->cdrw.pending_bios) > 0) { 541 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
540 VPRINTK("pktcdvd: read, waiting\n"); 542 VPRINTK(DRIVER_NAME": read, waiting\n");
541 break; 543 break;
542 } 544 }
543 pd->iosched.writing = 1; 545 pd->iosched.writing = 1;
@@ -600,7 +602,7 @@ static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q
600 set_bit(PACKET_MERGE_SEGS, &pd->flags); 602 set_bit(PACKET_MERGE_SEGS, &pd->flags);
601 return 0; 603 return 0;
602 } else { 604 } else {
603 printk("pktcdvd: cdrom max_phys_segments too small\n"); 605 printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
604 return -EIO; 606 return -EIO;
605 } 607 }
606} 608}
@@ -1049,7 +1051,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1049 for (f = 0; f < pkt->frames; f++) 1051 for (f = 0; f < pkt->frames; f++)
1050 if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset)) 1052 if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
1051 BUG(); 1053 BUG();
1052 VPRINTK("pktcdvd: vcnt=%d\n", pkt->w_bio->bi_vcnt); 1054 VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
1053 1055
1054 atomic_set(&pkt->io_wait, 1); 1056 atomic_set(&pkt->io_wait, 1);
1055 pkt->w_bio->bi_rw = WRITE; 1057 pkt->w_bio->bi_rw = WRITE;
@@ -1286,7 +1288,7 @@ work_to_do:
1286 1288
1287static void pkt_print_settings(struct pktcdvd_device *pd) 1289static void pkt_print_settings(struct pktcdvd_device *pd)
1288{ 1290{
1289 printk("pktcdvd: %s packets, ", pd->settings.fp ? "Fixed" : "Variable"); 1291 printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
1290 printk("%u blocks, ", pd->settings.size >> 2); 1292 printk("%u blocks, ", pd->settings.size >> 2);
1291 printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2'); 1293 printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
1292} 1294}
@@ -1471,7 +1473,7 @@ static int pkt_set_write_settings(struct pktcdvd_device *pd)
1471 /* 1473 /*
1472 * paranoia 1474 * paranoia
1473 */ 1475 */
1474 printk("pktcdvd: write mode wrong %d\n", wp->data_block_type); 1476 printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
1475 return 1; 1477 return 1;
1476 } 1478 }
1477 wp->packet_size = cpu_to_be32(pd->settings.size >> 2); 1479 wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
@@ -1515,7 +1517,7 @@ static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
1515 if (ti->rt == 1 && ti->blank == 0) 1517 if (ti->rt == 1 && ti->blank == 0)
1516 return 1; 1518 return 1;
1517 1519
1518 printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); 1520 printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1519 return 0; 1521 return 0;
1520} 1522}
1521 1523
@@ -1533,7 +1535,7 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
1533 case 0x12: /* DVD-RAM */ 1535 case 0x12: /* DVD-RAM */
1534 return 1; 1536 return 1;
1535 default: 1537 default:
1536 VPRINTK("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile); 1538 VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
1537 return 0; 1539 return 0;
1538 } 1540 }
1539 1541
@@ -1542,22 +1544,22 @@ static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
1542 * but i'm not sure, should we leave this to user apps? probably. 1544 * but i'm not sure, should we leave this to user apps? probably.
1543 */ 1545 */
1544 if (di->disc_type == 0xff) { 1546 if (di->disc_type == 0xff) {
1545 printk("pktcdvd: Unknown disc. No track?\n"); 1547 printk(DRIVER_NAME": Unknown disc. No track?\n");
1546 return 0; 1548 return 0;
1547 } 1549 }
1548 1550
1549 if (di->disc_type != 0x20 && di->disc_type != 0) { 1551 if (di->disc_type != 0x20 && di->disc_type != 0) {
1550 printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type); 1552 printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
1551 return 0; 1553 return 0;
1552 } 1554 }
1553 1555
1554 if (di->erasable == 0) { 1556 if (di->erasable == 0) {
1555 printk("pktcdvd: Disc not erasable\n"); 1557 printk(DRIVER_NAME": Disc not erasable\n");
1556 return 0; 1558 return 0;
1557 } 1559 }
1558 1560
1559 if (di->border_status == PACKET_SESSION_RESERVED) { 1561 if (di->border_status == PACKET_SESSION_RESERVED) {
1560 printk("pktcdvd: Can't write to last track (reserved)\n"); 1562 printk(DRIVER_NAME": Can't write to last track (reserved)\n");
1561 return 0; 1563 return 0;
1562 } 1564 }
1563 1565
@@ -1593,12 +1595,12 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
1593 1595
1594 track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */ 1596 track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
1595 if ((ret = pkt_get_track_info(pd, track, 1, &ti))) { 1597 if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
1596 printk("pktcdvd: failed get_track\n"); 1598 printk(DRIVER_NAME": failed get_track\n");
1597 return ret; 1599 return ret;
1598 } 1600 }
1599 1601
1600 if (!pkt_writable_track(pd, &ti)) { 1602 if (!pkt_writable_track(pd, &ti)) {
1601 printk("pktcdvd: can't write to this track\n"); 1603 printk(DRIVER_NAME": can't write to this track\n");
1602 return -EROFS; 1604 return -EROFS;
1603 } 1605 }
1604 1606
@@ -1608,11 +1610,11 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
1608 */ 1610 */
1609 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; 1611 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1610 if (pd->settings.size == 0) { 1612 if (pd->settings.size == 0) {
1611 printk("pktcdvd: detected zero packet size!\n"); 1613 printk(DRIVER_NAME": detected zero packet size!\n");
1612 return -ENXIO; 1614 return -ENXIO;
1613 } 1615 }
1614 if (pd->settings.size > PACKET_MAX_SECTORS) { 1616 if (pd->settings.size > PACKET_MAX_SECTORS) {
1615 printk("pktcdvd: packet size is too big\n"); 1617 printk(DRIVER_NAME": packet size is too big\n");
1616 return -EROFS; 1618 return -EROFS;
1617 } 1619 }
1618 pd->settings.fp = ti.fp; 1620 pd->settings.fp = ti.fp;
@@ -1654,7 +1656,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
1654 pd->settings.block_mode = PACKET_BLOCK_MODE2; 1656 pd->settings.block_mode = PACKET_BLOCK_MODE2;
1655 break; 1657 break;
1656 default: 1658 default:
1657 printk("pktcdvd: unknown data mode\n"); 1659 printk(DRIVER_NAME": unknown data mode\n");
1658 return -EROFS; 1660 return -EROFS;
1659 } 1661 }
1660 return 0; 1662 return 0;
@@ -1688,10 +1690,10 @@ static int pkt_write_caching(struct pktcdvd_device *pd, int set)
1688 cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff)); 1690 cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1689 ret = pkt_mode_select(pd, &cgc); 1691 ret = pkt_mode_select(pd, &cgc);
1690 if (ret) { 1692 if (ret) {
1691 printk("pktcdvd: write caching control failed\n"); 1693 printk(DRIVER_NAME": write caching control failed\n");
1692 pkt_dump_sense(&cgc); 1694 pkt_dump_sense(&cgc);
1693 } else if (!ret && set) 1695 } else if (!ret && set)
1694 printk("pktcdvd: enabled write caching on %s\n", pd->name); 1696 printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
1695 return ret; 1697 return ret;
1696} 1698}
1697 1699
@@ -1805,11 +1807,11 @@ static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
1805 } 1807 }
1806 1808
1807 if (!buf[6] & 0x40) { 1809 if (!buf[6] & 0x40) {
1808 printk("pktcdvd: Disc type is not CD-RW\n"); 1810 printk(DRIVER_NAME": Disc type is not CD-RW\n");
1809 return 1; 1811 return 1;
1810 } 1812 }
1811 if (!buf[6] & 0x4) { 1813 if (!buf[6] & 0x4) {
1812 printk("pktcdvd: A1 values on media are not valid, maybe not CDRW?\n"); 1814 printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
1813 return 1; 1815 return 1;
1814 } 1816 }
1815 1817
@@ -1829,14 +1831,14 @@ static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
1829 *speed = us_clv_to_speed[sp]; 1831 *speed = us_clv_to_speed[sp];
1830 break; 1832 break;
1831 default: 1833 default:
1832 printk("pktcdvd: Unknown disc sub-type %d\n",st); 1834 printk(DRIVER_NAME": Unknown disc sub-type %d\n",st);
1833 return 1; 1835 return 1;
1834 } 1836 }
1835 if (*speed) { 1837 if (*speed) {
1836 printk("pktcdvd: Max. media speed: %d\n",*speed); 1838 printk(DRIVER_NAME": Max. media speed: %d\n",*speed);
1837 return 0; 1839 return 0;
1838 } else { 1840 } else {
1839 printk("pktcdvd: Unknown speed %d for sub-type %d\n",sp,st); 1841 printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n",sp,st);
1840 return 1; 1842 return 1;
1841 } 1843 }
1842} 1844}
@@ -1847,7 +1849,7 @@ static int pkt_perform_opc(struct pktcdvd_device *pd)
1847 struct request_sense sense; 1849 struct request_sense sense;
1848 int ret; 1850 int ret;
1849 1851
1850 VPRINTK("pktcdvd: Performing OPC\n"); 1852 VPRINTK(DRIVER_NAME": Performing OPC\n");
1851 1853
1852 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); 1854 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1853 cgc.sense = &sense; 1855 cgc.sense = &sense;
@@ -1865,12 +1867,12 @@ static int pkt_open_write(struct pktcdvd_device *pd)
1865 unsigned int write_speed, media_write_speed, read_speed; 1867 unsigned int write_speed, media_write_speed, read_speed;
1866 1868
1867 if ((ret = pkt_probe_settings(pd))) { 1869 if ((ret = pkt_probe_settings(pd))) {
1868 VPRINTK("pktcdvd: %s failed probe\n", pd->name); 1870 VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
1869 return ret; 1871 return ret;
1870 } 1872 }
1871 1873
1872 if ((ret = pkt_set_write_settings(pd))) { 1874 if ((ret = pkt_set_write_settings(pd))) {
1873 DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name); 1875 DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
1874 return -EIO; 1876 return -EIO;
1875 } 1877 }
1876 1878
@@ -1882,26 +1884,26 @@ static int pkt_open_write(struct pktcdvd_device *pd)
1882 case 0x13: /* DVD-RW */ 1884 case 0x13: /* DVD-RW */
1883 case 0x1a: /* DVD+RW */ 1885 case 0x1a: /* DVD+RW */
1884 case 0x12: /* DVD-RAM */ 1886 case 0x12: /* DVD-RAM */
1885 DPRINTK("pktcdvd: write speed %ukB/s\n", write_speed); 1887 DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed);
1886 break; 1888 break;
1887 default: 1889 default:
1888 if ((ret = pkt_media_speed(pd, &media_write_speed))) 1890 if ((ret = pkt_media_speed(pd, &media_write_speed)))
1889 media_write_speed = 16; 1891 media_write_speed = 16;
1890 write_speed = min(write_speed, media_write_speed * 177); 1892 write_speed = min(write_speed, media_write_speed * 177);
1891 DPRINTK("pktcdvd: write speed %ux\n", write_speed / 176); 1893 DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176);
1892 break; 1894 break;
1893 } 1895 }
1894 read_speed = write_speed; 1896 read_speed = write_speed;
1895 1897
1896 if ((ret = pkt_set_speed(pd, write_speed, read_speed))) { 1898 if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
1897 DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name); 1899 DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
1898 return -EIO; 1900 return -EIO;
1899 } 1901 }
1900 pd->write_speed = write_speed; 1902 pd->write_speed = write_speed;
1901 pd->read_speed = read_speed; 1903 pd->read_speed = read_speed;
1902 1904
1903 if ((ret = pkt_perform_opc(pd))) { 1905 if ((ret = pkt_perform_opc(pd))) {
1904 DPRINTK("pktcdvd: %s Optimum Power Calibration failed\n", pd->name); 1906 DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
1905 } 1907 }
1906 1908
1907 return 0; 1909 return 0;
@@ -1929,7 +1931,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
1929 goto out_putdev; 1931 goto out_putdev;
1930 1932
1931 if ((ret = pkt_get_last_written(pd, &lba))) { 1933 if ((ret = pkt_get_last_written(pd, &lba))) {
1932 printk("pktcdvd: pkt_get_last_written failed\n"); 1934 printk(DRIVER_NAME": pkt_get_last_written failed\n");
1933 goto out_unclaim; 1935 goto out_unclaim;
1934 } 1936 }
1935 1937
@@ -1959,11 +1961,11 @@ static int pkt_open_dev(struct pktcdvd_device *pd, int write)
1959 1961
1960 if (write) { 1962 if (write) {
1961 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { 1963 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
1962 printk("pktcdvd: not enough memory for buffers\n"); 1964 printk(DRIVER_NAME": not enough memory for buffers\n");
1963 ret = -ENOMEM; 1965 ret = -ENOMEM;
1964 goto out_unclaim; 1966 goto out_unclaim;
1965 } 1967 }
1966 printk("pktcdvd: %lukB available on disc\n", lba << 1); 1968 printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
1967 } 1969 }
1968 1970
1969 return 0; 1971 return 0;
@@ -1983,7 +1985,7 @@ out:
1983static void pkt_release_dev(struct pktcdvd_device *pd, int flush) 1985static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
1984{ 1986{
1985 if (flush && pkt_flush_cache(pd)) 1987 if (flush && pkt_flush_cache(pd))
1986 DPRINTK("pktcdvd: %s not flushing cache\n", pd->name); 1988 DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);
1987 1989
1988 pkt_lock_door(pd, 0); 1990 pkt_lock_door(pd, 0);
1989 1991
@@ -2006,7 +2008,7 @@ static int pkt_open(struct inode *inode, struct file *file)
2006 struct pktcdvd_device *pd = NULL; 2008 struct pktcdvd_device *pd = NULL;
2007 int ret; 2009 int ret;
2008 2010
2009 VPRINTK("pktcdvd: entering open\n"); 2011 VPRINTK(DRIVER_NAME": entering open\n");
2010 2012
2011 mutex_lock(&ctl_mutex); 2013 mutex_lock(&ctl_mutex);
2012 pd = pkt_find_dev_from_minor(iminor(inode)); 2014 pd = pkt_find_dev_from_minor(iminor(inode));
@@ -2040,7 +2042,7 @@ static int pkt_open(struct inode *inode, struct file *file)
2040out_dec: 2042out_dec:
2041 pd->refcnt--; 2043 pd->refcnt--;
2042out: 2044out:
2043 VPRINTK("pktcdvd: failed open (%d)\n", ret); 2045 VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
2044 mutex_unlock(&ctl_mutex); 2046 mutex_unlock(&ctl_mutex);
2045 return ret; 2047 return ret;
2046} 2048}
@@ -2088,7 +2090,7 @@ static int pkt_make_request(request_queue_t *q, struct bio *bio)
2088 2090
2089 pd = q->queuedata; 2091 pd = q->queuedata;
2090 if (!pd) { 2092 if (!pd) {
2091 printk("pktcdvd: %s incorrect request queue\n", bdevname(bio->bi_bdev, b)); 2093 printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
2092 goto end_io; 2094 goto end_io;
2093 } 2095 }
2094 2096
@@ -2110,13 +2112,13 @@ static int pkt_make_request(request_queue_t *q, struct bio *bio)
2110 } 2112 }
2111 2113
2112 if (!test_bit(PACKET_WRITABLE, &pd->flags)) { 2114 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2113 printk("pktcdvd: WRITE for ro device %s (%llu)\n", 2115 printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n",
2114 pd->name, (unsigned long long)bio->bi_sector); 2116 pd->name, (unsigned long long)bio->bi_sector);
2115 goto end_io; 2117 goto end_io;
2116 } 2118 }
2117 2119
2118 if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) { 2120 if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
2119 printk("pktcdvd: wrong bio size\n"); 2121 printk(DRIVER_NAME": wrong bio size\n");
2120 goto end_io; 2122 goto end_io;
2121 } 2123 }
2122 2124
@@ -2319,7 +2321,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2319 struct block_device *bdev; 2321 struct block_device *bdev;
2320 2322
2321 if (pd->pkt_dev == dev) { 2323 if (pd->pkt_dev == dev) {
2322 printk("pktcdvd: Recursive setup not allowed\n"); 2324 printk(DRIVER_NAME": Recursive setup not allowed\n");
2323 return -EBUSY; 2325 return -EBUSY;
2324 } 2326 }
2325 for (i = 0; i < MAX_WRITERS; i++) { 2327 for (i = 0; i < MAX_WRITERS; i++) {
@@ -2327,11 +2329,11 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2327 if (!pd2) 2329 if (!pd2)
2328 continue; 2330 continue;
2329 if (pd2->bdev->bd_dev == dev) { 2331 if (pd2->bdev->bd_dev == dev) {
2330 printk("pktcdvd: %s already setup\n", bdevname(pd2->bdev, b)); 2332 printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b));
2331 return -EBUSY; 2333 return -EBUSY;
2332 } 2334 }
2333 if (pd2->pkt_dev == dev) { 2335 if (pd2->pkt_dev == dev) {
2334 printk("pktcdvd: Can't chain pktcdvd devices\n"); 2336 printk(DRIVER_NAME": Can't chain pktcdvd devices\n");
2335 return -EBUSY; 2337 return -EBUSY;
2336 } 2338 }
2337 } 2339 }
@@ -2354,7 +2356,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2354 atomic_set(&pd->cdrw.pending_bios, 0); 2356 atomic_set(&pd->cdrw.pending_bios, 0);
2355 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name); 2357 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
2356 if (IS_ERR(pd->cdrw.thread)) { 2358 if (IS_ERR(pd->cdrw.thread)) {
2357 printk("pktcdvd: can't start kernel thread\n"); 2359 printk(DRIVER_NAME": can't start kernel thread\n");
2358 ret = -ENOMEM; 2360 ret = -ENOMEM;
2359 goto out_mem; 2361 goto out_mem;
2360 } 2362 }
@@ -2364,7 +2366,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
2364 proc->data = pd; 2366 proc->data = pd;
2365 proc->proc_fops = &pkt_proc_fops; 2367 proc->proc_fops = &pkt_proc_fops;
2366 } 2368 }
2367 DPRINTK("pktcdvd: writer %s mapped to %s\n", pd->name, bdevname(bdev, b)); 2369 DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
2368 return 0; 2370 return 0;
2369 2371
2370out_mem: 2372out_mem:
@@ -2401,7 +2403,7 @@ static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, u
2401 return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg); 2403 return blkdev_ioctl(pd->bdev->bd_inode, file, cmd, arg);
2402 2404
2403 default: 2405 default:
2404 VPRINTK("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd); 2406 VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
2405 return -ENOTTY; 2407 return -ENOTTY;
2406 } 2408 }
2407 2409
@@ -2446,7 +2448,7 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
2446 if (!pkt_devs[idx]) 2448 if (!pkt_devs[idx])
2447 break; 2449 break;
2448 if (idx == MAX_WRITERS) { 2450 if (idx == MAX_WRITERS) {
2449 printk("pktcdvd: max %d writers supported\n", MAX_WRITERS); 2451 printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS);
2450 return -EBUSY; 2452 return -EBUSY;
2451 } 2453 }
2452 2454
@@ -2470,15 +2472,15 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
2470 2472
2471 spin_lock_init(&pd->lock); 2473 spin_lock_init(&pd->lock);
2472 spin_lock_init(&pd->iosched.lock); 2474 spin_lock_init(&pd->iosched.lock);
2473 sprintf(pd->name, "pktcdvd%d", idx); 2475 sprintf(pd->name, DRIVER_NAME"%d", idx);
2474 init_waitqueue_head(&pd->wqueue); 2476 init_waitqueue_head(&pd->wqueue);
2475 pd->bio_queue = RB_ROOT; 2477 pd->bio_queue = RB_ROOT;
2476 2478
2477 disk->major = pkt_major; 2479 disk->major = pktdev_major;
2478 disk->first_minor = idx; 2480 disk->first_minor = idx;
2479 disk->fops = &pktcdvd_ops; 2481 disk->fops = &pktcdvd_ops;
2480 disk->flags = GENHD_FL_REMOVABLE; 2482 disk->flags = GENHD_FL_REMOVABLE;
2481 sprintf(disk->disk_name, "pktcdvd%d", idx); 2483 sprintf(disk->disk_name, DRIVER_NAME"%d", idx);
2482 disk->private_data = pd; 2484 disk->private_data = pd;
2483 disk->queue = blk_alloc_queue(GFP_KERNEL); 2485 disk->queue = blk_alloc_queue(GFP_KERNEL);
2484 if (!disk->queue) 2486 if (!disk->queue)
@@ -2520,7 +2522,7 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
2520 break; 2522 break;
2521 } 2523 }
2522 if (idx == MAX_WRITERS) { 2524 if (idx == MAX_WRITERS) {
2523 DPRINTK("pktcdvd: dev not setup\n"); 2525 DPRINTK(DRIVER_NAME": dev not setup\n");
2524 return -ENXIO; 2526 return -ENXIO;
2525 } 2527 }
2526 2528
@@ -2533,7 +2535,7 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
2533 blkdev_put(pd->bdev); 2535 blkdev_put(pd->bdev);
2534 2536
2535 remove_proc_entry(pd->name, pkt_proc); 2537 remove_proc_entry(pd->name, pkt_proc);
2536 DPRINTK("pktcdvd: writer %s unmapped\n", pd->name); 2538 DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);
2537 2539
2538 del_gendisk(pd->disk); 2540 del_gendisk(pd->disk);
2539 blk_cleanup_queue(pd->disk->queue); 2541 blk_cleanup_queue(pd->disk->queue);
@@ -2610,7 +2612,7 @@ static struct file_operations pkt_ctl_fops = {
2610 2612
2611static struct miscdevice pkt_misc = { 2613static struct miscdevice pkt_misc = {
2612 .minor = MISC_DYNAMIC_MINOR, 2614 .minor = MISC_DYNAMIC_MINOR,
2613 .name = "pktcdvd", 2615 .name = DRIVER_NAME,
2614 .fops = &pkt_ctl_fops 2616 .fops = &pkt_ctl_fops
2615}; 2617};
2616 2618
@@ -2623,28 +2625,28 @@ static int __init pkt_init(void)
2623 if (!psd_pool) 2625 if (!psd_pool)
2624 return -ENOMEM; 2626 return -ENOMEM;
2625 2627
2626 ret = register_blkdev(pkt_major, "pktcdvd"); 2628 ret = register_blkdev(pktdev_major, DRIVER_NAME);
2627 if (ret < 0) { 2629 if (ret < 0) {
2628 printk("pktcdvd: Unable to register block device\n"); 2630 printk(DRIVER_NAME": Unable to register block device\n");
2629 goto out2; 2631 goto out2;
2630 } 2632 }
2631 if (!pkt_major) 2633 if (!pktdev_major)
2632 pkt_major = ret; 2634 pktdev_major = ret;
2633 2635
2634 ret = misc_register(&pkt_misc); 2636 ret = misc_register(&pkt_misc);
2635 if (ret) { 2637 if (ret) {
2636 printk("pktcdvd: Unable to register misc device\n"); 2638 printk(DRIVER_NAME": Unable to register misc device\n");
2637 goto out; 2639 goto out;
2638 } 2640 }
2639 2641
2640 mutex_init(&ctl_mutex); 2642 mutex_init(&ctl_mutex);
2641 2643
2642 pkt_proc = proc_mkdir("pktcdvd", proc_root_driver); 2644 pkt_proc = proc_mkdir(DRIVER_NAME, proc_root_driver);
2643 2645
2644 return 0; 2646 return 0;
2645 2647
2646out: 2648out:
2647 unregister_blkdev(pkt_major, "pktcdvd"); 2649 unregister_blkdev(pktdev_major, DRIVER_NAME);
2648out2: 2650out2:
2649 mempool_destroy(psd_pool); 2651 mempool_destroy(psd_pool);
2650 return ret; 2652 return ret;
@@ -2652,9 +2654,9 @@ out2:
2652 2654
2653static void __exit pkt_exit(void) 2655static void __exit pkt_exit(void)
2654{ 2656{
2655 remove_proc_entry("pktcdvd", proc_root_driver); 2657 remove_proc_entry(DRIVER_NAME, proc_root_driver);
2656 misc_deregister(&pkt_misc); 2658 misc_deregister(&pkt_misc);
2657 unregister_blkdev(pkt_major, "pktcdvd"); 2659 unregister_blkdev(pktdev_major, DRIVER_NAME);
2658 mempool_destroy(psd_pool); 2660 mempool_destroy(psd_pool);
2659} 2661}
2660 2662
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index f2305ee792a1..fdc8f892eb86 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -636,7 +636,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
636 intr = in_8(&sw->intr); 636 intr = in_8(&sw->intr);
637 err = (intr & ERROR_INTR)? in_8(&sw->error): 0; 637 err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
638 if ((intr & ERROR_INTR) && fs->state != do_transfer) 638 if ((intr & ERROR_INTR) && fs->state != do_transfer)
639 printk(KERN_ERR "swim3_interrupt, state=%d, dir=%lx, intr=%x, err=%x\n", 639 printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
640 fs->state, rq_data_dir(fd_req), intr, err); 640 fs->state, rq_data_dir(fd_req), intr, err);
641 switch (fs->state) { 641 switch (fs->state) {
642 case locating: 642 case locating:
@@ -742,7 +742,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
742 if ((stat & ACTIVE) == 0 || resid != 0) { 742 if ((stat & ACTIVE) == 0 || resid != 0) {
743 /* musta been an error */ 743 /* musta been an error */
744 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); 744 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
745 printk(KERN_ERR " state=%d, dir=%lx, intr=%x, err=%x\n", 745 printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
746 fs->state, rq_data_dir(fd_req), intr, err); 746 fs->state, rq_data_dir(fd_req), intr, err);
747 end_request(fd_req, 0); 747 end_request(fd_req, 0);
748 fs->state = idle; 748 fs->state = idle;
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 22f8cf218cc6..c603bf291580 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -1,6 +1,6 @@
1config AGP 1config AGP
2 tristate "/dev/agpgart (AGP Support)" 2 tristate "/dev/agpgart (AGP Support)"
3 depends on ALPHA || IA64 || PPC || X86 3 depends on ALPHA || IA64 || PARISC || PPC || X86
4 depends on PCI 4 depends on PCI
5 ---help--- 5 ---help---
6 AGP (Accelerated Graphics Port) is a bus system mainly used to 6 AGP (Accelerated Graphics Port) is a bus system mainly used to
@@ -122,6 +122,14 @@ config AGP_HP_ZX1
122 This option gives you AGP GART support for the HP ZX1 chipset 122 This option gives you AGP GART support for the HP ZX1 chipset
123 for IA64 processors. 123 for IA64 processors.
124 124
125config AGP_PARISC
126 tristate "HP Quicksilver AGP support"
127 depends on AGP && PARISC && 64BIT
128 help
129 This option gives you AGP GART support for the HP Quicksilver
130 AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
131 workstation...)
132
125config AGP_ALPHA_CORE 133config AGP_ALPHA_CORE
126 tristate "Alpha AGP support" 134 tristate "Alpha AGP support"
127 depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL) 135 depends on AGP && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL)
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index d33a22f2fa0b..3e581603d0a8 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_AGP_AMD64) += amd64-agp.o
8obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o 8obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
9obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o 9obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o
10obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o 10obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
11obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
11obj-$(CONFIG_AGP_I460) += i460-agp.o 12obj-$(CONFIG_AGP_I460) += i460-agp.o
12obj-$(CONFIG_AGP_INTEL) += intel-agp.o 13obj-$(CONFIG_AGP_INTEL) += intel-agp.o
13obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o 14obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
new file mode 100644
index 000000000000..17c50b0f83f0
--- /dev/null
+++ b/drivers/char/agp/parisc-agp.c
@@ -0,0 +1,416 @@
1/*
2 * HP Quicksilver AGP GART routines
3 *
4 * Copyright (c) 2006, Kyle McMartin <kyle@parisc-linux.org>
5 *
6 * Based on drivers/char/agpgart/hp-agp.c which is
7 * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
8 * Bjorn Helgaas <bjorn.helgaas@hp.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/init.h>
19#include <linux/klist.h>
20#include <linux/agp_backend.h>
21
22#include <asm-parisc/parisc-device.h>
23#include <asm-parisc/ropes.h>
24
25#include "agp.h"
26
27#define DRVNAME "quicksilver"
28#define DRVPFX DRVNAME ": "
29
30#ifndef log2
31#define log2(x) ffz(~(x))
32#endif
33
34#define AGP8X_MODE_BIT 3
35#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
36
37static struct _parisc_agp_info {
38 void __iomem *ioc_regs;
39 void __iomem *lba_regs;
40
41 int lba_cap_offset;
42
43 u64 *gatt;
44 u64 gatt_entries;
45
46 u64 gart_base;
47 u64 gart_size;
48
49 int io_page_size;
50 int io_pages_per_kpage;
51} parisc_agp_info;
52
53static struct gatt_mask parisc_agp_masks[] =
54{
55 {
56 .mask = SBA_PDIR_VALID_BIT,
57 .type = 0
58 }
59};
60
61static struct aper_size_info_fixed parisc_agp_sizes[] =
62{
63 {0, 0, 0}, /* filled in by parisc_agp_fetch_size() */
64};
65
66static int
67parisc_agp_fetch_size(void)
68{
69 int size;
70
71 size = parisc_agp_info.gart_size / MB(1);
72 parisc_agp_sizes[0].size = size;
73 agp_bridge->current_size = (void *) &parisc_agp_sizes[0];
74
75 return size;
76}
77
78static int
79parisc_agp_configure(void)
80{
81 struct _parisc_agp_info *info = &parisc_agp_info;
82
83 agp_bridge->gart_bus_addr = info->gart_base;
84 agp_bridge->capndx = info->lba_cap_offset;
85 agp_bridge->mode = readl(info->lba_regs+info->lba_cap_offset+PCI_AGP_STATUS);
86
87 return 0;
88}
89
90static void
91parisc_agp_tlbflush(struct agp_memory *mem)
92{
93 struct _parisc_agp_info *info = &parisc_agp_info;
94
95 writeq(info->gart_base | log2(info->gart_size), info->ioc_regs+IOC_PCOM);
96 readq(info->ioc_regs+IOC_PCOM); /* flush */
97}
98
99static int
100parisc_agp_create_gatt_table(struct agp_bridge_data *bridge)
101{
102 struct _parisc_agp_info *info = &parisc_agp_info;
103 int i;
104
105 for (i = 0; i < info->gatt_entries; i++) {
106 info->gatt[i] = (unsigned long)agp_bridge->scratch_page;
107 }
108
109 return 0;
110}
111
112static int
113parisc_agp_free_gatt_table(struct agp_bridge_data *bridge)
114{
115 struct _parisc_agp_info *info = &parisc_agp_info;
116
117 info->gatt[0] = SBA_AGPGART_COOKIE;
118
119 return 0;
120}
121
122static int
123parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
124{
125 struct _parisc_agp_info *info = &parisc_agp_info;
126 int i, k;
127 off_t j, io_pg_start;
128 int io_pg_count;
129
130 if (type != 0 || mem->type != 0) {
131 return -EINVAL;
132 }
133
134 io_pg_start = info->io_pages_per_kpage * pg_start;
135 io_pg_count = info->io_pages_per_kpage * mem->page_count;
136 if ((io_pg_start + io_pg_count) > info->gatt_entries) {
137 return -EINVAL;
138 }
139
140 j = io_pg_start;
141 while (j < (io_pg_start + io_pg_count)) {
142 if (info->gatt[j])
143 return -EBUSY;
144 j++;
145 }
146
147 if (mem->is_flushed == FALSE) {
148 global_cache_flush();
149 mem->is_flushed = TRUE;
150 }
151
152 for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
153 unsigned long paddr;
154
155 paddr = mem->memory[i];
156 for (k = 0;
157 k < info->io_pages_per_kpage;
158 k++, j++, paddr += info->io_page_size) {
159 info->gatt[j] =
160 agp_bridge->driver->mask_memory(agp_bridge,
161 paddr, type);
162 }
163 }
164
165 agp_bridge->driver->tlb_flush(mem);
166
167 return 0;
168}
169
170static int
171parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
172{
173 struct _parisc_agp_info *info = &parisc_agp_info;
174 int i, io_pg_start, io_pg_count;
175
176 if (type != 0 || mem->type != 0) {
177 return -EINVAL;
178 }
179
180 io_pg_start = info->io_pages_per_kpage * pg_start;
181 io_pg_count = info->io_pages_per_kpage * mem->page_count;
182 for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
183 info->gatt[i] = agp_bridge->scratch_page;
184 }
185
186 agp_bridge->driver->tlb_flush(mem);
187 return 0;
188}
189
190static unsigned long
191parisc_agp_mask_memory(struct agp_bridge_data *bridge,
192 unsigned long addr, int type)
193{
194 return SBA_PDIR_VALID_BIT | addr;
195}
196
197static void
198parisc_agp_enable(struct agp_bridge_data *bridge, u32 mode)
199{
200 struct _parisc_agp_info *info = &parisc_agp_info;
201 u32 command;
202
203 command = readl(info->lba_regs + info->lba_cap_offset + PCI_AGP_STATUS);
204
205 command = agp_collect_device_status(bridge, mode, command);
206 command |= 0x00000100;
207
208 writel(command, info->lba_regs + info->lba_cap_offset + PCI_AGP_COMMAND);
209
210 agp_device_command(command, (mode & AGP8X_MODE) != 0);
211}
212
213struct agp_bridge_driver parisc_agp_driver = {
214 .owner = THIS_MODULE,
215 .size_type = FIXED_APER_SIZE,
216 .configure = parisc_agp_configure,
217 .fetch_size = parisc_agp_fetch_size,
218 .tlb_flush = parisc_agp_tlbflush,
219 .mask_memory = parisc_agp_mask_memory,
220 .masks = parisc_agp_masks,
221 .agp_enable = parisc_agp_enable,
222 .cache_flush = global_cache_flush,
223 .create_gatt_table = parisc_agp_create_gatt_table,
224 .free_gatt_table = parisc_agp_free_gatt_table,
225 .insert_memory = parisc_agp_insert_memory,
226 .remove_memory = parisc_agp_remove_memory,
227 .alloc_by_type = agp_generic_alloc_by_type,
228 .free_by_type = agp_generic_free_by_type,
229 .agp_alloc_page = agp_generic_alloc_page,
230 .agp_destroy_page = agp_generic_destroy_page,
231 .cant_use_aperture = 1,
232};
233
234static int __init
235agp_ioc_init(void __iomem *ioc_regs)
236{
237 struct _parisc_agp_info *info = &parisc_agp_info;
238 u64 *iova_base, *io_pdir, io_tlb_ps;
239 int io_tlb_shift;
240
241 printk(KERN_INFO DRVPFX "IO PDIR shared with sba_iommu\n");
242
243 info->ioc_regs = ioc_regs;
244
245 io_tlb_ps = readq(info->ioc_regs+IOC_TCNFG);
246 switch (io_tlb_ps) {
247 case 0: io_tlb_shift = 12; break;
248 case 1: io_tlb_shift = 13; break;
249 case 2: io_tlb_shift = 14; break;
250 case 3: io_tlb_shift = 16; break;
251 default:
252 printk(KERN_ERR DRVPFX "Invalid IOTLB page size "
253 "configuration 0x%llx\n", io_tlb_ps);
254 info->gatt = NULL;
255 info->gatt_entries = 0;
256 return -ENODEV;
257 }
258 info->io_page_size = 1 << io_tlb_shift;
259 info->io_pages_per_kpage = PAGE_SIZE / info->io_page_size;
260
261 iova_base = readq(info->ioc_regs+IOC_IBASE) & ~0x1;
262 info->gart_base = iova_base + PLUTO_IOVA_SIZE - PLUTO_GART_SIZE;
263
264 info->gart_size = PLUTO_GART_SIZE;
265 info->gatt_entries = info->gart_size / info->io_page_size;
266
267 io_pdir = phys_to_virt(readq(info->ioc_regs+IOC_PDIR_BASE));
268 info->gatt = &io_pdir[(PLUTO_IOVA_SIZE/2) >> PAGE_SHIFT];
269
270 if (info->gatt[0] != SBA_AGPGART_COOKIE) {
271 info->gatt = NULL;
272 info->gatt_entries = 0;
273 printk(KERN_ERR DRVPFX "No reserved IO PDIR entry found; "
274 "GART disabled\n");
275 return -ENODEV;
276 }
277
278 return 0;
279}
280
281static int
282lba_find_capability(int cap)
283{
284 struct _parisc_agp_info *info = &parisc_agp_info;
285 u16 status;
286 u8 pos, id;
287 int ttl = 48;
288
289 status = readw(info->lba_regs + PCI_STATUS);
290 if (!(status & PCI_STATUS_CAP_LIST))
291 return 0;
292 pos = readb(info->lba_regs + PCI_CAPABILITY_LIST);
293 while (ttl-- && pos >= 0x40) {
294 pos &= ~3;
295 id = readb(info->lba_regs + pos + PCI_CAP_LIST_ID);
296 if (id == 0xff)
297 break;
298 if (id == cap)
299 return pos;
300 pos = readb(info->lba_regs + pos + PCI_CAP_LIST_NEXT);
301 }
302 return 0;
303}
304
305static int __init
306agp_lba_init(void __iomem *lba_hpa)
307{
308 struct _parisc_agp_info *info = &parisc_agp_info;
309 int cap;
310
311 info->lba_regs = lba_hpa;
312 info->lba_cap_offset = lba_find_capability(PCI_CAP_ID_AGP);
313
314 cap = readl(lba_hpa + info->lba_cap_offset) & 0xff;
315 if (cap != PCI_CAP_ID_AGP) {
316 printk(KERN_ERR DRVPFX "Invalid capability ID 0x%02x at 0x%x\n",
317 cap, info->lba_cap_offset);
318 return -ENODEV;
319 }
320
321 return 0;
322}
323
324static int __init
325parisc_agp_setup(void __iomem *ioc_hpa, void __iomem *lba_hpa)
326{
327 struct pci_dev *fake_bridge_dev = NULL;
328 struct agp_bridge_data *bridge;
329 int error = 0;
330
331 fake_bridge_dev = kmalloc(sizeof (struct pci_dev), GFP_KERNEL);
332 if (!fake_bridge_dev) {
333 error = -ENOMEM;
334 goto fail;
335 }
336
337 error = agp_ioc_init(ioc_hpa);
338 if (error)
339 goto fail;
340
341 error = agp_lba_init(lba_hpa);
342 if (error)
343 goto fail;
344
345 bridge = agp_alloc_bridge();
346 if (!bridge) {
347 error = -ENOMEM;
348 goto fail;
349 }
350 bridge->driver = &parisc_agp_driver;
351
352 fake_bridge_dev->vendor = PCI_VENDOR_ID_HP;
353 fake_bridge_dev->device = PCI_DEVICE_ID_HP_PCIX_LBA;
354 bridge->dev = fake_bridge_dev;
355
356 error = agp_add_bridge(bridge);
357
358fail:
359 return error;
360}
361
362static struct device *next_device(struct klist_iter *i) {
363 struct klist_node * n = klist_next(i);
364 return n ? container_of(n, struct device, knode_parent) : NULL;
365}
366
367static int
368parisc_agp_init(void)
369{
370 extern struct sba_device *sba_list;
371
372 int err = -1;
373 struct parisc_device *sba = NULL, *lba = NULL;
374 struct lba_device *lbadev = NULL;
375 struct device *dev = NULL;
376 struct klist_iter i;
377
378 if (!sba_list)
379 goto out;
380
381 /* Find our parent Pluto */
382 sba = sba_list->dev;
383 if (!IS_PLUTO(sba)) {
384 printk(KERN_INFO DRVPFX "No Pluto found, so no AGPGART for you.\n");
385 goto out;
386 }
387
388 /* Now search our Pluto for our precious AGP device... */
389 klist_iter_init(&sba->dev.klist_children, &i);
390 while ((dev = next_device(&i))) {
391 struct parisc_device *padev = to_parisc_device(dev);
392 if (IS_QUICKSILVER(padev))
393 lba = padev;
394 }
395 klist_iter_exit(&i);
396
397 if (!lba) {
398 printk(KERN_INFO DRVPFX "No AGP devices found.\n");
399 goto out;
400 }
401
402 lbadev = parisc_get_drvdata(lba);
403
404 /* w00t, let's go find our cookies... */
405 parisc_agp_setup(sba_list->ioc[0].ioc_hpa, lbadev->hba.base_addr);
406
407 return 0;
408
409out:
410 return err;
411}
412
413module_init(parisc_agp_init);
414
415MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>");
416MODULE_LICENSE("GPL");
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index d0e92ed0a367..486f97c3f4e5 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -112,17 +112,6 @@ static struct serial_state rs_table[1];
112 112
113#define NR_PORTS ARRAY_SIZE(rs_table) 113#define NR_PORTS ARRAY_SIZE(rs_table)
114 114
115/*
116 * tmp_buf is used as a temporary buffer by serial_write. We need to
117 * lock it in case the copy_from_user blocks while swapping in a page,
118 * and some other program tries to do a serial write at the same time.
119 * Since the lock will only come under contention when the system is
120 * swapping and available memory is low, it makes sense to share one
121 * buffer across all the serial ports, since it significantly saves
122 * memory if large numbers of serial ports are open.
123 */
124static unsigned char *tmp_buf;
125
126#include <asm/uaccess.h> 115#include <asm/uaccess.h>
127 116
128#define serial_isroot() (capable(CAP_SYS_ADMIN)) 117#define serial_isroot() (capable(CAP_SYS_ADMIN))
@@ -912,7 +901,7 @@ static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count
912 if (serial_paranoia_check(info, tty->name, "rs_write")) 901 if (serial_paranoia_check(info, tty->name, "rs_write"))
913 return 0; 902 return 0;
914 903
915 if (!info->xmit.buf || !tmp_buf) 904 if (!info->xmit.buf)
916 return 0; 905 return 0;
917 906
918 local_save_flags(flags); 907 local_save_flags(flags);
@@ -1778,7 +1767,6 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
1778{ 1767{
1779 struct async_struct *info; 1768 struct async_struct *info;
1780 int retval, line; 1769 int retval, line;
1781 unsigned long page;
1782 1770
1783 line = tty->index; 1771 line = tty->index;
1784 if ((line < 0) || (line >= NR_PORTS)) { 1772 if ((line < 0) || (line >= NR_PORTS)) {
@@ -1798,17 +1786,6 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
1798#endif 1786#endif
1799 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; 1787 info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
1800 1788
1801 if (!tmp_buf) {
1802 page = get_zeroed_page(GFP_KERNEL);
1803 if (!page) {
1804 return -ENOMEM;
1805 }
1806 if (tmp_buf)
1807 free_page(page);
1808 else
1809 tmp_buf = (unsigned char *) page;
1810 }
1811
1812 /* 1789 /*
1813 * If the port is the middle of closing, bail out now 1790 * If the port is the middle of closing, bail out now
1814 */ 1791 */
@@ -2090,11 +2067,6 @@ static __exit void rs_exit(void)
2090 kfree(info); 2067 kfree(info);
2091 } 2068 }
2092 2069
2093 if (tmp_buf) {
2094 free_page((unsigned long) tmp_buf);
2095 tmp_buf = NULL;
2096 }
2097
2098 release_mem_region(CUSTOM_PHYSADDR+0x30, 4); 2070 release_mem_region(CUSTOM_PHYSADDR+0x30, 4);
2099} 2071}
2100 2072
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index f85b4eb16618..87b2fb510871 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -748,18 +748,6 @@ static struct cyclades_port cy_port[NR_PORTS];
748static int cy_next_channel; /* next minor available */ 748static int cy_next_channel; /* next minor available */
749 749
750/* 750/*
751 * tmp_buf is used as a temporary buffer by serial_write. We need to
752 * lock it in case the copy_from_user blocks while swapping in a page,
753 * and some other program tries to do a serial write at the same time.
754 * Since the lock will only come under contention when the system is
755 * swapping and available memory is low, it makes sense to share one
756 * buffer across all the serial ports, since it significantly saves
757 * memory if large numbers of serial ports are open. This buffer is
758 * allocated when the first cy_open occurs.
759 */
760static unsigned char *tmp_buf;
761
762/*
763 * This is used to look up the divisor speeds and the timeouts 751 * This is used to look up the divisor speeds and the timeouts
764 * We're normally limited to 15 distinct baud rates. The extra 752 * We're normally limited to 15 distinct baud rates. The extra
765 * are accessed via settings in info->flags. 753 * are accessed via settings in info->flags.
@@ -2466,7 +2454,6 @@ cy_open(struct tty_struct *tty, struct file * filp)
2466{ 2454{
2467 struct cyclades_port *info; 2455 struct cyclades_port *info;
2468 int retval, line; 2456 int retval, line;
2469 unsigned long page;
2470 2457
2471 line = tty->index; 2458 line = tty->index;
2472 if ((line < 0) || (NR_PORTS <= line)){ 2459 if ((line < 0) || (NR_PORTS <= line)){
@@ -2545,15 +2532,6 @@ cy_open(struct tty_struct *tty, struct file * filp)
2545 printk("cyc:cy_open (%d): incrementing count to %d\n", 2532 printk("cyc:cy_open (%d): incrementing count to %d\n",
2546 current->pid, info->count); 2533 current->pid, info->count);
2547#endif 2534#endif
2548 if (!tmp_buf) {
2549 page = get_zeroed_page(GFP_KERNEL);
2550 if (!page)
2551 return -ENOMEM;
2552 if (tmp_buf)
2553 free_page(page);
2554 else
2555 tmp_buf = (unsigned char *) page;
2556 }
2557 2535
2558 /* 2536 /*
2559 * If the port is the middle of closing, bail out now 2537 * If the port is the middle of closing, bail out now
@@ -2832,7 +2810,7 @@ cy_write(struct tty_struct * tty, const unsigned char *buf, int count)
2832 return 0; 2810 return 0;
2833 } 2811 }
2834 2812
2835 if (!info->xmit_buf || !tmp_buf) 2813 if (!info->xmit_buf)
2836 return 0; 2814 return 0;
2837 2815
2838 CY_LOCK(info, flags); 2816 CY_LOCK(info, flags);
@@ -5490,10 +5468,6 @@ cy_cleanup_module(void)
5490#endif 5468#endif
5491 } 5469 }
5492 } 5470 }
5493 if (tmp_buf) {
5494 free_page((unsigned long) tmp_buf);
5495 tmp_buf = NULL;
5496 }
5497} /* cy_cleanup_module */ 5471} /* cy_cleanup_module */
5498 5472
5499module_init(cy_init); 5473module_init(cy_init);
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 3baa2ab8cbd4..c3f95583a120 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -1113,11 +1113,8 @@ static void __exit epca_module_exit(void)
1113 ch = card_ptr[crd]; 1113 ch = card_ptr[crd];
1114 for (count = 0; count < bd->numports; count++, ch++) 1114 for (count = 0; count < bd->numports; count++, ch++)
1115 { /* Begin for each port */ 1115 { /* Begin for each port */
1116 if (ch) { 1116 if (ch && ch->tty)
1117 if (ch->tty) 1117 tty_hangup(ch->tty);
1118 tty_hangup(ch->tty);
1119 kfree(ch->tmp_buf);
1120 }
1121 } /* End for each port */ 1118 } /* End for each port */
1122 } /* End for each card */ 1119 } /* End for each card */
1123 pci_unregister_driver (&epca_driver); 1120 pci_unregister_driver (&epca_driver);
@@ -1635,16 +1632,6 @@ static void post_fep_init(unsigned int crd)
1635 init_waitqueue_head(&ch->close_wait); 1632 init_waitqueue_head(&ch->close_wait);
1636 1633
1637 spin_unlock_irqrestore(&epca_lock, flags); 1634 spin_unlock_irqrestore(&epca_lock, flags);
1638
1639 ch->tmp_buf = kmalloc(ch->txbufsize,GFP_KERNEL);
1640 if (!ch->tmp_buf) {
1641 printk(KERN_ERR "POST FEP INIT : kmalloc failed for port 0x%x\n",i);
1642 release_region((int)bd->port, 4);
1643 while(i-- > 0)
1644 kfree((ch--)->tmp_buf);
1645 return;
1646 } else
1647 memset((void *)ch->tmp_buf,0,ch->txbufsize);
1648 } /* End for each port */ 1635 } /* End for each port */
1649 1636
1650 printk(KERN_INFO 1637 printk(KERN_INFO
diff --git a/drivers/char/epca.h b/drivers/char/epca.h
index 456d6c8f94a8..a297238cd3ba 100644
--- a/drivers/char/epca.h
+++ b/drivers/char/epca.h
@@ -130,7 +130,6 @@ struct channel
130 unsigned long c_oflag; 130 unsigned long c_oflag;
131 unsigned char __iomem *txptr; 131 unsigned char __iomem *txptr;
132 unsigned char __iomem *rxptr; 132 unsigned char __iomem *rxptr;
133 unsigned char *tmp_buf;
134 struct board_info *board; 133 struct board_info *board;
135 struct board_chan __iomem *brdchan; 134 struct board_chan __iomem *brdchan;
136 struct digi_struct digiext; 135 struct digi_struct digiext;
diff --git a/drivers/char/generic_serial.c b/drivers/char/generic_serial.c
index 4711d9b3a595..87127e49c0db 100644
--- a/drivers/char/generic_serial.c
+++ b/drivers/char/generic_serial.c
@@ -33,8 +33,6 @@
33 33
34#define DEBUG 34#define DEBUG
35 35
36static char * tmp_buf;
37
38static int gs_debug; 36static int gs_debug;
39 37
40#ifdef DEBUG 38#ifdef DEBUG
@@ -205,7 +203,7 @@ int gs_write(struct tty_struct * tty,
205 if (!tty) return -EIO; 203 if (!tty) return -EIO;
206 204
207 port = tty->driver_data; 205 port = tty->driver_data;
208 if (!port || !port->xmit_buf || !tmp_buf) 206 if (!port || !port->xmit_buf)
209 return -EIO; 207 return -EIO;
210 208
211 local_save_flags(flags); 209 local_save_flags(flags);
@@ -837,24 +835,9 @@ void gs_set_termios (struct tty_struct * tty,
837int gs_init_port(struct gs_port *port) 835int gs_init_port(struct gs_port *port)
838{ 836{
839 unsigned long flags; 837 unsigned long flags;
840 unsigned long page;
841 838
842 func_enter (); 839 func_enter ();
843 840
844 if (!tmp_buf) {
845 page = get_zeroed_page(GFP_KERNEL);
846 spin_lock_irqsave (&port->driver_lock, flags); /* Don't expect this to make a difference. */
847 if (tmp_buf)
848 free_page(page);
849 else
850 tmp_buf = (unsigned char *) page;
851 spin_unlock_irqrestore (&port->driver_lock, flags);
852 if (!tmp_buf) {
853 func_exit ();
854 return -ENOMEM;
855 }
856 }
857
858 if (port->flags & ASYNC_INITIALIZED) { 841 if (port->flags & ASYNC_INITIALIZED) {
859 func_exit (); 842 func_exit ();
860 return 0; 843 return 0;
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
index 8b6f197e5f8c..f144a947bd17 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/char/hvc_iseries.c
@@ -29,6 +29,7 @@
29#include <asm/hvconsole.h> 29#include <asm/hvconsole.h>
30#include <asm/vio.h> 30#include <asm/vio.h>
31#include <asm/prom.h> 31#include <asm/prom.h>
32#include <asm/firmware.h>
32#include <asm/iseries/vio.h> 33#include <asm/iseries/vio.h>
33#include <asm/iseries/hv_call.h> 34#include <asm/iseries/hv_call.h>
34#include <asm/iseries/hv_lp_config.h> 35#include <asm/iseries/hv_lp_config.h>
@@ -488,6 +489,9 @@ static int hvc_vio_init(void)
488 atomic_t wait_flag; 489 atomic_t wait_flag;
489 int rc; 490 int rc;
490 491
492 if (!firmware_has_feature(FW_FEATURE_ISERIES))
493 return -EIO;
494
491 /* +2 for fudge */ 495 /* +2 for fudge */
492 rc = viopath_open(HvLpConfig_getPrimaryLpIndex(), 496 rc = viopath_open(HvLpConfig_getPrimaryLpIndex(),
493 viomajorsubtype_chario, VIOCHAR_WINDOW + 2); 497 viomajorsubtype_chario, VIOCHAR_WINDOW + 2);
@@ -562,7 +566,7 @@ static int hvc_find_vtys(void)
562 566
563 for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL; 567 for (vty = of_find_node_by_name(NULL, "vty"); vty != NULL;
564 vty = of_find_node_by_name(vty, "vty")) { 568 vty = of_find_node_by_name(vty, "vty")) {
565 uint32_t *vtermno; 569 const uint32_t *vtermno;
566 570
567 /* We have statically defined space for only a certain number 571 /* We have statically defined space for only a certain number
568 * of console adapters. 572 * of console adapters.
@@ -571,7 +575,7 @@ static int hvc_find_vtys(void)
571 (num_found >= VTTY_PORTS)) 575 (num_found >= VTTY_PORTS))
572 break; 576 break;
573 577
574 vtermno = (uint32_t *)get_property(vty, "reg", NULL); 578 vtermno = get_property(vty, "reg", NULL);
575 if (!vtermno) 579 if (!vtermno)
576 continue; 580 continue;
577 581
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index cc95941148fb..f9c00844d2bf 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -35,6 +35,7 @@
35#include <asm/hvconsole.h> 35#include <asm/hvconsole.h>
36#include <asm/vio.h> 36#include <asm/vio.h>
37#include <asm/prom.h> 37#include <asm/prom.h>
38#include <asm/firmware.h>
38 39
39#include "hvc_console.h" 40#include "hvc_console.h"
40 41
@@ -120,6 +121,9 @@ static int hvc_vio_init(void)
120{ 121{
121 int rc; 122 int rc;
122 123
124 if (firmware_has_feature(FW_FEATURE_ISERIES))
125 return -EIO;
126
123 /* Register as a vio device to receive callbacks */ 127 /* Register as a vio device to receive callbacks */
124 rc = vio_register_driver(&hvc_vio_driver); 128 rc = vio_register_driver(&hvc_vio_driver);
125 129
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 214d850112fd..b0ab3f28cc6a 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -81,7 +81,6 @@
81 81
82static struct riscom_board * IRQ_to_board[16]; 82static struct riscom_board * IRQ_to_board[16];
83static struct tty_driver *riscom_driver; 83static struct tty_driver *riscom_driver;
84static unsigned char * tmp_buf;
85 84
86static unsigned long baud_table[] = { 85static unsigned long baud_table[] = {
87 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 86 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
@@ -1124,7 +1123,7 @@ static int rc_write(struct tty_struct * tty,
1124 1123
1125 bp = port_Board(port); 1124 bp = port_Board(port);
1126 1125
1127 if (!tty || !port->xmit_buf || !tmp_buf) 1126 if (!tty || !port->xmit_buf)
1128 return 0; 1127 return 0;
1129 1128
1130 save_flags(flags); 1129 save_flags(flags);
@@ -1612,11 +1611,6 @@ static inline int rc_init_drivers(void)
1612 if (!riscom_driver) 1611 if (!riscom_driver)
1613 return -ENOMEM; 1612 return -ENOMEM;
1614 1613
1615 if (!(tmp_buf = (unsigned char *) get_zeroed_page(GFP_KERNEL))) {
1616 printk(KERN_ERR "rc: Couldn't get free page.\n");
1617 put_tty_driver(riscom_driver);
1618 return 1;
1619 }
1620 memset(IRQ_to_board, 0, sizeof(IRQ_to_board)); 1614 memset(IRQ_to_board, 0, sizeof(IRQ_to_board));
1621 riscom_driver->owner = THIS_MODULE; 1615 riscom_driver->owner = THIS_MODULE;
1622 riscom_driver->name = "ttyL"; 1616 riscom_driver->name = "ttyL";
@@ -1629,7 +1623,6 @@ static inline int rc_init_drivers(void)
1629 riscom_driver->flags = TTY_DRIVER_REAL_RAW; 1623 riscom_driver->flags = TTY_DRIVER_REAL_RAW;
1630 tty_set_operations(riscom_driver, &riscom_ops); 1624 tty_set_operations(riscom_driver, &riscom_ops);
1631 if ((error = tty_register_driver(riscom_driver))) { 1625 if ((error = tty_register_driver(riscom_driver))) {
1632 free_page((unsigned long)tmp_buf);
1633 put_tty_driver(riscom_driver); 1626 put_tty_driver(riscom_driver);
1634 printk(KERN_ERR "rc: Couldn't register RISCom/8 driver, " 1627 printk(KERN_ERR "rc: Couldn't register RISCom/8 driver, "
1635 "error = %d\n", 1628 "error = %d\n",
@@ -1657,7 +1650,6 @@ static void rc_release_drivers(void)
1657 1650
1658 save_flags(flags); 1651 save_flags(flags);
1659 cli(); 1652 cli();
1660 free_page((unsigned long)tmp_buf);
1661 tty_unregister_driver(riscom_driver); 1653 tty_unregister_driver(riscom_driver);
1662 put_tty_driver(riscom_driver); 1654 put_tty_driver(riscom_driver);
1663 restore_flags(flags); 1655 restore_flags(flags);
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index b4ea1266b663..f4809c8183cc 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -119,17 +119,6 @@ struct cyclades_port cy_port[] = {
119#define NR_PORTS ARRAY_SIZE(cy_port) 119#define NR_PORTS ARRAY_SIZE(cy_port)
120 120
121/* 121/*
122 * tmp_buf is used as a temporary buffer by serial_write. We need to
123 * lock it in case the copy_from_user blocks while swapping in a page,
124 * and some other program tries to do a serial write at the same time.
125 * Since the lock will only come under contention when the system is
126 * swapping and available memory is low, it makes sense to share one
127 * buffer across all the serial ports, since it significantly saves
128 * memory if large numbers of serial ports are open.
129 */
130static unsigned char *tmp_buf = 0;
131
132/*
133 * This is used to look up the divisor speeds and the timeouts 122 * This is used to look up the divisor speeds and the timeouts
134 * We're normally limited to 15 distinct baud rates. The extra 123 * We're normally limited to 15 distinct baud rates. The extra
135 * are accessed via settings in info->flags. 124 * are accessed via settings in info->flags.
@@ -1132,7 +1121,7 @@ cy_put_char(struct tty_struct *tty, unsigned char ch)
1132 if (serial_paranoia_check(info, tty->name, "cy_put_char")) 1121 if (serial_paranoia_check(info, tty->name, "cy_put_char"))
1133 return; 1122 return;
1134 1123
1135 if (!tty || !info->xmit_buf) 1124 if (!info->xmit_buf)
1136 return; 1125 return;
1137 1126
1138 local_irq_save(flags); 1127 local_irq_save(flags);
@@ -1198,7 +1187,7 @@ cy_write(struct tty_struct * tty,
1198 return 0; 1187 return 0;
1199 } 1188 }
1200 1189
1201 if (!tty || !info->xmit_buf || !tmp_buf){ 1190 if (!info->xmit_buf){
1202 return 0; 1191 return 0;
1203 } 1192 }
1204 1193
@@ -1983,13 +1972,6 @@ cy_open(struct tty_struct *tty, struct file * filp)
1983 tty->driver_data = info; 1972 tty->driver_data = info;
1984 info->tty = tty; 1973 info->tty = tty;
1985 1974
1986 if (!tmp_buf) {
1987 tmp_buf = (unsigned char *) get_zeroed_page(GFP_KERNEL);
1988 if (!tmp_buf){
1989 return -ENOMEM;
1990 }
1991 }
1992
1993 /* 1975 /*
1994 * Start up serial port 1976 * Start up serial port
1995 */ 1977 */
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
index d418b8297211..22915cc46ba7 100644
--- a/drivers/clocksource/scx200_hrt.c
+++ b/drivers/clocksource/scx200_hrt.c
@@ -63,7 +63,7 @@ static struct clocksource cs_hrt = {
63 63
64static int __init init_hrt_clocksource(void) 64static int __init init_hrt_clocksource(void)
65{ 65{
66 /* Make sure scx200 has initializedd the configuration block */ 66 /* Make sure scx200 has initialized the configuration block */
67 if (!scx200_cb_present()) 67 if (!scx200_cb_present())
68 return -ENODEV; 68 return -ENODEV;
69 69
@@ -76,7 +76,7 @@ static int __init init_hrt_clocksource(void)
76 } 76 }
77 77
78 /* write timer config */ 78 /* write timer config */
79 outb(HR_TMEN | (mhz27) ? HR_TMCLKSEL : 0, 79 outb(HR_TMEN | (mhz27 ? HR_TMCLKSEL : 0),
80 scx200_cb_base + SCx200_TMCNFG_OFFSET); 80 scx200_cb_base + SCx200_TMCNFG_OFFSET);
81 81
82 if (mhz27) { 82 if (mhz27) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2caaf71d80c8..86e69b7f9122 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -52,8 +52,14 @@ static void handle_update(void *data);
52 * The mutex locks both lists. 52 * The mutex locks both lists.
53 */ 53 */
54static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list); 54static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
55static BLOCKING_NOTIFIER_HEAD(cpufreq_transition_notifier_list); 55static struct srcu_notifier_head cpufreq_transition_notifier_list;
56 56
57static int __init init_cpufreq_transition_notifier_list(void)
58{
59 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
60 return 0;
61}
62core_initcall(init_cpufreq_transition_notifier_list);
57 63
58static LIST_HEAD(cpufreq_governor_list); 64static LIST_HEAD(cpufreq_governor_list);
59static DEFINE_MUTEX (cpufreq_governor_mutex); 65static DEFINE_MUTEX (cpufreq_governor_mutex);
@@ -262,14 +268,14 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
262 freqs->old = policy->cur; 268 freqs->old = policy->cur;
263 } 269 }
264 } 270 }
265 blocking_notifier_call_chain(&cpufreq_transition_notifier_list, 271 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
266 CPUFREQ_PRECHANGE, freqs); 272 CPUFREQ_PRECHANGE, freqs);
267 adjust_jiffies(CPUFREQ_PRECHANGE, freqs); 273 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
268 break; 274 break;
269 275
270 case CPUFREQ_POSTCHANGE: 276 case CPUFREQ_POSTCHANGE:
271 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); 277 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
272 blocking_notifier_call_chain(&cpufreq_transition_notifier_list, 278 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
273 CPUFREQ_POSTCHANGE, freqs); 279 CPUFREQ_POSTCHANGE, freqs);
274 if (likely(policy) && likely(policy->cpu == freqs->cpu)) 280 if (likely(policy) && likely(policy->cpu == freqs->cpu))
275 policy->cur = freqs->new; 281 policy->cur = freqs->new;
@@ -1049,7 +1055,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
1049 freqs.old = cpu_policy->cur; 1055 freqs.old = cpu_policy->cur;
1050 freqs.new = cur_freq; 1056 freqs.new = cur_freq;
1051 1057
1052 blocking_notifier_call_chain(&cpufreq_transition_notifier_list, 1058 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
1053 CPUFREQ_SUSPENDCHANGE, &freqs); 1059 CPUFREQ_SUSPENDCHANGE, &freqs);
1054 adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs); 1060 adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
1055 1061
@@ -1130,7 +1136,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
1130 freqs.old = cpu_policy->cur; 1136 freqs.old = cpu_policy->cur;
1131 freqs.new = cur_freq; 1137 freqs.new = cur_freq;
1132 1138
1133 blocking_notifier_call_chain( 1139 srcu_notifier_call_chain(
1134 &cpufreq_transition_notifier_list, 1140 &cpufreq_transition_notifier_list,
1135 CPUFREQ_RESUMECHANGE, &freqs); 1141 CPUFREQ_RESUMECHANGE, &freqs);
1136 adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs); 1142 adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
@@ -1176,7 +1182,7 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1176 1182
1177 switch (list) { 1183 switch (list) {
1178 case CPUFREQ_TRANSITION_NOTIFIER: 1184 case CPUFREQ_TRANSITION_NOTIFIER:
1179 ret = blocking_notifier_chain_register( 1185 ret = srcu_notifier_chain_register(
1180 &cpufreq_transition_notifier_list, nb); 1186 &cpufreq_transition_notifier_list, nb);
1181 break; 1187 break;
1182 case CPUFREQ_POLICY_NOTIFIER: 1188 case CPUFREQ_POLICY_NOTIFIER:
@@ -1208,7 +1214,7 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1208 1214
1209 switch (list) { 1215 switch (list) {
1210 case CPUFREQ_TRANSITION_NOTIFIER: 1216 case CPUFREQ_TRANSITION_NOTIFIER:
1211 ret = blocking_notifier_chain_unregister( 1217 ret = srcu_notifier_chain_unregister(
1212 &cpufreq_transition_notifier_list, nb); 1218 &cpufreq_transition_notifier_list, nb);
1213 break; 1219 break;
1214 case CPUFREQ_POLICY_NOTIFIER: 1220 case CPUFREQ_POLICY_NOTIFIER:
diff --git a/drivers/isdn/hisax/niccy.c b/drivers/isdn/hisax/niccy.c
index 489022bdef7b..0945336c28da 100644
--- a/drivers/isdn/hisax/niccy.c
+++ b/drivers/isdn/hisax/niccy.c
@@ -13,7 +13,6 @@
13 * 13 *
14 */ 14 */
15 15
16
17#include <linux/init.h> 16#include <linux/init.h>
18#include "hisax.h" 17#include "hisax.h"
19#include "isac.h" 18#include "isac.h"
@@ -45,33 +44,31 @@ static const char *niccy_revision = "$Revision: 1.21.2.4 $";
45#define PCI_IRQ_DISABLE 0xff0000 44#define PCI_IRQ_DISABLE 0xff0000
46#define PCI_IRQ_ASSERT 0x800000 45#define PCI_IRQ_ASSERT 0x800000
47 46
48static inline u_char 47static inline u_char readreg(unsigned int ale, unsigned int adr, u_char off)
49readreg(unsigned int ale, unsigned int adr, u_char off)
50{ 48{
51 register u_char ret; 49 register u_char ret;
52 50
53 byteout(ale, off); 51 byteout(ale, off);
54 ret = bytein(adr); 52 ret = bytein(adr);
55 return (ret); 53 return ret;
56} 54}
57 55
58static inline void 56static inline void readfifo(unsigned int ale, unsigned int adr, u_char off,
59readfifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size) 57 u_char *data, int size)
60{ 58{
61 byteout(ale, off); 59 byteout(ale, off);
62 insb(adr, data, size); 60 insb(adr, data, size);
63} 61}
64 62
65 63static inline void writereg(unsigned int ale, unsigned int adr, u_char off,
66static inline void 64 u_char data)
67writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
68{ 65{
69 byteout(ale, off); 66 byteout(ale, off);
70 byteout(adr, data); 67 byteout(adr, data);
71} 68}
72 69
73static inline void 70static inline void writefifo(unsigned int ale, unsigned int adr, u_char off,
74writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size) 71 u_char *data, int size)
75{ 72{
76 byteout(ale, off); 73 byteout(ale, off);
77 outsb(adr, data, size); 74 outsb(adr, data, size);
@@ -79,39 +76,34 @@ writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int siz
79 76
80/* Interface functions */ 77/* Interface functions */
81 78
82static u_char 79static u_char ReadISAC(struct IsdnCardState *cs, u_char offset)
83ReadISAC(struct IsdnCardState *cs, u_char offset)
84{ 80{
85 return (readreg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, offset)); 81 return readreg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, offset);
86} 82}
87 83
88static void 84static void WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
89WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
90{ 85{
91 writereg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, offset, value); 86 writereg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, offset, value);
92} 87}
93 88
94static void 89static void ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
95ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
96{ 90{
97 readfifo(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, 0, data, size); 91 readfifo(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, 0, data, size);
98} 92}
99 93
100static void 94static void WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
101WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
102{ 95{
103 writefifo(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, 0, data, size); 96 writefifo(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, 0, data, size);
104} 97}
105 98
106static u_char 99static u_char ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
107ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
108{ 100{
109 return (readreg(cs->hw.niccy.hscx_ale, 101 return readreg(cs->hw.niccy.hscx_ale,
110 cs->hw.niccy.hscx, offset + (hscx ? 0x40 : 0))); 102 cs->hw.niccy.hscx, offset + (hscx ? 0x40 : 0));
111} 103}
112 104
113static void 105static void WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset,
114WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value) 106 u_char value)
115{ 107{
116 writereg(cs->hw.niccy.hscx_ale, 108 writereg(cs->hw.niccy.hscx_ale,
117 cs->hw.niccy.hscx, offset + (hscx ? 0x40 : 0), value); 109 cs->hw.niccy.hscx, offset + (hscx ? 0x40 : 0), value);
@@ -130,8 +122,8 @@ WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
130 122
131#include "hscx_irq.c" 123#include "hscx_irq.c"
132 124
133static irqreturn_t 125static irqreturn_t niccy_interrupt(int intno, void *dev_id,
134niccy_interrupt(int intno, void *dev_id, struct pt_regs *regs) 126 struct pt_regs *regs)
135{ 127{
136 struct IsdnCardState *cs = dev_id; 128 struct IsdnCardState *cs = dev_id;
137 u_char val; 129 u_char val;
@@ -141,21 +133,23 @@ niccy_interrupt(int intno, void *dev_id, struct pt_regs *regs)
141 if (cs->subtyp == NICCY_PCI) { 133 if (cs->subtyp == NICCY_PCI) {
142 int ival; 134 int ival;
143 ival = inl(cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG); 135 ival = inl(cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG);
144 if (!(ival & PCI_IRQ_ASSERT)) { /* IRQ not for us (shared) */ 136 if (!(ival & PCI_IRQ_ASSERT)) { /* IRQ not for us (shared) */
145 spin_unlock_irqrestore(&cs->lock, flags); 137 spin_unlock_irqrestore(&cs->lock, flags);
146 return IRQ_NONE; 138 return IRQ_NONE;
147 } 139 }
148 outl(ival, cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG); 140 outl(ival, cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG);
149 } 141 }
150 val = readreg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_ISTA + 0x40); 142 val = readreg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx,
151 Start_HSCX: 143 HSCX_ISTA + 0x40);
144Start_HSCX:
152 if (val) 145 if (val)
153 hscx_int_main(cs, val); 146 hscx_int_main(cs, val);
154 val = readreg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, ISAC_ISTA); 147 val = readreg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, ISAC_ISTA);
155 Start_ISAC: 148Start_ISAC:
156 if (val) 149 if (val)
157 isac_interrupt(cs, val); 150 isac_interrupt(cs, val);
158 val = readreg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_ISTA + 0x40); 151 val = readreg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx,
152 HSCX_ISTA + 0x40);
159 if (val) { 153 if (val) {
160 if (cs->debug & L1_DEB_HSCX) 154 if (cs->debug & L1_DEB_HSCX)
161 debugl1(cs, "HSCX IntStat after IntRoutine"); 155 debugl1(cs, "HSCX IntStat after IntRoutine");
@@ -168,21 +162,21 @@ niccy_interrupt(int intno, void *dev_id, struct pt_regs *regs)
168 goto Start_ISAC; 162 goto Start_ISAC;
169 } 163 }
170 writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK, 0xFF); 164 writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK, 0xFF);
171 writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK + 0x40, 0xFF); 165 writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK + 0x40,
166 0xFF);
172 writereg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, ISAC_MASK, 0xFF); 167 writereg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, ISAC_MASK, 0xFF);
173 writereg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, ISAC_MASK, 0); 168 writereg(cs->hw.niccy.isac_ale, cs->hw.niccy.isac, ISAC_MASK, 0);
174 writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK, 0); 169 writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK, 0);
175 writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK + 0x40, 0); 170 writereg(cs->hw.niccy.hscx_ale, cs->hw.niccy.hscx, HSCX_MASK + 0x40,0);
176 spin_unlock_irqrestore(&cs->lock, flags); 171 spin_unlock_irqrestore(&cs->lock, flags);
177 return IRQ_HANDLED; 172 return IRQ_HANDLED;
178} 173}
179 174
180static void 175static void release_io_niccy(struct IsdnCardState *cs)
181release_io_niccy(struct IsdnCardState *cs)
182{ 176{
183 if (cs->subtyp == NICCY_PCI) { 177 if (cs->subtyp == NICCY_PCI) {
184 int val; 178 int val;
185 179
186 val = inl(cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG); 180 val = inl(cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG);
187 val &= PCI_IRQ_DISABLE; 181 val &= PCI_IRQ_DISABLE;
188 outl(val, cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG); 182 outl(val, cs->hw.niccy.cfg_reg + PCI_IRQ_CTRL_REG);
@@ -194,8 +188,7 @@ release_io_niccy(struct IsdnCardState *cs)
194 } 188 }
195} 189}
196 190
197static void 191static void niccy_reset(struct IsdnCardState *cs)
198niccy_reset(struct IsdnCardState *cs)
199{ 192{
200 if (cs->subtyp == NICCY_PCI) { 193 if (cs->subtyp == NICCY_PCI) {
201 int val; 194 int val;
@@ -207,29 +200,28 @@ niccy_reset(struct IsdnCardState *cs)
207 inithscxisac(cs, 3); 200 inithscxisac(cs, 3);
208} 201}
209 202
210static int 203static int niccy_card_msg(struct IsdnCardState *cs, int mt, void *arg)
211niccy_card_msg(struct IsdnCardState *cs, int mt, void *arg)
212{ 204{
213 u_long flags; 205 u_long flags;
214 206
215 switch (mt) { 207 switch (mt) {
216 case CARD_RESET: 208 case CARD_RESET:
217 spin_lock_irqsave(&cs->lock, flags); 209 spin_lock_irqsave(&cs->lock, flags);
218 niccy_reset(cs); 210 niccy_reset(cs);
219 spin_unlock_irqrestore(&cs->lock, flags); 211 spin_unlock_irqrestore(&cs->lock, flags);
220 return(0); 212 return 0;
221 case CARD_RELEASE: 213 case CARD_RELEASE:
222 release_io_niccy(cs); 214 release_io_niccy(cs);
223 return(0); 215 return 0;
224 case CARD_INIT: 216 case CARD_INIT:
225 spin_lock_irqsave(&cs->lock, flags); 217 spin_lock_irqsave(&cs->lock, flags);
226 niccy_reset(cs); 218 niccy_reset(cs);
227 spin_unlock_irqrestore(&cs->lock, flags); 219 spin_unlock_irqrestore(&cs->lock, flags);
228 return(0); 220 return 0;
229 case CARD_TEST: 221 case CARD_TEST:
230 return(0); 222 return 0;
231 } 223 }
232 return(0); 224 return 0;
233} 225}
234 226
235static struct pci_dev *niccy_dev __devinitdata = NULL; 227static struct pci_dev *niccy_dev __devinitdata = NULL;
@@ -237,8 +229,7 @@ static struct pci_dev *niccy_dev __devinitdata = NULL;
237static struct pnp_card *pnp_c __devinitdata = NULL; 229static struct pnp_card *pnp_c __devinitdata = NULL;
238#endif 230#endif
239 231
240int __devinit 232int __devinit setup_niccy(struct IsdnCard *card)
241setup_niccy(struct IsdnCard *card)
242{ 233{
243 struct IsdnCardState *cs = card->cs; 234 struct IsdnCardState *cs = card->cs;
244 char tmp[64]; 235 char tmp[64];
@@ -246,40 +237,44 @@ setup_niccy(struct IsdnCard *card)
246 strcpy(tmp, niccy_revision); 237 strcpy(tmp, niccy_revision);
247 printk(KERN_INFO "HiSax: Niccy driver Rev. %s\n", HiSax_getrev(tmp)); 238 printk(KERN_INFO "HiSax: Niccy driver Rev. %s\n", HiSax_getrev(tmp));
248 if (cs->typ != ISDN_CTYPE_NICCY) 239 if (cs->typ != ISDN_CTYPE_NICCY)
249 return (0); 240 return 0;
250#ifdef __ISAPNP__ 241#ifdef __ISAPNP__
251 if (!card->para[1] && isapnp_present()) { 242 if (!card->para[1] && isapnp_present()) {
252 struct pnp_dev *pnp_d = NULL; 243 struct pnp_dev *pnp_d = NULL;
253 int err; 244 int err;
254 245
255 if ((pnp_c = pnp_find_card( 246 pnp_c = pnp_find_card(ISAPNP_VENDOR('S', 'D', 'A'),
256 ISAPNP_VENDOR('S', 'D', 'A'), 247 ISAPNP_FUNCTION(0x0150), pnp_c);
257 ISAPNP_FUNCTION(0x0150), pnp_c))) { 248 if (pnp_c) {
258 if (!(pnp_d = pnp_find_dev(pnp_c, 249 pnp_d = pnp_find_dev(pnp_c,
259 ISAPNP_VENDOR('S', 'D', 'A'), 250 ISAPNP_VENDOR('S', 'D', 'A'),
260 ISAPNP_FUNCTION(0x0150), pnp_d))) { 251 ISAPNP_FUNCTION(0x0150), pnp_d);
261 printk(KERN_ERR "NiccyPnP: PnP error card found, no device\n"); 252 if (!pnp_d) {
262 return (0); 253 printk(KERN_ERR "NiccyPnP: PnP error card "
254 "found, no device\n");
255 return 0;
263 } 256 }
264 pnp_disable_dev(pnp_d); 257 pnp_disable_dev(pnp_d);
265 err = pnp_activate_dev(pnp_d); 258 err = pnp_activate_dev(pnp_d);
266 if (err<0) { 259 if (err < 0) {
267 printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n", 260 printk(KERN_WARNING "%s: pnp_activate_dev "
268 __FUNCTION__, err); 261 "ret(%d)\n", __FUNCTION__, err);
269 return(0); 262 return 0;
270 } 263 }
271 card->para[1] = pnp_port_start(pnp_d, 0); 264 card->para[1] = pnp_port_start(pnp_d, 0);
272 card->para[2] = pnp_port_start(pnp_d, 1); 265 card->para[2] = pnp_port_start(pnp_d, 1);
273 card->para[0] = pnp_irq(pnp_d, 0); 266 card->para[0] = pnp_irq(pnp_d, 0);
274 if (!card->para[0] || !card->para[1] || !card->para[2]) { 267 if (!card->para[0] || !card->para[1] ||
275 printk(KERN_ERR "NiccyPnP:some resources are missing %ld/%lx/%lx\n", 268 !card->para[2]) {
276 card->para[0], card->para[1], card->para[2]); 269 printk(KERN_ERR "NiccyPnP:some resources are "
270 "missing %ld/%lx/%lx\n",
271 card->para[0], card->para[1],
272 card->para[2]);
277 pnp_disable_dev(pnp_d); 273 pnp_disable_dev(pnp_d);
278 return(0); 274 return 0;
279 } 275 }
280 } else { 276 } else
281 printk(KERN_INFO "NiccyPnP: no ISAPnP card found\n"); 277 printk(KERN_INFO "NiccyPnP: no ISAPnP card found\n");
282 }
283 } 278 }
284#endif 279#endif
285 if (card->para[1]) { 280 if (card->para[1]) {
@@ -291,50 +286,51 @@ setup_niccy(struct IsdnCard *card)
291 cs->subtyp = NICCY_PNP; 286 cs->subtyp = NICCY_PNP;
292 cs->irq = card->para[0]; 287 cs->irq = card->para[0];
293 if (!request_region(cs->hw.niccy.isac, 2, "niccy data")) { 288 if (!request_region(cs->hw.niccy.isac, 2, "niccy data")) {
294 printk(KERN_WARNING 289 printk(KERN_WARNING "HiSax: %s data port %x-%x "
295 "HiSax: %s data port %x-%x already in use\n", 290 "already in use\n", CardType[card->typ],
296 CardType[card->typ], 291 cs->hw.niccy.isac, cs->hw.niccy.isac + 1);
297 cs->hw.niccy.isac, 292 return 0;
298 cs->hw.niccy.isac + 1);
299 return (0);
300 } 293 }
301 if (!request_region(cs->hw.niccy.isac_ale, 2, "niccy addr")) { 294 if (!request_region(cs->hw.niccy.isac_ale, 2, "niccy addr")) {
302 printk(KERN_WARNING 295 printk(KERN_WARNING "HiSax: %s address port %x-%x "
303 "HiSax: %s address port %x-%x already in use\n", 296 "already in use\n", CardType[card->typ],
304 CardType[card->typ],
305 cs->hw.niccy.isac_ale, 297 cs->hw.niccy.isac_ale,
306 cs->hw.niccy.isac_ale + 1); 298 cs->hw.niccy.isac_ale + 1);
307 release_region(cs->hw.niccy.isac, 2); 299 release_region(cs->hw.niccy.isac, 2);
308 return (0); 300 return 0;
309 } 301 }
310 } else { 302 } else {
311#ifdef CONFIG_PCI 303#ifdef CONFIG_PCI
312 u_int pci_ioaddr; 304 u_int pci_ioaddr;
313 cs->subtyp = 0; 305 cs->subtyp = 0;
314 if ((niccy_dev = pci_find_device(PCI_VENDOR_ID_SATSAGEM, 306 if ((niccy_dev = pci_find_device(PCI_VENDOR_ID_SATSAGEM,
315 PCI_DEVICE_ID_SATSAGEM_NICCY, niccy_dev))) { 307 PCI_DEVICE_ID_SATSAGEM_NICCY,
308 niccy_dev))) {
316 if (pci_enable_device(niccy_dev)) 309 if (pci_enable_device(niccy_dev))
317 return(0); 310 return 0;
318 /* get IRQ */ 311 /* get IRQ */
319 if (!niccy_dev->irq) { 312 if (!niccy_dev->irq) {
320 printk(KERN_WARNING "Niccy: No IRQ for PCI card found\n"); 313 printk(KERN_WARNING
321 return(0); 314 "Niccy: No IRQ for PCI card found\n");
315 return 0;
322 } 316 }
323 cs->irq = niccy_dev->irq; 317 cs->irq = niccy_dev->irq;
324 cs->hw.niccy.cfg_reg = pci_resource_start(niccy_dev, 0); 318 cs->hw.niccy.cfg_reg = pci_resource_start(niccy_dev, 0);
325 if (!cs->hw.niccy.cfg_reg) { 319 if (!cs->hw.niccy.cfg_reg) {
326 printk(KERN_WARNING "Niccy: No IO-Adr for PCI cfg found\n"); 320 printk(KERN_WARNING
327 return(0); 321 "Niccy: No IO-Adr for PCI cfg found\n");
322 return 0;
328 } 323 }
329 pci_ioaddr = pci_resource_start(niccy_dev, 1); 324 pci_ioaddr = pci_resource_start(niccy_dev, 1);
330 if (!pci_ioaddr) { 325 if (!pci_ioaddr) {
331 printk(KERN_WARNING "Niccy: No IO-Adr for PCI card found\n"); 326 printk(KERN_WARNING
332 return(0); 327 "Niccy: No IO-Adr for PCI card found\n");
328 return 0;
333 } 329 }
334 cs->subtyp = NICCY_PCI; 330 cs->subtyp = NICCY_PCI;
335 } else { 331 } else {
336 printk(KERN_WARNING "Niccy: No PCI card found\n"); 332 printk(KERN_WARNING "Niccy: No PCI card found\n");
337 return(0); 333 return 0;
338 } 334 }
339 cs->irq_flags |= IRQF_SHARED; 335 cs->irq_flags |= IRQF_SHARED;
340 cs->hw.niccy.isac = pci_ioaddr + ISAC_PCI_DATA; 336 cs->hw.niccy.isac = pci_ioaddr + ISAC_PCI_DATA;
@@ -343,29 +339,28 @@ setup_niccy(struct IsdnCard *card)
343 cs->hw.niccy.hscx_ale = pci_ioaddr + HSCX_PCI_ADDR; 339 cs->hw.niccy.hscx_ale = pci_ioaddr + HSCX_PCI_ADDR;
344 if (!request_region(cs->hw.niccy.isac, 4, "niccy")) { 340 if (!request_region(cs->hw.niccy.isac, 4, "niccy")) {
345 printk(KERN_WARNING 341 printk(KERN_WARNING
346 "HiSax: %s data port %x-%x already in use\n", 342 "HiSax: %s data port %x-%x already in use\n",
347 CardType[card->typ], 343 CardType[card->typ],
348 cs->hw.niccy.isac, 344 cs->hw.niccy.isac, cs->hw.niccy.isac + 4);
349 cs->hw.niccy.isac + 4); 345 return 0;
350 return (0);
351 } 346 }
352 if (!request_region(cs->hw.niccy.cfg_reg, 0x40, "niccy pci")) { 347 if (!request_region(cs->hw.niccy.cfg_reg, 0x40, "niccy pci")) {
353 printk(KERN_WARNING 348 printk(KERN_WARNING
354 "HiSax: %s pci port %x-%x already in use\n", 349 "HiSax: %s pci port %x-%x already in use\n",
355 CardType[card->typ], 350 CardType[card->typ],
356 cs->hw.niccy.cfg_reg, 351 cs->hw.niccy.cfg_reg,
357 cs->hw.niccy.cfg_reg + 0x40); 352 cs->hw.niccy.cfg_reg + 0x40);
358 release_region(cs->hw.niccy.isac, 4); 353 release_region(cs->hw.niccy.isac, 4);
359 return (0); 354 return 0;
360 } 355 }
361#else 356#else
362 printk(KERN_WARNING "Niccy: io0 0 and NO_PCI_BIOS\n"); 357 printk(KERN_WARNING "Niccy: io0 0 and NO_PCI_BIOS\n");
363 printk(KERN_WARNING "Niccy: unable to config NICCY PCI\n"); 358 printk(KERN_WARNING "Niccy: unable to config NICCY PCI\n");
364 return (0); 359 return 0;
365#endif /* CONFIG_PCI */ 360#endif /* CONFIG_PCI */
366 } 361 }
367 printk(KERN_INFO "HiSax: %s %s config irq:%d data:0x%X ale:0x%X\n", 362 printk(KERN_INFO "HiSax: %s %s config irq:%d data:0x%X ale:0x%X\n",
368 CardType[cs->typ], (cs->subtyp==1) ? "PnP":"PCI", 363 CardType[cs->typ], (cs->subtyp == 1) ? "PnP" : "PCI",
369 cs->irq, cs->hw.niccy.isac, cs->hw.niccy.isac_ale); 364 cs->irq, cs->hw.niccy.isac, cs->hw.niccy.isac_ale);
370 setup_isac(cs); 365 setup_isac(cs);
371 cs->readisac = &ReadISAC; 366 cs->readisac = &ReadISAC;
@@ -379,10 +374,10 @@ setup_niccy(struct IsdnCard *card)
379 cs->irq_func = &niccy_interrupt; 374 cs->irq_func = &niccy_interrupt;
380 ISACVersion(cs, "Niccy:"); 375 ISACVersion(cs, "Niccy:");
381 if (HscxVersion(cs, "Niccy:")) { 376 if (HscxVersion(cs, "Niccy:")) {
382 printk(KERN_WARNING 377 printk(KERN_WARNING "Niccy: wrong HSCX versions check IO "
383 "Niccy: wrong HSCX versions check IO address\n"); 378 "address\n");
384 release_io_niccy(cs); 379 release_io_niccy(cs);
385 return (0); 380 return 0;
386 } 381 }
387 return (1); 382 return 1;
388} 383}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 7fc692a8f5b0..3df0e7a07c46 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -18,7 +18,7 @@ config IBM_ASM
18 service processor board as a regular serial port. To make use of 18 service processor board as a regular serial port. To make use of
19 this feature serial driver support (CONFIG_SERIAL_8250) must be 19 this feature serial driver support (CONFIG_SERIAL_8250) must be
20 enabled. 20 enabled.
21 21
22 WARNING: This software may not be supported or function 22 WARNING: This software may not be supported or function
23 correctly on your IBM server. Please consult the IBM ServerProven 23 correctly on your IBM server. Please consult the IBM ServerProven
24 website <http://www.pc.ibm.com/ww/eserver/xseries/serverproven> for 24 website <http://www.pc.ibm.com/ww/eserver/xseries/serverproven> for
@@ -28,5 +28,33 @@ config IBM_ASM
28 28
29 If unsure, say N. 29 If unsure, say N.
30 30
31endmenu 31config TIFM_CORE
32 tristate "TI Flash Media interface support (EXPERIMENTAL)"
33 depends on EXPERIMENTAL
34 help
35 If you want support for Texas Instruments(R) Flash Media adapters
36 you should select this option and then also choose an appropriate
37 host adapter, such as 'TI Flash Media PCI74xx/PCI76xx host adapter
38 support', if you have a TI PCI74xx compatible card reader, for
39 example.
40 You will also have to select some flash card format drivers. MMC/SD
41 cards are supported via 'MMC/SD Card support: TI Flash Media MMC/SD
42 Interface support (MMC_TIFM_SD)'.
43
44 To compile this driver as a module, choose M here: the module will
45 be called tifm_core.
32 46
47config TIFM_7XX1
48 tristate "TI Flash Media PCI74xx/PCI76xx host adapter support (EXPERIMENTAL)"
49 depends on PCI && TIFM_CORE && EXPERIMENTAL
50 default TIFM_CORE
51 help
52 This option enables support for Texas Instruments(R) PCI74xx and
53 PCI76xx families of Flash Media adapters, found in many laptops.
54 To make actual use of the device, you will have to select some
55 flash card format drivers, as outlined in the TIFM_CORE Help.
56
57 To compile this driver as a module, choose M here: the module will
58 be called tifm_7xx1.
59
60endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index c1bf1fb04c5c..d65ece76095a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -6,3 +6,5 @@ obj- := misc.o # Dummy rule to force built-in.o to be made
6obj-$(CONFIG_IBM_ASM) += ibmasm/ 6obj-$(CONFIG_IBM_ASM) += ibmasm/
7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ 7obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
8obj-$(CONFIG_LKDTM) += lkdtm.o 8obj-$(CONFIG_LKDTM) += lkdtm.o
9obj-$(CONFIG_TIFM_CORE) += tifm_core.o
10obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
new file mode 100644
index 000000000000..a7ed30446185
--- /dev/null
+++ b/drivers/misc/tifm_7xx1.c
@@ -0,0 +1,437 @@
1/*
2 * tifm_7xx1.c - TI FlashMedia driver
3 *
4 * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/tifm.h>
13#include <linux/dma-mapping.h>
14
15#define DRIVER_NAME "tifm_7xx1"
16#define DRIVER_VERSION "0.6"
17
18static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
19{
20 int cnt;
21 unsigned long flags;
22
23 spin_lock_irqsave(&fm->lock, flags);
24 if (!fm->inhibit_new_cards) {
25 for (cnt = 0; cnt < fm->max_sockets; cnt++) {
26 if (fm->sockets[cnt] == sock) {
27 fm->remove_mask |= (1 << cnt);
28 queue_work(fm->wq, &fm->media_remover);
29 break;
30 }
31 }
32 }
33 spin_unlock_irqrestore(&fm->lock, flags);
34}
35
36static void tifm_7xx1_remove_media(void *adapter)
37{
38 struct tifm_adapter *fm = adapter;
39 unsigned long flags;
40 int cnt;
41 struct tifm_dev *sock;
42
43 if (!class_device_get(&fm->cdev))
44 return;
45 spin_lock_irqsave(&fm->lock, flags);
46 for (cnt = 0; cnt < fm->max_sockets; cnt++) {
47 if (fm->sockets[cnt] && (fm->remove_mask & (1 << cnt))) {
48 printk(KERN_INFO DRIVER_NAME
49 ": demand removing card from socket %d\n", cnt);
50 sock = fm->sockets[cnt];
51 fm->sockets[cnt] = 0;
52 fm->remove_mask &= ~(1 << cnt);
53
54 writel(0x0e00, sock->addr + SOCK_CONTROL);
55
56 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
57 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
58 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
59 fm->addr + FM_SET_INTERRUPT_ENABLE);
60
61 spin_unlock_irqrestore(&fm->lock, flags);
62 device_unregister(&sock->dev);
63 spin_lock_irqsave(&fm->lock, flags);
64 }
65 }
66 spin_unlock_irqrestore(&fm->lock, flags);
67 class_device_put(&fm->cdev);
68}
69
70static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id, struct pt_regs *regs)
71{
72 struct tifm_adapter *fm = dev_id;
73 unsigned int irq_status;
74 unsigned int sock_irq_status, cnt;
75
76 spin_lock(&fm->lock);
77 irq_status = readl(fm->addr + FM_INTERRUPT_STATUS);
78 if (irq_status == 0 || irq_status == (~0)) {
79 spin_unlock(&fm->lock);
80 return IRQ_NONE;
81 }
82
83 if (irq_status & TIFM_IRQ_ENABLE) {
84 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
85
86 for (cnt = 0; cnt < fm->max_sockets; cnt++) {
87 sock_irq_status = (irq_status >> cnt) &
88 (TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK);
89
90 if (fm->sockets[cnt]) {
91 if (sock_irq_status &&
92 fm->sockets[cnt]->signal_irq)
93 sock_irq_status = fm->sockets[cnt]->
94 signal_irq(fm->sockets[cnt],
95 sock_irq_status);
96
97 if (irq_status & (1 << cnt))
98 fm->remove_mask |= 1 << cnt;
99 } else {
100 if (irq_status & (1 << cnt))
101 fm->insert_mask |= 1 << cnt;
102 }
103 }
104 }
105 writel(irq_status, fm->addr + FM_INTERRUPT_STATUS);
106
107 if (!fm->inhibit_new_cards) {
108 if (!fm->remove_mask && !fm->insert_mask) {
109 writel(TIFM_IRQ_ENABLE,
110 fm->addr + FM_SET_INTERRUPT_ENABLE);
111 } else {
112 queue_work(fm->wq, &fm->media_remover);
113 queue_work(fm->wq, &fm->media_inserter);
114 }
115 }
116
117 spin_unlock(&fm->lock);
118 return IRQ_HANDLED;
119}
120
121static tifm_media_id tifm_7xx1_toggle_sock_power(char *sock_addr, int is_x2)
122{
123 unsigned int s_state;
124 int cnt;
125
126 writel(0x0e00, sock_addr + SOCK_CONTROL);
127
128 for (cnt = 0; cnt < 100; cnt++) {
129 if (!(TIFM_SOCK_STATE_POWERED &
130 readl(sock_addr + SOCK_PRESENT_STATE)))
131 break;
132 msleep(10);
133 }
134
135 s_state = readl(sock_addr + SOCK_PRESENT_STATE);
136 if (!(TIFM_SOCK_STATE_OCCUPIED & s_state))
137 return FM_NULL;
138
139 if (is_x2) {
140 writel((s_state & 7) | 0x0c00, sock_addr + SOCK_CONTROL);
141 } else {
142 // SmartMedia cards need extra 40 msec
143 if (((readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7) == 1)
144 msleep(40);
145 writel(readl(sock_addr + SOCK_CONTROL) | TIFM_CTRL_LED,
146 sock_addr + SOCK_CONTROL);
147 msleep(10);
148 writel((s_state & 0x7) | 0x0c00 | TIFM_CTRL_LED,
149 sock_addr + SOCK_CONTROL);
150 }
151
152 for (cnt = 0; cnt < 100; cnt++) {
153 if ((TIFM_SOCK_STATE_POWERED &
154 readl(sock_addr + SOCK_PRESENT_STATE)))
155 break;
156 msleep(10);
157 }
158
159 if (!is_x2)
160 writel(readl(sock_addr + SOCK_CONTROL) & (~TIFM_CTRL_LED),
161 sock_addr + SOCK_CONTROL);
162
163 return (readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7;
164}
165
166inline static char *tifm_7xx1_sock_addr(char *base_addr, unsigned int sock_num)
167{
168 return base_addr + ((sock_num + 1) << 10);
169}
170
171static void tifm_7xx1_insert_media(void *adapter)
172{
173 struct tifm_adapter *fm = adapter;
174 unsigned long flags;
175 tifm_media_id media_id;
176 char *card_name = "xx";
177 int cnt, ok_to_register;
178 unsigned int insert_mask;
179 struct tifm_dev *new_sock = 0;
180
181 if (!class_device_get(&fm->cdev))
182 return;
183 spin_lock_irqsave(&fm->lock, flags);
184 insert_mask = fm->insert_mask;
185 fm->insert_mask = 0;
186 if (fm->inhibit_new_cards) {
187 spin_unlock_irqrestore(&fm->lock, flags);
188 class_device_put(&fm->cdev);
189 return;
190 }
191 spin_unlock_irqrestore(&fm->lock, flags);
192
193 for (cnt = 0; cnt < fm->max_sockets; cnt++) {
194 if (!(insert_mask & (1 << cnt)))
195 continue;
196
197 media_id = tifm_7xx1_toggle_sock_power(tifm_7xx1_sock_addr(fm->addr, cnt),
198 fm->max_sockets == 2);
199 if (media_id) {
200 ok_to_register = 0;
201 new_sock = tifm_alloc_device(fm, cnt);
202 if (new_sock) {
203 new_sock->addr = tifm_7xx1_sock_addr(fm->addr,
204 cnt);
205 new_sock->media_id = media_id;
206 switch (media_id) {
207 case 1:
208 card_name = "xd";
209 break;
210 case 2:
211 card_name = "ms";
212 break;
213 case 3:
214 card_name = "sd";
215 break;
216 default:
217 break;
218 }
219 snprintf(new_sock->dev.bus_id, BUS_ID_SIZE,
220 "tifm_%s%u:%u", card_name, fm->id, cnt);
221 printk(KERN_INFO DRIVER_NAME
222 ": %s card detected in socket %d\n",
223 card_name, cnt);
224 spin_lock_irqsave(&fm->lock, flags);
225 if (!fm->sockets[cnt]) {
226 fm->sockets[cnt] = new_sock;
227 ok_to_register = 1;
228 }
229 spin_unlock_irqrestore(&fm->lock, flags);
230 if (!ok_to_register ||
231 device_register(&new_sock->dev)) {
232 spin_lock_irqsave(&fm->lock, flags);
233 fm->sockets[cnt] = 0;
234 spin_unlock_irqrestore(&fm->lock,
235 flags);
236 tifm_free_device(&new_sock->dev);
237 }
238 }
239 }
240 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
241 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
242 writel((TIFM_IRQ_FIFOMASK | TIFM_IRQ_CARDMASK) << cnt,
243 fm->addr + FM_SET_INTERRUPT_ENABLE);
244 }
245
246 writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
247 class_device_put(&fm->cdev);
248}
249
250static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
251{
252 struct tifm_adapter *fm = pci_get_drvdata(dev);
253 unsigned long flags;
254
255 spin_lock_irqsave(&fm->lock, flags);
256 fm->inhibit_new_cards = 1;
257 fm->remove_mask = 0xf;
258 fm->insert_mask = 0;
259 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
260 spin_unlock_irqrestore(&fm->lock, flags);
261 flush_workqueue(fm->wq);
262
263 tifm_7xx1_remove_media(fm);
264
265 pci_set_power_state(dev, PCI_D3hot);
266 pci_disable_device(dev);
267 pci_save_state(dev);
268 return 0;
269}
270
271static int tifm_7xx1_resume(struct pci_dev *dev)
272{
273 struct tifm_adapter *fm = pci_get_drvdata(dev);
274 unsigned long flags;
275
276 pci_restore_state(dev);
277 pci_enable_device(dev);
278 pci_set_power_state(dev, PCI_D0);
279 pci_set_master(dev);
280
281 spin_lock_irqsave(&fm->lock, flags);
282 fm->inhibit_new_cards = 0;
283 writel(TIFM_IRQ_SETALL, fm->addr + FM_INTERRUPT_STATUS);
284 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
285 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK,
286 fm->addr + FM_SET_INTERRUPT_ENABLE);
287 fm->insert_mask = 0xf;
288 spin_unlock_irqrestore(&fm->lock, flags);
289 return 0;
290}
291
292static int tifm_7xx1_probe(struct pci_dev *dev,
293 const struct pci_device_id *dev_id)
294{
295 struct tifm_adapter *fm;
296 int pci_dev_busy = 0;
297 int rc;
298
299 rc = pci_set_dma_mask(dev, DMA_32BIT_MASK);
300 if (rc)
301 return rc;
302
303 rc = pci_enable_device(dev);
304 if (rc)
305 return rc;
306
307 pci_set_master(dev);
308
309 rc = pci_request_regions(dev, DRIVER_NAME);
310 if (rc) {
311 pci_dev_busy = 1;
312 goto err_out;
313 }
314
315 pci_intx(dev, 1);
316
317 fm = tifm_alloc_adapter();
318 if (!fm) {
319 rc = -ENOMEM;
320 goto err_out_int;
321 }
322
323 fm->dev = &dev->dev;
324 fm->max_sockets = (dev->device == 0x803B) ? 2 : 4;
325 fm->sockets = kzalloc(sizeof(struct tifm_dev*) * fm->max_sockets,
326 GFP_KERNEL);
327 if (!fm->sockets)
328 goto err_out_free;
329
330 INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm);
331 INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm);
332 fm->eject = tifm_7xx1_eject;
333 pci_set_drvdata(dev, fm);
334
335 fm->addr = ioremap(pci_resource_start(dev, 0),
336 pci_resource_len(dev, 0));
337 if (!fm->addr)
338 goto err_out_free;
339
340 rc = request_irq(dev->irq, tifm_7xx1_isr, SA_SHIRQ, DRIVER_NAME, fm);
341 if (rc)
342 goto err_out_unmap;
343
344 rc = tifm_add_adapter(fm);
345 if (rc)
346 goto err_out_irq;
347
348 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
349 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SETALLSOCK,
350 fm->addr + FM_SET_INTERRUPT_ENABLE);
351
352 fm->insert_mask = 0xf;
353
354 return 0;
355
356err_out_irq:
357 free_irq(dev->irq, fm);
358err_out_unmap:
359 iounmap(fm->addr);
360err_out_free:
361 pci_set_drvdata(dev, NULL);
362 tifm_free_adapter(fm);
363err_out_int:
364 pci_intx(dev, 0);
365 pci_release_regions(dev);
366err_out:
367 if (!pci_dev_busy)
368 pci_disable_device(dev);
369 return rc;
370}
371
372static void tifm_7xx1_remove(struct pci_dev *dev)
373{
374 struct tifm_adapter *fm = pci_get_drvdata(dev);
375 unsigned long flags;
376
377 spin_lock_irqsave(&fm->lock, flags);
378 fm->inhibit_new_cards = 1;
379 fm->remove_mask = 0xf;
380 fm->insert_mask = 0;
381 writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
382 spin_unlock_irqrestore(&fm->lock, flags);
383
384 flush_workqueue(fm->wq);
385
386 tifm_7xx1_remove_media(fm);
387
388 writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
389 free_irq(dev->irq, fm);
390
391 tifm_remove_adapter(fm);
392
393 pci_set_drvdata(dev, 0);
394
395 iounmap(fm->addr);
396 pci_intx(dev, 0);
397 pci_release_regions(dev);
398
399 pci_disable_device(dev);
400 tifm_free_adapter(fm);
401}
402
403static struct pci_device_id tifm_7xx1_pci_tbl [] = {
404 { PCI_VENDOR_ID_TI, 0x8033, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
405 0 }, /* xx21 - the one I have */
406 { PCI_VENDOR_ID_TI, 0x803B, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
407 0 }, /* xx12 - should be also supported */
408 { }
409};
410
411static struct pci_driver tifm_7xx1_driver = {
412 .name = DRIVER_NAME,
413 .id_table = tifm_7xx1_pci_tbl,
414 .probe = tifm_7xx1_probe,
415 .remove = tifm_7xx1_remove,
416 .suspend = tifm_7xx1_suspend,
417 .resume = tifm_7xx1_resume,
418};
419
420static int __init tifm_7xx1_init(void)
421{
422 return pci_register_driver(&tifm_7xx1_driver);
423}
424
425static void __exit tifm_7xx1_exit(void)
426{
427 pci_unregister_driver(&tifm_7xx1_driver);
428}
429
430MODULE_AUTHOR("Alex Dubov");
431MODULE_DESCRIPTION("TI FlashMedia host driver");
432MODULE_LICENSE("GPL");
433MODULE_DEVICE_TABLE(pci, tifm_7xx1_pci_tbl);
434MODULE_VERSION(DRIVER_VERSION);
435
436module_init(tifm_7xx1_init);
437module_exit(tifm_7xx1_exit);
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
new file mode 100644
index 000000000000..cca5f8522469
--- /dev/null
+++ b/drivers/misc/tifm_core.c
@@ -0,0 +1,272 @@
1/*
2 * tifm_core.c - TI FlashMedia driver
3 *
4 * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/tifm.h>
13#include <linux/init.h>
14#include <linux/idr.h>
15
16#define DRIVER_NAME "tifm_core"
17#define DRIVER_VERSION "0.6"
18
19static DEFINE_IDR(tifm_adapter_idr);
20static DEFINE_SPINLOCK(tifm_adapter_lock);
21
22static tifm_media_id *tifm_device_match(tifm_media_id *ids,
23 struct tifm_dev *dev)
24{
25 while (*ids) {
26 if (dev->media_id == *ids)
27 return ids;
28 ids++;
29 }
30 return NULL;
31}
32
33static int tifm_match(struct device *dev, struct device_driver *drv)
34{
35 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
36 struct tifm_driver *fm_drv;
37
38 fm_drv = container_of(drv, struct tifm_driver, driver);
39 if (!fm_drv->id_table)
40 return -EINVAL;
41 if (tifm_device_match(fm_drv->id_table, fm_dev))
42 return 1;
43 return -ENODEV;
44}
45
46static int tifm_uevent(struct device *dev, char **envp, int num_envp,
47 char *buffer, int buffer_size)
48{
49 struct tifm_dev *fm_dev;
50 int i = 0;
51 int length = 0;
52 const char *card_type_name[] = {"INV", "SM", "MS", "SD"};
53
54 if (!dev || !(fm_dev = container_of(dev, struct tifm_dev, dev)))
55 return -ENODEV;
56 if (add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
57 "TIFM_CARD_TYPE=%s", card_type_name[fm_dev->media_id]))
58 return -ENOMEM;
59
60 return 0;
61}
62
63static struct bus_type tifm_bus_type = {
64 .name = "tifm",
65 .match = tifm_match,
66 .uevent = tifm_uevent,
67};
68
69static void tifm_free(struct class_device *cdev)
70{
71 struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev);
72
73 kfree(fm->sockets);
74 if (fm->wq)
75 destroy_workqueue(fm->wq);
76 kfree(fm);
77}
78
79static struct class tifm_adapter_class = {
80 .name = "tifm_adapter",
81 .release = tifm_free
82};
83
84struct tifm_adapter *tifm_alloc_adapter(void)
85{
86 struct tifm_adapter *fm;
87
88 fm = kzalloc(sizeof(struct tifm_adapter), GFP_KERNEL);
89 if (fm) {
90 fm->cdev.class = &tifm_adapter_class;
91 spin_lock_init(&fm->lock);
92 class_device_initialize(&fm->cdev);
93 }
94 return fm;
95}
96EXPORT_SYMBOL(tifm_alloc_adapter);
97
98void tifm_free_adapter(struct tifm_adapter *fm)
99{
100 class_device_put(&fm->cdev);
101}
102EXPORT_SYMBOL(tifm_free_adapter);
103
104int tifm_add_adapter(struct tifm_adapter *fm)
105{
106 int rc;
107
108 if (!idr_pre_get(&tifm_adapter_idr, GFP_KERNEL))
109 return -ENOMEM;
110
111 spin_lock(&tifm_adapter_lock);
112 rc = idr_get_new(&tifm_adapter_idr, fm, &fm->id);
113 spin_unlock(&tifm_adapter_lock);
114 if (!rc) {
115 snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id);
116 strncpy(fm->wq_name, fm->cdev.class_id, KOBJ_NAME_LEN);
117
118 fm->wq = create_singlethread_workqueue(fm->wq_name);
119 if (fm->wq)
120 return class_device_add(&fm->cdev);
121
122 spin_lock(&tifm_adapter_lock);
123 idr_remove(&tifm_adapter_idr, fm->id);
124 spin_unlock(&tifm_adapter_lock);
125 rc = -ENOMEM;
126 }
127 return rc;
128}
129EXPORT_SYMBOL(tifm_add_adapter);
130
131void tifm_remove_adapter(struct tifm_adapter *fm)
132{
133 class_device_del(&fm->cdev);
134
135 spin_lock(&tifm_adapter_lock);
136 idr_remove(&tifm_adapter_idr, fm->id);
137 spin_unlock(&tifm_adapter_lock);
138}
139EXPORT_SYMBOL(tifm_remove_adapter);
140
141void tifm_free_device(struct device *dev)
142{
143 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
144 if (fm_dev->wq)
145 destroy_workqueue(fm_dev->wq);
146 kfree(fm_dev);
147}
148EXPORT_SYMBOL(tifm_free_device);
149
150struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id)
151{
152 struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL);
153
154 if (dev) {
155 spin_lock_init(&dev->lock);
156 snprintf(dev->wq_name, KOBJ_NAME_LEN, "tifm%u:%u", fm->id, id);
157 dev->wq = create_singlethread_workqueue(dev->wq_name);
158 if (!dev->wq) {
159 kfree(dev);
160 return 0;
161 }
162 dev->dev.parent = fm->dev;
163 dev->dev.bus = &tifm_bus_type;
164 dev->dev.release = tifm_free_device;
165 }
166 return dev;
167}
168EXPORT_SYMBOL(tifm_alloc_device);
169
170void tifm_eject(struct tifm_dev *sock)
171{
172 struct tifm_adapter *fm = dev_get_drvdata(sock->dev.parent);
173 fm->eject(fm, sock);
174}
175EXPORT_SYMBOL(tifm_eject);
176
177int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
178 int direction)
179{
180 return pci_map_sg(to_pci_dev(sock->dev.parent), sg, nents, direction);
181}
182EXPORT_SYMBOL(tifm_map_sg);
183
184void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
185 int direction)
186{
187 pci_unmap_sg(to_pci_dev(sock->dev.parent), sg, nents, direction);
188}
189EXPORT_SYMBOL(tifm_unmap_sg);
190
191static int tifm_device_probe(struct device *dev)
192{
193 struct tifm_driver *drv;
194 struct tifm_dev *fm_dev;
195 int rc = 0;
196 const tifm_media_id *id;
197
198 drv = container_of(dev->driver, struct tifm_driver, driver);
199 fm_dev = container_of(dev, struct tifm_dev, dev);
200 get_device(dev);
201 if (!fm_dev->drv && drv->probe && drv->id_table) {
202 rc = -ENODEV;
203 id = tifm_device_match(drv->id_table, fm_dev);
204 if (id)
205 rc = drv->probe(fm_dev);
206 if (rc >= 0) {
207 rc = 0;
208 fm_dev->drv = drv;
209 }
210 }
211 if (rc)
212 put_device(dev);
213 return rc;
214}
215
216static int tifm_device_remove(struct device *dev)
217{
218 struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
219 struct tifm_driver *drv = fm_dev->drv;
220
221 if (drv) {
222 if (drv->remove) drv->remove(fm_dev);
223 fm_dev->drv = 0;
224 }
225
226 put_device(dev);
227 return 0;
228}
229
230int tifm_register_driver(struct tifm_driver *drv)
231{
232 drv->driver.bus = &tifm_bus_type;
233 drv->driver.probe = tifm_device_probe;
234 drv->driver.remove = tifm_device_remove;
235
236 return driver_register(&drv->driver);
237}
238EXPORT_SYMBOL(tifm_register_driver);
239
240void tifm_unregister_driver(struct tifm_driver *drv)
241{
242 driver_unregister(&drv->driver);
243}
244EXPORT_SYMBOL(tifm_unregister_driver);
245
246static int __init tifm_init(void)
247{
248 int rc = bus_register(&tifm_bus_type);
249
250 if (!rc) {
251 rc = class_register(&tifm_adapter_class);
252 if (rc)
253 bus_unregister(&tifm_bus_type);
254 }
255
256 return rc;
257}
258
259static void __exit tifm_exit(void)
260{
261 class_unregister(&tifm_adapter_class);
262 bus_unregister(&tifm_bus_type);
263}
264
265subsys_initcall(tifm_init);
266module_exit(tifm_exit);
267
268MODULE_LICENSE("GPL");
269MODULE_AUTHOR("Alex Dubov");
270MODULE_DESCRIPTION("TI FlashMedia core driver");
271MODULE_LICENSE("GPL");
272MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index f540bd88dc5a..ea41852ec8cd 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -109,4 +109,20 @@ config MMC_IMX
109 109
110 If unsure, say N. 110 If unsure, say N.
111 111
112config MMC_TIFM_SD
113 tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
114 depends on MMC && EXPERIMENTAL
115 select TIFM_CORE
116 help
117 Say Y here if you want to be able to access MMC/SD cards with
118 the Texas Instruments(R) Flash Media card reader, found in many
119 laptops.
120 This option 'selects' (turns on, enables) 'TIFM_CORE', but you
121 probably also need appropriate card reader host adapter, such as
122 'Misc devices: TI Flash Media PCI74xx/PCI76xx host adapter support
123 (TIFM_7XX1)'.
124
125 To compile this driver as a module, choose M here: the
126 module will be called tifm_sd.
127
112endmenu 128endmenu
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index b1f6e03e7aa9..acfd4de0aba5 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_MMC_WBSD) += wbsd.o
23obj-$(CONFIG_MMC_AU1X) += au1xmmc.o 23obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
24obj-$(CONFIG_MMC_OMAP) += omap.o 24obj-$(CONFIG_MMC_OMAP) += omap.o
25obj-$(CONFIG_MMC_AT91RM9200) += at91_mci.o 25obj-$(CONFIG_MMC_AT91RM9200) += at91_mci.o
26obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o
26 27
27mmc_core-y := mmc.o mmc_sysfs.o 28mmc_core-y := mmc.o mmc_sysfs.o
28mmc_core-$(CONFIG_BLOCK) += mmc_queue.o 29mmc_core-$(CONFIG_BLOCK) += mmc_queue.o
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 5b9caa7978d3..ee8863c123e3 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -1166,9 +1166,9 @@ static void mmc_setup(struct mmc_host *host)
1166void mmc_detect_change(struct mmc_host *host, unsigned long delay) 1166void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1167{ 1167{
1168 if (delay) 1168 if (delay)
1169 schedule_delayed_work(&host->detect, delay); 1169 mmc_schedule_delayed_work(&host->detect, delay);
1170 else 1170 else
1171 schedule_work(&host->detect); 1171 mmc_schedule_work(&host->detect);
1172} 1172}
1173 1173
1174EXPORT_SYMBOL(mmc_detect_change); 1174EXPORT_SYMBOL(mmc_detect_change);
@@ -1311,7 +1311,7 @@ EXPORT_SYMBOL(mmc_remove_host);
1311 */ 1311 */
1312void mmc_free_host(struct mmc_host *host) 1312void mmc_free_host(struct mmc_host *host)
1313{ 1313{
1314 flush_scheduled_work(); 1314 mmc_flush_scheduled_work();
1315 mmc_free_host_sysfs(host); 1315 mmc_free_host_sysfs(host);
1316} 1316}
1317 1317
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h
index 97bae00292fa..cd5e0ab3d84b 100644
--- a/drivers/mmc/mmc.h
+++ b/drivers/mmc/mmc.h
@@ -18,4 +18,8 @@ struct mmc_host *mmc_alloc_host_sysfs(int extra, struct device *dev);
18int mmc_add_host_sysfs(struct mmc_host *host); 18int mmc_add_host_sysfs(struct mmc_host *host);
19void mmc_remove_host_sysfs(struct mmc_host *host); 19void mmc_remove_host_sysfs(struct mmc_host *host);
20void mmc_free_host_sysfs(struct mmc_host *host); 20void mmc_free_host_sysfs(struct mmc_host *host);
21
22int mmc_schedule_work(struct work_struct *work);
23int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay);
24void mmc_flush_scheduled_work(void);
21#endif 25#endif
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index db0e8ad439a5..c1293f1bda87 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -158,13 +158,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
158{ 158{
159 struct mmc_blk_data *md = mq->data; 159 struct mmc_blk_data *md = mq->data;
160 struct mmc_card *card = md->queue.card; 160 struct mmc_card *card = md->queue.card;
161 struct mmc_blk_request brq;
161 int ret; 162 int ret;
162 163
163 if (mmc_card_claim_host(card)) 164 if (mmc_card_claim_host(card))
164 goto cmd_err; 165 goto cmd_err;
165 166
166 do { 167 do {
167 struct mmc_blk_request brq;
168 struct mmc_command cmd; 168 struct mmc_command cmd;
169 u32 readcmd, writecmd; 169 u32 readcmd, writecmd;
170 170
@@ -278,17 +278,27 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
278 cmd_err: 278 cmd_err:
279 mmc_card_release_host(card); 279 mmc_card_release_host(card);
280 280
281 ret = 1;
282
281 /* 283 /*
282 * This is a little draconian, but until we get proper 284 * For writes and where the host claims to support proper
283 * error handling sorted out here, its the best we can 285 * error reporting, we first ok the successful blocks.
284 * do - especially as some hosts have no idea how much 286 *
285 * data was transferred before the error occurred. 287 * For reads we just fail the entire chunk as that should
288 * be safe in all cases.
286 */ 289 */
290 if (rq_data_dir(req) != READ &&
291 (card->host->caps & MMC_CAP_MULTIWRITE)) {
292 spin_lock_irq(&md->lock);
293 ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
294 spin_unlock_irq(&md->lock);
295 }
296
287 spin_lock_irq(&md->lock); 297 spin_lock_irq(&md->lock);
288 do { 298 while (ret) {
289 ret = end_that_request_chunk(req, 0, 299 ret = end_that_request_chunk(req, 0,
290 req->current_nr_sectors << 9); 300 req->current_nr_sectors << 9);
291 } while (ret); 301 }
292 302
293 add_disk_randomness(req->rq_disk); 303 add_disk_randomness(req->rq_disk);
294 blkdev_dequeue_request(req); 304 blkdev_dequeue_request(req);
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index a2a35fd946ee..10cc9734eaa0 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/idr.h> 15#include <linux/idr.h>
16#include <linux/workqueue.h>
16 17
17#include <linux/mmc/card.h> 18#include <linux/mmc/card.h>
18#include <linux/mmc/host.h> 19#include <linux/mmc/host.h>
@@ -317,10 +318,41 @@ void mmc_free_host_sysfs(struct mmc_host *host)
317 class_device_put(&host->class_dev); 318 class_device_put(&host->class_dev);
318} 319}
319 320
321static struct workqueue_struct *workqueue;
322
323/*
324 * Internal function. Schedule work in the MMC work queue.
325 */
326int mmc_schedule_work(struct work_struct *work)
327{
328 return queue_work(workqueue, work);
329}
330
331/*
332 * Internal function. Schedule delayed work in the MMC work queue.
333 */
334int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay)
335{
336 return queue_delayed_work(workqueue, work, delay);
337}
338
339/*
340 * Internal function. Flush all scheduled work from the MMC work queue.
341 */
342void mmc_flush_scheduled_work(void)
343{
344 flush_workqueue(workqueue);
345}
320 346
321static int __init mmc_init(void) 347static int __init mmc_init(void)
322{ 348{
323 int ret = bus_register(&mmc_bus_type); 349 int ret;
350
351 workqueue = create_singlethread_workqueue("kmmcd");
352 if (!workqueue)
353 return -ENOMEM;
354
355 ret = bus_register(&mmc_bus_type);
324 if (ret == 0) { 356 if (ret == 0) {
325 ret = class_register(&mmc_host_class); 357 ret = class_register(&mmc_host_class);
326 if (ret) 358 if (ret)
@@ -333,6 +365,7 @@ static void __exit mmc_exit(void)
333{ 365{
334 class_unregister(&mmc_host_class); 366 class_unregister(&mmc_host_class);
335 bus_unregister(&mmc_bus_type); 367 bus_unregister(&mmc_bus_type);
368 destroy_workqueue(workqueue);
336} 369}
337 370
338module_init(mmc_init); 371module_init(mmc_init);
diff --git a/drivers/mmc/sdhci.c b/drivers/mmc/sdhci.c
index 4dab5ec392ea..20711acb0120 100644
--- a/drivers/mmc/sdhci.c
+++ b/drivers/mmc/sdhci.c
@@ -35,6 +35,8 @@ static unsigned int debug_quirks = 0;
35 35
36#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0) 36#define SDHCI_QUIRK_CLOCK_BEFORE_RESET (1<<0)
37#define SDHCI_QUIRK_FORCE_DMA (1<<1) 37#define SDHCI_QUIRK_FORCE_DMA (1<<1)
38/* Controller doesn't like some resets when there is no card inserted. */
39#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
38 40
39static const struct pci_device_id pci_ids[] __devinitdata = { 41static const struct pci_device_id pci_ids[] __devinitdata = {
40 { 42 {
@@ -51,7 +53,8 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
51 .device = PCI_DEVICE_ID_RICOH_R5C822, 53 .device = PCI_DEVICE_ID_RICOH_R5C822,
52 .subvendor = PCI_ANY_ID, 54 .subvendor = PCI_ANY_ID,
53 .subdevice = PCI_ANY_ID, 55 .subdevice = PCI_ANY_ID,
54 .driver_data = SDHCI_QUIRK_FORCE_DMA, 56 .driver_data = SDHCI_QUIRK_FORCE_DMA |
57 SDHCI_QUIRK_NO_CARD_NO_RESET,
55 }, 58 },
56 59
57 { 60 {
@@ -125,6 +128,12 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
125{ 128{
126 unsigned long timeout; 129 unsigned long timeout;
127 130
131 if (host->chip->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
132 if (!(readl(host->ioaddr + SDHCI_PRESENT_STATE) &
133 SDHCI_CARD_PRESENT))
134 return;
135 }
136
128 writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET); 137 writeb(mask, host->ioaddr + SDHCI_SOFTWARE_RESET);
129 138
130 if (mask & SDHCI_RESET_ALL) 139 if (mask & SDHCI_RESET_ALL)
@@ -717,6 +726,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
717 } else 726 } else
718 sdhci_send_command(host, mrq->cmd); 727 sdhci_send_command(host, mrq->cmd);
719 728
729 mmiowb();
720 spin_unlock_irqrestore(&host->lock, flags); 730 spin_unlock_irqrestore(&host->lock, flags);
721} 731}
722 732
@@ -753,6 +763,7 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
753 ctrl &= ~SDHCI_CTRL_4BITBUS; 763 ctrl &= ~SDHCI_CTRL_4BITBUS;
754 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); 764 writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
755 765
766 mmiowb();
756 spin_unlock_irqrestore(&host->lock, flags); 767 spin_unlock_irqrestore(&host->lock, flags);
757} 768}
758 769
@@ -860,6 +871,7 @@ static void sdhci_tasklet_finish(unsigned long param)
860 871
861 sdhci_deactivate_led(host); 872 sdhci_deactivate_led(host);
862 873
874 mmiowb();
863 spin_unlock_irqrestore(&host->lock, flags); 875 spin_unlock_irqrestore(&host->lock, flags);
864 876
865 mmc_request_done(host->mmc, mrq); 877 mmc_request_done(host->mmc, mrq);
@@ -893,6 +905,7 @@ static void sdhci_timeout_timer(unsigned long data)
893 } 905 }
894 } 906 }
895 907
908 mmiowb();
896 spin_unlock_irqrestore(&host->lock, flags); 909 spin_unlock_irqrestore(&host->lock, flags);
897} 910}
898 911
@@ -1030,6 +1043,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id, struct pt_regs *regs)
1030 1043
1031 result = IRQ_HANDLED; 1044 result = IRQ_HANDLED;
1032 1045
1046 mmiowb();
1033out: 1047out:
1034 spin_unlock(&host->lock); 1048 spin_unlock(&host->lock);
1035 1049
@@ -1095,6 +1109,7 @@ static int sdhci_resume (struct pci_dev *pdev)
1095 if (chip->hosts[i]->flags & SDHCI_USE_DMA) 1109 if (chip->hosts[i]->flags & SDHCI_USE_DMA)
1096 pci_set_master(pdev); 1110 pci_set_master(pdev);
1097 sdhci_init(chip->hosts[i]); 1111 sdhci_init(chip->hosts[i]);
1112 mmiowb();
1098 ret = mmc_resume_host(chip->hosts[i]->mmc); 1113 ret = mmc_resume_host(chip->hosts[i]->mmc);
1099 if (ret) 1114 if (ret)
1100 return ret; 1115 return ret;
@@ -1168,6 +1183,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1168 host = mmc_priv(mmc); 1183 host = mmc_priv(mmc);
1169 host->mmc = mmc; 1184 host->mmc = mmc;
1170 1185
1186 host->chip = chip;
1187 chip->hosts[slot] = host;
1188
1171 host->bar = first_bar + slot; 1189 host->bar = first_bar + slot;
1172 1190
1173 host->addr = pci_resource_start(pdev, host->bar); 1191 host->addr = pci_resource_start(pdev, host->bar);
@@ -1324,8 +1342,7 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
1324 sdhci_dumpregs(host); 1342 sdhci_dumpregs(host);
1325#endif 1343#endif
1326 1344
1327 host->chip = chip; 1345 mmiowb();
1328 chip->hosts[slot] = host;
1329 1346
1330 mmc_add_host(mmc); 1347 mmc_add_host(mmc);
1331 1348
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c
new file mode 100644
index 000000000000..6d23dc08d169
--- /dev/null
+++ b/drivers/mmc/tifm_sd.c
@@ -0,0 +1,933 @@
1/*
2 * tifm_sd.c - TI FlashMedia driver
3 *
4 * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12
13#include <linux/tifm.h>
14#include <linux/mmc/protocol.h>
15#include <linux/mmc/host.h>
16#include <linux/highmem.h>
17
18#define DRIVER_NAME "tifm_sd"
19#define DRIVER_VERSION "0.6"
20
21static int no_dma = 0;
22static int fixed_timeout = 0;
23module_param(no_dma, bool, 0644);
24module_param(fixed_timeout, bool, 0644);
25
26/* Constants here are mostly from OMAP5912 datasheet */
27#define TIFM_MMCSD_RESET 0x0002
28#define TIFM_MMCSD_CLKMASK 0x03ff
29#define TIFM_MMCSD_POWER 0x0800
30#define TIFM_MMCSD_4BBUS 0x8000
31#define TIFM_MMCSD_RXDE 0x8000 /* rx dma enable */
32#define TIFM_MMCSD_TXDE 0x0080 /* tx dma enable */
33#define TIFM_MMCSD_BUFINT 0x0c00 /* set bits: AE, AF */
34#define TIFM_MMCSD_DPE 0x0020 /* data timeout counted in kilocycles */
35#define TIFM_MMCSD_INAB 0x0080 /* abort / initialize command */
36#define TIFM_MMCSD_READ 0x8000
37
38#define TIFM_MMCSD_DATAMASK 0x001d /* set bits: EOFB, BRS, CB, EOC */
39#define TIFM_MMCSD_ERRMASK 0x41e0 /* set bits: CERR, CCRC, CTO, DCRC, DTO */
40#define TIFM_MMCSD_EOC 0x0001 /* end of command phase */
41#define TIFM_MMCSD_CB 0x0004 /* card enter busy state */
42#define TIFM_MMCSD_BRS 0x0008 /* block received/sent */
43#define TIFM_MMCSD_EOFB 0x0010 /* card exit busy state */
44#define TIFM_MMCSD_DTO 0x0020 /* data time-out */
45#define TIFM_MMCSD_DCRC 0x0040 /* data crc error */
46#define TIFM_MMCSD_CTO 0x0080 /* command time-out */
47#define TIFM_MMCSD_CCRC 0x0100 /* command crc error */
48#define TIFM_MMCSD_AF 0x0400 /* fifo almost full */
49#define TIFM_MMCSD_AE 0x0800 /* fifo almost empty */
50#define TIFM_MMCSD_CERR 0x4000 /* card status error */
51
52#define TIFM_MMCSD_FIFO_SIZE 0x0020
53
54#define TIFM_MMCSD_RSP_R0 0x0000
55#define TIFM_MMCSD_RSP_R1 0x0100
56#define TIFM_MMCSD_RSP_R2 0x0200
57#define TIFM_MMCSD_RSP_R3 0x0300
58#define TIFM_MMCSD_RSP_R4 0x0400
59#define TIFM_MMCSD_RSP_R5 0x0500
60#define TIFM_MMCSD_RSP_R6 0x0600
61
62#define TIFM_MMCSD_RSP_BUSY 0x0800
63
64#define TIFM_MMCSD_CMD_BC 0x0000
65#define TIFM_MMCSD_CMD_BCR 0x1000
66#define TIFM_MMCSD_CMD_AC 0x2000
67#define TIFM_MMCSD_CMD_ADTC 0x3000
68
69typedef enum {
70 IDLE = 0,
71 CMD, /* main command ended */
72 BRS, /* block transfer finished */
73 SCMD, /* stop command ended */
74 CARD, /* card left busy state */
75 FIFO, /* FIFO operation completed (uncertain) */
76 READY
77} card_state_t;
78
79enum {
80 FIFO_RDY = 0x0001, /* hardware dependent value */
81 HOST_REG = 0x0002,
82 EJECT = 0x0004,
83 EJECT_DONE = 0x0008,
84 CARD_BUSY = 0x0010,
85 OPENDRAIN = 0x0040, /* hardware dependent value */
86 CARD_EVENT = 0x0100, /* hardware dependent value */
87 CARD_RO = 0x0200, /* hardware dependent value */
88 FIFO_EVENT = 0x10000 }; /* hardware dependent value */
89
90struct tifm_sd {
91 struct tifm_dev *dev;
92
93 unsigned int flags;
94 card_state_t state;
95 unsigned int clk_freq;
96 unsigned int clk_div;
97 unsigned long timeout_jiffies; // software timeout - 2 sec
98
99 struct mmc_request *req;
100 struct work_struct cmd_handler;
101 struct work_struct abort_handler;
102 wait_queue_head_t can_eject;
103
104 size_t written_blocks;
105 char *buffer;
106 size_t buffer_size;
107 size_t buffer_pos;
108
109};
110
111static int tifm_sd_transfer_data(struct tifm_dev *sock, struct tifm_sd *host,
112 unsigned int host_status)
113{
114 struct mmc_command *cmd = host->req->cmd;
115 unsigned int t_val = 0, cnt = 0;
116
117 if (host_status & TIFM_MMCSD_BRS) {
118 /* in non-dma rx mode BRS fires when fifo is still not empty */
119 if (host->buffer && (cmd->data->flags & MMC_DATA_READ)) {
120 while (host->buffer_size > host->buffer_pos) {
121 t_val = readl(sock->addr + SOCK_MMCSD_DATA);
122 host->buffer[host->buffer_pos++] = t_val & 0xff;
123 host->buffer[host->buffer_pos++] =
124 (t_val >> 8) & 0xff;
125 }
126 }
127 return 1;
128 } else if (host->buffer) {
129 if ((cmd->data->flags & MMC_DATA_READ) &&
130 (host_status & TIFM_MMCSD_AF)) {
131 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) {
132 t_val = readl(sock->addr + SOCK_MMCSD_DATA);
133 if (host->buffer_size > host->buffer_pos) {
134 host->buffer[host->buffer_pos++] =
135 t_val & 0xff;
136 host->buffer[host->buffer_pos++] =
137 (t_val >> 8) & 0xff;
138 }
139 }
140 } else if ((cmd->data->flags & MMC_DATA_WRITE)
141 && (host_status & TIFM_MMCSD_AE)) {
142 for (cnt = 0; cnt < TIFM_MMCSD_FIFO_SIZE; cnt++) {
143 if (host->buffer_size > host->buffer_pos) {
144 t_val = host->buffer[host->buffer_pos++] & 0x00ff;
145 t_val |= ((host->buffer[host->buffer_pos++]) << 8)
146 & 0xff00;
147 writel(t_val,
148 sock->addr + SOCK_MMCSD_DATA);
149 }
150 }
151 }
152 }
153 return 0;
154}
155
156static unsigned int tifm_sd_op_flags(struct mmc_command *cmd)
157{
158 unsigned int rc = 0;
159
160 switch (mmc_resp_type(cmd)) {
161 case MMC_RSP_NONE:
162 rc |= TIFM_MMCSD_RSP_R0;
163 break;
164 case MMC_RSP_R1B:
165 rc |= TIFM_MMCSD_RSP_BUSY; // deliberate fall-through
166 case MMC_RSP_R1:
167 rc |= TIFM_MMCSD_RSP_R1;
168 break;
169 case MMC_RSP_R2:
170 rc |= TIFM_MMCSD_RSP_R2;
171 break;
172 case MMC_RSP_R3:
173 rc |= TIFM_MMCSD_RSP_R3;
174 break;
175 case MMC_RSP_R6:
176 rc |= TIFM_MMCSD_RSP_R6;
177 break;
178 default:
179 BUG();
180 }
181
182 switch (mmc_cmd_type(cmd)) {
183 case MMC_CMD_BC:
184 rc |= TIFM_MMCSD_CMD_BC;
185 break;
186 case MMC_CMD_BCR:
187 rc |= TIFM_MMCSD_CMD_BCR;
188 break;
189 case MMC_CMD_AC:
190 rc |= TIFM_MMCSD_CMD_AC;
191 break;
192 case MMC_CMD_ADTC:
193 rc |= TIFM_MMCSD_CMD_ADTC;
194 break;
195 default:
196 BUG();
197 }
198 return rc;
199}
200
201static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd)
202{
203 struct tifm_dev *sock = host->dev;
204 unsigned int cmd_mask = tifm_sd_op_flags(cmd) |
205 (host->flags & OPENDRAIN);
206
207 if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
208 cmd_mask |= TIFM_MMCSD_READ;
209
210 dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n",
211 cmd->opcode, cmd->arg, cmd_mask);
212
213 writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH);
214 writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW);
215 writel(cmd->opcode | cmd_mask, sock->addr + SOCK_MMCSD_COMMAND);
216}
217
218static void tifm_sd_fetch_resp(struct mmc_command *cmd, struct tifm_dev *sock)
219{
220 cmd->resp[0] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x1c) << 16)
221 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x18);
222 cmd->resp[1] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x14) << 16)
223 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x10);
224 cmd->resp[2] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x0c) << 16)
225 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x08);
226 cmd->resp[3] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x04) << 16)
227 | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x00);
228}
229
230static void tifm_sd_process_cmd(struct tifm_dev *sock, struct tifm_sd *host,
231 unsigned int host_status)
232{
233 struct mmc_command *cmd = host->req->cmd;
234
235change_state:
236 switch (host->state) {
237 case IDLE:
238 return;
239 case CMD:
240 if (host_status & TIFM_MMCSD_EOC) {
241 tifm_sd_fetch_resp(cmd, sock);
242 if (cmd->data) {
243 host->state = BRS;
244 } else
245 host->state = READY;
246 goto change_state;
247 }
248 break;
249 case BRS:
250 if (tifm_sd_transfer_data(sock, host, host_status)) {
251 if (!host->req->stop) {
252 if (cmd->data->flags & MMC_DATA_WRITE) {
253 host->state = CARD;
254 } else {
255 host->state =
256 host->buffer ? READY : FIFO;
257 }
258 goto change_state;
259 }
260 tifm_sd_exec(host, host->req->stop);
261 host->state = SCMD;
262 }
263 break;
264 case SCMD:
265 if (host_status & TIFM_MMCSD_EOC) {
266 tifm_sd_fetch_resp(host->req->stop, sock);
267 if (cmd->error) {
268 host->state = READY;
269 } else if (cmd->data->flags & MMC_DATA_WRITE) {
270 host->state = CARD;
271 } else {
272 host->state = host->buffer ? READY : FIFO;
273 }
274 goto change_state;
275 }
276 break;
277 case CARD:
278 if (!(host->flags & CARD_BUSY)
279 && (host->written_blocks == cmd->data->blocks)) {
280 host->state = host->buffer ? READY : FIFO;
281 goto change_state;
282 }
283 break;
284 case FIFO:
285 if (host->flags & FIFO_RDY) {
286 host->state = READY;
287 host->flags &= ~FIFO_RDY;
288 goto change_state;
289 }
290 break;
291 case READY:
292 queue_work(sock->wq, &host->cmd_handler);
293 return;
294 }
295
296 queue_delayed_work(sock->wq, &host->abort_handler,
297 host->timeout_jiffies);
298}
299
300/* Called from interrupt handler */
301static unsigned int tifm_sd_signal_irq(struct tifm_dev *sock,
302 unsigned int sock_irq_status)
303{
304 struct tifm_sd *host;
305 unsigned int host_status = 0, fifo_status = 0;
306 int error_code = 0;
307
308 spin_lock(&sock->lock);
309 host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
310 cancel_delayed_work(&host->abort_handler);
311
312 if (sock_irq_status & FIFO_EVENT) {
313 fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
314 writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS);
315
316 host->flags |= fifo_status & FIFO_RDY;
317 }
318
319 if (sock_irq_status & CARD_EVENT) {
320 host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
321 writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
322
323 if (!(host->flags & HOST_REG))
324 queue_work(sock->wq, &host->cmd_handler);
325 if (!host->req)
326 goto done;
327
328 if (host_status & TIFM_MMCSD_ERRMASK) {
329 if (host_status & TIFM_MMCSD_CERR)
330 error_code = MMC_ERR_FAILED;
331 else if (host_status &
332 (TIFM_MMCSD_CTO | TIFM_MMCSD_DTO))
333 error_code = MMC_ERR_TIMEOUT;
334 else if (host_status &
335 (TIFM_MMCSD_CCRC | TIFM_MMCSD_DCRC))
336 error_code = MMC_ERR_BADCRC;
337
338 writel(TIFM_FIFO_INT_SETALL,
339 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
340 writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);
341
342 if (host->req->stop) {
343 if (host->state == SCMD) {
344 host->req->stop->error = error_code;
345 } else if(host->state == BRS) {
346 host->req->cmd->error = error_code;
347 tifm_sd_exec(host, host->req->stop);
348 queue_delayed_work(sock->wq,
349 &host->abort_handler,
350 host->timeout_jiffies);
351 host->state = SCMD;
352 goto done;
353 } else {
354 host->req->cmd->error = error_code;
355 }
356 } else {
357 host->req->cmd->error = error_code;
358 }
359 host->state = READY;
360 }
361
362 if (host_status & TIFM_MMCSD_CB)
363 host->flags |= CARD_BUSY;
364 if ((host_status & TIFM_MMCSD_EOFB) &&
365 (host->flags & CARD_BUSY)) {
366 host->written_blocks++;
367 host->flags &= ~CARD_BUSY;
368 }
369 }
370
371 if (host->req)
372 tifm_sd_process_cmd(sock, host, host_status);
373done:
374 dev_dbg(&sock->dev, "host_status %x, fifo_status %x\n",
375 host_status, fifo_status);
376 spin_unlock(&sock->lock);
377 return sock_irq_status;
378}
379
380static void tifm_sd_prepare_data(struct tifm_sd *card, struct mmc_command *cmd)
381{
382 struct tifm_dev *sock = card->dev;
383 unsigned int dest_cnt;
384
385 /* DMA style IO */
386
387 writel(TIFM_FIFO_INT_SETALL,
388 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
389 writel(long_log2(cmd->data->blksz) - 2,
390 sock->addr + SOCK_FIFO_PAGE_SIZE);
391 writel(TIFM_FIFO_ENABLE, sock->addr + SOCK_FIFO_CONTROL);
392 writel(TIFM_FIFO_INTMASK, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
393
394 dest_cnt = (cmd->data->blocks) << 8;
395
396 writel(sg_dma_address(cmd->data->sg), sock->addr + SOCK_DMA_ADDRESS);
397
398 writel(cmd->data->blocks - 1, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
399 writel(cmd->data->blksz - 1, sock->addr + SOCK_MMCSD_BLOCK_LEN);
400
401 if (cmd->data->flags & MMC_DATA_WRITE) {
402 writel(TIFM_MMCSD_TXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
403 writel(dest_cnt | TIFM_DMA_TX | TIFM_DMA_EN,
404 sock->addr + SOCK_DMA_CONTROL);
405 } else {
406 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
407 writel(dest_cnt | TIFM_DMA_EN, sock->addr + SOCK_DMA_CONTROL);
408 }
409}
410
411static void tifm_sd_set_data_timeout(struct tifm_sd *host,
412 struct mmc_data *data)
413{
414 struct tifm_dev *sock = host->dev;
415 unsigned int data_timeout = data->timeout_clks;
416
417 if (fixed_timeout)
418 return;
419
420 data_timeout += data->timeout_ns /
421 ((1000000000 / host->clk_freq) * host->clk_div);
422 data_timeout *= 10; // call it fudge factor for now
423
424 if (data_timeout < 0xffff) {
425 writel((~TIFM_MMCSD_DPE) &
426 readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
427 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
428 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
429 } else {
430 writel(TIFM_MMCSD_DPE |
431 readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
432 sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
433 data_timeout = (data_timeout >> 10) + 1;
434 if(data_timeout > 0xffff)
435 data_timeout = 0; /* set to unlimited */
436 writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
437 }
438}
439
440static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
441{
442 struct tifm_sd *host = mmc_priv(mmc);
443 struct tifm_dev *sock = host->dev;
444 unsigned long flags;
445 int sg_count = 0;
446 struct mmc_data *r_data = mrq->cmd->data;
447
448 spin_lock_irqsave(&sock->lock, flags);
449 if (host->flags & EJECT) {
450 spin_unlock_irqrestore(&sock->lock, flags);
451 goto err_out;
452 }
453
454 if (host->req) {
455 printk(KERN_ERR DRIVER_NAME ": unfinished request detected\n");
456 spin_unlock_irqrestore(&sock->lock, flags);
457 goto err_out;
458 }
459
460 if (r_data) {
461 tifm_sd_set_data_timeout(host, r_data);
462
463 sg_count = tifm_map_sg(sock, r_data->sg, r_data->sg_len,
464 mrq->cmd->flags & MMC_DATA_WRITE
465 ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
466 if (sg_count != 1) {
467 printk(KERN_ERR DRIVER_NAME
468 ": scatterlist map failed\n");
469 spin_unlock_irqrestore(&sock->lock, flags);
470 goto err_out;
471 }
472
473 host->written_blocks = 0;
474 host->flags &= ~CARD_BUSY;
475 tifm_sd_prepare_data(host, mrq->cmd);
476 }
477
478 host->req = mrq;
479 host->state = CMD;
480 queue_delayed_work(sock->wq, &host->abort_handler,
481 host->timeout_jiffies);
482 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
483 sock->addr + SOCK_CONTROL);
484 tifm_sd_exec(host, mrq->cmd);
485 spin_unlock_irqrestore(&sock->lock, flags);
486 return;
487
488err_out:
489 if (sg_count > 0)
490 tifm_unmap_sg(sock, r_data->sg, r_data->sg_len,
491 (r_data->flags & MMC_DATA_WRITE)
492 ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
493
494 mrq->cmd->error = MMC_ERR_TIMEOUT;
495 mmc_request_done(mmc, mrq);
496}
497
498static void tifm_sd_end_cmd(void *data)
499{
500 struct tifm_sd *host = data;
501 struct tifm_dev *sock = host->dev;
502 struct mmc_host *mmc = tifm_get_drvdata(sock);
503 struct mmc_request *mrq;
504 struct mmc_data *r_data = 0;
505 unsigned long flags;
506
507 spin_lock_irqsave(&sock->lock, flags);
508
509 mrq = host->req;
510 host->req = 0;
511 host->state = IDLE;
512
513 if (!mrq) {
514 printk(KERN_ERR DRIVER_NAME ": no request to complete?\n");
515 spin_unlock_irqrestore(&sock->lock, flags);
516 return;
517 }
518
519 r_data = mrq->cmd->data;
520 if (r_data) {
521 if (r_data->flags & MMC_DATA_WRITE) {
522 r_data->bytes_xfered = host->written_blocks *
523 r_data->blksz;
524 } else {
525 r_data->bytes_xfered = r_data->blocks -
526 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
527 r_data->bytes_xfered *= r_data->blksz;
528 r_data->bytes_xfered += r_data->blksz -
529 readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
530 }
531 tifm_unmap_sg(sock, r_data->sg, r_data->sg_len,
532 (r_data->flags & MMC_DATA_WRITE)
533 ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
534 }
535
536 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
537 sock->addr + SOCK_CONTROL);
538
539 spin_unlock_irqrestore(&sock->lock, flags);
540 mmc_request_done(mmc, mrq);
541}
542
543static void tifm_sd_request_nodma(struct mmc_host *mmc, struct mmc_request *mrq)
544{
545 struct tifm_sd *host = mmc_priv(mmc);
546 struct tifm_dev *sock = host->dev;
547 unsigned long flags;
548 struct mmc_data *r_data = mrq->cmd->data;
549 char *t_buffer = 0;
550
551 if (r_data) {
552 t_buffer = kmap(r_data->sg->page);
553 if (!t_buffer) {
554 printk(KERN_ERR DRIVER_NAME ": kmap failed\n");
555 goto err_out;
556 }
557 }
558
559 spin_lock_irqsave(&sock->lock, flags);
560 if (host->flags & EJECT) {
561 spin_unlock_irqrestore(&sock->lock, flags);
562 goto err_out;
563 }
564
565 if (host->req) {
566 printk(KERN_ERR DRIVER_NAME ": unfinished request detected\n");
567 spin_unlock_irqrestore(&sock->lock, flags);
568 goto err_out;
569 }
570
571 if (r_data) {
572 tifm_sd_set_data_timeout(host, r_data);
573
574 host->buffer = t_buffer + r_data->sg->offset;
575 host->buffer_size = mrq->cmd->data->blocks *
576 mrq->cmd->data->blksz;
577
578 writel(TIFM_MMCSD_BUFINT |
579 readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
580 sock->addr + SOCK_MMCSD_INT_ENABLE);
581 writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8) |
582 (TIFM_MMCSD_FIFO_SIZE - 1),
583 sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
584
585 host->written_blocks = 0;
586 host->flags &= ~CARD_BUSY;
587 host->buffer_pos = 0;
588 writel(r_data->blocks - 1, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
589 writel(r_data->blksz - 1, sock->addr + SOCK_MMCSD_BLOCK_LEN);
590 }
591
592 host->req = mrq;
593 host->state = CMD;
594 queue_delayed_work(sock->wq, &host->abort_handler,
595 host->timeout_jiffies);
596 writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
597 sock->addr + SOCK_CONTROL);
598 tifm_sd_exec(host, mrq->cmd);
599 spin_unlock_irqrestore(&sock->lock, flags);
600 return;
601
602err_out:
603 if (t_buffer)
604 kunmap(r_data->sg->page);
605
606 mrq->cmd->error = MMC_ERR_TIMEOUT;
607 mmc_request_done(mmc, mrq);
608}
609
610static void tifm_sd_end_cmd_nodma(void *data)
611{
612 struct tifm_sd *host = (struct tifm_sd*)data;
613 struct tifm_dev *sock = host->dev;
614 struct mmc_host *mmc = tifm_get_drvdata(sock);
615 struct mmc_request *mrq;
616 struct mmc_data *r_data = 0;
617 unsigned long flags;
618
619 spin_lock_irqsave(&sock->lock, flags);
620
621 mrq = host->req;
622 host->req = 0;
623 host->state = IDLE;
624
625 if (!mrq) {
626 printk(KERN_ERR DRIVER_NAME ": no request to complete?\n");
627 spin_unlock_irqrestore(&sock->lock, flags);
628 return;
629 }
630
631 r_data = mrq->cmd->data;
632 if (r_data) {
633 writel((~TIFM_MMCSD_BUFINT) &
634 readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
635 sock->addr + SOCK_MMCSD_INT_ENABLE);
636
637 if (r_data->flags & MMC_DATA_WRITE) {
638 r_data->bytes_xfered = host->written_blocks *
639 r_data->blksz;
640 } else {
641 r_data->bytes_xfered = r_data->blocks -
642 readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
643 r_data->bytes_xfered *= r_data->blksz;
644 r_data->bytes_xfered += r_data->blksz -
645 readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
646 }
647 host->buffer = 0;
648 host->buffer_pos = 0;
649 host->buffer_size = 0;
650 }
651
652 writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
653 sock->addr + SOCK_CONTROL);
654
655 spin_unlock_irqrestore(&sock->lock, flags);
656
657 if (r_data)
658 kunmap(r_data->sg->page);
659
660 mmc_request_done(mmc, mrq);
661}
662
663static void tifm_sd_abort(void *data)
664{
665 printk(KERN_ERR DRIVER_NAME
666 ": card failed to respond for a long period of time");
667 tifm_eject(((struct tifm_sd*)data)->dev);
668}
669
/*
 * mmc_host_ops .set_ios handler: apply bus width, clock frequency and
 * bus mode requested by the MMC core, and complete the eject handshake
 * once the core powers the slot off.
 *
 * The controller can derive its clock from either a 20 MHz or a 24 MHz
 * source; both dividers are computed and whichever yields the higher
 * frequency not exceeding ios->clock is selected.
 */
static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tifm_sd *host = mmc_priv(mmc);
	struct tifm_dev *sock = host->dev;
	unsigned int clk_div1, clk_div2;
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);

	dev_dbg(&sock->dev, "Setting bus width %d, power %d\n", ios->bus_width,
		ios->power_mode);
	if (ios->bus_width == MMC_BUS_WIDTH_4) {
		writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG),
		       sock->addr + SOCK_MMCSD_CONFIG);
	} else {
		writel((~TIFM_MMCSD_4BBUS) &
		       readl(sock->addr + SOCK_MMCSD_CONFIG),
		       sock->addr + SOCK_MMCSD_CONFIG);
	}

	if (ios->clock) {
		/* Candidate dividers for the 20 MHz and 24 MHz sources. */
		clk_div1 = 20000000 / ios->clock;
		if (!clk_div1)
			clk_div1 = 1;

		clk_div2 = 24000000 / ios->clock;
		if (!clk_div2)
			clk_div2 = 1;

		/* Round up so the resulting rate never exceeds ios->clock. */
		if ((20000000 / clk_div1) > ios->clock)
			clk_div1++;
		if ((24000000 / clk_div2) > ios->clock)
			clk_div2++;
		if ((20000000 / clk_div1) > (24000000 / clk_div2)) {
			host->clk_freq = 20000000;
			host->clk_div = clk_div1;
			writel((~TIFM_CTRL_FAST_CLK) &
			       readl(sock->addr + SOCK_CONTROL),
			       sock->addr + SOCK_CONTROL);
		} else {
			host->clk_freq = 24000000;
			host->clk_div = clk_div2;
			writel(TIFM_CTRL_FAST_CLK |
			       readl(sock->addr + SOCK_CONTROL),
			       sock->addr + SOCK_CONTROL);
		}
	} else {
		host->clk_div = 0;
	}
	/* Merge the divider into the config register's clock field. */
	host->clk_div &= TIFM_MMCSD_CLKMASK;
	writel(host->clk_div | ((~TIFM_MMCSD_CLKMASK) &
				readl(sock->addr + SOCK_MMCSD_CONFIG)),
	       sock->addr + SOCK_MMCSD_CONFIG);

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
		host->flags |= OPENDRAIN;
	else
		host->flags &= ~OPENDRAIN;

	/* chip_select : maybe later */
	//vdd
	//power is set before probe / after remove
	//I believe, power_off when already marked for eject is sufficient to
	// allow removal.
	if ((host->flags & EJECT) && ios->power_mode == MMC_POWER_OFF) {
		/* Eject handshake: tifm_sd_remove() waits on can_eject. */
		host->flags |= EJECT_DONE;
		wake_up_all(&host->can_eject);
	}

	spin_unlock_irqrestore(&sock->lock, flags);
}
741
742static int tifm_sd_ro(struct mmc_host *mmc)
743{
744 int rc;
745 struct tifm_sd *host = mmc_priv(mmc);
746 struct tifm_dev *sock = host->dev;
747 unsigned long flags;
748
749 spin_lock_irqsave(&sock->lock, flags);
750
751 host->flags |= (CARD_RO & readl(sock->addr + SOCK_PRESENT_STATE));
752 rc = (host->flags & CARD_RO) ? 1 : 0;
753
754 spin_unlock_irqrestore(&sock->lock, flags);
755 return rc;
756}
757
/* Host operations handed to the MMC core.  .request is rewritten to the
 * PIO variant in tifm_sd_probe() when the no_dma module option is set. */
static struct mmc_host_ops tifm_sd_ops = {
	.request = tifm_sd_request,
	.set_ios = tifm_sd_ios,
	.get_ro = tifm_sd_ro
};
763
/*
 * One-shot work item scheduled as the initial cmd_handler from probe:
 * marks the host registered and re-points cmd_handler at the real
 * command-completion handler (DMA or PIO variant per no_dma) before
 * registering with the MMC core.  PREPARE_WORK happens under the socket
 * lock so no completion can race with the retargeting.
 */
static void tifm_sd_register_host(void *data)
{
	struct tifm_sd *host = (struct tifm_sd*)data;
	struct tifm_dev *sock = host->dev;
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);
	host->flags |= HOST_REG;
	PREPARE_WORK(&host->cmd_handler,
		     no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd,
		     data);
	spin_unlock_irqrestore(&sock->lock, flags);
	dev_dbg(&sock->dev, "adding host\n");
	mmc_add_host(mmc);
}
780
781static int tifm_sd_probe(struct tifm_dev *sock)
782{
783 struct mmc_host *mmc;
784 struct tifm_sd *host;
785 int rc = -EIO;
786
787 if (!(TIFM_SOCK_STATE_OCCUPIED &
788 readl(sock->addr + SOCK_PRESENT_STATE))) {
789 printk(KERN_WARNING DRIVER_NAME ": card gone, unexpectedly\n");
790 return rc;
791 }
792
793 mmc = mmc_alloc_host(sizeof(struct tifm_sd), &sock->dev);
794 if (!mmc)
795 return -ENOMEM;
796
797 host = mmc_priv(mmc);
798 host->dev = sock;
799 host->clk_div = 61;
800 init_waitqueue_head(&host->can_eject);
801 INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host);
802 INIT_WORK(&host->abort_handler, tifm_sd_abort, host);
803
804 tifm_set_drvdata(sock, mmc);
805 sock->signal_irq = tifm_sd_signal_irq;
806
807 host->clk_freq = 20000000;
808 host->timeout_jiffies = msecs_to_jiffies(1000);
809
810 tifm_sd_ops.request = no_dma ? tifm_sd_request_nodma : tifm_sd_request;
811 mmc->ops = &tifm_sd_ops;
812 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
813 mmc->caps = MMC_CAP_4_BIT_DATA;
814 mmc->f_min = 20000000 / 60;
815 mmc->f_max = 24000000;
816 mmc->max_hw_segs = 1;
817 mmc->max_phys_segs = 1;
818 mmc->max_sectors = 127;
819 mmc->max_seg_size = mmc->max_sectors << 11; //2k maximum hw block length
820
821 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
822 writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
823 writel(host->clk_div | TIFM_MMCSD_POWER,
824 sock->addr + SOCK_MMCSD_CONFIG);
825
826 for (rc = 0; rc < 50; rc++) {
827 /* Wait for reset ack */
828 if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) {
829 rc = 0;
830 break;
831 }
832 msleep(10);
833 }
834
835 if (rc) {
836 printk(KERN_ERR DRIVER_NAME
837 ": card not ready - probe failed\n");
838 mmc_free_host(mmc);
839 return -ENODEV;
840 }
841
842 writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
843 writel(host->clk_div | TIFM_MMCSD_POWER,
844 sock->addr + SOCK_MMCSD_CONFIG);
845 writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
846 writel(TIFM_MMCSD_DATAMASK | TIFM_MMCSD_ERRMASK,
847 sock->addr + SOCK_MMCSD_INT_ENABLE);
848
849 writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO); // command timeout 64 clocks for now
850 writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND);
851 writel(host->clk_div | TIFM_MMCSD_POWER,
852 sock->addr + SOCK_MMCSD_CONFIG);
853
854 queue_delayed_work(sock->wq, &host->abort_handler,
855 host->timeout_jiffies);
856
857 return 0;
858}
859
860static int tifm_sd_host_is_down(struct tifm_dev *sock)
861{
862 struct mmc_host *mmc = tifm_get_drvdata(sock);
863 struct tifm_sd *host = mmc_priv(mmc);
864 unsigned long flags;
865 int rc = 0;
866
867 spin_lock_irqsave(&sock->lock, flags);
868 rc = (host->flags & EJECT_DONE);
869 spin_unlock_irqrestore(&sock->lock, flags);
870 return rc;
871}
872
873static void tifm_sd_remove(struct tifm_dev *sock)
874{
875 struct mmc_host *mmc = tifm_get_drvdata(sock);
876 struct tifm_sd *host = mmc_priv(mmc);
877 unsigned long flags;
878
879 spin_lock_irqsave(&sock->lock, flags);
880 host->flags |= EJECT;
881 if (host->req)
882 queue_work(sock->wq, &host->cmd_handler);
883 spin_unlock_irqrestore(&sock->lock, flags);
884 wait_event_timeout(host->can_eject, tifm_sd_host_is_down(sock),
885 host->timeout_jiffies);
886
887 if (host->flags & HOST_REG)
888 mmc_remove_host(mmc);
889
890 /* The meaning of the bit majority in this constant is unknown. */
891 writel(0xfff8 & readl(sock->addr + SOCK_CONTROL),
892 sock->addr + SOCK_CONTROL);
893 writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
894 writel(TIFM_FIFO_INT_SETALL,
895 sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
896 writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
897
898 tifm_set_drvdata(sock, 0);
899 mmc_free_host(mmc);
900}
901
/* Media types served by this driver; zero-terminated list. */
static tifm_media_id tifm_sd_id_tbl[] = {
	FM_SD, 0
};
905
/* Driver descriptor registered with the tifm bus core. */
static struct tifm_driver tifm_sd_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = tifm_sd_id_tbl,
	.probe = tifm_sd_probe,
	.remove = tifm_sd_remove
};
915
/* Module entry point: register with the tifm bus core. */
static int __init tifm_sd_init(void)
{
	return tifm_register_driver(&tifm_sd_driver);
}
920
/* Module exit point: unregister from the tifm bus core. */
static void __exit tifm_sd_exit(void)
{
	tifm_unregister_driver(&tifm_sd_driver);
}
925
/* Module metadata and entry/exit registration. */
MODULE_AUTHOR("Alex Dubov");
MODULE_DESCRIPTION("TI FlashMedia SD driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(tifm, tifm_sd_id_tbl);
MODULE_VERSION(DRIVER_VERSION);

module_init(tifm_sd_init);
module_exit(tifm_sd_exit);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 1fbda77cefc2..c2949b4367e5 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -146,7 +146,7 @@
146#include <asm/superio.h> 146#include <asm/superio.h>
147#endif 147#endif
148 148
149#include <asm/iosapic.h> 149#include <asm/ropes.h>
150#include "./iosapic_private.h" 150#include "./iosapic_private.h"
151 151
152#define MODULE_NAME "iosapic" 152#define MODULE_NAME "iosapic"
@@ -692,6 +692,7 @@ static void iosapic_end_irq(unsigned int irq)
692 DBG(KERN_DEBUG "end_irq(%d): eoi(%p, 0x%x)\n", irq, 692 DBG(KERN_DEBUG "end_irq(%d): eoi(%p, 0x%x)\n", irq,
693 vi->eoi_addr, vi->eoi_data); 693 vi->eoi_addr, vi->eoi_data);
694 iosapic_eoi(vi->eoi_addr, vi->eoi_data); 694 iosapic_eoi(vi->eoi_addr, vi->eoi_data);
695 cpu_end_irq(irq);
695} 696}
696 697
697static unsigned int iosapic_startup_irq(unsigned int irq) 698static unsigned int iosapic_startup_irq(unsigned int irq)
@@ -728,7 +729,7 @@ static struct hw_interrupt_type iosapic_interrupt_type = {
728 .shutdown = iosapic_disable_irq, 729 .shutdown = iosapic_disable_irq,
729 .enable = iosapic_enable_irq, 730 .enable = iosapic_enable_irq,
730 .disable = iosapic_disable_irq, 731 .disable = iosapic_disable_irq,
731 .ack = no_ack_irq, 732 .ack = cpu_ack_irq,
732 .end = iosapic_end_irq, 733 .end = iosapic_end_irq,
733#ifdef CONFIG_SMP 734#ifdef CONFIG_SMP
734 .set_affinity = iosapic_set_affinity_irq, 735 .set_affinity = iosapic_set_affinity_irq,
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 3fe4a77fa16a..ba6769934c77 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -46,9 +46,9 @@
46#include <asm/page.h> 46#include <asm/page.h>
47#include <asm/system.h> 47#include <asm/system.h>
48 48
49#include <asm/ropes.h>
49#include <asm/hardware.h> /* for register_parisc_driver() stuff */ 50#include <asm/hardware.h> /* for register_parisc_driver() stuff */
50#include <asm/parisc-device.h> 51#include <asm/parisc-device.h>
51#include <asm/iosapic.h> /* for iosapic_register() */
52#include <asm/io.h> /* read/write stuff */ 52#include <asm/io.h> /* read/write stuff */
53 53
54#undef DEBUG_LBA /* general stuff */ 54#undef DEBUG_LBA /* general stuff */
@@ -100,113 +100,10 @@
100 100
101#define MODULE_NAME "LBA" 101#define MODULE_NAME "LBA"
102 102
103#define LBA_FUNC_ID 0x0000 /* function id */
104#define LBA_FCLASS 0x0008 /* function class, bist, header, rev... */
105#define LBA_CAPABLE 0x0030 /* capabilities register */
106
107#define LBA_PCI_CFG_ADDR 0x0040 /* poke CFG address here */
108#define LBA_PCI_CFG_DATA 0x0048 /* read or write data here */
109
110#define LBA_PMC_MTLT 0x0050 /* Firmware sets this - read only. */
111#define LBA_FW_SCRATCH 0x0058 /* Firmware writes the PCI bus number here. */
112#define LBA_ERROR_ADDR 0x0070 /* On error, address gets logged here */
113
114#define LBA_ARB_MASK 0x0080 /* bit 0 enable arbitration. PAT/PDC enables */
115#define LBA_ARB_PRI 0x0088 /* firmware sets this. */
116#define LBA_ARB_MODE 0x0090 /* firmware sets this. */
117#define LBA_ARB_MTLT 0x0098 /* firmware sets this. */
118
119#define LBA_MOD_ID 0x0100 /* Module ID. PDC_PAT_CELL reports 4 */
120
121#define LBA_STAT_CTL 0x0108 /* Status & Control */
122#define LBA_BUS_RESET 0x01 /* Deassert PCI Bus Reset Signal */
123#define CLEAR_ERRLOG 0x10 /* "Clear Error Log" cmd */
124#define CLEAR_ERRLOG_ENABLE 0x20 /* "Clear Error Log" Enable */
125#define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */
126
127#define LBA_LMMIO_BASE 0x0200 /* < 4GB I/O address range */
128#define LBA_LMMIO_MASK 0x0208
129
130#define LBA_GMMIO_BASE 0x0210 /* > 4GB I/O address range */
131#define LBA_GMMIO_MASK 0x0218
132
133#define LBA_WLMMIO_BASE 0x0220 /* All < 4GB ranges under the same *SBA* */
134#define LBA_WLMMIO_MASK 0x0228
135
136#define LBA_WGMMIO_BASE 0x0230 /* All > 4GB ranges under the same *SBA* */
137#define LBA_WGMMIO_MASK 0x0238
138
139#define LBA_IOS_BASE 0x0240 /* I/O port space for this LBA */
140#define LBA_IOS_MASK 0x0248
141
142#define LBA_ELMMIO_BASE 0x0250 /* Extra LMMIO range */
143#define LBA_ELMMIO_MASK 0x0258
144
145#define LBA_EIOS_BASE 0x0260 /* Extra I/O port space */
146#define LBA_EIOS_MASK 0x0268
147
148#define LBA_GLOBAL_MASK 0x0270 /* Mercury only: Global Address Mask */
149#define LBA_DMA_CTL 0x0278 /* firmware sets this */
150
151#define LBA_IBASE 0x0300 /* SBA DMA support */
152#define LBA_IMASK 0x0308
153
154/* FIXME: ignore DMA Hint stuff until we can measure performance */
155#define LBA_HINT_CFG 0x0310
156#define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */
157
158#define LBA_BUS_MODE 0x0620
159
160/* ERROR regs are needed for config cycle kluges */
161#define LBA_ERROR_CONFIG 0x0680
162#define LBA_SMART_MODE 0x20
163#define LBA_ERROR_STATUS 0x0688
164#define LBA_ROPE_CTL 0x06A0
165
166#define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */
167
168/* non-postable I/O port space, densely packed */ 103/* non-postable I/O port space, densely packed */
169#define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL) 104#define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL)
170static void __iomem *astro_iop_base __read_mostly; 105static void __iomem *astro_iop_base __read_mostly;
171 106
172#define ELROY_HVERS 0x782
173#define MERCURY_HVERS 0x783
174#define QUICKSILVER_HVERS 0x784
175
176static inline int IS_ELROY(struct parisc_device *d)
177{
178 return (d->id.hversion == ELROY_HVERS);
179}
180
181static inline int IS_MERCURY(struct parisc_device *d)
182{
183 return (d->id.hversion == MERCURY_HVERS);
184}
185
186static inline int IS_QUICKSILVER(struct parisc_device *d)
187{
188 return (d->id.hversion == QUICKSILVER_HVERS);
189}
190
191
192/*
193** lba_device: Per instance Elroy data structure
194*/
195struct lba_device {
196 struct pci_hba_data hba;
197
198 spinlock_t lba_lock;
199 void *iosapic_obj;
200
201#ifdef CONFIG_64BIT
202 void __iomem * iop_base; /* PA_VIEW - for IO port accessor funcs */
203#endif
204
205 int flags; /* state/functionality enabled */
206 int hw_rev; /* HW revision of chip */
207};
208
209
210static u32 lba_t32; 107static u32 lba_t32;
211 108
212/* lba flags */ 109/* lba flags */
@@ -1542,8 +1439,8 @@ lba_driver_probe(struct parisc_device *dev)
1542 default: version = "TR4+"; 1439 default: version = "TR4+";
1543 } 1440 }
1544 1441
1545 printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n", 1442 printk(KERN_INFO "Elroy version %s (0x%x) found at 0x%lx\n",
1546 MODULE_NAME, version, func_class & 0xf, dev->hpa.start); 1443 version, func_class & 0xf, dev->hpa.start);
1547 1444
1548 if (func_class < 2) { 1445 if (func_class < 2) {
1549 printk(KERN_WARNING "Can't support LBA older than " 1446 printk(KERN_WARNING "Can't support LBA older than "
@@ -1563,14 +1460,18 @@ lba_driver_probe(struct parisc_device *dev)
1563 } 1460 }
1564 1461
1565 } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) { 1462 } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
1463 int major, minor;
1464
1566 func_class &= 0xff; 1465 func_class &= 0xff;
1567 version = kmalloc(6, GFP_KERNEL); 1466 major = func_class >> 4, minor = func_class & 0xf;
1568 snprintf(version, 6, "TR%d.%d",(func_class >> 4),(func_class & 0xf)); 1467
1569 /* We could use one printk for both Elroy and Mercury, 1468 /* We could use one printk for both Elroy and Mercury,
1570 * but for the mask for func_class. 1469 * but for the mask for func_class.
1571 */ 1470 */
1572 printk(KERN_INFO "%s version %s (0x%x) found at 0x%lx\n", 1471 printk(KERN_INFO "%s version TR%d.%d (0x%x) found at 0x%lx\n",
1573 MODULE_NAME, version, func_class & 0xff, dev->hpa.start); 1472 IS_MERCURY(dev) ? "Mercury" : "Quicksilver", major,
1473 minor, func_class, dev->hpa.start);
1474
1574 cfg_ops = &mercury_cfg_ops; 1475 cfg_ops = &mercury_cfg_ops;
1575 } else { 1476 } else {
1576 printk(KERN_ERR "Unknown LBA found at 0x%lx\n", dev->hpa.start); 1477 printk(KERN_ERR "Unknown LBA found at 0x%lx\n", dev->hpa.start);
@@ -1600,6 +1501,7 @@ lba_driver_probe(struct parisc_device *dev)
1600 lba_dev->hba.dev = dev; 1501 lba_dev->hba.dev = dev;
1601 lba_dev->iosapic_obj = tmp_obj; /* save interrupt handle */ 1502 lba_dev->iosapic_obj = tmp_obj; /* save interrupt handle */
1602 lba_dev->hba.iommu = sba_get_iommu(dev); /* get iommu data */ 1503 lba_dev->hba.iommu = sba_get_iommu(dev); /* get iommu data */
1504 parisc_set_drvdata(dev, lba_dev);
1603 1505
1604 /* ------------ Second : initialize common stuff ---------- */ 1506 /* ------------ Second : initialize common stuff ---------- */
1605 pci_bios = &lba_bios_ops; 1507 pci_bios = &lba_bios_ops;
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 8b4732815511..294c1117098d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -38,22 +38,15 @@
38#include <linux/proc_fs.h> 38#include <linux/proc_fs.h>
39#include <linux/seq_file.h> 39#include <linux/seq_file.h>
40 40
41#include <asm/ropes.h>
42#include <asm/mckinley.h> /* for proc_mckinley_root */
41#include <asm/runway.h> /* for proc_runway_root */ 43#include <asm/runway.h> /* for proc_runway_root */
42#include <asm/pdc.h> /* for PDC_MODEL_* */ 44#include <asm/pdc.h> /* for PDC_MODEL_* */
43#include <asm/pdcpat.h> /* for is_pdc_pat() */ 45#include <asm/pdcpat.h> /* for is_pdc_pat() */
44#include <asm/parisc-device.h> 46#include <asm/parisc-device.h>
45 47
46
47/* declared in arch/parisc/kernel/setup.c */
48extern struct proc_dir_entry * proc_mckinley_root;
49
50#define MODULE_NAME "SBA" 48#define MODULE_NAME "SBA"
51 49
52#ifdef CONFIG_PROC_FS
53/* depends on proc fs support. But costs CPU performance */
54#undef SBA_COLLECT_STATS
55#endif
56
57/* 50/*
58** The number of debug flags is a clue - this code is fragile. 51** The number of debug flags is a clue - this code is fragile.
59** Don't even think about messing with it unless you have 52** Don't even think about messing with it unless you have
@@ -92,202 +85,12 @@ extern struct proc_dir_entry * proc_mckinley_root;
92#define DBG_RES(x...) 85#define DBG_RES(x...)
93#endif 86#endif
94 87
95#if defined(CONFIG_64BIT)
96/* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */
97#define ZX1_SUPPORT
98#endif
99
100#define SBA_INLINE __inline__ 88#define SBA_INLINE __inline__
101 89
102
103/*
104** The number of pdir entries to "free" before issueing
105** a read to PCOM register to flush out PCOM writes.
106** Interacts with allocation granularity (ie 4 or 8 entries
107** allocated and free'd/purged at a time might make this
108** less interesting).
109*/
110#define DELAYED_RESOURCE_CNT 16
111
112#define DEFAULT_DMA_HINT_REG 0 90#define DEFAULT_DMA_HINT_REG 0
113 91
114#define ASTRO_RUNWAY_PORT 0x582 92struct sba_device *sba_list;
115#define IKE_MERCED_PORT 0x803 93EXPORT_SYMBOL_GPL(sba_list);
116#define REO_MERCED_PORT 0x804
117#define REOG_MERCED_PORT 0x805
118#define PLUTO_MCKINLEY_PORT 0x880
119
120#define SBA_FUNC_ID 0x0000 /* function id */
121#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
122
123#define IS_ASTRO(id) ((id)->hversion == ASTRO_RUNWAY_PORT)
124#define IS_IKE(id) ((id)->hversion == IKE_MERCED_PORT)
125#define IS_PLUTO(id) ((id)->hversion == PLUTO_MCKINLEY_PORT)
126
127#define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */
128
129#define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE)
130#define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE)
131/* Ike's IOC's occupy functions 2 and 3 */
132#define IKE_IOC_OFFSET(p) ((p+2) * SBA_FUNC_SIZE)
133
134#define IOC_CTRL 0x8 /* IOC_CTRL offset */
135#define IOC_CTRL_TC (1 << 0) /* TOC Enable */
136#define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */
137#define IOC_CTRL_DE (1 << 2) /* Dillon Enable */
138#define IOC_CTRL_RM (1 << 8) /* Real Mode */
139#define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */
140#define IOC_CTRL_D4 (1 << 11) /* Disable 4-byte coalescing */
141#define IOC_CTRL_DD (1 << 13) /* Disable distr. LMMIO range coalescing */
142
143#define MAX_IOC 2 /* per Ike. Pluto/Astro only have 1. */
144
145#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */
146
147
148/*
149** Offsets into MBIB (Function 0 on Ike and hopefully Astro)
150** Firmware programs this stuff. Don't touch it.
151*/
152#define LMMIO_DIRECT0_BASE 0x300
153#define LMMIO_DIRECT0_MASK 0x308
154#define LMMIO_DIRECT0_ROUTE 0x310
155
156#define LMMIO_DIST_BASE 0x360
157#define LMMIO_DIST_MASK 0x368
158#define LMMIO_DIST_ROUTE 0x370
159
160#define IOS_DIST_BASE 0x390
161#define IOS_DIST_MASK 0x398
162#define IOS_DIST_ROUTE 0x3A0
163
164#define IOS_DIRECT_BASE 0x3C0
165#define IOS_DIRECT_MASK 0x3C8
166#define IOS_DIRECT_ROUTE 0x3D0
167
168/*
169** Offsets into I/O TLB (Function 2 and 3 on Ike)
170*/
171#define ROPE0_CTL 0x200 /* "regbus pci0" */
172#define ROPE1_CTL 0x208
173#define ROPE2_CTL 0x210
174#define ROPE3_CTL 0x218
175#define ROPE4_CTL 0x220
176#define ROPE5_CTL 0x228
177#define ROPE6_CTL 0x230
178#define ROPE7_CTL 0x238
179
180#define IOC_ROPE0_CFG 0x500 /* pluto only */
181#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
182
183
184
185#define HF_ENABLE 0x40
186
187
188#define IOC_IBASE 0x300 /* IO TLB */
189#define IOC_IMASK 0x308
190#define IOC_PCOM 0x310
191#define IOC_TCNFG 0x318
192#define IOC_PDIR_BASE 0x320
193
194/* AGP GART driver looks for this */
195#define SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
196
197
198/*
199** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
200** It's safer (avoid memory corruption) to keep DMA page mappings
201** equivalently sized to VM PAGE_SIZE.
202**
203** We really can't avoid generating a new mapping for each
204** page since the Virtual Coherence Index has to be generated
205** and updated for each page.
206**
207** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse.
208*/
209#define IOVP_SIZE PAGE_SIZE
210#define IOVP_SHIFT PAGE_SHIFT
211#define IOVP_MASK PAGE_MASK
212
213#define SBA_PERF_CFG 0x708 /* Performance Counter stuff */
214#define SBA_PERF_MASK1 0x718
215#define SBA_PERF_MASK2 0x730
216
217
218/*
219** Offsets into PCI Performance Counters (functions 12 and 13)
220** Controlled by PERF registers in function 2 & 3 respectively.
221*/
222#define SBA_PERF_CNT1 0x200
223#define SBA_PERF_CNT2 0x208
224#define SBA_PERF_CNT3 0x210
225
226
227struct ioc {
228 void __iomem *ioc_hpa; /* I/O MMU base address */
229 char *res_map; /* resource map, bit == pdir entry */
230 u64 *pdir_base; /* physical base address */
231 unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
232 unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
233#ifdef ZX1_SUPPORT
234 unsigned long iovp_mask; /* help convert IOVA to IOVP */
235#endif
236 unsigned long *res_hint; /* next avail IOVP - circular search */
237 spinlock_t res_lock;
238 unsigned int res_bitshift; /* from the LEFT! */
239 unsigned int res_size; /* size of resource map in bytes */
240#ifdef SBA_HINT_SUPPORT
241/* FIXME : DMA HINTs not used */
242 unsigned long hint_mask_pdir; /* bits used for DMA hints */
243 unsigned int hint_shift_pdir;
244#endif
245#if DELAYED_RESOURCE_CNT > 0
246 int saved_cnt;
247 struct sba_dma_pair {
248 dma_addr_t iova;
249 size_t size;
250 } saved[DELAYED_RESOURCE_CNT];
251#endif
252
253#ifdef SBA_COLLECT_STATS
254#define SBA_SEARCH_SAMPLE 0x100
255 unsigned long avg_search[SBA_SEARCH_SAMPLE];
256 unsigned long avg_idx; /* current index into avg_search */
257 unsigned long used_pages;
258 unsigned long msingle_calls;
259 unsigned long msingle_pages;
260 unsigned long msg_calls;
261 unsigned long msg_pages;
262 unsigned long usingle_calls;
263 unsigned long usingle_pages;
264 unsigned long usg_calls;
265 unsigned long usg_pages;
266#endif
267
268 /* STUFF We don't need in performance path */
269 unsigned int pdir_size; /* in bytes, determined by IOV Space size */
270};
271
272struct sba_device {
273 struct sba_device *next; /* list of SBA's in system */
274 struct parisc_device *dev; /* dev found in bus walk */
275 struct parisc_device_id *iodc; /* data about dev from firmware */
276 const char *name;
277 void __iomem *sba_hpa; /* base address */
278 spinlock_t sba_lock;
279 unsigned int flags; /* state/functionality enabled */
280 unsigned int hw_rev; /* HW revision of chip */
281
282 struct resource chip_resv; /* MMIO reserved for chip */
283 struct resource iommu_resv; /* MMIO reserved for iommu */
284
285 unsigned int num_ioc; /* number of on-board IOC's */
286 struct ioc ioc[MAX_IOC];
287};
288
289
290static struct sba_device *sba_list;
291 94
292static unsigned long ioc_needs_fdc = 0; 95static unsigned long ioc_needs_fdc = 0;
293 96
@@ -300,8 +103,14 @@ static unsigned long piranha_bad_128k = 0;
300/* Looks nice and keeps the compiler happy */ 103/* Looks nice and keeps the compiler happy */
301#define SBA_DEV(d) ((struct sba_device *) (d)) 104#define SBA_DEV(d) ((struct sba_device *) (d))
302 105
106#ifdef CONFIG_AGP_PARISC
107#define SBA_AGP_SUPPORT
108#endif /*CONFIG_AGP_PARISC*/
109
303#ifdef SBA_AGP_SUPPORT 110#ifdef SBA_AGP_SUPPORT
304static int reserve_sba_gart = 1; 111static int sba_reserve_agpgart = 1;
112module_param(sba_reserve_agpgart, int, 1);
113MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
305#endif 114#endif
306 115
307#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1)) 116#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
@@ -741,7 +550,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
741 asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba)); 550 asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
742 pa |= (ci >> 12) & 0xff; /* move CI (8 bits) into lowest byte */ 551 pa |= (ci >> 12) & 0xff; /* move CI (8 bits) into lowest byte */
743 552
744 pa |= 0x8000000000000000ULL; /* set "valid" bit */ 553 pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
745 *pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */ 554 *pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
746 555
747 /* 556 /*
@@ -1498,6 +1307,10 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1498 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM); 1307 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
1499 1308
1500#ifdef SBA_AGP_SUPPORT 1309#ifdef SBA_AGP_SUPPORT
1310{
1311 struct klist_iter i;
1312 struct device *dev = NULL;
1313
1501 /* 1314 /*
1502 ** If an AGP device is present, only use half of the IOV space 1315 ** If an AGP device is present, only use half of the IOV space
1503 ** for PCI DMA. Unfortunately we can't know ahead of time 1316 ** for PCI DMA. Unfortunately we can't know ahead of time
@@ -1506,20 +1319,22 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1506 ** We program the next pdir index after we stop w/ a key for 1319 ** We program the next pdir index after we stop w/ a key for
1507 ** the GART code to handshake on. 1320 ** the GART code to handshake on.
1508 */ 1321 */
1509 device=NULL; 1322 klist_iter_init(&sba->dev.klist_children, &i);
1510 for (lba = sba->child; lba; lba = lba->sibling) { 1323 while (dev = next_device(&i)) {
1324 struct parisc_device *lba = to_parisc_device(dev);
1511 if (IS_QUICKSILVER(lba)) 1325 if (IS_QUICKSILVER(lba))
1512 break; 1326 agp_found = 1;
1513 } 1327 }
1328 klist_iter_exit(&sba->dev.klist_children, &i);
1514 1329
1515 if (lba) { 1330 if (agp_found && sba_reserve_agpgart) {
1516 DBG_INIT("%s: Reserving half of IOVA space for AGP GART support\n", __FUNCTION__); 1331 printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
1332 __FUNCTION__, (iova_space_size/2) >> 20);
1517 ioc->pdir_size /= 2; 1333 ioc->pdir_size /= 2;
1518 ((u64 *)ioc->pdir_base)[PDIR_INDEX(iova_space_size/2)] = SBA_IOMMU_COOKIE; 1334 ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
1519 } else {
1520 DBG_INIT("%s: No GART needed - no AGP controller found\n", __FUNCTION__);
1521 } 1335 }
1522#endif /* 0 */ 1336}
1337#endif /*SBA_AGP_SUPPORT*/
1523 1338
1524} 1339}
1525 1340
@@ -1701,7 +1516,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1701 } 1516 }
1702#endif 1517#endif
1703 1518
1704 if (!IS_PLUTO(sba_dev->iodc)) { 1519 if (!IS_PLUTO(sba_dev->dev)) {
1705 ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL); 1520 ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
1706 DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->", 1521 DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
1707 __FUNCTION__, sba_dev->sba_hpa, ioc_ctl); 1522 __FUNCTION__, sba_dev->sba_hpa, ioc_ctl);
@@ -1718,9 +1533,8 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1718#endif 1533#endif
1719 } /* if !PLUTO */ 1534 } /* if !PLUTO */
1720 1535
1721 if (IS_ASTRO(sba_dev->iodc)) { 1536 if (IS_ASTRO(sba_dev->dev)) {
1722 int err; 1537 int err;
1723 /* PAT_PDC (L-class) also reports the same goofy base */
1724 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET); 1538 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
1725 num_ioc = 1; 1539 num_ioc = 1;
1726 1540
@@ -1730,13 +1544,9 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1730 err = request_resource(&iomem_resource, &(sba_dev->chip_resv)); 1544 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1731 BUG_ON(err < 0); 1545 BUG_ON(err < 0);
1732 1546
1733 } else if (IS_PLUTO(sba_dev->iodc)) { 1547 } else if (IS_PLUTO(sba_dev->dev)) {
1734 int err; 1548 int err;
1735 1549
1736 /* We use a negative value for IOC HPA so it gets
1737 * corrected when we add it with IKE's IOC offset.
1738 * Doesnt look clean, but fewer code.
1739 */
1740 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET); 1550 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
1741 num_ioc = 1; 1551 num_ioc = 1;
1742 1552
@@ -1752,14 +1562,14 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1752 err = request_resource(&iomem_resource, &(sba_dev->iommu_resv)); 1562 err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
1753 WARN_ON(err < 0); 1563 WARN_ON(err < 0);
1754 } else { 1564 } else {
1755 /* IS_IKE (ie N-class, L3000, L1500) */ 1565 /* IKE, REO */
1756 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0)); 1566 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
1757 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1)); 1567 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
1758 num_ioc = 2; 1568 num_ioc = 2;
1759 1569
1760 /* TODO - LOOKUP Ike/Stretch chipset mem map */ 1570 /* TODO - LOOKUP Ike/Stretch chipset mem map */
1761 } 1571 }
1762 /* XXX: What about Reo? */ 1572 /* XXX: What about Reo Grande? */
1763 1573
1764 sba_dev->num_ioc = num_ioc; 1574 sba_dev->num_ioc = num_ioc;
1765 for (i = 0; i < num_ioc; i++) { 1575 for (i = 0; i < num_ioc; i++) {
@@ -1774,7 +1584,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1774 * Overrides bit 1 in DMA Hint Sets. 1584 * Overrides bit 1 in DMA Hint Sets.
1775 * Improves netperf UDP_STREAM by ~10% for bcm5701. 1585 * Improves netperf UDP_STREAM by ~10% for bcm5701.
1776 */ 1586 */
1777 if (IS_PLUTO(sba_dev->iodc)) { 1587 if (IS_PLUTO(sba_dev->dev)) {
1778 void __iomem *rope_cfg; 1588 void __iomem *rope_cfg;
1779 unsigned long cfg_val; 1589 unsigned long cfg_val;
1780 1590
@@ -1803,7 +1613,7 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1803 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400) 1613 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1804 ); 1614 );
1805 1615
1806 if (IS_PLUTO(sba_dev->iodc)) { 1616 if (IS_PLUTO(sba_dev->dev)) {
1807 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i); 1617 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
1808 } else { 1618 } else {
1809 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i); 1619 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
@@ -2067,7 +1877,7 @@ sba_driver_callback(struct parisc_device *dev)
2067 /* Read HW Rev First */ 1877 /* Read HW Rev First */
2068 func_class = READ_REG(sba_addr + SBA_FCLASS); 1878 func_class = READ_REG(sba_addr + SBA_FCLASS);
2069 1879
2070 if (IS_ASTRO(&dev->id)) { 1880 if (IS_ASTRO(dev)) {
2071 unsigned long fclass; 1881 unsigned long fclass;
2072 static char astro_rev[]="Astro ?.?"; 1882 static char astro_rev[]="Astro ?.?";
2073 1883
@@ -2078,11 +1888,11 @@ sba_driver_callback(struct parisc_device *dev)
2078 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3); 1888 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
2079 version = astro_rev; 1889 version = astro_rev;
2080 1890
2081 } else if (IS_IKE(&dev->id)) { 1891 } else if (IS_IKE(dev)) {
2082 static char ike_rev[] = "Ike rev ?"; 1892 static char ike_rev[] = "Ike rev ?";
2083 ike_rev[8] = '0' + (char) (func_class & 0xff); 1893 ike_rev[8] = '0' + (char) (func_class & 0xff);
2084 version = ike_rev; 1894 version = ike_rev;
2085 } else if (IS_PLUTO(&dev->id)) { 1895 } else if (IS_PLUTO(dev)) {
2086 static char pluto_rev[]="Pluto ?.?"; 1896 static char pluto_rev[]="Pluto ?.?";
2087 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4); 1897 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
2088 pluto_rev[8] = '0' + (char) (func_class & 0x0f); 1898 pluto_rev[8] = '0' + (char) (func_class & 0x0f);
@@ -2097,7 +1907,7 @@ sba_driver_callback(struct parisc_device *dev)
2097 global_ioc_cnt = count_parisc_driver(&sba_driver); 1907 global_ioc_cnt = count_parisc_driver(&sba_driver);
2098 1908
2099 /* Astro and Pluto have one IOC per SBA */ 1909 /* Astro and Pluto have one IOC per SBA */
2100 if ((!IS_ASTRO(&dev->id)) || (!IS_PLUTO(&dev->id))) 1910 if ((!IS_ASTRO(dev)) || (!IS_PLUTO(dev)))
2101 global_ioc_cnt *= 2; 1911 global_ioc_cnt *= 2;
2102 } 1912 }
2103 1913
@@ -2117,7 +1927,6 @@ sba_driver_callback(struct parisc_device *dev)
2117 1927
2118 sba_dev->dev = dev; 1928 sba_dev->dev = dev;
2119 sba_dev->hw_rev = func_class; 1929 sba_dev->hw_rev = func_class;
2120 sba_dev->iodc = &dev->id;
2121 sba_dev->name = dev->name; 1930 sba_dev->name = dev->name;
2122 sba_dev->sba_hpa = sba_addr; 1931 sba_dev->sba_hpa = sba_addr;
2123 1932
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index c27e782e6df9..30294127a0aa 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -52,3 +52,11 @@ config PCI_DEBUG
52 52
53 When in doubt, say N. 53 When in doubt, say N.
54 54
55config HT_IRQ
56 bool "Interrupts on hypertransport devices"
57 default y
58 depends on X86_LOCAL_APIC && X86_IO_APIC
59 help
60 This allows native hypertransport devices to use interrupts.
61
62 If unsure say Y.
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index f2d152b818f0..e3beb784406f 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -14,6 +14,12 @@ obj-$(CONFIG_HOTPLUG) += hotplug.o
14# Build the PCI Hotplug drivers if we were asked to 14# Build the PCI Hotplug drivers if we were asked to
15obj-$(CONFIG_HOTPLUG_PCI) += hotplug/ 15obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
16 16
17# Build the PCI MSI interrupt support
18obj-$(CONFIG_PCI_MSI) += msi.o
19
20# Build the Hypertransport interrupt support
21obj-$(CONFIG_HT_IRQ) += htirq.o
22
17# 23#
18# Some architectures use the generic PCI setup functions 24# Some architectures use the generic PCI setup functions
19# 25#
@@ -27,11 +33,6 @@ obj-$(CONFIG_PPC64) += setup-bus.o
27obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o 33obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
28obj-$(CONFIG_X86_VISWS) += setup-irq.o 34obj-$(CONFIG_X86_VISWS) += setup-irq.o
29 35
30msiobj-y := msi.o msi-apic.o
31msiobj-$(CONFIG_IA64_GENERIC) += msi-altix.o
32msiobj-$(CONFIG_IA64_SGI_SN2) += msi-altix.o
33obj-$(CONFIG_PCI_MSI) += $(msiobj-y)
34
35# 36#
36# ACPI Related PCI FW Functions 37# ACPI Related PCI FW Functions
37# 38#
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
new file mode 100644
index 000000000000..0e27f2404a83
--- /dev/null
+++ b/drivers/pci/htirq.c
@@ -0,0 +1,190 @@
1/*
2 * File: htirq.c
3 * Purpose: Hypertransport Interrupt Capability
4 *
5 * Copyright (C) 2006 Linux Networx
6 * Copyright (C) Eric Biederman <ebiederman@lnxi.com>
7 */
8
9#include <linux/irq.h>
10#include <linux/pci.h>
11#include <linux/spinlock.h>
12#include <linux/slab.h>
13#include <linux/gfp.h>
14#include <linux/htirq.h>
15
16/* Global ht irq lock.
17 *
18 * This is needed to serialize access to the data port in hypertransport
19 * irq capability.
20 *
21 * With multiple simultaneous hypertransport irq devices it might pay
22 * to make this more fine grained. But start with simple, stupid, and correct.
23 */
24static DEFINE_SPINLOCK(ht_irq_lock);
25
26struct ht_irq_cfg {
27 struct pci_dev *dev;
28 unsigned pos;
29 unsigned idx;
30};
31
32void write_ht_irq_low(unsigned int irq, u32 data)
33{
34 struct ht_irq_cfg *cfg = get_irq_data(irq);
35 unsigned long flags;
36 spin_lock_irqsave(&ht_irq_lock, flags);
37 pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
38 pci_write_config_dword(cfg->dev, cfg->pos + 4, data);
39 spin_unlock_irqrestore(&ht_irq_lock, flags);
40}
41
42void write_ht_irq_high(unsigned int irq, u32 data)
43{
44 struct ht_irq_cfg *cfg = get_irq_data(irq);
45 unsigned long flags;
46 spin_lock_irqsave(&ht_irq_lock, flags);
47 pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
48 pci_write_config_dword(cfg->dev, cfg->pos + 4, data);
49 spin_unlock_irqrestore(&ht_irq_lock, flags);
50}
51
52u32 read_ht_irq_low(unsigned int irq)
53{
54 struct ht_irq_cfg *cfg = get_irq_data(irq);
55 unsigned long flags;
56 u32 data;
57 spin_lock_irqsave(&ht_irq_lock, flags);
58 pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
59 pci_read_config_dword(cfg->dev, cfg->pos + 4, &data);
60 spin_unlock_irqrestore(&ht_irq_lock, flags);
61 return data;
62}
63
64u32 read_ht_irq_high(unsigned int irq)
65{
66 struct ht_irq_cfg *cfg = get_irq_data(irq);
67 unsigned long flags;
68 u32 data;
69 spin_lock_irqsave(&ht_irq_lock, flags);
70 pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
71 pci_read_config_dword(cfg->dev, cfg->pos + 4, &data);
72 spin_unlock_irqrestore(&ht_irq_lock, flags);
73 return data;
74}
75
76void mask_ht_irq(unsigned int irq)
77{
78 struct ht_irq_cfg *cfg;
79 unsigned long flags;
80 u32 data;
81
82 cfg = get_irq_data(irq);
83
84 spin_lock_irqsave(&ht_irq_lock, flags);
85 pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
86 pci_read_config_dword(cfg->dev, cfg->pos + 4, &data);
87 data |= 1;
88 pci_write_config_dword(cfg->dev, cfg->pos + 4, data);
89 spin_unlock_irqrestore(&ht_irq_lock, flags);
90}
91
92void unmask_ht_irq(unsigned int irq)
93{
94 struct ht_irq_cfg *cfg;
95 unsigned long flags;
96 u32 data;
97
98 cfg = get_irq_data(irq);
99
100 spin_lock_irqsave(&ht_irq_lock, flags);
101 pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
102 pci_read_config_dword(cfg->dev, cfg->pos + 4, &data);
103 data &= ~1;
104 pci_write_config_dword(cfg->dev, cfg->pos + 4, data);
105 spin_unlock_irqrestore(&ht_irq_lock, flags);
106}
107
108/**
109 * ht_create_irq - create an irq and attach it to a device.
110 * @dev: The hypertransport device to find the irq capability on.
111 * @idx: Which of the possible irqs to attach to.
112 *
113 * ht_create_irq is needs to be called for all hypertransport devices
114 * that generate irqs.
115 *
116 * The irq number of the new irq or a negative error value is returned.
117 */
118int ht_create_irq(struct pci_dev *dev, int idx)
119{
120 struct ht_irq_cfg *cfg;
121 unsigned long flags;
122 u32 data;
123 int max_irq;
124 int pos;
125 int irq;
126
127 pos = pci_find_capability(dev, PCI_CAP_ID_HT);
128 while (pos) {
129 u8 subtype;
130 pci_read_config_byte(dev, pos + 3, &subtype);
131 if (subtype == HT_CAPTYPE_IRQ)
132 break;
133 pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_HT);
134 }
135 if (!pos)
136 return -EINVAL;
137
138 /* Verify the idx I want to use is in range */
139 spin_lock_irqsave(&ht_irq_lock, flags);
140 pci_write_config_byte(dev, pos + 2, 1);
141 pci_read_config_dword(dev, pos + 4, &data);
142 spin_unlock_irqrestore(&ht_irq_lock, flags);
143
144 max_irq = (data >> 16) & 0xff;
145 if ( idx > max_irq)
146 return -EINVAL;
147
148 cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
149 if (!cfg)
150 return -ENOMEM;
151
152 cfg->dev = dev;
153 cfg->pos = pos;
154 cfg->idx = 0x10 + (idx * 2);
155
156 irq = create_irq();
157 if (irq < 0) {
158 kfree(cfg);
159 return -EBUSY;
160 }
161 set_irq_data(irq, cfg);
162
163 if (arch_setup_ht_irq(irq, dev) < 0) {
164 ht_destroy_irq(irq);
165 return -EBUSY;
166 }
167
168 return irq;
169}
170
171/**
172 * ht_destroy_irq - destroy an irq created with ht_create_irq
173 *
174 * This reverses ht_create_irq removing the specified irq from
175 * existence. The irq should be free before this happens.
176 */
177void ht_destroy_irq(unsigned int irq)
178{
179 struct ht_irq_cfg *cfg;
180
181 cfg = get_irq_data(irq);
182 set_irq_chip(irq, NULL);
183 set_irq_data(irq, NULL);
184 destroy_irq(irq);
185
186 kfree(cfg);
187}
188
189EXPORT_SYMBOL(ht_create_irq);
190EXPORT_SYMBOL(ht_destroy_irq);
diff --git a/drivers/pci/msi-apic.c b/drivers/pci/msi-apic.c
deleted file mode 100644
index 5ed798b319c7..000000000000
--- a/drivers/pci/msi-apic.c
+++ /dev/null
@@ -1,101 +0,0 @@
1/*
2 * MSI hooks for standard x86 apic
3 */
4
5#include <linux/pci.h>
6#include <linux/irq.h>
7#include <asm/smp.h>
8
9#include "msi.h"
10
11/*
12 * Shifts for APIC-based data
13 */
14
15#define MSI_DATA_VECTOR_SHIFT 0
16#define MSI_DATA_VECTOR(v) (((u8)v) << MSI_DATA_VECTOR_SHIFT)
17
18#define MSI_DATA_DELIVERY_SHIFT 8
19#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_SHIFT)
20#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_SHIFT)
21
22#define MSI_DATA_LEVEL_SHIFT 14
23#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
24#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
25
26#define MSI_DATA_TRIGGER_SHIFT 15
27#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
28#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
29
30/*
31 * Shift/mask fields for APIC-based bus address
32 */
33
34#define MSI_ADDR_HEADER 0xfee00000
35
36#define MSI_ADDR_DESTID_MASK 0xfff0000f
37#define MSI_ADDR_DESTID_CPU(cpu) ((cpu) << MSI_TARGET_CPU_SHIFT)
38
39#define MSI_ADDR_DESTMODE_SHIFT 2
40#define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT)
41#define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT)
42
43#define MSI_ADDR_REDIRECTION_SHIFT 3
44#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
45#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
46
47
48static void
49msi_target_apic(unsigned int vector,
50 unsigned int dest_cpu,
51 u32 *address_hi, /* in/out */
52 u32 *address_lo) /* in/out */
53{
54 u32 addr = *address_lo;
55
56 addr &= MSI_ADDR_DESTID_MASK;
57 addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(dest_cpu));
58
59 *address_lo = addr;
60}
61
62static int
63msi_setup_apic(struct pci_dev *pdev, /* unused in generic */
64 unsigned int vector,
65 u32 *address_hi,
66 u32 *address_lo,
67 u32 *data)
68{
69 unsigned long dest_phys_id;
70
71 dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
72
73 *address_hi = 0;
74 *address_lo = MSI_ADDR_HEADER |
75 MSI_ADDR_DESTMODE_PHYS |
76 MSI_ADDR_REDIRECTION_CPU |
77 MSI_ADDR_DESTID_CPU(dest_phys_id);
78
79 *data = MSI_DATA_TRIGGER_EDGE |
80 MSI_DATA_LEVEL_ASSERT |
81 MSI_DATA_DELIVERY_FIXED |
82 MSI_DATA_VECTOR(vector);
83
84 return 0;
85}
86
87static void
88msi_teardown_apic(unsigned int vector)
89{
90 return; /* no-op */
91}
92
93/*
94 * Generic ops used on most IA archs/platforms. Set with msi_register()
95 */
96
97struct msi_ops msi_apic_ops = {
98 .setup = msi_setup_apic,
99 .teardown = msi_teardown_apic,
100 .target = msi_target_apic,
101};
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 27a057409eca..f9fdc54473c4 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -6,6 +6,7 @@
6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) 6 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
7 */ 7 */
8 8
9#include <linux/err.h>
9#include <linux/mm.h> 10#include <linux/mm.h>
10#include <linux/irq.h> 11#include <linux/irq.h>
11#include <linux/interrupt.h> 12#include <linux/interrupt.h>
@@ -14,6 +15,7 @@
14#include <linux/smp_lock.h> 15#include <linux/smp_lock.h>
15#include <linux/pci.h> 16#include <linux/pci.h>
16#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
18#include <linux/msi.h>
17 19
18#include <asm/errno.h> 20#include <asm/errno.h>
19#include <asm/io.h> 21#include <asm/io.h>
@@ -27,23 +29,6 @@ static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
27static kmem_cache_t* msi_cachep; 29static kmem_cache_t* msi_cachep;
28 30
29static int pci_msi_enable = 1; 31static int pci_msi_enable = 1;
30static int last_alloc_vector;
31static int nr_released_vectors;
32static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
33static int nr_msix_devices;
34
35#ifndef CONFIG_X86_IO_APIC
36int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
37#endif
38
39static struct msi_ops *msi_ops;
40
41int
42msi_register(struct msi_ops *ops)
43{
44 msi_ops = ops;
45 return 0;
46}
47 32
48static int msi_cache_init(void) 33static int msi_cache_init(void)
49{ 34{
@@ -55,26 +40,25 @@ static int msi_cache_init(void)
55 return 0; 40 return 0;
56} 41}
57 42
58static void msi_set_mask_bit(unsigned int vector, int flag) 43static void msi_set_mask_bit(unsigned int irq, int flag)
59{ 44{
60 struct msi_desc *entry; 45 struct msi_desc *entry;
61 46
62 entry = (struct msi_desc *)msi_desc[vector]; 47 entry = msi_desc[irq];
63 if (!entry || !entry->dev || !entry->mask_base) 48 BUG_ON(!entry || !entry->dev);
64 return;
65 switch (entry->msi_attrib.type) { 49 switch (entry->msi_attrib.type) {
66 case PCI_CAP_ID_MSI: 50 case PCI_CAP_ID_MSI:
67 { 51 if (entry->msi_attrib.maskbit) {
68 int pos; 52 int pos;
69 u32 mask_bits; 53 u32 mask_bits;
70 54
71 pos = (long)entry->mask_base; 55 pos = (long)entry->mask_base;
72 pci_read_config_dword(entry->dev, pos, &mask_bits); 56 pci_read_config_dword(entry->dev, pos, &mask_bits);
73 mask_bits &= ~(1); 57 mask_bits &= ~(1);
74 mask_bits |= flag; 58 mask_bits |= flag;
75 pci_write_config_dword(entry->dev, pos, mask_bits); 59 pci_write_config_dword(entry->dev, pos, mask_bits);
60 }
76 break; 61 break;
77 }
78 case PCI_CAP_ID_MSIX: 62 case PCI_CAP_ID_MSIX:
79 { 63 {
80 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 64 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
@@ -83,261 +67,101 @@ static void msi_set_mask_bit(unsigned int vector, int flag)
83 break; 67 break;
84 } 68 }
85 default: 69 default:
70 BUG();
86 break; 71 break;
87 } 72 }
88} 73}
89 74
90#ifdef CONFIG_SMP 75void read_msi_msg(unsigned int irq, struct msi_msg *msg)
91static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
92{ 76{
93 struct msi_desc *entry; 77 struct msi_desc *entry = get_irq_data(irq);
94 u32 address_hi, address_lo; 78 switch(entry->msi_attrib.type) {
95 unsigned int irq = vector;
96 unsigned int dest_cpu = first_cpu(cpu_mask);
97
98 entry = (struct msi_desc *)msi_desc[vector];
99 if (!entry || !entry->dev)
100 return;
101
102 switch (entry->msi_attrib.type) {
103 case PCI_CAP_ID_MSI: 79 case PCI_CAP_ID_MSI:
104 { 80 {
105 int pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI); 81 struct pci_dev *dev = entry->dev;
106 82 int pos = entry->msi_attrib.pos;
107 if (!pos) 83 u16 data;
108 return; 84
109 85 pci_read_config_dword(dev, msi_lower_address_reg(pos),
110 pci_read_config_dword(entry->dev, msi_upper_address_reg(pos), 86 &msg->address_lo);
111 &address_hi); 87 if (entry->msi_attrib.is_64) {
112 pci_read_config_dword(entry->dev, msi_lower_address_reg(pos), 88 pci_read_config_dword(dev, msi_upper_address_reg(pos),
113 &address_lo); 89 &msg->address_hi);
114 90 pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
115 msi_ops->target(vector, dest_cpu, &address_hi, &address_lo); 91 } else {
116 92 msg->address_hi = 0;
117 pci_write_config_dword(entry->dev, msi_upper_address_reg(pos), 93 pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
118 address_hi); 94 }
119 pci_write_config_dword(entry->dev, msi_lower_address_reg(pos), 95 msg->data = data;
120 address_lo);
121 set_native_irq_info(irq, cpu_mask);
122 break; 96 break;
123 } 97 }
124 case PCI_CAP_ID_MSIX: 98 case PCI_CAP_ID_MSIX:
125 { 99 {
126 int offset_hi = 100 void __iomem *base;
127 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + 101 base = entry->mask_base +
128 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET; 102 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
129 int offset_lo =
130 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
131 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;
132
133 address_hi = readl(entry->mask_base + offset_hi);
134 address_lo = readl(entry->mask_base + offset_lo);
135 103
136 msi_ops->target(vector, dest_cpu, &address_hi, &address_lo); 104 msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
105 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
106 msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
107 break;
108 }
109 default:
110 BUG();
111 }
112}
137 113
138 writel(address_hi, entry->mask_base + offset_hi); 114void write_msi_msg(unsigned int irq, struct msi_msg *msg)
139 writel(address_lo, entry->mask_base + offset_lo); 115{
140 set_native_irq_info(irq, cpu_mask); 116 struct msi_desc *entry = get_irq_data(irq);
117 switch (entry->msi_attrib.type) {
118 case PCI_CAP_ID_MSI:
119 {
120 struct pci_dev *dev = entry->dev;
121 int pos = entry->msi_attrib.pos;
122
123 pci_write_config_dword(dev, msi_lower_address_reg(pos),
124 msg->address_lo);
125 if (entry->msi_attrib.is_64) {
126 pci_write_config_dword(dev, msi_upper_address_reg(pos),
127 msg->address_hi);
128 pci_write_config_word(dev, msi_data_reg(pos, 1),
129 msg->data);
130 } else {
131 pci_write_config_word(dev, msi_data_reg(pos, 0),
132 msg->data);
133 }
141 break; 134 break;
142 } 135 }
143 default: 136 case PCI_CAP_ID_MSIX:
137 {
138 void __iomem *base;
139 base = entry->mask_base +
140 entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
141
142 writel(msg->address_lo,
143 base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
144 writel(msg->address_hi,
145 base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
146 writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
144 break; 147 break;
145 } 148 }
146} 149 default:
147#else 150 BUG();
148#define set_msi_affinity NULL
149#endif /* CONFIG_SMP */
150
151static void mask_MSI_irq(unsigned int vector)
152{
153 msi_set_mask_bit(vector, 1);
154}
155
156static void unmask_MSI_irq(unsigned int vector)
157{
158 msi_set_mask_bit(vector, 0);
159}
160
161static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
162{
163 struct msi_desc *entry;
164 unsigned long flags;
165
166 spin_lock_irqsave(&msi_lock, flags);
167 entry = msi_desc[vector];
168 if (!entry || !entry->dev) {
169 spin_unlock_irqrestore(&msi_lock, flags);
170 return 0;
171 } 151 }
172 entry->msi_attrib.state = 1; /* Mark it active */
173 spin_unlock_irqrestore(&msi_lock, flags);
174
175 return 0; /* never anything pending */
176} 152}
177 153
178static unsigned int startup_msi_irq_w_maskbit(unsigned int vector) 154void mask_msi_irq(unsigned int irq)
179{ 155{
180 startup_msi_irq_wo_maskbit(vector); 156 msi_set_mask_bit(irq, 1);
181 unmask_MSI_irq(vector);
182 return 0; /* never anything pending */
183}
184
185static void shutdown_msi_irq(unsigned int vector)
186{
187 struct msi_desc *entry;
188 unsigned long flags;
189
190 spin_lock_irqsave(&msi_lock, flags);
191 entry = msi_desc[vector];
192 if (entry && entry->dev)
193 entry->msi_attrib.state = 0; /* Mark it not active */
194 spin_unlock_irqrestore(&msi_lock, flags);
195} 157}
196 158
197static void end_msi_irq_wo_maskbit(unsigned int vector) 159void unmask_msi_irq(unsigned int irq)
198{ 160{
199 move_native_irq(vector); 161 msi_set_mask_bit(irq, 0);
200 ack_APIC_irq();
201}
202
203static void end_msi_irq_w_maskbit(unsigned int vector)
204{
205 move_native_irq(vector);
206 unmask_MSI_irq(vector);
207 ack_APIC_irq();
208}
209
210static void do_nothing(unsigned int vector)
211{
212}
213
214/*
215 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
216 * which implement the MSI-X Capability Structure.
217 */
218static struct hw_interrupt_type msix_irq_type = {
219 .typename = "PCI-MSI-X",
220 .startup = startup_msi_irq_w_maskbit,
221 .shutdown = shutdown_msi_irq,
222 .enable = unmask_MSI_irq,
223 .disable = mask_MSI_irq,
224 .ack = mask_MSI_irq,
225 .end = end_msi_irq_w_maskbit,
226 .set_affinity = set_msi_affinity
227};
228
229/*
230 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
231 * which implement the MSI Capability Structure with
232 * Mask-and-Pending Bits.
233 */
234static struct hw_interrupt_type msi_irq_w_maskbit_type = {
235 .typename = "PCI-MSI",
236 .startup = startup_msi_irq_w_maskbit,
237 .shutdown = shutdown_msi_irq,
238 .enable = unmask_MSI_irq,
239 .disable = mask_MSI_irq,
240 .ack = mask_MSI_irq,
241 .end = end_msi_irq_w_maskbit,
242 .set_affinity = set_msi_affinity
243};
244
245/*
246 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
247 * which implement the MSI Capability Structure without
248 * Mask-and-Pending Bits.
249 */
250static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
251 .typename = "PCI-MSI",
252 .startup = startup_msi_irq_wo_maskbit,
253 .shutdown = shutdown_msi_irq,
254 .enable = do_nothing,
255 .disable = do_nothing,
256 .ack = do_nothing,
257 .end = end_msi_irq_wo_maskbit,
258 .set_affinity = set_msi_affinity
259};
260
261static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
262static int assign_msi_vector(void)
263{
264 static int new_vector_avail = 1;
265 int vector;
266 unsigned long flags;
267
268 /*
269 * msi_lock is provided to ensure that successful allocation of MSI
270 * vector is assigned unique among drivers.
271 */
272 spin_lock_irqsave(&msi_lock, flags);
273
274 if (!new_vector_avail) {
275 int free_vector = 0;
276
277 /*
278 * vector_irq[] = -1 indicates that this specific vector is:
279 * - assigned for MSI (since MSI have no associated IRQ) or
280 * - assigned for legacy if less than 16, or
281 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
282 * vector_irq[] = 0 indicates that this vector, previously
283 * assigned for MSI, is freed by hotplug removed operations.
284 * This vector will be reused for any subsequent hotplug added
285 * operations.
286 * vector_irq[] > 0 indicates that this vector is assigned for
287 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
288 * vector-to-IOxAPIC IRQ mapping.
289 */
290 for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
291 if (vector_irq[vector] != 0)
292 continue;
293 free_vector = vector;
294 if (!msi_desc[vector])
295 break;
296 else
297 continue;
298 }
299 if (!free_vector) {
300 spin_unlock_irqrestore(&msi_lock, flags);
301 return -EBUSY;
302 }
303 vector_irq[free_vector] = -1;
304 nr_released_vectors--;
305 spin_unlock_irqrestore(&msi_lock, flags);
306 if (msi_desc[free_vector] != NULL) {
307 struct pci_dev *dev;
308 int tail;
309
310 /* free all linked vectors before re-assign */
311 do {
312 spin_lock_irqsave(&msi_lock, flags);
313 dev = msi_desc[free_vector]->dev;
314 tail = msi_desc[free_vector]->link.tail;
315 spin_unlock_irqrestore(&msi_lock, flags);
316 msi_free_vector(dev, tail, 1);
317 } while (free_vector != tail);
318 }
319
320 return free_vector;
321 }
322 vector = assign_irq_vector(AUTO_ASSIGN);
323 last_alloc_vector = vector;
324 if (vector == LAST_DEVICE_VECTOR)
325 new_vector_avail = 0;
326
327 spin_unlock_irqrestore(&msi_lock, flags);
328 return vector;
329}
330
331static int get_new_vector(void)
332{
333 int vector = assign_msi_vector();
334
335 if (vector > 0)
336 set_intr_gate(vector, interrupt[vector]);
337
338 return vector;
339} 162}
340 163
164static int msi_free_irq(struct pci_dev* dev, int irq);
341static int msi_init(void) 165static int msi_init(void)
342{ 166{
343 static int status = -ENOMEM; 167 static int status = -ENOMEM;
@@ -352,22 +176,6 @@ static int msi_init(void)
352 return status; 176 return status;
353 } 177 }
354 178
355 status = msi_arch_init();
356 if (status < 0) {
357 pci_msi_enable = 0;
358 printk(KERN_WARNING
359 "PCI: MSI arch init failed. MSI disabled.\n");
360 return status;
361 }
362
363 if (! msi_ops) {
364 printk(KERN_WARNING
365 "PCI: MSI ops not registered. MSI disabled.\n");
366 status = -EINVAL;
367 return status;
368 }
369
370 last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
371 status = msi_cache_init(); 179 status = msi_cache_init();
372 if (status < 0) { 180 if (status < 0) {
373 pci_msi_enable = 0; 181 pci_msi_enable = 0;
@@ -375,23 +183,9 @@ static int msi_init(void)
375 return status; 183 return status;
376 } 184 }
377 185
378 if (last_alloc_vector < 0) {
379 pci_msi_enable = 0;
380 printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
381 status = -EBUSY;
382 return status;
383 }
384 vector_irq[last_alloc_vector] = 0;
385 nr_released_vectors++;
386
387 return status; 186 return status;
388} 187}
389 188
390static int get_msi_vector(struct pci_dev *dev)
391{
392 return get_new_vector();
393}
394
395static struct msi_desc* alloc_msi_entry(void) 189static struct msi_desc* alloc_msi_entry(void)
396{ 190{
397 struct msi_desc *entry; 191 struct msi_desc *entry;
@@ -406,29 +200,44 @@ static struct msi_desc* alloc_msi_entry(void)
406 return entry; 200 return entry;
407} 201}
408 202
409static void attach_msi_entry(struct msi_desc *entry, int vector) 203static void attach_msi_entry(struct msi_desc *entry, int irq)
410{ 204{
411 unsigned long flags; 205 unsigned long flags;
412 206
413 spin_lock_irqsave(&msi_lock, flags); 207 spin_lock_irqsave(&msi_lock, flags);
414 msi_desc[vector] = entry; 208 msi_desc[irq] = entry;
415 spin_unlock_irqrestore(&msi_lock, flags); 209 spin_unlock_irqrestore(&msi_lock, flags);
416} 210}
417 211
418static void irq_handler_init(int cap_id, int pos, int mask) 212static int create_msi_irq(void)
419{ 213{
420 unsigned long flags; 214 struct msi_desc *entry;
215 int irq;
421 216
422 spin_lock_irqsave(&irq_desc[pos].lock, flags); 217 entry = alloc_msi_entry();
423 if (cap_id == PCI_CAP_ID_MSIX) 218 if (!entry)
424 irq_desc[pos].chip = &msix_irq_type; 219 return -ENOMEM;
425 else { 220
426 if (!mask) 221 irq = create_irq();
427 irq_desc[pos].chip = &msi_irq_wo_maskbit_type; 222 if (irq < 0) {
428 else 223 kmem_cache_free(msi_cachep, entry);
429 irq_desc[pos].chip = &msi_irq_w_maskbit_type; 224 return -EBUSY;
430 } 225 }
431 spin_unlock_irqrestore(&irq_desc[pos].lock, flags); 226
227 set_irq_data(irq, entry);
228
229 return irq;
230}
231
232static void destroy_msi_irq(unsigned int irq)
233{
234 struct msi_desc *entry;
235
236 entry = get_irq_data(irq);
237 set_irq_chip(irq, NULL);
238 set_irq_data(irq, NULL);
239 destroy_irq(irq);
240 kmem_cache_free(msi_cachep, entry);
432} 241}
433 242
434static void enable_msi_mode(struct pci_dev *dev, int pos, int type) 243static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
@@ -473,21 +282,21 @@ void disable_msi_mode(struct pci_dev *dev, int pos, int type)
473 } 282 }
474} 283}
475 284
476static int msi_lookup_vector(struct pci_dev *dev, int type) 285static int msi_lookup_irq(struct pci_dev *dev, int type)
477{ 286{
478 int vector; 287 int irq;
479 unsigned long flags; 288 unsigned long flags;
480 289
481 spin_lock_irqsave(&msi_lock, flags); 290 spin_lock_irqsave(&msi_lock, flags);
482 for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) { 291 for (irq = 0; irq < NR_IRQS; irq++) {
483 if (!msi_desc[vector] || msi_desc[vector]->dev != dev || 292 if (!msi_desc[irq] || msi_desc[irq]->dev != dev ||
484 msi_desc[vector]->msi_attrib.type != type || 293 msi_desc[irq]->msi_attrib.type != type ||
485 msi_desc[vector]->msi_attrib.default_vector != dev->irq) 294 msi_desc[irq]->msi_attrib.default_irq != dev->irq)
486 continue; 295 continue;
487 spin_unlock_irqrestore(&msi_lock, flags); 296 spin_unlock_irqrestore(&msi_lock, flags);
488 /* This pre-assigned MSI vector for this device 297 /* This pre-assigned MSI irq for this device
489 already exits. Override dev->irq with this vector */ 298 already exits. Override dev->irq with this irq */
490 dev->irq = vector; 299 dev->irq = irq;
491 return 0; 300 return 0;
492 } 301 }
493 spin_unlock_irqrestore(&msi_lock, flags); 302 spin_unlock_irqrestore(&msi_lock, flags);
@@ -499,11 +308,6 @@ void pci_scan_msi_device(struct pci_dev *dev)
499{ 308{
500 if (!dev) 309 if (!dev)
501 return; 310 return;
502
503 if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
504 nr_msix_devices++;
505 else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
506 nr_reserved_vectors++;
507} 311}
508 312
509#ifdef CONFIG_PM 313#ifdef CONFIG_PM
@@ -577,7 +381,7 @@ int pci_save_msix_state(struct pci_dev *dev)
577{ 381{
578 int pos; 382 int pos;
579 int temp; 383 int temp;
580 int vector, head, tail = 0; 384 int irq, head, tail = 0;
581 u16 control; 385 u16 control;
582 struct pci_cap_saved_state *save_state; 386 struct pci_cap_saved_state *save_state;
583 387
@@ -599,33 +403,20 @@ int pci_save_msix_state(struct pci_dev *dev)
599 403
600 /* save the table */ 404 /* save the table */
601 temp = dev->irq; 405 temp = dev->irq;
602 if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { 406 if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
603 kfree(save_state); 407 kfree(save_state);
604 return -EINVAL; 408 return -EINVAL;
605 } 409 }
606 410
607 vector = head = dev->irq; 411 irq = head = dev->irq;
608 while (head != tail) { 412 while (head != tail) {
609 int j;
610 void __iomem *base;
611 struct msi_desc *entry; 413 struct msi_desc *entry;
612 414
613 entry = msi_desc[vector]; 415 entry = msi_desc[irq];
614 base = entry->mask_base; 416 read_msi_msg(irq, &entry->msg_save);
615 j = entry->msi_attrib.entry_nr; 417
616 418 tail = msi_desc[irq]->link.tail;
617 entry->address_lo_save = 419 irq = tail;
618 readl(base + j * PCI_MSIX_ENTRY_SIZE +
619 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
620 entry->address_hi_save =
621 readl(base + j * PCI_MSIX_ENTRY_SIZE +
622 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
623 entry->data_save =
624 readl(base + j * PCI_MSIX_ENTRY_SIZE +
625 PCI_MSIX_ENTRY_DATA_OFFSET);
626
627 tail = msi_desc[vector]->link.tail;
628 vector = tail;
629 } 420 }
630 dev->irq = temp; 421 dev->irq = temp;
631 422
@@ -638,9 +429,7 @@ void pci_restore_msix_state(struct pci_dev *dev)
638{ 429{
639 u16 save; 430 u16 save;
640 int pos; 431 int pos;
641 int vector, head, tail = 0; 432 int irq, head, tail = 0;
642 void __iomem *base;
643 int j;
644 struct msi_desc *entry; 433 struct msi_desc *entry;
645 int temp; 434 int temp;
646 struct pci_cap_saved_state *save_state; 435 struct pci_cap_saved_state *save_state;
@@ -658,26 +447,15 @@ void pci_restore_msix_state(struct pci_dev *dev)
658 447
659 /* route the table */ 448 /* route the table */
660 temp = dev->irq; 449 temp = dev->irq;
661 if (msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) 450 if (msi_lookup_irq(dev, PCI_CAP_ID_MSIX))
662 return; 451 return;
663 vector = head = dev->irq; 452 irq = head = dev->irq;
664 while (head != tail) { 453 while (head != tail) {
665 entry = msi_desc[vector]; 454 entry = msi_desc[irq];
666 base = entry->mask_base; 455 write_msi_msg(irq, &entry->msg_save);
667 j = entry->msi_attrib.entry_nr; 456
668 457 tail = msi_desc[irq]->link.tail;
669 writel(entry->address_lo_save, 458 irq = tail;
670 base + j * PCI_MSIX_ENTRY_SIZE +
671 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
672 writel(entry->address_hi_save,
673 base + j * PCI_MSIX_ENTRY_SIZE +
674 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
675 writel(entry->data_save,
676 base + j * PCI_MSIX_ENTRY_SIZE +
677 PCI_MSIX_ENTRY_DATA_OFFSET);
678
679 tail = msi_desc[vector]->link.tail;
680 vector = tail;
681 } 459 }
682 dev->irq = temp; 460 dev->irq = temp;
683 461
@@ -686,104 +464,68 @@ void pci_restore_msix_state(struct pci_dev *dev)
686} 464}
687#endif 465#endif
688 466
689static int msi_register_init(struct pci_dev *dev, struct msi_desc *entry)
690{
691 int status;
692 u32 address_hi;
693 u32 address_lo;
694 u32 data;
695 int pos, vector = dev->irq;
696 u16 control;
697
698 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
699 pci_read_config_word(dev, msi_control_reg(pos), &control);
700
701 /* Configure MSI capability structure */
702 status = msi_ops->setup(dev, vector, &address_hi, &address_lo, &data);
703 if (status < 0)
704 return status;
705
706 pci_write_config_dword(dev, msi_lower_address_reg(pos), address_lo);
707 if (is_64bit_address(control)) {
708 pci_write_config_dword(dev,
709 msi_upper_address_reg(pos), address_hi);
710 pci_write_config_word(dev,
711 msi_data_reg(pos, 1), data);
712 } else
713 pci_write_config_word(dev,
714 msi_data_reg(pos, 0), data);
715 if (entry->msi_attrib.maskbit) {
716 unsigned int maskbits, temp;
717 /* All MSIs are unmasked by default, Mask them all */
718 pci_read_config_dword(dev,
719 msi_mask_bits_reg(pos, is_64bit_address(control)),
720 &maskbits);
721 temp = (1 << multi_msi_capable(control));
722 temp = ((temp - 1) & ~temp);
723 maskbits |= temp;
724 pci_write_config_dword(dev,
725 msi_mask_bits_reg(pos, is_64bit_address(control)),
726 maskbits);
727 }
728
729 return 0;
730}
731
732/** 467/**
733 * msi_capability_init - configure device's MSI capability structure 468 * msi_capability_init - configure device's MSI capability structure
734 * @dev: pointer to the pci_dev data structure of MSI device function 469 * @dev: pointer to the pci_dev data structure of MSI device function
735 * 470 *
736 * Setup the MSI capability structure of device function with a single 471 * Setup the MSI capability structure of device function with a single
737 * MSI vector, regardless of device function is capable of handling 472 * MSI irq, regardless of device function is capable of handling
738 * multiple messages. A return of zero indicates the successful setup 473 * multiple messages. A return of zero indicates the successful setup
739 * of an entry zero with the new MSI vector or non-zero for otherwise. 474 * of an entry zero with the new MSI irq or non-zero for otherwise.
740 **/ 475 **/
741static int msi_capability_init(struct pci_dev *dev) 476static int msi_capability_init(struct pci_dev *dev)
742{ 477{
743 int status; 478 int status;
744 struct msi_desc *entry; 479 struct msi_desc *entry;
745 int pos, vector; 480 int pos, irq;
746 u16 control; 481 u16 control;
747 482
748 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 483 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
749 pci_read_config_word(dev, msi_control_reg(pos), &control); 484 pci_read_config_word(dev, msi_control_reg(pos), &control);
750 /* MSI Entry Initialization */ 485 /* MSI Entry Initialization */
751 entry = alloc_msi_entry(); 486 irq = create_msi_irq();
752 if (!entry) 487 if (irq < 0)
753 return -ENOMEM; 488 return irq;
754 489
755 vector = get_msi_vector(dev); 490 entry = get_irq_data(irq);
756 if (vector < 0) { 491 entry->link.head = irq;
757 kmem_cache_free(msi_cachep, entry); 492 entry->link.tail = irq;
758 return -EBUSY;
759 }
760 entry->link.head = vector;
761 entry->link.tail = vector;
762 entry->msi_attrib.type = PCI_CAP_ID_MSI; 493 entry->msi_attrib.type = PCI_CAP_ID_MSI;
763 entry->msi_attrib.state = 0; /* Mark it not active */ 494 entry->msi_attrib.is_64 = is_64bit_address(control);
764 entry->msi_attrib.entry_nr = 0; 495 entry->msi_attrib.entry_nr = 0;
765 entry->msi_attrib.maskbit = is_mask_bit_support(control); 496 entry->msi_attrib.maskbit = is_mask_bit_support(control);
766 entry->msi_attrib.default_vector = dev->irq; /* Save IOAPIC IRQ */ 497 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
767 dev->irq = vector; 498 entry->msi_attrib.pos = pos;
768 entry->dev = dev;
769 if (is_mask_bit_support(control)) { 499 if (is_mask_bit_support(control)) {
770 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, 500 entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
771 is_64bit_address(control)); 501 is_64bit_address(control));
772 } 502 }
773 /* Replace with MSI handler */ 503 entry->dev = dev;
774 irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit); 504 if (entry->msi_attrib.maskbit) {
505 unsigned int maskbits, temp;
506 /* All MSIs are unmasked by default, Mask them all */
507 pci_read_config_dword(dev,
508 msi_mask_bits_reg(pos, is_64bit_address(control)),
509 &maskbits);
510 temp = (1 << multi_msi_capable(control));
511 temp = ((temp - 1) & ~temp);
512 maskbits |= temp;
513 pci_write_config_dword(dev,
514 msi_mask_bits_reg(pos, is_64bit_address(control)),
515 maskbits);
516 }
775 /* Configure MSI capability structure */ 517 /* Configure MSI capability structure */
776 status = msi_register_init(dev, entry); 518 status = arch_setup_msi_irq(irq, dev);
777 if (status != 0) { 519 if (status < 0) {
778 dev->irq = entry->msi_attrib.default_vector; 520 destroy_msi_irq(irq);
779 kmem_cache_free(msi_cachep, entry);
780 return status; 521 return status;
781 } 522 }
782 523
783 attach_msi_entry(entry, vector); 524 attach_msi_entry(entry, irq);
784 /* Set MSI enabled bits */ 525 /* Set MSI enabled bits */
785 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI); 526 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
786 527
528 dev->irq = irq;
787 return 0; 529 return 0;
788} 530}
789 531
@@ -794,18 +536,15 @@ static int msi_capability_init(struct pci_dev *dev)
794 * @nvec: number of @entries 536 * @nvec: number of @entries
795 * 537 *
796 * Setup the MSI-X capability structure of device function with a 538 * Setup the MSI-X capability structure of device function with a
797 * single MSI-X vector. A return of zero indicates the successful setup of 539 * single MSI-X irq. A return of zero indicates the successful setup of
798 * requested MSI-X entries with allocated vectors or non-zero for otherwise. 540 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
799 **/ 541 **/
800static int msix_capability_init(struct pci_dev *dev, 542static int msix_capability_init(struct pci_dev *dev,
801 struct msix_entry *entries, int nvec) 543 struct msix_entry *entries, int nvec)
802{ 544{
803 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL; 545 struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
804 u32 address_hi;
805 u32 address_lo;
806 u32 data;
807 int status; 546 int status;
808 int vector, pos, i, j, nr_entries, temp = 0; 547 int irq, pos, i, j, nr_entries, temp = 0;
809 unsigned long phys_addr; 548 unsigned long phys_addr;
810 u32 table_offset; 549 u32 table_offset;
811 u16 control; 550 u16 control;
@@ -827,65 +566,56 @@ static int msix_capability_init(struct pci_dev *dev,
827 566
828 /* MSI-X Table Initialization */ 567 /* MSI-X Table Initialization */
829 for (i = 0; i < nvec; i++) { 568 for (i = 0; i < nvec; i++) {
830 entry = alloc_msi_entry(); 569 irq = create_msi_irq();
831 if (!entry) 570 if (irq < 0)
832 break; 571 break;
833 vector = get_msi_vector(dev);
834 if (vector < 0) {
835 kmem_cache_free(msi_cachep, entry);
836 break;
837 }
838 572
573 entry = get_irq_data(irq);
839 j = entries[i].entry; 574 j = entries[i].entry;
840 entries[i].vector = vector; 575 entries[i].vector = irq;
841 entry->msi_attrib.type = PCI_CAP_ID_MSIX; 576 entry->msi_attrib.type = PCI_CAP_ID_MSIX;
842 entry->msi_attrib.state = 0; /* Mark it not active */ 577 entry->msi_attrib.is_64 = 1;
843 entry->msi_attrib.entry_nr = j; 578 entry->msi_attrib.entry_nr = j;
844 entry->msi_attrib.maskbit = 1; 579 entry->msi_attrib.maskbit = 1;
845 entry->msi_attrib.default_vector = dev->irq; 580 entry->msi_attrib.default_irq = dev->irq;
581 entry->msi_attrib.pos = pos;
846 entry->dev = dev; 582 entry->dev = dev;
847 entry->mask_base = base; 583 entry->mask_base = base;
848 if (!head) { 584 if (!head) {
849 entry->link.head = vector; 585 entry->link.head = irq;
850 entry->link.tail = vector; 586 entry->link.tail = irq;
851 head = entry; 587 head = entry;
852 } else { 588 } else {
853 entry->link.head = temp; 589 entry->link.head = temp;
854 entry->link.tail = tail->link.tail; 590 entry->link.tail = tail->link.tail;
855 tail->link.tail = vector; 591 tail->link.tail = irq;
856 head->link.head = vector; 592 head->link.head = irq;
857 } 593 }
858 temp = vector; 594 temp = irq;
859 tail = entry; 595 tail = entry;
860 /* Replace with MSI-X handler */
861 irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
862 /* Configure MSI-X capability structure */ 596 /* Configure MSI-X capability structure */
863 status = msi_ops->setup(dev, vector, 597 status = arch_setup_msi_irq(irq, dev);
864 &address_hi, 598 if (status < 0) {
865 &address_lo, 599 destroy_msi_irq(irq);
866 &data);
867 if (status < 0)
868 break; 600 break;
601 }
869 602
870 writel(address_lo, 603 attach_msi_entry(entry, irq);
871 base + j * PCI_MSIX_ENTRY_SIZE +
872 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
873 writel(address_hi,
874 base + j * PCI_MSIX_ENTRY_SIZE +
875 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
876 writel(data,
877 base + j * PCI_MSIX_ENTRY_SIZE +
878 PCI_MSIX_ENTRY_DATA_OFFSET);
879 attach_msi_entry(entry, vector);
880 } 604 }
881 if (i != nvec) { 605 if (i != nvec) {
606 int avail = i - 1;
882 i--; 607 i--;
883 for (; i >= 0; i--) { 608 for (; i >= 0; i--) {
884 vector = (entries + i)->vector; 609 irq = (entries + i)->vector;
885 msi_free_vector(dev, vector, 0); 610 msi_free_irq(dev, irq);
886 (entries + i)->vector = 0; 611 (entries + i)->vector = 0;
887 } 612 }
888 return -EBUSY; 613 /* If we had some success report the number of irqs
614 * we succeeded in setting up.
615 */
616 if (avail <= 0)
617 avail = -EBUSY;
618 return avail;
889 } 619 }
890 /* Set MSI-X enabled bits */ 620 /* Set MSI-X enabled bits */
891 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX); 621 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
@@ -925,15 +655,14 @@ int pci_msi_supported(struct pci_dev * dev)
925 * @dev: pointer to the pci_dev data structure of MSI device function 655 * @dev: pointer to the pci_dev data structure of MSI device function
926 * 656 *
927 * Setup the MSI capability structure of device function with 657 * Setup the MSI capability structure of device function with
928 * a single MSI vector upon its software driver call to request for 658 * a single MSI irq upon its software driver call to request for
929 * MSI mode enabled on its hardware device function. A return of zero 659 * MSI mode enabled on its hardware device function. A return of zero
930 * indicates the successful setup of an entry zero with the new MSI 660 * indicates the successful setup of an entry zero with the new MSI
931 * vector or non-zero for otherwise. 661 * irq or non-zero for otherwise.
932 **/ 662 **/
933int pci_enable_msi(struct pci_dev* dev) 663int pci_enable_msi(struct pci_dev* dev)
934{ 664{
935 int pos, temp, status; 665 int pos, temp, status;
936 u16 control;
937 666
938 if (pci_msi_supported(dev) < 0) 667 if (pci_msi_supported(dev) < 0)
939 return -EINVAL; 668 return -EINVAL;
@@ -948,52 +677,25 @@ int pci_enable_msi(struct pci_dev* dev)
948 if (!pos) 677 if (!pos)
949 return -EINVAL; 678 return -EINVAL;
950 679
951 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) { 680 WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSI));
952 /* Lookup Sucess */
953 unsigned long flags;
954 681
955 pci_read_config_word(dev, msi_control_reg(pos), &control); 682 /* Check whether driver already requested for MSI-X irqs */
956 if (control & PCI_MSI_FLAGS_ENABLE)
957 return 0; /* Already in MSI mode */
958 spin_lock_irqsave(&msi_lock, flags);
959 if (!vector_irq[dev->irq]) {
960 msi_desc[dev->irq]->msi_attrib.state = 0;
961 vector_irq[dev->irq] = -1;
962 nr_released_vectors--;
963 spin_unlock_irqrestore(&msi_lock, flags);
964 status = msi_register_init(dev, msi_desc[dev->irq]);
965 if (status == 0)
966 enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
967 return status;
968 }
969 spin_unlock_irqrestore(&msi_lock, flags);
970 dev->irq = temp;
971 }
972 /* Check whether driver already requested for MSI-X vectors */
973 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 683 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
974 if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { 684 if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
975 printk(KERN_INFO "PCI: %s: Can't enable MSI. " 685 printk(KERN_INFO "PCI: %s: Can't enable MSI. "
976 "Device already has MSI-X vectors assigned\n", 686 "Device already has MSI-X irq assigned\n",
977 pci_name(dev)); 687 pci_name(dev));
978 dev->irq = temp; 688 dev->irq = temp;
979 return -EINVAL; 689 return -EINVAL;
980 } 690 }
981 status = msi_capability_init(dev); 691 status = msi_capability_init(dev);
982 if (!status) {
983 if (!pos)
984 nr_reserved_vectors--; /* Only MSI capable */
985 else if (nr_msix_devices > 0)
986 nr_msix_devices--; /* Both MSI and MSI-X capable,
987 but choose enabling MSI */
988 }
989
990 return status; 692 return status;
991} 693}
992 694
993void pci_disable_msi(struct pci_dev* dev) 695void pci_disable_msi(struct pci_dev* dev)
994{ 696{
995 struct msi_desc *entry; 697 struct msi_desc *entry;
996 int pos, default_vector; 698 int pos, default_irq;
997 u16 control; 699 u16 control;
998 unsigned long flags; 700 unsigned long flags;
999 701
@@ -1010,41 +712,41 @@ void pci_disable_msi(struct pci_dev* dev)
1010 if (!(control & PCI_MSI_FLAGS_ENABLE)) 712 if (!(control & PCI_MSI_FLAGS_ENABLE))
1011 return; 713 return;
1012 714
715 disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
716
1013 spin_lock_irqsave(&msi_lock, flags); 717 spin_lock_irqsave(&msi_lock, flags);
1014 entry = msi_desc[dev->irq]; 718 entry = msi_desc[dev->irq];
1015 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) { 719 if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
1016 spin_unlock_irqrestore(&msi_lock, flags); 720 spin_unlock_irqrestore(&msi_lock, flags);
1017 return; 721 return;
1018 } 722 }
1019 if (entry->msi_attrib.state) { 723 if (irq_has_action(dev->irq)) {
1020 spin_unlock_irqrestore(&msi_lock, flags); 724 spin_unlock_irqrestore(&msi_lock, flags);
1021 printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without " 725 printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
1022 "free_irq() on MSI vector %d\n", 726 "free_irq() on MSI irq %d\n",
1023 pci_name(dev), dev->irq); 727 pci_name(dev), dev->irq);
1024 BUG_ON(entry->msi_attrib.state > 0); 728 BUG_ON(irq_has_action(dev->irq));
1025 } else { 729 } else {
1026 vector_irq[dev->irq] = 0; /* free it */ 730 default_irq = entry->msi_attrib.default_irq;
1027 nr_released_vectors++;
1028 default_vector = entry->msi_attrib.default_vector;
1029 spin_unlock_irqrestore(&msi_lock, flags); 731 spin_unlock_irqrestore(&msi_lock, flags);
1030 /* Restore dev->irq to its default pin-assertion vector */ 732 msi_free_irq(dev, dev->irq);
1031 dev->irq = default_vector; 733
1032 disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), 734 /* Restore dev->irq to its default pin-assertion irq */
1033 PCI_CAP_ID_MSI); 735 dev->irq = default_irq;
1034 } 736 }
1035} 737}
1036 738
1037static int msi_free_vector(struct pci_dev* dev, int vector, int reassign) 739static int msi_free_irq(struct pci_dev* dev, int irq)
1038{ 740{
1039 struct msi_desc *entry; 741 struct msi_desc *entry;
1040 int head, entry_nr, type; 742 int head, entry_nr, type;
1041 void __iomem *base; 743 void __iomem *base;
1042 unsigned long flags; 744 unsigned long flags;
1043 745
1044 msi_ops->teardown(vector); 746 arch_teardown_msi_irq(irq);
1045 747
1046 spin_lock_irqsave(&msi_lock, flags); 748 spin_lock_irqsave(&msi_lock, flags);
1047 entry = msi_desc[vector]; 749 entry = msi_desc[irq];
1048 if (!entry || entry->dev != dev) { 750 if (!entry || entry->dev != dev) {
1049 spin_unlock_irqrestore(&msi_lock, flags); 751 spin_unlock_irqrestore(&msi_lock, flags);
1050 return -EINVAL; 752 return -EINVAL;
@@ -1056,100 +758,42 @@ static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
1056 msi_desc[entry->link.head]->link.tail = entry->link.tail; 758 msi_desc[entry->link.head]->link.tail = entry->link.tail;
1057 msi_desc[entry->link.tail]->link.head = entry->link.head; 759 msi_desc[entry->link.tail]->link.head = entry->link.head;
1058 entry->dev = NULL; 760 entry->dev = NULL;
1059 if (!reassign) { 761 msi_desc[irq] = NULL;
1060 vector_irq[vector] = 0;
1061 nr_released_vectors++;
1062 }
1063 msi_desc[vector] = NULL;
1064 spin_unlock_irqrestore(&msi_lock, flags); 762 spin_unlock_irqrestore(&msi_lock, flags);
1065 763
1066 kmem_cache_free(msi_cachep, entry); 764 destroy_msi_irq(irq);
1067 765
1068 if (type == PCI_CAP_ID_MSIX) { 766 if (type == PCI_CAP_ID_MSIX) {
1069 if (!reassign) 767 writel(1, base + entry_nr * PCI_MSIX_ENTRY_SIZE +
1070 writel(1, base + 768 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
1071 entry_nr * PCI_MSIX_ENTRY_SIZE +
1072 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
1073 769
1074 if (head == vector) 770 if (head == irq)
1075 iounmap(base); 771 iounmap(base);
1076 } 772 }
1077 773
1078 return 0; 774 return 0;
1079} 775}
1080 776
1081static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
1082{
1083 int vector = head, tail = 0;
1084 int i, j = 0, nr_entries = 0;
1085 void __iomem *base;
1086 unsigned long flags;
1087
1088 spin_lock_irqsave(&msi_lock, flags);
1089 while (head != tail) {
1090 nr_entries++;
1091 tail = msi_desc[vector]->link.tail;
1092 if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
1093 j = vector;
1094 vector = tail;
1095 }
1096 if (*nvec > nr_entries) {
1097 spin_unlock_irqrestore(&msi_lock, flags);
1098 *nvec = nr_entries;
1099 return -EINVAL;
1100 }
1101 vector = ((j > 0) ? j : head);
1102 for (i = 0; i < *nvec; i++) {
1103 j = msi_desc[vector]->msi_attrib.entry_nr;
1104 msi_desc[vector]->msi_attrib.state = 0; /* Mark it not active */
1105 vector_irq[vector] = -1; /* Mark it busy */
1106 nr_released_vectors--;
1107 entries[i].vector = vector;
1108 if (j != (entries + i)->entry) {
1109 base = msi_desc[vector]->mask_base;
1110 msi_desc[vector]->msi_attrib.entry_nr =
1111 (entries + i)->entry;
1112 writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
1113 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
1114 (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
1115 PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
1116 writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
1117 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
1118 (entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
1119 PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
1120 writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE +
1121 PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
1122 base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE +
1123 PCI_MSIX_ENTRY_DATA_OFFSET);
1124 }
1125 vector = msi_desc[vector]->link.tail;
1126 }
1127 spin_unlock_irqrestore(&msi_lock, flags);
1128
1129 return 0;
1130}
1131
1132/** 777/**
1133 * pci_enable_msix - configure device's MSI-X capability structure 778 * pci_enable_msix - configure device's MSI-X capability structure
1134 * @dev: pointer to the pci_dev data structure of MSI-X device function 779 * @dev: pointer to the pci_dev data structure of MSI-X device function
1135 * @entries: pointer to an array of MSI-X entries 780 * @entries: pointer to an array of MSI-X entries
1136 * @nvec: number of MSI-X vectors requested for allocation by device driver 781 * @nvec: number of MSI-X irqs requested for allocation by device driver
1137 * 782 *
1138 * Setup the MSI-X capability structure of device function with the number 783 * Setup the MSI-X capability structure of device function with the number
1139 * of requested vectors upon its software driver call to request for 784 * of requested irqs upon its software driver call to request for
1140 * MSI-X mode enabled on its hardware device function. A return of zero 785 * MSI-X mode enabled on its hardware device function. A return of zero
1141 * indicates the successful configuration of MSI-X capability structure 786 * indicates the successful configuration of MSI-X capability structure
1142 * with new allocated MSI-X vectors. A return of < 0 indicates a failure. 787 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
1143 * Or a return of > 0 indicates that driver request is exceeding the number 788 * Or a return of > 0 indicates that driver request is exceeding the number
1144 * of vectors available. Driver should use the returned value to re-send 789 * of irqs available. Driver should use the returned value to re-send
1145 * its request. 790 * its request.
1146 **/ 791 **/
1147int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) 792int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
1148{ 793{
1149 int status, pos, nr_entries, free_vectors; 794 int status, pos, nr_entries;
1150 int i, j, temp; 795 int i, j, temp;
1151 u16 control; 796 u16 control;
1152 unsigned long flags;
1153 797
1154 if (!entries || pci_msi_supported(dev) < 0) 798 if (!entries || pci_msi_supported(dev) < 0)
1155 return -EINVAL; 799 return -EINVAL;
@@ -1163,9 +807,6 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
1163 return -EINVAL; 807 return -EINVAL;
1164 808
1165 pci_read_config_word(dev, msi_control_reg(pos), &control); 809 pci_read_config_word(dev, msi_control_reg(pos), &control);
1166 if (control & PCI_MSIX_FLAGS_ENABLE)
1167 return -EINVAL; /* Already in MSI-X mode */
1168
1169 nr_entries = multi_msix_capable(control); 810 nr_entries = multi_msix_capable(control);
1170 if (nvec > nr_entries) 811 if (nvec > nr_entries)
1171 return -EINVAL; 812 return -EINVAL;
@@ -1180,56 +821,18 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
1180 } 821 }
1181 } 822 }
1182 temp = dev->irq; 823 temp = dev->irq;
1183 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { 824 WARN_ON(!msi_lookup_irq(dev, PCI_CAP_ID_MSIX));
1184 /* Lookup Sucess */ 825
1185 nr_entries = nvec; 826 /* Check whether driver already requested for MSI irq */
1186 /* Reroute MSI-X table */
1187 if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
1188 /* #requested > #previous-assigned */
1189 dev->irq = temp;
1190 return nr_entries;
1191 }
1192 dev->irq = temp;
1193 enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
1194 return 0;
1195 }
1196 /* Check whether driver already requested for MSI vector */
1197 if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 && 827 if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
1198 !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) { 828 !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
1199 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " 829 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
1200 "Device already has an MSI vector assigned\n", 830 "Device already has an MSI irq assigned\n",
1201 pci_name(dev)); 831 pci_name(dev));
1202 dev->irq = temp; 832 dev->irq = temp;
1203 return -EINVAL; 833 return -EINVAL;
1204 } 834 }
1205
1206 spin_lock_irqsave(&msi_lock, flags);
1207 /*
1208 * msi_lock is provided to ensure that enough vectors resources are
1209 * available before granting.
1210 */
1211 free_vectors = pci_vector_resources(last_alloc_vector,
1212 nr_released_vectors);
1213 /* Ensure that each MSI/MSI-X device has one vector reserved by
1214 default to avoid any MSI-X driver to take all available
1215 resources */
1216 free_vectors -= nr_reserved_vectors;
1217 /* Find the average of free vectors among MSI-X devices */
1218 if (nr_msix_devices > 0)
1219 free_vectors /= nr_msix_devices;
1220 spin_unlock_irqrestore(&msi_lock, flags);
1221
1222 if (nvec > free_vectors) {
1223 if (free_vectors > 0)
1224 return free_vectors;
1225 else
1226 return -EBUSY;
1227 }
1228
1229 status = msix_capability_init(dev, entries, nvec); 835 status = msix_capability_init(dev, entries, nvec);
1230 if (!status && nr_msix_devices > 0)
1231 nr_msix_devices--;
1232
1233 return status; 836 return status;
1234} 837}
1235 838
@@ -1251,53 +854,47 @@ void pci_disable_msix(struct pci_dev* dev)
1251 if (!(control & PCI_MSIX_FLAGS_ENABLE)) 854 if (!(control & PCI_MSIX_FLAGS_ENABLE))
1252 return; 855 return;
1253 856
857 disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
858
1254 temp = dev->irq; 859 temp = dev->irq;
1255 if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { 860 if (!msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
1256 int state, vector, head, tail = 0, warning = 0; 861 int irq, head, tail = 0, warning = 0;
1257 unsigned long flags; 862 unsigned long flags;
1258 863
1259 vector = head = dev->irq; 864 irq = head = dev->irq;
1260 spin_lock_irqsave(&msi_lock, flags); 865 dev->irq = temp; /* Restore pin IRQ */
1261 while (head != tail) { 866 while (head != tail) {
1262 state = msi_desc[vector]->msi_attrib.state; 867 spin_lock_irqsave(&msi_lock, flags);
1263 if (state) 868 tail = msi_desc[irq]->link.tail;
869 spin_unlock_irqrestore(&msi_lock, flags);
870 if (irq_has_action(irq))
1264 warning = 1; 871 warning = 1;
1265 else { 872 else if (irq != head) /* Release MSI-X irq */
1266 vector_irq[vector] = 0; /* free it */ 873 msi_free_irq(dev, irq);
1267 nr_released_vectors++; 874 irq = tail;
1268 }
1269 tail = msi_desc[vector]->link.tail;
1270 vector = tail;
1271 } 875 }
1272 spin_unlock_irqrestore(&msi_lock, flags); 876 msi_free_irq(dev, irq);
1273 if (warning) { 877 if (warning) {
1274 dev->irq = temp;
1275 printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without " 878 printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
1276 "free_irq() on all MSI-X vectors\n", 879 "free_irq() on all MSI-X irqs\n",
1277 pci_name(dev)); 880 pci_name(dev));
1278 BUG_ON(warning > 0); 881 BUG_ON(warning > 0);
1279 } else {
1280 dev->irq = temp;
1281 disable_msi_mode(dev,
1282 pci_find_capability(dev, PCI_CAP_ID_MSIX),
1283 PCI_CAP_ID_MSIX);
1284
1285 } 882 }
1286 } 883 }
1287} 884}
1288 885
1289/** 886/**
1290 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state 887 * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
1291 * @dev: pointer to the pci_dev data structure of MSI(X) device function 888 * @dev: pointer to the pci_dev data structure of MSI(X) device function
1292 * 889 *
1293 * Being called during hotplug remove, from which the device function 890 * Being called during hotplug remove, from which the device function
1294 * is hot-removed. All previous assigned MSI/MSI-X vectors, if 891 * is hot-removed. All previous assigned MSI/MSI-X irqs, if
1295 * allocated for this device function, are reclaimed to unused state, 892 * allocated for this device function, are reclaimed to unused state,
1296 * which may be used later on. 893 * which may be used later on.
1297 **/ 894 **/
1298void msi_remove_pci_irq_vectors(struct pci_dev* dev) 895void msi_remove_pci_irq_vectors(struct pci_dev* dev)
1299{ 896{
1300 int state, pos, temp; 897 int pos, temp;
1301 unsigned long flags; 898 unsigned long flags;
1302 899
1303 if (!pci_msi_enable || !dev) 900 if (!pci_msi_enable || !dev)
@@ -1305,42 +902,38 @@ void msi_remove_pci_irq_vectors(struct pci_dev* dev)
1305 902
1306 temp = dev->irq; /* Save IOAPIC IRQ */ 903 temp = dev->irq; /* Save IOAPIC IRQ */
1307 pos = pci_find_capability(dev, PCI_CAP_ID_MSI); 904 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
1308 if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSI)) { 905 if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSI)) {
1309 spin_lock_irqsave(&msi_lock, flags); 906 if (irq_has_action(dev->irq)) {
1310 state = msi_desc[dev->irq]->msi_attrib.state;
1311 spin_unlock_irqrestore(&msi_lock, flags);
1312 if (state) {
1313 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() " 907 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
1314 "called without free_irq() on MSI vector %d\n", 908 "called without free_irq() on MSI irq %d\n",
1315 pci_name(dev), dev->irq); 909 pci_name(dev), dev->irq);
1316 BUG_ON(state > 0); 910 BUG_ON(irq_has_action(dev->irq));
1317 } else /* Release MSI vector assigned to this device */ 911 } else /* Release MSI irq assigned to this device */
1318 msi_free_vector(dev, dev->irq, 0); 912 msi_free_irq(dev, dev->irq);
1319 dev->irq = temp; /* Restore IOAPIC IRQ */ 913 dev->irq = temp; /* Restore IOAPIC IRQ */
1320 } 914 }
1321 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); 915 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1322 if (pos > 0 && !msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) { 916 if (pos > 0 && !msi_lookup_irq(dev, PCI_CAP_ID_MSIX)) {
1323 int vector, head, tail = 0, warning = 0; 917 int irq, head, tail = 0, warning = 0;
1324 void __iomem *base = NULL; 918 void __iomem *base = NULL;
1325 919
1326 vector = head = dev->irq; 920 irq = head = dev->irq;
1327 while (head != tail) { 921 while (head != tail) {
1328 spin_lock_irqsave(&msi_lock, flags); 922 spin_lock_irqsave(&msi_lock, flags);
1329 state = msi_desc[vector]->msi_attrib.state; 923 tail = msi_desc[irq]->link.tail;
1330 tail = msi_desc[vector]->link.tail; 924 base = msi_desc[irq]->mask_base;
1331 base = msi_desc[vector]->mask_base;
1332 spin_unlock_irqrestore(&msi_lock, flags); 925 spin_unlock_irqrestore(&msi_lock, flags);
1333 if (state) 926 if (irq_has_action(irq))
1334 warning = 1; 927 warning = 1;
1335 else if (vector != head) /* Release MSI-X vector */ 928 else if (irq != head) /* Release MSI-X irq */
1336 msi_free_vector(dev, vector, 0); 929 msi_free_irq(dev, irq);
1337 vector = tail; 930 irq = tail;
1338 } 931 }
1339 msi_free_vector(dev, vector, 0); 932 msi_free_irq(dev, irq);
1340 if (warning) { 933 if (warning) {
1341 iounmap(base); 934 iounmap(base);
1342 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() " 935 printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
1343 "called without free_irq() on all MSI-X vectors\n", 936 "called without free_irq() on all MSI-X irqs\n",
1344 pci_name(dev)); 937 pci_name(dev));
1345 BUG_ON(warning > 0); 938 BUG_ON(warning > 0);
1346 } 939 }
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 56951c39d3a3..f0cca1772f9c 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -7,84 +7,6 @@
7#define MSI_H 7#define MSI_H
8 8
9/* 9/*
10 * MSI operation vector. Used by the msi core code (drivers/pci/msi.c)
11 * to abstract platform-specific tasks relating to MSI address generation
12 * and resource management.
13 */
14struct msi_ops {
15 /**
16 * setup - generate an MSI bus address and data for a given vector
17 * @pdev: PCI device context (in)
18 * @vector: vector allocated by the msi core (in)
19 * @addr_hi: upper 32 bits of PCI bus MSI address (out)
20 * @addr_lo: lower 32 bits of PCI bus MSI address (out)
21 * @data: MSI data payload (out)
22 *
23 * Description: The setup op is used to generate a PCI bus addres and
24 * data which the msi core will program into the card MSI capability
25 * registers. The setup routine is responsible for picking an initial
26 * cpu to target the MSI at. The setup routine is responsible for
27 * examining pdev to determine the MSI capabilities of the card and
28 * generating a suitable address/data. The setup routine is
29 * responsible for allocating and tracking any system resources it
30 * needs to route the MSI to the cpu it picks, and for associating
31 * those resources with the passed in vector.
32 *
33 * Returns 0 if the MSI address/data was successfully setup.
34 **/
35
36 int (*setup) (struct pci_dev *pdev, unsigned int vector,
37 u32 *addr_hi, u32 *addr_lo, u32 *data);
38
39 /**
40 * teardown - release resources allocated by setup
41 * @vector: vector context for resources (in)
42 *
43 * Description: The teardown op is used to release any resources
44 * that were allocated in the setup routine associated with the passed
45 * in vector.
46 **/
47
48 void (*teardown) (unsigned int vector);
49
50 /**
51 * target - retarget an MSI at a different cpu
52 * @vector: vector context for resources (in)
53 * @cpu: new cpu to direct vector at (in)
54 * @addr_hi: new value of PCI bus upper 32 bits (in/out)
55 * @addr_lo: new value of PCI bus lower 32 bits (in/out)
56 *
57 * Description: The target op is used to redirect an MSI vector
58 * at a different cpu. addr_hi/addr_lo coming in are the existing
59 * values that the MSI core has programmed into the card. The
60 * target code is responsible for freeing any resources (if any)
61 * associated with the old address, and generating a new PCI bus
62 * addr_hi/addr_lo that will redirect the vector at the indicated cpu.
63 **/
64
65 void (*target) (unsigned int vector, unsigned int cpu,
66 u32 *addr_hi, u32 *addr_lo);
67};
68
69extern int msi_register(struct msi_ops *ops);
70
71#include <asm/msi.h>
72
73/*
74 * Assume the maximum number of hot plug slots supported by the system is about
75 * ten. The worstcase is that each of these slots is hot-added with a device,
76 * which has two MSI/MSI-X capable functions. To avoid any MSI-X driver, which
77 * attempts to request all available vectors, NR_HP_RESERVED_VECTORS is defined
78 * as below to ensure at least one message is assigned to each detected MSI/
79 * MSI-X device function.
80 */
81#define NR_HP_RESERVED_VECTORS 20
82
83extern int vector_irq[NR_VECTORS];
84extern void (*interrupt[NR_IRQS])(void);
85extern int pci_vector_resources(int last, int nr_released);
86
87/*
88 * MSI-X Address Register 10 * MSI-X Address Register
89 */ 11 */
90#define PCI_MSIX_FLAGS_QSIZE 0x7FF 12#define PCI_MSIX_FLAGS_QSIZE 0x7FF
@@ -110,8 +32,8 @@ extern int pci_vector_resources(int last, int nr_released);
110 (1 << ((control & PCI_MSI_FLAGS_QMASK) >> 1)) 32 (1 << ((control & PCI_MSI_FLAGS_QMASK) >> 1))
111#define multi_msi_enable(control, num) \ 33#define multi_msi_enable(control, num) \
112 control |= (((num >> 1) << 4) & PCI_MSI_FLAGS_QSIZE); 34 control |= (((num >> 1) << 4) & PCI_MSI_FLAGS_QSIZE);
113#define is_64bit_address(control) (control & PCI_MSI_FLAGS_64BIT) 35#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
114#define is_mask_bit_support(control) (control & PCI_MSI_FLAGS_MASKBIT) 36#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
115#define msi_enable(control, num) multi_msi_enable(control, num); \ 37#define msi_enable(control, num) multi_msi_enable(control, num); \
116 control |= PCI_MSI_FLAGS_ENABLE 38 control |= PCI_MSI_FLAGS_ENABLE
117 39
@@ -125,32 +47,4 @@ extern int pci_vector_resources(int last, int nr_released);
125#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK) 47#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK)
126#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK) 48#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK)
127 49
128struct msi_desc {
129 struct {
130 __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */
131 __u8 maskbit : 1; /* mask-pending bit supported ? */
132 __u8 state : 1; /* {0: free, 1: busy} */
133 __u8 reserved: 1; /* reserved */
134 __u8 entry_nr; /* specific enabled entry */
135 __u8 default_vector; /* default pre-assigned vector */
136 __u8 unused; /* formerly unused destination cpu*/
137 }msi_attrib;
138
139 struct {
140 __u16 head;
141 __u16 tail;
142 }link;
143
144 void __iomem *mask_base;
145 struct pci_dev *dev;
146
147#ifdef CONFIG_PM
148 /* PM save area for MSIX address/data */
149
150 u32 address_hi_save;
151 u32 address_lo_save;
152 u32 data_save;
153#endif
154};
155
156#endif /* MSI_H */ 50#endif /* MSI_H */
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 54404917be9a..8f7bcf56f149 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -55,16 +55,16 @@ pbus_assign_resources_sorted(struct pci_bus *bus)
55 list_for_each_entry(dev, &bus->devices, bus_list) { 55 list_for_each_entry(dev, &bus->devices, bus_list) {
56 u16 class = dev->class >> 8; 56 u16 class = dev->class >> 8;
57 57
58 /* Don't touch classless devices or host bridges. */ 58 /* Don't touch classless devices or host bridges or ioapics. */
59 if (class == PCI_CLASS_NOT_DEFINED || 59 if (class == PCI_CLASS_NOT_DEFINED ||
60 class == PCI_CLASS_BRIDGE_HOST) 60 class == PCI_CLASS_BRIDGE_HOST)
61 continue; 61 continue;
62 62
63 /* Don't touch ioapics if it has the assigned resources. */ 63 /* Don't touch ioapic devices already enabled by firmware */
64 if (class == PCI_CLASS_SYSTEM_PIC) { 64 if (class == PCI_CLASS_SYSTEM_PIC) {
65 res = &dev->resource[0]; 65 u16 command;
66 if (res[0].start || res[1].start || res[2].start || 66 pci_read_config_word(dev, PCI_COMMAND, &command);
67 res[3].start || res[4].start || res[5].start) 67 if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
68 continue; 68 continue;
69 } 69 }
70 70
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index cc5032b6f42a..3f0f7b8fa813 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -141,9 +141,9 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
141 141
142 dev_dbg(dev, "%s secs=%d, mins=%d, " 142 dev_dbg(dev, "%s secs=%d, mins=%d, "
143 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n", 143 "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
144 "write", dt->tm_sec, dt->tm_min, 144 "write", t->tm_sec, t->tm_min,
145 dt->tm_hour, dt->tm_mday, 145 t->tm_hour, t->tm_mday,
146 dt->tm_mon, dt->tm_year, dt->tm_wday); 146 t->tm_mon, t->tm_year, t->tm_wday);
147 147
148 *buf++ = 0; /* first register addr */ 148 *buf++ = 0; /* first register addr */
149 buf[DS1307_REG_SECS] = BIN2BCD(t->tm_sec); 149 buf[DS1307_REG_SECS] = BIN2BCD(t->tm_sec);
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 9c68ec99afa5..67e816a9a39f 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -55,7 +55,7 @@ static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
55 } 55 }
56 56
57 dev_dbg(&client->dev, 57 dev_dbg(&client->dev,
58 "%s: raw read data - counters=%02x,%02x,%02x,%02x\n" 58 "%s: raw read data - counters=%02x,%02x,%02x,%02x\n",
59 __FUNCTION__, buf[0], buf[1], buf[2], buf[3]); 59 __FUNCTION__, buf[0], buf[1], buf[2], buf[3]);
60 60
61 time = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; 61 time = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
@@ -96,7 +96,7 @@ static int ds1672_set_datetime(struct i2c_client *client, struct rtc_time *tm)
96 unsigned long secs; 96 unsigned long secs;
97 97
98 dev_dbg(&client->dev, 98 dev_dbg(&client->dev,
99 "%s: secs=%d, mins=%d, hours=%d, ", 99 "%s: secs=%d, mins=%d, hours=%d, "
100 "mday=%d, mon=%d, year=%d, wday=%d\n", 100 "mday=%d, mon=%d, year=%d, wday=%d\n",
101 __FUNCTION__, 101 __FUNCTION__,
102 tm->tm_sec, tm->tm_min, tm->tm_hour, 102 tm->tm_sec, tm->tm_min, tm->tm_hour,
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index bbdad099471d..2a86632580f1 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -91,7 +91,7 @@ static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
91 unsigned char buf[8] = { RS5C372_REG_BASE }; 91 unsigned char buf[8] = { RS5C372_REG_BASE };
92 92
93 dev_dbg(&client->dev, 93 dev_dbg(&client->dev,
94 "%s: secs=%d, mins=%d, hours=%d ", 94 "%s: secs=%d, mins=%d, hours=%d "
95 "mday=%d, mon=%d, year=%d, wday=%d\n", 95 "mday=%d, mon=%d, year=%d, wday=%d\n",
96 __FUNCTION__, tm->tm_sec, tm->tm_min, tm->tm_hour, 96 __FUNCTION__, tm->tm_sec, tm->tm_min, tm->tm_hour,
97 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday); 97 tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
@@ -126,7 +126,7 @@ static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
126 return -EIO; 126 return -EIO;
127 } 127 }
128 128
129 dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, trim); 129 dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, *trim);
130 130
131 if (osc) 131 if (osc)
132 *osc = (buf & RS5C372_TRIM_XSL) ? 32000 : 32768; 132 *osc = (buf & RS5C372_TRIM_XSL) ? 32000 : 32768;
diff --git a/drivers/serial/8250_gsc.c b/drivers/serial/8250_gsc.c
index 1ebe6b585d2d..c5d0addfda4f 100644
--- a/drivers/serial/8250_gsc.c
+++ b/drivers/serial/8250_gsc.c
@@ -22,7 +22,6 @@
22#include <asm/hardware.h> 22#include <asm/hardware.h>
23#include <asm/parisc-device.h> 23#include <asm/parisc-device.h>
24#include <asm/io.h> 24#include <asm/io.h>
25#include <asm/serial.h> /* for LASI_BASE_BAUD */
26 25
27#include "8250.h" 26#include "8250.h"
28 27
@@ -54,7 +53,8 @@ serial_init_chip(struct parisc_device *dev)
54 53
55 memset(&port, 0, sizeof(port)); 54 memset(&port, 0, sizeof(port));
56 port.iotype = UPIO_MEM; 55 port.iotype = UPIO_MEM;
57 port.uartclk = LASI_BASE_BAUD * 16; 56 /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */
57 port.uartclk = 7272727;
58 port.mapbase = address; 58 port.mapbase = address;
59 port.membase = ioremap_nocache(address, 16); 59 port.membase = ioremap_nocache(address, 16);
60 port.irq = dev->irq; 60 port.irq = dev->irq;
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 653098bc2dd5..8edee745888a 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -556,10 +556,11 @@ config SERIAL_MUX
556 default y 556 default y
557 ---help--- 557 ---help---
558 Saying Y here will enable the hardware MUX serial driver for 558 Saying Y here will enable the hardware MUX serial driver for
559 the Nova and K class systems. The hardware MUX is not 8250/16550 559 the Nova, K class systems and D class with a 'remote control card'.
560 compatible therefore the /dev/ttyB0 device is shared between the 560 The hardware MUX is not 8250/16550 compatible therefore the
561 Serial MUX and the PDC software console. The following steps 561 /dev/ttyB0 device is shared between the Serial MUX and the PDC
562 need to be completed to use the Serial MUX: 562 software console. The following steps need to be completed to use
563 the Serial MUX:
563 564
564 1. create the device entry (mknod /dev/ttyB0 c 11 0) 565 1. create the device entry (mknod /dev/ttyB0 c 11 0)
565 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0 566 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index b120896c8ab4..a433cc78ef90 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -1843,7 +1843,7 @@ static int __devinit riva_get_EDID_OF(struct fb_info *info, struct pci_dev *pd)
1843 for (i = 0; propnames[i] != NULL; ++i) { 1843 for (i = 0; propnames[i] != NULL; ++i) {
1844 pedid = get_property(dp, propnames[i], NULL); 1844 pedid = get_property(dp, propnames[i], NULL);
1845 if (pedid != NULL) { 1845 if (pedid != NULL) {
1846 par->EDID = pedid; 1846 par->EDID = (unsigned char *)pedid;
1847 NVTRACE("LCD found.\n"); 1847 NVTRACE("LCD found.\n");
1848 return 1; 1848 return 1;
1849 } 1849 }
diff --git a/fs/Kconfig b/fs/Kconfig
index 68f4561423ff..599de54451af 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -325,6 +325,7 @@ config FS_POSIX_ACL
325 default n 325 default n
326 326
327source "fs/xfs/Kconfig" 327source "fs/xfs/Kconfig"
328source "fs/gfs2/Kconfig"
328 329
329config OCFS2_FS 330config OCFS2_FS
330 tristate "OCFS2 file system support" 331 tristate "OCFS2 file system support"
@@ -995,6 +996,18 @@ config AFFS_FS
995 To compile this file system support as a module, choose M here: the 996 To compile this file system support as a module, choose M here: the
996 module will be called affs. If unsure, say N. 997 module will be called affs. If unsure, say N.
997 998
999config ECRYPT_FS
1000 tristate "eCrypt filesystem layer support (EXPERIMENTAL)"
1001 depends on EXPERIMENTAL && KEYS && CRYPTO
1002 help
1003 Encrypted filesystem that operates on the VFS layer. See
1004 <file:Documentation/ecryptfs.txt> to learn more about
1005 eCryptfs. Userspace components are required and can be
1006 obtained from <http://ecryptfs.sf.net>.
1007
1008 To compile this file system support as a module, choose M here: the
1009 module will be called ecryptfs.
1010
998config HFS_FS 1011config HFS_FS
999 tristate "Apple Macintosh file system support (EXPERIMENTAL)" 1012 tristate "Apple Macintosh file system support (EXPERIMENTAL)"
1000 depends on BLOCK && EXPERIMENTAL 1013 depends on BLOCK && EXPERIMENTAL
@@ -1983,6 +1996,7 @@ endmenu
1983endif 1996endif
1984 1997
1985source "fs/nls/Kconfig" 1998source "fs/nls/Kconfig"
1999source "fs/dlm/Kconfig"
1986 2000
1987endmenu 2001endmenu
1988 2002
diff --git a/fs/Makefile b/fs/Makefile
index 819b2a93bebe..df614eacee86 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_CONFIGFS_FS) += configfs/
57obj-y += devpts/ 57obj-y += devpts/
58 58
59obj-$(CONFIG_PROFILING) += dcookies.o 59obj-$(CONFIG_PROFILING) += dcookies.o
60obj-$(CONFIG_DLM) += dlm/
60 61
61# Do not add any filesystems before this line 62# Do not add any filesystems before this line
62obj-$(CONFIG_REISERFS_FS) += reiserfs/ 63obj-$(CONFIG_REISERFS_FS) += reiserfs/
@@ -75,6 +76,7 @@ obj-$(CONFIG_BFS_FS) += bfs/
75obj-$(CONFIG_ISO9660_FS) += isofs/ 76obj-$(CONFIG_ISO9660_FS) += isofs/
76obj-$(CONFIG_HFSPLUS_FS) += hfsplus/ # Before hfs to find wrapped HFS+ 77obj-$(CONFIG_HFSPLUS_FS) += hfsplus/ # Before hfs to find wrapped HFS+
77obj-$(CONFIG_HFS_FS) += hfs/ 78obj-$(CONFIG_HFS_FS) += hfs/
79obj-$(CONFIG_ECRYPT_FS) += ecryptfs/
78obj-$(CONFIG_VXFS_FS) += freevxfs/ 80obj-$(CONFIG_VXFS_FS) += freevxfs/
79obj-$(CONFIG_NFS_FS) += nfs/ 81obj-$(CONFIG_NFS_FS) += nfs/
80obj-$(CONFIG_EXPORTFS) += exportfs/ 82obj-$(CONFIG_EXPORTFS) += exportfs/
@@ -109,3 +111,4 @@ obj-$(CONFIG_HOSTFS) += hostfs/
109obj-$(CONFIG_HPPFS) += hppfs/ 111obj-$(CONFIG_HPPFS) += hppfs/
110obj-$(CONFIG_DEBUG_FS) += debugfs/ 112obj-$(CONFIG_DEBUG_FS) += debugfs/
111obj-$(CONFIG_OCFS2_FS) += ocfs2/ 113obj-$(CONFIG_OCFS2_FS) += ocfs2/
114obj-$(CONFIG_GFS2_FS) += gfs2/
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
index 32b5d625ce9c..5bcdaaf4eae0 100644
--- a/fs/binfmt_som.c
+++ b/fs/binfmt_som.c
@@ -29,6 +29,7 @@
29#include <linux/personality.h> 29#include <linux/personality.h>
30#include <linux/init.h> 30#include <linux/init.h>
31 31
32#include <asm/a.out.h>
32#include <asm/uaccess.h> 33#include <asm/uaccess.h>
33#include <asm/pgtable.h> 34#include <asm/pgtable.h>
34 35
@@ -194,6 +195,7 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
194 unsigned long som_entry; 195 unsigned long som_entry;
195 struct som_hdr *som_ex; 196 struct som_hdr *som_ex;
196 struct som_exec_auxhdr *hpuxhdr; 197 struct som_exec_auxhdr *hpuxhdr;
198 struct files_struct *files;
197 199
198 /* Get the exec-header */ 200 /* Get the exec-header */
199 som_ex = (struct som_hdr *) bprm->buf; 201 som_ex = (struct som_hdr *) bprm->buf;
@@ -208,15 +210,27 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
208 size = som_ex->aux_header_size; 210 size = som_ex->aux_header_size;
209 if (size > SOM_PAGESIZE) 211 if (size > SOM_PAGESIZE)
210 goto out; 212 goto out;
211 hpuxhdr = (struct som_exec_auxhdr *) kmalloc(size, GFP_KERNEL); 213 hpuxhdr = kmalloc(size, GFP_KERNEL);
212 if (!hpuxhdr) 214 if (!hpuxhdr)
213 goto out; 215 goto out;
214 216
215 retval = kernel_read(bprm->file, som_ex->aux_header_location, 217 retval = kernel_read(bprm->file, som_ex->aux_header_location,
216 (char *) hpuxhdr, size); 218 (char *) hpuxhdr, size);
219 if (retval != size) {
220 if (retval >= 0)
221 retval = -EIO;
222 goto out_free;
223 }
224
225 files = current->files; /* Refcounted so ok */
226 retval = unshare_files();
217 if (retval < 0) 227 if (retval < 0)
218 goto out_free; 228 goto out_free;
219#error "Fix security hole before enabling me" 229 if (files == current->files) {
230 put_files_struct(files);
231 files = NULL;
232 }
233
220 retval = get_unused_fd(); 234 retval = get_unused_fd();
221 if (retval < 0) 235 if (retval < 0)
222 goto out_free; 236 goto out_free;
diff --git a/fs/configfs/item.c b/fs/configfs/item.c
index e07485ac50ad..24421209f854 100644
--- a/fs/configfs/item.c
+++ b/fs/configfs/item.c
@@ -224,4 +224,4 @@ EXPORT_SYMBOL(config_item_init);
224EXPORT_SYMBOL(config_group_init); 224EXPORT_SYMBOL(config_group_init);
225EXPORT_SYMBOL(config_item_get); 225EXPORT_SYMBOL(config_item_get);
226EXPORT_SYMBOL(config_item_put); 226EXPORT_SYMBOL(config_item_put);
227 227EXPORT_SYMBOL(config_group_find_obj);
diff --git a/fs/dcache.c b/fs/dcache.c
index fc2faa44f8d1..2355bddad8de 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -291,9 +291,9 @@ struct dentry * dget_locked(struct dentry *dentry)
291 * it can be unhashed only if it has no children, or if it is the root 291 * it can be unhashed only if it has no children, or if it is the root
292 * of a filesystem. 292 * of a filesystem.
293 * 293 *
294 * If the inode has a DCACHE_DISCONNECTED alias, then prefer 294 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
295 * any other hashed alias over that one unless @want_discon is set, 295 * any other hashed alias over that one unless @want_discon is set,
296 * in which case only return a DCACHE_DISCONNECTED alias. 296 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
297 */ 297 */
298 298
299static struct dentry * __d_find_alias(struct inode *inode, int want_discon) 299static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
@@ -309,7 +309,8 @@ static struct dentry * __d_find_alias(struct inode *inode, int want_discon)
309 prefetch(next); 309 prefetch(next);
310 alias = list_entry(tmp, struct dentry, d_alias); 310 alias = list_entry(tmp, struct dentry, d_alias);
311 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { 311 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
312 if (alias->d_flags & DCACHE_DISCONNECTED) 312 if (IS_ROOT(alias) &&
313 (alias->d_flags & DCACHE_DISCONNECTED))
313 discon_alias = alias; 314 discon_alias = alias;
314 else if (!want_discon) { 315 else if (!want_discon) {
315 __dget_locked(alias); 316 __dget_locked(alias);
@@ -1004,7 +1005,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1004{ 1005{
1005 struct dentry *new = NULL; 1006 struct dentry *new = NULL;
1006 1007
1007 if (inode) { 1008 if (inode && S_ISDIR(inode->i_mode)) {
1008 spin_lock(&dcache_lock); 1009 spin_lock(&dcache_lock);
1009 new = __d_find_alias(inode, 1); 1010 new = __d_find_alias(inode, 1);
1010 if (new) { 1011 if (new) {
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
new file mode 100644
index 000000000000..490f85b3fa59
--- /dev/null
+++ b/fs/dlm/Kconfig
@@ -0,0 +1,21 @@
1menu "Distributed Lock Manager"
2 depends on INET && EXPERIMENTAL
3
4config DLM
5 tristate "Distributed Lock Manager (DLM)"
6 depends on IPV6 || IPV6=n
7 depends on IP_SCTP
8 select CONFIGFS_FS
9 help
10 A general purpose distributed lock manager for kernel or userspace
11 applications.
12
13config DLM_DEBUG
14 bool "DLM debugging"
15 depends on DLM
16 help
17 Under the debugfs mount point, the name of each lockspace will
18 appear as a file in the "dlm" directory. The output is the
19 list of resource and locks the local node knows about.
20
21endmenu
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
new file mode 100644
index 000000000000..1832e0297f7d
--- /dev/null
+++ b/fs/dlm/Makefile
@@ -0,0 +1,19 @@
1obj-$(CONFIG_DLM) += dlm.o
2dlm-y := ast.o \
3 config.o \
4 dir.o \
5 lock.o \
6 lockspace.o \
7 lowcomms.o \
8 main.o \
9 member.o \
10 memory.o \
11 midcomms.o \
12 rcom.o \
13 recover.o \
14 recoverd.o \
15 requestqueue.o \
16 user.o \
17 util.o
18dlm-$(CONFIG_DLM_DEBUG) += debug_fs.o
19
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
new file mode 100644
index 000000000000..f91d39cb1e0b
--- /dev/null
+++ b/fs/dlm/ast.c
@@ -0,0 +1,173 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lock.h"
16#include "user.h"
17
18#define WAKE_ASTS 0
19
20static struct list_head ast_queue;
21static spinlock_t ast_queue_lock;
22static struct task_struct * astd_task;
23static unsigned long astd_wakeflags;
24static struct mutex astd_running;
25
26
27void dlm_del_ast(struct dlm_lkb *lkb)
28{
29 spin_lock(&ast_queue_lock);
30 if (lkb->lkb_ast_type & (AST_COMP | AST_BAST))
31 list_del(&lkb->lkb_astqueue);
32 spin_unlock(&ast_queue_lock);
33}
34
35void dlm_add_ast(struct dlm_lkb *lkb, int type)
36{
37 if (lkb->lkb_flags & DLM_IFL_USER) {
38 dlm_user_add_ast(lkb, type);
39 return;
40 }
41 DLM_ASSERT(lkb->lkb_astaddr != DLM_FAKE_USER_AST, dlm_print_lkb(lkb););
42
43 spin_lock(&ast_queue_lock);
44 if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
45 kref_get(&lkb->lkb_ref);
46 list_add_tail(&lkb->lkb_astqueue, &ast_queue);
47 }
48 lkb->lkb_ast_type |= type;
49 spin_unlock(&ast_queue_lock);
50
51 set_bit(WAKE_ASTS, &astd_wakeflags);
52 wake_up_process(astd_task);
53}
54
55static void process_asts(void)
56{
57 struct dlm_ls *ls = NULL;
58 struct dlm_rsb *r = NULL;
59 struct dlm_lkb *lkb;
60 void (*cast) (long param);
61 void (*bast) (long param, int mode);
62 int type = 0, found, bmode;
63
64 for (;;) {
65 found = 0;
66 spin_lock(&ast_queue_lock);
67 list_for_each_entry(lkb, &ast_queue, lkb_astqueue) {
68 r = lkb->lkb_resource;
69 ls = r->res_ls;
70
71 if (dlm_locking_stopped(ls))
72 continue;
73
74 list_del(&lkb->lkb_astqueue);
75 type = lkb->lkb_ast_type;
76 lkb->lkb_ast_type = 0;
77 found = 1;
78 break;
79 }
80 spin_unlock(&ast_queue_lock);
81
82 if (!found)
83 break;
84
85 cast = lkb->lkb_astaddr;
86 bast = lkb->lkb_bastaddr;
87 bmode = lkb->lkb_bastmode;
88
89 if ((type & AST_COMP) && cast)
90 cast(lkb->lkb_astparam);
91
92 /* FIXME: Is it safe to look at lkb_grmode here
93 without doing a lock_rsb() ?
94 Look at other checks in v1 to avoid basts. */
95
96 if ((type & AST_BAST) && bast)
97 if (!dlm_modes_compat(lkb->lkb_grmode, bmode))
98 bast(lkb->lkb_astparam, bmode);
99
100 /* this removes the reference added by dlm_add_ast
101 and may result in the lkb being freed */
102 dlm_put_lkb(lkb);
103
104 schedule();
105 }
106}
107
108static inline int no_asts(void)
109{
110 int ret;
111
112 spin_lock(&ast_queue_lock);
113 ret = list_empty(&ast_queue);
114 spin_unlock(&ast_queue_lock);
115 return ret;
116}
117
118static int dlm_astd(void *data)
119{
120 while (!kthread_should_stop()) {
121 set_current_state(TASK_INTERRUPTIBLE);
122 if (!test_bit(WAKE_ASTS, &astd_wakeflags))
123 schedule();
124 set_current_state(TASK_RUNNING);
125
126 mutex_lock(&astd_running);
127 if (test_and_clear_bit(WAKE_ASTS, &astd_wakeflags))
128 process_asts();
129 mutex_unlock(&astd_running);
130 }
131 return 0;
132}
133
134void dlm_astd_wake(void)
135{
136 if (!no_asts()) {
137 set_bit(WAKE_ASTS, &astd_wakeflags);
138 wake_up_process(astd_task);
139 }
140}
141
142int dlm_astd_start(void)
143{
144 struct task_struct *p;
145 int error = 0;
146
147 INIT_LIST_HEAD(&ast_queue);
148 spin_lock_init(&ast_queue_lock);
149 mutex_init(&astd_running);
150
151 p = kthread_run(dlm_astd, NULL, "dlm_astd");
152 if (IS_ERR(p))
153 error = PTR_ERR(p);
154 else
155 astd_task = p;
156 return error;
157}
158
159void dlm_astd_stop(void)
160{
161 kthread_stop(astd_task);
162}
163
164void dlm_astd_suspend(void)
165{
166 mutex_lock(&astd_running);
167}
168
169void dlm_astd_resume(void)
170{
171 mutex_unlock(&astd_running);
172}
173
diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h
new file mode 100644
index 000000000000..6ee276c74c52
--- /dev/null
+++ b/fs/dlm/ast.h
@@ -0,0 +1,26 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#ifndef __ASTD_DOT_H__
14#define __ASTD_DOT_H__
15
16void dlm_add_ast(struct dlm_lkb *lkb, int type);
17void dlm_del_ast(struct dlm_lkb *lkb);
18
19void dlm_astd_wake(void);
20int dlm_astd_start(void);
21void dlm_astd_stop(void);
22void dlm_astd_suspend(void);
23void dlm_astd_resume(void);
24
25#endif
26
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
new file mode 100644
index 000000000000..88553054bbfa
--- /dev/null
+++ b/fs/dlm/config.c
@@ -0,0 +1,789 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/configfs.h>
17#include <net/sock.h>
18
19#include "config.h"
20#include "lowcomms.h"
21
22/*
23 * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid
24 * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight
25 * /config/dlm/<cluster>/comms/<comm>/nodeid
26 * /config/dlm/<cluster>/comms/<comm>/local
27 * /config/dlm/<cluster>/comms/<comm>/addr
28 * The <cluster> level is useless, but I haven't figured out how to avoid it.
29 */
30
31static struct config_group *space_list;
32static struct config_group *comm_list;
33static struct comm *local_comm;
34
35struct clusters;
36struct cluster;
37struct spaces;
38struct space;
39struct comms;
40struct comm;
41struct nodes;
42struct node;
43
44static struct config_group *make_cluster(struct config_group *, const char *);
45static void drop_cluster(struct config_group *, struct config_item *);
46static void release_cluster(struct config_item *);
47static struct config_group *make_space(struct config_group *, const char *);
48static void drop_space(struct config_group *, struct config_item *);
49static void release_space(struct config_item *);
50static struct config_item *make_comm(struct config_group *, const char *);
51static void drop_comm(struct config_group *, struct config_item *);
52static void release_comm(struct config_item *);
53static struct config_item *make_node(struct config_group *, const char *);
54static void drop_node(struct config_group *, struct config_item *);
55static void release_node(struct config_item *);
56
57static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
58 char *buf);
59static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
60 const char *buf, size_t len);
61static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
62 char *buf);
63static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
64 const char *buf, size_t len);
65
66static ssize_t comm_nodeid_read(struct comm *cm, char *buf);
67static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len);
68static ssize_t comm_local_read(struct comm *cm, char *buf);
69static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len);
70static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len);
71static ssize_t node_nodeid_read(struct node *nd, char *buf);
72static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len);
73static ssize_t node_weight_read(struct node *nd, char *buf);
74static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len);
75
76enum {
77 COMM_ATTR_NODEID = 0,
78 COMM_ATTR_LOCAL,
79 COMM_ATTR_ADDR,
80};
81
82struct comm_attribute {
83 struct configfs_attribute attr;
84 ssize_t (*show)(struct comm *, char *);
85 ssize_t (*store)(struct comm *, const char *, size_t);
86};
87
88static struct comm_attribute comm_attr_nodeid = {
89 .attr = { .ca_owner = THIS_MODULE,
90 .ca_name = "nodeid",
91 .ca_mode = S_IRUGO | S_IWUSR },
92 .show = comm_nodeid_read,
93 .store = comm_nodeid_write,
94};
95
96static struct comm_attribute comm_attr_local = {
97 .attr = { .ca_owner = THIS_MODULE,
98 .ca_name = "local",
99 .ca_mode = S_IRUGO | S_IWUSR },
100 .show = comm_local_read,
101 .store = comm_local_write,
102};
103
104static struct comm_attribute comm_attr_addr = {
105 .attr = { .ca_owner = THIS_MODULE,
106 .ca_name = "addr",
107 .ca_mode = S_IRUGO | S_IWUSR },
108 .store = comm_addr_write,
109};
110
111static struct configfs_attribute *comm_attrs[] = {
112 [COMM_ATTR_NODEID] = &comm_attr_nodeid.attr,
113 [COMM_ATTR_LOCAL] = &comm_attr_local.attr,
114 [COMM_ATTR_ADDR] = &comm_attr_addr.attr,
115 NULL,
116};
117
118enum {
119 NODE_ATTR_NODEID = 0,
120 NODE_ATTR_WEIGHT,
121};
122
123struct node_attribute {
124 struct configfs_attribute attr;
125 ssize_t (*show)(struct node *, char *);
126 ssize_t (*store)(struct node *, const char *, size_t);
127};
128
129static struct node_attribute node_attr_nodeid = {
130 .attr = { .ca_owner = THIS_MODULE,
131 .ca_name = "nodeid",
132 .ca_mode = S_IRUGO | S_IWUSR },
133 .show = node_nodeid_read,
134 .store = node_nodeid_write,
135};
136
137static struct node_attribute node_attr_weight = {
138 .attr = { .ca_owner = THIS_MODULE,
139 .ca_name = "weight",
140 .ca_mode = S_IRUGO | S_IWUSR },
141 .show = node_weight_read,
142 .store = node_weight_write,
143};
144
145static struct configfs_attribute *node_attrs[] = {
146 [NODE_ATTR_NODEID] = &node_attr_nodeid.attr,
147 [NODE_ATTR_WEIGHT] = &node_attr_weight.attr,
148 NULL,
149};
150
151struct clusters {
152 struct configfs_subsystem subsys;
153};
154
155struct cluster {
156 struct config_group group;
157};
158
159struct spaces {
160 struct config_group ss_group;
161};
162
163struct space {
164 struct config_group group;
165 struct list_head members;
166 struct mutex members_lock;
167 int members_count;
168};
169
170struct comms {
171 struct config_group cs_group;
172};
173
174struct comm {
175 struct config_item item;
176 int nodeid;
177 int local;
178 int addr_count;
179 struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
180};
181
182struct nodes {
183 struct config_group ns_group;
184};
185
186struct node {
187 struct config_item item;
188 struct list_head list; /* space->members */
189 int nodeid;
190 int weight;
191};
192
193static struct configfs_group_operations clusters_ops = {
194 .make_group = make_cluster,
195 .drop_item = drop_cluster,
196};
197
198static struct configfs_item_operations cluster_ops = {
199 .release = release_cluster,
200};
201
202static struct configfs_group_operations spaces_ops = {
203 .make_group = make_space,
204 .drop_item = drop_space,
205};
206
207static struct configfs_item_operations space_ops = {
208 .release = release_space,
209};
210
211static struct configfs_group_operations comms_ops = {
212 .make_item = make_comm,
213 .drop_item = drop_comm,
214};
215
216static struct configfs_item_operations comm_ops = {
217 .release = release_comm,
218 .show_attribute = show_comm,
219 .store_attribute = store_comm,
220};
221
222static struct configfs_group_operations nodes_ops = {
223 .make_item = make_node,
224 .drop_item = drop_node,
225};
226
227static struct configfs_item_operations node_ops = {
228 .release = release_node,
229 .show_attribute = show_node,
230 .store_attribute = store_node,
231};
232
233static struct config_item_type clusters_type = {
234 .ct_group_ops = &clusters_ops,
235 .ct_owner = THIS_MODULE,
236};
237
238static struct config_item_type cluster_type = {
239 .ct_item_ops = &cluster_ops,
240 .ct_owner = THIS_MODULE,
241};
242
243static struct config_item_type spaces_type = {
244 .ct_group_ops = &spaces_ops,
245 .ct_owner = THIS_MODULE,
246};
247
248static struct config_item_type space_type = {
249 .ct_item_ops = &space_ops,
250 .ct_owner = THIS_MODULE,
251};
252
253static struct config_item_type comms_type = {
254 .ct_group_ops = &comms_ops,
255 .ct_owner = THIS_MODULE,
256};
257
258static struct config_item_type comm_type = {
259 .ct_item_ops = &comm_ops,
260 .ct_attrs = comm_attrs,
261 .ct_owner = THIS_MODULE,
262};
263
264static struct config_item_type nodes_type = {
265 .ct_group_ops = &nodes_ops,
266 .ct_owner = THIS_MODULE,
267};
268
269static struct config_item_type node_type = {
270 .ct_item_ops = &node_ops,
271 .ct_attrs = node_attrs,
272 .ct_owner = THIS_MODULE,
273};
274
275static struct cluster *to_cluster(struct config_item *i)
276{
277 return i ? container_of(to_config_group(i), struct cluster, group):NULL;
278}
279
280static struct space *to_space(struct config_item *i)
281{
282 return i ? container_of(to_config_group(i), struct space, group) : NULL;
283}
284
285static struct comm *to_comm(struct config_item *i)
286{
287 return i ? container_of(i, struct comm, item) : NULL;
288}
289
290static struct node *to_node(struct config_item *i)
291{
292 return i ? container_of(i, struct node, item) : NULL;
293}
294
295static struct config_group *make_cluster(struct config_group *g,
296 const char *name)
297{
298 struct cluster *cl = NULL;
299 struct spaces *sps = NULL;
300 struct comms *cms = NULL;
301 void *gps = NULL;
302
303 cl = kzalloc(sizeof(struct cluster), GFP_KERNEL);
304 gps = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
305 sps = kzalloc(sizeof(struct spaces), GFP_KERNEL);
306 cms = kzalloc(sizeof(struct comms), GFP_KERNEL);
307
308 if (!cl || !gps || !sps || !cms)
309 goto fail;
310
311 config_group_init_type_name(&cl->group, name, &cluster_type);
312 config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type);
313 config_group_init_type_name(&cms->cs_group, "comms", &comms_type);
314
315 cl->group.default_groups = gps;
316 cl->group.default_groups[0] = &sps->ss_group;
317 cl->group.default_groups[1] = &cms->cs_group;
318 cl->group.default_groups[2] = NULL;
319
320 space_list = &sps->ss_group;
321 comm_list = &cms->cs_group;
322 return &cl->group;
323
324 fail:
325 kfree(cl);
326 kfree(gps);
327 kfree(sps);
328 kfree(cms);
329 return NULL;
330}
331
332static void drop_cluster(struct config_group *g, struct config_item *i)
333{
334 struct cluster *cl = to_cluster(i);
335 struct config_item *tmp;
336 int j;
337
338 for (j = 0; cl->group.default_groups[j]; j++) {
339 tmp = &cl->group.default_groups[j]->cg_item;
340 cl->group.default_groups[j] = NULL;
341 config_item_put(tmp);
342 }
343
344 space_list = NULL;
345 comm_list = NULL;
346
347 config_item_put(i);
348}
349
350static void release_cluster(struct config_item *i)
351{
352 struct cluster *cl = to_cluster(i);
353 kfree(cl->group.default_groups);
354 kfree(cl);
355}
356
357static struct config_group *make_space(struct config_group *g, const char *name)
358{
359 struct space *sp = NULL;
360 struct nodes *nds = NULL;
361 void *gps = NULL;
362
363 sp = kzalloc(sizeof(struct space), GFP_KERNEL);
364 gps = kcalloc(2, sizeof(struct config_group *), GFP_KERNEL);
365 nds = kzalloc(sizeof(struct nodes), GFP_KERNEL);
366
367 if (!sp || !gps || !nds)
368 goto fail;
369
370 config_group_init_type_name(&sp->group, name, &space_type);
371 config_group_init_type_name(&nds->ns_group, "nodes", &nodes_type);
372
373 sp->group.default_groups = gps;
374 sp->group.default_groups[0] = &nds->ns_group;
375 sp->group.default_groups[1] = NULL;
376
377 INIT_LIST_HEAD(&sp->members);
378 mutex_init(&sp->members_lock);
379 sp->members_count = 0;
380 return &sp->group;
381
382 fail:
383 kfree(sp);
384 kfree(gps);
385 kfree(nds);
386 return NULL;
387}
388
389static void drop_space(struct config_group *g, struct config_item *i)
390{
391 struct space *sp = to_space(i);
392 struct config_item *tmp;
393 int j;
394
395 /* assert list_empty(&sp->members) */
396
397 for (j = 0; sp->group.default_groups[j]; j++) {
398 tmp = &sp->group.default_groups[j]->cg_item;
399 sp->group.default_groups[j] = NULL;
400 config_item_put(tmp);
401 }
402
403 config_item_put(i);
404}
405
406static void release_space(struct config_item *i)
407{
408 struct space *sp = to_space(i);
409 kfree(sp->group.default_groups);
410 kfree(sp);
411}
412
413static struct config_item *make_comm(struct config_group *g, const char *name)
414{
415 struct comm *cm;
416
417 cm = kzalloc(sizeof(struct comm), GFP_KERNEL);
418 if (!cm)
419 return NULL;
420
421 config_item_init_type_name(&cm->item, name, &comm_type);
422 cm->nodeid = -1;
423 cm->local = 0;
424 cm->addr_count = 0;
425 return &cm->item;
426}
427
428static void drop_comm(struct config_group *g, struct config_item *i)
429{
430 struct comm *cm = to_comm(i);
431 if (local_comm == cm)
432 local_comm = NULL;
433 dlm_lowcomms_close(cm->nodeid);
434 while (cm->addr_count--)
435 kfree(cm->addr[cm->addr_count]);
436 config_item_put(i);
437}
438
439static void release_comm(struct config_item *i)
440{
441 struct comm *cm = to_comm(i);
442 kfree(cm);
443}
444
445static struct config_item *make_node(struct config_group *g, const char *name)
446{
447 struct space *sp = to_space(g->cg_item.ci_parent);
448 struct node *nd;
449
450 nd = kzalloc(sizeof(struct node), GFP_KERNEL);
451 if (!nd)
452 return NULL;
453
454 config_item_init_type_name(&nd->item, name, &node_type);
455 nd->nodeid = -1;
456 nd->weight = 1; /* default weight of 1 if none is set */
457
458 mutex_lock(&sp->members_lock);
459 list_add(&nd->list, &sp->members);
460 sp->members_count++;
461 mutex_unlock(&sp->members_lock);
462
463 return &nd->item;
464}
465
466static void drop_node(struct config_group *g, struct config_item *i)
467{
468 struct space *sp = to_space(g->cg_item.ci_parent);
469 struct node *nd = to_node(i);
470
471 mutex_lock(&sp->members_lock);
472 list_del(&nd->list);
473 sp->members_count--;
474 mutex_unlock(&sp->members_lock);
475
476 config_item_put(i);
477}
478
479static void release_node(struct config_item *i)
480{
481 struct node *nd = to_node(i);
482 kfree(nd);
483}
484
/* Root of the configfs tree: registered as subsystem "dlm", so
   userspace sees /config/dlm/.  Clusters are created beneath it via
   clusters_ops.make_group (make_cluster). */
485static struct clusters clusters_root = {
486 .subsys = {
487 .su_group = {
488 .cg_item = {
489 .ci_namebuf = "dlm",
490 .ci_type = &clusters_type,
491 },
492 },
493 },
494};
495
/* Register the "dlm" configfs subsystem.  Returns 0 or the error from
   configfs_register_subsystem(). */
496int dlm_config_init(void)
497{
498 config_group_init(&clusters_root.subsys.su_group);
/* legacy semaphore init; su_sem serializes walks of the configfs
   children (see get_comm) */
499 init_MUTEX(&clusters_root.subsys.su_sem);
500 return configfs_register_subsystem(&clusters_root.subsys);
501}
502
/* Tear down the "dlm" configfs subsystem (module exit path). */
503void dlm_config_exit(void)
504{
505 configfs_unregister_subsystem(&clusters_root.subsys);
506}
507
508/*
509 * Functions for user space to read/write attributes
510 */
511
512static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
513 char *buf)
514{
515 struct comm *cm = to_comm(i);
516 struct comm_attribute *cma =
517 container_of(a, struct comm_attribute, attr);
518 return cma->show ? cma->show(cm, buf) : 0;
519}
520
521static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
522 const char *buf, size_t len)
523{
524 struct comm *cm = to_comm(i);
525 struct comm_attribute *cma =
526 container_of(a, struct comm_attribute, attr);
527 return cma->store ? cma->store(cm, buf, len) : -EINVAL;
528}
529
530static ssize_t comm_nodeid_read(struct comm *cm, char *buf)
531{
532 return sprintf(buf, "%d\n", cm->nodeid);
533}
534
535static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len)
536{
537 cm->nodeid = simple_strtol(buf, NULL, 0);
538 return len;
539}
540
541static ssize_t comm_local_read(struct comm *cm, char *buf)
542{
543 return sprintf(buf, "%d\n", cm->local);
544}
545
546static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len)
547{
548 cm->local= simple_strtol(buf, NULL, 0);
549 if (cm->local && !local_comm)
550 local_comm = cm;
551 return len;
552}
553
554static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len)
555{
556 struct sockaddr_storage *addr;
557
558 if (len != sizeof(struct sockaddr_storage))
559 return -EINVAL;
560
561 if (cm->addr_count >= DLM_MAX_ADDR_COUNT)
562 return -ENOSPC;
563
564 addr = kzalloc(sizeof(*addr), GFP_KERNEL);
565 if (!addr)
566 return -ENOMEM;
567
568 memcpy(addr, buf, len);
569 cm->addr[cm->addr_count++] = addr;
570 return len;
571}
572
573static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
574 char *buf)
575{
576 struct node *nd = to_node(i);
577 struct node_attribute *nda =
578 container_of(a, struct node_attribute, attr);
579 return nda->show ? nda->show(nd, buf) : 0;
580}
581
582static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
583 const char *buf, size_t len)
584{
585 struct node *nd = to_node(i);
586 struct node_attribute *nda =
587 container_of(a, struct node_attribute, attr);
588 return nda->store ? nda->store(nd, buf, len) : -EINVAL;
589}
590
591static ssize_t node_nodeid_read(struct node *nd, char *buf)
592{
593 return sprintf(buf, "%d\n", nd->nodeid);
594}
595
596static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len)
597{
598 nd->nodeid = simple_strtol(buf, NULL, 0);
599 return len;
600}
601
602static ssize_t node_weight_read(struct node *nd, char *buf)
603{
604 return sprintf(buf, "%d\n", nd->weight);
605}
606
607static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len)
608{
609 nd->weight = simple_strtol(buf, NULL, 0);
610 return len;
611}
612
613/*
614 * Functions for the dlm to get the info that's been configured
615 */
616
617static struct space *get_space(char *name)
618{
619 if (!space_list)
620 return NULL;
621 return to_space(config_group_find_obj(space_list, name));
622}
623
624static void put_space(struct space *sp)
625{
626 config_item_put(&sp->group.cg_item);
627}
628
/* Find the comm whose nodeid matches (when nodeid != 0), or whose
   first address matches addr (when nodeid == 0).  Returns the comm
   with an extra config_item reference held (drop with put_comm), or
   NULL when absent. */
629static struct comm *get_comm(int nodeid, struct sockaddr_storage *addr)
630{
631 struct config_item *i;
632 struct comm *cm = NULL;
633 int found = 0;
634
635 if (!comm_list)
636 return NULL;
637
/* su_sem guards the configfs child list against concurrent
   mkdir/rmdir of comm items */
638 down(&clusters_root.subsys.su_sem);
639
640 list_for_each_entry(i, &comm_list->cg_children, ci_entry) {
641 cm = to_comm(i);
642
643 if (nodeid) {
/* match by nodeid */
644 if (cm->nodeid != nodeid)
645 continue;
646 found = 1;
647 break;
648 } else {
/* match by first address; comms with no address can't match */
649 if (!cm->addr_count ||
650 memcmp(cm->addr[0], addr, sizeof(*addr)))
651 continue;
652 found = 1;
653 break;
654 }
655 }
656 up(&clusters_root.subsys.su_sem);
657
/* NOTE(review): the reference is taken after su_sem is dropped; a
   concurrent rmdir between up() and config_item_get() looks possible
   - confirm against configfs item-lifetime rules */
658 if (found)
659 config_item_get(i);
660 else
661 cm = NULL;
662 return cm;
663}
664
/* Drop the reference taken by get_comm(). */
665static void put_comm(struct comm *cm)
666{
667 config_item_put(&cm->item);
668}
669
670/* caller must free mem */
/* Build an array of the nodeids configured in lockspace <lsname>.
   Returns the member count (0 if none, *ids_out untouched), with
   *ids_out set to a kcalloc'd array the caller must kfree();
   -EEXIST if the space doesn't exist; -ENOMEM on allocation failure. */
671int dlm_nodeid_list(char *lsname, int **ids_out)
672{
673 struct space *sp;
674 struct node *nd;
675 int i = 0, rv = 0;
676 int *ids;
677
678 sp = get_space(lsname);
679 if (!sp)
680 return -EEXIST;
681
/* members_lock keeps the list and members_count consistent versus
   make_node/drop_node */
682 mutex_lock(&sp->members_lock);
683 if (!sp->members_count) {
684 rv = 0;
685 goto out;
686 }
687
688 ids = kcalloc(sp->members_count, sizeof(int), GFP_KERNEL);
689 if (!ids) {
690 rv = -ENOMEM;
691 goto out;
692 }
693
694 rv = sp->members_count;
695 list_for_each_entry(nd, &sp->members, list)
696 ids[i++] = nd->nodeid;
697
/* count and list are updated together under members_lock, so a
   mismatch here indicates a bug elsewhere */
698 if (rv != i)
699 printk("bad nodeid count %d %d\n", rv, i);
700
701 *ids_out = ids;
702 out:
703 mutex_unlock(&sp->members_lock);
704 put_space(sp);
705 return rv;
706}
707
708int dlm_node_weight(char *lsname, int nodeid)
709{
710 struct space *sp;
711 struct node *nd;
712 int w = -EEXIST;
713
714 sp = get_space(lsname);
715 if (!sp)
716 goto out;
717
718 mutex_lock(&sp->members_lock);
719 list_for_each_entry(nd, &sp->members, list) {
720 if (nd->nodeid != nodeid)
721 continue;
722 w = nd->weight;
723 break;
724 }
725 mutex_unlock(&sp->members_lock);
726 put_space(sp);
727 out:
728 return w;
729}
730
731int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr)
732{
733 struct comm *cm = get_comm(nodeid, NULL);
734 if (!cm)
735 return -EEXIST;
736 if (!cm->addr_count)
737 return -ENOENT;
738 memcpy(addr, cm->addr[0], sizeof(*addr));
739 put_comm(cm);
740 return 0;
741}
742
743int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
744{
745 struct comm *cm = get_comm(0, addr);
746 if (!cm)
747 return -EEXIST;
748 *nodeid = cm->nodeid;
749 put_comm(cm);
750 return 0;
751}
752
753int dlm_our_nodeid(void)
754{
755 return local_comm ? local_comm->nodeid : 0;
756}
757
758/* num 0 is first addr, num 1 is second addr */
759int dlm_our_addr(struct sockaddr_storage *addr, int num)
760{
761 if (!local_comm)
762 return -1;
763 if (num + 1 > local_comm->addr_count)
764 return -1;
765 memcpy(addr, local_comm->addr[num], sizeof(*addr));
766 return 0;
767}
768
769/* Config file defaults */
770#define DEFAULT_TCP_PORT 21064
771#define DEFAULT_BUFFER_SIZE 4096
772#define DEFAULT_RSBTBL_SIZE 256
773#define DEFAULT_LKBTBL_SIZE 1024
774#define DEFAULT_DIRTBL_SIZE 512
775#define DEFAULT_RECOVER_TIMER 5
776#define DEFAULT_TOSS_SECS 10
777#define DEFAULT_SCAN_SECS 5
778
/* Global dlm tuning knobs, pre-loaded with the defaults above; the
   rest of the dlm reads them through the extern in config.h. */
779struct dlm_config_info dlm_config = {
780 .tcp_port = DEFAULT_TCP_PORT,
781 .buffer_size = DEFAULT_BUFFER_SIZE,
782 .rsbtbl_size = DEFAULT_RSBTBL_SIZE,
783 .lkbtbl_size = DEFAULT_LKBTBL_SIZE,
784 .dirtbl_size = DEFAULT_DIRTBL_SIZE,
785 .recover_timer = DEFAULT_RECOVER_TIMER,
786 .toss_secs = DEFAULT_TOSS_SECS,
787 .scan_secs = DEFAULT_SCAN_SECS
788};
789
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
new file mode 100644
index 000000000000..9da7839958a9
--- /dev/null
+++ b/fs/dlm/config.h
@@ -0,0 +1,42 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __CONFIG_DOT_H__
15#define __CONFIG_DOT_H__
16
17#define DLM_MAX_ADDR_COUNT 3
18
19struct dlm_config_info {
20 int tcp_port;
21 int buffer_size;
22 int rsbtbl_size;
23 int lkbtbl_size;
24 int dirtbl_size;
25 int recover_timer;
26 int toss_secs;
27 int scan_secs;
28};
29
30extern struct dlm_config_info dlm_config;
31
32int dlm_config_init(void);
33void dlm_config_exit(void);
34int dlm_node_weight(char *lsname, int nodeid);
35int dlm_nodeid_list(char *lsname, int **ids_out);
36int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr);
37int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid);
38int dlm_our_nodeid(void);
39int dlm_our_addr(struct sockaddr_storage *addr, int num);
40
41#endif /* __CONFIG_DOT_H__ */
42
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
new file mode 100644
index 000000000000..ca94a837a5bb
--- /dev/null
+++ b/fs/dlm/debug_fs.c
@@ -0,0 +1,387 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#include <linux/pagemap.h>
14#include <linux/seq_file.h>
15#include <linux/module.h>
16#include <linux/ctype.h>
17#include <linux/debugfs.h>
18
19#include "dlm_internal.h"
20
21#define DLM_DEBUG_BUF_LEN 4096
22static char debug_buf[DLM_DEBUG_BUF_LEN];
23static struct mutex debug_buf_lock;
24
25static struct dentry *dlm_root;
26
27struct rsb_iter {
28 int entry;
29 struct dlm_ls *ls;
30 struct list_head *next;
31 struct dlm_rsb *rsb;
32};
33
34/*
35 * dump all rsb's in the lockspace hash table
36 */
37
38static char *print_lockmode(int mode)
39{
40 switch (mode) {
41 case DLM_LOCK_IV:
42 return "--";
43 case DLM_LOCK_NL:
44 return "NL";
45 case DLM_LOCK_CR:
46 return "CR";
47 case DLM_LOCK_CW:
48 return "CW";
49 case DLM_LOCK_PR:
50 return "PR";
51 case DLM_LOCK_PW:
52 return "PW";
53 case DLM_LOCK_EX:
54 return "EX";
55 default:
56 return "??";
57 }
58}
59
60static void print_lock(struct seq_file *s, struct dlm_lkb *lkb,
61 struct dlm_rsb *res)
62{
63 seq_printf(s, "%08x %s", lkb->lkb_id, print_lockmode(lkb->lkb_grmode));
64
65 if (lkb->lkb_status == DLM_LKSTS_CONVERT
66 || lkb->lkb_status == DLM_LKSTS_WAITING)
67 seq_printf(s, " (%s)", print_lockmode(lkb->lkb_rqmode));
68
69 if (lkb->lkb_nodeid) {
70 if (lkb->lkb_nodeid != res->res_nodeid)
71 seq_printf(s, " Remote: %3d %08x", lkb->lkb_nodeid,
72 lkb->lkb_remid);
73 else
74 seq_printf(s, " Master: %08x", lkb->lkb_remid);
75 }
76
77 if (lkb->lkb_wait_type)
78 seq_printf(s, " wait_type: %d", lkb->lkb_wait_type);
79
80 seq_printf(s, "\n");
81}
82
83static int print_resource(struct dlm_rsb *res, struct seq_file *s)
84{
85 struct dlm_lkb *lkb;
86 int i, lvblen = res->res_ls->ls_lvblen, recover_list, root_list;
87
88 seq_printf(s, "\nResource %p Name (len=%d) \"", res, res->res_length);
89 for (i = 0; i < res->res_length; i++) {
90 if (isprint(res->res_name[i]))
91 seq_printf(s, "%c", res->res_name[i]);
92 else
93 seq_printf(s, "%c", '.');
94 }
95 if (res->res_nodeid > 0)
96 seq_printf(s, "\" \nLocal Copy, Master is node %d\n",
97 res->res_nodeid);
98 else if (res->res_nodeid == 0)
99 seq_printf(s, "\" \nMaster Copy\n");
100 else if (res->res_nodeid == -1)
101 seq_printf(s, "\" \nLooking up master (lkid %x)\n",
102 res->res_first_lkid);
103 else
104 seq_printf(s, "\" \nInvalid master %d\n", res->res_nodeid);
105
106 /* Print the LVB: */
107 if (res->res_lvbptr) {
108 seq_printf(s, "LVB: ");
109 for (i = 0; i < lvblen; i++) {
110 if (i == lvblen / 2)
111 seq_printf(s, "\n ");
112 seq_printf(s, "%02x ",
113 (unsigned char) res->res_lvbptr[i]);
114 }
115 if (rsb_flag(res, RSB_VALNOTVALID))
116 seq_printf(s, " (INVALID)");
117 seq_printf(s, "\n");
118 }
119
120 root_list = !list_empty(&res->res_root_list);
121 recover_list = !list_empty(&res->res_recover_list);
122
123 if (root_list || recover_list) {
124 seq_printf(s, "Recovery: root %d recover %d flags %lx "
125 "count %d\n", root_list, recover_list,
126 res->res_flags, res->res_recover_locks_count);
127 }
128
129 /* Print the locks attached to this resource */
130 seq_printf(s, "Granted Queue\n");
131 list_for_each_entry(lkb, &res->res_grantqueue, lkb_statequeue)
132 print_lock(s, lkb, res);
133
134 seq_printf(s, "Conversion Queue\n");
135 list_for_each_entry(lkb, &res->res_convertqueue, lkb_statequeue)
136 print_lock(s, lkb, res);
137
138 seq_printf(s, "Waiting Queue\n");
139 list_for_each_entry(lkb, &res->res_waitqueue, lkb_statequeue)
140 print_lock(s, lkb, res);
141
142 if (list_empty(&res->res_lookup))
143 goto out;
144
145 seq_printf(s, "Lookup Queue\n");
146 list_for_each_entry(lkb, &res->res_lookup, lkb_rsb_lookup) {
147 seq_printf(s, "%08x %s", lkb->lkb_id,
148 print_lockmode(lkb->lkb_rqmode));
149 if (lkb->lkb_wait_type)
150 seq_printf(s, " wait_type: %d", lkb->lkb_wait_type);
151 seq_printf(s, "\n");
152 }
153 out:
154 return 0;
155}
156
/* Advance ri to the next rsb, walking the lockspace hash table bucket
   by bucket.  Returns 0 with ri->rsb set, 1 when the table is
   exhausted. */
157static int rsb_iter_next(struct rsb_iter *ri)
158{
159 struct dlm_ls *ls = ri->ls;
160 int i;
161
/* no position within a bucket yet: scan forward from ri->entry */
162 if (!ri->next) {
163 top:
164 /* Find the next non-empty hash bucket */
165 for (i = ri->entry; i < ls->ls_rsbtbl_size; i++) {
166 read_lock(&ls->ls_rsbtbl[i].lock);
167 if (!list_empty(&ls->ls_rsbtbl[i].list)) {
168 ri->next = ls->ls_rsbtbl[i].list.next;
169 read_unlock(&ls->ls_rsbtbl[i].lock);
170 break;
171 }
172 read_unlock(&ls->ls_rsbtbl[i].lock);
173 }
174 ri->entry = i;
175
176 if (ri->entry >= ls->ls_rsbtbl_size)
177 return 1;
178 } else {
179 i = ri->entry;
180 read_lock(&ls->ls_rsbtbl[i].lock);
181 ri->next = ri->next->next;
/* true exactly when ri->next has advanced onto the bucket's list
   head - NOTE(review): the bucket lock is not held between calls,
   so ri->next may dangle if the rsb was removed meanwhile; confirm
   callers tolerate that */
182 if (ri->next->next == ls->ls_rsbtbl[i].list.next) {
183 /* End of list - move to next bucket */
184 ri->next = NULL;
185 ri->entry++;
186 read_unlock(&ls->ls_rsbtbl[i].lock);
187 goto top;
188 }
189 read_unlock(&ls->ls_rsbtbl[i].lock);
190 }
191 ri->rsb = list_entry(ri->next, struct dlm_rsb, res_hashchain);
192
193 return 0;
194}
195
196static void rsb_iter_free(struct rsb_iter *ri)
197{
198 kfree(ri);
199}
200
201static struct rsb_iter *rsb_iter_init(struct dlm_ls *ls)
202{
203 struct rsb_iter *ri;
204
205 ri = kmalloc(sizeof *ri, GFP_KERNEL);
206 if (!ri)
207 return NULL;
208
209 ri->ls = ls;
210 ri->entry = 0;
211 ri->next = NULL;
212
213 if (rsb_iter_next(ri)) {
214 rsb_iter_free(ri);
215 return NULL;
216 }
217
218 return ri;
219}
220
221static void *rsb_seq_start(struct seq_file *file, loff_t *pos)
222{
223 struct rsb_iter *ri;
224 loff_t n = *pos;
225
226 ri = rsb_iter_init(file->private);
227 if (!ri)
228 return NULL;
229
230 while (n--) {
231 if (rsb_iter_next(ri)) {
232 rsb_iter_free(ri);
233 return NULL;
234 }
235 }
236
237 return ri;
238}
239
240static void *rsb_seq_next(struct seq_file *file, void *iter_ptr, loff_t *pos)
241{
242 struct rsb_iter *ri = iter_ptr;
243
244 (*pos)++;
245
246 if (rsb_iter_next(ri)) {
247 rsb_iter_free(ri);
248 return NULL;
249 }
250
251 return ri;
252}
253
254static void rsb_seq_stop(struct seq_file *file, void *iter_ptr)
255{
256 /* nothing for now */
257}
258
259static int rsb_seq_show(struct seq_file *file, void *iter_ptr)
260{
261 struct rsb_iter *ri = iter_ptr;
262
263 print_resource(ri->rsb, file);
264
265 return 0;
266}
267
268static struct seq_operations rsb_seq_ops = {
269 .start = rsb_seq_start,
270 .next = rsb_seq_next,
271 .stop = rsb_seq_stop,
272 .show = rsb_seq_show,
273};
274
275static int rsb_open(struct inode *inode, struct file *file)
276{
277 struct seq_file *seq;
278 int ret;
279
280 ret = seq_open(file, &rsb_seq_ops);
281 if (ret)
282 return ret;
283
284 seq = file->private_data;
285 seq->private = inode->i_private;
286
287 return 0;
288}
289
290static struct file_operations rsb_fops = {
291 .owner = THIS_MODULE,
292 .open = rsb_open,
293 .read = seq_read,
294 .llseek = seq_lseek,
295 .release = seq_release
296};
297
298/*
299 * dump lkb's on the ls_waiters list
300 */
301
302static int waiters_open(struct inode *inode, struct file *file)
303{
304 file->private_data = inode->i_private;
305 return 0;
306}
307
/* Format every lkb on ls_waiters into the shared static debug_buf and
   copy the requested slice to userspace.  debug_buf_lock serializes
   use of the single buffer across all lockspaces and readers. */
308static ssize_t waiters_read(struct file *file, char __user *userbuf,
309 size_t count, loff_t *ppos)
310{
311 struct dlm_ls *ls = file->private_data;
312 struct dlm_lkb *lkb;
/* NOTE(review): ret/rv are size_t yet receive int/ssize_t results
   (snprintf, simple_read_from_buffer); negative values would wrap -
   confirm this is benign on these paths */
313 size_t len = DLM_DEBUG_BUF_LEN, pos = 0, ret, rv;
314
315 mutex_lock(&debug_buf_lock);
316 mutex_lock(&ls->ls_waiters_mutex);
317 memset(debug_buf, 0, sizeof(debug_buf));
318
319 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
/* NOTE(review): res_name is printed via %s; verify it is always
   NUL-terminated inside the rsb */
320 ret = snprintf(debug_buf + pos, len - pos, "%x %d %d %s\n",
321 lkb->lkb_id, lkb->lkb_wait_type,
322 lkb->lkb_nodeid, lkb->lkb_resource->res_name);
/* stop on overflow; remaining entries are silently dropped */
323 if (ret >= len - pos)
324 break;
325 pos += ret;
326 }
327 mutex_unlock(&ls->ls_waiters_mutex);
328
329 rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos);
330 mutex_unlock(&debug_buf_lock);
331 return rv;
332}
333
334static struct file_operations waiters_fops = {
335 .owner = THIS_MODULE,
336 .open = waiters_open,
337 .read = waiters_read
338};
339
340int dlm_create_debug_file(struct dlm_ls *ls)
341{
342 char name[DLM_LOCKSPACE_LEN+8];
343
344 ls->ls_debug_rsb_dentry = debugfs_create_file(ls->ls_name,
345 S_IFREG | S_IRUGO,
346 dlm_root,
347 ls,
348 &rsb_fops);
349 if (!ls->ls_debug_rsb_dentry)
350 return -ENOMEM;
351
352 memset(name, 0, sizeof(name));
353 snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_waiters", ls->ls_name);
354
355 ls->ls_debug_waiters_dentry = debugfs_create_file(name,
356 S_IFREG | S_IRUGO,
357 dlm_root,
358 ls,
359 &waiters_fops);
360 if (!ls->ls_debug_waiters_dentry) {
361 debugfs_remove(ls->ls_debug_rsb_dentry);
362 return -ENOMEM;
363 }
364
365 return 0;
366}
367
368void dlm_delete_debug_file(struct dlm_ls *ls)
369{
370 if (ls->ls_debug_rsb_dentry)
371 debugfs_remove(ls->ls_debug_rsb_dentry);
372 if (ls->ls_debug_waiters_dentry)
373 debugfs_remove(ls->ls_debug_waiters_dentry);
374}
375
376int dlm_register_debugfs(void)
377{
378 mutex_init(&debug_buf_lock);
379 dlm_root = debugfs_create_dir("dlm", NULL);
380 return dlm_root ? 0 : -ENOMEM;
381}
382
383void dlm_unregister_debugfs(void)
384{
385 debugfs_remove(dlm_root);
386}
387
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
new file mode 100644
index 000000000000..46754553fdcc
--- /dev/null
+++ b/fs/dlm/dir.c
@@ -0,0 +1,423 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "member.h"
17#include "lowcomms.h"
18#include "rcom.h"
19#include "config.h"
20#include "memory.h"
21#include "recover.h"
22#include "util.h"
23#include "lock.h"
24#include "dir.h"
25
26
/* Park a directory entry on the recovery free list so it can be
   reused by get_free_de() instead of being freed and reallocated. */
static void put_free_de(struct dlm_ls *ls, struct dlm_direntry *de)
{
	spin_lock(&ls->ls_recover_list_lock);
	list_add(&de->list, &ls->ls_recover_list);
	spin_unlock(&ls->ls_recover_list_lock);
}
33
34static struct dlm_direntry *get_free_de(struct dlm_ls *ls, int len)
35{
36 int found = 0;
37 struct dlm_direntry *de;
38
39 spin_lock(&ls->ls_recover_list_lock);
40 list_for_each_entry(de, &ls->ls_recover_list, list) {
41 if (de->length == len) {
42 list_del(&de->list);
43 de->master_nodeid = 0;
44 memset(de->name, 0, len);
45 found = 1;
46 break;
47 }
48 }
49 spin_unlock(&ls->ls_recover_list_lock);
50
51 if (!found)
52 de = allocate_direntry(ls, len);
53 return de;
54}
55
56void dlm_clear_free_entries(struct dlm_ls *ls)
57{
58 struct dlm_direntry *de;
59
60 spin_lock(&ls->ls_recover_list_lock);
61 while (!list_empty(&ls->ls_recover_list)) {
62 de = list_entry(ls->ls_recover_list.next, struct dlm_direntry,
63 list);
64 list_del(&de->list);
65 free_direntry(de);
66 }
67 spin_unlock(&ls->ls_recover_list_lock);
68}
69
70/*
71 * We use the upper 16 bits of the hash value to select the directory node.
72 * Low bits are used for distribution of rsb's among hash buckets on each node.
73 *
74 * To give the exact range wanted (0 to num_nodes-1), we apply a modulus of
75 * num_nodes to the hash value. This value in the desired range is used as an
76 * offset into the sorted list of nodeid's to give the particular nodeid.
77 */
78
/*
 * Map a resource-name hash to the nodeid of its directory node.
 * The upper 16 bits of the hash index ls_node_array (presumably nodeids
 * repeated according to weight — see make_member_array()); when that
 * array is missing we fall back to an unweighted walk of ls_nodes.
 */
int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash)
{
	struct list_head *tmp;
	struct dlm_member *memb = NULL;
	uint32_t node, n = 0;
	int nodeid;

	/* single-node lockspace: we are always the directory node */
	if (ls->ls_num_nodes == 1) {
		nodeid = dlm_our_nodeid();
		goto out;
	}

	if (ls->ls_node_array) {
		/* weighted fast path: array has ls_total_weight entries */
		node = (hash >> 16) % ls->ls_total_weight;
		nodeid = ls->ls_node_array[node];
		goto out;
	}

	/* make_member_array() failed to kmalloc ls_node_array... */

	node = (hash >> 16) % ls->ls_num_nodes;

	/* walk the member list to the node'th entry */
	list_for_each(tmp, &ls->ls_nodes) {
		if (n++ != node)
			continue;
		memb = list_entry(tmp, struct dlm_member, list);
		break;
	}

	DLM_ASSERT(memb , printk("num_nodes=%u n=%u node=%u\n",
				 ls->ls_num_nodes, n, node););
	nodeid = memb->nodeid;
 out:
	return nodeid;
}
114
115int dlm_dir_nodeid(struct dlm_rsb *r)
116{
117 return dlm_hash2nodeid(r->res_ls, r->res_hash);
118}
119
120static inline uint32_t dir_hash(struct dlm_ls *ls, char *name, int len)
121{
122 uint32_t val;
123
124 val = jhash(name, len, 0);
125 val &= (ls->ls_dirtbl_size - 1);
126
127 return val;
128}
129
130static void add_entry_to_hash(struct dlm_ls *ls, struct dlm_direntry *de)
131{
132 uint32_t bucket;
133
134 bucket = dir_hash(ls, de->name, de->length);
135 list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
136}
137
138static struct dlm_direntry *search_bucket(struct dlm_ls *ls, char *name,
139 int namelen, uint32_t bucket)
140{
141 struct dlm_direntry *de;
142
143 list_for_each_entry(de, &ls->ls_dirtbl[bucket].list, list) {
144 if (de->length == namelen && !memcmp(name, de->name, namelen))
145 goto out;
146 }
147 de = NULL;
148 out:
149 return de;
150}
151
152void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen)
153{
154 struct dlm_direntry *de;
155 uint32_t bucket;
156
157 bucket = dir_hash(ls, name, namelen);
158
159 write_lock(&ls->ls_dirtbl[bucket].lock);
160
161 de = search_bucket(ls, name, namelen, bucket);
162
163 if (!de) {
164 log_error(ls, "remove fr %u none", nodeid);
165 goto out;
166 }
167
168 if (de->master_nodeid != nodeid) {
169 log_error(ls, "remove fr %u ID %u", nodeid, de->master_nodeid);
170 goto out;
171 }
172
173 list_del(&de->list);
174 free_direntry(de);
175 out:
176 write_unlock(&ls->ls_dirtbl[bucket].lock);
177}
178
179void dlm_dir_clear(struct dlm_ls *ls)
180{
181 struct list_head *head;
182 struct dlm_direntry *de;
183 int i;
184
185 DLM_ASSERT(list_empty(&ls->ls_recover_list), );
186
187 for (i = 0; i < ls->ls_dirtbl_size; i++) {
188 write_lock(&ls->ls_dirtbl[i].lock);
189 head = &ls->ls_dirtbl[i].list;
190 while (!list_empty(head)) {
191 de = list_entry(head->next, struct dlm_direntry, list);
192 list_del(&de->list);
193 put_free_de(ls, de);
194 }
195 write_unlock(&ls->ls_dirtbl[i].lock);
196 }
197}
198
/*
 * Rebuild the resource directory during recovery.  Every member node is
 * asked (dlm_rcom_names) for the names of resources it masters whose
 * directory node is us; replies are unpacked into direntries and hashed
 * into the directory table.  Returns 0 on success, or nonzero on error
 * (recovery stopped, rcom failure, or -ENOMEM).
 */
int dlm_recover_directory(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_direntry *de;
	char *b, *last_name = NULL;
	int error = -ENOMEM, last_len, count = 0;
	uint16_t namelen;

	log_debug(ls, "dlm_recover_directory");

	/* no directory: nothing to rebuild, just report the stage done */
	if (dlm_no_directory(ls))
		goto out_status;

	dlm_dir_clear(ls);

	last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_KERNEL);
	if (!last_name)
		goto out;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		memset(last_name, 0, DLM_RESNAME_MAXLEN);
		last_len = 0;

		for (;;) {
			error = dlm_recovery_stopped(ls);
			if (error)
				goto out_free;

			/* request the next batch of names, resuming
			   after the last name received */
			error = dlm_rcom_names(ls, memb->nodeid,
					       last_name, last_len);
			if (error)
				goto out_free;

			schedule();

			/*
			 * pick namelen/name pairs out of received buffer
			 */

			b = ls->ls_recover_buf + sizeof(struct dlm_rcom);

			for (;;) {
				memcpy(&namelen, b, sizeof(uint16_t));
				namelen = be16_to_cpu(namelen);
				b += sizeof(uint16_t);

				/* namelen of 0xFFFF marks end of names for
				   this node; namelen of 0 marks end of the
				   buffer */

				if (namelen == 0xFFFF)
					goto done;
				if (!namelen)
					break;

				error = -ENOMEM;
				de = get_free_de(ls, namelen);
				if (!de)
					goto out_free;

				de->master_nodeid = memb->nodeid;
				de->length = namelen;
				last_len = namelen;
				memcpy(de->name, b, namelen);
				memcpy(last_name, b, namelen);
				b += namelen;

				add_entry_to_hash(ls, de);
				count++;
			}
		}
	 done:
		;
	}

 out_status:
	error = 0;
	dlm_set_recover_status(ls, DLM_RS_DIR);
	log_debug(ls, "dlm_recover_directory %d entries", count);
 out_free:
	kfree(last_name);
 out:
	dlm_clear_free_entries(ls);
	return error;
}
284
/*
 * Look up (creating if absent) the directory entry for a resource name
 * and return the recorded master in *r_nodeid.  If no entry exists the
 * requesting node becomes the master.  Returns -EEXIST when the
 * requester is already the recorded master, -ENOMEM on allocation
 * failure, otherwise 0.
 */
static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
		     int namelen, int *r_nodeid)
{
	struct dlm_direntry *de, *tmp;
	uint32_t bucket;

	bucket = dir_hash(ls, name, namelen);

	write_lock(&ls->ls_dirtbl[bucket].lock);
	de = search_bucket(ls, name, namelen, bucket);
	if (de) {
		*r_nodeid = de->master_nodeid;
		write_unlock(&ls->ls_dirtbl[bucket].lock);
		if (*r_nodeid == nodeid)
			return -EEXIST;
		return 0;
	}

	/* drop the lock to allocate; another thread may insert the same
	   name meanwhile, so re-search before inserting below */
	write_unlock(&ls->ls_dirtbl[bucket].lock);

	de = allocate_direntry(ls, namelen);
	if (!de)
		return -ENOMEM;

	de->master_nodeid = nodeid;
	de->length = namelen;
	memcpy(de->name, name, namelen);

	write_lock(&ls->ls_dirtbl[bucket].lock);
	tmp = search_bucket(ls, name, namelen, bucket);
	if (tmp) {
		/* lost the race: keep the existing entry.
		   NOTE(review): unlike the fast path above, this path
		   does not return -EEXIST when tmp->master_nodeid ==
		   nodeid — confirm callers don't depend on that */
		free_direntry(de);
		de = tmp;
	} else {
		list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
	}
	*r_nodeid = de->master_nodeid;
	write_unlock(&ls->ls_dirtbl[bucket].lock);
	return 0;
}
325
/* Public wrapper around get_entry(): resolve (or establish) the master
   nodeid for a resource name on behalf of the requesting node. */
int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
		   int *r_nodeid)
{
	return get_entry(ls, nodeid, name, namelen, r_nodeid);
}
331
332/* Copy the names of master rsb's into the buffer provided.
333 Only select names whose dir node is the given nodeid. */
334
/* Copy the names of master rsb's into the buffer provided.
   Only select names whose dir node is the given nodeid.

   Wire format of outbuf: a sequence of (be16 namelen, name bytes)
   records; a namelen of 0 means "buffer full, ask again starting
   after the last name", a namelen of 0xFFFF means "no more names".
   inbuf/inlen carry the last name of the previous batch (inlen <= 1
   means start from the beginning). */

void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
			   char *outbuf, int outlen, int nodeid)
{
	struct list_head *list;
	struct dlm_rsb *start_r = NULL, *r = NULL;
	int offset = 0, start_namelen, error, dir_nodeid;
	char *start_name;
	uint16_t be_namelen;

	/*
	 * Find the rsb where we left off (or start again)
	 */

	start_namelen = inlen;
	start_name = inbuf;

	if (start_namelen > 1) {
		/*
		 * We could also use a find_rsb_root() function here that
		 * searched the ls_root_list.
		 */
		error = dlm_find_rsb(ls, start_name, start_namelen, R_MASTER,
				     &start_r);
		DLM_ASSERT(!error && start_r,
			   printk("error %d\n", error););
		DLM_ASSERT(!list_empty(&start_r->res_root_list),
			   dlm_print_rsb(start_r););
		dlm_put_rsb(start_r);
	}

	/*
	 * Send rsb names for rsb's we're master of and whose directory node
	 * matches the requesting node.
	 */

	down_read(&ls->ls_root_sem);
	if (start_r)
		list = start_r->res_root_list.next;
	else
		list = ls->ls_root_list.next;

	for (offset = 0; list != &ls->ls_root_list; list = list->next) {
		r = list_entry(list, struct dlm_rsb, res_root_list);
		/* res_nodeid nonzero means we are not the master; skip */
		if (r->res_nodeid)
			continue;

		dir_nodeid = dlm_dir_nodeid(r);
		if (dir_nodeid != nodeid)
			continue;

		/*
		 * The block ends when we can't fit the following in the
		 * remaining buffer space:
		 * namelen (uint16_t) +
		 * name (r->res_length) +
		 * end-of-block record 0x0000 (uint16_t)
		 */

		if (offset + sizeof(uint16_t)*2 + r->res_length > outlen) {
			/* Write end-of-block record */
			be_namelen = 0;
			memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
			offset += sizeof(uint16_t);
			goto out;
		}

		be_namelen = cpu_to_be16(r->res_length);
		memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
		offset += sizeof(uint16_t);
		memcpy(outbuf + offset, r->res_name, r->res_length);
		offset += r->res_length;
	}

	/*
	 * If we've reached the end of the list (and there's room) write a
	 * terminating record.
	 */

	if ((list == &ls->ls_root_list) &&
	    (offset + sizeof(uint16_t) <= outlen)) {
		/* 0xFFFF is already in wire byte order either way round */
		be_namelen = 0xFFFF;
		memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
		offset += sizeof(uint16_t);
	}

 out:
	up_read(&ls->ls_root_sem);
}
423
diff --git a/fs/dlm/dir.h b/fs/dlm/dir.h
new file mode 100644
index 000000000000..0b0eb1267b6e
--- /dev/null
+++ b/fs/dlm/dir.h
@@ -0,0 +1,30 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
#ifndef __DIR_DOT_H__
#define __DIR_DOT_H__

/* Resource directory interface: the directory maps a resource name to
   the nodeid of the node currently mastering that resource. */

int dlm_dir_nodeid(struct dlm_rsb *rsb);
int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash);
void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int len);
void dlm_dir_clear(struct dlm_ls *ls);
void dlm_clear_free_entries(struct dlm_ls *ls);
int dlm_recover_directory(struct dlm_ls *ls);
int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
		   int *r_nodeid);
void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
			   char *outbuf, int outlen, int nodeid);

#endif				/* __DIR_DOT_H__ */
30
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
new file mode 100644
index 000000000000..1e5cd67e1b7a
--- /dev/null
+++ b/fs/dlm/dlm_internal.h
@@ -0,0 +1,543 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __DLM_INTERNAL_DOT_H__
15#define __DLM_INTERNAL_DOT_H__
16
17/*
18 * This is the main header file to be included in each DLM source file.
19 */
20
21#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/sched.h>
24#include <linux/types.h>
25#include <linux/ctype.h>
26#include <linux/spinlock.h>
27#include <linux/vmalloc.h>
28#include <linux/list.h>
29#include <linux/errno.h>
30#include <linux/random.h>
31#include <linux/delay.h>
32#include <linux/socket.h>
33#include <linux/kthread.h>
34#include <linux/kobject.h>
35#include <linux/kref.h>
36#include <linux/kernel.h>
37#include <linux/jhash.h>
38#include <linux/miscdevice.h>
39#include <linux/mutex.h>
40#include <asm/semaphore.h>
41#include <asm/uaccess.h>
42
43#include <linux/dlm.h>
44
/* maximum length of a lockspace name */
#define DLM_LOCKSPACE_LEN	64

/* Size of the temp buffer midcomms allocates on the stack.
   We try to make this large enough so most messages fit.
   FIXME: should sctp make this unnecessary? */

#define DLM_INBUF_LEN		148

/* forward declarations so the structs below can reference each other */
struct dlm_ls;
struct dlm_lkb;
struct dlm_rsb;
struct dlm_member;
struct dlm_lkbtable;
struct dlm_rsbtable;
struct dlm_dirtable;
struct dlm_direntry;
struct dlm_recover;
struct dlm_header;
struct dlm_message;
struct dlm_rcom;
struct dlm_mhandle;
66
/* logging helpers: log_print for global messages, log_error prefixed
   with the lockspace name */
#define log_print(fmt, args...) \
	printk(KERN_ERR "dlm: "fmt"\n" , ##args)
#define log_error(ls, fmt, args...) \
	printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args)

/* log_debug compiles to log_error when DLM_LOG_DEBUG is defined,
   and to nothing otherwise */
#define DLM_LOG_DEBUG
#ifdef DLM_LOG_DEBUG
#define log_debug(ls, fmt, args...) log_error(ls, fmt, ##args)
#else
#define log_debug(ls, fmt, args...)
#endif

/* Assert macro: on failure prints diagnostics, runs the caller's "do"
   statement block (for extra state dumps), then halts.  Note the
   panic() after BUG() is normally unreachable. */
#define DLM_ASSERT(x, do) \
{ \
  if (!(x)) \
  { \
    printk(KERN_ERR "\nDLM:  Assertion failed on line %d of file %s\n" \
               "DLM:  assertion:  \"%s\"\n" \
               "DLM:  time = %lu\n", \
               __LINE__, __FILE__, #x, jiffies); \
    {do} \
    printk("\n"); \
    BUG(); \
    panic("DLM:  Record message above and reboot.\n"); \
  } \
}

/* sentinel ast address — presumably marks a faked/user ast; verify
   usage against user.c */
#define DLM_FAKE_USER_AST ERR_PTR(-EINVAL)
95
96
/* one resource-directory entry; name is allocated inline past the
   struct (old-style [1] flexible array — sizing done by the allocator) */
struct dlm_direntry {
	struct list_head	list;
	uint32_t		master_nodeid;
	uint16_t		length;		/* bytes in name[] */
	char			name[1];
};

/* one bucket of the resource directory hash table */
struct dlm_dirtable {
	struct list_head	list;
	rwlock_t		lock;
};

/* one bucket of the rsb hash table; "toss" holds unused rsbs kept
   around before being freed */
struct dlm_rsbtable {
	struct list_head	list;
	struct list_head	toss;
	rwlock_t		lock;
};

/* one bucket of the lkb id table; counter feeds new lock ids */
struct dlm_lkbtable {
	struct list_head	list;
	rwlock_t		lock;
	uint16_t		counter;
};
120
/*
 * Lockspace member (per node in a ls)
 */

struct dlm_member {
	struct list_head	list;
	int			nodeid;
	int			weight;	/* directory-node weighting */
};

/*
 * Save and manage recovery state for a lockspace.
 */

struct dlm_recover {
	struct list_head	list;
	int			*nodeids;	/* member nodeids at this event */
	int			node_count;
	uint64_t		seq;		/* recovery sequence number */
};

/*
 * Pass input args to second stage locking function.
 */

struct dlm_args {
	uint32_t		flags;		/* DLM_LKF_ flags from caller */
	void			*astaddr;
	long			astparam;
	void			*bastaddr;
	int			mode;
	struct dlm_lksb		*lksb;
};
154
155
156/*
157 * Lock block
158 *
159 * A lock can be one of three types:
160 *
161 * local copy lock is mastered locally
162 * (lkb_nodeid is zero and DLM_LKF_MSTCPY is not set)
163 * process copy lock is mastered on a remote node
164 * (lkb_nodeid is non-zero and DLM_LKF_MSTCPY is not set)
165 * master copy master node's copy of a lock owned by remote node
166 * (lkb_nodeid is non-zero and DLM_LKF_MSTCPY is set)
167 *
168 * lkb_exflags: a copy of the most recent flags arg provided to dlm_lock or
169 * dlm_unlock. The dlm does not modify these or use any private flags in
170 * this field; it only contains DLM_LKF_ flags from dlm.h. These flags
171 * are sent as-is to the remote master when the lock is remote.
172 *
173 * lkb_flags: internal dlm flags (DLM_IFL_ prefix) from dlm_internal.h.
174 * Some internal flags are shared between the master and process nodes;
175 * these shared flags are kept in the lower two bytes. One of these
176 * flags set on the master copy will be propagated to the process copy
177 * and v.v. Other internal flags are private to the master or process
178 * node (e.g. DLM_IFL_MSTCPY). These are kept in the high two bytes.
179 *
180 * lkb_sbflags: status block flags. These flags are copied directly into
181 * the caller's lksb.sb_flags prior to the dlm_lock/dlm_unlock completion
182 * ast. All defined in dlm.h with DLM_SBF_ prefix.
183 *
184 * lkb_status: the lock status indicates which rsb queue the lock is
185 * on, grant, convert, or wait. DLM_LKSTS_ WAITING/GRANTED/CONVERT
186 *
187 * lkb_wait_type: the dlm message type (DLM_MSG_ prefix) for which a
188 * reply is needed. Only set when the lkb is on the lockspace waiters
189 * list awaiting a reply from a remote node.
190 *
191 * lkb_nodeid: when the lkb is a local copy, nodeid is 0; when the lkb
192 * is a master copy, nodeid specifies the remote lock holder, when the
193 * lkb is a process copy, the nodeid specifies the lock master.
194 */
195
/* lkb_ast_type: which ast kinds are queued for delivery */

#define AST_COMP		1	/* completion ast */
#define AST_BAST		2	/* blocking ast */

/* lkb_status: which rsb queue the lkb is on */

#define DLM_LKSTS_WAITING	1
#define DLM_LKSTS_GRANTED	2
#define DLM_LKSTS_CONVERT	3

/* lkb_flags: low two bytes are shared between master and process
   copies, high two bytes are node-private (see the lkb_flags comment
   above struct dlm_lkb) */

#define DLM_IFL_MSTCPY		0x00010000
#define DLM_IFL_RESEND		0x00020000
#define DLM_IFL_DEAD		0x00040000
#define DLM_IFL_USER		0x00000001
#define DLM_IFL_ORPHAN		0x00000002
214
/* lock block: one per lock; see the "Lock block" comment above for the
   local/process/master copy distinction and flag field semantics */
struct dlm_lkb {
	struct dlm_rsb		*lkb_resource;	/* the rsb */
	struct kref		lkb_ref;
	int			lkb_nodeid;	/* copied from rsb */
	int			lkb_ownpid;	/* pid of lock owner */
	uint32_t		lkb_id;		/* our lock ID */
	uint32_t		lkb_remid;	/* lock ID on remote partner */
	uint32_t		lkb_exflags;	/* external flags from caller */
	uint32_t		lkb_sbflags;	/* lksb flags */
	uint32_t		lkb_flags;	/* internal flags */
	uint32_t		lkb_lvbseq;	/* lvb sequence number */

	int8_t			lkb_status;	/* granted, waiting, convert */
	int8_t			lkb_rqmode;	/* requested lock mode */
	int8_t			lkb_grmode;	/* granted lock mode */
	int8_t			lkb_bastmode;	/* requested mode */
	int8_t			lkb_highbast;	/* highest mode bast sent for */

	int8_t			lkb_wait_type;	/* type of reply waiting for */
	int8_t			lkb_ast_type;	/* type of ast queued for */

	struct list_head	lkb_idtbl_list;	/* lockspace lkbtbl */
	struct list_head	lkb_statequeue;	/* rsb g/c/w list */
	struct list_head	lkb_rsb_lookup;	/* waiting for rsb lookup */
	struct list_head	lkb_wait_reply;	/* waiting for remote reply */
	struct list_head	lkb_astqueue;	/* need ast to be sent */
	struct list_head	lkb_ownqueue;	/* list of locks for a process */

	char			*lkb_lvbptr;
	struct dlm_lksb		*lkb_lksb;	/* caller's status block */
	void			*lkb_astaddr;	/* caller's ast function */
	void			*lkb_bastaddr;	/* caller's bast function */
	long			lkb_astparam;	/* caller's ast arg */
};
249
250
/* resource block: one per named lock resource; res_name is allocated
   inline past the struct (old-style [1] flexible array) */
struct dlm_rsb {
	struct dlm_ls		*res_ls;	/* the lockspace */
	struct kref		res_ref;
	struct mutex		res_mutex;
	unsigned long		res_flags;	/* RSB_ bit flags */
	int			res_length;	/* length of rsb name */
	int			res_nodeid;	/* zero when we are master */
	uint32_t		res_lvbseq;
	uint32_t		res_hash;	/* jhash of res_name */
	uint32_t		res_bucket;	/* rsbtbl */
	unsigned long		res_toss_time;
	uint32_t		res_first_lkid;
	struct list_head	res_lookup;	/* lkbs waiting on first */
	struct list_head	res_hashchain;	/* rsbtbl */
	struct list_head	res_grantqueue;
	struct list_head	res_convertqueue;
	struct list_head	res_waitqueue;

	struct list_head	res_root_list;	/* used for recovery */
	struct list_head	res_recover_list; /* used for recovery */
	int			res_recover_locks_count;

	char			*res_lvbptr;
	char			res_name[1];
};
276
/* find_rsb() flags */

#define R_MASTER		1	/* only return rsb if it's a master */
#define R_CREATE		2	/* create/add rsb if not found */

/* rsb_flags: bit numbers within res_flags */

enum rsb_flags {
	RSB_MASTER_UNCERTAIN,
	RSB_VALNOTVALID,
	RSB_VALNOTVALID_PREV,
	RSB_NEW_MASTER,
	RSB_NEW_MASTER2,
	RSB_RECOVER_CONVERT,
	RSB_LOCKS_PURGED,
};

/* res_flags accessors; set/clear use the non-atomic __set_bit and
   __clear_bit, so presumably callers serialize via res_mutex — verify */

static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
{
	__set_bit(flag, &r->res_flags);
}

static inline void rsb_clear_flag(struct dlm_rsb *r, enum rsb_flags flag)
{
	__clear_bit(flag, &r->res_flags);
}

static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
{
	return test_bit(flag, &r->res_flags);
}
308
309
/* dlm_header is first element of all structs sent between nodes */

#define DLM_HEADER_MAJOR	0x00020000
#define DLM_HEADER_MINOR	0x00000001

/* h_cmd values */
#define DLM_MSG			1
#define DLM_RCOM		2

struct dlm_header {
	uint32_t		h_version;
	uint32_t		h_lockspace;
	uint32_t		h_nodeid;	/* nodeid of sender */
	uint16_t		h_length;
	uint8_t			h_cmd;		/* DLM_MSG, DLM_RCOM */
	uint8_t			h_pad;
};

/* m_type values for struct dlm_message */

#define DLM_MSG_REQUEST		1
#define DLM_MSG_CONVERT		2
#define DLM_MSG_UNLOCK		3
#define DLM_MSG_CANCEL		4
#define DLM_MSG_REQUEST_REPLY	5
#define DLM_MSG_CONVERT_REPLY	6
#define DLM_MSG_UNLOCK_REPLY	7
#define DLM_MSG_CANCEL_REPLY	8
#define DLM_MSG_GRANT		9
#define DLM_MSG_BAST		10
#define DLM_MSG_LOOKUP		11
#define DLM_MSG_REMOVE		12
#define DLM_MSG_LOOKUP_REPLY	13

/* normal-operation wire message */
struct dlm_message {
	struct dlm_header	m_header;
	uint32_t		m_type;		/* DLM_MSG_ */
	uint32_t		m_nodeid;
	uint32_t		m_pid;
	uint32_t		m_lkid;		/* lkid on sender */
	uint32_t		m_remid;	/* lkid on receiver */
	uint32_t		m_parent_lkid;
	uint32_t		m_parent_remid;
	uint32_t		m_exflags;
	uint32_t		m_sbflags;
	uint32_t		m_flags;
	uint32_t		m_lvbseq;
	uint32_t		m_hash;
	int			m_status;
	int			m_grmode;
	int			m_rqmode;
	int			m_bastmode;
	int			m_asts;
	int			m_result;	/* 0 or -EXXX */
	char			m_extra[0];	/* name or lvb */
};
364
365
/* ls_recover_status bits (DLM_RS_), one per recovery stage */

#define DLM_RS_NODES		0x00000001
#define DLM_RS_NODES_ALL	0x00000002
#define DLM_RS_DIR		0x00000004
#define DLM_RS_DIR_ALL		0x00000008
#define DLM_RS_LOCKS		0x00000010
#define DLM_RS_LOCKS_ALL	0x00000020
#define DLM_RS_DONE		0x00000040
#define DLM_RS_DONE_ALL		0x00000080

/* rc_type values for struct dlm_rcom */

#define DLM_RCOM_STATUS		1
#define DLM_RCOM_NAMES		2
#define DLM_RCOM_LOOKUP		3
#define DLM_RCOM_LOCK		4
#define DLM_RCOM_STATUS_REPLY	5
#define DLM_RCOM_NAMES_REPLY	6
#define DLM_RCOM_LOOKUP_REPLY	7
#define DLM_RCOM_LOCK_REPLY	8

/* recovery wire message; rc_buf carries type-specific payload */
struct dlm_rcom {
	struct dlm_header	rc_header;
	uint32_t		rc_type;	/* DLM_RCOM_ */
	int			rc_result;	/* multi-purpose */
	uint64_t		rc_id;		/* match reply with request */
	char			rc_buf[0];
};

/* lockspace parameters carried in an rcom payload — presumably
   exchanged in STATUS replies; verify against rcom.c */
struct rcom_config {
	uint32_t		rf_lvblen;
	uint32_t		rf_lsflags;
	uint64_t		rf_unused;
};

/* wire form of an lkb sent during lock recovery (DLM_RCOM_LOCK) */
struct rcom_lock {
	uint32_t		rl_ownpid;
	uint32_t		rl_lkid;
	uint32_t		rl_remid;
	uint32_t		rl_parent_lkid;
	uint32_t		rl_parent_remid;
	uint32_t		rl_exflags;
	uint32_t		rl_flags;
	uint32_t		rl_lvbseq;
	int			rl_result;
	int8_t			rl_rqmode;
	int8_t			rl_grmode;
	int8_t			rl_status;
	int8_t			rl_asts;
	uint16_t		rl_wait_type;
	uint16_t		rl_namelen;
	char			rl_name[DLM_RESNAME_MAXLEN];
	char			rl_lvb[0];
};
417
/* per-lockspace state: hash tables, membership, and recovery machinery;
   ls_name is allocated inline past the struct */
struct dlm_ls {
	struct list_head	ls_list;	/* list of lockspaces */
	dlm_lockspace_t		*ls_local_handle;
	uint32_t		ls_global_id;	/* global unique lockspace ID */
	uint32_t		ls_exflags;
	int			ls_lvblen;
	int			ls_count;	/* reference count */
	unsigned long		ls_flags;	/* LSFL_ */
	struct kobject		ls_kobj;

	struct dlm_rsbtable	*ls_rsbtbl;
	uint32_t		ls_rsbtbl_size;

	struct dlm_lkbtable	*ls_lkbtbl;
	uint32_t		ls_lkbtbl_size;

	struct dlm_dirtable	*ls_dirtbl;
	uint32_t		ls_dirtbl_size;

	struct mutex		ls_waiters_mutex;
	struct list_head	ls_waiters;	/* lkbs needing a reply */

	struct list_head	ls_nodes;	/* current nodes in ls */
	struct list_head	ls_nodes_gone;	/* dead node list, recovery */
	int			ls_num_nodes;	/* number of nodes in ls */
	int			ls_low_nodeid;
	int			ls_total_weight;
	int			*ls_node_array;	/* weighted nodeid lookup */

	struct dlm_rsb		ls_stub_rsb;	/* for returning errors */
	struct dlm_lkb		ls_stub_lkb;	/* for returning errors */
	struct dlm_message	ls_stub_ms;	/* for faking a reply */

	struct dentry		*ls_debug_rsb_dentry; /* debugfs */
	struct dentry		*ls_debug_waiters_dentry; /* debugfs */

	wait_queue_head_t	ls_uevent_wait;	/* user part of join/leave */
	int			ls_uevent_result;

	struct miscdevice	ls_device;

	/* recovery related */

	struct timer_list	ls_timer;
	struct task_struct	*ls_recoverd_task;
	struct mutex		ls_recoverd_active;
	spinlock_t		ls_recover_lock;
	uint32_t		ls_recover_status; /* DLM_RS_ */
	uint64_t		ls_recover_seq;
	struct dlm_recover	*ls_recover_args;
	struct rw_semaphore	ls_in_recovery;	/* block local requests */
	struct list_head	ls_requestqueue;/* queue remote requests */
	struct mutex		ls_requestqueue_mutex;
	char			*ls_recover_buf;
	int			ls_recover_nodeid; /* for debugging */
	uint64_t		ls_rcom_seq;
	struct list_head	ls_recover_list; /* direntry free list */
	spinlock_t		ls_recover_list_lock;
	int			ls_recover_list_count;
	wait_queue_head_t	ls_wait_general;
	struct mutex		ls_clear_proc_locks;

	struct list_head	ls_root_list;	/* root resources */
	struct rw_semaphore	ls_root_sem;	/* protect root_list */

	int			ls_namelen;
	char			ls_name[1];
};
486
/* ls_flags bit numbers */

#define LSFL_WORK		0
#define LSFL_RUNNING		1
#define LSFL_RECOVERY_STOP	2
#define LSFL_RCOM_READY		3
#define LSFL_UEVENT_WAIT	4

/* much of this is just saving user space pointers associated with the
   lock that we pass back to the user lib with an ast */

struct dlm_user_args {
	struct dlm_user_proc	*proc; /* each process that opens the lockspace
					  device has private data
					  (dlm_user_proc) on the struct file,
					  the process's locks point back to it*/
	struct dlm_lksb		lksb;
	int			old_mode;
	int			update_user_lvb;
	struct dlm_lksb __user	*user_lksb;
	void __user		*castparam;
	void __user		*castaddr;
	void __user		*bastparam;
	void __user		*bastaddr;
};

/* dlm_user_proc flags bit numbers */

#define DLM_PROC_FLAGS_CLOSING 1
#define DLM_PROC_FLAGS_COMPAT  2

/* locks list is kept so we can remove all a process's locks when it
   exits (or orphan those that are persistent) */

struct dlm_user_proc {
	dlm_lockspace_t		*lockspace;
	unsigned long		flags; /* DLM_PROC_FLAGS */
	struct list_head	asts;
	spinlock_t		asts_spin;
	struct list_head	locks;
	spinlock_t		locks_spin;
	wait_queue_head_t	wait;
};
526
/* true when normal locking activity is stopped (LSFL_RUNNING cleared) */
static inline int dlm_locking_stopped(struct dlm_ls *ls)
{
	return !test_bit(LSFL_RUNNING, &ls->ls_flags);
}

/* true when an in-progress recovery has been told to stop */
static inline int dlm_recovery_stopped(struct dlm_ls *ls)
{
	return test_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
}

/* true when the lockspace was created with DLM_LSFL_NODIR
   (no resource directory is maintained) */
static inline int dlm_no_directory(struct dlm_ls *ls)
{
	return (ls->ls_exflags & DLM_LSFL_NODIR) ? 1 : 0;
}
541
542#endif /* __DLM_INTERNAL_DOT_H__ */
543
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
new file mode 100644
index 000000000000..3f2befa4797b
--- /dev/null
+++ b/fs/dlm/lock.c
@@ -0,0 +1,3871 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13/* Central locking logic has four stages:
14
15 dlm_lock()
16 dlm_unlock()
17
18 request_lock(ls, lkb)
19 convert_lock(ls, lkb)
20 unlock_lock(ls, lkb)
21 cancel_lock(ls, lkb)
22
23 _request_lock(r, lkb)
24 _convert_lock(r, lkb)
25 _unlock_lock(r, lkb)
26 _cancel_lock(r, lkb)
27
28 do_request(r, lkb)
29 do_convert(r, lkb)
30 do_unlock(r, lkb)
31 do_cancel(r, lkb)
32
33 Stage 1 (lock, unlock) is mainly about checking input args and
34 splitting into one of the four main operations:
35
36 dlm_lock = request_lock
37 dlm_lock+CONVERT = convert_lock
38 dlm_unlock = unlock_lock
39 dlm_unlock+CANCEL = cancel_lock
40
41 Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
42 provided to the next stage.
43
44 Stage 3, _xxxx_lock(), determines if the operation is local or remote.
45 When remote, it calls send_xxxx(), when local it calls do_xxxx().
46
47 Stage 4, do_xxxx(), is the guts of the operation. It manipulates the
48 given rsb and lkb and queues callbacks.
49
50 For remote operations, send_xxxx() results in the corresponding do_xxxx()
51 function being executed on the remote node. The connecting send/receive
52 calls on local (L) and remote (R) nodes:
53
54 L: send_xxxx() -> R: receive_xxxx()
55 R: do_xxxx()
56 L: receive_xxxx_reply() <- R: send_xxxx_reply()
57*/
58#include <linux/types.h>
59#include "dlm_internal.h"
60#include <linux/dlm_device.h>
61#include "memory.h"
62#include "lowcomms.h"
63#include "requestqueue.h"
64#include "util.h"
65#include "dir.h"
66#include "member.h"
67#include "lockspace.h"
68#include "ast.h"
69#include "lock.h"
70#include "rcom.h"
71#include "recover.h"
72#include "lvb_table.h"
73#include "user.h"
74#include "config.h"
75
76static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
77static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
78static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
79static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
80static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
81static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
82static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
83static int send_remove(struct dlm_rsb *r);
84static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
85static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
86 struct dlm_message *ms);
87static int receive_extralen(struct dlm_message *ms);
88
89/*
 * Lock compatibility matrix - thanks Steve
91 * UN = Unlocked state. Not really a state, used as a flag
92 * PD = Padding. Used to make the matrix a nice power of two in size
93 * Other states are the same as the VMS DLM.
94 * Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same)
95 */
96
/* 1 = requested mode (column) is compatible with granted mode (row),
   0 = conflict.  UN and PD rows/columns pad the table to 8x8. */
static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* UN */
        {1, 1, 1, 1, 1, 1, 1, 0},       /* NL */
        {1, 1, 1, 1, 1, 1, 0, 0},       /* CR */
        {1, 1, 1, 1, 0, 0, 0, 0},       /* CW */
        {1, 1, 1, 0, 1, 0, 0, 0},       /* PR */
        {1, 1, 1, 0, 0, 0, 0, 0},       /* PW */
        {1, 1, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
108
109/*
110 * This defines the direction of transfer of LVB data.
111 * Granted mode is the row; requested mode is the column.
112 * Usage: matrix[grmode+1][rqmode+1]
113 * 1 = LVB is returned to the caller
114 * 0 = LVB is written to the resource
115 * -1 = nothing happens to the LVB
116 */
117
/* Indexed [grmode+1][rqmode+1]; 1 = LVB returned to caller,
   0 = LVB written to the resource, -1 = LVB untouched (see above). */
const int dlm_lvb_operations[8][8] = {
        /* UN   NL  CR  CW  PR  PW  EX  PD*/
        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
129
/* Nonzero if gr's granted mode is compatible with rq's requested mode
   (both arguments are lkb pointers). */
#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

/* Same compatibility test on raw DLM_LOCK_* mode values; the +1 skips
   the UN padding row/column of the matrix. */
int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}
137
138/*
139 * Compatibility matrix for conversions with QUECVT set.
140 * Granted mode is the row; requested mode is the column.
141 * Usage: matrix[grmode+1][rqmode+1]
142 */
143
/* 1 = a QUECVT conversion from granted mode (row) to requested mode
   (column) is permitted; used only for conversions with QUECVT set. */
static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* UN */
        {0, 0, 1, 1, 1, 1, 1, 0},       /* NL */
        {0, 0, 0, 1, 1, 1, 1, 0},       /* CR */
        {0, 0, 0, 0, 1, 1, 1, 0},       /* CW */
        {0, 0, 0, 1, 0, 1, 1, 0},       /* PR */
        {0, 0, 0, 0, 0, 0, 1, 0},       /* PW */
        {0, 0, 0, 0, 0, 0, 0, 0},       /* EX */
        {0, 0, 0, 0, 0, 0, 0, 0}        /* PD */
};
155
/* Dump one lkb's identifying fields to the log; used by DLM_ASSERT
   failure paths and debug dumps. */
void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
	       "     status %d rqmode %d grmode %d wait_type %d ast_type %d\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
}
164
/* Dump one rsb's identifying fields to the log.
   NOTE(review): res_name is a counted buffer (res_length); "%s" assumes
   a trailing NUL — confirm allocate_rsb() zero-fills the name area. */
void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
	       r->res_nodeid, r->res_flags, r->res_first_lkid,
	       r->res_recover_locks_count, r->res_name);
}
171
/* Dump an rsb plus every lkb on its lookup, grant, convert and wait
   queues; caller is expected to hold whatever protects the queues. */
void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}
193
194/* Threads cannot use the lockspace while it's being recovered */
195
/* Take the recovery rwsem for read: many lock operations may run
   concurrently, but all are excluded while recovery holds it for write. */
static inline void lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

/* Release the read side taken by lock_recovery(). */
static inline void unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

/* Non-blocking variant; returns nonzero on success. */
static inline int lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}
210
/* True (0/1) unless the caller asked for a try-lock (NOQUEUE). */
static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

/* Nonzero (raw flag bit, not normalized) if blocking asts should be
   sent even though this request won't be queued. */
static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

/* Nonzero (raw flag bit) if the lock was demoted to resolve a
   conversion deadlock. */
static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

/* res_nodeid: 0 = we are master, >0 = remote master, -1 = unknown
   (asserted against here). */
static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

/* Local lkb whose master is on another node (the "process copy"). */
static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

/* Master-side copy of a lock owned by another node; a master copy must
   always record which node owns it. */
static inline int is_master_copy(struct dlm_lkb *lkb)
{
	if (lkb->lkb_flags & DLM_IFL_MSTCPY)
		DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

/* PR<->CW conversions are "middle" conversions: the two modes are
   mutually incompatible, so neither direction is a pure down-convert. */
static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
		return 1;
	return 0;
}

/* A conversion to a strictly weaker mode (excluding the PR/CW case),
   which can always be granted immediately. */
static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}
256
/* Queue a completion ast for a local lock: record status rv and the
   sb flags in the caller's lksb, then hand the lkb to the ast thread.
   Master copies have no local caller, so nothing is queued for them. */
static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

	lkb->lkb_lksb->sb_status = rv;
	lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;

	dlm_add_ast(lkb, AST_COMP);
}

/* Deliver a blocking ast: forwarded to the owning node for a master
   copy, queued locally otherwise. */
static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb))
		send_bast(r, lkb, rqmode);
	else {
		lkb->lkb_bastmode = rqmode;
		dlm_add_ast(lkb, AST_BAST);
	}
}
279
280/*
281 * Basic operations on rsb's and lkb's
282 */
283
/* Allocate and initialize a new rsb for the given resource name.
   The caller supplies hash/bucket/nodeid and links it into the table.
   Returns NULL on allocation failure. */
static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
{
	struct dlm_rsb *r;

	r = allocate_rsb(ls, len);
	if (!r)
		return NULL;

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	return r;
}
306
/* Linear search of one hash chain for an rsb with the given name.
   Returns 0 with *r_ret set, -EBADR if not found, or -ENOTBLK if found
   but we are not the master and the caller required one (R_MASTER).
   Caller holds the bucket lock. */
static int search_rsb_list(struct list_head *head, char *name, int len,
			   unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int error = 0;

	list_for_each_entry(r, head, res_hashchain) {
		if (len == r->res_length && !memcmp(name, r->res_name, len))
			goto found;
	}
	return -EBADR;

 found:
	if (r->res_nodeid && (flags & R_MASTER))
		error = -ENOTBLK;
	*r_ret = r;
	return error;
}
325
/* Search bucket b for an rsb: the active list first (take a reference),
   then the toss list (revive onto the active list).  A revived rsb's
   cached master may be stale, so mark it uncertain unless we are the
   master or the lockspace has no directory.  Caller holds the bucket
   write lock.  Note: *r_ret is written even on error (possibly with an
   uninitialized r) — callers must ignore it unless 0/-ENOTBLK. */
static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
		       unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int error;

	error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
	if (!error) {
		kref_get(&r->res_ref);
		goto out;
	}
	error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
	if (error)
		goto out;

	/* revive: toss list entries keep a ref of 1 (see toss_rsb) */
	list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);

	if (dlm_no_directory(ls))
		goto out;

	if (r->res_nodeid == -1) {
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	} else if (r->res_nodeid > 0) {
		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	} else {
		DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
		DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
	}
 out:
	*r_ret = r;
	return error;
}
360
/* Locked wrapper around _search_rsb() for bucket b. */
static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
		      unsigned int flags, struct dlm_rsb **r_ret)
{
	int error;
	write_lock(&ls->ls_rsbtbl[b].lock);
	error = _search_rsb(ls, name, len, b, flags, r_ret);
	write_unlock(&ls->ls_rsbtbl[b].lock);
	return error;
}
370
371/*
372 * Find rsb in rsbtbl and potentially create/add one
373 *
374 * Delaying the release of rsb's has a similar benefit to applications keeping
375 * NL locks on an rsb, but without the guarantee that the cached master value
376 * will still be valid when the rsb is reused. Apps aren't always smart enough
377 * to keep NL locks on an rsb that they may lock again shortly; this can lead
378 * to excessive master lookups and removals if we don't delay the release.
379 *
380 * Searching for an rsb means looking through both the normal list and toss
381 * list. When found on the toss list the rsb is moved to the normal list with
382 * ref count of 1; when found on normal list the ref count is incremented.
383 */
384
/* Look up (and with R_CREATE, possibly create) the rsb for a name.
   On success *r_ret holds a referenced rsb.  The unlocked window
   between the failed search and the insert is closed by re-searching
   under the bucket lock: if another thread added the rsb meanwhile we
   free ours and use the winner's. */
static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
		    unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r, *tmp;
	uint32_t hash, bucket;
	int error = 0;

	if (dlm_no_directory(ls))
		flags |= R_CREATE;

	hash = jhash(name, namelen, 0);
	bucket = hash & (ls->ls_rsbtbl_size - 1);

	error = search_rsb(ls, name, namelen, bucket, flags, &r);
	if (!error)
		goto out;

	if (error == -EBADR && !(flags & R_CREATE))
		goto out;

	/* the rsb was found but wasn't a master copy */
	if (error == -ENOTBLK)
		goto out;

	error = -ENOMEM;
	r = create_rsb(ls, name, namelen);
	if (!r)
		goto out;

	r->res_hash = hash;
	r->res_bucket = bucket;
	r->res_nodeid = -1;	/* master unknown until a lookup */
	kref_init(&r->res_ref);

	/* With no directory, the master can be set immediately */
	if (dlm_no_directory(ls)) {
		int nodeid = dlm_dir_nodeid(r);
		if (nodeid == dlm_our_nodeid())
			nodeid = 0;
		r->res_nodeid = nodeid;
	}

	/* re-check under the lock in case we raced another creator */
	write_lock(&ls->ls_rsbtbl[bucket].lock);
	error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
	if (!error) {
		write_unlock(&ls->ls_rsbtbl[bucket].lock);
		free_rsb(r);
		r = tmp;
		goto out;
	}
	list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
	write_unlock(&ls->ls_rsbtbl[bucket].lock);
	error = 0;
 out:
	*r_ret = r;
	return error;
}

/* Exported wrapper around find_rsb(). */
int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
		 unsigned int flags, struct dlm_rsb **r_ret)
{
	return find_rsb(ls, name, namelen, flags, r_ret);
}
448
449/* This is only called to add a reference when the code already holds
450 a valid reference to the rsb, so there's no need for locking. */
451
/* Take an extra reference; caller must already hold a valid one, so no
   locking is needed (see comment above). */
static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

/* Exported wrapper around hold_rsb(). */
void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}
461
/* kref release: move the now-unreferenced rsb onto the bucket's toss
   list for delayed disposal (see shrink_bucket).  The ref is re-armed
   to 1, which is the reference the toss list itself holds; the LVB is
   dropped since a revived rsb's contents may be stale. */
static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	if (r->res_lvbptr) {
		free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}
476
/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */
479
/* Drop a reference under the bucket lock; the final put moves the rsb
   to the toss list via toss_rsb(). */
static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;

	write_lock(&ls->ls_rsbtbl[bucket].lock);
	kref_put(&r->res_ref, toss_rsb);
	write_unlock(&ls->ls_rsbtbl[bucket].lock);
}

/* Exported wrapper around put_rsb(). */
void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}
494
495/* See comment for unhold_lkb */
496
/* Drop a reference the caller knows is not the last one, so no bucket
   locking is needed; assert that it really wasn't the last. */
static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;
	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}

/* kref release used when an rsb is finally destroyed from the toss
   list; only asserts emptiness here — the actual list removal and free
   happen in the caller after kref_put() returns, so the bucket lock can
   be released first. */
static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}
518
519/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
520 The rsb must exist as long as any lkb's for it do. */
521
/* Tie an lkb to its rsb, pinning the rsb with a reference for the
   lkb's lifetime (see comment above). */
static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

/* Undo attach_lkb(): drop the rsb reference held on the lkb's behalf. */
static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}
535
/* Allocate an lkb and assign it a unique 32-bit lock id: the low 16
   bits encode a randomly chosen hash bucket, the high 16 bits come from
   that bucket's counter.  Returns 0 with *lkb_ret set, or -ENOMEM. */
static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb, *tmp;
	uint32_t lkid = 0;
	uint16_t bucket;

	lkb = allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);

	get_random_bytes(&bucket, sizeof(bucket));
	bucket &= (ls->ls_lkbtbl_size - 1);

	write_lock(&ls->ls_lkbtbl[bucket].lock);

	/* counter can roll over so we must verify lkid is not in use */

	while (lkid == 0) {
		lkid = bucket | (ls->ls_lkbtbl[bucket].counter++ << 16);

		list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list,
				    lkb_idtbl_list) {
			if (tmp->lkb_id != lkid)
				continue;
			/* collision: retry with the next counter value */
			lkid = 0;
			break;
		}
	}

	lkb->lkb_id = lkid;
	list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list);
	write_unlock(&ls->ls_lkbtbl[bucket].lock);

	*lkb_ret = lkb;
	return 0;
}
577
/* Unlocked search of the id table bucket derived from the low 16 bits
   of lkid; caller holds that bucket's lock. */
static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid)
{
	uint16_t bucket = lkid & 0xFFFF;
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) {
		if (lkb->lkb_id == lkid)
			return lkb;
	}
	return NULL;
}

/* Look up an lkb by id, taking a reference on success.  Returns 0 with
   *lkb_ret set, -EBADSLT for an out-of-range bucket, -ENOENT if the id
   is unknown. */
static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;
	uint16_t bucket = lkid & 0xFFFF;

	if (bucket >= ls->ls_lkbtbl_size)
		return -EBADSLT;

	read_lock(&ls->ls_lkbtbl[bucket].lock);
	lkb = __find_lkb(ls, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	read_unlock(&ls->ls_lkbtbl[bucket].lock);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}
607
/* kref release for an lkb; only asserts it is off every state queue —
   removal and freeing happen in the caller after kref_put() returns,
   so the table write_lock can be released first. */
static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}
617
618/* __put_lkb() is used when an lkb may not have an rsb attached to
619 it so we need to provide the lockspace explicitly */
620
/* Drop a reference on an lkb; on the final put, remove it from the id
   table, detach its rsb and free it.  Takes the lockspace explicitly
   because the lkb may have no rsb attached (see comment above).
   Returns 1 if the lkb was freed, 0 otherwise. */
static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint16_t bucket = lkb->lkb_id & 0xFFFF;

	write_lock(&ls->ls_lkbtbl[bucket].lock);
	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
		list_del(&lkb->lkb_idtbl_list);
		write_unlock(&ls->ls_lkbtbl[bucket].lock);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			free_lvb(lkb->lkb_lvbptr);
		free_lkb(lkb);
		return 1;
	} else {
		write_unlock(&ls->ls_lkbtbl[bucket].lock);
		return 0;
	}
}

/* Exported put; derives the lockspace from the attached rsb, which
   must therefore still be present. */
int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}
653
654/* This is only called to add a reference when the code already holds
655 a valid reference to the lkb, so there's no need for locking. */
656
/* Take an extra reference; caller already holds a valid one, so no
   locking is needed. */
static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	int rv;
	rv = kref_put(&lkb->lkb_ref, kill_lkb);
	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
}
673
674static void lkb_add_ordered(struct list_head *new, struct list_head *head,
675 int mode)
676{
677 struct dlm_lkb *lkb = NULL;
678
679 list_for_each_entry(lkb, head, lkb_statequeue)
680 if (lkb->lkb_rqmode < mode)
681 break;
682
683 if (!lkb)
684 list_add_tail(new, head);
685 else
686 __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
687}
688
689/* add/remove lkb to rsb's grant/convert/wait queue */
690
/* Put an lkb on the rsb queue matching its new status, taking a queue
   reference.  HEADQUE requests go to the front of wait/convert queues;
   the grant queue is kept ordered by grant mode. */
static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}

/* Inverse of add_lkb(): take the lkb off its state queue and drop the
   queue reference (known not to be the last — see unhold_lkb). */
static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}
729
/* Move an lkb between rsb state queues; the temporary hold keeps the
   refcount from hitting zero between del_lkb and add_lkb. */
static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	hold_lkb(lkb);
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
	unhold_lkb(lkb);
}
737
738/* add/remove lkb from global waiters list of lkb's waiting for
739 a reply from a remote node */
740
/* Register an lkb as waiting for a remote reply of type mstype, taking
   a reference.  An lkb can only wait for one reply at a time; a second
   add is logged and ignored. */
static void add_to_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	mutex_lock(&ls->ls_waiters_mutex);
	if (lkb->lkb_wait_type) {
		log_print("add_to_waiters error %d", lkb->lkb_wait_type);
		goto out;
	}
	lkb->lkb_wait_type = mstype;
	kref_get(&lkb->lkb_ref);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
	mutex_unlock(&ls->ls_waiters_mutex);
}

/* Unlocked body of remove_from_waiters(); caller holds
   ls_waiters_mutex.  Returns -EINVAL if the lkb wasn't waiting. */
static int _remove_from_waiters(struct dlm_lkb *lkb)
{
	int error = 0;

	if (!lkb->lkb_wait_type) {
		log_print("remove_from_waiters error");
		error = -EINVAL;
		goto out;
	}
	lkb->lkb_wait_type = 0;
	list_del(&lkb->lkb_wait_reply);
	unhold_lkb(lkb);
 out:
	return error;
}

/* Locked wrapper: take the lkb off the waiters list when its reply
   arrives (or is abandoned). */
static int remove_from_waiters(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}
783
/* Remove an rsb's entry from the resource directory: locally when this
   node is the directory node, otherwise by message.  No-op when the
   lockspace has no directory. */
static void dir_remove(struct dlm_rsb *r)
{
	int to_nodeid;

	if (dlm_no_directory(r->res_ls))
		return;

	to_nodeid = dlm_dir_nodeid(r);
	if (to_nodeid != dlm_our_nodeid())
		send_remove(r);
	else
		dlm_dir_remove_entry(r->res_ls, to_nodeid,
				     r->res_name, r->res_length);
}
798
799/* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
800 found since they are in order of newest to oldest? */
801
/* Free rsb's on bucket b's toss list whose toss time has aged past
   toss_secs.  Each pass re-takes the lock and scans from the oldest end
   so the lock isn't held across dir_remove/free.  Returns the number
   freed.  A tossed rsb whose refcount is >1 is logged — the toss list's
   own ref should be the only one. */
static int shrink_bucket(struct dlm_ls *ls, int b)
{
	struct dlm_rsb *r;
	int count = 0, found;

	for (;;) {
		found = 0;
		write_lock(&ls->ls_rsbtbl[b].lock);
		list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
					    res_hashchain) {
			if (!time_after_eq(jiffies, r->res_toss_time +
					   dlm_config.toss_secs * HZ))
				continue;
			found = 1;
			break;
		}

		if (!found) {
			write_unlock(&ls->ls_rsbtbl[b].lock);
			break;
		}

		if (kref_put(&r->res_ref, kill_rsb)) {
			list_del(&r->res_hashchain);
			write_unlock(&ls->ls_rsbtbl[b].lock);

			/* only the master removes the directory entry */
			if (is_master(r))
				dir_remove(r);
			free_rsb(r);
			count++;
		} else {
			write_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "tossed rsb in use %s", r->res_name);
		}
	}

	return count;
}
840
/* Periodic scavenger: shrink every hash bucket's toss list, yielding
   between buckets.  Skipped entirely while locking is stopped (during
   recovery). */
void dlm_scan_rsbs(struct dlm_ls *ls)
{
	int i;

	if (dlm_locking_stopped(ls))
		return;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		shrink_bucket(ls, i);
		cond_resched();
	}
}
853
854/* lkb is master or local copy */
855
/* Apply the LVB transfer rules for a grant on a master or local lkb:
   per dlm_lvb_operations, either copy the resource LVB to the caller
   (b==1), write the caller's LVB into the resource (b==0), or do
   nothing (b==-1).  IVVALBLK invalidates instead of writing.  Finally
   reflect an invalid LVB in the caller's sb flags. */
static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int b, len = r->res_ls->ls_lvblen;

	/* b=1 lvb returned to caller
	   b=0 lvb written to rsb or invalidated
	   b=-1 do nothing */

	b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];

	if (b == 1) {
		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			return;

		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
		lkb->lkb_lvbseq = r->res_lvbseq;

	} else if (b == 0) {
		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
			rsb_set_flag(r, RSB_VALNOTVALID);
			return;
		}

		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		/* allocate lazily; silently skip the write on failure */
		if (!r->res_lvbptr)
			r->res_lvbptr = allocate_lvb(r->res_ls);

		if (!r->res_lvbptr)
			return;

		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
		r->res_lvbseq++;
		lkb->lkb_lvbseq = r->res_lvbseq;
		rsb_clear_flag(r, RSB_VALNOTVALID);
	}

	if (rsb_flag(r, RSB_VALNOTVALID))
		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
}
906
/* On unlock, write the holder's LVB back to the resource — but only
   for holders at PW or EX (modes allowed to modify the LVB).  IVVALBLK
   invalidates instead of writing. */
static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode < DLM_LOCK_PW)
		return;

	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	/* allocate lazily; silently skip the write on failure */
	if (!r->res_lvbptr)
		r->res_lvbptr = allocate_lvb(r->res_ls);

	if (!r->res_lvbptr)
		return;

	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
	r->res_lvbseq++;
	rsb_clear_flag(r, RSB_VALNOTVALID);
}
933
934/* lkb is process copy (pc) */
935
/* Process-copy variant of set_lvb_lock(): when the LVB operation says
   "return to caller" (b==1), copy the LVB carried in the reply
   message's extra data into the local lkb. */
static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			    struct dlm_message *ms)
{
	int b;

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
	if (b == 1) {
		int len = receive_extralen(ms);
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
		lkb->lkb_lvbseq = ms->m_lvbseq;
	}
}
954
955/* Manipulate lkb's on rsb's convert/granted/waiting queues
956 remove_lock -- used for unlock, removes lkb from granted
957 revert_lock -- used for cancel, moves lkb from convert to granted
958 grant_lock -- used for request and convert, adds lkb to granted or
959 moves lkb from convert or waiting to granted
960
961 Each of these is used for master or local copy lkb's. There is
962 also a _pc() variation used to make the corresponding change on
963 a process copy (pc) lkb. */
964
/* Take the lkb off its queue and invalidate its grant mode; the unhold
   drops the original create_lkb() reference, so this normally leads to
   the lkb being freed. */
static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	del_lkb(r, lkb);
	lkb->lkb_grmode = DLM_LOCK_IV;
	/* this unhold undoes the original ref from create_lkb()
	   so this leads to the lkb being freed */
	unhold_lkb(lkb);
}

/* Unlock on a master/local copy: write the LVB back, then remove. */
static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_unlock(r, lkb);
	_remove_lock(r, lkb);
}

/* Unlock on a process copy: no LVB handling on this side. */
static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	_remove_lock(r, lkb);
}

/* Cancel: undo an in-flight request.  A converting lock returns to the
   grant queue at its old mode; a waiting lock is removed entirely
   (dropping the create_lkb() ref); a granted lock just clears rqmode. */
static void revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_rqmode = DLM_LOCK_IV;

	switch (lkb->lkb_status) {
	case DLM_LKSTS_GRANTED:
		break;
	case DLM_LKSTS_CONVERT:
		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		break;
	case DLM_LKSTS_WAITING:
		del_lkb(r, lkb);
		lkb->lkb_grmode = DLM_LOCK_IV;
		/* this unhold undoes the original ref from create_lkb()
		   so this leads to the lkb being freed */
		unhold_lkb(lkb);
		break;
	default:
		log_print("invalid status for revert %d", lkb->lkb_status);
	}
}

/* Process-copy variant of revert_lock(). */
static void revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	revert_lock(r, lkb);
}
1011
/* Grant the requested mode: update grmode and move/add the lkb to the
   grant queue (a no-op move if the mode didn't change), then clear
   rqmode. */
static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
		lkb->lkb_grmode = lkb->lkb_rqmode;
		if (lkb->lkb_status)
			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		else
			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
	}

	lkb->lkb_rqmode = DLM_LOCK_IV;
}

/* Grant on the master/local copy: apply LVB rules first, and reset the
   highest-bast-mode tracking. */
static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_lock(r, lkb);
	_grant_lock(r, lkb);
	lkb->lkb_highbast = 0;
}

/* Grant on a process copy, taking the LVB from the reply message. */
static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			  struct dlm_message *ms)
{
	set_lvb_lock_pc(r, lkb, ms);
	_grant_lock(r, lkb);
}

/* called by grant_pending_locks() which means an async grant message must
   be sent to the requesting node in addition to granting the lock if the
   lkb belongs to a remote node. */

static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	grant_lock(r, lkb);
	if (is_master_copy(lkb))
		send_grant(r, lkb);
	else
		queue_cast(r, lkb, 0);
}
1051
1052static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
1053{
1054 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
1055 lkb_statequeue);
1056 if (lkb->lkb_id == first->lkb_id)
1057 return 1;
1058
1059 return 0;
1060}
1061
1062/* Check if the given lkb conflicts with another lkb on the queue. */
1063
/* Nonzero if any other lkb on the queue is incompatible with lkb
   (granted mode of the queued lock vs. lkb's requested mode); lkb
   itself is skipped so the check works on its own queue. */
static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
{
	struct dlm_lkb *this;

	list_for_each_entry(this, head, lkb_statequeue) {
		if (this == lkb)
			continue;
		if (!modes_compat(this, lkb))
			return 1;
	}
	return 0;
}
1076
1077/*
1078 * "A conversion deadlock arises with a pair of lock requests in the converting
1079 * queue for one resource. The granted mode of each lock blocks the requested
1080 * mode of the other lock."
1081 *
1082 * Part 2: if the granted mode of lkb is preventing the first lkb in the
1083 * convert queue from being granted, then demote lkb (set grmode to NL).
1084 * This second form requires that we check for conv-deadlk even when
1085 * now == 0 in _can_be_granted().
1086 *
1087 * Example:
1088 * Granted Queue: empty
1089 * Convert Queue: NL->EX (first lock)
1090 * PR->EX (second lock)
1091 *
1092 * The first lock can't be granted because of the granted mode of the second
1093 * lock and the second lock can't be granted because it's not first in the
1094 * list. We demote the granted mode of the second lock (the lkb passed to this
1095 * function).
1096 *
1097 * After the resolution, the "grant pending" function needs to go back and try
1098 * to grant locks on the convert queue again since the first lock can now be
1099 * granted.
1100 */
1101
/* Detect conversion deadlock involving lkb (see comment above).
   Returns 1 if lkb is mutually incompatible with another converting
   lock, or if lkb (queued behind the first converter) is what blocks
   the first converter from being granted — in which case the caller
   demotes lkb. */
static int conversion_deadlock_detect(struct dlm_rsb *rsb, struct dlm_lkb *lkb)
{
	struct dlm_lkb *this, *first = NULL, *self = NULL;

	list_for_each_entry(this, &rsb->res_convertqueue, lkb_statequeue) {
		if (!first)
			first = this;
		if (this == lkb) {
			self = lkb;
			continue;
		}

		/* mutual incompatibility = classic conversion deadlock */
		if (!modes_compat(this, lkb) && !modes_compat(lkb, this))
			return 1;
	}

	/* if lkb is on the convert queue and is preventing the first
	   from being granted, then there's deadlock and we demote lkb.
	   multiple converting locks may need to do this before the first
	   converting lock can be granted. */

	if (self && self != first) {
		if (!modes_compat(lkb, first) &&
		    !queue_conflict(&rsb->res_grantqueue, first))
			return 1;
	}

	return 0;
}
1131
1132/*
1133 * Return 1 if the lock can be granted, 0 otherwise.
1134 * Also detect and resolve conversion deadlocks.
1135 *
1136 * lkb is the lock to be granted
1137 *
1138 * now is 1 if the function is being called in the context of the
1139 * immediate request, it is 0 if called later, after the lock has been
1140 * queued.
1141 *
1142 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
1143 */
1144
static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
{
	/* conv is true when lkb already holds a granted mode, i.e. this is a
	   conversion rather than a new request */
	int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);

	/*
	 * 6-10: Version 5.4 introduced an option to address the phenomenon of
	 * a new request for a NL mode lock being blocked.
	 *
	 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
	 * request, then it would be granted.  In essence, the use of this flag
	 * tells the Lock Manager to expedite this request by not considering
	 * what may be in the CONVERTING or WAITING queues...  As of this
	 * writing, the EXPEDITE flag can be used only with new requests for NL
	 * mode locks.  This flag is not valid for conversion requests.
	 *
	 * A shortcut.  Earlier checks return an error if EXPEDITE is used in a
	 * conversion or used with a non-NL requested mode.  We also know an
	 * EXPEDITE request is always granted immediately, so now must always
	 * be 1.  The full condition to grant an expedite request: (now &&
	 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
	 * therefore be shortened to just checking the flag.
	 */

	if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
		return 1;

	/*
	 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
	 * added to the remaining conditions.
	 */

	if (queue_conflict(&r->res_grantqueue, lkb))
		goto out;

	/*
	 * 6-3: By default, a conversion request is immediately granted if the
	 * requested mode is compatible with the modes of all other granted
	 * locks
	 */

	if (queue_conflict(&r->res_convertqueue, lkb))
		goto out;

	/*
	 * 6-5: But the default algorithm for deciding whether to grant or
	 * queue conversion requests does not by itself guarantee that such
	 * requests are serviced on a "first come first serve" basis.  This, in
	 * turn, can lead to a phenomenon known as "indefinite postponement".
	 *
	 * 6-7: This issue is dealt with by using the optional QUECVT flag with
	 * the system service employed to request a lock conversion.  This flag
	 * forces certain conversion requests to be queued, even if they are
	 * compatible with the granted modes of other locks on the same
	 * resource.  Thus, the use of this flag results in conversion requests
	 * being ordered on a "first come first serve" basis.
	 *
	 * DCT: This condition is all about new conversions being able to occur
	 * "in place" while the lock remains on the granted queue (assuming
	 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
	 * doesn't _have_ to go onto the convert queue where it's processed in
	 * order.  The "now" variable is necessary to distinguish converts
	 * being received and processed for the first time now, because once a
	 * convert is moved to the conversion queue the condition below applies
	 * requiring fifo granting.
	 */

	if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
		return 1;

	/*
	 * The NOORDER flag is set to avoid the standard vms rules on grant
	 * order.
	 */

	if (lkb->lkb_exflags & DLM_LKF_NOORDER)
		return 1;

	/*
	 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
	 * granted until all other conversion requests ahead of it are granted
	 * and/or canceled.
	 */

	if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
		return 1;

	/*
	 * 6-4: By default, a new request is immediately granted only if all
	 * three of the following conditions are satisfied when the request is
	 * issued:
	 * - The queue of ungranted conversion requests for the resource is
	 *   empty.
	 * - The queue of ungranted new requests for the resource is empty.
	 * - The mode of the new request is compatible with the most
	 *   restrictive mode of all granted locks on the resource.
	 */

	if (now && !conv && list_empty(&r->res_convertqueue) &&
	    list_empty(&r->res_waitqueue))
		return 1;

	/*
	 * 6-4: Once a lock request is in the queue of ungranted new requests,
	 * it cannot be granted until the queue of ungranted conversion
	 * requests is empty, all ungranted new requests ahead of it are
	 * granted and/or canceled, and it is compatible with the granted mode
	 * of the most restrictive lock granted on the resource.
	 */

	if (!now && !conv && list_empty(&r->res_convertqueue) &&
	    first_in_list(lkb, &r->res_waitqueue))
		return 1;

 out:
	/*
	 * The following, enabled by CONVDEADLK, departs from VMS.
	 * A deadlocked conversion is resolved by demoting the lock's granted
	 * mode to NL (a side effect even though we return 0); the caller can
	 * see this via the DEMOTED status-block flag.
	 */

	if (conv && (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) &&
	    conversion_deadlock_detect(r, lkb)) {
		lkb->lkb_grmode = DLM_LOCK_NL;
		lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
	}

	return 0;
}
1271
1272/*
1273 * The ALTPR and ALTCW flags aren't traditional lock manager flags, but are a
1274 * simple way to provide a big optimization to applications that can use them.
1275 */
1276
/* Wrapper around _can_be_granted() that additionally tries the alternate
   mode (ALTPR/ALTCW) when the requested mode cannot be granted.  On an
   alternate-mode grant, lkb_rqmode is left set to the alternate mode and
   DLM_SBF_ALTMODE is reported to the caller via the status block. */

static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
{
	uint32_t flags = lkb->lkb_exflags;
	int rv;
	int8_t alt = 0, rqmode = lkb->lkb_rqmode;

	rv = _can_be_granted(r, lkb, now);
	if (rv)
		goto out;

	/* a conversion-deadlock demotion just happened inside
	   _can_be_granted(); don't also try an alternate mode */
	if (lkb->lkb_sbflags & DLM_SBF_DEMOTED)
		goto out;

	if (rqmode != DLM_LOCK_PR && flags & DLM_LKF_ALTPR)
		alt = DLM_LOCK_PR;
	else if (rqmode != DLM_LOCK_CW && flags & DLM_LKF_ALTCW)
		alt = DLM_LOCK_CW;

	if (alt) {
		/* retry the grant check with the alternate mode; restore the
		   original requested mode if it still can't be granted */
		lkb->lkb_rqmode = alt;
		rv = _can_be_granted(r, lkb, now);
		if (rv)
			lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
		else
			lkb->lkb_rqmode = rqmode;
	}
 out:
	return rv;
}
1306
/* Grant whatever can be granted on the convert queue, restarting the scan
   whenever a grant or a conversion-deadlock demotion changes the picture.
   Returns the highest requested mode (at least "high") left ungranted. */

static int grant_pending_convert(struct dlm_rsb *r, int high)
{
	struct dlm_lkb *lkb, *s;
	int hi, demoted, quit, grant_restart, demote_restart;

	quit = 0;
 restart:
	grant_restart = 0;
	demote_restart = 0;
	hi = DLM_LOCK_IV;

	list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
		/* remember demoted state so we can tell if can_be_granted()
		   demoted this lkb as a side effect */
		demoted = is_demoted(lkb);
		if (can_be_granted(r, lkb, 0)) {
			grant_lock_pending(r, lkb);
			grant_restart = 1;
		} else {
			hi = max_t(int, lkb->lkb_rqmode, hi);
			if (!demoted && is_demoted(lkb))
				demote_restart = 1;
		}
	}

	if (grant_restart)
		goto restart;
	/* a demotion may have unblocked earlier entries; rescan once */
	if (demote_restart && !quit) {
		quit = 1;
		goto restart;
	}

	return max_t(int, high, hi);
}
1339
1340static int grant_pending_wait(struct dlm_rsb *r, int high)
1341{
1342 struct dlm_lkb *lkb, *s;
1343
1344 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
1345 if (can_be_granted(r, lkb, 0))
1346 grant_lock_pending(r, lkb);
1347 else
1348 high = max_t(int, lkb->lkb_rqmode, high);
1349 }
1350
1351 return high;
1352}
1353
/* Grant everything grantable on the convert and wait queues of r (master
   copy only), then send blocking ASTs to granted locks that conflict with
   the highest mode still waiting. */

static void grant_pending_locks(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *s;
	int high = DLM_LOCK_IV;

	DLM_ASSERT(is_master(r), dlm_dump_rsb(r););

	high = grant_pending_convert(r, high);
	high = grant_pending_wait(r, high);

	/* nothing left waiting, so no basts are needed */
	if (high == DLM_LOCK_IV)
		return;

	/*
	 * If there are locks left on the wait/convert queue then send blocking
	 * ASTs to granted locks based on the largest requested mode (high)
	 * found above. FIXME: highbast < high comparison not valid for PR/CW.
	 */

	list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
		/* lkb_highbast suppresses duplicate basts for the same mode */
		if (lkb->lkb_bastaddr && (lkb->lkb_highbast < high) &&
		    !__dlm_compat_matrix[lkb->lkb_grmode+1][high+1]) {
			queue_bast(r, lkb, high);
			lkb->lkb_highbast = high;
		}
	}
}
1381
1382static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
1383 struct dlm_lkb *lkb)
1384{
1385 struct dlm_lkb *gr;
1386
1387 list_for_each_entry(gr, head, lkb_statequeue) {
1388 if (gr->lkb_bastaddr &&
1389 gr->lkb_highbast < lkb->lkb_rqmode &&
1390 !modes_compat(gr, lkb)) {
1391 queue_bast(r, gr, lkb->lkb_rqmode);
1392 gr->lkb_highbast = lkb->lkb_rqmode;
1393 }
1394 }
1395}
1396
/* Send blocking ASTs to granted locks that conflict with lkb. */

static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	send_bast_queue(r, &r->res_grantqueue, lkb);
}
1401
/* Send blocking ASTs to both granted and converting locks that conflict
   with lkb (used when lkb's request fails with EAGAIN). */

static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	send_bast_queue(r, &r->res_grantqueue, lkb);
	send_bast_queue(r, &r->res_convertqueue, lkb);
}
1407
1408/* set_master(r, lkb) -- set the master nodeid of a resource
1409
1410 The purpose of this function is to set the nodeid field in the given
1411 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
1412 known, it can just be copied to the lkb and the function will return
1413 0. If the rsb's nodeid is _not_ known, it needs to be looked up
1414 before it can be copied to the lkb.
1415
1416 When the rsb nodeid is being looked up remotely, the initial lkb
1417 causing the lookup is kept on the ls_waiters list waiting for the
1418 lookup reply. Other lkb's waiting for the same rsb lookup are kept
1419 on the rsb's res_lookup list until the master is verified.
1420
1421 Return values:
1422 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
1423 1: the rsb master is not available and the lkb has been placed on
1424 a wait queue
1425*/
1426
static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = r->res_ls;
	int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();

	/* the master is uncertain after recovery: this first lkb will go out
	   and confirm (or correct) the saved res_nodeid */
	if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = lkb->lkb_id;
		lkb->lkb_nodeid = r->res_nodeid;
		return 0;
	}

	/* another lkb is already doing the lookup/confirmation; park this
	   one on res_lookup until confirm_master() runs */
	if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
		list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
		return 1;
	}

	/* res_nodeid == 0: we are the master */
	if (r->res_nodeid == 0) {
		lkb->lkb_nodeid = 0;
		return 0;
	}

	/* res_nodeid > 0: a known remote master */
	if (r->res_nodeid > 0) {
		lkb->lkb_nodeid = r->res_nodeid;
		return 0;
	}

	DLM_ASSERT(r->res_nodeid == -1, dlm_dump_rsb(r););

	dir_nodeid = dlm_dir_nodeid(r);

	/* master unknown and the directory lives elsewhere: ask remotely and
	   keep lkb waiting for the lookup reply */
	if (dir_nodeid != our_nodeid) {
		r->res_first_lkid = lkb->lkb_id;
		send_lookup(r, lkb);
		return 1;
	}

	for (;;) {
		/* It's possible for dlm_scand to remove an old rsb for
		   this same resource from the toss list, us to create
		   a new one, look up the master locally, and find it
		   already exists just before dlm_scand does the
		   dir_remove() on the previous rsb. */

		error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
				       r->res_length, &ret_nodeid);
		if (!error)
			break;
		log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
		/* yield and retry until the race with dlm_scand resolves */
		schedule();
	}

	if (ret_nodeid == our_nodeid) {
		r->res_first_lkid = 0;
		r->res_nodeid = 0;
		lkb->lkb_nodeid = 0;
	} else {
		r->res_first_lkid = lkb->lkb_id;
		r->res_nodeid = ret_nodeid;
		lkb->lkb_nodeid = ret_nodeid;
	}
	return 0;
}
1490
/* Re-drive the request for every lkb that was parked on res_lookup while
   the rsb's master was being looked up. */

static void process_lookup_list(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *safe;

	list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
		list_del(&lkb->lkb_rsb_lookup);
		_request_lock(r, lkb);
		/* be kind to the scheduler; the list may be long */
		schedule();
	}
}
1501
1502/* confirm_master -- confirm (or deny) an rsb's master nodeid */
1503
/* confirm_master -- confirm (or deny) an rsb's master nodeid

   Called with the result of the first request sent to the presumed master
   (tracked by res_first_lkid).  On success or EINPROGRESS the master is
   confirmed and parked lookups are re-driven; on EAGAIN the lookup must be
   retried by another waiting lkb, or the master forgotten entirely. */

static void confirm_master(struct dlm_rsb *r, int error)
{
	struct dlm_lkb *lkb;

	/* no confirmation outstanding */
	if (!r->res_first_lkid)
		return;

	switch (error) {
	case 0:
	case -EINPROGRESS:
		/* master confirmed; release the waiting lkbs */
		r->res_first_lkid = 0;
		process_lookup_list(r);
		break;

	case -EAGAIN:
		/* the remote master didn't queue our NOQUEUE request;
		   make a waiting lkb the first_lkid */

		r->res_first_lkid = 0;

		if (!list_empty(&r->res_lookup)) {
			lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
					 lkb_rsb_lookup);
			list_del(&lkb->lkb_rsb_lookup);
			r->res_first_lkid = lkb->lkb_id;
			_request_lock(r, lkb);
		} else
			/* nobody left to confirm; master is unknown again */
			r->res_nodeid = -1;
		break;

	default:
		log_error(r->res_ls, "confirm_master unknown error %d", error);
	}
}
1538
1539static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
1540 int namelen, uint32_t parent_lkid, void *ast,
1541 void *astarg, void *bast, struct dlm_args *args)
1542{
1543 int rv = -EINVAL;
1544
1545 /* check for invalid arg usage */
1546
1547 if (mode < 0 || mode > DLM_LOCK_EX)
1548 goto out;
1549
1550 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
1551 goto out;
1552
1553 if (flags & DLM_LKF_CANCEL)
1554 goto out;
1555
1556 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
1557 goto out;
1558
1559 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
1560 goto out;
1561
1562 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
1563 goto out;
1564
1565 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
1566 goto out;
1567
1568 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
1569 goto out;
1570
1571 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
1572 goto out;
1573
1574 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
1575 goto out;
1576
1577 if (!ast || !lksb)
1578 goto out;
1579
1580 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
1581 goto out;
1582
1583 /* parent/child locks not yet supported */
1584 if (parent_lkid)
1585 goto out;
1586
1587 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
1588 goto out;
1589
1590 /* these args will be copied to the lkb in validate_lock_args,
1591 it cannot be done now because when converting locks, fields in
1592 an active lkb cannot be modified before locking the rsb */
1593
1594 args->flags = flags;
1595 args->astaddr = ast;
1596 args->astparam = (long) astarg;
1597 args->bastaddr = bast;
1598 args->mode = mode;
1599 args->lksb = lksb;
1600 rv = 0;
1601 out:
1602 return rv;
1603}
1604
1605static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
1606{
1607 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
1608 DLM_LKF_FORCEUNLOCK))
1609 return -EINVAL;
1610
1611 args->flags = flags;
1612 args->astparam = (long) astarg;
1613 return 0;
1614}
1615
/* Copy the pre-validated args into the lkb, with extra checks that can only
   be made once the lkb (and rsb) are locked.  Returns 0, -EINVAL for an
   invalid conversion, or -EBUSY if the lkb already has an operation
   outstanding. */

static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
			      struct dlm_args *args)
{
	int rv = -EINVAL;

	if (args->flags & DLM_LKF_CONVERT) {
		/* a master-copy lkb is owned by a remote node; it can't be
		   converted locally */
		if (lkb->lkb_flags & DLM_IFL_MSTCPY)
			goto out;

		/* QUECVT is only permitted for mode transitions allowed by
		   the quecvt compatibility matrix */
		if (args->flags & DLM_LKF_QUECVT &&
		    !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
			goto out;

		rv = -EBUSY;
		/* only a granted lock can be converted */
		if (lkb->lkb_status != DLM_LKSTS_GRANTED)
			goto out;

		/* an earlier remote operation is still awaiting its reply */
		if (lkb->lkb_wait_type)
			goto out;
	}

	lkb->lkb_exflags = args->flags;
	lkb->lkb_sbflags = 0;
	lkb->lkb_astaddr = args->astaddr;
	lkb->lkb_astparam = args->astparam;
	lkb->lkb_bastaddr = args->bastaddr;
	lkb->lkb_rqmode = args->mode;
	lkb->lkb_lksb = args->lksb;
	lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
	lkb->lkb_ownpid = (int) current->pid;
	rv = 0;
 out:
	return rv;
}
1650
1651static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
1652{
1653 int rv = -EINVAL;
1654
1655 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
1656 goto out;
1657
1658 if (args->flags & DLM_LKF_FORCEUNLOCK)
1659 goto out_ok;
1660
1661 if (args->flags & DLM_LKF_CANCEL &&
1662 lkb->lkb_status == DLM_LKSTS_GRANTED)
1663 goto out;
1664
1665 if (!(args->flags & DLM_LKF_CANCEL) &&
1666 lkb->lkb_status != DLM_LKSTS_GRANTED)
1667 goto out;
1668
1669 rv = -EBUSY;
1670 if (lkb->lkb_wait_type)
1671 goto out;
1672
1673 out_ok:
1674 lkb->lkb_exflags = args->flags;
1675 lkb->lkb_sbflags = 0;
1676 lkb->lkb_astparam = args->astparam;
1677
1678 rv = 0;
1679 out:
1680 return rv;
1681}
1682
1683/*
1684 * Four stage 4 varieties:
1685 * do_request(), do_convert(), do_unlock(), do_cancel()
1686 * These are called on the master node for the given lock and
1687 * from the central locking logic.
1688 */
1689
1690static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
1691{
1692 int error = 0;
1693
1694 if (can_be_granted(r, lkb, 1)) {
1695 grant_lock(r, lkb);
1696 queue_cast(r, lkb, 0);
1697 goto out;
1698 }
1699
1700 if (can_be_queued(lkb)) {
1701 error = -EINPROGRESS;
1702 add_lkb(r, lkb, DLM_LKSTS_WAITING);
1703 send_blocking_asts(r, lkb);
1704 goto out;
1705 }
1706
1707 error = -EAGAIN;
1708 if (force_blocking_asts(lkb))
1709 send_blocking_asts_all(r, lkb);
1710 queue_cast(r, lkb, -EAGAIN);
1711
1712 out:
1713 return error;
1714}
1715
/* Master-side handling of a conversion request: grant in place if possible,
   otherwise move the lkb to the convert queue (-EINPROGRESS) or fail with
   -EAGAIN.  Granting or demoting may let other pending locks be granted. */

static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error = 0;

	/* changing an existing lock may allow others to be granted */

	if (can_be_granted(r, lkb, 1)) {
		grant_lock(r, lkb);
		queue_cast(r, lkb, 0);
		grant_pending_locks(r);
		goto out;
	}

	if (can_be_queued(lkb)) {
		/* a conversion-deadlock demotion lowered our granted mode,
		   which may unblock other waiters */
		if (is_demoted(lkb))
			grant_pending_locks(r);
		error = -EINPROGRESS;
		del_lkb(r, lkb);
		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
		send_blocking_asts(r, lkb);
		goto out;
	}

	error = -EAGAIN;
	if (force_blocking_asts(lkb))
		send_blocking_asts_all(r, lkb);
	queue_cast(r, lkb, -EAGAIN);

 out:
	return error;
}
1747
/* Master-side unlock: remove the lock, complete the caller's ast with
   -DLM_EUNLOCK, and grant anything the removal unblocked. */

static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	remove_lock(r, lkb);
	queue_cast(r, lkb, -DLM_EUNLOCK);
	grant_pending_locks(r);
	return -DLM_EUNLOCK;
}
1755
1756/* FIXME: if revert_lock() finds that the lkb is granted, we should
1757 skip the queue_cast(ECANCEL). It indicates that the request/convert
1758 completed (and queued a normal ast) just before the cancel; we don't
1759 want to clobber the sb_result for the normal ast with ECANCEL. */
1760
/* Master-side cancel: revert the in-progress request/convert, complete the
   caller's ast with -DLM_ECANCEL, and grant anything unblocked.  (See the
   FIXME above about a race with a just-completed grant.) */

static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	revert_lock(r, lkb);
	queue_cast(r, lkb, -DLM_ECANCEL);
	grant_pending_locks(r);
	return -DLM_ECANCEL;
}
1768
1769/*
1770 * Four stage 3 varieties:
1771 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
1772 */
1773
1774/* add a new lkb to a possibly new rsb, called by requesting process */
1775
/* add a new lkb to a possibly new rsb, called by requesting process */

static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error;

	/* set_master: sets lkb nodeid from r */

	error = set_master(r, lkb);
	if (error < 0)
		goto out;
	if (error) {
		/* set_master() returned 1: the lkb is parked waiting for a
		   master lookup; nothing more to do now */
		error = 0;
		goto out;
	}

	if (is_remote(r))
		/* receive_request() calls do_request() on remote node */
		error = send_request(r, lkb);
	else
		error = do_request(r, lkb);
 out:
	return error;
}
1798
1799/* change some property of an existing lkb, e.g. mode */
1800
/* Convert an existing lkb: dispatch to the master node, local or remote. */

static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	/* receive_convert() calls do_convert() on the remote master node */
	if (is_remote(r))
		return send_convert(r, lkb);

	return do_convert(r, lkb);
}
1813
1814/* remove an existing lkb from the granted queue */
1815
/* Unlock a granted lkb: dispatch to the master node, local or remote. */

static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	/* receive_unlock() calls do_unlock() on the remote master node */
	if (is_remote(r))
		return send_unlock(r, lkb);

	return do_unlock(r, lkb);
}
1828
1829/* remove an existing lkb from the convert or wait queue */
1830
/* Cancel a converting/waiting lkb: dispatch to the master node, local or
   remote. */

static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	/* receive_cancel() calls do_cancel() on the remote master node */
	if (is_remote(r))
		return send_cancel(r, lkb);

	return do_cancel(r, lkb);
}
1843
1844/*
1845 * Four stage 2 varieties:
1846 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
1847 */
1848
/* Stage 2 of a new request: validate args, find/create the rsb, attach the
   lkb to it, and drive the request.  The rsb reference taken by find_rsb()
   is dropped before returning. */

static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
			int len, struct dlm_args *args)
{
	struct dlm_rsb *r;
	int error;

	error = validate_lock_args(ls, lkb, args);
	if (error)
		goto out;

	error = find_rsb(ls, name, len, R_CREATE, &r);
	if (error)
		goto out;

	lock_rsb(r);

	attach_lkb(r, lkb);
	/* report the lock id back to the caller through the status block */
	lkb->lkb_lksb->sb_lkid = lkb->lkb_id;

	error = _request_lock(r, lkb);

	unlock_rsb(r);
	put_rsb(r);

 out:
	return error;
}
1876
1877static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
1878 struct dlm_args *args)
1879{
1880 struct dlm_rsb *r;
1881 int error;
1882
1883 r = lkb->lkb_resource;
1884
1885 hold_rsb(r);
1886 lock_rsb(r);
1887
1888 error = validate_lock_args(ls, lkb, args);
1889 if (error)
1890 goto out;
1891
1892 error = _convert_lock(r, lkb);
1893 out:
1894 unlock_rsb(r);
1895 put_rsb(r);
1896 return error;
1897}
1898
1899static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
1900 struct dlm_args *args)
1901{
1902 struct dlm_rsb *r;
1903 int error;
1904
1905 r = lkb->lkb_resource;
1906
1907 hold_rsb(r);
1908 lock_rsb(r);
1909
1910 error = validate_unlock_args(lkb, args);
1911 if (error)
1912 goto out;
1913
1914 error = _unlock_lock(r, lkb);
1915 out:
1916 unlock_rsb(r);
1917 put_rsb(r);
1918 return error;
1919}
1920
1921static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
1922 struct dlm_args *args)
1923{
1924 struct dlm_rsb *r;
1925 int error;
1926
1927 r = lkb->lkb_resource;
1928
1929 hold_rsb(r);
1930 lock_rsb(r);
1931
1932 error = validate_unlock_args(lkb, args);
1933 if (error)
1934 goto out;
1935
1936 error = _cancel_lock(r, lkb);
1937 out:
1938 unlock_rsb(r);
1939 put_rsb(r);
1940 return error;
1941}
1942
1943/*
1944 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
1945 */
1946
/* dlm_lock() -- public entry point for requesting or converting a lock.
   Returns 0 on success (including an async grant in progress, reported
   later via the ast), or a negative errno.  -EINPROGRESS and -EAGAIN are
   translated to 0 since completion is delivered through the ast. */

int dlm_lock(dlm_lockspace_t *lockspace,
	     int mode,
	     struct dlm_lksb *lksb,
	     uint32_t flags,
	     void *name,
	     unsigned int namelen,
	     uint32_t parent_lkid,
	     void (*ast) (void *astarg),
	     void *astarg,
	     void (*bast) (void *astarg, int mode))
{
	struct dlm_ls *ls;
	struct dlm_lkb *lkb;
	struct dlm_args args;
	int error, convert = flags & DLM_LKF_CONVERT;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	/* block out recovery for the duration of the operation */
	lock_recovery(ls);

	/* a conversion operates on an existing lkb (found by its id in the
	   caller's status block); a new request creates one */
	if (convert)
		error = find_lkb(ls, lksb->sb_lkid, &lkb);
	else
		error = create_lkb(ls, &lkb);

	if (error)
		goto out;

	error = set_lock_args(mode, lksb, flags, namelen, parent_lkid, ast,
			      astarg, bast, &args);
	if (error)
		goto out_put;

	if (convert)
		error = convert_lock(ls, lkb, &args);
	else
		error = request_lock(ls, lkb, name, namelen, &args);

	if (error == -EINPROGRESS)
		error = 0;
 out_put:
	/* drop the find_lkb() reference (convert), or the creation reference
	   when a new request failed */
	if (convert || error)
		__put_lkb(ls, lkb);
	if (error == -EAGAIN)
		error = 0;
 out:
	unlock_recovery(ls);
	dlm_put_lockspace(ls);
	return error;
}
1999
/* dlm_unlock() -- public entry point for unlocking or cancelling a lock.
   Returns 0 on success (-DLM_EUNLOCK / -DLM_ECANCEL are translated to 0;
   completion is delivered via the ast), or a negative errno. */

int dlm_unlock(dlm_lockspace_t *lockspace,
	       uint32_t lkid,
	       uint32_t flags,
	       struct dlm_lksb *lksb,
	       void *astarg)
{
	struct dlm_ls *ls;
	struct dlm_lkb *lkb;
	struct dlm_args args;
	int error;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	/* block out recovery for the duration of the operation */
	lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	error = set_unlock_args(flags, astarg, &args);
	if (error)
		goto out_put;

	if (flags & DLM_LKF_CANCEL)
		error = cancel_lock(ls, lkb, &args);
	else
		error = unlock_lock(ls, lkb, &args);

	if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
		error = 0;
 out_put:
	/* drop the find_lkb() reference */
	dlm_put_lkb(lkb);
 out:
	unlock_recovery(ls);
	dlm_put_lockspace(ls);
	return error;
}
2039
2040/*
2041 * send/receive routines for remote operations and replies
2042 *
2043 * send_args
2044 * send_common
2045 * send_request receive_request
2046 * send_convert receive_convert
2047 * send_unlock receive_unlock
2048 * send_cancel receive_cancel
2049 * send_grant receive_grant
2050 * send_bast receive_bast
2051 * send_lookup receive_lookup
2052 * send_remove receive_remove
2053 *
2054 * send_common_reply
2055 * receive_request_reply send_request_reply
2056 * receive_convert_reply send_convert_reply
2057 * receive_unlock_reply send_unlock_reply
2058 * receive_cancel_reply send_cancel_reply
2059 * receive_lookup_reply send_lookup_reply
2060 */
2061
/* Allocate and initialize an outgoing dlm_message of type mstype destined
   for to_nodeid.  Message types that carry a resource name or an LVB get a
   correspondingly larger buffer.  Returns 0 with *ms_ret/*mh_ret set, or
   -ENOBUFS if no buffer is available. */

static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
			  int to_nodeid, int mstype,
			  struct dlm_message **ms_ret,
			  struct dlm_mhandle **mh_ret)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	char *mb;
	int mb_len = sizeof(struct dlm_message);

	switch (mstype) {
	case DLM_MSG_REQUEST:
	case DLM_MSG_LOOKUP:
	case DLM_MSG_REMOVE:
		/* these carry the resource name in m_extra */
		mb_len += r->res_length;
		break;
	case DLM_MSG_CONVERT:
	case DLM_MSG_UNLOCK:
	case DLM_MSG_REQUEST_REPLY:
	case DLM_MSG_CONVERT_REPLY:
	case DLM_MSG_GRANT:
		/* these may carry the lock value block in m_extra */
		if (lkb && lkb->lkb_lvbptr)
			mb_len += r->res_ls->ls_lvblen;
		break;
	}

	/* get_buffer gives us a message handle (mh) that we need to
	   pass into lowcomms_commit and a message buffer (mb) that we
	   write our data into */

	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_KERNEL, &mb);
	if (!mh)
		return -ENOBUFS;

	memset(mb, 0, mb_len);

	ms = (struct dlm_message *) mb;

	ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
	ms->m_header.h_lockspace = r->res_ls->ls_global_id;
	ms->m_header.h_nodeid = dlm_our_nodeid();
	ms->m_header.h_length = mb_len;
	ms->m_header.h_cmd = DLM_MSG;

	ms->m_type = mstype;

	*mh_ret = mh;
	*ms_ret = ms;
	return 0;
}
2112
2113/* further lowcomms enhancements or alternate implementations may make
2114 the return value from this function useful at some point */
2115
/* Convert the message to wire byte order and hand it to lowcomms. */

static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
{
	dlm_message_out(ms);
	dlm_lowcomms_commit_buffer(mh);
	return 0;
}
2122
/* Fill in the lkb- and rsb-derived fields of an outgoing message.  The
   m_extra payload is the resource name for REQUEST/LOOKUP messages, or the
   LVB (when present) for every other type. */

static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
		      struct dlm_message *ms)
{
	ms->m_nodeid   = lkb->lkb_nodeid;
	ms->m_pid      = lkb->lkb_ownpid;
	ms->m_lkid     = lkb->lkb_id;
	ms->m_remid    = lkb->lkb_remid;
	ms->m_exflags  = lkb->lkb_exflags;
	ms->m_sbflags  = lkb->lkb_sbflags;
	ms->m_flags    = lkb->lkb_flags;
	ms->m_lvbseq   = lkb->lkb_lvbseq;
	ms->m_status   = lkb->lkb_status;
	ms->m_grmode   = lkb->lkb_grmode;
	ms->m_rqmode   = lkb->lkb_rqmode;
	ms->m_hash     = r->res_hash;

	/* m_result and m_bastmode are set from function args,
	   not from lkb fields */

	/* tell the receiver which callbacks the owner registered */
	if (lkb->lkb_bastaddr)
		ms->m_asts |= AST_BAST;
	if (lkb->lkb_astaddr)
		ms->m_asts |= AST_COMP;

	if (ms->m_type == DLM_MSG_REQUEST || ms->m_type == DLM_MSG_LOOKUP)
		memcpy(ms->m_extra, r->res_name, r->res_length);

	else if (lkb->lkb_lvbptr)
		memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);

}
2154
/* Build and send a request/convert/unlock/cancel message to the rsb's
   master.  The lkb is put on the waiters list (awaiting the reply) before
   sending, and removed again if the send fails. */

static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	add_to_waiters(lkb, mstype);

	to_nodeid = r->res_nodeid;

	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
	if (error)
		goto fail;

	send_args(r, lkb, ms);

	error = send_message(mh, ms);
	if (error)
		goto fail;
	return 0;

 fail:
	/* undo the waiters bookkeeping; no reply will arrive */
	remove_from_waiters(lkb);
	return error;
}
2180
/* Send a new lock request to the rsb's master node. */

static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return send_common(r, lkb, DLM_MSG_REQUEST);
}
2185
/* Send a conversion request to the rsb's master node. */

static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int error;

	error = send_common(r, lkb, DLM_MSG_CONVERT);

	/* down conversions go without a reply from the master */
	if (!error && down_conversion(lkb)) {
		/* fake the reply locally using the lockspace's stub message
		   so the normal reply path completes the conversion */
		remove_from_waiters(lkb);
		r->res_ls->ls_stub_ms.m_result = 0;
		r->res_ls->ls_stub_ms.m_flags = lkb->lkb_flags;
		__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
	}

	return error;
}
2202
2203/* FIXME: if this lkb is the only lock we hold on the rsb, then set
2204 MASTER_UNCERTAIN to force the next request on the rsb to confirm
2205 that the master is still correct. */
2206
/* Send an unlock request to the rsb's master node. */

static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return send_common(r, lkb, DLM_MSG_UNLOCK);
}
2211
/* Send a cancel request to the rsb's master node. */

static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return send_common(r, lkb, DLM_MSG_CANCEL);
}
2216
2217static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
2218{
2219 struct dlm_message *ms;
2220 struct dlm_mhandle *mh;
2221 int to_nodeid, error;
2222
2223 to_nodeid = lkb->lkb_nodeid;
2224
2225 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
2226 if (error)
2227 goto out;
2228
2229 send_args(r, lkb, ms);
2230
2231 ms->m_result = 0;
2232
2233 error = send_message(mh, ms);
2234 out:
2235 return error;
2236}
2237
2238static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
2239{
2240 struct dlm_message *ms;
2241 struct dlm_mhandle *mh;
2242 int to_nodeid, error;
2243
2244 to_nodeid = lkb->lkb_nodeid;
2245
2246 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
2247 if (error)
2248 goto out;
2249
2250 send_args(r, lkb, ms);
2251
2252 ms->m_bastmode = mode;
2253
2254 error = send_message(mh, ms);
2255 out:
2256 return error;
2257}
2258
/* Ask the resource's directory node who the master is.  The lkb waits on
   the waiters list for the lookup reply; it is removed again if the send
   fails. */

static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	add_to_waiters(lkb, DLM_MSG_LOOKUP);

	to_nodeid = dlm_dir_nodeid(r);

	error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
	if (error)
		goto fail;

	send_args(r, lkb, ms);

	error = send_message(mh, ms);
	if (error)
		goto fail;
	return 0;

 fail:
	/* undo the waiters bookkeeping; no reply will arrive */
	remove_from_waiters(lkb);
	return error;
}
2284
2285static int send_remove(struct dlm_rsb *r)
2286{
2287 struct dlm_message *ms;
2288 struct dlm_mhandle *mh;
2289 int to_nodeid, error;
2290
2291 to_nodeid = dlm_dir_nodeid(r);
2292
2293 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
2294 if (error)
2295 goto out;
2296
2297 memcpy(ms->m_extra, r->res_name, r->res_length);
2298 ms->m_hash = r->res_hash;
2299
2300 error = send_message(mh, ms);
2301 out:
2302 return error;
2303}
2304
/* Send a reply message of type mstype, carrying result rv, back to the
   node that owns lkb. */

static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
			     int mstype, int rv)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int to_nodeid, error;

	to_nodeid = lkb->lkb_nodeid;

	error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
	if (error)
		goto out;

	send_args(r, lkb, ms);

	ms->m_result = rv;

	error = send_message(mh, ms);
 out:
	return error;
}
2326
/* Reply to a remote lock request with result rv. */

static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
}
2331
/* Reply to a remote conversion request with result rv. */

static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
}
2336
/* Reply to a remote unlock request with result rv. */

static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
}
2341
/* Reply to a remote cancel request with result rv. */

static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
}
2346
/* Reply to a directory lookup (ms_in) with the master nodeid found
   (ret_nodeid) and result rv.  Uses the lockspace's stub rsb since no real
   rsb is involved on the directory node. */

static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
			     int ret_nodeid, int rv)
{
	struct dlm_rsb *r = &ls->ls_stub_rsb;
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int error, nodeid = ms_in->m_header.h_nodeid;

	error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
	if (error)
		goto out;

	/* echo the requester's lock id so it can match the reply */
	ms->m_lkid = ms_in->m_lkid;
	ms->m_result = rv;
	ms->m_nodeid = ret_nodeid;

	error = send_message(mh, ms);
 out:
	return error;
}
2367
2368/* which args we save from a received message depends heavily on the type
2369 of message, unlike the send side where we can safely send everything about
2370 the lkb for any type of message */
2371
2372static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
2373{
2374 lkb->lkb_exflags = ms->m_exflags;
2375 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2376 (ms->m_flags & 0x0000FFFF);
2377}
2378
2379static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2380{
2381 lkb->lkb_sbflags = ms->m_sbflags;
2382 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2383 (ms->m_flags & 0x0000FFFF);
2384}
2385
2386static int receive_extralen(struct dlm_message *ms)
2387{
2388 return (ms->m_header.h_length - sizeof(struct dlm_message));
2389}
2390
/* Copy the lock value block from a message's tail into the lkb,
   allocating the lvb buffer on first use.  Only done when the caller
   asked for VALBLK.  Returns -ENOMEM if allocation fails, else 0. */
static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
		       struct dlm_message *ms)
{
	int len;

	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
		if (!lkb->lkb_lvbptr)
			lkb->lkb_lvbptr = allocate_lvb(ls);
		if (!lkb->lkb_lvbptr)
			return -ENOMEM;
		/* the lvb occupies the whole variable-length tail */
		len = receive_extralen(ms);
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
	}
	return 0;
}
2406
/* Initialize a freshly created master-copy lkb from an incoming
   DLM_MSG_REQUEST.  The remote node's id/lkid are recorded so replies
   can be routed back; granted mode starts at IV (nothing granted yet). */
static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
				struct dlm_message *ms)
{
	lkb->lkb_nodeid = ms->m_header.h_nodeid;
	lkb->lkb_ownpid = ms->m_pid;
	lkb->lkb_remid = ms->m_lkid;
	lkb->lkb_grmode = DLM_LOCK_IV;
	lkb->lkb_rqmode = ms->m_rqmode;
	/* remote ast pointers are only used as presence flags here */
	lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST);
	lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP);

	DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb););

	if (receive_lvb(ls, lkb, ms))
		return -ENOMEM;

	return 0;
}
2425
/* Validate and apply the fields of an incoming DLM_MSG_CONVERT to an
   existing master-copy lkb.  Rejects messages from the wrong node
   (-EINVAL), non-master-copy lkbs (-EINVAL), and conversions on locks
   that aren't currently granted (-EBUSY). */
static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
				struct dlm_message *ms)
{
	if (lkb->lkb_nodeid != ms->m_header.h_nodeid) {
		log_error(ls, "convert_args nodeid %d %d lkid %x %x",
			  lkb->lkb_nodeid, ms->m_header.h_nodeid,
			  lkb->lkb_id, lkb->lkb_remid);
		return -EINVAL;
	}

	if (!is_master_copy(lkb))
		return -EINVAL;

	if (lkb->lkb_status != DLM_LKSTS_GRANTED)
		return -EBUSY;

	if (receive_lvb(ls, lkb, ms))
		return -ENOMEM;

	lkb->lkb_rqmode = ms->m_rqmode;
	lkb->lkb_lvbseq = ms->m_lvbseq;

	return 0;
}
2450
/* Apply the fields of an incoming DLM_MSG_UNLOCK: only the lvb may be
   updated, and only on a master-copy lkb. */
static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
			       struct dlm_message *ms)
{
	if (!is_master_copy(lkb))
		return -EINVAL;
	if (receive_lvb(ls, lkb, ms))
		return -ENOMEM;
	return 0;
}
2460
2461/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
2462 uses to send a reply and that the remote end uses to process the reply. */
2463
static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb = &ls->ls_stub_lkb;
	/* route the reply back to the sender, echoing its lock id */
	lkb->lkb_nodeid = ms->m_header.h_nodeid;
	lkb->lkb_remid = ms->m_lkid;
}
2470
/* Handle DLM_MSG_REQUEST: a remote node asks us (the master) for a lock.
   Build a master-copy lkb from the message, attach it to the rsb named
   in the message tail, run the request locally and send back the result.
   On setup failure the stub lkb is used so the requester still gets a
   reply carrying the error. */
static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error, namelen;

	error = create_lkb(ls, &lkb);
	if (error)
		goto fail;

	receive_flags(lkb, ms);
	lkb->lkb_flags |= DLM_IFL_MSTCPY;	/* mark as master copy */
	error = receive_request_args(ls, lkb, ms);
	if (error) {
		__put_lkb(ls, lkb);
		goto fail;
	}

	/* the resource name is the message's variable-length tail */
	namelen = receive_extralen(ms);

	error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
	if (error) {
		__put_lkb(ls, lkb);
		goto fail;
	}

	lock_rsb(r);

	attach_lkb(r, lkb);
	error = do_request(r, lkb);
	send_request_reply(r, lkb, error);

	unlock_rsb(r);
	put_rsb(r);

	/* -EINPROGRESS means the lkb was queued and must stay alive;
	   any other error drops the reference taken by create_lkb() */
	if (error == -EINPROGRESS)
		error = 0;
	if (error)
		dlm_put_lkb(lkb);
	return;

 fail:
	setup_stub_lkb(ls, ms);
	send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}
2516
/* Handle DLM_MSG_CONVERT on the master.  Down-conversions complete
   asynchronously on the requesting node, so no reply is sent for them
   (reply = 0); everything else gets a convert reply with the result. */
static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error, reply = 1;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error)
		goto fail;

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	receive_flags(lkb, ms);
	error = receive_convert_args(ls, lkb, ms);
	if (error)
		goto out;
	/* down-conversions are fire-and-forget from the requester's side */
	reply = !down_conversion(lkb);

	error = do_convert(r, lkb);
 out:
	if (reply)
		send_convert_reply(r, lkb, error);

	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
	return;

 fail:
	/* unknown lkb: answer via the stub so the sender isn't left waiting */
	setup_stub_lkb(ls, ms);
	send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}
2552
/* Handle DLM_MSG_UNLOCK on the master: validate args (lvb update),
   perform the unlock, and always send back an unlock reply. */
static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error)
		goto fail;

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	receive_flags(lkb, ms);
	error = receive_unlock_args(ls, lkb, ms);
	if (error)
		goto out;

	error = do_unlock(r, lkb);
 out:
	send_unlock_reply(r, lkb, error);

	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
	return;

 fail:
	/* unknown lkb: reply through the stub with the lookup error */
	setup_stub_lkb(ls, ms);
	send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}
2586
/* Handle DLM_MSG_CANCEL on the master: attempt to cancel the in-progress
   request/conversion and send back a cancel reply with the result. */
static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error)
		goto fail;

	receive_flags(lkb, ms);

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	error = do_cancel(r, lkb);
	send_cancel_reply(r, lkb, error);

	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
	return;

 fail:
	/* unknown lkb: reply through the stub with the lookup error */
	setup_stub_lkb(ls, ms);
	send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}
2616
/* Handle DLM_MSG_GRANT: the master granted our queued lock.  Move our
   process-copy lkb to the grant queue and queue the completion ast.
   No reply is sent; grants are one-way async messages. */
static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_grant no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	receive_flags_reply(lkb, ms);
	grant_lock_pc(r, lkb, ms);
	queue_cast(r, lkb, 0);

	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
}
2643
/* Handle DLM_MSG_BAST: the master asks our lock's holder to release or
   downconvert (blocking ast) because it blocks a mode given in
   m_bastmode.  Just queue the bast for delivery; no reply. */
static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_bast no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	queue_bast(r, lkb, ms->m_bastmode);

	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);
}
2668
/* Handle DLM_MSG_LOOKUP: we are the directory node for the named
   resource; look up (or create) its master entry and reply with the
   master's nodeid.  If we turn out to be the master ourselves, process
   the lookup directly as a request instead of sending a lookup reply. */
static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
{
	int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;

	from_nodeid = ms->m_header.h_nodeid;
	our_nodeid = dlm_our_nodeid();

	len = receive_extralen(ms);

	/* sanity: the sender computed us as dir node from the name hash */
	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
	if (dir_nodeid != our_nodeid) {
		log_error(ls, "lookup dir_nodeid %d from %d",
			  dir_nodeid, from_nodeid);
		error = -EINVAL;
		ret_nodeid = -1;
		goto out;
	}

	error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);

	/* Optimization: we're master so treat lookup as a request */
	if (!error && ret_nodeid == our_nodeid) {
		receive_request(ls, ms);
		return;
	}
 out:
	send_lookup_reply(ls, ms, ret_nodeid, error);
}
2697
/* Handle DLM_MSG_REMOVE: a master with no remaining locks on a resource
   asks us, the directory node, to drop the name->master entry.  There is
   no reply; a mis-routed message is only logged. */
static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
{
	int len, dir_nodeid, from_nodeid;

	from_nodeid = ms->m_header.h_nodeid;

	len = receive_extralen(ms);

	/* sanity: the name hash must map to us */
	dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
	if (dir_nodeid != dlm_our_nodeid()) {
		log_error(ls, "remove dir entry dir_nodeid %d from %d",
			  dir_nodeid, from_nodeid);
		return;
	}

	dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
}
2715
/* Handle DLM_MSG_REQUEST_REPLY: the master answered our request.
   m_result carries the do_request() outcome:
     -EAGAIN      NOQUEUE request would block -> cast -EAGAIN, free lkb
     -EINPROGRESS request queued remotely     -> move lkb to wait queue
     0            granted immediately         -> grant + cast
     -EBADR/-ENOTBLK master no longer valid   -> forget master, retry */
static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error, mstype;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_request_reply no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	/* remember what we were waiting for before clearing waiter state */
	mstype = lkb->lkb_wait_type;
	error = remove_from_waiters(lkb);
	if (error) {
		log_error(ls, "receive_request_reply not on waiters");
		goto out;
	}

	/* this is the value returned from do_request() on the master */
	error = ms->m_result;

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	/* Optimization: the dir node was also the master, so it took our
	   lookup as a request and sent request reply instead of lookup reply */
	if (mstype == DLM_MSG_LOOKUP) {
		r->res_nodeid = ms->m_header.h_nodeid;
		lkb->lkb_nodeid = r->res_nodeid;
	}

	switch (error) {
	case -EAGAIN:
		/* request would block (be queued) on remote master;
		   the unhold undoes the original ref from create_lkb()
		   so it leads to the lkb being freed */
		queue_cast(r, lkb, -EAGAIN);
		confirm_master(r, -EAGAIN);
		unhold_lkb(lkb);
		break;

	case -EINPROGRESS:
	case 0:
		/* request was queued or granted on remote master */
		receive_flags_reply(lkb, ms);
		lkb->lkb_remid = ms->m_lkid;
		if (error)
			add_lkb(r, lkb, DLM_LKSTS_WAITING);
		else {
			grant_lock_pc(r, lkb, ms);
			queue_cast(r, lkb, 0);
		}
		confirm_master(r, error);
		break;

	case -EBADR:
	case -ENOTBLK:
		/* find_rsb failed to find rsb or rsb wasn't master */
		r->res_nodeid = -1;
		lkb->lkb_nodeid = -1;
		/* restart the whole request, including master lookup */
		_request_lock(r, lkb);
		break;

	default:
		log_error(ls, "receive_request_reply error %d", error);
	}

	unlock_rsb(r);
	put_rsb(r);
 out:
	dlm_put_lkb(lkb);
}
2791
/* Apply a convert reply to lkb with r already held and locked.
   m_result carries the do_convert() outcome:
     -EAGAIN      convert would block -> cast -EAGAIN
     -EINPROGRESS convert queued      -> move lkb to convert queue
     0            granted             -> grant + cast */
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms)
{
	int error = ms->m_result;

	/* this is the value returned from do_convert() on the master */

	switch (error) {
	case -EAGAIN:
		/* convert would block (be queued) on remote master */
		queue_cast(r, lkb, -EAGAIN);
		break;

	case -EINPROGRESS:
		/* convert was queued on remote master */
		del_lkb(r, lkb);
		add_lkb(r, lkb, DLM_LKSTS_CONVERT);
		break;

	case 0:
		/* convert was granted on remote master */
		receive_flags_reply(lkb, ms);
		grant_lock_pc(r, lkb, ms);
		queue_cast(r, lkb, 0);
		break;

	default:
		log_error(r->res_ls, "receive_convert_reply error %d", error);
	}
}
2822
/* Lock/ref wrapper around __receive_convert_reply(); also called with a
   stub message from recovery (recover_convert_waiter). */
static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_rsb *r = lkb->lkb_resource;

	hold_rsb(r);
	lock_rsb(r);

	__receive_convert_reply(r, lkb, ms);

	unlock_rsb(r);
	put_rsb(r);
}
2835
/* Handle DLM_MSG_CONVERT_REPLY: find our waiting process-copy lkb,
   clear its waiter state, and apply the master's answer. */
static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_convert_reply no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	error = remove_from_waiters(lkb);
	if (error) {
		log_error(ls, "receive_convert_reply not on waiters");
		goto out;
	}

	_receive_convert_reply(lkb, ms);
 out:
	dlm_put_lkb(lkb);
}
2858
/* Apply an unlock reply; also called with a stub message from recovery.
   -DLM_EUNLOCK is the only expected result: remove the lock and deliver
   the unlock completion ast. */
static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_rsb *r = lkb->lkb_resource;
	int error = ms->m_result;

	hold_rsb(r);
	lock_rsb(r);

	/* this is the value returned from do_unlock() on the master */

	switch (error) {
	case -DLM_EUNLOCK:
		receive_flags_reply(lkb, ms);
		remove_lock_pc(r, lkb);
		queue_cast(r, lkb, -DLM_EUNLOCK);
		break;
	default:
		log_error(r->res_ls, "receive_unlock_reply error %d", error);
	}

	unlock_rsb(r);
	put_rsb(r);
}
2882
/* Handle DLM_MSG_UNLOCK_REPLY: find our waiting process-copy lkb,
   clear its waiter state, and apply the master's unlock answer. */
static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_unlock_reply no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	error = remove_from_waiters(lkb);
	if (error) {
		log_error(ls, "receive_unlock_reply not on waiters");
		goto out;
	}

	_receive_unlock_reply(lkb, ms);
 out:
	dlm_put_lkb(lkb);
}
2905
/* Apply a cancel reply; also called with a stub message from recovery.
   -DLM_ECANCEL is the only expected result: revert the lock to its
   previous granted state and deliver the cancel completion ast. */
static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_rsb *r = lkb->lkb_resource;
	int error = ms->m_result;

	hold_rsb(r);
	lock_rsb(r);

	/* this is the value returned from do_cancel() on the master */

	switch (error) {
	case -DLM_ECANCEL:
		receive_flags_reply(lkb, ms);
		revert_lock_pc(r, lkb);
		queue_cast(r, lkb, -DLM_ECANCEL);
		break;
	default:
		log_error(r->res_ls, "receive_cancel_reply error %d", error);
	}

	unlock_rsb(r);
	put_rsb(r);
}
2929
/* Handle DLM_MSG_CANCEL_REPLY: find our waiting process-copy lkb,
   clear its waiter state, and apply the master's cancel answer. */
static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, ms->m_remid, &lkb);
	if (error) {
		log_error(ls, "receive_cancel_reply no lkb");
		return;
	}
	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	error = remove_from_waiters(lkb);
	if (error) {
		log_error(ls, "receive_cancel_reply not on waiters");
		goto out;
	}

	_receive_cancel_reply(lkb, ms);
 out:
	dlm_put_lkb(lkb);
}
2952
/* Handle DLM_MSG_LOOKUP_REPLY: the directory node told us who masters
   the resource (m_nodeid).  Record the master on the rsb (0 == we are
   master), restart the stalled request via _request_lock(), and if we
   became master, process any other lkbs that queued up behind the
   lookup (process_lookup_list). */
static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error, ret_nodeid;

	/* lookup replies echo OUR lkid in m_lkid (see send_lookup_reply) */
	error = find_lkb(ls, ms->m_lkid, &lkb);
	if (error) {
		log_error(ls, "receive_lookup_reply no lkb");
		return;
	}

	error = remove_from_waiters(lkb);
	if (error) {
		log_error(ls, "receive_lookup_reply not on waiters");
		goto out;
	}

	/* this is the value returned by dlm_dir_lookup on dir node
	   FIXME: will a non-zero error ever be returned? */
	error = ms->m_result;

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	ret_nodeid = ms->m_nodeid;
	if (ret_nodeid == dlm_our_nodeid()) {
		/* we are the master: nodeid 0 means local by convention */
		r->res_nodeid = 0;
		ret_nodeid = 0;
		r->res_first_lkid = 0;
	} else {
		/* set_master() will copy res_nodeid to lkb_nodeid */
		r->res_nodeid = ret_nodeid;
	}

	_request_lock(r, lkb);

	if (!ret_nodeid)
		process_lookup_list(r);

	unlock_rsb(r);
	put_rsb(r);
 out:
	dlm_put_lkb(lkb);
}
2999
/* Entry point for all incoming DLM messages.
   @hd: raw message header (cast to dlm_message)
   @nodeid: sending node
   @recovery: nonzero when called by dlm_recoverd draining the
	      requestqueue (message already byte-swapped, must not be
	      re-queued, and must abort with -EINTR if recovery restarts)
   Returns 0 after dispatch, -EINVAL for an unknown lockspace, -EINTR if
   locking is stopped for recovery. */
int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
{
	struct dlm_message *ms = (struct dlm_message *) hd;
	struct dlm_ls *ls;
	int error;

	/* byte-swap wire fields exactly once */
	if (!recovery)
		dlm_message_in(ms);

	ls = dlm_find_lockspace_global(hd->h_lockspace);
	if (!ls) {
		log_print("drop message %d from %d for unknown lockspace %d",
			  ms->m_type, nodeid, hd->h_lockspace);
		return -EINVAL;
	}

	/* recovery may have just ended leaving a bunch of backed-up requests
	   in the requestqueue; wait while dlm_recoverd clears them */

	if (!recovery)
		dlm_wait_requestqueue(ls);

	/* recovery may have just started while there were a bunch of
	   in-flight requests -- save them in requestqueue to be processed
	   after recovery.  we can't let dlm_recvd block on the recovery
	   lock.  if dlm_recoverd is calling this function to clear the
	   requestqueue, it needs to be interrupted (-EINTR) if another
	   recovery operation is starting. */

	while (1) {
		if (dlm_locking_stopped(ls)) {
			if (!recovery)
				dlm_add_requestqueue(ls, nodeid, hd);
			error = -EINTR;
			goto out;
		}

		if (lock_recovery_try(ls))
			break;
		schedule();
	}

	switch (ms->m_type) {

	/* messages sent to a master node */

	case DLM_MSG_REQUEST:
		receive_request(ls, ms);
		break;

	case DLM_MSG_CONVERT:
		receive_convert(ls, ms);
		break;

	case DLM_MSG_UNLOCK:
		receive_unlock(ls, ms);
		break;

	case DLM_MSG_CANCEL:
		receive_cancel(ls, ms);
		break;

	/* messages sent from a master node (replies to above) */

	case DLM_MSG_REQUEST_REPLY:
		receive_request_reply(ls, ms);
		break;

	case DLM_MSG_CONVERT_REPLY:
		receive_convert_reply(ls, ms);
		break;

	case DLM_MSG_UNLOCK_REPLY:
		receive_unlock_reply(ls, ms);
		break;

	case DLM_MSG_CANCEL_REPLY:
		receive_cancel_reply(ls, ms);
		break;

	/* messages sent from a master node (only two types of async msg) */

	case DLM_MSG_GRANT:
		receive_grant(ls, ms);
		break;

	case DLM_MSG_BAST:
		receive_bast(ls, ms);
		break;

	/* messages sent to a dir node */

	case DLM_MSG_LOOKUP:
		receive_lookup(ls, ms);
		break;

	case DLM_MSG_REMOVE:
		receive_remove(ls, ms);
		break;

	/* messages sent from a dir node (remove has no reply) */

	case DLM_MSG_LOOKUP_REPLY:
		receive_lookup_reply(ls, ms);
		break;

	default:
		log_error(ls, "unknown message type %d", ms->m_type);
	}

	unlock_recovery(ls);
 out:
	dlm_put_lockspace(ls);
	/* handlers may have queued asts/basts; kick the ast daemon */
	dlm_astd_wake();
	return 0;
}
3116
3117
3118/*
3119 * Recovery related
3120 */
3121
/* Recovery for an lkb waiting on a convert reply from a dead master.
   PR<->CW (middle) conversions are completed locally with a faked
   -EINPROGRESS reply and flagged for grant-mode recomputation; other
   up-conversions are simply marked for resend after recovery. */
static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	if (middle_conversion(lkb)) {
		/* extra ref so the faked reply can't free the lkb under us */
		hold_lkb(lkb);
		ls->ls_stub_ms.m_result = -EINPROGRESS;
		_remove_from_waiters(lkb);
		_receive_convert_reply(lkb, &ls->ls_stub_ms);

		/* Same special case as in receive_rcom_lock_args() */
		lkb->lkb_grmode = DLM_LOCK_IV;
		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
		unhold_lkb(lkb);

	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
		lkb->lkb_flags |= DLM_IFL_RESEND;
	}

	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
	   conversions are async; there's no reply from the remote master */
}
3142
3143/* A waiting lkb needs recovery if the master node has failed, or
3144 the master node is changing (only when no directory is used) */
3145
3146static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
3147{
3148 if (dlm_is_removed(ls, lkb->lkb_nodeid))
3149 return 1;
3150
3151 if (!dlm_no_directory(ls))
3152 return 0;
3153
3154 if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid)
3155 return 1;
3156
3157 return 0;
3158}
3159
3160/* Recovery for locks that are waiting for replies from nodes that are now
3161 gone. We can just complete unlocks and cancels by faking a reply from the
3162 dead node. Requests and up-conversions we flag to be resent after
3163 recovery. Down-conversions can just be completed with a fake reply like
3164 unlocks. Conversions between PR and CW need special attention. */
3165
/* First phase of waiter recovery, run with locking stopped: walk the
   waiters list and either flag each lkb for resend after recovery
   (lookups, requests, up-conversions) or complete it now with a faked
   reply from the dead master (unlocks, cancels, middle conversions). */
void dlm_recover_waiters_pre(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb, *safe;

	mutex_lock(&ls->ls_waiters_mutex);

	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
		log_debug(ls, "pre recover waiter lkid %x type %d flags %x",
			  lkb->lkb_id, lkb->lkb_wait_type, lkb->lkb_flags);

		/* all outstanding lookups, regardless of destination  will be
		   resent after recovery is done */

		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
			lkb->lkb_flags |= DLM_IFL_RESEND;
			continue;
		}

		if (!waiter_needs_recovery(ls, lkb))
			continue;

		switch (lkb->lkb_wait_type) {

		case DLM_MSG_REQUEST:
			lkb->lkb_flags |= DLM_IFL_RESEND;
			break;

		case DLM_MSG_CONVERT:
			recover_convert_waiter(ls, lkb);
			break;

		case DLM_MSG_UNLOCK:
			/* hold balances the waiters ref dropped by
			   _remove_from_waiters; the faked reply completes
			   the unlock and the final put frees the lkb */
			hold_lkb(lkb);
			ls->ls_stub_ms.m_result = -DLM_EUNLOCK;
			_remove_from_waiters(lkb);
			_receive_unlock_reply(lkb, &ls->ls_stub_ms);
			dlm_put_lkb(lkb);
			break;

		case DLM_MSG_CANCEL:
			hold_lkb(lkb);
			ls->ls_stub_ms.m_result = -DLM_ECANCEL;
			_remove_from_waiters(lkb);
			_receive_cancel_reply(lkb, &ls->ls_stub_ms);
			dlm_put_lkb(lkb);
			break;

		default:
			log_error(ls, "invalid lkb wait_type %d",
				  lkb->lkb_wait_type);
		}
		/* long list; be nice to the scheduler */
		schedule();
	}
	mutex_unlock(&ls->ls_waiters_mutex);
}
3221
/* Pop one RESEND-flagged lkb off the waiters list.  Returns the message
   type it was waiting for (nonzero) and the lkb via lkb_ret, or 0 and
   NULL when no flagged waiters remain. */
static int remove_resend_waiter(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;
	int rv = 0;

	mutex_lock(&ls->ls_waiters_mutex);
	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (lkb->lkb_flags & DLM_IFL_RESEND) {
			/* capture wait_type before _remove clears it */
			rv = lkb->lkb_wait_type;
			_remove_from_waiters(lkb);
			lkb->lkb_flags &= ~DLM_IFL_RESEND;
			break;
		}
	}
	mutex_unlock(&ls->ls_waiters_mutex);

	/* loop cursor is not a valid lkb when nothing matched */
	if (!rv)
		lkb = NULL;
	*lkb_ret = lkb;
	return rv;
}
3243
3244/* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
3245 master or dir-node for r. Processing the lkb may result in it being placed
3246 back on waiters. */
3247
3248int dlm_recover_waiters_post(struct dlm_ls *ls)
3249{
3250 struct dlm_lkb *lkb;
3251 struct dlm_rsb *r;
3252 int error = 0, mstype;
3253
3254 while (1) {
3255 if (dlm_locking_stopped(ls)) {
3256 log_debug(ls, "recover_waiters_post aborted");
3257 error = -EINTR;
3258 break;
3259 }
3260
3261 mstype = remove_resend_waiter(ls, &lkb);
3262 if (!mstype)
3263 break;
3264
3265 r = lkb->lkb_resource;
3266
3267 log_debug(ls, "recover_waiters_post %x type %d flags %x %s",
3268 lkb->lkb_id, mstype, lkb->lkb_flags, r->res_name);
3269
3270 switch (mstype) {
3271
3272 case DLM_MSG_LOOKUP:
3273 hold_rsb(r);
3274 lock_rsb(r);
3275 _request_lock(r, lkb);
3276 if (is_master(r))
3277 confirm_master(r, 0);
3278 unlock_rsb(r);
3279 put_rsb(r);
3280 break;
3281
3282 case DLM_MSG_REQUEST:
3283 hold_rsb(r);
3284 lock_rsb(r);
3285 _request_lock(r, lkb);
3286 if (is_master(r))
3287 confirm_master(r, 0);
3288 unlock_rsb(r);
3289 put_rsb(r);
3290 break;
3291
3292 case DLM_MSG_CONVERT:
3293 hold_rsb(r);
3294 lock_rsb(r);
3295 _convert_lock(r, lkb);
3296 unlock_rsb(r);
3297 put_rsb(r);
3298 break;
3299
3300 default:
3301 log_error(ls, "recover_waiters_post type %d", mstype);
3302 }
3303 }
3304
3305 return error;
3306}
3307
/* Remove from queue (one of r's grant/convert/wait queues) every lkb for
   which test() is true, dropping what should be the last reference on
   each.  Marks the rsb LOCKS_PURGED so dlm_grant_after_purge() revisits
   it.  Caller holds the rsb lock. */
static void purge_queue(struct dlm_rsb *r, struct list_head *queue,
			int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb))
{
	struct dlm_ls *ls = r->res_ls;
	struct dlm_lkb *lkb, *safe;

	list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) {
		if (test(ls, lkb)) {
			rsb_set_flag(r, RSB_LOCKS_PURGED);
			del_lkb(r, lkb);
			/* this put should free the lkb */
			if (!dlm_put_lkb(lkb))
				log_error(ls, "purged lkb not released");
		}
	}
}
3324
3325static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
3326{
3327 return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid));
3328}
3329
/* purge_queue() predicate: any master-copy lkb (ls unused). */
static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	return is_master_copy(lkb);
}
3334
3335static void purge_dead_locks(struct dlm_rsb *r)
3336{
3337 purge_queue(r, &r->res_grantqueue, &purge_dead_test);
3338 purge_queue(r, &r->res_convertqueue, &purge_dead_test);
3339 purge_queue(r, &r->res_waitqueue, &purge_dead_test);
3340}
3341
3342void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
3343{
3344 purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test);
3345 purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test);
3346 purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test);
3347}
3348
3349/* Get rid of locks held by nodes that are gone. */
3350
/* Get rid of locks held by nodes that are gone. */

/* Walk every root rsb in the lockspace and, where we are master, purge
   the locks of removed nodes.  Always returns 0. */
int dlm_purge_locks(struct dlm_ls *ls)
{
	struct dlm_rsb *r;

	log_debug(ls, "dlm_purge_locks");

	down_write(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		hold_rsb(r);
		lock_rsb(r);
		if (is_master(r))
			purge_dead_locks(r);
		unlock_rsb(r);
		/* unhold, not put: the root-list ref keeps r alive */
		unhold_rsb(r);

		schedule();
	}
	up_write(&ls->ls_root_sem);

	return 0;
}
3372
/* Find one rsb in hash bucket with LOCKS_PURGED set; clear the flag,
   take a reference, and return it (caller puts).  NULL if none left. */
static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
{
	struct dlm_rsb *r, *r_ret = NULL;

	read_lock(&ls->ls_rsbtbl[bucket].lock);
	list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
		if (!rsb_flag(r, RSB_LOCKS_PURGED))
			continue;
		hold_rsb(r);
		rsb_clear_flag(r, RSB_LOCKS_PURGED);
		r_ret = r;
		break;
	}
	read_unlock(&ls->ls_rsbtbl[bucket].lock);
	return r_ret;
}
3389
/* After dead-node locks were purged, revisit every flagged rsb: waiting
   locks may now be grantable.  Scans each hash bucket repeatedly until
   it yields no more purged rsbs, then moves to the next. */
void dlm_grant_after_purge(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int bucket = 0;

	while (1) {
		r = find_purged_rsb(ls, bucket);
		if (!r) {
			/* bucket exhausted; advance or finish */
			if (bucket == ls->ls_rsbtbl_size - 1)
				break;
			bucket++;
			continue;
		}
		lock_rsb(r);
		if (is_master(r)) {
			grant_pending_locks(r);
			confirm_master(r, 0);
		}
		unlock_rsb(r);
		put_rsb(r);
		schedule();
	}
}
3413
3414static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
3415 uint32_t remid)
3416{
3417 struct dlm_lkb *lkb;
3418
3419 list_for_each_entry(lkb, head, lkb_statequeue) {
3420 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
3421 return lkb;
3422 }
3423 return NULL;
3424}
3425
3426static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
3427 uint32_t remid)
3428{
3429 struct dlm_lkb *lkb;
3430
3431 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
3432 if (lkb)
3433 return lkb;
3434 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
3435 if (lkb)
3436 return lkb;
3437 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
3438 if (lkb)
3439 return lkb;
3440 return NULL;
3441}
3442
/* Initialize a master-copy lkb from a recovery (rcom_lock) message sent
   by the lock's owner when this node becomes the new master.  Returns
   -ENOMEM if an lvb is needed but can't be allocated, else 0. */
static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
				  struct dlm_rsb *r, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	int lvblen;

	lkb->lkb_nodeid = rc->rc_header.h_nodeid;
	lkb->lkb_ownpid = rl->rl_ownpid;
	lkb->lkb_remid = rl->rl_lkid;
	lkb->lkb_exflags = rl->rl_exflags;
	/* only the wire (low 16) half of the flags is meaningful here */
	lkb->lkb_flags = rl->rl_flags & 0x0000FFFF;
	lkb->lkb_flags |= DLM_IFL_MSTCPY;
	lkb->lkb_lvbseq = rl->rl_lvbseq;
	lkb->lkb_rqmode = rl->rl_rqmode;
	lkb->lkb_grmode = rl->rl_grmode;
	/* don't set lkb_status because add_lkb wants to itself */

	/* remote ast pointers serve only as presence flags */
	lkb->lkb_bastaddr = (void *) (long) (rl->rl_asts & AST_BAST);
	lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP);

	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
		lkb->lkb_lvbptr = allocate_lvb(ls);
		if (!lkb->lkb_lvbptr)
			return -ENOMEM;
		/* lvb is whatever follows the fixed rcom + rcom_lock structs */
		lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
			 sizeof(struct rcom_lock);
		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
	}

	/* Conversions between PR and CW (middle modes) need special handling.
	   The real granted mode of these converting locks cannot be determined
	   until all locks have been rebuilt on the rsb (recover_conversion) */

	if (rl->rl_wait_type == DLM_MSG_CONVERT && middle_conversion(lkb)) {
		rl->rl_status = DLM_LKSTS_CONVERT;
		lkb->lkb_grmode = DLM_LOCK_IV;
		rsb_set_flag(r, RSB_RECOVER_CONVERT);
	}

	return 0;
}
3484
3485/* This lkb may have been recovered in a previous aborted recovery so we need
3486 to check if the rsb already has an lkb with the given remote nodeid/lkid.
3487 If so we just send back a standard reply. If not, we create a new lkb with
3488 the given values and send back our lkid. We send back our lkid by sending
3489 back the rcom_lock struct we got but with the remid field filled in. */
3490
/* This lkb may have been recovered in a previous aborted recovery so we need
   to check if the rsb already has an lkb with the given remote nodeid/lkid.
   If so we just send back a standard reply.  If not, we create a new lkb with
   the given values and send back our lkid.  We send back our lkid by sending
   back the rcom_lock struct we got but with the remid field filled in. */

/* Called on the new master for each lock sent to it during recovery.
   Result (0, -EEXIST for already-recovered, or a failure code) is
   stored in rl->rl_result for the reply; our lkid goes in rl_remid. */
int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	int error;

	/* parent/child locks are not supported over recovery */
	if (rl->rl_parent_lkid) {
		error = -EOPNOTSUPP;
		goto out;
	}

	error = find_rsb(ls, rl->rl_name, rl->rl_namelen, R_MASTER, &r);
	if (error)
		goto out;

	lock_rsb(r);

	/* duplicate from an earlier, aborted recovery attempt? */
	lkb = search_remid(r, rc->rc_header.h_nodeid, rl->rl_lkid);
	if (lkb) {
		error = -EEXIST;
		goto out_remid;
	}

	error = create_lkb(ls, &lkb);
	if (error)
		goto out_unlock;

	error = receive_rcom_lock_args(ls, lkb, r, rc);
	if (error) {
		__put_lkb(ls, lkb);
		goto out_unlock;
	}

	attach_lkb(r, lkb);
	add_lkb(r, lkb, rl->rl_status);
	error = 0;

 out_remid:
	/* this is the new value returned to the lock holder for
	   saving in its process-copy lkb */
	rl->rl_remid = lkb->lkb_id;

 out_unlock:
	unlock_rsb(r);
	put_rsb(r);
 out:
	if (error)
		log_print("recover_master_copy %d %x", error, rl->rl_lkid);
	rl->rl_result = error;
	return error;
}
3543
/* Called on the lock holder when the new master replies to the lock we
   sent it in recovery: save the master's lkid (rl_remid) in our
   process-copy lkb and ack dlm_recover_locks().  -EEXIST from the
   master just means an earlier aborted recovery already delivered it. */
int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	int error;

	error = find_lkb(ls, rl->rl_lkid, &lkb);
	if (error) {
		log_error(ls, "recover_process_copy no lkid %x", rl->rl_lkid);
		return error;
	}

	DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););

	/* the result dlm_recover_master_copy() stored on the master */
	error = rl->rl_result;

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	switch (error) {
	case -EEXIST:
		log_debug(ls, "master copy exists %x", lkb->lkb_id);
		/* fall through */
	case 0:
		lkb->lkb_remid = rl->rl_remid;
		break;
	default:
		log_error(ls, "dlm_recover_process_copy unknown error %d %x",
			  error, lkb->lkb_id);
	}

	/* an ack for dlm_recover_locks() which waits for replies from
	   all the locks it sends to new masters */
	dlm_recovered_lock(r);

	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);

	return 0;
}
3587
/* Userspace (device interface) lock request.  Takes ownership of ua:
   once attached to the lkb it is freed by free_lkb(); on early failure
   it is freed here.  On success (including -EINPROGRESS/-EAGAIN, which
   are reported to the user via ast) the lkb is added to the process's
   per-proc lock list.  Returns 0 or a negative errno. */
int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
		     int mode, uint32_t flags, void *name, unsigned int namelen,
		     uint32_t parent_lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	int error;

	lock_recovery(ls);

	error = create_lkb(ls, &lkb);
	if (error) {
		kfree(ua);
		goto out;
	}

	if (flags & DLM_LKF_VALBLK) {
		ua->lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
		if (!ua->lksb.sb_lvbptr) {
			kfree(ua);
			__put_lkb(ls, lkb);
			error = -ENOMEM;
			goto out;
		}
	}

	/* After ua is attached to lkb it will be freed by free_lkb().
	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
	   lock and that lkb_astparam is the dlm_user_args structure. */

	error = set_lock_args(mode, &ua->lksb, flags, namelen, parent_lkid,
			      DLM_FAKE_USER_AST, ua, DLM_FAKE_USER_AST, &args);
	lkb->lkb_flags |= DLM_IFL_USER;
	ua->old_mode = DLM_LOCK_IV;

	if (error) {
		/* NOTE(review): ua (and any lvb buffer) looks leaked on this
		   path if set_lock_args() failed before attaching ua to the
		   lkb -- confirm whether __put_lkb()/free_lkb() frees it here */
		__put_lkb(ls, lkb);
		goto out;
	}

	error = request_lock(ls, lkb, name, namelen, &args);

	switch (error) {
	case 0:
		break;
	case -EINPROGRESS:
		error = 0;
		break;
	case -EAGAIN:
		error = 0;
		/* fall through */
	default:
		__put_lkb(ls, lkb);
		goto out;
	}

	/* add this new lkb to the per-process list of locks */
	spin_lock(&ua->proc->locks_spin);
	kref_get(&lkb->lkb_ref);
	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
	spin_unlock(&ua->proc->locks_spin);
 out:
	unlock_recovery(ls);
	return error;
}
3653
/* Convert an existing userspace lock to a new mode.  ua_tmp carries the
   fresh callback/lksb pointers from userspace; the relevant fields are
   copied onto the lkb's long-lived dlm_user_args and ua_tmp is always
   freed before returning.  -EINPROGRESS/-EAGAIN are reported as 0. */
3654int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3655		int mode, uint32_t flags, uint32_t lkid, char *lvb_in)
3656{
3657	struct dlm_lkb *lkb;
3658	struct dlm_args args;
3659	struct dlm_user_args *ua;
3660	int error;
3661
3662	lock_recovery(ls);
3663
3664	error = find_lkb(ls, lkid, &lkb);
3665	if (error)
3666		goto out;
3667
3668	/* user can change the params on its lock when it converts it, or
3669	   add an lvb that didn't exist before */
3670
3671	ua = (struct dlm_user_args *)lkb->lkb_astparam;
3672
3673	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
3674		ua->lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
3675		if (!ua->lksb.sb_lvbptr) {
3676			error = -ENOMEM;
3677			goto out_put;
3678		}
3679	}
3680	if (lvb_in && ua->lksb.sb_lvbptr)
3681		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
3682
	/* refresh userspace callback addresses/params for this conversion */
3683	ua->castparam = ua_tmp->castparam;
3684	ua->castaddr = ua_tmp->castaddr;
3685	ua->bastparam = ua_tmp->bastparam;
3686	ua->bastaddr = ua_tmp->bastaddr;
3687	ua->user_lksb = ua_tmp->user_lksb;
3688	ua->old_mode = lkb->lkb_grmode;
3689
3690	error = set_lock_args(mode, &ua->lksb, flags, 0, 0, DLM_FAKE_USER_AST,
3691			      ua, DLM_FAKE_USER_AST, &args);
3692	if (error)
3693		goto out_put;
3694
3695	error = convert_lock(ls, lkb, &args);
3696
3697	if (error == -EINPROGRESS || error == -EAGAIN)
3698		error = 0;
3699 out_put:
3700	dlm_put_lkb(lkb);
3701 out:
3702	unlock_recovery(ls);
3703	kfree(ua_tmp);
3704	return error;
3705}
3706
/* Unlock a userspace lock.  On a completed unlock (-DLM_EUNLOCK, reported
   as 0) the lkb is removed from the owning process's locks list and the
   reference taken by dlm_user_request is dropped.
   NOTE(review): unlike dlm_user_convert/cancel paths, ua_tmp is not freed
   here — presumably the caller owns it on this path; verify against the
   device write handler. */
3707int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3708		uint32_t flags, uint32_t lkid, char *lvb_in)
3709{
3710	struct dlm_lkb *lkb;
3711	struct dlm_args args;
3712	struct dlm_user_args *ua;
3713	int error;
3714
3715	lock_recovery(ls);
3716
3717	error = find_lkb(ls, lkid, &lkb);
3718	if (error)
3719		goto out;
3720
3721	ua = (struct dlm_user_args *)lkb->lkb_astparam;
3722
3723	if (lvb_in && ua->lksb.sb_lvbptr)
3724		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
3725	ua->castparam = ua_tmp->castparam;
3726	ua->user_lksb = ua_tmp->user_lksb;
3727
3728	error = set_unlock_args(flags, ua, &args);
3729	if (error)
3730		goto out_put;
3731
3732	error = unlock_lock(ls, lkb, &args);
3733
3734	if (error == -DLM_EUNLOCK)
3735		error = 0;
3736	if (error)
3737		goto out_put;
3738
3739	spin_lock(&ua->proc->locks_spin);
3740	list_del_init(&lkb->lkb_ownqueue);
3741	spin_unlock(&ua->proc->locks_spin);
3742
3743	/* this removes the reference for the proc->locks list added by
3744	   dlm_user_request */
3745	unhold_lkb(lkb);
3746 out_put:
3747	dlm_put_lkb(lkb);
3748 out:
3749	unlock_recovery(ls);
3750	return error;
3751}
3752
/* Cancel an in-progress userspace lock request.  -DLM_ECANCEL (successful
   cancel) is reported as 0.  If the lkb never got a granted mode
   (grmode == DLM_LOCK_IV) it was only on the WAITING queue, so it is also
   removed from the process's locks list and its proc reference dropped. */
3753int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3754		uint32_t flags, uint32_t lkid)
3755{
3756	struct dlm_lkb *lkb;
3757	struct dlm_args args;
3758	struct dlm_user_args *ua;
3759	int error;
3760
3761	lock_recovery(ls);
3762
3763	error = find_lkb(ls, lkid, &lkb);
3764	if (error)
3765		goto out;
3766
3767	ua = (struct dlm_user_args *)lkb->lkb_astparam;
3768	ua->castparam = ua_tmp->castparam;
3769	ua->user_lksb = ua_tmp->user_lksb;
3770
3771	error = set_unlock_args(flags, ua, &args);
3772	if (error)
3773		goto out_put;
3774
3775	error = cancel_lock(ls, lkb, &args);
3776
3777	if (error == -DLM_ECANCEL)
3778		error = 0;
3779	if (error)
3780		goto out_put;
3781
3782	/* this lkb was removed from the WAITING queue */
3783	if (lkb->lkb_grmode == DLM_LOCK_IV) {
3784		spin_lock(&ua->proc->locks_spin);
3785		list_del_init(&lkb->lkb_ownqueue);
3786		spin_unlock(&ua->proc->locks_spin);
3787		unhold_lkb(lkb);
3788	}
3789 out_put:
3790	dlm_put_lkb(lkb);
3791 out:
3792	unlock_recovery(ls);
3793	return error;
3794}
3795
/* Detach a PERSISTENT lock from its dying process: free the userspace args
   (and its lvb buffer) and clear lkb_astparam so the lock survives as an
   orphan with no userspace owner. */
3796static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
3797{
3798	struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
3799
3800	if (ua->lksb.sb_lvbptr)
3801		kfree(ua->lksb.sb_lvbptr);
3802	kfree(ua);
3803	lkb->lkb_astparam = (long)NULL;
3804
3805	/* TODO: propogate to master if needed */
3806	return 0;
3807}
3808
3809/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
3810   Regardless of what rsb queue the lock is on, it's removed and freed. */
3811
/* Force-unlock one lock belonging to a closing process; -DLM_EUNLOCK is
   the success case and is mapped to 0. */
3812static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
3813{
3814	struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
3815	struct dlm_args args;
3816	int error;
3817
3818	/* FIXME: we need to handle the case where the lkb is in limbo
3819	   while the rsb is being looked up, currently we assert in
3820	   _unlock_lock/is_remote because rsb nodeid is -1. */
3821
3822	set_unlock_args(DLM_LKF_FORCEUNLOCK, ua, &args);
3823
3824	error = unlock_lock(ls, lkb, &args);
3825	if (error == -DLM_EUNLOCK)
3826		error = 0;
3827	return error;
3828}
3829
3830/* The ls_clear_proc_locks mutex protects against dlm_user_add_asts() which
3831   1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
3832   which we clear here. */
3833
3834/* proc CLOSING flag is set so no more device_reads should look at proc->asts
3835   list, and no more device_writes should add lkb's to proc->locks list; so we
3836   shouldn't need to take asts_spin or locks_spin here. this assumes that
3837   device reads/writes/closes are serialized -- FIXME: we may need to serialize
3838   them ourself. */
3839
/* Tear down every lock still owned by a userspace process that is closing
   its device: persistent locks are orphaned, all others force-unlocked,
   and the proc->locks reference on each lkb is dropped. */
3840void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
3841{
3842	struct dlm_lkb *lkb, *safe;
3843
3844	lock_recovery(ls);
3845	mutex_lock(&ls->ls_clear_proc_locks);
3846
3847	list_for_each_entry_safe(lkb, safe, &proc->locks, lkb_ownqueue) {
		/* drop any ast still queued for delivery to this proc,
		   plus the reference the ast queue held */
3848		if (lkb->lkb_ast_type) {
3849			list_del(&lkb->lkb_astqueue);
3850			unhold_lkb(lkb);
3851		}
3852
3853		list_del_init(&lkb->lkb_ownqueue);
3854
3855		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) {
3856			lkb->lkb_flags |= DLM_IFL_ORPHAN;
3857			orphan_proc_lock(ls, lkb);
3858		} else {
3859			lkb->lkb_flags |= DLM_IFL_DEAD;
3860			unlock_proc_lock(ls, lkb);
3861		}
3862
3863		/* this removes the reference for the proc->locks list
3864		   added by dlm_user_request, it may result in the lkb
3865		   being freed */
3866
3867		dlm_put_lkb(lkb);
3868	}
3869	mutex_unlock(&ls->ls_clear_proc_locks);
3870	unlock_recovery(ls);
3871}
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
new file mode 100644
index 000000000000..0843a3073ec3
--- /dev/null
+++ b/fs/dlm/lock.h
@@ -0,0 +1,62 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#ifndef __LOCK_DOT_H__
14#define __LOCK_DOT_H__
15
/* Public interface of lock.c: rsb/lkb printing and refcounting, message
   dispatch, recovery hooks, and the dlm_user_* entry points used by the
   userspace device interface. */
16void dlm_print_rsb(struct dlm_rsb *r);
17void dlm_dump_rsb(struct dlm_rsb *r);
18void dlm_print_lkb(struct dlm_lkb *lkb);
19int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery);
20int dlm_modes_compat(int mode1, int mode2);
21int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
22	unsigned int flags, struct dlm_rsb **r_ret);
23void dlm_put_rsb(struct dlm_rsb *r);
24void dlm_hold_rsb(struct dlm_rsb *r);
25int dlm_put_lkb(struct dlm_lkb *lkb);
26void dlm_scan_rsbs(struct dlm_ls *ls);
27
28int dlm_purge_locks(struct dlm_ls *ls);
29void dlm_purge_mstcpy_locks(struct dlm_rsb *r);
30void dlm_grant_after_purge(struct dlm_ls *ls);
31int dlm_recover_waiters_post(struct dlm_ls *ls);
32void dlm_recover_waiters_pre(struct dlm_ls *ls);
33int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
34int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
35
36int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, int mode,
37	uint32_t flags, void *name, unsigned int namelen, uint32_t parent_lkid);
38int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
39	int mode, uint32_t flags, uint32_t lkid, char *lvb_in);
40int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
41	uint32_t flags, uint32_t lkid, char *lvb_in);
42int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
43	uint32_t flags, uint32_t lkid);
44void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc);
45
/* An rsb is mastered locally when res_nodeid is 0. */
46static inline int is_master(struct dlm_rsb *r)
47{
48	return !r->res_nodeid;
49}
50
51static inline void lock_rsb(struct dlm_rsb *r)
52{
53	mutex_lock(&r->res_mutex);
54}
55
56static inline void unlock_rsb(struct dlm_rsb *r)
57{
58	mutex_unlock(&r->res_mutex);
59}
60
61#endif
62
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
new file mode 100644
index 000000000000..109333c8ecb9
--- /dev/null
+++ b/fs/dlm/lockspace.c
@@ -0,0 +1,717 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "member.h"
17#include "recoverd.h"
18#include "ast.h"
19#include "dir.h"
20#include "lowcomms.h"
21#include "config.h"
22#include "memory.h"
23#include "lock.h"
24#include "recover.h"
25
/* Debugfs hooks become no-ops when CONFIG_DLM_DEBUG is off. */
26#ifdef CONFIG_DLM_DEBUG
27int dlm_create_debug_file(struct dlm_ls *ls);
28void dlm_delete_debug_file(struct dlm_ls *ls);
29#else
30static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
31static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
32#endif
33
/* Module-wide state: number of lockspaces (ls_count, under ls_lock),
   the global lockspace list (lslist, under lslist_lock), and the
   rsb-scanning kthread. */
34static int ls_count;
35static struct mutex ls_lock;
36static struct list_head lslist;
37static spinlock_t lslist_lock;
38static struct task_struct * scand_task;
39
40
/* sysfs "control" store: writing 0 stops the lockspace, 1 starts it;
   anything else is -EINVAL. */
41static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
42{
43	ssize_t ret = len;
44	int n = simple_strtol(buf, NULL, 0);
45
46	switch (n) {
47	case 0:
48		dlm_ls_stop(ls);
49		break;
50	case 1:
51		dlm_ls_start(ls);
52		break;
53	default:
54		ret = -EINVAL;
55	}
56	return ret;
57}
58
/* sysfs "event_done" store: userspace posts the uevent result and wakes
   do_uevent(), which is blocked on ls_uevent_wait. */
59static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
60{
61	ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
62	set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
63	wake_up(&ls->ls_uevent_wait);
64	return len;
65}
66
/* sysfs "id" show: the lockspace's cluster-wide id. */
67static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
68{
69	return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
70}
71
/* sysfs "id" store: userspace assigns the global lockspace id. */
72static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
73{
74	ls->ls_global_id = simple_strtoul(buf, NULL, 0);
75	return len;
76}
77
/* sysfs "recover_status" show: recovery status bits, in hex. */
78static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
79{
80	uint32_t status = dlm_recover_status(ls);
81	return snprintf(buf, PAGE_SIZE, "%x\n", status);
82}
83
/* sysfs "recover_nodeid" show: nodeid currently being recovered. */
84static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
85{
86	return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
87}
88
/* Per-lockspace sysfs attribute: pairs a kobject attribute with typed
   show/store callbacks taking the dlm_ls directly. */
89struct dlm_attr {
90	struct attribute attr;
91	ssize_t (*show)(struct dlm_ls *, char *);
92	ssize_t (*store)(struct dlm_ls *, const char *, size_t);
93};
94
95static struct dlm_attr dlm_attr_control = {
96	.attr = {.name = "control", .mode = S_IWUSR},
97	.store = dlm_control_store
98};
99
100static struct dlm_attr dlm_attr_event = {
101	.attr = {.name = "event_done", .mode = S_IWUSR},
102	.store = dlm_event_store
103};
104
105static struct dlm_attr dlm_attr_id = {
106	.attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
107	.show = dlm_id_show,
108	.store = dlm_id_store
109};
110
111static struct dlm_attr dlm_attr_recover_status = {
112	.attr = {.name = "recover_status", .mode = S_IRUGO},
113	.show = dlm_recover_status_show
114};
115
116static struct dlm_attr dlm_attr_recover_nodeid = {
117	.attr = {.name = "recover_nodeid", .mode = S_IRUGO},
118	.show = dlm_recover_nodeid_show
119};
120
/* NULL-terminated list handed to the ktype as default attributes. */
121static struct attribute *dlm_attrs[] = {
122	&dlm_attr_control.attr,
123	&dlm_attr_event.attr,
124	&dlm_attr_id.attr,
125	&dlm_attr_recover_status.attr,
126	&dlm_attr_recover_nodeid.attr,
127	NULL,
128};
129
/* Generic sysfs show: recover the dlm_ls and dlm_attr from the kobject
   and dispatch to the attribute's show callback (0 bytes if none). */
130static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
131			     char *buf)
132{
133	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
134	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
135	return a->show ? a->show(ls, buf) : 0;
136}
137
/* Generic sysfs store: dispatch to the attribute's store callback;
   silently accept the write (return len) if the attribute has none. */
138static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
139			      const char *buf, size_t len)
140{
141	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
142	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
143	return a->store ? a->store(ls, buf, len) : len;
144}
145
146static struct sysfs_ops dlm_attr_ops = {
147	.show = dlm_attr_show,
148	.store = dlm_attr_store,
149};
150
151static struct kobj_type dlm_ktype = {
152	.default_attrs = dlm_attrs,
153	.sysfs_ops = &dlm_attr_ops,
154};
155
/* /sys/kernel/dlm: parent kset for all per-lockspace kobjects. */
156static struct kset dlm_kset = {
157	.subsys = &kernel_subsys,
158	.kobj = {.name = "dlm",},
159	.ktype = &dlm_ktype,
160};
161
/* Name the lockspace kobject after ls_name and attach it to the dlm kset.
   NOTE(review): snprintf truncates if namelen == DLM_LOCKSPACE_LEN — confirm
   callers guarantee room for the NUL. */
162static int kobject_setup(struct dlm_ls *ls)
163{
164	char lsname[DLM_LOCKSPACE_LEN];
165	int error;
166
167	memset(lsname, 0, DLM_LOCKSPACE_LEN);
168	snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);
169
170	error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
171	if (error)
172		return error;
173
174	ls->ls_kobj.kset = &dlm_kset;
175	ls->ls_kobj.ktype = &dlm_ktype;
176	return 0;
177}
178
/* Emit an ONLINE (in != 0) or OFFLINE uevent and block until userspace
   acknowledges via the "event_done" sysfs file (dlm_event_store).
   Returns the result userspace posted, or -ERESTARTSYS if interrupted. */
179static int do_uevent(struct dlm_ls *ls, int in)
180{
181	int error;
182
183	if (in)
184		kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
185	else
186		kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
187
188	error = wait_event_interruptible(ls->ls_uevent_wait,
189			test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
190	if (error)
191		goto out;
192
193	error = ls->ls_uevent_result;
194 out:
195	return error;
196}
197
198
/* Module init for the lockspace layer: set up global state and register
   the /sys/kernel/dlm kset. */
199int dlm_lockspace_init(void)
200{
201	int error;
202
203	ls_count = 0;
204	mutex_init(&ls_lock);
205	INIT_LIST_HEAD(&lslist);
206	spin_lock_init(&lslist_lock);
207
208	error = kset_register(&dlm_kset);
209	if (error)
210		printk("dlm_lockspace_init: cannot register kset %d\n", error);
211	return error;
212}
213
/* Module exit: tear down the sysfs kset registered at init. */
214void dlm_lockspace_exit(void)
215{
216	kset_unregister(&dlm_kset);
217}
218
/* Kthread body: periodically scan every lockspace's rsb toss lists
   (dlm_scan_rsbs) every dlm_config.scan_secs seconds.
   NOTE(review): lslist is walked here without lslist_lock — looks racy
   against lockspace add/remove; confirm what serializes this. */
219static int dlm_scand(void *data)
220{
221	struct dlm_ls *ls;
222
223	while (!kthread_should_stop()) {
224		list_for_each_entry(ls, &lslist, ls_list)
225			dlm_scan_rsbs(ls);
226		schedule_timeout_interruptible(dlm_config.scan_secs * HZ);
227	}
228	return 0;
229}
230
/* Launch the dlm_scand kthread; stash the task for dlm_scand_stop(). */
231static int dlm_scand_start(void)
232{
233	struct task_struct *p;
234	int error = 0;
235
236	p = kthread_run(dlm_scand, NULL, "dlm_scand");
237	if (IS_ERR(p))
238		error = PTR_ERR(p);
239	else
240		scand_task = p;
241	return error;
242}
243
/* Stop the scanning kthread started by dlm_scand_start(). */
244static void dlm_scand_stop(void)
245{
246	kthread_stop(scand_task);
247}
248
/* Look up a lockspace by name; returns it WITHOUT taking a ls_count
   reference (unlike the global/local/device lookups below), or NULL. */
249static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
250{
251	struct dlm_ls *ls;
252
253	spin_lock(&lslist_lock);
254
255	list_for_each_entry(ls, &lslist, ls_list) {
256		if (ls->ls_namelen == namelen &&
257		    memcmp(ls->ls_name, name, namelen) == 0)
258			goto out;
259	}
260	ls = NULL;
261 out:
262	spin_unlock(&lslist_lock);
263	return ls;
264}
265
/* Look up a lockspace by its global id; bumps ls_count on success
   (release with dlm_put_lockspace), or returns NULL. */
266struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
267{
268	struct dlm_ls *ls;
269
270	spin_lock(&lslist_lock);
271
272	list_for_each_entry(ls, &lslist, ls_list) {
273		if (ls->ls_global_id == id) {
274			ls->ls_count++;
275			goto out;
276		}
277	}
278	ls = NULL;
279 out:
280	spin_unlock(&lslist_lock);
281	return ls;
282}
283
/* Look up a lockspace by its local handle (the dlm_ls pointer handed out
   as the opaque lockspace token); bumps ls_count on success. */
284struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
285{
286	struct dlm_ls *ls;
287
288	spin_lock(&lslist_lock);
289	list_for_each_entry(ls, &lslist, ls_list) {
290		if (ls->ls_local_handle == lockspace) {
291			ls->ls_count++;
292			goto out;
293		}
294	}
295	ls = NULL;
296 out:
297	spin_unlock(&lslist_lock);
298	return ls;
299}
300
/* Look up a lockspace by its misc-device minor number (userspace device
   interface); bumps ls_count on success. */
301struct dlm_ls *dlm_find_lockspace_device(int minor)
302{
303	struct dlm_ls *ls;
304
305	spin_lock(&lslist_lock);
306	list_for_each_entry(ls, &lslist, ls_list) {
307		if (ls->ls_device.minor == minor) {
308			ls->ls_count++;
309			goto out;
310		}
311	}
312	ls = NULL;
313 out:
314	spin_unlock(&lslist_lock);
315	return ls;
316}
317
/* Drop a ls_count reference taken by one of the dlm_find_lockspace_*
   lookups; remove_lockspace() waits for this count to reach zero. */
318void dlm_put_lockspace(struct dlm_ls *ls)
319{
320	spin_lock(&lslist_lock);
321	ls->ls_count--;
322	spin_unlock(&lslist_lock);
323}
324
/* Unlink the lockspace from the global list, polling once per second
   until all ls_count references have been dropped. */
325static void remove_lockspace(struct dlm_ls *ls)
326{
327	for (;;) {
328		spin_lock(&lslist_lock);
329		if (ls->ls_count == 0) {
330			list_del(&ls->ls_list);
331			spin_unlock(&lslist_lock);
332			return;
333		}
334		spin_unlock(&lslist_lock);
335		ssleep(1);
336	}
337}
338
/* Start the module-wide service threads (astd, scand, lowcomms) in order,
   unwinding already-started ones on failure via the goto chain. */
339static int threads_start(void)
340{
341	int error;
342
343	/* Thread which process lock requests for all lockspace's */
344	error = dlm_astd_start();
345	if (error) {
346		log_print("cannot start dlm_astd thread %d", error);
347		goto fail;
348	}
349
350	error = dlm_scand_start();
351	if (error) {
352		log_print("cannot start dlm_scand thread %d", error);
353		goto astd_fail;
354	}
355
356	/* Thread for sending/receiving messages for all lockspace's */
357	error = dlm_lowcomms_start();
358	if (error) {
359		log_print("cannot start dlm lowcomms %d", error);
360		goto scand_fail;
361	}
362
363	return 0;
364
365 scand_fail:
366	dlm_scand_stop();
367 astd_fail:
368	dlm_astd_stop();
369 fail:
370	return error;
371}
372
/* Stop all module-wide service threads (reverse of threads_start). */
373static void threads_stop(void)
374{
375	dlm_scand_stop();
376	dlm_lowcomms_stop();
377	dlm_astd_stop();
378}
379
/* Allocate and initialize a lockspace: hash tables, lists, locks, recovery
   state; add it to lslist, start its recoverd thread, register its sysfs
   kobject and wait for userspace to complete the join via uevent.
   Returns 0 with *lockspace set, -EEXIST if the name is already in use
   (also setting *lockspace), or a negative errno with everything undone. */
380static int new_lockspace(char *name, int namelen, void **lockspace,
381			 uint32_t flags, int lvblen)
382{
383	struct dlm_ls *ls;
384	int i, size, error = -ENOMEM;
385
386	if (namelen > DLM_LOCKSPACE_LEN)
387		return -EINVAL;
388
	/* lvb length must be a positive multiple of 8 */
389	if (!lvblen || (lvblen % 8))
390		return -EINVAL;
391
392	if (!try_module_get(THIS_MODULE))
393		return -EINVAL;
394
395	ls = dlm_find_lockspace_name(name, namelen);
396	if (ls) {
397		*lockspace = ls;
398		module_put(THIS_MODULE);
399		return -EEXIST;
400	}
401
	/* ls_name storage is allocated inline after the struct */
402	ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
403	if (!ls)
404		goto out;
405	memcpy(ls->ls_name, name, namelen);
406	ls->ls_namelen = namelen;
407	ls->ls_exflags = flags;
408	ls->ls_lvblen = lvblen;
409	ls->ls_count = 0;
410	ls->ls_flags = 0;
411
412	size = dlm_config.rsbtbl_size;
413	ls->ls_rsbtbl_size = size;
414
415	ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
416	if (!ls->ls_rsbtbl)
417		goto out_lsfree;
418	for (i = 0; i < size; i++) {
419		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
420		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
421		rwlock_init(&ls->ls_rsbtbl[i].lock);
422	}
423
424	size = dlm_config.lkbtbl_size;
425	ls->ls_lkbtbl_size = size;
426
427	ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
428	if (!ls->ls_lkbtbl)
429		goto out_rsbfree;
430	for (i = 0; i < size; i++) {
431		INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
432		rwlock_init(&ls->ls_lkbtbl[i].lock);
433		ls->ls_lkbtbl[i].counter = 1;
434	}
435
436	size = dlm_config.dirtbl_size;
437	ls->ls_dirtbl_size = size;
438
439	ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
440	if (!ls->ls_dirtbl)
441		goto out_lkbfree;
442	for (i = 0; i < size; i++) {
443		INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
444		rwlock_init(&ls->ls_dirtbl[i].lock);
445	}
446
447	INIT_LIST_HEAD(&ls->ls_waiters);
448	mutex_init(&ls->ls_waiters_mutex);
449
450	INIT_LIST_HEAD(&ls->ls_nodes);
451	INIT_LIST_HEAD(&ls->ls_nodes_gone);
452	ls->ls_num_nodes = 0;
453	ls->ls_low_nodeid = 0;
454	ls->ls_total_weight = 0;
455	ls->ls_node_array = NULL;
456
457	memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
458	ls->ls_stub_rsb.res_ls = ls;
459
460	ls->ls_debug_rsb_dentry = NULL;
461	ls->ls_debug_waiters_dentry = NULL;
462
463	init_waitqueue_head(&ls->ls_uevent_wait);
464	ls->ls_uevent_result = 0;
465
466	ls->ls_recoverd_task = NULL;
467	mutex_init(&ls->ls_recoverd_active);
468	spin_lock_init(&ls->ls_recover_lock);
469	ls->ls_recover_status = 0;
470	ls->ls_recover_seq = 0;
471	ls->ls_recover_args = NULL;
472	init_rwsem(&ls->ls_in_recovery);
473	INIT_LIST_HEAD(&ls->ls_requestqueue);
474	mutex_init(&ls->ls_requestqueue_mutex);
475	mutex_init(&ls->ls_clear_proc_locks);
476
477	ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
478	if (!ls->ls_recover_buf)
479		goto out_dirfree;
480
481	INIT_LIST_HEAD(&ls->ls_recover_list);
482	spin_lock_init(&ls->ls_recover_list_lock);
483	ls->ls_recover_list_count = 0;
484	ls->ls_local_handle = ls;
485	init_waitqueue_head(&ls->ls_wait_general);
486	INIT_LIST_HEAD(&ls->ls_root_list);
487	init_rwsem(&ls->ls_root_sem);
488
	/* held for write until first recovery completes */
489	down_write(&ls->ls_in_recovery);
490
491	spin_lock(&lslist_lock);
492	list_add(&ls->ls_list, &lslist);
493	spin_unlock(&lslist_lock);
494
495	/* needs to find ls in lslist */
496	error = dlm_recoverd_start(ls);
497	if (error) {
498		log_error(ls, "can't start dlm_recoverd %d", error);
499		goto out_rcomfree;
500	}
501
502	dlm_create_debug_file(ls);
503
504	error = kobject_setup(ls);
505	if (error)
506		goto out_del;
507
508	error = kobject_register(&ls->ls_kobj);
509	if (error)
510		goto out_del;
511
	/* block until userspace finishes joining us to the cluster */
512	error = do_uevent(ls, 1);
513	if (error)
514		goto out_unreg;
515
516	*lockspace = ls;
517	return 0;
518
519 out_unreg:
520	kobject_unregister(&ls->ls_kobj);
521 out_del:
522	dlm_delete_debug_file(ls);
523	dlm_recoverd_stop(ls);
524 out_rcomfree:
525	spin_lock(&lslist_lock);
526	list_del(&ls->ls_list);
527	spin_unlock(&lslist_lock);
528	kfree(ls->ls_recover_buf);
529 out_dirfree:
530	kfree(ls->ls_dirtbl);
531 out_lkbfree:
532	kfree(ls->ls_lkbtbl);
533 out_rsbfree:
534	kfree(ls->ls_rsbtbl);
535 out_lsfree:
536	kfree(ls);
537 out:
538	module_put(THIS_MODULE);
539	return error;
540}
541
/* Public entry point: create (or find) a lockspace, starting the shared
   service threads before the first one.  ls_lock serializes creation and
   keeps ls_count in step with threads_start/threads_stop. */
542int dlm_new_lockspace(char *name, int namelen, void **lockspace,
543		      uint32_t flags, int lvblen)
544{
545	int error = 0;
546
547	mutex_lock(&ls_lock);
548	if (!ls_count)
549		error = threads_start();
550	if (error)
551		goto out;
552
553	error = new_lockspace(name, namelen, lockspace, flags, lvblen);
554	if (!error)
555		ls_count++;
556 out:
557	mutex_unlock(&ls_lock);
558	return error;
559}
560
561/* Return 1 if the lockspace still has active remote locks,
562 *  2 if the lockspace still has active local locks.
563 */
564static int lockspace_busy(struct dlm_ls *ls)
565{
566	int i, lkb_found = 0;
567	struct dlm_lkb *lkb;
568
569	/* NOTE: We check the lockidtbl here rather than the resource table.
570	   This is because there may be LKBs queued as ASTs that have been
571	   unlinked from their RSBs and are pending deletion once the AST has
572	   been delivered */
573
574	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
575		read_lock(&ls->ls_lkbtbl[i].lock);
576		if (!list_empty(&ls->ls_lkbtbl[i].list)) {
577			lkb_found = 1;
578			list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
579					    lkb_idtbl_list) {
				/* nodeid 0 means a locally owned lock */
580				if (!lkb->lkb_nodeid) {
581					read_unlock(&ls->ls_lkbtbl[i].lock);
582					return 2;
583				}
584			}
585		}
586		read_unlock(&ls->ls_lkbtbl[i].lock);
587	}
	/* 0 = no lkbs at all, 1 = only remote (master-copy) lkbs remain */
588	return lkb_found;
589}
590
/* Tear down a lockspace.  Refuses with -EBUSY unless the force level
   exceeds how busy the space is (see lockspace_busy); then notifies
   userspace (unless force==3), stops recoverd, waits out references,
   and frees every lkb, rsb, table and the lockspace itself.  Stops the
   shared threads when the last lockspace goes away. */
591static int release_lockspace(struct dlm_ls *ls, int force)
592{
593	struct dlm_lkb *lkb;
594	struct dlm_rsb *rsb;
595	struct list_head *head;
596	int i;
597	int busy = lockspace_busy(ls);
598
599	if (busy > force)
600		return -EBUSY;
601
602	if (force < 3)
603		do_uevent(ls, 0);
604
605	dlm_recoverd_stop(ls);
606
607	remove_lockspace(ls);
608
609	dlm_delete_debug_file(ls);
610
	/* keep astd from touching lkbs while we free them */
611	dlm_astd_suspend();
612
613	kfree(ls->ls_recover_buf);
614
615	/*
616	 * Free direntry structs.
617	 */
618
619	dlm_dir_clear(ls);
620	kfree(ls->ls_dirtbl);
621
622	/*
623	 * Free all lkb's on lkbtbl[] lists.
624	 */
625
626	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
627		head = &ls->ls_lkbtbl[i].list;
628		while (!list_empty(head)) {
629			lkb = list_entry(head->next, struct dlm_lkb,
630					 lkb_idtbl_list);
631
632			list_del(&lkb->lkb_idtbl_list);
633
634			dlm_del_ast(lkb);
635
636			if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
637				free_lvb(lkb->lkb_lvbptr);
638
639			free_lkb(lkb);
640		}
641	}
642	dlm_astd_resume();
643
644	kfree(ls->ls_lkbtbl);
645
646	/*
647	 * Free all rsb's on rsbtbl[] lists
648	 */
649
650	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
651		head = &ls->ls_rsbtbl[i].list;
652		while (!list_empty(head)) {
653			rsb = list_entry(head->next, struct dlm_rsb,
654					 res_hashchain);
655
656			list_del(&rsb->res_hashchain);
657			free_rsb(rsb);
658		}
659
		/* tossed (unused, cached) rsbs are freed the same way */
660		head = &ls->ls_rsbtbl[i].toss;
661		while (!list_empty(head)) {
662			rsb = list_entry(head->next, struct dlm_rsb,
663					 res_hashchain);
664			list_del(&rsb->res_hashchain);
665			free_rsb(rsb);
666		}
667	}
668
669	kfree(ls->ls_rsbtbl);
670
671	/*
672	 * Free structures on any other lists
673	 */
674
675	kfree(ls->ls_recover_args);
676	dlm_clear_free_entries(ls);
677	dlm_clear_members(ls);
678	dlm_clear_members_gone(ls);
679	kfree(ls->ls_node_array);
680	kobject_unregister(&ls->ls_kobj);
681	kfree(ls);
682
683	mutex_lock(&ls_lock);
684	ls_count--;
685	if (!ls_count)
686		threads_stop();
687	mutex_unlock(&ls_lock);
688
689	module_put(THIS_MODULE);
690	return 0;
691}
692
693/*
694 * Called when a system has released all its locks and is not going to use the
695 * lockspace any longer.  We free everything we're managing for this lockspace.
696 * Remaining nodes will go through the recovery process as if we'd died.  The
697 * lockspace must continue to function as usual, participating in recoveries,
698 * until this returns.
699 *
700 * Force has 4 possible values:
701 *   0 - don't destroy lockspace if it has any LKBs
702 *   1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
703 *   2 - destroy lockspace regardless of LKBs
704 *   3 - destroy lockspace as part of a forced shutdown
705 */
706
707int dlm_release_lockspace(void *lockspace, int force)
708{
709	struct dlm_ls *ls;
710
	/* validate the handle; drop the lookup's reference immediately so
	   remove_lockspace() inside release_lockspace() can drain ls_count */
711	ls = dlm_find_lockspace_local(lockspace);
712	if (!ls)
713		return -EINVAL;
714	dlm_put_lockspace(ls);
715	return release_lockspace(ls, force);
716}
717
diff --git a/fs/dlm/lockspace.h b/fs/dlm/lockspace.h
new file mode 100644
index 000000000000..891eabbdd021
--- /dev/null
+++ b/fs/dlm/lockspace.h
@@ -0,0 +1,25 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __LOCKSPACE_DOT_H__
15#define __LOCKSPACE_DOT_H__
16
/* Lockspace lifecycle and lookup.  The find_* lookups take a reference
   that must be released with dlm_put_lockspace(). */
17int dlm_lockspace_init(void);
18void dlm_lockspace_exit(void);
19struct dlm_ls *dlm_find_lockspace_global(uint32_t id);
20struct dlm_ls *dlm_find_lockspace_local(void *id);
21struct dlm_ls *dlm_find_lockspace_device(int minor);
22void dlm_put_lockspace(struct dlm_ls *ls);
23
24#endif /* __LOCKSPACE_DOT_H__ */
25
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
new file mode 100644
index 000000000000..23f5ce12080b
--- /dev/null
+++ b/fs/dlm/lowcomms.c
@@ -0,0 +1,1238 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14/*
15 * lowcomms.c
16 *
17 * This is the "low-level" comms layer.
18 *
19 * It is responsible for sending/receiving messages
20 * from other nodes in the cluster.
21 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's responsibility to resolve
 * these into IP addresses or whatever it needs for inter-node
 * communication.
28 *
29 * The comms level is two kernel threads that deal mainly with
30 * the receiving of messages from other nodes and passing them
31 * up to the mid-level comms layer (which understands the
32 * message format) for execution by the locking core, and
33 * a send thread which does all the setting up of connections
34 * to remote nodes and the sending of data. Threads are not allowed
35 * to send their own data because it may cause them to wait in times
36 * of high load. Also, this way, the sending thread can collect together
37 * messages bound for one node and send them in one block.
38 *
39 * I don't see any problem with the recv thread executing the locking
40 * code on behalf of remote processes as the locking code is
41 * short, efficient and never (well, hardly ever) waits.
42 *
43 */
44
45#include <asm/ioctls.h>
46#include <net/sock.h>
47#include <net/tcp.h>
48#include <net/sctp/user.h>
49#include <linux/pagemap.h>
50#include <linux/socket.h>
51#include <linux/idr.h>
52
53#include "dlm_internal.h"
54#include "lowcomms.h"
55#include "config.h"
56#include "midcomms.h"
57
/* Local addresses gathered from the cluster config by init_local();
   indexed 0..dlm_local_count-1 */
static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;
static int dlm_local_nodeid;	/* our own nodeid, cached by init_local() */

/* One of these per connected node */

/* nodeinfo.flags bits */
#define NI_INIT_PENDING 1	/* an association INIT is outstanding */
#define NI_WRITE_PENDING 2	/* node is queued on write_nodes */

struct nodeinfo {
	spinlock_t lock;		/* protects assoc_id */
	sctp_assoc_t assoc_id;		/* SCTP association id, 0 = none */
	unsigned long flags;
	struct list_head write_list;	/* nodes with pending writes */
	struct list_head writequeue;	/* outgoing writequeue_entries */
	spinlock_t writequeue_lock;	/* protects writequeue */
	int nodeid;
};

static DEFINE_IDR(nodeinfo_idr);		/* nodeid -> struct nodeinfo */
static struct rw_semaphore nodeinfo_lock;	/* protects nodeinfo_idr, max_nodeid */
static int max_nodeid;				/* highest nodeid allocated so far */
80
/* Circular receive buffer: "len" live bytes starting at offset "base"
   within a power-of-two sized buffer; "mask" is size-1 (see CBUF_INIT).
   Manipulated only through the CBUF_* macros below. */
struct cbuf {
	unsigned base;
	unsigned len;
	unsigned mask;
};

/* Just the one of these, now. But this struct keeps
   the connection-specific variables together */

/* connection.flags bit: data waiting to be read by dlm_recvd */
#define CF_READ_PENDING 1

struct connection {
	struct socket *sock;		/* the single SCTP socket */
	unsigned long flags;
	struct page *rx_page;		/* backing page for the receive cbuf */
	atomic_t waiting_requests;	/* bumped by lowcomms_data_ready() */
	struct cbuf cb;
	int eagain_flag;		/* set when a send hit -EAGAIN */
};
100
/* An entry waiting to be sent */

struct writequeue_entry {
	struct list_head list;	/* on nodeinfo.writequeue */
	struct page *page;	/* page holding the message data */
	int offset;		/* start of unsent data within the page */
	int len;		/* bytes committed but not yet sent */
	int end;		/* end of reserved region (next alloc point) */
	int users;		/* buffers handed out but not yet committed */
	struct nodeinfo *ni;	/* owning node */
};
112
/* Helpers for struct cbuf.  The buffer size must be a power of two;
 * "mask" is size-1 so CBUF_DATA can wrap with a single AND.
 * All macro arguments are fully parenthesized so arbitrary expressions
 * may be passed (previously CBUF_ADD expanded "n" unguarded). */
#define CBUF_ADD(cb, n) do { (cb)->len += (n); } while(0)
#define CBUF_EMPTY(cb) ((cb)->len == 0)
#define CBUF_MAY_ADD(cb, n) (((cb)->len + (n)) < ((cb)->mask + 1))
/* Offset just past the last live byte, i.e. where new data lands */
#define CBUF_DATA(cb) (((cb)->base + (cb)->len) & (cb)->mask)

#define CBUF_INIT(cb, size) \
do { \
	(cb)->base = (cb)->len = 0; \
	(cb)->mask = ((size)-1); \
} while(0)

/* Consume n bytes from the front of the buffer */
#define CBUF_EAT(cb, n) \
do { \
	(cb)->len -= (n); \
	(cb)->base += (n); \
	(cb)->base &= (cb)->mask; \
} while(0)
130
131
/* List of nodes which have writes pending */
static struct list_head write_nodes;
static spinlock_t write_nodes_lock;	/* protects write_nodes */

/* Maximum number of incoming messages to process before
 * doing a schedule()
 */
#define MAX_RX_MSG_COUNT 25

/* Manage daemons */
static struct task_struct *recv_task;		/* dlm_recvd */
static struct task_struct *send_task;		/* dlm_sendd */
static wait_queue_head_t lowcomms_recv_wait;	/* dlm_recvd sleeps here */
static atomic_t accepting;	/* nonzero while lowcomms is started */

/* The SCTP connection */
static struct connection sctp_con;
149
150
151static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
152{
153 struct sockaddr_storage addr;
154 int error;
155
156 if (!dlm_local_count)
157 return -1;
158
159 error = dlm_nodeid_to_addr(nodeid, &addr);
160 if (error)
161 return error;
162
163 if (dlm_local_addr[0]->ss_family == AF_INET) {
164 struct sockaddr_in *in4 = (struct sockaddr_in *) &addr;
165 struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
166 ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
167 } else {
168 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
169 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
170 memcpy(&ret6->sin6_addr, &in6->sin6_addr,
171 sizeof(in6->sin6_addr));
172 }
173
174 return 0;
175}
176
177static struct nodeinfo *nodeid2nodeinfo(int nodeid, int alloc)
178{
179 struct nodeinfo *ni;
180 int r;
181 int n;
182
183 down_read(&nodeinfo_lock);
184 ni = idr_find(&nodeinfo_idr, nodeid);
185 up_read(&nodeinfo_lock);
186
187 if (!ni && alloc) {
188 down_write(&nodeinfo_lock);
189
190 ni = idr_find(&nodeinfo_idr, nodeid);
191 if (ni)
192 goto out_up;
193
194 r = idr_pre_get(&nodeinfo_idr, alloc);
195 if (!r)
196 goto out_up;
197
198 ni = kmalloc(sizeof(struct nodeinfo), alloc);
199 if (!ni)
200 goto out_up;
201
202 r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n);
203 if (r) {
204 kfree(ni);
205 ni = NULL;
206 goto out_up;
207 }
208 if (n != nodeid) {
209 idr_remove(&nodeinfo_idr, n);
210 kfree(ni);
211 ni = NULL;
212 goto out_up;
213 }
214 memset(ni, 0, sizeof(struct nodeinfo));
215 spin_lock_init(&ni->lock);
216 INIT_LIST_HEAD(&ni->writequeue);
217 spin_lock_init(&ni->writequeue_lock);
218 ni->nodeid = nodeid;
219
220 if (nodeid > max_nodeid)
221 max_nodeid = nodeid;
222 out_up:
223 up_write(&nodeinfo_lock);
224 }
225
226 return ni;
227}
228
229/* Don't call this too often... */
230static struct nodeinfo *assoc2nodeinfo(sctp_assoc_t assoc)
231{
232 int i;
233 struct nodeinfo *ni;
234
235 for (i=1; i<=max_nodeid; i++) {
236 ni = nodeid2nodeinfo(i, 0);
237 if (ni && ni->assoc_id == assoc)
238 return ni;
239 }
240 return NULL;
241}
242
243/* Data or notification available on socket */
244static void lowcomms_data_ready(struct sock *sk, int count_unused)
245{
246 atomic_inc(&sctp_con.waiting_requests);
247 if (test_and_set_bit(CF_READ_PENDING, &sctp_con.flags))
248 return;
249
250 wake_up_interruptible(&lowcomms_recv_wait);
251}
252
253
/* Add the port number to an IP6 or 4 sockaddr and return the address length.
   Also pad out the struct with zeros to make comparisons meaningful */

static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	struct sockaddr_in *local4_addr;
	struct sockaddr_in6 *local6_addr;

	if (!dlm_local_count)
		return;

	/* A zero port means "use the port of our first local address" */
	if (!port) {
		if (dlm_local_addr[0]->ss_family == AF_INET) {
			local4_addr = (struct sockaddr_in *)dlm_local_addr[0];
			port = be16_to_cpu(local4_addr->sin_port);
		} else {
			local6_addr = (struct sockaddr_in6 *)dlm_local_addr[0];
			port = be16_to_cpu(local6_addr->sin6_port);
		}
	}

	saddr->ss_family = dlm_local_addr[0]->ss_family;
	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
		/* Zero the storage tail beyond sockaddr_in so whole-struct
		   comparisons are stable */
		memset(in4_addr+1, 0, sizeof(struct sockaddr_storage) -
		       sizeof(struct sockaddr_in));
		*addr_len = sizeof(struct sockaddr_in);
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		/* Same zero-padding for the IPv6 case */
		memset(in6_addr+1, 0, sizeof(struct sockaddr_storage) -
		       sizeof(struct sockaddr_in6));
		*addr_len = sizeof(struct sockaddr_in6);
	}
}
292
293/* Close the connection and tidy up */
294static void close_connection(void)
295{
296 if (sctp_con.sock) {
297 sock_release(sctp_con.sock);
298 sctp_con.sock = NULL;
299 }
300
301 if (sctp_con.rx_page) {
302 __free_page(sctp_con.rx_page);
303 sctp_con.rx_page = NULL;
304 }
305}
306
/* We only send shutdown messages to nodes that are not part of the cluster */
static void send_shutdown(sctp_assoc_t associd)
{
	static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct msghdr outmessage;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	int ret;

	/* No payload: the MSG_EOF flag in the ancillary data asks SCTP
	   to shut the association down gracefully */
	outmessage.msg_name = NULL;
	outmessage.msg_namelen = 0;
	outmessage.msg_control = outcmsg;
	outmessage.msg_controllen = sizeof(outcmsg);
	outmessage.msg_flags = MSG_EOR;

	cmsg = CMSG_FIRSTHDR(&outmessage);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	/* Shrink controllen to the single cmsg actually present */
	outmessage.msg_controllen = cmsg->cmsg_len;
	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));

	sinfo->sinfo_flags |= MSG_EOF;
	sinfo->sinfo_assoc_id = associd;

	ret = kernel_sendmsg(sctp_con.sock, &outmessage, NULL, 0, 0);

	if (ret != 0)
		log_print("send EOF to node failed: %d", ret);
}
338
339
340/* INIT failed but we don't know which node...
341 restart INIT on all pending nodes */
342static void init_failed(void)
343{
344 int i;
345 struct nodeinfo *ni;
346
347 for (i=1; i<=max_nodeid; i++) {
348 ni = nodeid2nodeinfo(i, 0);
349 if (!ni)
350 continue;
351
352 if (test_and_clear_bit(NI_INIT_PENDING, &ni->flags)) {
353 ni->assoc_id = 0;
354 if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
355 spin_lock_bh(&write_nodes_lock);
356 list_add_tail(&ni->write_list, &write_nodes);
357 spin_unlock_bh(&write_nodes_lock);
358 }
359 }
360 }
361 wake_up_process(send_task);
362}
363
/* Something happened to an association */
static void process_sctp_notification(struct msghdr *msg, char *buf)
{
	union sctp_notification *sn = (union sctp_notification *)buf;

	if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE) {
		switch (sn->sn_assoc_change.sac_state) {

		case SCTP_COMM_UP:
		case SCTP_RESTART:
		{
			/* Check that the new node is in the lockspace */
			struct sctp_prim prim;
			mm_segment_t fs;
			int nodeid;
			int prim_len, ret;
			int addr_len;
			struct nodeinfo *ni;

			/* This seems to happen when we received a connection
			 * too early... or something... anyway, it happens but
			 * we always seem to get a real message too, see
			 * receive_from_sock */

			if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
				log_print("COMM_UP for invalid assoc ID %d",
					  (int)sn->sn_assoc_change.sac_assoc_id);
				init_failed();
				return;
			}
			memset(&prim, 0, sizeof(struct sctp_prim));
			prim_len = sizeof(struct sctp_prim);
			prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id;

			/* Ask SCTP for the primary peer address of the new
			   association; set_fs lets the user-pointer sockopt
			   accept a kernel pointer */
			fs = get_fs();
			set_fs(get_ds());
			ret = sctp_con.sock->ops->getsockopt(sctp_con.sock,
					IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
					(char*)&prim, &prim_len);
			set_fs(fs);
			if (ret < 0) {
				struct nodeinfo *ni;

				log_print("getsockopt/sctp_primary_addr on "
					  "new assoc %d failed : %d",
					  (int)sn->sn_assoc_change.sac_assoc_id, ret);

				/* Retry INIT later */
				ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id);
				if (ni)
					clear_bit(NI_INIT_PENDING, &ni->flags);
				return;
			}
			make_sockaddr(&prim.ssp_addr, 0, &addr_len);
			if (dlm_addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
				/* Peer address isn't a configured cluster
				   member: refuse the association */
				log_print("reject connect from unknown addr");
				send_shutdown(prim.ssp_assoc_id);
				return;
			}

			ni = nodeid2nodeinfo(nodeid, GFP_KERNEL);
			if (!ni)
				return;

			/* Save the assoc ID */
			spin_lock(&ni->lock);
			ni->assoc_id = sn->sn_assoc_change.sac_assoc_id;
			spin_unlock(&ni->lock);

			log_print("got new/restarted association %d nodeid %d",
				  (int)sn->sn_assoc_change.sac_assoc_id, nodeid);

			/* Send any pending writes */
			clear_bit(NI_INIT_PENDING, &ni->flags);
			if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
				spin_lock_bh(&write_nodes_lock);
				list_add_tail(&ni->write_list, &write_nodes);
				spin_unlock_bh(&write_nodes_lock);
			}
			wake_up_process(send_task);
		}
		break;

		case SCTP_COMM_LOST:
		case SCTP_SHUTDOWN_COMP:
		{
			struct nodeinfo *ni;

			/* Peer went away: forget the association so the next
			   send re-initiates one */
			ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id);
			if (ni) {
				spin_lock(&ni->lock);
				ni->assoc_id = 0;
				spin_unlock(&ni->lock);
			}
		}
		break;

		/* We don't know which INIT failed, so clear the PENDING flags
		 * on them all. if assoc_id is zero then it will then try
		 * again */

		case SCTP_CANT_STR_ASSOC:
		{
			log_print("Can't start SCTP association - retrying");
			init_failed();
		}
		break;

		default:
			log_print("unexpected SCTP assoc change id=%d state=%d",
				  (int)sn->sn_assoc_change.sac_assoc_id,
				  sn->sn_assoc_change.sac_state);
		}
	}
}
479
480/* Data received from remote end */
481static int receive_from_sock(void)
482{
483 int ret = 0;
484 struct msghdr msg;
485 struct kvec iov[2];
486 unsigned len;
487 int r;
488 struct sctp_sndrcvinfo *sinfo;
489 struct cmsghdr *cmsg;
490 struct nodeinfo *ni;
491
492 /* These two are marginally too big for stack allocation, but this
493 * function is (currently) only called by dlm_recvd so static should be
494 * OK.
495 */
496 static struct sockaddr_storage msgname;
497 static char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
498
499 if (sctp_con.sock == NULL)
500 goto out;
501
502 if (sctp_con.rx_page == NULL) {
503 /*
504 * This doesn't need to be atomic, but I think it should
505 * improve performance if it is.
506 */
507 sctp_con.rx_page = alloc_page(GFP_ATOMIC);
508 if (sctp_con.rx_page == NULL)
509 goto out_resched;
510 CBUF_INIT(&sctp_con.cb, PAGE_CACHE_SIZE);
511 }
512
513 memset(&incmsg, 0, sizeof(incmsg));
514 memset(&msgname, 0, sizeof(msgname));
515
516 memset(incmsg, 0, sizeof(incmsg));
517 msg.msg_name = &msgname;
518 msg.msg_namelen = sizeof(msgname);
519 msg.msg_flags = 0;
520 msg.msg_control = incmsg;
521 msg.msg_controllen = sizeof(incmsg);
522
523 /* I don't see why this circular buffer stuff is necessary for SCTP
524 * which is a packet-based protocol, but the whole thing breaks under
525 * load without it! The overhead is minimal (and is in the TCP lowcomms
526 * anyway, of course) so I'll leave it in until I can figure out what's
527 * really happening.
528 */
529
530 /*
531 * iov[0] is the bit of the circular buffer between the current end
532 * point (cb.base + cb.len) and the end of the buffer.
533 */
534 iov[0].iov_len = sctp_con.cb.base - CBUF_DATA(&sctp_con.cb);
535 iov[0].iov_base = page_address(sctp_con.rx_page) +
536 CBUF_DATA(&sctp_con.cb);
537 iov[1].iov_len = 0;
538
539 /*
540 * iov[1] is the bit of the circular buffer between the start of the
541 * buffer and the start of the currently used section (cb.base)
542 */
543 if (CBUF_DATA(&sctp_con.cb) >= sctp_con.cb.base) {
544 iov[0].iov_len = PAGE_CACHE_SIZE - CBUF_DATA(&sctp_con.cb);
545 iov[1].iov_len = sctp_con.cb.base;
546 iov[1].iov_base = page_address(sctp_con.rx_page);
547 msg.msg_iovlen = 2;
548 }
549 len = iov[0].iov_len + iov[1].iov_len;
550
551 r = ret = kernel_recvmsg(sctp_con.sock, &msg, iov, 1, len,
552 MSG_NOSIGNAL | MSG_DONTWAIT);
553 if (ret <= 0)
554 goto out_close;
555
556 msg.msg_control = incmsg;
557 msg.msg_controllen = sizeof(incmsg);
558 cmsg = CMSG_FIRSTHDR(&msg);
559 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
560
561 if (msg.msg_flags & MSG_NOTIFICATION) {
562 process_sctp_notification(&msg, page_address(sctp_con.rx_page));
563 return 0;
564 }
565
566 /* Is this a new association ? */
567 ni = nodeid2nodeinfo(le32_to_cpu(sinfo->sinfo_ppid), GFP_KERNEL);
568 if (ni) {
569 ni->assoc_id = sinfo->sinfo_assoc_id;
570 if (test_and_clear_bit(NI_INIT_PENDING, &ni->flags)) {
571
572 if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
573 spin_lock_bh(&write_nodes_lock);
574 list_add_tail(&ni->write_list, &write_nodes);
575 spin_unlock_bh(&write_nodes_lock);
576 }
577 wake_up_process(send_task);
578 }
579 }
580
581 /* INIT sends a message with length of 1 - ignore it */
582 if (r == 1)
583 return 0;
584
585 CBUF_ADD(&sctp_con.cb, ret);
586 ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid),
587 page_address(sctp_con.rx_page),
588 sctp_con.cb.base, sctp_con.cb.len,
589 PAGE_CACHE_SIZE);
590 if (ret < 0)
591 goto out_close;
592 CBUF_EAT(&sctp_con.cb, ret);
593
594 out:
595 ret = 0;
596 goto out_ret;
597
598 out_resched:
599 lowcomms_data_ready(sctp_con.sock->sk, 0);
600 ret = 0;
601 schedule();
602 goto out_ret;
603
604 out_close:
605 if (ret != -EAGAIN)
606 log_print("error reading from sctp socket: %d", ret);
607 out_ret:
608 return ret;
609}
610
611/* Bind to an IP address. SCTP allows multiple address so it can do multi-homing */
612static int add_bind_addr(struct sockaddr_storage *addr, int addr_len, int num)
613{
614 mm_segment_t fs;
615 int result = 0;
616
617 fs = get_fs();
618 set_fs(get_ds());
619 if (num == 1)
620 result = sctp_con.sock->ops->bind(sctp_con.sock,
621 (struct sockaddr *) addr, addr_len);
622 else
623 result = sctp_con.sock->ops->setsockopt(sctp_con.sock, SOL_SCTP,
624 SCTP_SOCKOPT_BINDX_ADD, (char *)addr, addr_len);
625 set_fs(fs);
626
627 if (result < 0)
628 log_print("Can't bind to port %d addr number %d",
629 dlm_config.tcp_port, num);
630
631 return result;
632}
633
634static void init_local(void)
635{
636 struct sockaddr_storage sas, *addr;
637 int i;
638
639 dlm_local_nodeid = dlm_our_nodeid();
640
641 for (i = 0; i < DLM_MAX_ADDR_COUNT - 1; i++) {
642 if (dlm_our_addr(&sas, i))
643 break;
644
645 addr = kmalloc(sizeof(*addr), GFP_KERNEL);
646 if (!addr)
647 break;
648 memcpy(addr, &sas, sizeof(*addr));
649 dlm_local_addr[dlm_local_count++] = addr;
650 }
651}
652
/* Initialise SCTP socket and bind to all interfaces */
static int init_sock(void)
{
	mm_segment_t fs;
	struct socket *sock = NULL;
	struct sockaddr_storage localaddr;
	struct sctp_event_subscribe subscribe;
	int result = -EINVAL, num = 1, i, addr_len;

	if (!dlm_local_count) {
		init_local();
		if (!dlm_local_count) {
			log_print("no local IP address has been set");
			goto out;
		}
	}

	result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_SEQPACKET,
				  IPPROTO_SCTP, &sock);
	if (result < 0) {
		log_print("Can't create comms socket, check SCTP is loaded");
		goto out;
	}

	/* Listen for events */
	memset(&subscribe, 0, sizeof(subscribe));
	subscribe.sctp_data_io_event = 1;
	subscribe.sctp_association_event = 1;
	subscribe.sctp_send_failure_event = 1;
	subscribe.sctp_shutdown_event = 1;
	subscribe.sctp_partial_delivery_event = 1;

	/* set_fs lets the user-pointer setsockopt accept a kernel pointer */
	fs = get_fs();
	set_fs(get_ds());
	result = sock->ops->setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
				       (char *)&subscribe, sizeof(subscribe));
	set_fs(fs);

	if (result < 0) {
		log_print("Failed to set SCTP_EVENTS on socket: result=%d",
			  result);
		goto create_delsock;
	}

	/* Init con struct */
	sock->sk->sk_user_data = &sctp_con;
	sctp_con.sock = sock;
	sctp_con.sock->sk->sk_data_ready = lowcomms_data_ready;

	/* Bind to all interfaces. */
	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, dlm_config.tcp_port, &addr_len);

		result = add_bind_addr(&localaddr, addr_len, num);
		if (result)
			goto create_delsock;
		++num;
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't set socket listening");
		goto create_delsock;
	}

	return 0;

 create_delsock:
	sock_release(sock);
	sctp_con.sock = NULL;
 out:
	return result;
}
727
728
729static struct writequeue_entry *new_writequeue_entry(int allocation)
730{
731 struct writequeue_entry *entry;
732
733 entry = kmalloc(sizeof(struct writequeue_entry), allocation);
734 if (!entry)
735 return NULL;
736
737 entry->page = alloc_page(allocation);
738 if (!entry->page) {
739 kfree(entry);
740 return NULL;
741 }
742
743 entry->offset = 0;
744 entry->len = 0;
745 entry->end = 0;
746 entry->users = 0;
747
748 return entry;
749}
750
/*
 * Reserve len bytes of writequeue space for nodeid.  Sets *ppc to the
 * mapped data area the caller fills in before dlm_lowcomms_commit_buffer(),
 * and returns an opaque handle (the writequeue_entry).  Returns NULL if
 * lowcomms is shut down or allocation fails.
 */
void *dlm_lowcomms_get_buffer(int nodeid, int len, int allocation, char **ppc)
{
	struct writequeue_entry *e;
	int offset = 0;
	int users = 0;
	struct nodeinfo *ni;

	if (!atomic_read(&accepting))
		return NULL;

	ni = nodeid2nodeinfo(nodeid, allocation);
	if (!ni)
		return NULL;

	/* Try to append to the last queued entry if it has room */
	spin_lock(&ni->writequeue_lock);
	e = list_entry(ni->writequeue.prev, struct writequeue_entry, list);
	if (((struct list_head *) e == &ni->writequeue) ||
	    (PAGE_CACHE_SIZE - e->end < len)) {
		e = NULL;
	} else {
		offset = e->end;
		e->end += len;
		users = e->users++;
	}
	spin_unlock(&ni->writequeue_lock);

	if (e) {
	      got_one:
		/* First user maps the page; the matching kunmap happens in
		   dlm_lowcomms_commit_buffer() when the last user commits */
		if (users == 0)
			kmap(e->page);
		*ppc = page_address(e->page) + offset;
		return e;
	}

	/* No usable entry: allocate a new page-sized one and queue it */
	e = new_writequeue_entry(allocation);
	if (e) {
		spin_lock(&ni->writequeue_lock);
		offset = e->end;
		e->end += len;
		e->ni = ni;
		users = e->users++;
		list_add_tail(&e->list, &ni->writequeue);
		spin_unlock(&ni->writequeue_lock);
		goto got_one;
	}
	return NULL;
}
798
799void dlm_lowcomms_commit_buffer(void *arg)
800{
801 struct writequeue_entry *e = (struct writequeue_entry *) arg;
802 int users;
803 struct nodeinfo *ni = e->ni;
804
805 if (!atomic_read(&accepting))
806 return;
807
808 spin_lock(&ni->writequeue_lock);
809 users = --e->users;
810 if (users)
811 goto out;
812 e->len = e->end - e->offset;
813 kunmap(e->page);
814 spin_unlock(&ni->writequeue_lock);
815
816 if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
817 spin_lock_bh(&write_nodes_lock);
818 list_add_tail(&ni->write_list, &write_nodes);
819 spin_unlock_bh(&write_nodes_lock);
820 wake_up_process(send_task);
821 }
822 return;
823
824 out:
825 spin_unlock(&ni->writequeue_lock);
826 return;
827}
828
/* Release a writequeue entry and its backing page */
static void free_entry(struct writequeue_entry *e)
{
	__free_page(e->page);
	kfree(e);
}
834
/* Initiate an SCTP association. In theory we could just use sendmsg() on
   the first IP address and it should work, but this allows us to set up the
   association before sending any valuable data that we can't afford to lose.
   It also keeps the send path clean as it can now always use the association ID */
static void initiate_association(int nodeid)
{
	struct sockaddr_storage rem_addr;
	static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct msghdr outmessage;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	int ret;
	int addrlen;
	char buf[1];
	struct kvec iov[1];
	struct nodeinfo *ni;

	log_print("Initiating association with node %d", nodeid);

	ni = nodeid2nodeinfo(nodeid, GFP_KERNEL);
	if (!ni)
		return;

	if (nodeid_to_addr(nodeid, (struct sockaddr *)&rem_addr)) {
		log_print("no address for nodeid %d", nodeid);
		return;
	}

	make_sockaddr(&rem_addr, dlm_config.tcp_port, &addrlen);

	outmessage.msg_name = &rem_addr;
	outmessage.msg_namelen = addrlen;
	outmessage.msg_control = outcmsg;
	outmessage.msg_controllen = sizeof(outcmsg);
	outmessage.msg_flags = MSG_EOR;

	iov[0].iov_base = buf;
	iov[0].iov_len = 1;

	/* Real INIT messages seem to cause trouble. Just send a 1 byte message
	   we can afford to lose */
	cmsg = CMSG_FIRSTHDR(&outmessage);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
	/* The ppid carries our nodeid so the peer can identify us
	   (see receive_from_sock, which reads sinfo_ppid as a nodeid) */
	sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);

	outmessage.msg_controllen = cmsg->cmsg_len;
	ret = kernel_sendmsg(sctp_con.sock, &outmessage, iov, 1, 1);
	if (ret < 0) {
		log_print("send INIT to node failed: %d", ret);
		/* Try again later */
		clear_bit(NI_INIT_PENDING, &ni->flags);
	}
}
892
/* Send a message */
static int send_to_sock(struct nodeinfo *ni)
{
	int ret = 0;
	struct writequeue_entry *e;
	int len, offset;
	struct msghdr outmsg;
	static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	struct kvec iov;

	/* See if we need to init an association before we start
	   sending precious messages */
	spin_lock(&ni->lock);
	if (!ni->assoc_id && !test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
		spin_unlock(&ni->lock);
		initiate_association(ni->nodeid);
		return 0;
	}
	spin_unlock(&ni->lock);

	outmsg.msg_name = NULL; /* We use assoc_id */
	outmsg.msg_namelen = 0;
	outmsg.msg_control = outcmsg;
	outmsg.msg_controllen = sizeof(outcmsg);
	outmsg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL | MSG_EOR;

	cmsg = CMSG_FIRSTHDR(&outmsg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
	sinfo->sinfo_ppid = cpu_to_le32(dlm_local_nodeid);
	sinfo->sinfo_assoc_id = ni->assoc_id;
	outmsg.msg_controllen = cmsg->cmsg_len;

	/* Drain the writequeue until empty or the socket pushes back */
	spin_lock(&ni->writequeue_lock);
	for (;;) {
		if (list_empty(&ni->writequeue))
			break;
		e = list_entry(ni->writequeue.next, struct writequeue_entry,
			       list);
		len = e->len;
		offset = e->offset;
		BUG_ON(len == 0 && e->users == 0);
		spin_unlock(&ni->writequeue_lock);
		/* NOTE(review): this kmap() has no matching kunmap() on the
		   send path, while dlm_lowcomms_commit_buffer() already
		   kunmaps when the last user commits -- looks unbalanced on
		   highmem configs, confirm */
		kmap(e->page);

		ret = 0;
		if (len) {
			iov.iov_base = page_address(e->page)+offset;
			iov.iov_len = len;

			ret = kernel_sendmsg(sctp_con.sock, &outmsg, &iov, 1,
					     len);
			if (ret == -EAGAIN) {
				/* Socket congested: flag it so dlm_sendd
				   retries via refill_write_queue() */
				sctp_con.eagain_flag = 1;
				goto out;
			} else if (ret < 0)
				goto send_error;
		} else {
			/* Don't starve people filling buffers */
			schedule();
		}

		spin_lock(&ni->writequeue_lock);
		e->offset += ret;
		e->len -= ret;

		/* Entry fully sent and no uncommitted users: free it */
		if (e->len == 0 && e->users == 0) {
			list_del(&e->list);
			free_entry(e);
			continue;
		}
	}
	spin_unlock(&ni->writequeue_lock);
 out:
	return ret;

 send_error:
	log_print("Error sending to node %d %d", ni->nodeid, ret);
	spin_lock(&ni->lock);
	/* Drop the association and re-initiate, unless an INIT is
	   already pending */
	if (!test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
		ni->assoc_id = 0;
		spin_unlock(&ni->lock);
		initiate_association(ni->nodeid);
	} else
		spin_unlock(&ni->lock);

	return ret;
}
986
987/* Try to send any messages that are pending */
988static void process_output_queue(void)
989{
990 struct list_head *list;
991 struct list_head *temp;
992
993 spin_lock_bh(&write_nodes_lock);
994 list_for_each_safe(list, temp, &write_nodes) {
995 struct nodeinfo *ni =
996 list_entry(list, struct nodeinfo, write_list);
997 clear_bit(NI_WRITE_PENDING, &ni->flags);
998 list_del(&ni->write_list);
999
1000 spin_unlock_bh(&write_nodes_lock);
1001
1002 send_to_sock(ni);
1003 spin_lock_bh(&write_nodes_lock);
1004 }
1005 spin_unlock_bh(&write_nodes_lock);
1006}
1007
1008/* Called after we've had -EAGAIN and been woken up */
1009static void refill_write_queue(void)
1010{
1011 int i;
1012
1013 for (i=1; i<=max_nodeid; i++) {
1014 struct nodeinfo *ni = nodeid2nodeinfo(i, 0);
1015
1016 if (ni) {
1017 if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
1018 spin_lock_bh(&write_nodes_lock);
1019 list_add_tail(&ni->write_list, &write_nodes);
1020 spin_unlock_bh(&write_nodes_lock);
1021 }
1022 }
1023 }
1024}
1025
1026static void clean_one_writequeue(struct nodeinfo *ni)
1027{
1028 struct list_head *list;
1029 struct list_head *temp;
1030
1031 spin_lock(&ni->writequeue_lock);
1032 list_for_each_safe(list, temp, &ni->writequeue) {
1033 struct writequeue_entry *e =
1034 list_entry(list, struct writequeue_entry, list);
1035 list_del(&e->list);
1036 free_entry(e);
1037 }
1038 spin_unlock(&ni->writequeue_lock);
1039}
1040
1041static void clean_writequeues(void)
1042{
1043 int i;
1044
1045 for (i=1; i<=max_nodeid; i++) {
1046 struct nodeinfo *ni = nodeid2nodeinfo(i, 0);
1047 if (ni)
1048 clean_one_writequeue(ni);
1049 }
1050}
1051
1052
1053static void dealloc_nodeinfo(void)
1054{
1055 int i;
1056
1057 for (i=1; i<=max_nodeid; i++) {
1058 struct nodeinfo *ni = nodeid2nodeinfo(i, 0);
1059 if (ni) {
1060 idr_remove(&nodeinfo_idr, i);
1061 kfree(ni);
1062 }
1063 }
1064}
1065
1066int dlm_lowcomms_close(int nodeid)
1067{
1068 struct nodeinfo *ni;
1069
1070 ni = nodeid2nodeinfo(nodeid, 0);
1071 if (!ni)
1072 return -1;
1073
1074 spin_lock(&ni->lock);
1075 if (ni->assoc_id) {
1076 ni->assoc_id = 0;
1077 /* Don't send shutdown here, sctp will just queue it
1078 till the node comes back up! */
1079 }
1080 spin_unlock(&ni->lock);
1081
1082 clean_one_writequeue(ni);
1083 clear_bit(NI_INIT_PENDING, &ni->flags);
1084 return 0;
1085}
1086
1087static int write_list_empty(void)
1088{
1089 int status;
1090
1091 spin_lock_bh(&write_nodes_lock);
1092 status = list_empty(&write_nodes);
1093 spin_unlock_bh(&write_nodes_lock);
1094
1095 return status;
1096}
1097
/* Receive daemon: sleeps until lowcomms_data_ready() sets
   CF_READ_PENDING, then drains the socket via receive_from_sock(). */
static int dlm_recvd(void *data)
{
	DECLARE_WAITQUEUE(wait, current);

	while (!kthread_should_stop()) {
		int count = 0;

		/* Register on the waitqueue before testing the flag so a
		   wakeup between the test and schedule() isn't lost */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&lowcomms_recv_wait, &wait);
		if (!test_bit(CF_READ_PENDING, &sctp_con.flags))
			schedule();
		remove_wait_queue(&lowcomms_recv_wait, &wait);
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(CF_READ_PENDING, &sctp_con.flags)) {
			int ret;

			do {
				ret = receive_from_sock();

				/* Don't starve out everyone else */
				if (++count >= MAX_RX_MSG_COUNT) {
					schedule();
					count = 0;
				}
			} while (!kthread_should_stop() && ret >=0);
		}
		schedule();
	}

	return 0;
}
1130
/* Send daemon: processes the write_nodes list, re-filling it via
   refill_write_queue() after a send saw -EAGAIN. */
static int dlm_sendd(void *data)
{
	DECLARE_WAITQUEUE(wait, current);

	/* Sit on the socket's sleep queue so socket-writable wakeups reach
	   us in addition to explicit wake_up_process() calls */
	add_wait_queue(sctp_con.sock->sk->sk_sleep, &wait);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (write_list_empty())
			schedule();
		set_current_state(TASK_RUNNING);

		if (sctp_con.eagain_flag) {
			sctp_con.eagain_flag = 0;
			refill_write_queue();
		}
		process_output_queue();
	}

	remove_wait_queue(sctp_con.sock->sk->sk_sleep, &wait);

	return 0;
}
1154
/* Stop both comms kthreads (blocks until each has exited) */
static void daemons_stop(void)
{
	kthread_stop(recv_task);
	kthread_stop(send_task);
}
1160
1161static int daemons_start(void)
1162{
1163 struct task_struct *p;
1164 int error;
1165
1166 p = kthread_run(dlm_recvd, NULL, "dlm_recvd");
1167 error = IS_ERR(p);
1168 if (error) {
1169 log_print("can't start dlm_recvd %d", error);
1170 return error;
1171 }
1172 recv_task = p;
1173
1174 p = kthread_run(dlm_sendd, NULL, "dlm_sendd");
1175 error = IS_ERR(p);
1176 if (error) {
1177 log_print("can't start dlm_sendd %d", error);
1178 kthread_stop(recv_task);
1179 return error;
1180 }
1181 send_task = p;
1182
1183 return 0;
1184}
1185
1186/*
1187 * This is quite likely to sleep...
1188 */
1189int dlm_lowcomms_start(void)
1190{
1191 int error;
1192
1193 error = init_sock();
1194 if (error)
1195 goto fail_sock;
1196 error = daemons_start();
1197 if (error)
1198 goto fail_sock;
1199 atomic_set(&accepting, 1);
1200 return 0;
1201
1202 fail_sock:
1203 close_connection();
1204 return error;
1205}
1206
/* Set all the activity flags to prevent any socket activity. */

void dlm_lowcomms_stop(void)
{
	/* Refuse further buffers from the locking layer */
	atomic_set(&accepting, 0);
	/* 0x7 sets the low three connection flag bits (including
	   CF_READ_PENDING) -- presumably to stop the daemons taking on new
	   socket work before they are stopped below; confirm intent */
	sctp_con.flags = 0x7;
	daemons_stop();
	clean_writequeues();
	close_connection();
	dealloc_nodeinfo();
	max_nodeid = 0;
}
1219
1220int dlm_lowcomms_init(void)
1221{
1222 init_waitqueue_head(&lowcomms_recv_wait);
1223 spin_lock_init(&write_nodes_lock);
1224 INIT_LIST_HEAD(&write_nodes);
1225 init_rwsem(&nodeinfo_lock);
1226 return 0;
1227}
1228
1229void dlm_lowcomms_exit(void)
1230{
1231 int i;
1232
1233 for (i = 0; i < dlm_local_count; i++)
1234 kfree(dlm_local_addr[i]);
1235 dlm_local_count = 0;
1236 dlm_local_nodeid = 0;
1237}
1238
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
new file mode 100644
index 000000000000..6c04bb09cfa8
--- /dev/null
+++ b/fs/dlm/lowcomms.h
@@ -0,0 +1,26 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
#ifndef __LOWCOMMS_DOT_H__
#define __LOWCOMMS_DOT_H__

/* module-lifetime setup / teardown */
int dlm_lowcomms_init(void);
void dlm_lowcomms_exit(void);
/* bring comms up / shut all socket activity down */
int dlm_lowcomms_start(void);
void dlm_lowcomms_stop(void);
int dlm_lowcomms_close(int nodeid);
/* reserve len bytes in nodeid's write queue (*ppc -> payload);
   commit_buffer() queues the reserved message for sending */
void *dlm_lowcomms_get_buffer(int nodeid, int len, int allocation, char **ppc);
void dlm_lowcomms_commit_buffer(void *mh);

#endif				/* __LOWCOMMS_DOT_H__ */
26
diff --git a/fs/dlm/lvb_table.h b/fs/dlm/lvb_table.h
new file mode 100644
index 000000000000..cc3e92f3feef
--- /dev/null
+++ b/fs/dlm/lvb_table.h
@@ -0,0 +1,18 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
#ifndef __LVB_TABLE_DOT_H__
#define __LVB_TABLE_DOT_H__

/* 8x8 table indexed by the DLM lock modes; defined elsewhere —
   NOTE(review): the encoding of each entry lives with the definition */
extern const int dlm_lvb_operations[8][8];

#endif
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
new file mode 100644
index 000000000000..a8da8dc36b2e
--- /dev/null
+++ b/fs/dlm/main.c
@@ -0,0 +1,97 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "lock.h"
17#include "user.h"
18#include "memory.h"
19#include "lowcomms.h"
20#include "config.h"
21
#ifdef CONFIG_DLM_DEBUG
int dlm_register_debugfs(void);
void dlm_unregister_debugfs(void);
#else
/* no-op stubs when debugfs support is compiled out */
static inline int dlm_register_debugfs(void) { return 0; }
static inline void dlm_unregister_debugfs(void) { }
#endif
29
/* Module entry point: initialise each subsystem in dependency order;
   on failure unwind everything already done via the goto chain. */
static int __init init_dlm(void)
{
	int error;

	error = dlm_memory_init();
	if (error)
		goto out;

	error = dlm_lockspace_init();
	if (error)
		goto out_mem;

	error = dlm_config_init();
	if (error)
		goto out_lockspace;

	error = dlm_register_debugfs();
	if (error)
		goto out_config;

	error = dlm_lowcomms_init();
	if (error)
		goto out_debug;

	error = dlm_user_init();
	if (error)
		goto out_lowcomms;

	/* NOTE(review): __DATE__/__TIME__ make the build non-reproducible */
	printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);

	return 0;

 out_lowcomms:
	dlm_lowcomms_exit();
 out_debug:
	dlm_unregister_debugfs();
 out_config:
	dlm_config_exit();
 out_lockspace:
	dlm_lockspace_exit();
 out_mem:
	dlm_memory_exit();
 out:
	return error;
}
75
/* Module exit: tear down every subsystem initialised by init_dlm().
   NOTE(review): order differs from init_dlm's error path (memory is
   released before lockspace here) — confirm this is intentional. */
static void __exit exit_dlm(void)
{
	dlm_user_exit();
	dlm_lowcomms_exit();
	dlm_config_exit();
	dlm_memory_exit();
	dlm_lockspace_exit();
	dlm_unregister_debugfs();
}
85
module_init(init_dlm);
module_exit(exit_dlm);

MODULE_DESCRIPTION("Distributed Lock Manager");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

/* the public in-kernel DLM API */
EXPORT_SYMBOL_GPL(dlm_new_lockspace);
EXPORT_SYMBOL_GPL(dlm_release_lockspace);
EXPORT_SYMBOL_GPL(dlm_lock);
EXPORT_SYMBOL_GPL(dlm_unlock);
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
new file mode 100644
index 000000000000..a3f7de7f3a8f
--- /dev/null
+++ b/fs/dlm/member.c
@@ -0,0 +1,327 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#include "dlm_internal.h"
14#include "lockspace.h"
15#include "member.h"
16#include "recoverd.h"
17#include "recover.h"
18#include "rcom.h"
19#include "config.h"
20
21/*
22 * Following called by dlm_recoverd thread
23 */
24
25static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
26{
27 struct dlm_member *memb = NULL;
28 struct list_head *tmp;
29 struct list_head *newlist = &new->list;
30 struct list_head *head = &ls->ls_nodes;
31
32 list_for_each(tmp, head) {
33 memb = list_entry(tmp, struct dlm_member, list);
34 if (new->nodeid < memb->nodeid)
35 break;
36 }
37
38 if (!memb)
39 list_add_tail(newlist, head);
40 else {
41 /* FIXME: can use list macro here */
42 newlist->prev = tmp->prev;
43 newlist->next = tmp;
44 tmp->prev->next = newlist;
45 tmp->prev = newlist;
46 }
47}
48
49static int dlm_add_member(struct dlm_ls *ls, int nodeid)
50{
51 struct dlm_member *memb;
52 int w;
53
54 memb = kzalloc(sizeof(struct dlm_member), GFP_KERNEL);
55 if (!memb)
56 return -ENOMEM;
57
58 w = dlm_node_weight(ls->ls_name, nodeid);
59 if (w < 0)
60 return w;
61
62 memb->nodeid = nodeid;
63 memb->weight = w;
64 add_ordered_member(ls, memb);
65 ls->ls_num_nodes++;
66 return 0;
67}
68
/* Move a departed member onto ls_nodes_gone (kept there so recovery
   can still ask dlm_is_removed() about it) and drop the node count. */
static void dlm_remove_member(struct dlm_ls *ls, struct dlm_member *memb)
{
	list_move(&memb->list, &ls->ls_nodes_gone);
	ls->ls_num_nodes--;
}
74
75static int dlm_is_member(struct dlm_ls *ls, int nodeid)
76{
77 struct dlm_member *memb;
78
79 list_for_each_entry(memb, &ls->ls_nodes, list) {
80 if (memb->nodeid == nodeid)
81 return 1;
82 }
83 return 0;
84}
85
86int dlm_is_removed(struct dlm_ls *ls, int nodeid)
87{
88 struct dlm_member *memb;
89
90 list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
91 if (memb->nodeid == nodeid)
92 return 1;
93 }
94 return 0;
95}
96
97static void clear_memb_list(struct list_head *head)
98{
99 struct dlm_member *memb;
100
101 while (!list_empty(head)) {
102 memb = list_entry(head->next, struct dlm_member, list);
103 list_del(&memb->list);
104 kfree(memb);
105 }
106}
107
/* Drop all current members and reset the member count. */
void dlm_clear_members(struct dlm_ls *ls)
{
	clear_memb_list(&ls->ls_nodes);
	ls->ls_num_nodes = 0;
}
113
/* Drop the record of members that departed (ls_nodes_gone). */
void dlm_clear_members_gone(struct dlm_ls *ls)
{
	clear_memb_list(&ls->ls_nodes_gone);
}
118
/* Rebuild ls_node_array with one slot per unit of member weight
   (presumably used for weighted node selection — confirm with the
   users of ls_node_array). */
static void make_member_array(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	int i, w, x = 0, total = 0, all_zero = 0, *array;

	kfree(ls->ls_node_array);
	ls->ls_node_array = NULL;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->weight)
			total += memb->weight;
	}

	/* all nodes revert to weight of 1 if all have weight 0 */

	if (!total) {
		total = ls->ls_num_nodes;
		all_zero = 1;
	}

	ls->ls_total_weight = total;

	array = kmalloc(sizeof(int) * total, GFP_KERNEL);
	if (!array)
		return;		/* NOTE(review): silently leaves
				   ls_node_array NULL on -ENOMEM;
				   confirm callers tolerate that */

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (!all_zero && !memb->weight)
			continue;

		if (all_zero)
			w = 1;
		else
			w = memb->weight;

		DLM_ASSERT(x < total, printk("total %d x %d\n", total, x););

		/* a node with weight w occupies w consecutive slots */
		for (i = 0; i < w; i++)
			array[x++] = memb->nodeid;
	}

	ls->ls_node_array = array;
}
162
163/* send a status request to all members just to establish comms connections */
164
/* send a status request to all members just to establish comms connections */

/* Aborts early if recovery has been stopped or any status exchange
   fails; returns 0 or the first error. */
static int ping_members(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	int error = 0;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		error = dlm_recovery_stopped(ls);
		if (error)
			break;
		error = dlm_rcom_status(ls, memb->nodeid);
		if (error)
			break;
	}
	if (error)
		log_debug(ls, "ping_members aborted %d last nodeid %d",
			  error, ls->ls_recover_nodeid);
	return error;
}
183
/* Reconcile the lockspace member list with the node list in rv:
   departed members move to ls_nodes_gone, new nodes are added, the
   weighted node array is rebuilt, then comms to all members are
   verified.  *neg_out gets the number of departed members.  Runs in
   the dlm_recoverd thread. */
int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
{
	struct dlm_member *memb, *safe;
	int i, error, found, pos = 0, neg = 0, low = -1;

	/* move departed members from ls_nodes to ls_nodes_gone */

	list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
		found = 0;
		for (i = 0; i < rv->node_count; i++) {
			if (memb->nodeid == rv->nodeids[i]) {
				found = 1;
				break;
			}
		}

		if (!found) {
			neg++;
			dlm_remove_member(ls, memb);
			log_debug(ls, "remove member %d", memb->nodeid);
		}
	}

	/* add new members to ls_nodes */

	for (i = 0; i < rv->node_count; i++) {
		if (dlm_is_member(ls, rv->nodeids[i]))
			continue;
		/* FIXME(review): return value ignored — an -ENOMEM here
		   silently leaves the member list short */
		dlm_add_member(ls, rv->nodeids[i]);
		pos++;
		log_debug(ls, "add member %d", rv->nodeids[i]);
	}

	/* record the lowest member nodeid in ls_low_nodeid */
	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (low == -1 || memb->nodeid < low)
			low = memb->nodeid;
	}
	ls->ls_low_nodeid = low;

	make_member_array(ls);
	dlm_set_recover_status(ls, DLM_RS_NODES);
	*neg_out = neg;

	error = ping_members(ls);
	if (error)
		goto out;

	error = dlm_recover_members_wait(ls);
 out:
	log_debug(ls, "total members %d error %d", ls->ls_num_nodes, error);
	return error;
}
236
237/*
238 * Following called from lockspace.c
239 */
240
/* Suspend all locking activity in the lockspace prior to a
   reconfiguration; called from lockspace.c.  Always returns 0. */
int dlm_ls_stop(struct dlm_ls *ls)
{
	int new;

	/*
	 * A stop cancels any recovery that's in progress (see RECOVERY_STOP,
	 * dlm_recovery_stopped()) and prevents any new locks from being
	 * processed (see RUNNING, dlm_locking_stopped()).
	 */

	spin_lock(&ls->ls_recover_lock);
	set_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
	/* "new" is nonzero only on the first stop after running; it gates
	   taking ls_in_recovery so it is never down'd twice */
	new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
	ls->ls_recover_seq++;
	spin_unlock(&ls->ls_recover_lock);

	/*
	 * This in_recovery lock does two things:
	 *
	 * 1) Keeps this function from returning until all threads are out
	 *    of locking routines and locking is truely stopped.
	 * 2) Keeps any new requests from being processed until it's unlocked
	 *    when recovery is complete.
	 */

	if (new)
		down_write(&ls->ls_in_recovery);

	/*
	 * The recoverd suspend/resume makes sure that dlm_recoverd (if
	 * running) has noticed the clearing of RUNNING above and quit
	 * processing the previous recovery.  This will be true for all nodes
	 * before any nodes start the new recovery.
	 */

	dlm_recoverd_suspend(ls);
	ls->ls_recover_status = 0;
	dlm_recoverd_resume(ls);
	return 0;
}
281
/* Kick off a new recovery with the given member set; called from
   lockspace.c after dlm_ls_stop().  Ownership of "ids" passes into the
   dlm_recover args on success; both are freed on failure. */
int dlm_ls_start(struct dlm_ls *ls)
{
	struct dlm_recover *rv = NULL, *rv_old;
	int *ids = NULL;
	int error, count;

	rv = kzalloc(sizeof(struct dlm_recover), GFP_KERNEL);
	if (!rv)
		return -ENOMEM;

	/* dlm_nodeid_list() returns the node count or -errno */
	error = count = dlm_nodeid_list(ls->ls_name, &ids);
	if (error <= 0)
		goto fail;

	spin_lock(&ls->ls_recover_lock);

	/* the lockspace needs to be stopped before it can be started */

	if (!dlm_locking_stopped(ls)) {
		spin_unlock(&ls->ls_recover_lock);
		log_error(ls, "start ignored: lockspace running");
		error = -EINVAL;
		goto fail;
	}

	/* publish the new recovery args; a not-yet-consumed older set is
	   superseded and freed below */
	rv->nodeids = ids;
	rv->node_count = count;
	rv->seq = ++ls->ls_recover_seq;
	rv_old = ls->ls_recover_args;
	ls->ls_recover_args = rv;
	spin_unlock(&ls->ls_recover_lock);

	if (rv_old) {
		kfree(rv_old->nodeids);
		kfree(rv_old);
	}

	dlm_recoverd_kick(ls);
	return 0;

 fail:
	kfree(rv);
	kfree(ids);
	return error;
}
327
diff --git a/fs/dlm/member.h b/fs/dlm/member.h
new file mode 100644
index 000000000000..927c08c19214
--- /dev/null
+++ b/fs/dlm/member.h
@@ -0,0 +1,24 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
#ifndef __MEMBER_DOT_H__
#define __MEMBER_DOT_H__

/* suspend / (re)start locking around a membership change */
int dlm_ls_stop(struct dlm_ls *ls);
int dlm_ls_start(struct dlm_ls *ls);
void dlm_clear_members(struct dlm_ls *ls);
void dlm_clear_members_gone(struct dlm_ls *ls);
/* rebuild the member list from rv; *neg_out = number of departed nodes */
int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv,int *neg_out);
int dlm_is_removed(struct dlm_ls *ls, int nodeid);

#endif				/* __MEMBER_DOT_H__ */
24
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
new file mode 100644
index 000000000000..989b608fd836
--- /dev/null
+++ b/fs/dlm/memory.c
@@ -0,0 +1,116 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "config.h"
16#include "memory.h"
17
18static kmem_cache_t *lkb_cache;
19
20
21int dlm_memory_init(void)
22{
23 int ret = 0;
24
25 lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb),
26 __alignof__(struct dlm_lkb), 0, NULL, NULL);
27 if (!lkb_cache)
28 ret = -ENOMEM;
29 return ret;
30}
31
void dlm_memory_exit(void)
{
	/* guard needed: kmem_cache_destroy(NULL) is not safe on kernels
	   of this vintage */
	if (lkb_cache)
		kmem_cache_destroy(lkb_cache);
}
37
38char *allocate_lvb(struct dlm_ls *ls)
39{
40 char *p;
41
42 p = kmalloc(ls->ls_lvblen, GFP_KERNEL);
43 if (p)
44 memset(p, 0, ls->ls_lvblen);
45 return p;
46}
47
/* Free a lock value block; NULL is allowed (kfree no-op). */
void free_lvb(char *p)
{
	kfree(p);
}
52
53/* FIXME: have some minimal space built-in to rsb for the name and
54 kmalloc a separate name if needed, like dentries are done */
55
56struct dlm_rsb *allocate_rsb(struct dlm_ls *ls, int namelen)
57{
58 struct dlm_rsb *r;
59
60 DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,);
61
62 r = kmalloc(sizeof(*r) + namelen, GFP_KERNEL);
63 if (r)
64 memset(r, 0, sizeof(*r) + namelen);
65 return r;
66}
67
68void free_rsb(struct dlm_rsb *r)
69{
70 if (r->res_lvbptr)
71 free_lvb(r->res_lvbptr);
72 kfree(r);
73}
74
75struct dlm_lkb *allocate_lkb(struct dlm_ls *ls)
76{
77 struct dlm_lkb *lkb;
78
79 lkb = kmem_cache_alloc(lkb_cache, GFP_KERNEL);
80 if (lkb)
81 memset(lkb, 0, sizeof(*lkb));
82 return lkb;
83}
84
85void free_lkb(struct dlm_lkb *lkb)
86{
87 if (lkb->lkb_flags & DLM_IFL_USER) {
88 struct dlm_user_args *ua;
89 ua = (struct dlm_user_args *)lkb->lkb_astparam;
90 if (ua) {
91 if (ua->lksb.sb_lvbptr)
92 kfree(ua->lksb.sb_lvbptr);
93 kfree(ua);
94 }
95 }
96 kmem_cache_free(lkb_cache, lkb);
97}
98
99struct dlm_direntry *allocate_direntry(struct dlm_ls *ls, int namelen)
100{
101 struct dlm_direntry *de;
102
103 DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,
104 printk("namelen = %d\n", namelen););
105
106 de = kmalloc(sizeof(*de) + namelen, GFP_KERNEL);
107 if (de)
108 memset(de, 0, sizeof(*de) + namelen);
109 return de;
110}
111
/* Free a directory entry allocated by allocate_direntry(). */
void free_direntry(struct dlm_direntry *de)
{
	kfree(de);
}
116
diff --git a/fs/dlm/memory.h b/fs/dlm/memory.h
new file mode 100644
index 000000000000..6ead158ccc5c
--- /dev/null
+++ b/fs/dlm/memory.h
@@ -0,0 +1,29 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
#ifndef __MEMORY_DOT_H__
#define __MEMORY_DOT_H__

/* slab-cache lifetime */
int dlm_memory_init(void);
void dlm_memory_exit(void);
/* allocators return zeroed objects, NULL on failure; every allocate_*
   has a matching free_* */
struct dlm_rsb *allocate_rsb(struct dlm_ls *ls, int namelen);
void free_rsb(struct dlm_rsb *r);
struct dlm_lkb *allocate_lkb(struct dlm_ls *ls);
void free_lkb(struct dlm_lkb *l);
struct dlm_direntry *allocate_direntry(struct dlm_ls *ls, int namelen);
void free_direntry(struct dlm_direntry *de);
char *allocate_lvb(struct dlm_ls *ls);
void free_lvb(char *l);

#endif				/* __MEMORY_DOT_H__ */
29
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
new file mode 100644
index 000000000000..c9b1c3d535f4
--- /dev/null
+++ b/fs/dlm/midcomms.c
@@ -0,0 +1,140 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14/*
15 * midcomms.c
16 *
17 * This is the appallingly named "mid-level" comms layer.
18 *
19 * Its purpose is to take packets from the "real" comms layer,
20 * split them up into packets and pass them to the interested
21 * part of the locking mechanism.
22 *
23 * It also takes messages from the locking layer, formats them
24 * into packets and sends them to the comms layer.
25 */
26
27#include "dlm_internal.h"
28#include "lowcomms.h"
29#include "config.h"
30#include "rcom.h"
31#include "lock.h"
32#include "midcomms.h"
33
34
/*
 * Copy "len" bytes out of a circular buffer of size "limit", starting
 * at "offset" and wrapping to the start of the buffer when the run
 * crosses the end.  Caller guarantees len <= limit.
 */
static void copy_from_cb(void *dst, const void *base, unsigned offset,
			 unsigned len, unsigned limit)
{
	unsigned first = len;

	if (offset + first > limit)
		first = limit - offset;	/* bytes up to the wrap point */
	memcpy(dst, base + offset, first);
	if (first < len)
		memcpy(dst + first, base, len - first);
}
47
48/*
49 * Called from the low-level comms layer to process a buffer of
50 * commands.
51 *
52 * Only complete messages are processed here, any "spare" bytes from
53 * the end of a buffer are saved and tacked onto the front of the next
54 * message that comes in. I doubt this will happen very often but we
55 * need to be able to cope with it and I don't want the task to be waiting
56 * for packets to come in when there is useful work to be done.
57 */
58
/* Consume complete DLM messages from the circular receive buffer and
   dispatch them to the lock or recovery layers.  Returns the number of
   bytes consumed (0 if only a partial message is present, so lowcomms
   will call again with more data), or -errno on a malformed message. */
int dlm_process_incoming_buffer(int nodeid, const void *base,
				unsigned offset, unsigned len, unsigned limit)
{
	unsigned char __tmp[DLM_INBUF_LEN];
	struct dlm_header *msg = (struct dlm_header *) __tmp;
	int ret = 0;
	int err = 0;
	uint16_t msglen;
	uint32_t lockspace;

	while (len > sizeof(struct dlm_header)) {

		/* Copy just the header to check the total length.  The
		   message may wrap around the end of the buffer back to the
		   start, so we need to use a temp buffer and copy_from_cb. */

		copy_from_cb(msg, base, offset, sizeof(struct dlm_header),
			     limit);

		msglen = le16_to_cpu(msg->h_length);
		lockspace = msg->h_lockspace;

		/* sanity-check the claimed length before trusting it */
		err = -EINVAL;
		if (msglen < sizeof(struct dlm_header))
			break;
		err = -E2BIG;
		if (msglen > dlm_config.buffer_size) {
			log_print("message size %d from %d too big, buf len %d",
				  msglen, nodeid, len);
			break;
		}
		err = 0;

		/* If only part of the full message is contained in this
		   buffer, then do nothing and wait for lowcomms to call
		   us again later with more data.  We return 0 meaning
		   we've consumed none of the input buffer. */

		if (msglen > len)
			break;

		/* Allocate a larger temp buffer if the full message won't fit
		   in the buffer on the stack (which should work for most
		   ordinary messages). */

		if (msglen > sizeof(__tmp) &&
		    msg == (struct dlm_header *) __tmp) {
			msg = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
			if (msg == NULL)
				return ret;	/* report what was consumed */
		}

		copy_from_cb(msg, base, offset, msglen, limit);

		BUG_ON(lockspace != msg->h_lockspace);

		ret += msglen;
		offset += msglen;
		/* masking implies "limit" is a power of two — assumed
		   invariant of the receive buffer; TODO confirm */
		offset &= (limit - 1);
		len -= msglen;

		switch (msg->h_cmd) {
		case DLM_MSG:
			dlm_receive_message(msg, nodeid, 0);
			break;

		case DLM_RCOM:
			dlm_receive_rcom(msg, nodeid);
			break;

		default:
			log_print("unknown msg type %x from %u: %u %u %u %u",
				  msg->h_cmd, nodeid, msglen, len, offset, ret);
		}
	}

	/* free the oversized temp buffer if one was allocated */
	if (msg != (struct dlm_header *) __tmp)
		kfree(msg);

	return err ? err : ret;
}
140
diff --git a/fs/dlm/midcomms.h b/fs/dlm/midcomms.h
new file mode 100644
index 000000000000..95852a5f111d
--- /dev/null
+++ b/fs/dlm/midcomms.h
@@ -0,0 +1,21 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
#ifndef __MIDCOMMS_DOT_H__
#define __MIDCOMMS_DOT_H__

/* split a circular receive buffer into complete DLM messages and
   dispatch them; returns bytes consumed or -errno */
int dlm_process_incoming_buffer(int nodeid, const void *base, unsigned offset,
				unsigned len, unsigned limit);

#endif				/* __MIDCOMMS_DOT_H__ */
21
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
new file mode 100644
index 000000000000..518239a8b1e9
--- /dev/null
+++ b/fs/dlm/rcom.c
@@ -0,0 +1,472 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "member.h"
17#include "lowcomms.h"
18#include "midcomms.h"
19#include "rcom.h"
20#include "recover.h"
21#include "dir.h"
22#include "config.h"
23#include "memory.h"
24#include "lock.h"
25#include "util.h"
26
27
/* Wait condition for dlm_wait_function(): true once a synchronous
   rcom reply has arrived (set by receive_sync_reply()). */
static int rcom_response(struct dlm_ls *ls)
{
	return test_bit(LSFL_RCOM_READY, &ls->ls_flags);
}
32
/* Reserve a lowcomms buffer for a recovery message to to_nodeid and
   fill in the common header fields; "len" is the payload size beyond
   struct dlm_rcom.  On success returns 0 with *rc_ret/*mh_ret set; the
   message is sent later via send_rcom(). */
static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
		       struct dlm_rcom **rc_ret, struct dlm_mhandle **mh_ret)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	char *mb;
	int mb_len = sizeof(struct dlm_rcom) + len;

	mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_KERNEL, &mb);
	if (!mh) {
		log_print("create_rcom to %d type %d len %d ENOBUFS",
			  to_nodeid, type, len);
		return -ENOBUFS;
	}
	memset(mb, 0, mb_len);

	rc = (struct dlm_rcom *) mb;

	rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
	rc->rc_header.h_lockspace = ls->ls_global_id;
	rc->rc_header.h_nodeid = dlm_our_nodeid();
	rc->rc_header.h_length = mb_len;
	rc->rc_header.h_cmd = DLM_RCOM;

	rc->rc_type = type;

	*mh_ret = mh;
	*rc_ret = rc;
	return 0;
}
63
/* Byte-swap the message to wire order and hand it to lowcomms. */
static void send_rcom(struct dlm_ls *ls, struct dlm_mhandle *mh,
		      struct dlm_rcom *rc)
{
	dlm_rcom_out(rc);
	dlm_lowcomms_commit_buffer(mh);
}
70
71/* When replying to a status request, a node also sends back its
72 configuration values. The requesting node then checks that the remote
73 node is configured the same way as itself. */
74
/* Fill the rcom_config payload with our lockspace parameters so the
   requesting node can verify both sides match (see check_config()). */
static void make_config(struct dlm_ls *ls, struct rcom_config *rf)
{
	rf->rf_lvblen = ls->ls_lvblen;
	rf->rf_lsflags = ls->ls_exflags;
}
80
81static int check_config(struct dlm_ls *ls, struct rcom_config *rf, int nodeid)
82{
83 if (rf->rf_lvblen != ls->ls_lvblen ||
84 rf->rf_lsflags != ls->ls_exflags) {
85 log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x",
86 ls->ls_lvblen, ls->ls_exflags,
87 nodeid, rf->rf_lvblen, rf->rf_lsflags);
88 return -EINVAL;
89 }
90 return 0;
91}
92
/* Synchronously ask nodeid for its recovery status (and config).
   The reply lands in ls_recover_buf; callers read rc_result from
   there.  A request to ourselves is answered locally without comms. */
int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	int error = 0;

	memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
	ls->ls_recover_nodeid = nodeid;

	if (nodeid == dlm_our_nodeid()) {
		rc = (struct dlm_rcom *) ls->ls_recover_buf;
		rc->rc_result = dlm_recover_status(ls);
		goto out;
	}

	error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh);
	if (error)
		goto out;
	/* rc_id sequences the exchange so late replies can be rejected
	   (see receive_sync_reply()) */
	rc->rc_id = ++ls->ls_rcom_seq;

	send_rcom(ls, mh, rc);

	error = dlm_wait_function(ls, &rcom_response);
	clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
	if (error)
		goto out;

	rc = (struct dlm_rcom *) ls->ls_recover_buf;

	if (rc->rc_result == -ESRCH) {
		/* we pretend the remote lockspace exists with 0 status */
		log_debug(ls, "remote node %d not ready", nodeid);
		rc->rc_result = 0;
	} else
		error = check_config(ls, (struct rcom_config *) rc->rc_buf,
				     nodeid);
	/* the caller looks at rc_result for the remote recovery status */
 out:
	return error;
}
133
/* Answer a STATUS request: echo rc_id, report our recovery status and
   attach our config for the requester's check_config(). */
static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	int error, nodeid = rc_in->rc_header.h_nodeid;

	error = create_rcom(ls, nodeid, DLM_RCOM_STATUS_REPLY,
			    sizeof(struct rcom_config), &rc, &mh);
	if (error)
		return;		/* requester will time out and retry */
	rc->rc_id = rc_in->rc_id;
	rc->rc_result = dlm_recover_status(ls);
	make_config(ls, (struct rcom_config *) rc->rc_buf);

	send_rcom(ls, mh, rc);
}
150
/* Deliver a synchronous reply: drop it if its rc_id does not match the
   current ls_rcom_seq (a stale reply from an earlier exchange), else
   copy it into ls_recover_buf and wake the waiter. */
static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	if (rc_in->rc_id != ls->ls_rcom_seq) {
		/* NOTE(review): %llx with rc_id/ls_rcom_seq may need
		   (unsigned long long) casts depending on their types */
		log_debug(ls, "reject old reply %d got %llx wanted %llx",
			  rc_in->rc_type, rc_in->rc_id, ls->ls_rcom_seq);
		return;
	}
	memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
	set_bit(LSFL_RCOM_READY, &ls->ls_flags);
	wake_up(&ls->ls_wait_general);
}
162
/* STATUS replies use the generic synchronous-reply path. */
static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	receive_sync_reply(ls, rc_in);
}
167
/* Synchronously fetch the next batch of master resource names from
   nodeid, continuing after last_name/last_len; results arrive in
   ls_recover_buf.  A request to ourselves is served locally. */
int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	int error = 0, len = sizeof(struct dlm_rcom);

	memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
	ls->ls_recover_nodeid = nodeid;

	if (nodeid == dlm_our_nodeid()) {
		dlm_copy_master_names(ls, last_name, last_len,
		                      ls->ls_recover_buf + len,
		                      dlm_config.buffer_size - len, nodeid);
		goto out;
	}

	error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len, &rc, &mh);
	if (error)
		goto out;
	memcpy(rc->rc_buf, last_name, last_len);
	rc->rc_id = ++ls->ls_rcom_seq;

	send_rcom(ls, mh, rc);

	error = dlm_wait_function(ls, &rcom_response);
	clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
 out:
	return error;
}
197
198static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
199{
200 struct dlm_rcom *rc;
201 struct dlm_mhandle *mh;
202 int error, inlen, outlen;
203 int nodeid = rc_in->rc_header.h_nodeid;
204 uint32_t status = dlm_recover_status(ls);
205
206 /*
207 * We can't run dlm_dir_rebuild_send (which uses ls_nodes) while
208 * dlm_recoverd is running ls_nodes_reconfig (which changes ls_nodes).
209 * It could only happen in rare cases where we get a late NAMES
210 * message from a previous instance of recovery.
211 */
212
213 if (!(status & DLM_RS_NODES)) {
214 log_debug(ls, "ignoring RCOM_NAMES from %u", nodeid);
215 return;
216 }
217
218 nodeid = rc_in->rc_header.h_nodeid;
219 inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
220 outlen = dlm_config.buffer_size - sizeof(struct dlm_rcom);
221
222 error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh);
223 if (error)
224 return;
225 rc->rc_id = rc_in->rc_id;
226
227 dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen,
228 nodeid);
229 send_rcom(ls, mh, rc);
230}
231
/* NAMES replies use the generic synchronous-reply path. */
static void receive_rcom_names_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	receive_sync_reply(ls, rc_in);
}
236
/* Ask the directory node who masters resource r.  Asynchronous: the
   rsb pointer is stashed in rc_id so the reply can be matched back to
   it (see receive_rcom_lookup_reply / dlm_recover_master_reply). */
int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	struct dlm_ls *ls = r->res_ls;
	int error;

	error = create_rcom(ls, dir_nodeid, DLM_RCOM_LOOKUP, r->res_length,
			    &rc, &mh);
	if (error)
		goto out;
	memcpy(rc->rc_buf, r->res_name, r->res_length);
	rc->rc_id = (unsigned long) r;

	send_rcom(ls, mh, rc);
 out:
	return error;
}
255
/* Answer a LOOKUP: resolve the named resource in our directory and
   reply with the master nodeid (or a negative error) in rc_result. */
static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	struct dlm_rcom *rc;
	struct dlm_mhandle *mh;
	int error, ret_nodeid, nodeid = rc_in->rc_header.h_nodeid;
	int len = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);

	error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh);
	if (error)
		return;

	error = dlm_dir_lookup(ls, nodeid, rc_in->rc_buf, len, &ret_nodeid);
	if (error)
		ret_nodeid = error;	/* error travels in rc_result */
	rc->rc_result = ret_nodeid;
	rc->rc_id = rc_in->rc_id;

	send_rcom(ls, mh, rc);
}
275
/* Hand a LOOKUP reply to the recovery code, which matches it back to
   the waiting rsb via rc_id. */
static void receive_rcom_lookup_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
	dlm_recover_master_reply(ls, rc_in);
}
280
/* Serialise the state of lkb (held on rsb r) into the wire-format
   rcom_lock payload used to transfer locks to a new master. */
static void pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb,
			   struct rcom_lock *rl)
{
	memset(rl, 0, sizeof(*rl));

	rl->rl_ownpid = lkb->lkb_ownpid;
	rl->rl_lkid = lkb->lkb_id;
	rl->rl_exflags = lkb->lkb_exflags;
	rl->rl_flags = lkb->lkb_flags;
	rl->rl_lvbseq = lkb->lkb_lvbseq;
	rl->rl_rqmode = lkb->lkb_rqmode;
	rl->rl_grmode = lkb->lkb_grmode;
	rl->rl_status = lkb->lkb_status;
	rl->rl_wait_type = lkb->lkb_wait_type;

	/* record which callbacks the owner registered */
	if (lkb->lkb_bastaddr)
		rl->rl_asts |= AST_BAST;
	if (lkb->lkb_astaddr)
		rl->rl_asts |= AST_COMP;

	rl->rl_namelen = r->res_length;
	memcpy(rl->rl_name, r->res_name, r->res_length);

	/* FIXME: might we have an lvb without DLM_LKF_VALBLK set ?
	   If so, receive_rcom_lock_args() won't take this copy. */

	if (lkb->lkb_lvbptr)
		memcpy(rl->rl_lvb, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
}
310
311int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
312{
313 struct dlm_ls *ls = r->res_ls;
314 struct dlm_rcom *rc;
315 struct dlm_mhandle *mh;
316 struct rcom_lock *rl;
317 int error, len = sizeof(struct rcom_lock);
318
319 if (lkb->lkb_lvbptr)
320 len += ls->ls_lvblen;
321
322 error = create_rcom(ls, r->res_nodeid, DLM_RCOM_LOCK, len, &rc, &mh);
323 if (error)
324 goto out;
325
326 rl = (struct rcom_lock *) rc->rc_buf;
327 pack_rcom_lock(r, lkb, rl);
328 rc->rc_id = (unsigned long) r;
329
330 send_rcom(ls, mh, rc);
331 out:
332 return error;
333}
334
/* New master side: install the received lock as a master copy, then echo the
   same rcom_lock back so the sender learns the remote lkid/result.
   NOTE(review): dlm_recover_master_copy()'s error result is not checked here;
   presumably rl_result carries any failure back to the sender — confirm. */
335static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
336{
337 struct dlm_rcom *rc;
338 struct dlm_mhandle *mh;
339 int error, nodeid = rc_in->rc_header.h_nodeid;
340
341 dlm_recover_master_copy(ls, rc_in);
342
343 error = create_rcom(ls, nodeid, DLM_RCOM_LOCK_REPLY,
344       sizeof(struct rcom_lock), &rc, &mh);
345 if (error)
346  return;
347
348 /* We send back the same rcom_lock struct we received, but
349    dlm_recover_master_copy() has filled in rl_remid and rl_result */
350
351 memcpy(rc->rc_buf, rc_in->rc_buf, sizeof(struct rcom_lock));
352 rc->rc_id = rc_in->rc_id;
353
354 send_rcom(ls, mh, rc);
355}
356
/* Original lock owner side: record the new master's lkid.  Replies that
   arrive before this node has finished directory recovery (DLM_RS_DIR not
   yet set) are stale and dropped. */
357static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
358{
359 uint32_t status = dlm_recover_status(ls);
360
361 if (!(status & DLM_RS_DIR)) {
362  log_debug(ls, "ignoring RCOM_LOCK_REPLY from %u",
363     rc_in->rc_header.h_nodeid);
364  return;
365 }
366
367 dlm_recover_process_copy(ls, rc_in);
368}
369
/* Reply to a status request for a lockspace we don't have (yet): build a
   STATUS_REPLY by hand — we cannot use create_rcom() because there is no
   dlm_ls to hang it on — and return -ESRCH in rc_result so the sender keeps
   polling until the lockspace comes up. */
370static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
371{
372 struct dlm_rcom *rc;
373 struct dlm_mhandle *mh;
374 char *mb;
375 int mb_len = sizeof(struct dlm_rcom);
376
377 mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb);
378 if (!mh)
379  return -ENOBUFS;
380 memset(mb, 0, mb_len);
381
382 rc = (struct dlm_rcom *) mb;
383
/* Echo the requester's lockspace id so it can match the reply. */
384 rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
385 rc->rc_header.h_lockspace = rc_in->rc_header.h_lockspace;
386 rc->rc_header.h_nodeid = dlm_our_nodeid();
387 rc->rc_header.h_length = mb_len;
388 rc->rc_header.h_cmd = DLM_RCOM;
389
390 rc->rc_type = DLM_RCOM_STATUS_REPLY;
391 rc->rc_id = rc_in->rc_id;
392 rc->rc_result = -ESRCH;
393
/* dlm_rcom_out() presumably converts to wire byte order — confirm. */
394 dlm_rcom_out(rc);
395 dlm_lowcomms_commit_buffer(mh);
396
397 return 0;
398}
399
400/* Called by dlm_recvd; corresponds to dlm_receive_message() but special
401 recovery-only comms are sent through here. */
402
/* Entry point for all incoming recovery (RCOM) messages, called by
   dlm_recvd.  Validates the lockspace and sender, drops stale messages if
   recovery has been restarted, then dispatches on rc_type.  Takes a
   reference on the lockspace that is dropped at "out". */
403void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
404{
405 struct dlm_rcom *rc = (struct dlm_rcom *) hd;
406 struct dlm_ls *ls;
407
/* dlm_rcom_in() presumably converts from wire byte order — confirm. */
408 dlm_rcom_in(rc);
409
410 /* If the lockspace doesn't exist then still send a status message
411    back; it's possible that it just doesn't have its global_id yet. */
412
413 ls = dlm_find_lockspace_global(hd->h_lockspace);
414 if (!ls) {
415  log_print("lockspace %x from %d not found",
416     hd->h_lockspace, nodeid);
417  send_ls_not_ready(nodeid, rc);
418  return;
419 }
420
/* STATUS is exempt: it is the poll other nodes use to detect when we are
   ready again, so it must get through even while recovery is stopped. */
421 if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) {
422  log_error(ls, "ignoring recovery message %x from %d",
423     rc->rc_type, nodeid);
424  goto out;
425 }
426
/* Sanity check: transport-level sender must match the header's claim. */
427 if (nodeid != rc->rc_header.h_nodeid) {
428  log_error(ls, "bad rcom nodeid %d from %d",
429     rc->rc_header.h_nodeid, nodeid);
430  goto out;
431 }
432
433 switch (rc->rc_type) {
434 case DLM_RCOM_STATUS:
435  receive_rcom_status(ls, rc);
436  break;
437
438 case DLM_RCOM_NAMES:
439  receive_rcom_names(ls, rc);
440  break;
441
442 case DLM_RCOM_LOOKUP:
443  receive_rcom_lookup(ls, rc);
444  break;
445
446 case DLM_RCOM_LOCK:
447  receive_rcom_lock(ls, rc);
448  break;
449
450 case DLM_RCOM_STATUS_REPLY:
451  receive_rcom_status_reply(ls, rc);
452  break;
453
454 case DLM_RCOM_NAMES_REPLY:
455  receive_rcom_names_reply(ls, rc);
456  break;
457
458 case DLM_RCOM_LOOKUP_REPLY:
459  receive_rcom_lookup_reply(ls, rc);
460  break;
461
462 case DLM_RCOM_LOCK_REPLY:
463  receive_rcom_lock_reply(ls, rc);
464  break;
465
466 default:
467  DLM_ASSERT(0, printk("rc_type=%x\n", rc->rc_type););
468 }
469 out:
470 dlm_put_lockspace(ls);
471}
472
diff --git a/fs/dlm/rcom.h b/fs/dlm/rcom.h
new file mode 100644
index 000000000000..d7984321ff41
--- /dev/null
+++ b/fs/dlm/rcom.h
@@ -0,0 +1,24 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __RCOM_DOT_H__
15#define __RCOM_DOT_H__
16
/* Recovery communication (rcom.c): synchronous status/name exchange and
   asynchronous lookup/lock transfer between nodes during recovery. */
17int dlm_rcom_status(struct dlm_ls *ls, int nodeid);
18int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len);
19int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid);
20int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
21void dlm_receive_rcom(struct dlm_header *hd, int nodeid);
22
23#endif
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
new file mode 100644
index 000000000000..a5e6d184872e
--- /dev/null
+++ b/fs/dlm/recover.c
@@ -0,0 +1,765 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "dir.h"
17#include "config.h"
18#include "ast.h"
19#include "memory.h"
20#include "rcom.h"
21#include "lock.h"
22#include "lowcomms.h"
23#include "member.h"
24#include "recover.h"
25
26
27/*
28 * Recovery waiting routines: these functions wait for a particular reply from
29 * a remote node, or for the remote node to report a certain status. They need
30 * to abort if the lockspace is stopped indicating a node has failed (perhaps
31 * the one being waited for).
32 */
33
34/*
35 * Wait until given function returns non-zero or lockspace is stopped
36 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes). When another
37 * function thinks it could have completed the waited-on task, they should wake
38 * up ls_wait_general to get an immediate response rather than waiting for the
39 * timer to detect the result. A timer wakes us up periodically while waiting
40 * to see if we should abort due to a node failure. This should only be called
41 * by the dlm_recoverd thread.
42 */
43
/* Periodic timer callback: re-arm and wake ls_wait_general so the waiter in
   dlm_wait_function() re-evaluates its test even if no explicit wakeup
   arrives (e.g. to notice that recovery was stopped by a node failure). */
44static void dlm_wait_timer_fn(unsigned long data)
45{
46 struct dlm_ls *ls = (struct dlm_ls *) data;
47 mod_timer(&ls->ls_timer, jiffies + (dlm_config.recover_timer * HZ));
48 wake_up(&ls->ls_wait_general);
49}
50
/* Block until testfn(ls) is true or recovery is aborted.  Only called by
   the dlm_recoverd thread, so the single ls_timer is never armed twice.
   Returns 0 on success, -EINTR if recovery was stopped while waiting. */
51int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
52{
53 int error = 0;
54
55 init_timer(&ls->ls_timer);
56 ls->ls_timer.function = dlm_wait_timer_fn;
57 ls->ls_timer.data = (long) ls;
58 ls->ls_timer.expires = jiffies + (dlm_config.recover_timer * HZ);
59 add_timer(&ls->ls_timer);
60
61 wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
62 del_timer_sync(&ls->ls_timer);
63
64 if (dlm_recovery_stopped(ls)) {
65  log_debug(ls, "dlm_wait_function aborted");
66  error = -EINTR;
67 }
68 return error;
69}
70
71/*
72 * An efficient way for all nodes to wait for all others to have a certain
73 * status. The node with the lowest nodeid polls all the others for their
74 * status (wait_status_all) and all the others poll the node with the low id
75 * for its accumulated result (wait_status_low). When all nodes have set
76 * status flag X, then status flag X_ALL will be set on the low nodeid.
77 */
78
/* Read the accumulated recovery-status flags under ls_recover_lock. */
79uint32_t dlm_recover_status(struct dlm_ls *ls)
80{
81 uint32_t status;
82 spin_lock(&ls->ls_recover_lock);
83 status = ls->ls_recover_status;
84 spin_unlock(&ls->ls_recover_lock);
85 return status;
86}
87
/* OR a status flag into ls_recover_status under ls_recover_lock. */
88void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
89{
90 spin_lock(&ls->ls_recover_lock);
91 ls->ls_recover_status |= status;
92 spin_unlock(&ls->ls_recover_lock);
93}
94
/* Low-nodeid side: poll every member (via dlm_rcom_status, whose reply
   lands in ls_recover_buf) until each reports wait_status, backing off
   from 20ms up to 1s between polls.  Aborts with -EINTR if recovery is
   stopped. */
95static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
96{
97 struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf;
98 struct dlm_member *memb;
99 int error = 0, delay;
100
101 list_for_each_entry(memb, &ls->ls_nodes, list) {
102  delay = 0;
103  for (;;) {
104   if (dlm_recovery_stopped(ls)) {
105    error = -EINTR;
106    goto out;
107   }
108
109   error = dlm_rcom_status(ls, memb->nodeid);
110   if (error)
111    goto out;
112
113   if (rc->rc_result & wait_status)
114    break;
115   if (delay < 1000)
116    delay += 20;
117   msleep(delay);
118  }
119 }
120 out:
121 return error;
122}
123
/* Non-low-nodeid side: poll only the low nodeid for the aggregated
   X_ALL status, with the same backoff and abort behavior as above. */
124static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status)
125{
126 struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf;
127 int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;
128
129 for (;;) {
130  if (dlm_recovery_stopped(ls)) {
131   error = -EINTR;
132   goto out;
133  }
134
135  error = dlm_rcom_status(ls, nodeid);
136  if (error)
137   break;
138
139  if (rc->rc_result & wait_status)
140   break;
141  if (delay < 1000)
142   delay += 20;
143  msleep(delay);
144 }
145 out:
146 return error;
147}
148
/* Barrier across the lockspace: the DLM_RS_* flags are laid out so that
   X_ALL == X << 1; the low nodeid gathers X from everyone then publishes
   X_ALL, which everyone else waits to observe. */
149static int wait_status(struct dlm_ls *ls, uint32_t status)
150{
151 uint32_t status_all = status << 1;
152 int error;
153
154 if (ls->ls_low_nodeid == dlm_our_nodeid()) {
155  error = wait_status_all(ls, status);
156  if (!error)
157   dlm_set_recover_status(ls, status_all);
158 } else
159  error = wait_status_low(ls, status_all);
160
161 return error;
162}
163
/* Convenience barriers for each recovery phase. */
164int dlm_recover_members_wait(struct dlm_ls *ls)
165{
166 return wait_status(ls, DLM_RS_NODES);
167}
168
169int dlm_recover_directory_wait(struct dlm_ls *ls)
170{
171 return wait_status(ls, DLM_RS_DIR);
172}
173
174int dlm_recover_locks_wait(struct dlm_ls *ls)
175{
176 return wait_status(ls, DLM_RS_LOCKS);
177}
178
179int dlm_recover_done_wait(struct dlm_ls *ls)
180{
181 return wait_status(ls, DLM_RS_DONE);
182}
183
184/*
185 * The recover_list contains all the rsb's for which we've requested the new
186 * master nodeid. As replies are returned from the resource directories the
187 * rsb's are removed from the list. When the list is empty we're done.
188 *
189 * The recover_list is later similarly used for all rsb's for which we've sent
190 * new lkb's and need to receive new corresponding lkid's.
191 *
192 * We use the address of the rsb struct as a simple local identifier for the
193 * rsb so we can match an rcom reply with the rsb it was sent for.
194 */
195
/* Test-under-lock whether any rsb's are still awaiting replies; used as the
   testfn for dlm_wait_function(). */
196static int recover_list_empty(struct dlm_ls *ls)
197{
198 int empty;
199
200 spin_lock(&ls->ls_recover_list_lock);
201 empty = list_empty(&ls->ls_recover_list);
202 spin_unlock(&ls->ls_recover_list_lock);
203
204 return empty;
205}
206
/* Add an rsb to the recover_list, taking a reference on it.  The
   list_empty() check makes the add idempotent: an rsb already queued (e.g.
   for multiple outstanding locks) is counted only once. */
207static void recover_list_add(struct dlm_rsb *r)
208{
209 struct dlm_ls *ls = r->res_ls;
210
211 spin_lock(&ls->ls_recover_list_lock);
212 if (list_empty(&r->res_recover_list)) {
213  list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
214  ls->ls_recover_list_count++;
215  dlm_hold_rsb(r);
216 }
217 spin_unlock(&ls->ls_recover_list_lock);
218}
219
/* Remove an rsb from the recover_list and drop the reference taken by
   recover_list_add().  The put is done outside the spinlock. */
220static void recover_list_del(struct dlm_rsb *r)
221{
222 struct dlm_ls *ls = r->res_ls;
223
224 spin_lock(&ls->ls_recover_list_lock);
225 list_del_init(&r->res_recover_list);
226 ls->ls_recover_list_count--;
227 spin_unlock(&ls->ls_recover_list_lock);
228
229 dlm_put_rsb(r);
230}
231
/* Match an rcom reply's rc_id (set from the rsb address on send) back to
   its rsb.  Returns NULL if no queued rsb matches. */
232static struct dlm_rsb *recover_list_find(struct dlm_ls *ls, uint64_t id)
233{
234 struct dlm_rsb *r = NULL;
235
236 spin_lock(&ls->ls_recover_list_lock);
237
238 list_for_each_entry(r, &ls->ls_recover_list, res_recover_list) {
239  if (id == (unsigned long) r)
240   goto out;
241 }
242 r = NULL;
243 out:
244 spin_unlock(&ls->ls_recover_list_lock);
245 return r;
246}
247
/* Drop everything from the recover_list, used when a recovery pass is
   aborted.  A non-zero residual count afterwards indicates an accounting
   bug, logged and reset rather than left to corrupt the next pass. */
248static void recover_list_clear(struct dlm_ls *ls)
249{
250 struct dlm_rsb *r, *s;
251
252 spin_lock(&ls->ls_recover_list_lock);
253 list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
254  list_del_init(&r->res_recover_list);
255  dlm_put_rsb(r);
256  ls->ls_recover_list_count--;
257 }
258
259 if (ls->ls_recover_list_count != 0) {
260  log_error(ls, "warning: recover_list_count %d",
261     ls->ls_recover_list_count);
262  ls->ls_recover_list_count = 0;
263 }
264 spin_unlock(&ls->ls_recover_list_lock);
265}
266
267
268/* Master recovery: find new master node for rsb's that were
269 mastered on nodes that have been removed.
270
271 dlm_recover_masters
272 recover_master
273 dlm_send_rcom_lookup -> receive_rcom_lookup
274 dlm_dir_lookup
275 receive_rcom_lookup_reply <-
276 dlm_recover_master_reply
277 set_new_master
278 set_master_lkbs
279 set_lock_master
280*/
281
282/*
283 * Set the lock master for all LKBs in a lock queue
284 * If we are the new master of the rsb, we may have received new
285 * MSTCPY locks from other nodes already which we need to ignore
286 * when setting the new nodeid.
287 */
288
/* Point every non-MSTCPY lkb on the given queue at the new master nodeid.
   MSTCPY locks were received from other nodes for rsb's we now master and
   must keep their original owner's nodeid. */
289static void set_lock_master(struct list_head *queue, int nodeid)
290{
291 struct dlm_lkb *lkb;
292
293 list_for_each_entry(lkb, queue, lkb_statequeue)
294  if (!(lkb->lkb_flags & DLM_IFL_MSTCPY))
295   lkb->lkb_nodeid = nodeid;
296}
297
/* Propagate res_nodeid to all three lock queues of the rsb. */
298static void set_master_lkbs(struct dlm_rsb *r)
299{
300 set_lock_master(&r->res_grantqueue, r->res_nodeid);
301 set_lock_master(&r->res_convertqueue, r->res_nodeid);
302 set_lock_master(&r->res_waitqueue, r->res_nodeid);
303}
304
/* Record the rsb's new master and flag it for the later recovery phases:
   NEW_MASTER drives dlm_recover_locks(), NEW_MASTER2 drives recover_lvb()
   and set_locks_purged().  nodeid == 0 means we are the master. */
312static void set_new_master(struct dlm_rsb *r, int nodeid)
313{
314 lock_rsb(r);
315 r->res_nodeid = nodeid;
316 set_master_lkbs(r);
317 rsb_set_flag(r, RSB_NEW_MASTER);
318 rsb_set_flag(r, RSB_NEW_MASTER2);
319 unlock_rsb(r);
320}
321
/* Find the new master of one rsb.  If we are the directory node the answer
   is local and applied synchronously; otherwise the rsb is queued on the
   recover_list and an async LOOKUP is sent (reply handled by
   dlm_recover_master_reply()). */
327static int recover_master(struct dlm_rsb *r)
328{
329 struct dlm_ls *ls = r->res_ls;
330 int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
331
332 dir_nodeid = dlm_dir_nodeid(r);
333
334 if (dir_nodeid == our_nodeid) {
335  error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
336           r->res_length, &ret_nodeid);
337  if (error)
338   log_error(ls, "recover dir lookup error %d", error);
339
/* nodeid 0 is the local-master convention used throughout. */
340  if (ret_nodeid == our_nodeid)
341   ret_nodeid = 0;
342  set_new_master(r, ret_nodeid);
343 } else {
344  recover_list_add(r);
345  error = dlm_send_rcom_lookup(r, dir_nodeid);
346 }
347
348 return error;
349}
350
/* No-directory mode: mastery is determined purely by the name hash.
   Returns 1 if the rsb had to be remastered (so the caller can count it),
   0 if the master is unchanged.  MSTCPY locks we held as the old master
   are purged before handing mastery over. */
356static int recover_master_static(struct dlm_rsb *r)
357{
358 int master = dlm_dir_nodeid(r);
359
360 if (master == dlm_our_nodeid())
361  master = 0;
362
363 if (r->res_nodeid != master) {
364  if (is_master(r))
365   dlm_purge_mstcpy_locks(r);
366  set_new_master(r, master);
367  return 1;
368 }
369 return 0;
370}
371
372/*
373 * Go through local root resources and for each rsb which has a master which
374 * has departed, get the new master nodeid from the directory. The dir will
375 * assign mastery to the first node to look up the new master. That means
376 * we'll discover in this lookup if we're the new master of any rsb's.
377 *
378 * We fire off all the dir lookup requests individually and asynchronously to
379 * the correct dir node.
380 */
381
/* Walk all root rsb's and fire off master lookups for those whose master
   departed, then wait for all async lookup replies (recover_list drained).
   On any error the recover_list is cleared so a retry starts clean.
   NOTE(review): the return value of recover_master() is dropped here; a
   failed dlm_send_rcom_lookup() would only surface via the later wait
   timing out / recovery stop — confirm this is intended. */
382int dlm_recover_masters(struct dlm_ls *ls)
383{
384 struct dlm_rsb *r;
385 int error = 0, count = 0;
386
387 log_debug(ls, "dlm_recover_masters");
388
389 down_read(&ls->ls_root_sem);
390 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
391  if (dlm_recovery_stopped(ls)) {
392   up_read(&ls->ls_root_sem);
393   error = -EINTR;
394   goto out;
395  }
396
397  if (dlm_no_directory(ls))
398   count += recover_master_static(r);
399  else if (!is_master(r) && dlm_is_removed(ls, r->res_nodeid)) {
400   recover_master(r);
401   count++;
402  }
403
/* Long walk; yield the CPU between rsb's. */
404  schedule();
405 }
406 up_read(&ls->ls_root_sem);
407
408 log_debug(ls, "dlm_recover_masters %d resources", count);
409
410 error = dlm_wait_function(ls, &recover_list_empty);
411 out:
412 if (error)
413  recover_list_clear(ls);
414 return error;
415}
416
/* Async LOOKUP_REPLY handler: match rc_id back to its rsb, apply the new
   master, and wake the waiter in dlm_recover_masters() once the list
   drains.  An unmatched id is logged and ignored (stale/duplicate reply). */
417int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
418{
419 struct dlm_rsb *r;
420 int nodeid;
421
422 r = recover_list_find(ls, rc->rc_id);
423 if (!r) {
424  log_error(ls, "dlm_recover_master_reply no id %llx",
425     (unsigned long long)rc->rc_id);
426  goto out;
427 }
428
429 nodeid = rc->rc_result;
430 if (nodeid == dlm_our_nodeid())
431  nodeid = 0;
432
433 set_new_master(r, nodeid);
434 recover_list_del(r);
435
436 if (recover_list_empty(ls))
437  wake_up(&ls->ls_wait_general);
438 out:
439 return 0;
440}
441
442
443/* Lock recovery: rebuild the process-copy locks we hold on a
444 remastered rsb on the new rsb master.
445
446 dlm_recover_locks
447 recover_locks
448 recover_locks_queue
449 dlm_send_rcom_lock -> receive_rcom_lock
450 dlm_recover_master_copy
451 receive_rcom_lock_reply <-
452 dlm_recover_process_copy
453*/
454
455
456/*
457 * keep a count of the number of lkb's we send to the new master; when we get
458 * an equal number of replies then recovery for the rsb is done
459 */
460
/* Send every lkb on one queue to the rsb's new master, counting each send
   in res_recover_locks_count; the count is decremented per reply in
   dlm_recovered_lock().  Stops at the first send error. */
461static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
462{
463 struct dlm_lkb *lkb;
464 int error = 0;
465
466 list_for_each_entry(lkb, head, lkb_statequeue) {
467  error = dlm_send_rcom_lock(r, lkb);
468  if (error)
469   break;
470  r->res_recover_locks_count++;
471 }
472
473 return error;
474}
475
/* Send all our locks on one remastered rsb (grant, convert, wait queues).
   If anything was sent the rsb goes on the recover_list to await replies;
   if the rsb had no locks at all its NEW_MASTER flag is cleared here. */
476static int recover_locks(struct dlm_rsb *r)
477{
478 int error = 0;
479
480 lock_rsb(r);
481
482 DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););
483
484 error = recover_locks_queue(r, &r->res_grantqueue);
485 if (error)
486  goto out;
487 error = recover_locks_queue(r, &r->res_convertqueue);
488 if (error)
489  goto out;
490 error = recover_locks_queue(r, &r->res_waitqueue);
491 if (error)
492  goto out;
493
494 if (r->res_recover_locks_count)
495  recover_list_add(r);
496 else
497  rsb_clear_flag(r, RSB_NEW_MASTER);
498 out:
499 unlock_rsb(r);
500 return error;
501}
502
/* Phase driver: for every remastered (NEW_MASTER) rsb we don't master,
   send our locks to the new master, then wait for all LOCK_REPLYs.  On
   error the recover_list is cleared; on success DLM_RS_LOCKS is published
   for the cluster-wide barrier. */
503int dlm_recover_locks(struct dlm_ls *ls)
504{
505 struct dlm_rsb *r;
506 int error, count = 0;
507
508 log_debug(ls, "dlm_recover_locks");
509
510 down_read(&ls->ls_root_sem);
511 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
512  if (is_master(r)) {
513   rsb_clear_flag(r, RSB_NEW_MASTER);
514   continue;
515  }
516
517  if (!rsb_flag(r, RSB_NEW_MASTER))
518   continue;
519
520  if (dlm_recovery_stopped(ls)) {
521   error = -EINTR;
522   up_read(&ls->ls_root_sem);
523   goto out;
524  }
525
526  error = recover_locks(r);
527  if (error) {
528   up_read(&ls->ls_root_sem);
529   goto out;
530  }
531
532  count += r->res_recover_locks_count;
533 }
534 up_read(&ls->ls_root_sem);
535
536 log_debug(ls, "dlm_recover_locks %d locks", count);
537
538 error = dlm_wait_function(ls, &recover_list_empty);
539 out:
540 if (error)
541  recover_list_clear(ls);
542 else
543  dlm_set_recover_status(ls, DLM_RS_LOCKS);
544 return error;
545}
546
/* Called per LOCK_REPLY: when the last outstanding lock for the rsb is
   acknowledged, clear NEW_MASTER, drop it from the recover_list, and wake
   the waiter in dlm_recover_locks() if the list is now empty. */
547void dlm_recovered_lock(struct dlm_rsb *r)
548{
549 DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););
550
551 r->res_recover_locks_count--;
552 if (!r->res_recover_locks_count) {
553  rsb_clear_flag(r, RSB_NEW_MASTER);
554  recover_list_del(r);
555 }
556
557 if (recover_list_empty(r->res_ls))
558  wake_up(&r->res_ls->ls_wait_general);
559}
560
561/*
562 * The lvb needs to be recovered on all master rsb's. This includes setting
563 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
564 * based on the lvb's of the locks held on the rsb.
565 *
566 * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb. If it
567 * was already set prior to recovery, it's not cleared, regardless of locks.
568 *
569 * The LVB contents are only considered for changing when this is a new master
570 * of the rsb (NEW_MASTER2). Then, the rsb's lvb is taken from any lkb with
571 * mode > CR. If no lkb's exist with mode above CR, the lvb contents are taken
572 * from the lkb with the largest lvb sequence number.
573 */
574
/* Rebuild the rsb's lvb after remastering, per the policy documented above:
   a lock granted above CR pins the lvb (its copy wins, taken via the goto);
   otherwise the copy with the highest lvb sequence wins; with no VALBLK
   locks at all the lvb is untouched; with only NL/CR locks VALNOTVALID is
   set.  Note the sequence comparison uses signed subtraction so it is
   wraparound-safe.  When big_lock_exists is set, 'lkb' still points at the
   lock that triggered the goto, which is relied on below. */
575static void recover_lvb(struct dlm_rsb *r)
576{
577 struct dlm_lkb *lkb, *high_lkb = NULL;
578 uint32_t high_seq = 0;
579 int lock_lvb_exists = 0;
580 int big_lock_exists = 0;
581 int lvblen = r->res_ls->ls_lvblen;
582
583 list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
584  if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
585   continue;
586
587  lock_lvb_exists = 1;
588
589  if (lkb->lkb_grmode > DLM_LOCK_CR) {
590   big_lock_exists = 1;
591   goto setflag;
592  }
593
594  if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
595   high_lkb = lkb;
596   high_seq = lkb->lkb_lvbseq;
597  }
598 }
599
600 list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
601  if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
602   continue;
603
604  lock_lvb_exists = 1;
605
606  if (lkb->lkb_grmode > DLM_LOCK_CR) {
607   big_lock_exists = 1;
608   goto setflag;
609  }
610
611  if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
612   high_lkb = lkb;
613   high_seq = lkb->lkb_lvbseq;
614  }
615 }
616
617 setflag:
618 if (!lock_lvb_exists)
619  goto out;
620
621 if (!big_lock_exists)
622  rsb_set_flag(r, RSB_VALNOTVALID);
623
624 /* don't mess with the lvb unless we're the new master */
625 if (!rsb_flag(r, RSB_NEW_MASTER2))
626  goto out;
627
/* Allocation failure is tolerated: we simply leave the rsb without an lvb
   rather than failing recovery. */
628 if (!r->res_lvbptr) {
629  r->res_lvbptr = allocate_lvb(r->res_ls);
630  if (!r->res_lvbptr)
631   goto out;
632 }
633
634 if (big_lock_exists) {
635  r->res_lvbseq = lkb->lkb_lvbseq;
636  memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
637 } else if (high_lkb) {
638  r->res_lvbseq = high_lkb->lkb_lvbseq;
639  memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
640 } else {
641  r->res_lvbseq = 0;
642  memset(r->res_lvbptr, 0, lvblen);
643 }
644 out:
645 return;
646}
647
648/* All master rsb's flagged RECOVER_CONVERT need to be looked at. The locks
649 converting PR->CW or CW->PR need to have their lkb_grmode set. */
650
/* Fix up in-place converting locks (grmode still IV) on a remastered rsb:
   inherit the PR/CW grant mode found on the grant queue, or fall back to
   the lock's own request mode when no such granted lock exists. */
651static void recover_conversion(struct dlm_rsb *r)
652{
653 struct dlm_lkb *lkb;
654 int grmode = -1;
655
656 list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
657  if (lkb->lkb_grmode == DLM_LOCK_PR ||
658      lkb->lkb_grmode == DLM_LOCK_CW) {
659   grmode = lkb->lkb_grmode;
660   break;
661  }
662 }
663
664 list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
665  if (lkb->lkb_grmode != DLM_LOCK_IV)
666   continue;
667  if (grmode == -1)
668   lkb->lkb_grmode = lkb->lkb_rqmode;
669  else
670   lkb->lkb_grmode = grmode;
671 }
672}
673
/* Flag a newly-mastered rsb with pending waiters/converters so
   dlm_grant_after_purge() re-evaluates grants (locks from removed nodes
   may have freed up the resource). */
678static void set_locks_purged(struct dlm_rsb *r)
679{
680 if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
681  rsb_set_flag(r, RSB_LOCKS_PURGED);
682}
683
/* Final per-rsb fixups on the rsb's we master: conversion resolution, purge
   flagging, and lvb reconstruction.  The RECOVER_CONVERT/NEW_MASTER2 flags
   are consumed (cleared) on every rsb, master or not. */
684void dlm_recover_rsbs(struct dlm_ls *ls)
685{
686 struct dlm_rsb *r;
687 int count = 0;
688
689 log_debug(ls, "dlm_recover_rsbs");
690
691 down_read(&ls->ls_root_sem);
692 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
693  lock_rsb(r);
694  if (is_master(r)) {
695   if (rsb_flag(r, RSB_RECOVER_CONVERT))
696    recover_conversion(r);
697   if (rsb_flag(r, RSB_NEW_MASTER2))
698    set_locks_purged(r);
699   recover_lvb(r);
700   count++;
701  }
702  rsb_clear_flag(r, RSB_RECOVER_CONVERT);
703  rsb_clear_flag(r, RSB_NEW_MASTER2);
704  unlock_rsb(r);
705 }
706 up_read(&ls->ls_root_sem);
707
708 log_debug(ls, "dlm_recover_rsbs %d rsbs", count);
709}
710
711/* Create a single list of all root rsb's to be used during recovery */
712
/* Snapshot every rsb from the hash table onto ls_root_list, holding a
   reference on each; the recovery phases above iterate this list instead of
   the hash table.  Must be paired with dlm_release_root_list(). */
713int dlm_create_root_list(struct dlm_ls *ls)
714{
715 struct dlm_rsb *r;
716 int i, error = 0;
717
718 down_write(&ls->ls_root_sem);
719 if (!list_empty(&ls->ls_root_list)) {
720  log_error(ls, "root list not empty");
721  error = -EINVAL;
722  goto out;
723 }
724
725 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
726  read_lock(&ls->ls_rsbtbl[i].lock);
727  list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
728   list_add(&r->res_root_list, &ls->ls_root_list);
729   dlm_hold_rsb(r);
730  }
731  read_unlock(&ls->ls_rsbtbl[i].lock);
732 }
733 out:
734 up_write(&ls->ls_root_sem);
735 return error;
736}
737
/* Drop the root-list snapshot and the per-rsb references it holds. */
738void dlm_release_root_list(struct dlm_ls *ls)
739{
740 struct dlm_rsb *r, *safe;
741
742 down_write(&ls->ls_root_sem);
743 list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
744  list_del_init(&r->res_root_list);
745  dlm_put_rsb(r);
746 }
747 up_write(&ls->ls_root_sem);
748}
749
/* Free all tossed (unused, cached) rsb's so they need not be recovered. */
750void dlm_clear_toss_list(struct dlm_ls *ls)
751{
752 struct dlm_rsb *r, *safe;
753 int i;
754
755 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
756  write_lock(&ls->ls_rsbtbl[i].lock);
757  list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
758      res_hashchain) {
759   list_del(&r->res_hashchain);
760   free_rsb(r);
761  }
762  write_unlock(&ls->ls_rsbtbl[i].lock);
763 }
764}
765
diff --git a/fs/dlm/recover.h b/fs/dlm/recover.h
new file mode 100644
index 000000000000..ebd0363f1e08
--- /dev/null
+++ b/fs/dlm/recover.h
@@ -0,0 +1,34 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __RECOVER_DOT_H__
15#define __RECOVER_DOT_H__
16
/* Recovery phases (recover.c): status barriers, master relocation, lock
   rebuild, and root/toss list maintenance.  Driven by dlm_recoverd. */
17int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls));
18uint32_t dlm_recover_status(struct dlm_ls *ls);
19void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status);
20int dlm_recover_members_wait(struct dlm_ls *ls);
21int dlm_recover_directory_wait(struct dlm_ls *ls);
22int dlm_recover_locks_wait(struct dlm_ls *ls);
23int dlm_recover_done_wait(struct dlm_ls *ls);
24int dlm_recover_masters(struct dlm_ls *ls);
25int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc);
26int dlm_recover_locks(struct dlm_ls *ls);
27void dlm_recovered_lock(struct dlm_rsb *r);
28int dlm_create_root_list(struct dlm_ls *ls);
29void dlm_release_root_list(struct dlm_ls *ls);
30void dlm_clear_toss_list(struct dlm_ls *ls);
31void dlm_recover_rsbs(struct dlm_ls *ls);
32
33#endif /* __RECOVER_DOT_H__ */
34
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
new file mode 100644
index 000000000000..362e3eff4dc9
--- /dev/null
+++ b/fs/dlm/recoverd.c
@@ -0,0 +1,290 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "member.h"
17#include "dir.h"
18#include "ast.h"
19#include "recover.h"
20#include "lowcomms.h"
21#include "lock.h"
22#include "requestqueue.h"
23#include "recoverd.h"
24
25
26/* If the start for which we're re-enabling locking (seq) has been superseded
27 by a newer stop (ls_recover_seq), we need to leave locking disabled. */
28
/* Re-enable normal locking for recovery sequence 'seq'.  The seq check
   under ls_recover_lock closes the race with a newer stop: if another stop
   bumped ls_recover_seq, locking stays disabled and -EINTR is returned so
   ls_recover() bails out. */
29static int enable_locking(struct dlm_ls *ls, uint64_t seq)
30{
31 int error = -EINTR;
32
33 spin_lock(&ls->ls_recover_lock);
34 if (ls->ls_recover_seq == seq) {
35  set_bit(LSFL_RUNNING, &ls->ls_flags);
36  up_write(&ls->ls_in_recovery);
37  error = 0;
38 }
39 spin_unlock(&ls->ls_recover_lock);
40 return error;
41}
42
/* Run one full recovery pass for sequence rv->seq, serialized by
   ls_recoverd_active.  Any failing step jumps to 'fail', which releases the
   root list; a later stop/start will retry with a new dlm_recover.
   NOTE(review): rv->seq is printed with %llx — if seq is uint64_t (as in
   enable_locking()) the arg should be cast to (unsigned long long) to match
   the format specifier on all architectures; confirm. */
43static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
44{
45 unsigned long start;
46 int error, neg = 0;
47
48 log_debug(ls, "recover %llx", rv->seq);
49
50 mutex_lock(&ls->ls_recoverd_active);
51
52 /*
53  * Suspending and resuming dlm_astd ensures that no lkb's from this ls
54  * will be processed by dlm_astd during recovery.
55  */
56
57 dlm_astd_suspend();
58 dlm_astd_resume();
59
60 /*
61  * This list of root rsb's will be the basis of most of the recovery
62  * routines.
63  */
64
65 dlm_create_root_list(ls);
66
67 /*
68  * Free all the tossed rsb's so we don't have to recover them.
69  */
70
71 dlm_clear_toss_list(ls);
72
73 /*
74  * Add or remove nodes from the lockspace's ls_nodes list.
75  * Also waits for all nodes to complete dlm_recover_members.
76  */
77
78 error = dlm_recover_members(ls, rv, &neg);
79 if (error) {
80  log_error(ls, "recover_members failed %d", error);
81  goto fail;
82 }
83 start = jiffies;
84
85 /*
86  * Rebuild our own share of the directory by collecting from all other
87  * nodes their master rsb names that hash to us.
88  */
89
90 error = dlm_recover_directory(ls);
91 if (error) {
92  log_error(ls, "recover_directory failed %d", error);
93  goto fail;
94 }
95
96 /*
97  * Purge directory-related requests that are saved in requestqueue.
98  * All dir requests from before recovery are invalid now due to the dir
99  * rebuild and will be resent by the requesting nodes.
100  */
101
102 dlm_purge_requestqueue(ls);
103
104 /*
105  * Wait for all nodes to complete directory rebuild.
106  */
107
108 error = dlm_recover_directory_wait(ls);
109 if (error) {
110  log_error(ls, "recover_directory_wait failed %d", error);
111  goto fail;
112 }
113
114 /*
115  * We may have outstanding operations that are waiting for a reply from
116  * a failed node.  Mark these to be resent after recovery.  Unlock and
117  * cancel ops can just be completed.
118  */
119
120 dlm_recover_waiters_pre(ls);
121
122 error = dlm_recovery_stopped(ls);
123 if (error)
124  goto fail;
125
/* Lock remastering is only needed when nodes left (neg) or there is no
   directory (static mastery may have moved). */
126 if (neg || dlm_no_directory(ls)) {
127  /*
128   * Clear lkb's for departed nodes.
129   */
130
131  dlm_purge_locks(ls);
132
133  /*
134   * Get new master nodeid's for rsb's that were mastered on
135   * departed nodes.
136   */
137
138  error = dlm_recover_masters(ls);
139  if (error) {
140   log_error(ls, "recover_masters failed %d", error);
141   goto fail;
142  }
143
144  /*
145   * Send our locks on remastered rsb's to the new masters.
146   */
147
148  error = dlm_recover_locks(ls);
149  if (error) {
150   log_error(ls, "recover_locks failed %d", error);
151   goto fail;
152  }
153
154  error = dlm_recover_locks_wait(ls);
155  if (error) {
156   log_error(ls, "recover_locks_wait failed %d", error);
157   goto fail;
158  }
159
160  /*
161   * Finalize state in master rsb's now that all locks can be
162   * checked.  This includes conversion resolution and lvb
163   * settings.
164   */
165
166  dlm_recover_rsbs(ls);
167 }
168
169 dlm_release_root_list(ls);
170
/* Cluster-wide barrier: everyone must reach DONE before locking resumes. */
171 dlm_set_recover_status(ls, DLM_RS_DONE);
172 error = dlm_recover_done_wait(ls);
173 if (error) {
174  log_error(ls, "recover_done_wait failed %d", error);
175  goto fail;
176 }
177
178 dlm_clear_members_gone(ls);
179
180 error = enable_locking(ls, rv->seq);
181 if (error) {
182  log_error(ls, "enable_locking failed %d", error);
183  goto fail;
184 }
185
186 error = dlm_process_requestqueue(ls);
187 if (error) {
188  log_error(ls, "process_requestqueue failed %d", error);
189  goto fail;
190 }
191
192 error = dlm_recover_waiters_post(ls);
193 if (error) {
194  log_error(ls, "recover_waiters_post failed %d", error);
195  goto fail;
196 }
197
198 dlm_grant_after_purge(ls);
199
200 dlm_astd_wake();
201
202 log_debug(ls, "recover %llx done: %u ms", rv->seq,
203    jiffies_to_msecs(jiffies - start));
204 mutex_unlock(&ls->ls_recoverd_active);
205
206 return 0;
207
208 fail:
209 dlm_release_root_list(ls);
210 log_debug(ls, "recover %llx error %d", rv->seq, error);
211 mutex_unlock(&ls->ls_recoverd_active);
212 return error;
213}
214
/* Claim the pending dlm_recover args (atomically, clearing the stop flag
   under the same lock so a racing stop re-queues work) and run one pass.
   The ls_recover() error is deliberately ignored: a failed pass is retried
   when the next stop/start arrives. */
215static void do_ls_recovery(struct dlm_ls *ls)
216{
217 struct dlm_recover *rv = NULL;
218
219 spin_lock(&ls->ls_recover_lock);
220 rv = ls->ls_recover_args;
221 ls->ls_recover_args = NULL;
222 clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
223 spin_unlock(&ls->ls_recover_lock);
224
225 if (rv) {
226  ls_recover(ls, rv);
227  kfree(rv->nodeids);
228  kfree(rv);
229 }
230}
231
/* Per-lockspace recovery thread: sleep until LSFL_WORK is set by
   dlm_recoverd_kick(), run a recovery pass, repeat until kthread_stop().
   The test-then-schedule ordering with set_current_state() avoids missing
   a wakeup that arrives between the check and the sleep. */
232static int dlm_recoverd(void *arg)
233{
234 struct dlm_ls *ls;
235
236 ls = dlm_find_lockspace_local(arg);
237 if (!ls) {
238  log_print("dlm_recoverd: no lockspace %p", arg);
239  return -1;
240 }
241
242 while (!kthread_should_stop()) {
243  set_current_state(TASK_INTERRUPTIBLE);
244  if (!test_bit(LSFL_WORK, &ls->ls_flags))
245   schedule();
246  set_current_state(TASK_RUNNING);
247
248  if (test_and_clear_bit(LSFL_WORK, &ls->ls_flags))
249   do_ls_recovery(ls);
250 }
251
252 dlm_put_lockspace(ls);
253 return 0;
254}
255
/* Request a recovery pass: flag work then wake the thread. */
256void dlm_recoverd_kick(struct dlm_ls *ls)
257{
258 set_bit(LSFL_WORK, &ls->ls_flags);
259 wake_up_process(ls->ls_recoverd_task);
260}
261
/* Spawn the per-lockspace recovery thread. */
262int dlm_recoverd_start(struct dlm_ls *ls)
263{
264 struct task_struct *p;
265 int error = 0;
266
267 p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
268 if (IS_ERR(p))
269  error = PTR_ERR(p);
270 else
271  ls->ls_recoverd_task = p;
272 return error;
273}
274
275void dlm_recoverd_stop(struct dlm_ls *ls)
276{
277 kthread_stop(ls->ls_recoverd_task);
278}
279
/* Abort any in-progress wait and block until the current recovery pass
   finishes; resumed by releasing ls_recoverd_active. */
280void dlm_recoverd_suspend(struct dlm_ls *ls)
281{
282 wake_up(&ls->ls_wait_general);
283 mutex_lock(&ls->ls_recoverd_active);
284}
285
286void dlm_recoverd_resume(struct dlm_ls *ls)
287{
288 mutex_unlock(&ls->ls_recoverd_active);
289}
290
diff --git a/fs/dlm/recoverd.h b/fs/dlm/recoverd.h
new file mode 100644
index 000000000000..866657c5d69d
--- /dev/null
+++ b/fs/dlm/recoverd.h
@@ -0,0 +1,24 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __RECOVERD_DOT_H__
#define __RECOVERD_DOT_H__

/* wake the recovery daemon to process queued recovery args */
void dlm_recoverd_kick(struct dlm_ls *ls);
void dlm_recoverd_stop(struct dlm_ls *ls);
int dlm_recoverd_start(struct dlm_ls *ls);
/* block/unblock the daemon from starting a recovery pass */
void dlm_recoverd_suspend(struct dlm_ls *ls);
void dlm_recoverd_resume(struct dlm_ls *ls);

#endif				/* __RECOVERD_DOT_H__ */
24
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
new file mode 100644
index 000000000000..7b2b089634a2
--- /dev/null
+++ b/fs/dlm/requestqueue.c
@@ -0,0 +1,184 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#include "dlm_internal.h"
14#include "member.h"
15#include "lock.h"
16#include "dir.h"
17#include "config.h"
18#include "requestqueue.h"
19
/* A message saved while the lockspace was in recovery. */
struct rq_entry {
	struct list_head list;
	int nodeid;		/* sending node */
	char request[1];	/* copied message, hd->h_length bytes */
};
25
26/*
27 * Requests received while the lockspace is in recovery get added to the
28 * request queue and processed when recovery is complete. This happens when
29 * the lockspace is suspended on some nodes before it is on others, or the
30 * lockspace is enabled on some while still suspended on others.
31 */
32
33void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
34{
35 struct rq_entry *e;
36 int length = hd->h_length;
37
38 if (dlm_is_removed(ls, nodeid))
39 return;
40
41 e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
42 if (!e) {
43 log_print("dlm_add_requestqueue: out of memory\n");
44 return;
45 }
46
47 e->nodeid = nodeid;
48 memcpy(e->request, hd, length);
49
50 mutex_lock(&ls->ls_requestqueue_mutex);
51 list_add_tail(&e->list, &ls->ls_requestqueue);
52 mutex_unlock(&ls->ls_requestqueue_mutex);
53}
54
/* Replay the messages saved on the requestqueue; called by
   dlm_recoverd after recovery.  Returns 0 when the queue is drained,
   or -EINTR when processing must stop early (message processing was
   interrupted, or a new recovery began); remaining entries stay
   queued for the next pass. */

int dlm_process_requestqueue(struct dlm_ls *ls)
{
	struct rq_entry *e;
	struct dlm_header *hd;
	int error = 0;

	mutex_lock(&ls->ls_requestqueue_mutex);

	for (;;) {
		if (list_empty(&ls->ls_requestqueue)) {
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = 0;
			break;
		}
		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
		/* drop the mutex while the message is processed;
		   dlm_receive_message takes other locks */
		mutex_unlock(&ls->ls_requestqueue_mutex);

		hd = (struct dlm_header *) e->request;
		error = dlm_receive_message(hd, e->nodeid, 1);

		if (error == -EINTR) {
			/* entry is left on requestqueue */
			log_debug(ls, "process_requestqueue abort eintr");
			break;
		}

		mutex_lock(&ls->ls_requestqueue_mutex);
		list_del(&e->list);
		kfree(e);

		if (dlm_locking_stopped(ls)) {
			/* a new recovery started; it will rerun us */
			log_debug(ls, "process_requestqueue abort running");
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = -EINTR;
			break;
		}
		/* yield between messages to avoid hogging the cpu */
		schedule();
	}

	return error;
}
96
97/*
98 * After recovery is done, locking is resumed and dlm_recoverd takes all the
99 * saved requests and processes them as they would have been by dlm_recvd. At
100 * the same time, dlm_recvd will start receiving new requests from remote
101 * nodes. We want to delay dlm_recvd processing new requests until
102 * dlm_recoverd has finished processing the old saved requests.
103 */
104
/* Wait for dlm_recoverd to drain the requestqueue so dlm_recvd can
   start processing new messages in order.  Gives up early if locking
   is stopped again (the next recovery handles the queue).  Returns
   with ls_requestqueue_mutex released in all cases. */

void dlm_wait_requestqueue(struct dlm_ls *ls)
{
	for (;;) {
		mutex_lock(&ls->ls_requestqueue_mutex);
		if (list_empty(&ls->ls_requestqueue))
			break;
		if (dlm_locking_stopped(ls))
			break;
		mutex_unlock(&ls->ls_requestqueue_mutex);
		schedule();
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}
118
/* Decide whether a saved message should be discarded rather than
   replayed after recovery.  Returns 1 to purge, 0 to keep. */

static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
	uint32_t type = ms->m_type;

	/* the sender is no longer a member of the lockspace */
	if (dlm_is_removed(ls, nodeid))
		return 1;

	/* directory operations are always purged because the directory is
	   always rebuilt during recovery and the lookups resent */

	if (type == DLM_MSG_REMOVE ||
	    type == DLM_MSG_LOOKUP ||
	    type == DLM_MSG_LOOKUP_REPLY)
		return 1;

	if (!dlm_no_directory(ls))
		return 0;

	/* with no directory, the master is likely to change as a part of
	   recovery; requests to/from the defunct master need to be purged */

	switch (type) {
	case DLM_MSG_REQUEST:
	case DLM_MSG_CONVERT:
	case DLM_MSG_UNLOCK:
	case DLM_MSG_CANCEL:
		/* we're no longer the master of this resource, the sender
		   will resend to the new master (see waiter_needs_recovery) */

		if (dlm_hash2nodeid(ls, ms->m_hash) != dlm_our_nodeid())
			return 1;
		break;

	case DLM_MSG_REQUEST_REPLY:
	case DLM_MSG_CONVERT_REPLY:
	case DLM_MSG_UNLOCK_REPLY:
	case DLM_MSG_CANCEL_REPLY:
	case DLM_MSG_GRANT:
		/* this reply is from the former master of the resource,
		   we'll resend to the new master if needed */

		if (dlm_hash2nodeid(ls, ms->m_hash) != nodeid)
			return 1;
		break;
	}

	return 0;
}
167
168void dlm_purge_requestqueue(struct dlm_ls *ls)
169{
170 struct dlm_message *ms;
171 struct rq_entry *e, *safe;
172
173 mutex_lock(&ls->ls_requestqueue_mutex);
174 list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
175 ms = (struct dlm_message *) e->request;
176
177 if (purge_request(ls, ms, e->nodeid)) {
178 list_del(&e->list);
179 kfree(e);
180 }
181 }
182 mutex_unlock(&ls->ls_requestqueue_mutex);
183}
184
diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h
new file mode 100644
index 000000000000..349f0d292d95
--- /dev/null
+++ b/fs/dlm/requestqueue.h
@@ -0,0 +1,22 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
#ifndef __REQUESTQUEUE_DOT_H__
#define __REQUESTQUEUE_DOT_H__

/* save a message received during recovery for later replay */
void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
/* replay saved messages; returns 0 or -EINTR if interrupted */
int dlm_process_requestqueue(struct dlm_ls *ls);
/* wait for the queue to drain before processing new messages */
void dlm_wait_requestqueue(struct dlm_ls *ls);
/* drop saved messages made stale by recovery */
void dlm_purge_requestqueue(struct dlm_ls *ls);

#endif
22
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
new file mode 100644
index 000000000000..c37e93e4f2df
--- /dev/null
+++ b/fs/dlm/user.c
@@ -0,0 +1,788 @@
1/*
2 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License v.2.
7 */
8
9#include <linux/miscdevice.h>
10#include <linux/init.h>
11#include <linux/wait.h>
12#include <linux/module.h>
13#include <linux/file.h>
14#include <linux/fs.h>
15#include <linux/poll.h>
16#include <linux/signal.h>
17#include <linux/spinlock.h>
18#include <linux/dlm.h>
19#include <linux/dlm_device.h>
20
21#include "dlm_internal.h"
22#include "lockspace.h"
23#include "lock.h"
24#include "lvb_table.h"
25
static const char *name_prefix="dlm";	/* lockspace devices are named "dlm_<name>" */
static struct miscdevice ctl_device;	/* the dlm-control device, set up in dlm_user_init() */
static struct file_operations device_fops;	/* forward decl; defined at end of file */
29
30#ifdef CONFIG_COMPAT
31
/* 32-bit userspace versions of the dlm_device read/write structures,
   used when a 32-bit process talks to a 64-bit kernel.  Pointer-sized
   fields shrink to __u32; layouts must match the 32-bit libdlm ABI
   exactly, so do not reorder or resize fields. */

struct dlm_lock_params32 {
	__u8 mode;
	__u8 namelen;
	__u16 flags;
	__u32 lkid;
	__u32 parent;

	__u32 castparam;	/* 32-bit user-space pointers */
	__u32 castaddr;
	__u32 bastparam;
	__u32 bastaddr;
	__u32 lksb;

	char lvb[DLM_USER_LVB_LEN];
	char name[0];		/* variable-length resource name follows */
};

struct dlm_write_request32 {
	__u32 version[3];	/* major/minor/patch, see check_version() */
	__u8 cmd;
	__u8 is64bit;		/* 0 here; tells the kernel to convert */
	__u8 unused[2];

	union  {
		struct dlm_lock_params32 lock;
		struct dlm_lspace_params lspace;
	} i;
};

struct dlm_lksb32 {
	__u32 sb_status;
	__u32 sb_lkid;
	__u8 sb_flags;
	__u32 sb_lvbptr;	/* 32-bit user-space pointer */
};

struct dlm_lock_result32 {
	__u32 length;
	__u32 user_astaddr;
	__u32 user_astparam;
	__u32 user_lksb;
	struct dlm_lksb32 lksb;
	__u8 bast_mode;
	__u8 unused[3];
	/* Offsets may be zero if no data is present */
	__u32 lvb_offset;
};
79
/* Expand a 32-bit write request (kb32) into the native layout (kb).
   kb must have been allocated with the extra room for the size
   difference plus any trailing name (see device_write). */

static void compat_input(struct dlm_write_request *kb,
			 struct dlm_write_request32 *kb32)
{
	kb->version[0] = kb32->version[0];
	kb->version[1] = kb32->version[1];
	kb->version[2] = kb32->version[2];

	kb->cmd = kb32->cmd;
	kb->is64bit = kb32->is64bit;
	if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
	    kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
		kb->i.lspace.flags = kb32->i.lspace.flags;
		kb->i.lspace.minor = kb32->i.lspace.minor;
		/* NOTE(review): assumes the user-supplied lockspace name is
		   NUL-terminated inside the copied buffer — verify caller */
		strcpy(kb->i.lspace.name, kb32->i.lspace.name);
	} else {
		kb->i.lock.mode = kb32->i.lock.mode;
		kb->i.lock.namelen = kb32->i.lock.namelen;
		kb->i.lock.flags = kb32->i.lock.flags;
		kb->i.lock.lkid = kb32->i.lock.lkid;
		kb->i.lock.parent = kb32->i.lock.parent;
		/* widen the 32-bit user pointers/tokens to native pointers */
		kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
		kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
		kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
		kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
		kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
		memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
		memcpy(kb->i.lock.name, kb32->i.lock.name, kb->i.lock.namelen);
	}
}
109
110static void compat_output(struct dlm_lock_result *res,
111 struct dlm_lock_result32 *res32)
112{
113 res32->length = res->length - (sizeof(struct dlm_lock_result) -
114 sizeof(struct dlm_lock_result32));
115 res32->user_astaddr = (__u32)(long)res->user_astaddr;
116 res32->user_astparam = (__u32)(long)res->user_astparam;
117 res32->user_lksb = (__u32)(long)res->user_lksb;
118 res32->bast_mode = res->bast_mode;
119
120 res32->lvb_offset = res->lvb_offset;
121 res32->length = res->length;
122
123 res32->lksb.sb_status = res->lksb.sb_status;
124 res32->lksb.sb_flags = res->lksb.sb_flags;
125 res32->lksb.sb_lkid = res->lksb.sb_lkid;
126 res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
127}
128#endif
129
130
/* Queue a completion (AST_COMP) or blocking (AST_BAST) ast on the
   owning process's asts list so device_read() can deliver it.  Takes
   an lkb reference for the list entry; may instead drop a reference
   if a failed noqueue request must leave the proc's locks list. */

void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
{
	struct dlm_ls *ls;
	struct dlm_user_args *ua;
	struct dlm_user_proc *proc;
	int remove_ownqueue = 0;

	/* dlm_clear_proc_locks() sets ORPHAN/DEAD flag on each
	   lkb before dealing with it.  We need to check this
	   flag before taking ls_clear_proc_locks mutex because if
	   it's set, dlm_clear_proc_locks() holds the mutex. */

	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) {
		/* log_print("user_add_ast skip1 %x", lkb->lkb_flags); */
		return;
	}

	ls = lkb->lkb_resource->res_ls;
	mutex_lock(&ls->ls_clear_proc_locks);

	/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
	   can't be delivered.  For ORPHAN's, dlm_clear_proc_locks() freed
	   lkb->ua so we can't try to use it. */

	if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) {
		/* log_print("user_add_ast skip2 %x", lkb->lkb_flags); */
		goto out;
	}

	DLM_ASSERT(lkb->lkb_astparam, dlm_print_lkb(lkb););
	ua = (struct dlm_user_args *)lkb->lkb_astparam;
	proc = ua->proc;

	/* nothing to deliver if the process registered no blocking ast */
	if (type == AST_BAST && ua->bastaddr == NULL)
		goto out;

	spin_lock(&proc->asts_spin);
	/* only enqueue (and take a ref) if not already on the asts list;
	   otherwise just OR in the new ast type */
	if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
		kref_get(&lkb->lkb_ref);
		list_add_tail(&lkb->lkb_astqueue, &proc->asts);
		lkb->lkb_ast_type |= type;
		wake_up_interruptible(&proc->wait);
	}

	/* noqueue requests that fail may need to be removed from the
	   proc's locks list, there should be a better way of detecting
	   this situation than checking all these things... */

	if (type == AST_COMP && lkb->lkb_grmode == DLM_LOCK_IV &&
	    ua->lksb.sb_status == -EAGAIN && !list_empty(&lkb->lkb_ownqueue))
		remove_ownqueue = 1;

	/* We want to copy the lvb to userspace when the completion
	   ast is read if the status is 0, the lock has an lvb and
	   lvb_ops says we should.  We could probably have set_lvb_lock()
	   set update_user_lvb instead and not need old_mode */

	if ((lkb->lkb_ast_type & AST_COMP) &&
	    (lkb->lkb_lksb->sb_status == 0) &&
	    lkb->lkb_lksb->sb_lvbptr &&
	    dlm_lvb_operations[ua->old_mode + 1][lkb->lkb_grmode + 1])
		ua->update_user_lvb = 1;
	else
		ua->update_user_lvb = 0;

	spin_unlock(&proc->asts_spin);

	if (remove_ownqueue) {
		spin_lock(&ua->proc->locks_spin);
		list_del_init(&lkb->lkb_ownqueue);
		spin_unlock(&ua->proc->locks_spin);
		/* drop the ownqueue reference */
		dlm_put_lkb(lkb);
	}
 out:
	mutex_unlock(&ls->ls_clear_proc_locks);
}
207
/* Handle a DLM_USER_LOCK write: a new lock request or a conversion.
   A successful new request returns the positive lkid so userspace
   can identify the lock; a successful conversion returns 0.
   NOTE(review): ua is not freed here on request/convert failure —
   presumably ownership passes to dlm_user_request/dlm_user_convert;
   confirm against those functions. */

static int device_user_lock(struct dlm_user_proc *proc,
			    struct dlm_lock_params *params)
{
	struct dlm_ls *ls;
	struct dlm_user_args *ua;
	int error = -ENOMEM;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	/* a completion ast address and lksb are mandatory */
	if (!params->castaddr || !params->lksb) {
		error = -EINVAL;
		goto out;
	}

	ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
	if (!ua)
		goto out;
	ua->proc = proc;
	ua->user_lksb = params->lksb;
	ua->castparam = params->castparam;
	ua->castaddr = params->castaddr;
	ua->bastparam = params->bastparam;
	ua->bastaddr = params->bastaddr;

	if (params->flags & DLM_LKF_CONVERT)
		error = dlm_user_convert(ls, ua,
				         params->mode, params->flags,
				         params->lkid, params->lvb);
	else {
		error = dlm_user_request(ls, ua,
					 params->mode, params->flags,
					 params->name, params->namelen,
					 params->parent);
		if (!error)
			error = ua->lksb.sb_lkid;
	}
 out:
	dlm_put_lockspace(ls);
	return error;
}
250
/* Handle a DLM_USER_UNLOCK write: an unlock or (with DLM_LKF_CANCEL)
   a cancel of an in-progress request.
   NOTE(review): as with device_user_lock, ua is not freed here on
   failure — ownership presumably passes to dlm_user_unlock/cancel;
   confirm against those functions. */

static int device_user_unlock(struct dlm_user_proc *proc,
			      struct dlm_lock_params *params)
{
	struct dlm_ls *ls;
	struct dlm_user_args *ua;
	int error = -ENOMEM;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
	if (!ua)
		goto out;
	ua->proc = proc;
	ua->user_lksb = params->lksb;
	ua->castparam = params->castparam;
	ua->castaddr = params->castaddr;

	if (params->flags & DLM_LKF_CANCEL)
		error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
	else
		error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
					params->lvb);
 out:
	dlm_put_lockspace(ls);
	return error;
}
279
280static int device_create_lockspace(struct dlm_lspace_params *params)
281{
282 dlm_lockspace_t *lockspace;
283 struct dlm_ls *ls;
284 int error, len;
285
286 if (!capable(CAP_SYS_ADMIN))
287 return -EPERM;
288
289 error = dlm_new_lockspace(params->name, strlen(params->name),
290 &lockspace, 0, DLM_USER_LVB_LEN);
291 if (error)
292 return error;
293
294 ls = dlm_find_lockspace_local(lockspace);
295 if (!ls)
296 return -ENOENT;
297
298 error = -ENOMEM;
299 len = strlen(params->name) + strlen(name_prefix) + 2;
300 ls->ls_device.name = kzalloc(len, GFP_KERNEL);
301 if (!ls->ls_device.name)
302 goto fail;
303 snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
304 params->name);
305 ls->ls_device.fops = &device_fops;
306 ls->ls_device.minor = MISC_DYNAMIC_MINOR;
307
308 error = misc_register(&ls->ls_device);
309 if (error) {
310 kfree(ls->ls_device.name);
311 goto fail;
312 }
313
314 error = ls->ls_device.minor;
315 dlm_put_lockspace(ls);
316 return error;
317
318 fail:
319 dlm_put_lockspace(ls);
320 dlm_release_lockspace(lockspace, 0);
321 return error;
322}
323
/* Handle a DLM_USER_REMOVE_LOCKSPACE write on the control device:
   deregister the lockspace's misc device and release the lockspace.
   DLM_USER_LSFLG_FORCEFREE forces the release even with locks held. */

static int device_remove_lockspace(struct dlm_lspace_params *params)
{
	dlm_lockspace_t *lockspace;
	struct dlm_ls *ls;
	int error, force = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ls = dlm_find_lockspace_device(params->minor);
	if (!ls)
		return -ENOENT;

	error = misc_deregister(&ls->ls_device);
	if (error) {
		dlm_put_lockspace(ls);
		goto out;
	}
	kfree(ls->ls_device.name);

	if (params->flags & DLM_USER_LSFLG_FORCEFREE)
		force = 2;

	lockspace = ls->ls_local_handle;

	/* dlm_release_lockspace waits for references to go to zero,
	   so all processes will need to close their device for the ls
	   before the release will procede */

	dlm_put_lockspace(ls);
	error = dlm_release_lockspace(lockspace, force);
 out:
	return error;
}
358
359/* Check the user's version matches ours */
360static int check_version(struct dlm_write_request *req)
361{
362 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
363 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
364 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
365
366 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
367 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
368 current->comm,
369 current->pid,
370 req->version[0],
371 req->version[1],
372 req->version[2],
373 DLM_DEVICE_VERSION_MAJOR,
374 DLM_DEVICE_VERSION_MINOR,
375 DLM_DEVICE_VERSION_PATCH);
376 return -EINVAL;
377 }
378 return 0;
379}
380
381/*
382 * device_write
383 *
384 * device_user_lock
385 * dlm_user_request -> request_lock
386 * dlm_user_convert -> convert_lock
387 *
388 * device_user_unlock
389 * dlm_user_unlock -> unlock_lock
390 * dlm_user_cancel -> cancel_lock
391 *
392 * device_create_lockspace
393 * dlm_new_lockspace
394 *
395 * device_remove_lockspace
396 * dlm_release_lockspace
397 */
398
399/* a write to a lockspace device is a lock or unlock request, a write
400 to the control device is to create/remove a lockspace */
401
402static ssize_t device_write(struct file *file, const char __user *buf,
403 size_t count, loff_t *ppos)
404{
405 struct dlm_user_proc *proc = file->private_data;
406 struct dlm_write_request *kbuf;
407 sigset_t tmpsig, allsigs;
408 int error;
409
410#ifdef CONFIG_COMPAT
411 if (count < sizeof(struct dlm_write_request32))
412#else
413 if (count < sizeof(struct dlm_write_request))
414#endif
415 return -EINVAL;
416
417 kbuf = kmalloc(count, GFP_KERNEL);
418 if (!kbuf)
419 return -ENOMEM;
420
421 if (copy_from_user(kbuf, buf, count)) {
422 error = -EFAULT;
423 goto out_free;
424 }
425
426 if (check_version(kbuf)) {
427 error = -EBADE;
428 goto out_free;
429 }
430
431#ifdef CONFIG_COMPAT
432 if (!kbuf->is64bit) {
433 struct dlm_write_request32 *k32buf;
434 k32buf = (struct dlm_write_request32 *)kbuf;
435 kbuf = kmalloc(count + (sizeof(struct dlm_write_request) -
436 sizeof(struct dlm_write_request32)), GFP_KERNEL);
437 if (!kbuf)
438 return -ENOMEM;
439
440 if (proc)
441 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
442 compat_input(kbuf, k32buf);
443 kfree(k32buf);
444 }
445#endif
446
447 /* do we really need this? can a write happen after a close? */
448 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
449 test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
450 return -EINVAL;
451
452 sigfillset(&allsigs);
453 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
454
455 error = -EINVAL;
456
457 switch (kbuf->cmd)
458 {
459 case DLM_USER_LOCK:
460 if (!proc) {
461 log_print("no locking on control device");
462 goto out_sig;
463 }
464 error = device_user_lock(proc, &kbuf->i.lock);
465 break;
466
467 case DLM_USER_UNLOCK:
468 if (!proc) {
469 log_print("no locking on control device");
470 goto out_sig;
471 }
472 error = device_user_unlock(proc, &kbuf->i.lock);
473 break;
474
475 case DLM_USER_CREATE_LOCKSPACE:
476 if (proc) {
477 log_print("create/remove only on control device");
478 goto out_sig;
479 }
480 error = device_create_lockspace(&kbuf->i.lspace);
481 break;
482
483 case DLM_USER_REMOVE_LOCKSPACE:
484 if (proc) {
485 log_print("create/remove only on control device");
486 goto out_sig;
487 }
488 error = device_remove_lockspace(&kbuf->i.lspace);
489 break;
490
491 default:
492 log_print("Unknown command passed to DLM device : %d\n",
493 kbuf->cmd);
494 }
495
496 out_sig:
497 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
498 recalc_sigpending();
499 out_free:
500 kfree(kbuf);
501 return error;
502}
503
504/* Every process that opens the lockspace device has its own "proc" structure
505 hanging off the open file that's used to keep track of locks owned by the
506 process and asts that need to be delivered to the process. */
507
508static int device_open(struct inode *inode, struct file *file)
509{
510 struct dlm_user_proc *proc;
511 struct dlm_ls *ls;
512
513 ls = dlm_find_lockspace_device(iminor(inode));
514 if (!ls)
515 return -ENOENT;
516
517 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_KERNEL);
518 if (!proc) {
519 dlm_put_lockspace(ls);
520 return -ENOMEM;
521 }
522
523 proc->lockspace = ls->ls_local_handle;
524 INIT_LIST_HEAD(&proc->asts);
525 INIT_LIST_HEAD(&proc->locks);
526 spin_lock_init(&proc->asts_spin);
527 spin_lock_init(&proc->locks_spin);
528 init_waitqueue_head(&proc->wait);
529 file->private_data = proc;
530
531 return 0;
532}
533
/* Release all locks owned by the closing process and free its proc
   struct.  Drops both the reference taken here and the one taken by
   device_open().  Signals are blocked while locks are torn down. */

static int device_close(struct inode *inode, struct file *file)
{
	struct dlm_user_proc *proc = file->private_data;
	struct dlm_ls *ls;
	sigset_t tmpsig, allsigs;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	sigfillset(&allsigs);
	sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);

	/* tell device_read/device_write to reject further calls */
	set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);

	dlm_clear_proc_locks(ls, proc);

	/* at this point no more lkb's should exist for this lockspace,
	   so there's no chance of dlm_user_add_ast() being called and
	   looking for lkb->ua->proc */

	kfree(proc);
	file->private_data = NULL;

	dlm_put_lockspace(ls);
	dlm_put_lockspace(ls);  /* for the find in device_open() */

	/* FIXME: AUTOFREE: if this ls is no longer used do
	   device_remove_lockspace() */

	sigprocmask(SIG_SETMASK, &tmpsig, NULL);
	recalc_sigpending();

	return 0;
}
569
/* Build a dlm_lock_result for one ast and copy it to the user's read
   buffer, optionally followed by the lvb.  Returns the total number
   of bytes written, or -EFAULT on copy failure.  When compat is set
   the 32-bit result layout is used. */

static int copy_result_to_user(struct dlm_user_args *ua, int compat, int type,
			       int bmode, char __user *buf, size_t count)
{
#ifdef CONFIG_COMPAT
	struct dlm_lock_result32 result32;
#endif
	struct dlm_lock_result result;
	void *resultptr;
	int error=0;
	int len;
	int struct_len;

	memset(&result, 0, sizeof(struct dlm_lock_result));
	memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
	result.user_lksb = ua->user_lksb;

	/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
	   in a conversion unless the conversion is successful.  See code
	   in dlm_user_convert() for updating ua from ua_tmp.  OpenVMS, though,
	   notes that a new blocking AST address and parameter are set even if
	   the conversion fails, so maybe we should just do that. */

	if (type == AST_BAST) {
		result.user_astaddr = ua->bastaddr;
		result.user_astparam = ua->bastparam;
		result.bast_mode = bmode;
	} else {
		result.user_astaddr = ua->castaddr;
		result.user_astparam = ua->castparam;
	}

	/* len starts as the size of the (possibly compat) result struct */
#ifdef CONFIG_COMPAT
	if (compat)
		len = sizeof(struct dlm_lock_result32);
	else
#endif
		len = sizeof(struct dlm_lock_result);
	struct_len = len;

	/* copy lvb to userspace if there is one, it's been updated, and
	   the user buffer has space for it */

	if (ua->update_user_lvb && ua->lksb.sb_lvbptr &&
	    count >= len + DLM_USER_LVB_LEN) {
		/* lvb is placed directly after the struct in the buffer */
		if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
				 DLM_USER_LVB_LEN)) {
			error = -EFAULT;
			goto out;
		}

		result.lvb_offset = len;
		len += DLM_USER_LVB_LEN;
	}

	result.length = len;
	resultptr = &result;
#ifdef CONFIG_COMPAT
	if (compat) {
		compat_output(&result, &result32);
		resultptr = &result32;
	}
#endif

	if (copy_to_user(buf, resultptr, struct_len))
		error = -EFAULT;
	else
		error = len;
 out:
	return error;
}
640
/* a read returns a single ast described in a struct dlm_lock_result */

static ssize_t device_read(struct file *file, char __user *buf, size_t count,
			   loff_t *ppos)
{
	struct dlm_user_proc *proc = file->private_data;
	struct dlm_lkb *lkb;
	struct dlm_user_args *ua;
	DECLARE_WAITQUEUE(wait, current);
	int error, type=0, bmode=0, removed = 0;

	/* the buffer must hold at least the fixed result struct */
#ifdef CONFIG_COMPAT
	if (count < sizeof(struct dlm_lock_result32))
#else
	if (count < sizeof(struct dlm_lock_result))
#endif
		return -EINVAL;

	/* do we really need this? can a read happen after a close? */
	if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
		return -EINVAL;

	spin_lock(&proc->asts_spin);
	if (list_empty(&proc->asts)) {
		if (file->f_flags & O_NONBLOCK) {
			spin_unlock(&proc->asts_spin);
			return -EAGAIN;
		}

		/* hand-rolled interruptible wait: sleep until an ast is
		   queued (dlm_user_add_ast wakes proc->wait) or a signal
		   arrives; asts_spin is dropped around schedule() */
		add_wait_queue(&proc->wait, &wait);

	repeat:
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&proc->asts) && !signal_pending(current)) {
			spin_unlock(&proc->asts_spin);
			schedule();
			spin_lock(&proc->asts_spin);
			goto repeat;
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&proc->wait, &wait);

		if (signal_pending(current)) {
			spin_unlock(&proc->asts_spin);
			return -ERESTARTSYS;
		}
	}

	if (list_empty(&proc->asts)) {
		spin_unlock(&proc->asts_spin);
		return -EAGAIN;
	}

	/* there may be both completion and blocking asts to return for
	   the lkb, don't remove lkb from asts list unless no asts remain */

	lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_astqueue);

	/* deliver the completion ast first if both are pending */
	if (lkb->lkb_ast_type & AST_COMP) {
		lkb->lkb_ast_type &= ~AST_COMP;
		type = AST_COMP;
	} else if (lkb->lkb_ast_type & AST_BAST) {
		lkb->lkb_ast_type &= ~AST_BAST;
		type = AST_BAST;
		bmode = lkb->lkb_bastmode;
	}

	if (!lkb->lkb_ast_type) {
		list_del(&lkb->lkb_astqueue);
		removed = 1;
	}
	spin_unlock(&proc->asts_spin);

	ua = (struct dlm_user_args *)lkb->lkb_astparam;
	error = copy_result_to_user(ua,
			 	test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
				type, bmode, buf, count);

	/* removes reference for the proc->asts lists added by
	   dlm_user_add_ast() and may result in the lkb being freed */
	if (removed)
		dlm_put_lkb(lkb);

	return error;
}
726
727static unsigned int device_poll(struct file *file, poll_table *wait)
728{
729 struct dlm_user_proc *proc = file->private_data;
730
731 poll_wait(file, &proc->wait, wait);
732
733 spin_lock(&proc->asts_spin);
734 if (!list_empty(&proc->asts)) {
735 spin_unlock(&proc->asts_spin);
736 return POLLIN | POLLRDNORM;
737 }
738 spin_unlock(&proc->asts_spin);
739 return 0;
740}
741
/* The control device has no per-open state; a NULL private_data is
   how device_write() distinguishes it from a lockspace device. */

static int ctl_device_open(struct inode *inode, struct file *file)
{
	file->private_data = NULL;
	return 0;
}
747
/* Nothing to tear down for the control device. */

static int ctl_device_close(struct inode *inode, struct file *file)
{
	return 0;
}
752
/* per-lockspace device: full lock/unlock/read-ast/poll interface */
static struct file_operations device_fops = {
	.open    = device_open,
	.release = device_close,
	.read    = device_read,
	.write   = device_write,
	.poll    = device_poll,
	.owner   = THIS_MODULE,
};

/* dlm-control device: only lockspace create/remove via write() */
static struct file_operations ctl_device_fops = {
	.open    = ctl_device_open,
	.release = ctl_device_close,
	.write   = device_write,
	.owner   = THIS_MODULE,
};
768
769int dlm_user_init(void)
770{
771 int error;
772
773 ctl_device.name = "dlm-control";
774 ctl_device.fops = &ctl_device_fops;
775 ctl_device.minor = MISC_DYNAMIC_MINOR;
776
777 error = misc_register(&ctl_device);
778 if (error)
779 log_print("misc_register failed for control device");
780
781 return error;
782}
783
/* Unregister the dlm-control misc device on module unload. */

void dlm_user_exit(void)
{
	misc_deregister(&ctl_device);
}
788
diff --git a/fs/dlm/user.h b/fs/dlm/user.h
new file mode 100644
index 000000000000..d38e9f3e4151
--- /dev/null
+++ b/fs/dlm/user.h
@@ -0,0 +1,16 @@
1/*
2 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License v.2.
7 */
8
#ifndef __USER_DOT_H__
#define __USER_DOT_H__

/* queue a completion (AST_COMP) or blocking (AST_BAST) ast for
   delivery to the userspace process that owns lkb */
void dlm_user_add_ast(struct dlm_lkb *lkb, int type);
int dlm_user_init(void);	/* register the dlm-control device */
void dlm_user_exit(void);	/* unregister the dlm-control device */

#endif
diff --git a/fs/dlm/util.c b/fs/dlm/util.c
new file mode 100644
index 000000000000..767197db9944
--- /dev/null
+++ b/fs/dlm/util.c
@@ -0,0 +1,161 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#include "dlm_internal.h"
14#include "rcom.h"
15#include "util.h"
16
17static void header_out(struct dlm_header *hd)
18{
19 hd->h_version = cpu_to_le32(hd->h_version);
20 hd->h_lockspace = cpu_to_le32(hd->h_lockspace);
21 hd->h_nodeid = cpu_to_le32(hd->h_nodeid);
22 hd->h_length = cpu_to_le16(hd->h_length);
23}
24
25static void header_in(struct dlm_header *hd)
26{
27 hd->h_version = le32_to_cpu(hd->h_version);
28 hd->h_lockspace = le32_to_cpu(hd->h_lockspace);
29 hd->h_nodeid = le32_to_cpu(hd->h_nodeid);
30 hd->h_length = le16_to_cpu(hd->h_length);
31}
32
/* Convert a dlm_message (header included) from host byte order to the
 * little-endian wire format, in place, before sending. */
void dlm_message_out(struct dlm_message *ms)
{
	struct dlm_header *hd = (struct dlm_header *) ms;

	header_out(hd);

	ms->m_type = cpu_to_le32(ms->m_type);
	ms->m_nodeid = cpu_to_le32(ms->m_nodeid);
	ms->m_pid = cpu_to_le32(ms->m_pid);
	ms->m_lkid = cpu_to_le32(ms->m_lkid);
	ms->m_remid = cpu_to_le32(ms->m_remid);
	ms->m_parent_lkid = cpu_to_le32(ms->m_parent_lkid);
	ms->m_parent_remid = cpu_to_le32(ms->m_parent_remid);
	ms->m_exflags = cpu_to_le32(ms->m_exflags);
	ms->m_sbflags = cpu_to_le32(ms->m_sbflags);
	ms->m_flags = cpu_to_le32(ms->m_flags);
	ms->m_lvbseq = cpu_to_le32(ms->m_lvbseq);
	ms->m_hash = cpu_to_le32(ms->m_hash);
	ms->m_status = cpu_to_le32(ms->m_status);
	ms->m_grmode = cpu_to_le32(ms->m_grmode);
	ms->m_rqmode = cpu_to_le32(ms->m_rqmode);
	ms->m_bastmode = cpu_to_le32(ms->m_bastmode);
	ms->m_asts = cpu_to_le32(ms->m_asts);
	ms->m_result = cpu_to_le32(ms->m_result);
}
58
/* Convert a received dlm_message (header included) from little-endian
 * wire order to host byte order, in place. */
void dlm_message_in(struct dlm_message *ms)
{
	struct dlm_header *hd = (struct dlm_header *) ms;

	header_in(hd);

	ms->m_type = le32_to_cpu(ms->m_type);
	ms->m_nodeid = le32_to_cpu(ms->m_nodeid);
	ms->m_pid = le32_to_cpu(ms->m_pid);
	ms->m_lkid = le32_to_cpu(ms->m_lkid);
	ms->m_remid = le32_to_cpu(ms->m_remid);
	ms->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
	ms->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
	ms->m_exflags = le32_to_cpu(ms->m_exflags);
	ms->m_sbflags = le32_to_cpu(ms->m_sbflags);
	ms->m_flags = le32_to_cpu(ms->m_flags);
	ms->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
	ms->m_hash = le32_to_cpu(ms->m_hash);
	ms->m_status = le32_to_cpu(ms->m_status);
	ms->m_grmode = le32_to_cpu(ms->m_grmode);
	ms->m_rqmode = le32_to_cpu(ms->m_rqmode);
	ms->m_bastmode = le32_to_cpu(ms->m_bastmode);
	ms->m_asts = le32_to_cpu(ms->m_asts);
	ms->m_result = le32_to_cpu(ms->m_result);
}
84
/* Convert an rcom_lock payload to little-endian wire order, in place.
 * Note the last two fields are 16-bit. */
static void rcom_lock_out(struct rcom_lock *rl)
{
	rl->rl_ownpid = cpu_to_le32(rl->rl_ownpid);
	rl->rl_lkid = cpu_to_le32(rl->rl_lkid);
	rl->rl_remid = cpu_to_le32(rl->rl_remid);
	rl->rl_parent_lkid = cpu_to_le32(rl->rl_parent_lkid);
	rl->rl_parent_remid = cpu_to_le32(rl->rl_parent_remid);
	rl->rl_exflags = cpu_to_le32(rl->rl_exflags);
	rl->rl_flags = cpu_to_le32(rl->rl_flags);
	rl->rl_lvbseq = cpu_to_le32(rl->rl_lvbseq);
	rl->rl_result = cpu_to_le32(rl->rl_result);
	rl->rl_wait_type = cpu_to_le16(rl->rl_wait_type);
	rl->rl_namelen = cpu_to_le16(rl->rl_namelen);
}
99
/* Convert a received rcom_lock payload from little-endian wire order
 * to host order, in place.  Mirrors rcom_lock_out(). */
static void rcom_lock_in(struct rcom_lock *rl)
{
	rl->rl_ownpid = le32_to_cpu(rl->rl_ownpid);
	rl->rl_lkid = le32_to_cpu(rl->rl_lkid);
	rl->rl_remid = le32_to_cpu(rl->rl_remid);
	rl->rl_parent_lkid = le32_to_cpu(rl->rl_parent_lkid);
	rl->rl_parent_remid = le32_to_cpu(rl->rl_parent_remid);
	rl->rl_exflags = le32_to_cpu(rl->rl_exflags);
	rl->rl_flags = le32_to_cpu(rl->rl_flags);
	rl->rl_lvbseq = le32_to_cpu(rl->rl_lvbseq);
	rl->rl_result = le32_to_cpu(rl->rl_result);
	rl->rl_wait_type = le16_to_cpu(rl->rl_wait_type);
	rl->rl_namelen = le16_to_cpu(rl->rl_namelen);
}
114
115static void rcom_config_out(struct rcom_config *rf)
116{
117 rf->rf_lvblen = cpu_to_le32(rf->rf_lvblen);
118 rf->rf_lsflags = cpu_to_le32(rf->rf_lsflags);
119}
120
121static void rcom_config_in(struct rcom_config *rf)
122{
123 rf->rf_lvblen = le32_to_cpu(rf->rf_lvblen);
124 rf->rf_lsflags = le32_to_cpu(rf->rf_lsflags);
125}
126
/* Convert a dlm_rcom (recovery message) to little-endian wire order in
 * place.  The payload conversion depends on the message type, which is
 * captured in host order *before* rc_type itself is byte-swapped. */
void dlm_rcom_out(struct dlm_rcom *rc)
{
	struct dlm_header *hd = (struct dlm_header *) rc;
	int type = rc->rc_type;	/* host-order copy; rc_type is swapped below */

	header_out(hd);

	rc->rc_type = cpu_to_le32(rc->rc_type);
	rc->rc_result = cpu_to_le32(rc->rc_result);
	rc->rc_id = cpu_to_le64(rc->rc_id);

	if (type == DLM_RCOM_LOCK)
		rcom_lock_out((struct rcom_lock *) rc->rc_buf);

	else if (type == DLM_RCOM_STATUS_REPLY)
		rcom_config_out((struct rcom_config *) rc->rc_buf);
}
144
/* Convert a received dlm_rcom to host byte order in place.  Unlike
 * dlm_rcom_out(), rc_type is already usable after conversion, so the
 * payload dispatch reads the converted value directly. */
void dlm_rcom_in(struct dlm_rcom *rc)
{
	struct dlm_header *hd = (struct dlm_header *) rc;

	header_in(hd);

	rc->rc_type = le32_to_cpu(rc->rc_type);
	rc->rc_result = le32_to_cpu(rc->rc_result);
	rc->rc_id = le64_to_cpu(rc->rc_id);

	if (rc->rc_type == DLM_RCOM_LOCK)
		rcom_lock_in((struct rcom_lock *) rc->rc_buf);

	else if (rc->rc_type == DLM_RCOM_STATUS_REPLY)
		rcom_config_in((struct rcom_config *) rc->rc_buf);
}
161
diff --git a/fs/dlm/util.h b/fs/dlm/util.h
new file mode 100644
index 000000000000..2b25915161c0
--- /dev/null
+++ b/fs/dlm/util.h
@@ -0,0 +1,22 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#ifndef __UTIL_DOT_H__
14#define __UTIL_DOT_H__
15
16void dlm_message_out(struct dlm_message *ms);
17void dlm_message_in(struct dlm_message *ms);
18void dlm_rcom_out(struct dlm_rcom *rc);
19void dlm_rcom_in(struct dlm_rcom *rc);
20
21#endif
22
diff --git a/fs/ecryptfs/Makefile b/fs/ecryptfs/Makefile
new file mode 100644
index 000000000000..ca6562451eeb
--- /dev/null
+++ b/fs/ecryptfs/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the Linux 2.6 eCryptfs
3#
4
5obj-$(CONFIG_ECRYPT_FS) += ecryptfs.o
6
7ecryptfs-objs := dentry.o file.o inode.o main.o super.o mmap.o crypto.o keystore.o debug.o
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
new file mode 100644
index 000000000000..ed35a9712fa1
--- /dev/null
+++ b/fs/ecryptfs/crypto.c
@@ -0,0 +1,1659 @@
1/**
2 * eCryptfs: Linux filesystem encryption layer
3 *
4 * Copyright (C) 1997-2004 Erez Zadok
5 * Copyright (C) 2001-2004 Stony Brook University
6 * Copyright (C) 2004-2006 International Business Machines Corp.
7 * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
8 * Michael C. Thompson <mcthomps@us.ibm.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
23 * 02111-1307, USA.
24 */
25
26#include <linux/fs.h>
27#include <linux/mount.h>
28#include <linux/pagemap.h>
29#include <linux/random.h>
30#include <linux/compiler.h>
31#include <linux/key.h>
32#include <linux/namei.h>
33#include <linux/crypto.h>
34#include <linux/file.h>
35#include <linux/scatterlist.h>
36#include "ecryptfs_kernel.h"
37
38static int
39ecryptfs_decrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat,
40 struct page *dst_page, int dst_offset,
41 struct page *src_page, int src_offset, int size,
42 unsigned char *iv);
43static int
44ecryptfs_encrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat,
45 struct page *dst_page, int dst_offset,
46 struct page *src_page, int src_offset, int size,
47 unsigned char *iv);
48
49/**
50 * ecryptfs_to_hex
51 * @dst: Buffer to take hex character representation of contents of
52 * src; must be at least of size (src_size * 2)
53 * @src: Buffer to be converted to a hex string respresentation
54 * @src_size: number of bytes to convert
55 */
/*
 * Render src_size raw bytes from @src as lowercase hex into @dst.
 * @dst must hold at least (src_size * 2) characters; sprintf() also
 * writes a trailing NUL after the final pair.
 */
void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
{
	size_t i;

	for (i = 0; i < src_size; i++)
		sprintf(&dst[2 * i], "%.2x", (unsigned char)src[i]);
}
63
64/**
65 * ecryptfs_from_hex
66 * @dst: Buffer to take the bytes from src hex; must be at least of
67 * size (src_size / 2)
68 * @src: Buffer to be converted from a hex string respresentation to raw value
69 * @dst_size: size of dst buffer, or number of hex characters pairs to convert
70 */
71void ecryptfs_from_hex(char *dst, char *src, int dst_size)
72{
73 int x;
74 char tmp[3] = { 0, };
75
76 for (x = 0; x < dst_size; x++) {
77 tmp[0] = src[x * 2];
78 tmp[1] = src[x * 2 + 1];
79 dst[x] = (unsigned char)simple_strtol(tmp, NULL, 16);
80 }
81}
82
83/**
84 * ecryptfs_calculate_md5 - calculates the md5 of @src
85 * @dst: Pointer to 16 bytes of allocated memory
86 * @crypt_stat: Pointer to crypt_stat struct for the current inode
87 * @src: Data to be md5'd
88 * @len: Length of @src
89 *
90 * Uses the allocated crypto context that crypt_stat references to
91 * generate the MD5 sum of the contents of src.
92 */
93static int ecryptfs_calculate_md5(char *dst,
94 struct ecryptfs_crypt_stat *crypt_stat,
95 char *src, int len)
96{
97 int rc = 0;
98 struct scatterlist sg;
99
100 mutex_lock(&crypt_stat->cs_md5_tfm_mutex);
101 sg_init_one(&sg, (u8 *)src, len);
102 if (!crypt_stat->md5_tfm) {
103 crypt_stat->md5_tfm =
104 crypto_alloc_tfm("md5", CRYPTO_TFM_REQ_MAY_SLEEP);
105 if (!crypt_stat->md5_tfm) {
106 rc = -ENOMEM;
107 ecryptfs_printk(KERN_ERR, "Error attempting to "
108 "allocate crypto context\n");
109 goto out;
110 }
111 }
112 crypto_digest_init(crypt_stat->md5_tfm);
113 crypto_digest_update(crypt_stat->md5_tfm, &sg, 1);
114 crypto_digest_final(crypt_stat->md5_tfm, dst);
115 mutex_unlock(&crypt_stat->cs_md5_tfm_mutex);
116out:
117 return rc;
118}
119
120/**
121 * ecryptfs_derive_iv
122 * @iv: destination for the derived iv vale
123 * @crypt_stat: Pointer to crypt_stat struct for the current inode
124 * @offset: Offset of the page whose's iv we are to derive
125 *
126 * Generate the initialization vector from the given root IV and page
127 * offset.
128 *
129 * Returns zero on success; non-zero on error.
130 */
static int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
			      pgoff_t offset)
{
	int rc = 0;
	char dst[MD5_DIGEST_SIZE];
	/* root IV concatenated with a 16-byte decimal rendering of offset */
	char src[ECRYPTFS_MAX_IV_BYTES + 16];

	if (unlikely(ecryptfs_verbosity > 0)) {
		ecryptfs_printk(KERN_DEBUG, "root iv:\n");
		ecryptfs_dump_hex(crypt_stat->root_iv, crypt_stat->iv_bytes);
	}
	/* TODO: It is probably secure to just cast the least
	 * significant bits of the root IV into an unsigned long and
	 * add the offset to that rather than go through all this
	 * hashing business. -Halcrow */
	memcpy(src, crypt_stat->root_iv, crypt_stat->iv_bytes);
	memset((src + crypt_stat->iv_bytes), 0, 16);
	/* NOTE(review): "%ld" formats the (presumably unsigned) pgoff_t as
	 * signed; harmless for realistic offsets, but this exact string
	 * feeds the IV derivation, so it must never change without
	 * breaking compatibility with existing encrypted files. */
	snprintf((src + crypt_stat->iv_bytes), 16, "%ld", offset);
	if (unlikely(ecryptfs_verbosity > 0)) {
		ecryptfs_printk(KERN_DEBUG, "source:\n");
		ecryptfs_dump_hex(src, (crypt_stat->iv_bytes + 16));
	}
	/* IV = leading iv_bytes of MD5(root_iv || offset-string) */
	rc = ecryptfs_calculate_md5(dst, crypt_stat, src,
				    (crypt_stat->iv_bytes + 16));
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error attempting to compute "
				"MD5 while generating IV for a page\n");
		goto out;
	}
	memcpy(iv, dst, crypt_stat->iv_bytes);
	if (unlikely(ecryptfs_verbosity > 0)) {
		ecryptfs_printk(KERN_DEBUG, "derived iv:\n");
		ecryptfs_dump_hex(iv, crypt_stat->iv_bytes);
	}
out:
	return rc;
}
168
169/**
170 * ecryptfs_init_crypt_stat
171 * @crypt_stat: Pointer to the crypt_stat struct to initialize.
172 *
173 * Initialize the crypt_stat structure.
174 */
175void
176ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
177{
178 memset((void *)crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat));
179 mutex_init(&crypt_stat->cs_mutex);
180 mutex_init(&crypt_stat->cs_tfm_mutex);
181 mutex_init(&crypt_stat->cs_md5_tfm_mutex);
182 ECRYPTFS_SET_FLAG(crypt_stat->flags, ECRYPTFS_STRUCT_INITIALIZED);
183}
184
185/**
186 * ecryptfs_destruct_crypt_stat
187 * @crypt_stat: Pointer to the crypt_stat struct to initialize.
188 *
189 * Releases all memory associated with a crypt_stat struct.
190 */
191void ecryptfs_destruct_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
192{
193 if (crypt_stat->tfm)
194 crypto_free_tfm(crypt_stat->tfm);
195 if (crypt_stat->md5_tfm)
196 crypto_free_tfm(crypt_stat->md5_tfm);
197 memset(crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat));
198}
199
200void ecryptfs_destruct_mount_crypt_stat(
201 struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
202{
203 if (mount_crypt_stat->global_auth_tok_key)
204 key_put(mount_crypt_stat->global_auth_tok_key);
205 if (mount_crypt_stat->global_key_tfm)
206 crypto_free_tfm(mount_crypt_stat->global_key_tfm);
207 memset(mount_crypt_stat, 0, sizeof(struct ecryptfs_mount_crypt_stat));
208}
209
210/**
211 * virt_to_scatterlist
212 * @addr: Virtual address
213 * @size: Size of data; should be an even multiple of the block size
214 * @sg: Pointer to scatterlist array; set to NULL to obtain only
215 * the number of scatterlist structs required in array
216 * @sg_size: Max array size
217 *
218 * Fills in a scatterlist array with page references for a passed
219 * virtual address.
220 *
221 * Returns the number of scatterlist structs in array used
222 */
223int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
224 int sg_size)
225{
226 int i = 0;
227 struct page *pg;
228 int offset;
229 int remainder_of_page;
230
231 while (size > 0 && i < sg_size) {
232 pg = virt_to_page(addr);
233 offset = offset_in_page(addr);
234 if (sg) {
235 sg[i].page = pg;
236 sg[i].offset = offset;
237 }
238 remainder_of_page = PAGE_CACHE_SIZE - offset;
239 if (size >= remainder_of_page) {
240 if (sg)
241 sg[i].length = remainder_of_page;
242 addr += remainder_of_page;
243 size -= remainder_of_page;
244 } else {
245 if (sg)
246 sg[i].length = size;
247 addr += size;
248 size = 0;
249 }
250 i++;
251 }
252 if (size > 0)
253 return -ENOMEM;
254 return i;
255}
256
257/**
258 * encrypt_scatterlist
259 * @crypt_stat: Pointer to the crypt_stat struct to initialize.
260 * @dest_sg: Destination of encrypted data
261 * @src_sg: Data to be encrypted
262 * @size: Length of data to be encrypted
263 * @iv: iv to use during encryption
264 *
 * Returns zero on success; negative value on error
266 */
static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
			       struct scatterlist *dest_sg,
			       struct scatterlist *src_sg, int size,
			       unsigned char *iv)
{
	/* NOTE(review): despite the kernel-doc above, this returns 0 on
	 * success, not the number of bytes encrypted (contrast
	 * decrypt_scatterlist(), which returns size). */
	int rc = 0;

	BUG_ON(!crypt_stat || !crypt_stat->tfm
	       || !ECRYPTFS_CHECK_FLAG(crypt_stat->flags,
				       ECRYPTFS_STRUCT_INITIALIZED));
	if (unlikely(ecryptfs_verbosity > 0)) {
		ecryptfs_printk(KERN_DEBUG, "Key size [%d]; key:\n",
				crypt_stat->key_size);
		ecryptfs_dump_hex(crypt_stat->key,
				  crypt_stat->key_size);
	}
	/* Consider doing this once, when the file is opened */
	mutex_lock(&crypt_stat->cs_tfm_mutex);
	rc = crypto_cipher_setkey(crypt_stat->tfm, crypt_stat->key,
				  crypt_stat->key_size);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
				rc);
		mutex_unlock(&crypt_stat->cs_tfm_mutex);
		rc = -EINVAL;
		goto out;
	}
	ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes.\n", size);
	crypto_cipher_encrypt_iv(crypt_stat->tfm, dest_sg, src_sg, size, iv);
	mutex_unlock(&crypt_stat->cs_tfm_mutex);
out:
	return rc;
}
300
/*
 * Map an eCryptfs data extent number to the lower-file page index and
 * byte offset where it lives, skipping the header extents stored at
 * the front of the lower file.
 */
static void
ecryptfs_extent_to_lwr_pg_idx_and_offset(unsigned long *lower_page_idx,
					 int *byte_offset,
					 struct ecryptfs_crypt_stat *crypt_stat,
					 unsigned long extent_num)
{
	unsigned long lower_extent_num;
	int extents_occupied_by_headers_at_front;
	int bytes_occupied_by_headers_at_front;
	int extent_offset;
	int extents_per_page;

	bytes_occupied_by_headers_at_front =
		( crypt_stat->header_extent_size
		  * crypt_stat->num_header_extents_at_front );
	extents_occupied_by_headers_at_front =
		( bytes_occupied_by_headers_at_front
		  / crypt_stat->extent_size );
	/* Shift the data extent past the header region, then split into
	 * lower page index and intra-page extent slot. */
	lower_extent_num = extents_occupied_by_headers_at_front + extent_num;
	extents_per_page = PAGE_CACHE_SIZE / crypt_stat->extent_size;
	(*lower_page_idx) = lower_extent_num / extents_per_page;
	extent_offset = lower_extent_num % extents_per_page;
	(*byte_offset) = extent_offset * crypt_stat->extent_size;
	ecryptfs_printk(KERN_DEBUG, " * crypt_stat->header_extent_size = "
			"[%d]\n", crypt_stat->header_extent_size);
	ecryptfs_printk(KERN_DEBUG, " * crypt_stat->"
			"num_header_extents_at_front = [%d]\n",
			crypt_stat->num_header_extents_at_front);
	ecryptfs_printk(KERN_DEBUG, " * extents_occupied_by_headers_at_"
			"front = [%d]\n", extents_occupied_by_headers_at_front);
	ecryptfs_printk(KERN_DEBUG, " * lower_extent_num = [0x%.16x]\n",
			lower_extent_num);
	ecryptfs_printk(KERN_DEBUG, " * extents_per_page = [%d]\n",
			extents_per_page);
	ecryptfs_printk(KERN_DEBUG, " * (*lower_page_idx) = [0x%.16x]\n",
			(*lower_page_idx));
	ecryptfs_printk(KERN_DEBUG, " * extent_offset = [%d]\n",
			extent_offset);
	ecryptfs_printk(KERN_DEBUG, " * (*byte_offset) = [%d]\n",
			(*byte_offset));
}
342
343static int ecryptfs_write_out_page(struct ecryptfs_page_crypt_context *ctx,
344 struct page *lower_page,
345 struct inode *lower_inode,
346 int byte_offset_in_page, int bytes_to_write)
347{
348 int rc = 0;
349
350 if (ctx->mode == ECRYPTFS_PREPARE_COMMIT_MODE) {
351 rc = ecryptfs_commit_lower_page(lower_page, lower_inode,
352 ctx->param.lower_file,
353 byte_offset_in_page,
354 bytes_to_write);
355 if (rc) {
356 ecryptfs_printk(KERN_ERR, "Error calling lower "
357 "commit; rc = [%d]\n", rc);
358 goto out;
359 }
360 } else {
361 rc = ecryptfs_writepage_and_release_lower_page(lower_page,
362 lower_inode,
363 ctx->param.wbc);
364 if (rc) {
365 ecryptfs_printk(KERN_ERR, "Error calling lower "
366 "writepage(); rc = [%d]\n", rc);
367 goto out;
368 }
369 }
370out:
371 return rc;
372}
373
374static int ecryptfs_read_in_page(struct ecryptfs_page_crypt_context *ctx,
375 struct page **lower_page,
376 struct inode *lower_inode,
377 unsigned long lower_page_idx,
378 int byte_offset_in_page)
379{
380 int rc = 0;
381
382 if (ctx->mode == ECRYPTFS_PREPARE_COMMIT_MODE) {
383 /* TODO: Limit this to only the data extents that are
384 * needed */
385 rc = ecryptfs_get_lower_page(lower_page, lower_inode,
386 ctx->param.lower_file,
387 lower_page_idx,
388 byte_offset_in_page,
389 (PAGE_CACHE_SIZE
390 - byte_offset_in_page));
391 if (rc) {
392 ecryptfs_printk(
393 KERN_ERR, "Error attempting to grab, map, "
394 "and prepare_write lower page with index "
395 "[0x%.16x]; rc = [%d]\n", lower_page_idx, rc);
396 goto out;
397 }
398 } else {
399 rc = ecryptfs_grab_and_map_lower_page(lower_page, NULL,
400 lower_inode,
401 lower_page_idx);
402 if (rc) {
403 ecryptfs_printk(
404 KERN_ERR, "Error attempting to grab and map "
405 "lower page with index [0x%.16x]; rc = [%d]\n",
406 lower_page_idx, rc);
407 goto out;
408 }
409 }
410out:
411 return rc;
412}
413
414/**
415 * ecryptfs_encrypt_page
416 * @ctx: The context of the page
417 *
418 * Encrypt an eCryptfs page. This is done on a per-extent basis. Note
419 * that eCryptfs pages may straddle the lower pages -- for instance,
420 * if the file was created on a machine with an 8K page size
421 * (resulting in an 8K header), and then the file is copied onto a
422 * host with a 32K page size, then when reading page 0 of the eCryptfs
423 * file, 24K of page 0 of the lower file will be read and decrypted,
424 * and then 8K of page 1 of the lower file will be read and decrypted.
425 *
426 * The actual operations performed on each page depends on the
427 * contents of the ecryptfs_page_crypt_context struct.
428 *
429 * Returns zero on success; negative on error
430 */
int ecryptfs_encrypt_page(struct ecryptfs_page_crypt_context *ctx)
{
	char extent_iv[ECRYPTFS_MAX_IV_BYTES];
	unsigned long base_extent;
	unsigned long extent_offset = 0;
	unsigned long lower_page_idx = 0;
	unsigned long prior_lower_page_idx = 0;
	struct page *lower_page;
	struct inode *lower_inode;
	struct ecryptfs_inode_info *inode_info;
	struct ecryptfs_crypt_stat *crypt_stat;
	int rc = 0;
	int lower_byte_offset = 0;
	int orig_byte_offset = 0;
	int num_extents_per_page;
/* State machine for the current lower page as extents are encrypted
 * into it and flushed out. */
#define ECRYPTFS_PAGE_STATE_UNREAD 0
#define ECRYPTFS_PAGE_STATE_READ 1
#define ECRYPTFS_PAGE_STATE_MODIFIED 2
#define ECRYPTFS_PAGE_STATE_WRITTEN 3
	int page_state;

	lower_inode = ecryptfs_inode_to_lower(ctx->page->mapping->host);
	inode_info = ecryptfs_inode_to_private(ctx->page->mapping->host);
	crypt_stat = &inode_info->crypt_stat;
	/* Unencrypted files pass straight through to the lower file. */
	if (!ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_ENCRYPTED)) {
		rc = ecryptfs_copy_page_to_lower(ctx->page, lower_inode,
						 ctx->param.lower_file);
		if (rc)
			ecryptfs_printk(KERN_ERR, "Error attempting to copy "
					"page at index [0x%.16x]\n",
					ctx->page->index);
		goto out;
	}
	num_extents_per_page = PAGE_CACHE_SIZE / crypt_stat->extent_size;
	base_extent = (ctx->page->index * num_extents_per_page);
	page_state = ECRYPTFS_PAGE_STATE_UNREAD;
	while (extent_offset < num_extents_per_page) {
		ecryptfs_extent_to_lwr_pg_idx_and_offset(
			&lower_page_idx, &lower_byte_offset, crypt_stat,
			(base_extent + extent_offset));
		/* Crossed into a new lower page: flush the modified one
		 * before reading the next. */
		if (prior_lower_page_idx != lower_page_idx
		    && page_state == ECRYPTFS_PAGE_STATE_MODIFIED) {
			rc = ecryptfs_write_out_page(ctx, lower_page,
						     lower_inode,
						     orig_byte_offset,
						     (PAGE_CACHE_SIZE
						      - orig_byte_offset));
			if (rc) {
				ecryptfs_printk(KERN_ERR, "Error attempting "
						"to write out page; rc = [%d]"
						"\n", rc);
				goto out;
			}
			page_state = ECRYPTFS_PAGE_STATE_WRITTEN;
		}
		if (page_state == ECRYPTFS_PAGE_STATE_UNREAD
		    || page_state == ECRYPTFS_PAGE_STATE_WRITTEN) {
			rc = ecryptfs_read_in_page(ctx, &lower_page,
						   lower_inode, lower_page_idx,
						   lower_byte_offset);
			if (rc) {
				ecryptfs_printk(KERN_ERR, "Error attempting "
						"to read in lower page with "
						"index [0x%.16x]; rc = [%d]\n",
						lower_page_idx, rc);
				goto out;
			}
			orig_byte_offset = lower_byte_offset;
			prior_lower_page_idx = lower_page_idx;
			page_state = ECRYPTFS_PAGE_STATE_READ;
		}
		BUG_ON(!(page_state == ECRYPTFS_PAGE_STATE_MODIFIED
			 || page_state == ECRYPTFS_PAGE_STATE_READ));
		/* Each extent gets its own IV derived from the root IV
		 * and the extent number. */
		rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
					(base_extent + extent_offset));
		if (rc) {
			ecryptfs_printk(KERN_ERR, "Error attempting to "
					"derive IV for extent [0x%.16x]; "
					"rc = [%d]\n",
					(base_extent + extent_offset), rc);
			goto out;
		}
		if (unlikely(ecryptfs_verbosity > 0)) {
			ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
					"with iv:\n");
			ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
			ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
					"encryption:\n");
			ecryptfs_dump_hex((char *)
					  (page_address(ctx->page)
					   + (extent_offset
					      * crypt_stat->extent_size)), 8);
		}
		rc = ecryptfs_encrypt_page_offset(
			crypt_stat, lower_page, lower_byte_offset, ctx->page,
			(extent_offset * crypt_stat->extent_size),
			crypt_stat->extent_size, extent_iv);
		ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
				"rc = [%d]\n",
				(base_extent + extent_offset), rc);
		if (unlikely(ecryptfs_verbosity > 0)) {
			ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
					"encryption:\n");
			ecryptfs_dump_hex((char *)(page_address(lower_page)
						   + lower_byte_offset), 8);
		}
		page_state = ECRYPTFS_PAGE_STATE_MODIFIED;
		extent_offset++;
	}
	/* NOTE(review): asserts the final lower page was entered at offset
	 * zero — TODO confirm this always holds when eCryptfs extents
	 * straddle lower pages. */
	BUG_ON(orig_byte_offset != 0);
	rc = ecryptfs_write_out_page(ctx, lower_page, lower_inode, 0,
				     (lower_byte_offset
				      + crypt_stat->extent_size));
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error attempting to write out "
				"page; rc = [%d]\n", rc);
		goto out;
	}
out:
	return rc;
}
552
553/**
554 * ecryptfs_decrypt_page
555 * @file: The ecryptfs file
556 * @page: The page in ecryptfs to decrypt
557 *
558 * Decrypt an eCryptfs page. This is done on a per-extent basis. Note
559 * that eCryptfs pages may straddle the lower pages -- for instance,
560 * if the file was created on a machine with an 8K page size
561 * (resulting in an 8K header), and then the file is copied onto a
562 * host with a 32K page size, then when reading page 0 of the eCryptfs
563 * file, 24K of page 0 of the lower file will be read and decrypted,
564 * and then 8K of page 1 of the lower file will be read and decrypted.
565 *
566 * Returns zero on success; negative on error
567 */
int ecryptfs_decrypt_page(struct file *file, struct page *page)
{
	char extent_iv[ECRYPTFS_MAX_IV_BYTES];
	unsigned long base_extent;
	unsigned long extent_offset = 0;
	unsigned long lower_page_idx = 0;
	unsigned long prior_lower_page_idx = 0;
	struct page *lower_page;
	char *lower_page_virt = NULL;
	struct inode *lower_inode;
	struct ecryptfs_crypt_stat *crypt_stat;
	int rc = 0;
	int byte_offset;
	int num_extents_per_page;
	int page_state;

	crypt_stat = &(ecryptfs_inode_to_private(
			page->mapping->host)->crypt_stat);
	lower_inode = ecryptfs_inode_to_lower(page->mapping->host);
	/* Unencrypted files are read straight from the lower file. */
	if (!ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_ENCRYPTED)) {
		rc = ecryptfs_do_readpage(file, page, page->index);
		if (rc)
			ecryptfs_printk(KERN_ERR, "Error attempting to copy "
					"page at index [0x%.16x]\n",
					page->index);
		goto out;
	}
	num_extents_per_page = PAGE_CACHE_SIZE / crypt_stat->extent_size;
	base_extent = (page->index * num_extents_per_page);
	/* Scratch buffer that holds each encrypted lower page before it
	 * is decrypted into @page. */
	lower_page_virt = kmem_cache_alloc(ecryptfs_lower_page_cache,
					   SLAB_KERNEL);
	if (!lower_page_virt) {
		rc = -ENOMEM;
		ecryptfs_printk(KERN_ERR, "Error getting page for encrypted "
				"lower page(s)\n");
		goto out;
	}
	lower_page = virt_to_page(lower_page_virt);
	page_state = ECRYPTFS_PAGE_STATE_UNREAD;
	while (extent_offset < num_extents_per_page) {
		ecryptfs_extent_to_lwr_pg_idx_and_offset(
			&lower_page_idx, &byte_offset, crypt_stat,
			(base_extent + extent_offset));
		/* (Re)read the lower page whenever the extent moves to a
		 * new lower page, or on the first iteration. */
		if (prior_lower_page_idx != lower_page_idx
		    || page_state == ECRYPTFS_PAGE_STATE_UNREAD) {
			rc = ecryptfs_do_readpage(file, lower_page,
						  lower_page_idx);
			if (rc) {
				ecryptfs_printk(KERN_ERR, "Error reading "
						"lower encrypted page; rc = "
						"[%d]\n", rc);
				goto out;
			}
			prior_lower_page_idx = lower_page_idx;
			page_state = ECRYPTFS_PAGE_STATE_READ;
		}
		rc = ecryptfs_derive_iv(extent_iv, crypt_stat,
					(base_extent + extent_offset));
		if (rc) {
			ecryptfs_printk(KERN_ERR, "Error attempting to "
					"derive IV for extent [0x%.16x]; rc = "
					"[%d]\n",
					(base_extent + extent_offset), rc);
			goto out;
		}
		if (unlikely(ecryptfs_verbosity > 0)) {
			ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
					"with iv:\n");
			ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
			ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
					"decryption:\n");
			ecryptfs_dump_hex((lower_page_virt + byte_offset), 8);
		}
		rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
						  (extent_offset
						   * crypt_stat->extent_size),
						  lower_page, byte_offset,
						  crypt_stat->extent_size,
						  extent_iv);
		/* decrypt path returns the byte count on success, so
		 * anything other than a full extent is an error. */
		if (rc != crypt_stat->extent_size) {
			ecryptfs_printk(KERN_ERR, "Error attempting to "
					"decrypt extent [0x%.16x]\n",
					(base_extent + extent_offset));
			goto out;
		}
		rc = 0;
		if (unlikely(ecryptfs_verbosity > 0)) {
			ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
					"decryption:\n");
			ecryptfs_dump_hex((char *)(page_address(page)
						   + byte_offset), 8);
		}
		extent_offset++;
	}
out:
	/* kmem_cache_free() must not be called with NULL, so keep guard. */
	if (lower_page_virt)
		kmem_cache_free(ecryptfs_lower_page_cache, lower_page_virt);
	return rc;
}
667
668/**
669 * decrypt_scatterlist
670 *
671 * Returns the number of bytes decrypted; negative value on error
672 */
static int decrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
			       struct scatterlist *dest_sg,
			       struct scatterlist *src_sg, int size,
			       unsigned char *iv)
{
	int rc = 0;

	/* Consider doing this once, when the file is opened */
	mutex_lock(&crypt_stat->cs_tfm_mutex);
	rc = crypto_cipher_setkey(crypt_stat->tfm, crypt_stat->key,
				  crypt_stat->key_size);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
				rc);
		mutex_unlock(&crypt_stat->cs_tfm_mutex);
		rc = -EINVAL;
		goto out;
	}
	ecryptfs_printk(KERN_DEBUG, "Decrypting [%d] bytes.\n", size);
	rc = crypto_cipher_decrypt_iv(crypt_stat->tfm, dest_sg, src_sg, size,
				      iv);
	mutex_unlock(&crypt_stat->cs_tfm_mutex);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error decrypting; rc = [%d]\n",
				rc);
		goto out;
	}
	/* Success: report the byte count (callers compare against the
	 * extent size), unlike encrypt_scatterlist() which returns 0. */
	rc = size;
out:
	return rc;
}
704
705/**
706 * ecryptfs_encrypt_page_offset
707 *
 * Returns zero on success; negative value on error
709 */
710static int
711ecryptfs_encrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat,
712 struct page *dst_page, int dst_offset,
713 struct page *src_page, int src_offset, int size,
714 unsigned char *iv)
715{
716 struct scatterlist src_sg, dst_sg;
717
718 src_sg.page = src_page;
719 src_sg.offset = src_offset;
720 src_sg.length = size;
721 dst_sg.page = dst_page;
722 dst_sg.offset = dst_offset;
723 dst_sg.length = size;
724 return encrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv);
725}
726
727/**
728 * ecryptfs_decrypt_page_offset
729 *
730 * Returns the number of bytes decrypted
731 */
732static int
733ecryptfs_decrypt_page_offset(struct ecryptfs_crypt_stat *crypt_stat,
734 struct page *dst_page, int dst_offset,
735 struct page *src_page, int src_offset, int size,
736 unsigned char *iv)
737{
738 struct scatterlist src_sg, dst_sg;
739
740 src_sg.page = src_page;
741 src_sg.offset = src_offset;
742 src_sg.length = size;
743 dst_sg.page = dst_page;
744 dst_sg.offset = dst_offset;
745 dst_sg.length = size;
746 return decrypt_scatterlist(crypt_stat, &dst_sg, &src_sg, size, iv);
747}
748
749#define ECRYPTFS_MAX_SCATTERLIST_LEN 4
750
751/**
752 * ecryptfs_init_crypt_ctx
753 * @crypt_stat: Uninitilized crypt stats structure
754 *
755 * Initialize the crypto context.
756 *
757 * TODO: Performance: Keep a cache of initialized cipher contexts;
758 * only init if needed
759 */
int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
{
	int rc = -EINVAL;

	if (!crypt_stat->cipher) {
		ecryptfs_printk(KERN_ERR, "No cipher specified\n");
		goto out;
	}
	ecryptfs_printk(KERN_DEBUG,
			"Initializing cipher [%s]; strlen = [%d]; "
			"key_size_bits = [%d]\n",
			crypt_stat->cipher, (int)strlen(crypt_stat->cipher),
			crypt_stat->key_size << 3);
	/* Already initialized; nothing to do. */
	if (crypt_stat->tfm) {
		rc = 0;
		goto out;
	}
	/* NOTE(review): the tfm NULL-check above happens outside
	 * cs_tfm_mutex, so two concurrent callers could both allocate —
	 * TODO confirm callers serialize via cs_mutex. */
	mutex_lock(&crypt_stat->cs_tfm_mutex);
	crypt_stat->tfm = crypto_alloc_tfm(crypt_stat->cipher,
					   ECRYPTFS_DEFAULT_CHAINING_MODE
					   | CRYPTO_TFM_REQ_WEAK_KEY);
	mutex_unlock(&crypt_stat->cs_tfm_mutex);
	if (!crypt_stat->tfm) {
		ecryptfs_printk(KERN_ERR, "cryptfs: init_crypt_ctx(): "
				"Error initializing cipher [%s]\n",
				crypt_stat->cipher);
		goto out;
	}
	rc = 0;
out:
	return rc;
}
792
793static void set_extent_mask_and_shift(struct ecryptfs_crypt_stat *crypt_stat)
794{
795 int extent_size_tmp;
796
797 crypt_stat->extent_mask = 0xFFFFFFFF;
798 crypt_stat->extent_shift = 0;
799 if (crypt_stat->extent_size == 0)
800 return;
801 extent_size_tmp = crypt_stat->extent_size;
802 while ((extent_size_tmp & 0x01) == 0) {
803 extent_size_tmp >>= 1;
804 crypt_stat->extent_mask <<= 1;
805 crypt_stat->extent_shift++;
806 }
807}
808
809void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
810{
811 /* Default values; may be overwritten as we are parsing the
812 * packets. */
813 crypt_stat->extent_size = ECRYPTFS_DEFAULT_EXTENT_SIZE;
814 set_extent_mask_and_shift(crypt_stat);
815 crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES;
816 if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) {
817 crypt_stat->header_extent_size =
818 ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
819 } else
820 crypt_stat->header_extent_size = PAGE_CACHE_SIZE;
821 crypt_stat->num_header_extents_at_front = 1;
822}
823
824/**
825 * ecryptfs_compute_root_iv
826 * @crypt_stats
827 *
828 * On error, sets the root IV to all 0's.
829 */
830int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat)
831{
832 int rc = 0;
833 char dst[MD5_DIGEST_SIZE];
834
835 BUG_ON(crypt_stat->iv_bytes > MD5_DIGEST_SIZE);
836 BUG_ON(crypt_stat->iv_bytes <= 0);
837 if (!ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_KEY_VALID)) {
838 rc = -EINVAL;
839 ecryptfs_printk(KERN_WARNING, "Session key not valid; "
840 "cannot generate root IV\n");
841 goto out;
842 }
843 rc = ecryptfs_calculate_md5(dst, crypt_stat, crypt_stat->key,
844 crypt_stat->key_size);
845 if (rc) {
846 ecryptfs_printk(KERN_WARNING, "Error attempting to compute "
847 "MD5 while generating root IV\n");
848 goto out;
849 }
850 memcpy(crypt_stat->root_iv, dst, crypt_stat->iv_bytes);
851out:
852 if (rc) {
853 memset(crypt_stat->root_iv, 0, crypt_stat->iv_bytes);
854 ECRYPTFS_SET_FLAG(crypt_stat->flags,
855 ECRYPTFS_SECURITY_WARNING);
856 }
857 return rc;
858}
859
/**
 * ecryptfs_generate_new_key
 * @crypt_stat: crypt stats structure receiving the new session key
 *
 * Fills the session key with fresh random bytes, marks the key valid,
 * and derives the root IV from it.
 *
 * NOTE(review): the return value of ecryptfs_compute_root_iv() is
 * ignored; on failure that function zeroes the root IV and sets
 * ECRYPTFS_SECURITY_WARNING itself -- confirm that is sufficient.
 */
static void ecryptfs_generate_new_key(struct ecryptfs_crypt_stat *crypt_stat)
{
	get_random_bytes(crypt_stat->key, crypt_stat->key_size);
	ECRYPTFS_SET_FLAG(crypt_stat->flags, ECRYPTFS_KEY_VALID);
	ecryptfs_compute_root_iv(crypt_stat);
	/* Dump the raw key only when verbose debugging is enabled */
	if (unlikely(ecryptfs_verbosity > 0)) {
		ecryptfs_printk(KERN_DEBUG, "Generated new session key:\n");
		ecryptfs_dump_hex(crypt_stat->key,
				  crypt_stat->key_size);
	}
}
871
872/**
873 * ecryptfs_set_default_crypt_stat_vals
874 * @crypt_stat
875 *
876 * Default values in the event that policy does not override them.
877 */
878static void ecryptfs_set_default_crypt_stat_vals(
879 struct ecryptfs_crypt_stat *crypt_stat,
880 struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
881{
882 ecryptfs_set_default_sizes(crypt_stat);
883 strcpy(crypt_stat->cipher, ECRYPTFS_DEFAULT_CIPHER);
884 crypt_stat->key_size = ECRYPTFS_DEFAULT_KEY_BYTES;
885 ECRYPTFS_CLEAR_FLAG(crypt_stat->flags, ECRYPTFS_KEY_VALID);
886 crypt_stat->file_version = ECRYPTFS_FILE_VERSION;
887 crypt_stat->mount_crypt_stat = mount_crypt_stat;
888}
889
890/**
891 * ecryptfs_new_file_context
892 * @ecryptfs_dentry
893 *
894 * If the crypto context for the file has not yet been established,
895 * this is where we do that. Establishing a new crypto context
896 * involves the following decisions:
897 * - What cipher to use?
898 * - What set of authentication tokens to use?
899 * Here we just worry about getting enough information into the
900 * authentication tokens so that we know that they are available.
901 * We associate the available authentication tokens with the new file
902 * via the set of signatures in the crypt_stat struct. Later, when
903 * the headers are actually written out, we may again defer to
904 * userspace to perform the encryption of the session key; for the
905 * foreseeable future, this will be the case with public key packets.
906 *
907 * Returns zero on success; non-zero otherwise
908 */
909/* Associate an authentication token(s) with the file */
910int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry)
911{
912 int rc = 0;
913 struct ecryptfs_crypt_stat *crypt_stat =
914 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
915 struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
916 &ecryptfs_superblock_to_private(
917 ecryptfs_dentry->d_sb)->mount_crypt_stat;
918 int cipher_name_len;
919
920 ecryptfs_set_default_crypt_stat_vals(crypt_stat, mount_crypt_stat);
921 /* See if there are mount crypt options */
922 if (mount_crypt_stat->global_auth_tok) {
923 ecryptfs_printk(KERN_DEBUG, "Initializing context for new "
924 "file using mount_crypt_stat\n");
925 ECRYPTFS_SET_FLAG(crypt_stat->flags, ECRYPTFS_ENCRYPTED);
926 ECRYPTFS_SET_FLAG(crypt_stat->flags, ECRYPTFS_KEY_VALID);
927 memcpy(crypt_stat->keysigs[crypt_stat->num_keysigs++],
928 mount_crypt_stat->global_auth_tok_sig,
929 ECRYPTFS_SIG_SIZE_HEX);
930 cipher_name_len =
931 strlen(mount_crypt_stat->global_default_cipher_name);
932 memcpy(crypt_stat->cipher,
933 mount_crypt_stat->global_default_cipher_name,
934 cipher_name_len);
935 crypt_stat->cipher[cipher_name_len] = '\0';
936 crypt_stat->key_size =
937 mount_crypt_stat->global_default_cipher_key_size;
938 ecryptfs_generate_new_key(crypt_stat);
939 } else
940 /* We should not encounter this scenario since we
941 * should detect lack of global_auth_tok at mount time
942 * TODO: Applies to 0.1 release only; remove in future
943 * release */
944 BUG();
945 rc = ecryptfs_init_crypt_ctx(crypt_stat);
946 if (rc)
947 ecryptfs_printk(KERN_ERR, "Error initializing cryptographic "
948 "context for cipher [%s]: rc = [%d]\n",
949 crypt_stat->cipher, rc);
950 return rc;
951}
952
953/**
954 * contains_ecryptfs_marker - check for the ecryptfs marker
955 * @data: The data block in which to check
956 *
957 * Returns one if marker found; zero if not found
958 */
959int contains_ecryptfs_marker(char *data)
960{
961 u32 m_1, m_2;
962
963 memcpy(&m_1, data, 4);
964 m_1 = be32_to_cpu(m_1);
965 memcpy(&m_2, (data + 4), 4);
966 m_2 = be32_to_cpu(m_2);
967 if ((m_1 ^ MAGIC_ECRYPTFS_MARKER) == m_2)
968 return 1;
969 ecryptfs_printk(KERN_DEBUG, "m_1 = [0x%.8x]; m_2 = [0x%.8x]; "
970 "MAGIC_ECRYPTFS_MARKER = [0x%.8x]\n", m_1, m_2,
971 MAGIC_ECRYPTFS_MARKER);
972 ecryptfs_printk(KERN_DEBUG, "(m_1 ^ MAGIC_ECRYPTFS_MARKER) = "
973 "[0x%.8x]\n", (m_1 ^ MAGIC_ECRYPTFS_MARKER));
974 return 0;
975}
976
/* Maps an on-disk (file) flag bit to its in-memory (crypt_stat->flags)
 * counterpart. */
struct ecryptfs_flag_map_elem {
	u32 file_flag;
	u32 local_flag;
};

/* Add support for additional flags by adding elements here. */
static struct ecryptfs_flag_map_elem ecryptfs_flag_map[] = {
	{0x00000001, ECRYPTFS_ENABLE_HMAC},
	{0x00000002, ECRYPTFS_ENCRYPTED}
};
987
988/**
989 * ecryptfs_process_flags
990 * @crypt_stat
991 * @page_virt: Source data to be parsed
992 * @bytes_read: Updated with the number of bytes read
993 *
994 * Returns zero on success; non-zero if the flag set is invalid
995 */
996static int ecryptfs_process_flags(struct ecryptfs_crypt_stat *crypt_stat,
997 char *page_virt, int *bytes_read)
998{
999 int rc = 0;
1000 int i;
1001 u32 flags;
1002
1003 memcpy(&flags, page_virt, 4);
1004 flags = be32_to_cpu(flags);
1005 for (i = 0; i < ((sizeof(ecryptfs_flag_map)
1006 / sizeof(struct ecryptfs_flag_map_elem))); i++)
1007 if (flags & ecryptfs_flag_map[i].file_flag) {
1008 ECRYPTFS_SET_FLAG(crypt_stat->flags,
1009 ecryptfs_flag_map[i].local_flag);
1010 } else
1011 ECRYPTFS_CLEAR_FLAG(crypt_stat->flags,
1012 ecryptfs_flag_map[i].local_flag);
1013 /* Version is in top 8 bits of the 32-bit flag vector */
1014 crypt_stat->file_version = ((flags >> 24) & 0xFF);
1015 (*bytes_read) = 4;
1016 return rc;
1017}
1018
1019/**
1020 * write_ecryptfs_marker
1021 * @page_virt: The pointer to in a page to begin writing the marker
1022 * @written: Number of bytes written
1023 *
1024 * Marker = 0x3c81b7f5
1025 */
1026static void write_ecryptfs_marker(char *page_virt, size_t *written)
1027{
1028 u32 m_1, m_2;
1029
1030 get_random_bytes(&m_1, (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2));
1031 m_2 = (m_1 ^ MAGIC_ECRYPTFS_MARKER);
1032 m_1 = cpu_to_be32(m_1);
1033 memcpy(page_virt, &m_1, (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2));
1034 m_2 = cpu_to_be32(m_2);
1035 memcpy(page_virt + (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2), &m_2,
1036 (MAGIC_ECRYPTFS_MARKER_SIZE_BYTES / 2));
1037 (*written) = MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
1038}
1039
1040static void
1041write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat,
1042 size_t *written)
1043{
1044 u32 flags = 0;
1045 int i;
1046
1047 for (i = 0; i < ((sizeof(ecryptfs_flag_map)
1048 / sizeof(struct ecryptfs_flag_map_elem))); i++)
1049 if (ECRYPTFS_CHECK_FLAG(crypt_stat->flags,
1050 ecryptfs_flag_map[i].local_flag))
1051 flags |= ecryptfs_flag_map[i].file_flag;
1052 /* Version is in top 8 bits of the 32-bit flag vector */
1053 flags |= ((((u8)crypt_stat->file_version) << 24) & 0xFF000000);
1054 flags = cpu_to_be32(flags);
1055 memcpy(page_virt, &flags, 4);
1056 (*written) = 4;
1057}
1058
/* Pairs a kernel crypto API cipher name with its RFC 2440 cipher
 * code. */
struct ecryptfs_cipher_code_str_map_elem {
	char cipher_str[16];
	u16 cipher_code;
};

/* Add support for additional ciphers by adding elements here. The
 * cipher_code is whatever OpenPGP applications use to identify the
 * ciphers. List in order of probability. Note that "aes" appears
 * three times; ecryptfs_code_for_cipher_string() disambiguates the
 * AES entries by key size. */
static struct ecryptfs_cipher_code_str_map_elem
ecryptfs_cipher_code_str_map[] = {
	{"aes",RFC2440_CIPHER_AES_128 },
	{"blowfish", RFC2440_CIPHER_BLOWFISH},
	{"des3_ede", RFC2440_CIPHER_DES3_EDE},
	{"cast5", RFC2440_CIPHER_CAST_5},
	{"twofish", RFC2440_CIPHER_TWOFISH},
	{"cast6", RFC2440_CIPHER_CAST_6},
	{"aes", RFC2440_CIPHER_AES_192},
	{"aes", RFC2440_CIPHER_AES_256}
};
1078
1079/**
1080 * ecryptfs_code_for_cipher_string
1081 * @str: The string representing the cipher name
1082 *
1083 * Returns zero on no match, or the cipher code on match
1084 */
1085u16 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat)
1086{
1087 int i;
1088 u16 code = 0;
1089 struct ecryptfs_cipher_code_str_map_elem *map =
1090 ecryptfs_cipher_code_str_map;
1091
1092 if (strcmp(crypt_stat->cipher, "aes") == 0) {
1093 switch (crypt_stat->key_size) {
1094 case 16:
1095 code = RFC2440_CIPHER_AES_128;
1096 break;
1097 case 24:
1098 code = RFC2440_CIPHER_AES_192;
1099 break;
1100 case 32:
1101 code = RFC2440_CIPHER_AES_256;
1102 }
1103 } else {
1104 for (i = 0; i < ARRAY_SIZE(ecryptfs_cipher_code_str_map); i++)
1105 if (strcmp(crypt_stat->cipher, map[i].cipher_str) == 0){
1106 code = map[i].cipher_code;
1107 break;
1108 }
1109 }
1110 return code;
1111}
1112
1113/**
1114 * ecryptfs_cipher_code_to_string
1115 * @str: Destination to write out the cipher name
1116 * @cipher_code: The code to convert to cipher name string
1117 *
1118 * Returns zero on success
1119 */
1120int ecryptfs_cipher_code_to_string(char *str, u16 cipher_code)
1121{
1122 int rc = 0;
1123 int i;
1124
1125 str[0] = '\0';
1126 for (i = 0; i < ARRAY_SIZE(ecryptfs_cipher_code_str_map); i++)
1127 if (cipher_code == ecryptfs_cipher_code_str_map[i].cipher_code)
1128 strcpy(str, ecryptfs_cipher_code_str_map[i].cipher_str);
1129 if (str[0] == '\0') {
1130 ecryptfs_printk(KERN_WARNING, "Cipher code not recognized: "
1131 "[%d]\n", cipher_code);
1132 rc = -EINVAL;
1133 }
1134 return rc;
1135}
1136
1137/**
1138 * ecryptfs_read_header_region
1139 * @data
1140 * @dentry
1141 * @nd
1142 *
1143 * Returns zero on success; non-zero otherwise
1144 */
1145int ecryptfs_read_header_region(char *data, struct dentry *dentry,
1146 struct vfsmount *mnt)
1147{
1148 struct file *file;
1149 mm_segment_t oldfs;
1150 int rc;
1151
1152 mnt = mntget(mnt);
1153 file = dentry_open(dentry, mnt, O_RDONLY);
1154 if (IS_ERR(file)) {
1155 ecryptfs_printk(KERN_DEBUG, "Error opening file to "
1156 "read header region\n");
1157 mntput(mnt);
1158 rc = PTR_ERR(file);
1159 goto out;
1160 }
1161 file->f_pos = 0;
1162 oldfs = get_fs();
1163 set_fs(get_ds());
1164 /* For releases 0.1 and 0.2, all of the header information
1165 * fits in the first data extent-sized region. */
1166 rc = file->f_op->read(file, (char __user *)data,
1167 ECRYPTFS_DEFAULT_EXTENT_SIZE, &file->f_pos);
1168 set_fs(oldfs);
1169 fput(file);
1170 rc = 0;
1171out:
1172 return rc;
1173}
1174
1175static void
1176write_header_metadata(char *virt, struct ecryptfs_crypt_stat *crypt_stat,
1177 size_t *written)
1178{
1179 u32 header_extent_size;
1180 u16 num_header_extents_at_front;
1181
1182 header_extent_size = (u32)crypt_stat->header_extent_size;
1183 num_header_extents_at_front =
1184 (u16)crypt_stat->num_header_extents_at_front;
1185 header_extent_size = cpu_to_be32(header_extent_size);
1186 memcpy(virt, &header_extent_size, 4);
1187 virt += 4;
1188 num_header_extents_at_front = cpu_to_be16(num_header_extents_at_front);
1189 memcpy(virt, &num_header_extents_at_front, 2);
1190 (*written) = 6;
1191}
1192
/* Slab caches for the page-sized scratch buffers used while writing
 * headers (cache 0, see ecryptfs_write_headers()) and reading them
 * (cache 1, see ecryptfs_read_headers()); cache 2 is used by header
 * code outside this view. */
struct kmem_cache *ecryptfs_header_cache_0;
struct kmem_cache *ecryptfs_header_cache_1;
struct kmem_cache *ecryptfs_header_cache_2;
1196
1197/**
1198 * ecryptfs_write_headers_virt
1199 * @page_virt
1200 * @crypt_stat
1201 * @ecryptfs_dentry
1202 *
1203 * Format version: 1
1204 *
1205 * Header Extent:
1206 * Octets 0-7: Unencrypted file size (big-endian)
1207 * Octets 8-15: eCryptfs special marker
1208 * Octets 16-19: Flags
1209 * Octet 16: File format version number (between 0 and 255)
1210 * Octets 17-18: Reserved
1211 * Octet 19: Bit 1 (lsb): Reserved
1212 * Bit 2: Encrypted?
1213 * Bits 3-8: Reserved
1214 * Octets 20-23: Header extent size (big-endian)
1215 * Octets 24-25: Number of header extents at front of file
1216 * (big-endian)
1217 * Octet 26: Begin RFC 2440 authentication token packet set
1218 * Data Extent 0:
1219 * Lower data (CBC encrypted)
1220 * Data Extent 1:
1221 * Lower data (CBC encrypted)
1222 * ...
1223 *
1224 * Returns zero on success
1225 */
1226int ecryptfs_write_headers_virt(char *page_virt,
1227 struct ecryptfs_crypt_stat *crypt_stat,
1228 struct dentry *ecryptfs_dentry)
1229{
1230 int rc;
1231 size_t written;
1232 size_t offset;
1233
1234 offset = ECRYPTFS_FILE_SIZE_BYTES;
1235 write_ecryptfs_marker((page_virt + offset), &written);
1236 offset += written;
1237 write_ecryptfs_flags((page_virt + offset), crypt_stat, &written);
1238 offset += written;
1239 write_header_metadata((page_virt + offset), crypt_stat, &written);
1240 offset += written;
1241 rc = ecryptfs_generate_key_packet_set((page_virt + offset), crypt_stat,
1242 ecryptfs_dentry, &written,
1243 PAGE_CACHE_SIZE - offset);
1244 if (rc)
1245 ecryptfs_printk(KERN_WARNING, "Error generating key packet "
1246 "set; rc = [%d]\n", rc);
1247 return rc;
1248}
1249
1250/**
1251 * ecryptfs_write_headers
1252 * @lower_file: The lower file struct, which was returned from dentry_open
1253 *
1254 * Write the file headers out. This will likely involve a userspace
1255 * callout, in which the session key is encrypted with one or more
1256 * public keys and/or the passphrase necessary to do the encryption is
1257 * retrieved via a prompt. Exactly what happens at this point should
1258 * be policy-dependent.
1259 *
1260 * Returns zero on success; non-zero on error
1261 */
1262int ecryptfs_write_headers(struct dentry *ecryptfs_dentry,
1263 struct file *lower_file)
1264{
1265 mm_segment_t oldfs;
1266 struct ecryptfs_crypt_stat *crypt_stat;
1267 char *page_virt;
1268 int current_header_page;
1269 int header_pages;
1270 int rc = 0;
1271
1272 crypt_stat = &ecryptfs_inode_to_private(
1273 ecryptfs_dentry->d_inode)->crypt_stat;
1274 if (likely(ECRYPTFS_CHECK_FLAG(crypt_stat->flags,
1275 ECRYPTFS_ENCRYPTED))) {
1276 if (!ECRYPTFS_CHECK_FLAG(crypt_stat->flags,
1277 ECRYPTFS_KEY_VALID)) {
1278 ecryptfs_printk(KERN_DEBUG, "Key is "
1279 "invalid; bailing out\n");
1280 rc = -EINVAL;
1281 goto out;
1282 }
1283 } else {
1284 rc = -EINVAL;
1285 ecryptfs_printk(KERN_WARNING,
1286 "Called with crypt_stat->encrypted == 0\n");
1287 goto out;
1288 }
1289 /* Released in this function */
1290 page_virt = kmem_cache_alloc(ecryptfs_header_cache_0, SLAB_USER);
1291 if (!page_virt) {
1292 ecryptfs_printk(KERN_ERR, "Out of memory\n");
1293 rc = -ENOMEM;
1294 goto out;
1295 }
1296 memset(page_virt, 0, PAGE_CACHE_SIZE);
1297 rc = ecryptfs_write_headers_virt(page_virt, crypt_stat,
1298 ecryptfs_dentry);
1299 if (unlikely(rc)) {
1300 ecryptfs_printk(KERN_ERR, "Error whilst writing headers\n");
1301 memset(page_virt, 0, PAGE_CACHE_SIZE);
1302 goto out_free;
1303 }
1304 ecryptfs_printk(KERN_DEBUG,
1305 "Writing key packet set to underlying file\n");
1306 lower_file->f_pos = 0;
1307 oldfs = get_fs();
1308 set_fs(get_ds());
1309 ecryptfs_printk(KERN_DEBUG, "Calling lower_file->f_op->"
1310 "write() w/ header page; lower_file->f_pos = "
1311 "[0x%.16x]\n", lower_file->f_pos);
1312 lower_file->f_op->write(lower_file, (char __user *)page_virt,
1313 PAGE_CACHE_SIZE, &lower_file->f_pos);
1314 header_pages = ((crypt_stat->header_extent_size
1315 * crypt_stat->num_header_extents_at_front)
1316 / PAGE_CACHE_SIZE);
1317 memset(page_virt, 0, PAGE_CACHE_SIZE);
1318 current_header_page = 1;
1319 while (current_header_page < header_pages) {
1320 ecryptfs_printk(KERN_DEBUG, "Calling lower_file->f_op->"
1321 "write() w/ zero'd page; lower_file->f_pos = "
1322 "[0x%.16x]\n", lower_file->f_pos);
1323 lower_file->f_op->write(lower_file, (char __user *)page_virt,
1324 PAGE_CACHE_SIZE, &lower_file->f_pos);
1325 current_header_page++;
1326 }
1327 set_fs(oldfs);
1328 ecryptfs_printk(KERN_DEBUG,
1329 "Done writing key packet set to underlying file.\n");
1330out_free:
1331 kmem_cache_free(ecryptfs_header_cache_0, page_virt);
1332out:
1333 return rc;
1334}
1335
1336static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
1337 char *virt, int *bytes_read)
1338{
1339 int rc = 0;
1340 u32 header_extent_size;
1341 u16 num_header_extents_at_front;
1342
1343 memcpy(&header_extent_size, virt, 4);
1344 header_extent_size = be32_to_cpu(header_extent_size);
1345 virt += 4;
1346 memcpy(&num_header_extents_at_front, virt, 2);
1347 num_header_extents_at_front = be16_to_cpu(num_header_extents_at_front);
1348 crypt_stat->header_extent_size = (int)header_extent_size;
1349 crypt_stat->num_header_extents_at_front =
1350 (int)num_header_extents_at_front;
1351 (*bytes_read) = 6;
1352 if ((crypt_stat->header_extent_size
1353 * crypt_stat->num_header_extents_at_front)
1354 < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) {
1355 rc = -EINVAL;
1356 ecryptfs_printk(KERN_WARNING, "Invalid header extent size: "
1357 "[%d]\n", crypt_stat->header_extent_size);
1358 }
1359 return rc;
1360}
1361
1362/**
1363 * set_default_header_data
1364 *
1365 * For version 0 file format; this function is only for backwards
1366 * compatibility for files created with the prior versions of
1367 * eCryptfs.
1368 */
1369static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
1370{
1371 crypt_stat->header_extent_size = 4096;
1372 crypt_stat->num_header_extents_at_front = 1;
1373}
1374
1375/**
1376 * ecryptfs_read_headers_virt
1377 *
1378 * Read/parse the header data. The header format is detailed in the
1379 * comment block for the ecryptfs_write_headers_virt() function.
1380 *
1381 * Returns zero on success
1382 */
1383static int ecryptfs_read_headers_virt(char *page_virt,
1384 struct ecryptfs_crypt_stat *crypt_stat,
1385 struct dentry *ecryptfs_dentry)
1386{
1387 int rc = 0;
1388 int offset;
1389 int bytes_read;
1390
1391 ecryptfs_set_default_sizes(crypt_stat);
1392 crypt_stat->mount_crypt_stat = &ecryptfs_superblock_to_private(
1393 ecryptfs_dentry->d_sb)->mount_crypt_stat;
1394 offset = ECRYPTFS_FILE_SIZE_BYTES;
1395 rc = contains_ecryptfs_marker(page_virt + offset);
1396 if (rc == 0) {
1397 rc = -EINVAL;
1398 goto out;
1399 }
1400 offset += MAGIC_ECRYPTFS_MARKER_SIZE_BYTES;
1401 rc = ecryptfs_process_flags(crypt_stat, (page_virt + offset),
1402 &bytes_read);
1403 if (rc) {
1404 ecryptfs_printk(KERN_WARNING, "Error processing flags\n");
1405 goto out;
1406 }
1407 if (crypt_stat->file_version > ECRYPTFS_SUPPORTED_FILE_VERSION) {
1408 ecryptfs_printk(KERN_WARNING, "File version is [%d]; only "
1409 "file version [%d] is supported by this "
1410 "version of eCryptfs\n",
1411 crypt_stat->file_version,
1412 ECRYPTFS_SUPPORTED_FILE_VERSION);
1413 rc = -EINVAL;
1414 goto out;
1415 }
1416 offset += bytes_read;
1417 if (crypt_stat->file_version >= 1) {
1418 rc = parse_header_metadata(crypt_stat, (page_virt + offset),
1419 &bytes_read);
1420 if (rc) {
1421 ecryptfs_printk(KERN_WARNING, "Error reading header "
1422 "metadata; rc = [%d]\n", rc);
1423 }
1424 offset += bytes_read;
1425 } else
1426 set_default_header_data(crypt_stat);
1427 rc = ecryptfs_parse_packet_set(crypt_stat, (page_virt + offset),
1428 ecryptfs_dentry);
1429out:
1430 return rc;
1431}
1432
1433/**
1434 * ecryptfs_read_headers
1435 *
1436 * Returns zero if valid headers found and parsed; non-zero otherwise
1437 */
1438int ecryptfs_read_headers(struct dentry *ecryptfs_dentry,
1439 struct file *lower_file)
1440{
1441 int rc = 0;
1442 char *page_virt = NULL;
1443 mm_segment_t oldfs;
1444 ssize_t bytes_read;
1445 struct ecryptfs_crypt_stat *crypt_stat =
1446 &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
1447
1448 /* Read the first page from the underlying file */
1449 page_virt = kmem_cache_alloc(ecryptfs_header_cache_1, SLAB_USER);
1450 if (!page_virt) {
1451 rc = -ENOMEM;
1452 ecryptfs_printk(KERN_ERR, "Unable to allocate page_virt\n");
1453 goto out;
1454 }
1455 lower_file->f_pos = 0;
1456 oldfs = get_fs();
1457 set_fs(get_ds());
1458 bytes_read = lower_file->f_op->read(lower_file,
1459 (char __user *)page_virt,
1460 ECRYPTFS_DEFAULT_EXTENT_SIZE,
1461 &lower_file->f_pos);
1462 set_fs(oldfs);
1463 if (bytes_read != ECRYPTFS_DEFAULT_EXTENT_SIZE) {
1464 rc = -EINVAL;
1465 goto out;
1466 }
1467 rc = ecryptfs_read_headers_virt(page_virt, crypt_stat,
1468 ecryptfs_dentry);
1469 if (rc) {
1470 ecryptfs_printk(KERN_DEBUG, "Valid eCryptfs headers not "
1471 "found\n");
1472 rc = -EINVAL;
1473 }
1474out:
1475 if (page_virt) {
1476 memset(page_virt, 0, PAGE_CACHE_SIZE);
1477 kmem_cache_free(ecryptfs_header_cache_1, page_virt);
1478 }
1479 return rc;
1480}
1481
1482/**
1483 * ecryptfs_encode_filename - converts a plaintext file name to cipher text
1484 * @crypt_stat: The crypt_stat struct associated with the file anem to encode
1485 * @name: The plaintext name
1486 * @length: The length of the plaintext
1487 * @encoded_name: The encypted name
1488 *
1489 * Encrypts and encodes a filename into something that constitutes a
1490 * valid filename for a filesystem, with printable characters.
1491 *
1492 * We assume that we have a properly initialized crypto context,
1493 * pointed to by crypt_stat->tfm.
1494 *
1495 * TODO: Implement filename decoding and decryption here, in place of
1496 * memcpy. We are keeping the framework around for now to (1)
1497 * facilitate testing of the components needed to implement filename
1498 * encryption and (2) to provide a code base from which other
1499 * developers in the community can easily implement this feature.
1500 *
1501 * Returns the length of encoded filename; negative if error
1502 */
1503int
1504ecryptfs_encode_filename(struct ecryptfs_crypt_stat *crypt_stat,
1505 const char *name, int length, char **encoded_name)
1506{
1507 int error = 0;
1508
1509 (*encoded_name) = kmalloc(length + 2, GFP_KERNEL);
1510 if (!(*encoded_name)) {
1511 error = -ENOMEM;
1512 goto out;
1513 }
1514 /* TODO: Filename encryption is a scheduled feature for a
1515 * future version of eCryptfs. This function is here only for
1516 * the purpose of providing a framework for other developers
1517 * to easily implement filename encryption. Hint: Replace this
1518 * memcpy() with a call to encrypt and encode the
1519 * filename, the set the length accordingly. */
1520 memcpy((void *)(*encoded_name), (void *)name, length);
1521 (*encoded_name)[length] = '\0';
1522 error = length + 1;
1523out:
1524 return error;
1525}
1526
1527/**
1528 * ecryptfs_decode_filename - converts the cipher text name to plaintext
1529 * @crypt_stat: The crypt_stat struct associated with the file
1530 * @name: The filename in cipher text
1531 * @length: The length of the cipher text name
1532 * @decrypted_name: The plaintext name
1533 *
1534 * Decodes and decrypts the filename.
1535 *
1536 * We assume that we have a properly initialized crypto context,
1537 * pointed to by crypt_stat->tfm.
1538 *
1539 * TODO: Implement filename decoding and decryption here, in place of
1540 * memcpy. We are keeping the framework around for now to (1)
1541 * facilitate testing of the components needed to implement filename
1542 * encryption and (2) to provide a code base from which other
1543 * developers in the community can easily implement this feature.
1544 *
1545 * Returns the length of decoded filename; negative if error
1546 */
1547int
1548ecryptfs_decode_filename(struct ecryptfs_crypt_stat *crypt_stat,
1549 const char *name, int length, char **decrypted_name)
1550{
1551 int error = 0;
1552
1553 (*decrypted_name) = kmalloc(length + 2, GFP_KERNEL);
1554 if (!(*decrypted_name)) {
1555 error = -ENOMEM;
1556 goto out;
1557 }
1558 /* TODO: Filename encryption is a scheduled feature for a
1559 * future version of eCryptfs. This function is here only for
1560 * the purpose of providing a framework for other developers
1561 * to easily implement filename encryption. Hint: Replace this
1562 * memcpy() with a call to decode and decrypt the
1563 * filename, the set the length accordingly. */
1564 memcpy((void *)(*decrypted_name), (void *)name, length);
1565 (*decrypted_name)[length + 1] = '\0'; /* Only for convenience
1566 * in printing out the
1567 * string in debug
1568 * messages */
1569 error = length;
1570out:
1571 return error;
1572}
1573
1574/**
1575 * ecryptfs_process_cipher - Perform cipher initialization.
1576 * @tfm: Crypto context set by this function
1577 * @key_tfm: Crypto context for key material, set by this function
1578 * @cipher_name: Name of the cipher.
1579 * @key_size: Size of the key in bytes.
1580 *
1581 * Returns zero on success. Any crypto_tfm structs allocated here
1582 * should be released by other functions, such as on a superblock put
1583 * event, regardless of whether this function succeeds for fails.
1584 */
1585int
1586ecryptfs_process_cipher(struct crypto_tfm **tfm, struct crypto_tfm **key_tfm,
1587 char *cipher_name, size_t key_size)
1588{
1589 char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
1590 int rc;
1591
1592 *tfm = *key_tfm = NULL;
1593 if (key_size > ECRYPTFS_MAX_KEY_BYTES) {
1594 rc = -EINVAL;
1595 printk(KERN_ERR "Requested key size is [%Zd] bytes; maximum "
1596 "allowable is [%d]\n", key_size, ECRYPTFS_MAX_KEY_BYTES);
1597 goto out;
1598 }
1599 *tfm = crypto_alloc_tfm(cipher_name, (ECRYPTFS_DEFAULT_CHAINING_MODE
1600 | CRYPTO_TFM_REQ_WEAK_KEY));
1601 if (!(*tfm)) {
1602 rc = -EINVAL;
1603 printk(KERN_ERR "Unable to allocate crypto cipher with name "
1604 "[%s]\n", cipher_name);
1605 goto out;
1606 }
1607 *key_tfm = crypto_alloc_tfm(cipher_name, CRYPTO_TFM_REQ_WEAK_KEY);
1608 if (!(*key_tfm)) {
1609 rc = -EINVAL;
1610 printk(KERN_ERR "Unable to allocate crypto cipher with name "
1611 "[%s]\n", cipher_name);
1612 goto out;
1613 }
1614 if (key_size < crypto_tfm_alg_min_keysize(*tfm)) {
1615 rc = -EINVAL;
1616 printk(KERN_ERR "Request key size is [%Zd]; minimum key size "
1617 "supported by cipher [%s] is [%d]\n", key_size,
1618 cipher_name, crypto_tfm_alg_min_keysize(*tfm));
1619 goto out;
1620 }
1621 if (key_size < crypto_tfm_alg_min_keysize(*key_tfm)) {
1622 rc = -EINVAL;
1623 printk(KERN_ERR "Request key size is [%Zd]; minimum key size "
1624 "supported by cipher [%s] is [%d]\n", key_size,
1625 cipher_name, crypto_tfm_alg_min_keysize(*key_tfm));
1626 goto out;
1627 }
1628 if (key_size > crypto_tfm_alg_max_keysize(*tfm)) {
1629 rc = -EINVAL;
1630 printk(KERN_ERR "Request key size is [%Zd]; maximum key size "
1631 "supported by cipher [%s] is [%d]\n", key_size,
1632 cipher_name, crypto_tfm_alg_min_keysize(*tfm));
1633 goto out;
1634 }
1635 if (key_size > crypto_tfm_alg_max_keysize(*key_tfm)) {
1636 rc = -EINVAL;
1637 printk(KERN_ERR "Request key size is [%Zd]; maximum key size "
1638 "supported by cipher [%s] is [%d]\n", key_size,
1639 cipher_name, crypto_tfm_alg_min_keysize(*key_tfm));
1640 goto out;
1641 }
1642 get_random_bytes(dummy_key, key_size);
1643 rc = crypto_cipher_setkey(*tfm, dummy_key, key_size);
1644 if (rc) {
1645 printk(KERN_ERR "Error attempting to set key of size [%Zd] for "
1646 "cipher [%s]; rc = [%d]\n", key_size, cipher_name, rc);
1647 rc = -EINVAL;
1648 goto out;
1649 }
1650 rc = crypto_cipher_setkey(*key_tfm, dummy_key, key_size);
1651 if (rc) {
1652 printk(KERN_ERR "Error attempting to set key of size [%Zd] for "
1653 "cipher [%s]; rc = [%d]\n", key_size, cipher_name, rc);
1654 rc = -EINVAL;
1655 goto out;
1656 }
1657out:
1658 return rc;
1659}
diff --git a/fs/ecryptfs/debug.c b/fs/ecryptfs/debug.c
new file mode 100644
index 000000000000..61f8e894284f
--- /dev/null
+++ b/fs/ecryptfs/debug.c
@@ -0,0 +1,123 @@
1/**
2 * eCryptfs: Linux filesystem encryption layer
3 * Functions only useful for debugging.
4 *
5 * Copyright (C) 2006 International Business Machines Corp.
6 * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
21 * 02111-1307, USA.
22 */
23
24#include "ecryptfs_kernel.h"
25
/**
 * ecryptfs_dump_auth_tok - debug function to print auth toks
 * @auth_tok: The authentication token to dump
 *
 * This function will print the contents of an ecryptfs authentication
 * token: token type, salt and signature (for passphrase tokens), and
 * the session key state.  Decrypted/encrypted session key material is
 * only hex-dumped when ecryptfs_verbosity > 0, since it is sensitive.
 */
void ecryptfs_dump_auth_tok(struct ecryptfs_auth_tok *auth_tok)
{
	/* Stack buffers for NUL-terminated hex renditions */
	char salt[ECRYPTFS_SALT_SIZE * 2 + 1];
	char sig[ECRYPTFS_SIG_SIZE_HEX + 1];

	ecryptfs_printk(KERN_DEBUG, "Auth tok at mem loc [%p]:\n",
			auth_tok);
	/* NOTE(review): ECRYPTFS_PRIVATE_KEY is an enum
	 * ecryptfs_token_types value (1), but is tested here as a bit in
	 * ->flags rather than compared against ->token_type -- confirm
	 * this is intended. */
	if (ECRYPTFS_CHECK_FLAG(auth_tok->flags, ECRYPTFS_PRIVATE_KEY)) {
		ecryptfs_printk(KERN_DEBUG, " * private key type\n");
		ecryptfs_printk(KERN_DEBUG, " * (NO PRIVATE KEY SUPPORT "
				"IN ECRYPTFS VERSION 0.1)\n");
	} else {
		ecryptfs_printk(KERN_DEBUG, " * passphrase type\n");
		/* Salt is stored raw; expand to hex for display */
		ecryptfs_to_hex(salt, auth_tok->token.password.salt,
				ECRYPTFS_SALT_SIZE);
		salt[ECRYPTFS_SALT_SIZE * 2] = '\0';
		ecryptfs_printk(KERN_DEBUG, " * salt = [%s]\n", salt);
		if (ECRYPTFS_CHECK_FLAG(auth_tok->token.password.flags,
					ECRYPTFS_PERSISTENT_PASSWORD)) {
			ecryptfs_printk(KERN_DEBUG, " * persistent\n");
		}
		/* Signature is already expanded hex; just terminate it */
		memcpy(sig, auth_tok->token.password.signature,
		       ECRYPTFS_SIG_SIZE_HEX);
		sig[ECRYPTFS_SIG_SIZE_HEX] = '\0';
		ecryptfs_printk(KERN_DEBUG, " * signature = [%s]\n", sig);
	}
	ecryptfs_printk(KERN_DEBUG, " * session_key.flags = [0x%x]\n",
			auth_tok->session_key.flags);
	if (auth_tok->session_key.flags
	    & ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT)
		ecryptfs_printk(KERN_DEBUG,
				" * Userspace decrypt request set\n");
	if (auth_tok->session_key.flags
	    & ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT)
		ecryptfs_printk(KERN_DEBUG,
				" * Userspace encrypt request set\n");
	if (auth_tok->session_key.flags & ECRYPTFS_CONTAINS_DECRYPTED_KEY) {
		ecryptfs_printk(KERN_DEBUG, " * Contains decrypted key\n");
		ecryptfs_printk(KERN_DEBUG,
				" * session_key.decrypted_key_size = [0x%x]\n",
				auth_tok->session_key.decrypted_key_size);
		ecryptfs_printk(KERN_DEBUG, " * Decrypted session key "
				"dump:\n");
		/* Key material: gate behind verbosity.  NOTE(review): the
		 * dump length is the fixed default, not
		 * decrypted_key_size -- confirm that is intended. */
		if (ecryptfs_verbosity > 0)
			ecryptfs_dump_hex(auth_tok->session_key.decrypted_key,
					  ECRYPTFS_DEFAULT_KEY_BYTES);
	}
	if (auth_tok->session_key.flags & ECRYPTFS_CONTAINS_ENCRYPTED_KEY) {
		ecryptfs_printk(KERN_DEBUG, " * Contains encrypted key\n");
		ecryptfs_printk(KERN_DEBUG,
				" * session_key.encrypted_key_size = [0x%x]\n",
				auth_tok->session_key.encrypted_key_size);
		ecryptfs_printk(KERN_DEBUG, " * Encrypted session key "
				"dump:\n");
		if (ecryptfs_verbosity > 0)
			ecryptfs_dump_hex(auth_tok->session_key.encrypted_key,
					  auth_tok->session_key.
					  encrypted_key_size);
	}
}
92
93/**
94 * ecryptfs_dump_hex - debug hex printer
95 * @data: string of bytes to be printed
96 * @bytes: number of bytes to print
97 *
98 * Dump hexadecimal representation of char array
99 */
100void ecryptfs_dump_hex(char *data, int bytes)
101{
102 int i = 0;
103 int add_newline = 1;
104
105 if (ecryptfs_verbosity < 1)
106 return;
107 if (bytes != 0) {
108 printk(KERN_DEBUG "0x%.2x.", (unsigned char)data[i]);
109 i++;
110 }
111 while (i < bytes) {
112 printk("0x%.2x.", (unsigned char)data[i]);
113 i++;
114 if (i % 16 == 0) {
115 printk("\n");
116 add_newline = 0;
117 } else
118 add_newline = 1;
119 }
120 if (add_newline)
121 printk("\n");
122}
123
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
new file mode 100644
index 000000000000..f0d2a433242b
--- /dev/null
+++ b/fs/ecryptfs/dentry.c
@@ -0,0 +1,87 @@
1/**
2 * eCryptfs: Linux filesystem encryption layer
3 *
4 * Copyright (C) 1997-2003 Erez Zadok
5 * Copyright (C) 2001-2003 Stony Brook University
6 * Copyright (C) 2004-2006 International Business Machines Corp.
7 * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
22 * 02111-1307, USA.
23 */
24
25#include <linux/dcache.h>
26#include <linux/namei.h>
27#include "ecryptfs_kernel.h"
28
29/**
30 * ecryptfs_d_revalidate - revalidate an ecryptfs dentry
31 * @dentry: The ecryptfs dentry
32 * @nd: The associated nameidata
33 *
34 * Called when the VFS needs to revalidate a dentry. This
35 * is called whenever a name lookup finds a dentry in the
36 * dcache. Most filesystems leave this as NULL, because all their
37 * dentries in the dcache are valid.
38 *
39 * Returns 1 if valid, 0 otherwise.
40 *
41 */
42static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
43{
44 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
45 struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
46 struct dentry *dentry_save;
47 struct vfsmount *vfsmount_save;
48 int rc = 1;
49
50 if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
51 goto out;
52 dentry_save = nd->dentry;
53 vfsmount_save = nd->mnt;
54 nd->dentry = lower_dentry;
55 nd->mnt = lower_mnt;
56 rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
57 nd->dentry = dentry_save;
58 nd->mnt = vfsmount_save;
59out:
60 return rc;
61}
62
63struct kmem_cache *ecryptfs_dentry_info_cache;
64
65/**
66 * ecryptfs_d_release
67 * @dentry: The ecryptfs dentry
68 *
69 * Called when a dentry is really deallocated.
70 */
71static void ecryptfs_d_release(struct dentry *dentry)
72{
73 struct dentry *lower_dentry;
74
75 lower_dentry = ecryptfs_dentry_to_lower(dentry);
76 if (ecryptfs_dentry_to_private(dentry))
77 kmem_cache_free(ecryptfs_dentry_info_cache,
78 ecryptfs_dentry_to_private(dentry));
79 if (lower_dentry)
80 dput(lower_dentry);
81 return;
82}
83
/* Dentry operations installed on every eCryptfs dentry */
struct dentry_operations ecryptfs_dops = {
	.d_revalidate = ecryptfs_d_revalidate,
	.d_release = ecryptfs_d_release,
};
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
new file mode 100644
index 000000000000..872c9958531a
--- /dev/null
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -0,0 +1,482 @@
1/**
2 * eCryptfs: Linux filesystem encryption layer
3 * Kernel declarations.
4 *
5 * Copyright (C) 1997-2003 Erez Zadok
6 * Copyright (C) 2001-2003 Stony Brook University
7 * Copyright (C) 2004-2006 International Business Machines Corp.
8 * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
23 * 02111-1307, USA.
24 */
25
26#ifndef ECRYPTFS_KERNEL_H
27#define ECRYPTFS_KERNEL_H
28
29#include <keys/user-type.h>
30#include <linux/fs.h>
31#include <linux/scatterlist.h>
32
/* Version verification for shared data structures w/ userspace */
#define ECRYPTFS_VERSION_MAJOR 0x00
#define ECRYPTFS_VERSION_MINOR 0x04
#define ECRYPTFS_SUPPORTED_FILE_VERSION 0x01
/* These flags indicate which features are supported by the kernel
 * module; userspace tools such as the mount helper read
 * ECRYPTFS_VERSIONING_MASK from a sysfs handle in order to determine
 * how to behave. */
#define ECRYPTFS_VERSIONING_PASSPHRASE 0x00000001
#define ECRYPTFS_VERSIONING_PUBKEY 0x00000002
#define ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH 0x00000004
#define ECRYPTFS_VERSIONING_POLICY 0x00000008
/* Only passphrase support and plaintext passthrough are advertised;
 * pubkey and policy bits are defined but not yet in the mask. */
#define ECRYPTFS_VERSIONING_MASK (ECRYPTFS_VERSIONING_PASSPHRASE \
				  | ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH)

#define ECRYPTFS_MAX_PASSWORD_LENGTH 64
#define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH
#define ECRYPTFS_SALT_SIZE 8
#define ECRYPTFS_SALT_SIZE_HEX (ECRYPTFS_SALT_SIZE*2)
/* The original signature size is only for what is stored on disk; all
 * in-memory representations are expanded hex, so it is better adapted
 * to be passed around or referenced on the command line */
#define ECRYPTFS_SIG_SIZE 8
#define ECRYPTFS_SIG_SIZE_HEX (ECRYPTFS_SIG_SIZE*2)
#define ECRYPTFS_PASSWORD_SIG_SIZE ECRYPTFS_SIG_SIZE_HEX
#define ECRYPTFS_MAX_KEY_BYTES 64
#define ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES 512
#define ECRYPTFS_DEFAULT_IV_BYTES 16
#define ECRYPTFS_FILE_VERSION 0x01
#define ECRYPTFS_DEFAULT_HEADER_EXTENT_SIZE 8192
#define ECRYPTFS_DEFAULT_EXTENT_SIZE 4096
#define ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE 8192

/* Cipher algorithm identifiers from RFC 2440 (OpenPGP) */
#define RFC2440_CIPHER_DES3_EDE 0x02
#define RFC2440_CIPHER_CAST_5 0x03
#define RFC2440_CIPHER_BLOWFISH 0x04
#define RFC2440_CIPHER_AES_128 0x07
#define RFC2440_CIPHER_AES_192 0x08
#define RFC2440_CIPHER_AES_256 0x09
#define RFC2440_CIPHER_TWOFISH 0x0a
#define RFC2440_CIPHER_CAST_6 0x0b

/* Plain read-modify-write flag helpers -- NOT atomic; callers serialize
 * themselves (e.g. under crypt_stat->cs_mutex, as in ecryptfs_open()). */
#define ECRYPTFS_SET_FLAG(flag_bit_vector, flag) (flag_bit_vector |= (flag))
#define ECRYPTFS_CLEAR_FLAG(flag_bit_vector, flag) (flag_bit_vector &= ~(flag))
#define ECRYPTFS_CHECK_FLAG(flag_bit_vector, flag) (flag_bit_vector & (flag))

/**
 * For convenience, we may need to pass around the encrypted session
 * key between kernel and userspace because the authentication token
 * may not be extractable.  For example, the TPM may not release the
 * private key, instead requiring the encrypted data and returning the
 * decrypted data.
 */
struct ecryptfs_session_key {
#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT 0x00000001
#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT 0x00000002
#define ECRYPTFS_CONTAINS_DECRYPTED_KEY 0x00000004
#define ECRYPTFS_CONTAINS_ENCRYPTED_KEY 0x00000008
	u32 flags;
	u32 encrypted_key_size;
	u32 decrypted_key_size;
	u8 encrypted_key[ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES];
	u8 decrypted_key[ECRYPTFS_MAX_KEY_BYTES];
};

/* Passphrase-type authentication token payload */
struct ecryptfs_password {
	u32 password_bytes;
	s32 hash_algo;
	u32 hash_iterations;
	u32 session_key_encryption_key_bytes;
#define ECRYPTFS_PERSISTENT_PASSWORD 0x01
#define ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET 0x02
	u32 flags;
	/* Iterated-hash concatenation of salt and passphrase */
	u8 session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES];
	u8 signature[ECRYPTFS_PASSWORD_SIG_SIZE + 1];
	/* Always in expanded hex */
	u8 salt[ECRYPTFS_SALT_SIZE];
};

enum ecryptfs_token_types {ECRYPTFS_PASSWORD, ECRYPTFS_PRIVATE_KEY};

/* May be a password or a private key.  This layout is shared with
 * userspace (see the version-verification defines above); it is packed
 * and its fields must not be reordered or resized without bumping the
 * version. */
struct ecryptfs_auth_tok {
	u16 version; /* 8-bit major and 8-bit minor */
	u16 token_type;
	u32 flags;
	struct ecryptfs_session_key session_key;
	u8 reserved[32];
	union {
		struct ecryptfs_password password;
		/* Private key is in future eCryptfs releases */
	} token;
} __attribute__ ((packed));

void ecryptfs_dump_auth_tok(struct ecryptfs_auth_tok *auth_tok);
/* Hex <-> raw-byte conversion helpers (defined elsewhere) */
extern void ecryptfs_to_hex(char *dst, char *src, size_t src_size);
extern void ecryptfs_from_hex(char *dst, char *src, int dst_size);

/* One parsed key packet from a file's header */
struct ecryptfs_key_record {
	unsigned char type;
	size_t enc_key_size;
	unsigned char sig[ECRYPTFS_SIG_SIZE];
	unsigned char enc_key[ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES];
};

struct ecryptfs_auth_tok_list {
	struct ecryptfs_auth_tok *auth_tok;
	struct list_head list;
};

struct ecryptfs_crypt_stat;
struct ecryptfs_mount_crypt_stat;

/* Context handed to ecryptfs_encrypt_page(); `mode` selects which
 * member of `param` is valid. */
struct ecryptfs_page_crypt_context {
	struct page *page;
#define ECRYPTFS_PREPARE_COMMIT_MODE 0
#define ECRYPTFS_WRITEPAGE_MODE 1
	unsigned int mode;
	union {
		struct file *lower_file;	/* ECRYPTFS_PREPARE_COMMIT_MODE */
		struct writeback_control *wbc;	/* ECRYPTFS_WRITEPAGE_MODE */
	} param;
};
157
158static inline struct ecryptfs_auth_tok *
159ecryptfs_get_key_payload_data(struct key *key)
160{
161 return (struct ecryptfs_auth_tok *)
162 (((struct user_key_payload*)key->payload.data)->data);
163}
164
#define ECRYPTFS_SUPER_MAGIC 0xf15f
#define ECRYPTFS_MAX_KEYSET_SIZE 1024
#define ECRYPTFS_MAX_CIPHER_NAME_SIZE 32
#define ECRYPTFS_MAX_NUM_ENC_KEYS 64
#define ECRYPTFS_MAX_NUM_KEYSIGS 2 /* TODO: Make this a linked list */
#define ECRYPTFS_MAX_IV_BYTES 16 /* 128 bits */
#define ECRYPTFS_SALT_BYTES 2
#define MAGIC_ECRYPTFS_MARKER 0x3c81b7f5
#define MAGIC_ECRYPTFS_MARKER_SIZE_BYTES 8 /* 4*2 */
#define ECRYPTFS_FILE_SIZE_BYTES 8
#define ECRYPTFS_DEFAULT_CIPHER "aes"
#define ECRYPTFS_DEFAULT_KEY_BYTES 16
#define ECRYPTFS_DEFAULT_CHAINING_MODE CRYPTO_TFM_MODE_CBC
#define ECRYPTFS_TAG_3_PACKET_TYPE 0x8C
#define ECRYPTFS_TAG_11_PACKET_TYPE 0xED
#define MD5_DIGEST_SIZE 16

/**
 * This is the primary struct associated with each encrypted file.
 *
 * TODO: cache align/pack?
 */
struct ecryptfs_crypt_stat {
#define ECRYPTFS_STRUCT_INITIALIZED   0x00000001
#define ECRYPTFS_POLICY_APPLIED       0x00000002
#define ECRYPTFS_NEW_FILE             0x00000004
#define ECRYPTFS_ENCRYPTED            0x00000008
#define ECRYPTFS_SECURITY_WARNING     0x00000010
#define ECRYPTFS_ENABLE_HMAC          0x00000020
#define ECRYPTFS_ENCRYPT_IV_PAGES     0x00000040
#define ECRYPTFS_KEY_VALID            0x00000080
	u32 flags;
	unsigned int file_version;
	size_t iv_bytes;
	size_t num_keysigs;
	size_t header_extent_size;
	size_t num_header_extents_at_front;
	size_t extent_size; /* Data extent size; default is 4096 */
	size_t key_size;
	size_t extent_shift;
	unsigned int extent_mask;
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
	struct crypto_tfm *tfm;
	struct crypto_tfm *md5_tfm; /* Crypto context for generating
				     * the initialization vectors */
	unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE];
	unsigned char key[ECRYPTFS_MAX_KEY_BYTES];
	unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES];
	unsigned char keysigs[ECRYPTFS_MAX_NUM_KEYSIGS][ECRYPTFS_SIG_SIZE_HEX];
	/* Serialize use of tfm / md5_tfm / the struct as a whole */
	struct mutex cs_tfm_mutex;
	struct mutex cs_md5_tfm_mutex;
	struct mutex cs_mutex;
};

/* inode private data. */
struct ecryptfs_inode_info {
	struct inode vfs_inode;		/* Embedded; must allow container_of */
	struct inode *wii_inode;	/* The lower (wrapped) inode */
	struct ecryptfs_crypt_stat crypt_stat;
};

/* dentry private data. Each dentry must keep track of a lower
 * vfsmount too. */
struct ecryptfs_dentry_info {
	struct dentry *wdi_dentry;	/* The lower (wrapped) dentry */
	struct vfsmount *lower_mnt;
	struct ecryptfs_crypt_stat *crypt_stat;
};

/**
 * This struct is to enable a mount-wide passphrase/salt combo. This
 * is more or less a stopgap to provide similar functionality to other
 * crypto filesystems like EncFS or CFS until full policy support is
 * implemented in eCryptfs.
 */
struct ecryptfs_mount_crypt_stat {
	/* Pointers to memory we do not own, do not free these */
#define ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED 0x00000001
	u32 flags;
	struct ecryptfs_auth_tok *global_auth_tok;
	struct key *global_auth_tok_key;
	size_t global_default_cipher_key_size;
	struct crypto_tfm *global_key_tfm;
	struct mutex global_key_tfm_mutex;
	unsigned char global_default_cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE
						 + 1];
	unsigned char global_auth_tok_sig[ECRYPTFS_SIG_SIZE_HEX + 1];
};

/* superblock private data. */
struct ecryptfs_sb_info {
	struct super_block *wsi_sb;	/* The lower (wrapped) superblock */
	struct ecryptfs_mount_crypt_stat mount_crypt_stat;
};

/* file private data. */
struct ecryptfs_file_info {
	struct file *wfi_file;		/* The lower (wrapped) file */
	struct ecryptfs_crypt_stat *crypt_stat;
};

/* auth_tok <=> encrypted_session_key mappings */
struct ecryptfs_auth_tok_list_item {
	unsigned char encrypted_session_key[ECRYPTFS_MAX_KEY_BYTES];
	struct list_head list;
	struct ecryptfs_auth_tok auth_tok;
};
272
273static inline struct ecryptfs_file_info *
274ecryptfs_file_to_private(struct file *file)
275{
276 return (struct ecryptfs_file_info *)file->private_data;
277}
278
279static inline void
280ecryptfs_set_file_private(struct file *file,
281 struct ecryptfs_file_info *file_info)
282{
283 file->private_data = file_info;
284}
285
286static inline struct file *ecryptfs_file_to_lower(struct file *file)
287{
288 return ((struct ecryptfs_file_info *)file->private_data)->wfi_file;
289}
290
291static inline void
292ecryptfs_set_file_lower(struct file *file, struct file *lower_file)
293{
294 ((struct ecryptfs_file_info *)file->private_data)->wfi_file =
295 lower_file;
296}
297
298static inline struct ecryptfs_inode_info *
299ecryptfs_inode_to_private(struct inode *inode)
300{
301 return container_of(inode, struct ecryptfs_inode_info, vfs_inode);
302}
303
304static inline struct inode *ecryptfs_inode_to_lower(struct inode *inode)
305{
306 return ecryptfs_inode_to_private(inode)->wii_inode;
307}
308
309static inline void
310ecryptfs_set_inode_lower(struct inode *inode, struct inode *lower_inode)
311{
312 ecryptfs_inode_to_private(inode)->wii_inode = lower_inode;
313}
314
315static inline struct ecryptfs_sb_info *
316ecryptfs_superblock_to_private(struct super_block *sb)
317{
318 return (struct ecryptfs_sb_info *)sb->s_fs_info;
319}
320
321static inline void
322ecryptfs_set_superblock_private(struct super_block *sb,
323 struct ecryptfs_sb_info *sb_info)
324{
325 sb->s_fs_info = sb_info;
326}
327
328static inline struct super_block *
329ecryptfs_superblock_to_lower(struct super_block *sb)
330{
331 return ((struct ecryptfs_sb_info *)sb->s_fs_info)->wsi_sb;
332}
333
334static inline void
335ecryptfs_set_superblock_lower(struct super_block *sb,
336 struct super_block *lower_sb)
337{
338 ((struct ecryptfs_sb_info *)sb->s_fs_info)->wsi_sb = lower_sb;
339}
340
341static inline struct ecryptfs_dentry_info *
342ecryptfs_dentry_to_private(struct dentry *dentry)
343{
344 return (struct ecryptfs_dentry_info *)dentry->d_fsdata;
345}
346
347static inline void
348ecryptfs_set_dentry_private(struct dentry *dentry,
349 struct ecryptfs_dentry_info *dentry_info)
350{
351 dentry->d_fsdata = dentry_info;
352}
353
354static inline struct dentry *
355ecryptfs_dentry_to_lower(struct dentry *dentry)
356{
357 return ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->wdi_dentry;
358}
359
360static inline void
361ecryptfs_set_dentry_lower(struct dentry *dentry, struct dentry *lower_dentry)
362{
363 ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->wdi_dentry =
364 lower_dentry;
365}
366
367static inline struct vfsmount *
368ecryptfs_dentry_to_lower_mnt(struct dentry *dentry)
369{
370 return ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_mnt;
371}
372
373static inline void
374ecryptfs_set_dentry_lower_mnt(struct dentry *dentry, struct vfsmount *lower_mnt)
375{
376 ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_mnt =
377 lower_mnt;
378}
379
/* printk wrapper that prefixes the message with the calling function's
 * name.  The original definition carried a trailing semicolon inside
 * the macro expansion, which produces an extra empty statement at every
 * call site and breaks use in an unbraced if/else; callers already
 * supply their own ';'. */
#define ecryptfs_printk(type, fmt, arg...) \
        __ecryptfs_printk(type "%s: " fmt, __FUNCTION__, ## arg)
void __ecryptfs_printk(const char *fmt, ...);
383
/* Operation tables exported for wiring into the VFS */
extern const struct file_operations ecryptfs_main_fops;
extern const struct file_operations ecryptfs_dir_fops;
extern struct inode_operations ecryptfs_main_iops;
extern struct inode_operations ecryptfs_dir_iops;
extern struct inode_operations ecryptfs_symlink_iops;
extern struct super_operations ecryptfs_sops;
extern struct dentry_operations ecryptfs_dops;
extern struct address_space_operations ecryptfs_aops;
/* Debug chattiness; values > 0 also hex-dump key material (see
 * ecryptfs_dump_auth_tok()), so keep it at 0 in production. */
extern int ecryptfs_verbosity;

/* Slab caches, each defined next to the code that allocates from it */
extern struct kmem_cache *ecryptfs_auth_tok_list_item_cache;
extern struct kmem_cache *ecryptfs_file_info_cache;
extern struct kmem_cache *ecryptfs_dentry_info_cache;
extern struct kmem_cache *ecryptfs_inode_info_cache;
extern struct kmem_cache *ecryptfs_sb_info_cache;
extern struct kmem_cache *ecryptfs_header_cache_0;
extern struct kmem_cache *ecryptfs_header_cache_1;
extern struct kmem_cache *ecryptfs_header_cache_2;
extern struct kmem_cache *ecryptfs_lower_page_cache;

/* Interposition and filename encoding */
int ecryptfs_interpose(struct dentry *hidden_dentry,
		       struct dentry *this_dentry, struct super_block *sb,
		       int flag);
int ecryptfs_fill_zeros(struct file *file, loff_t new_length);
int ecryptfs_decode_filename(struct ecryptfs_crypt_stat *crypt_stat,
			     const char *name, int length,
			     char **decrypted_name);
int ecryptfs_encode_filename(struct ecryptfs_crypt_stat *crypt_stat,
			     const char *name, int length,
			     char **encoded_name);
struct dentry *ecryptfs_lower_dentry(struct dentry *this_dentry);
/* Attribute propagation between upper and lower inodes */
void ecryptfs_copy_attr_atime(struct inode *dest, const struct inode *src);
void ecryptfs_copy_attr_all(struct inode *dest, const struct inode *src);
void ecryptfs_copy_inode_size(struct inode *dst, const struct inode *src);
void ecryptfs_dump_hex(char *data, int bytes);
int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg,
			int sg_size);
/* crypt_stat lifecycle and IV handling */
int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat);
void ecryptfs_rotate_iv(unsigned char *iv);
void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
void ecryptfs_destruct_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat);
void ecryptfs_destruct_mount_crypt_stat(
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat);
int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat);
/* Lower-page I/O helpers */
int ecryptfs_write_inode_size_to_header(struct file *lower_file,
					struct inode *lower_inode,
					struct inode *inode);
int ecryptfs_get_lower_page(struct page **lower_page, struct inode *lower_inode,
			    struct file *lower_file,
			    unsigned long lower_page_index, int byte_offset,
			    int region_bytes);
int
ecryptfs_commit_lower_page(struct page *lower_page, struct inode *lower_inode,
			   struct file *lower_file, int byte_offset,
			   int region_size);
int ecryptfs_copy_page_to_lower(struct page *page, struct inode *lower_inode,
				struct file *lower_file);
int ecryptfs_do_readpage(struct file *file, struct page *page,
			 pgoff_t lower_page_index);
int ecryptfs_grab_and_map_lower_page(struct page **lower_page,
				     char **lower_virt,
				     struct inode *lower_inode,
				     unsigned long lower_page_index);
int ecryptfs_writepage_and_release_lower_page(struct page *lower_page,
					      struct inode *lower_inode,
					      struct writeback_control *wbc);
/* Per-page encryption/decryption and file headers */
int ecryptfs_encrypt_page(struct ecryptfs_page_crypt_context *ctx);
int ecryptfs_decrypt_page(struct file *file, struct page *page);
int ecryptfs_write_headers(struct dentry *ecryptfs_dentry,
			   struct file *lower_file);
int ecryptfs_write_headers_virt(char *page_virt,
				struct ecryptfs_crypt_stat *crypt_stat,
				struct dentry *ecryptfs_dentry);
int ecryptfs_read_headers(struct dentry *ecryptfs_dentry,
			  struct file *lower_file);
int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry);
int contains_ecryptfs_marker(char *data);
int ecryptfs_read_header_region(char *data, struct dentry *dentry,
				struct vfsmount *mnt);
u16 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat);
int ecryptfs_cipher_code_to_string(char *str, u16 cipher_code);
void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat);
/* Key packet generation/parsing */
int ecryptfs_generate_key_packet_set(char *dest_base,
				     struct ecryptfs_crypt_stat *crypt_stat,
				     struct dentry *ecryptfs_dentry,
				     size_t *len, size_t max);
int process_request_key_err(long err_code);
int
ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
			  unsigned char *src, struct dentry *ecryptfs_dentry);
int ecryptfs_truncate(struct dentry *dentry, loff_t new_length);
int
ecryptfs_process_cipher(struct crypto_tfm **tfm, struct crypto_tfm **key_tfm,
			char *cipher_name, size_t key_size);
/* iget5-style inode test/set helpers */
int ecryptfs_inode_test(struct inode *inode, void *candidate_lower_inode);
int ecryptfs_inode_set(struct inode *inode, void *lower_inode);
void ecryptfs_init_inode(struct inode *inode, struct inode *lower_inode);

#endif /* #ifndef ECRYPTFS_KERNEL_H */
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
new file mode 100644
index 000000000000..c8550c9f9cd2
--- /dev/null
+++ b/fs/ecryptfs/file.c
@@ -0,0 +1,440 @@
1/**
2 * eCryptfs: Linux filesystem encryption layer
3 *
4 * Copyright (C) 1997-2004 Erez Zadok
5 * Copyright (C) 2001-2004 Stony Brook University
6 * Copyright (C) 2004-2006 International Business Machines Corp.
7 * Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com>
8 * Michael C. Thompson <mcthomps@us.ibm.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
23 * 02111-1307, USA.
24 */
25
26#include <linux/file.h>
27#include <linux/poll.h>
28#include <linux/mount.h>
29#include <linux/pagemap.h>
30#include <linux/security.h>
31#include <linux/smp_lock.h>
32#include <linux/compat.h>
33#include "ecryptfs_kernel.h"
34
35/**
36 * ecryptfs_llseek
37 * @file: File we are seeking in
38 * @offset: The offset to seek to
39 * @origin: 2 - offset from i_size; 1 - offset from f_pos
40 *
41 * Returns the position we have seeked to, or negative on error
42 */
43static loff_t ecryptfs_llseek(struct file *file, loff_t offset, int origin)
44{
45 loff_t rv;
46 loff_t new_end_pos;
47 int rc;
48 int expanding_file = 0;
49 struct inode *inode = file->f_mapping->host;
50
51 /* If our offset is past the end of our file, we're going to
52 * need to grow it so we have a valid length of 0's */
53 new_end_pos = offset;
54 switch (origin) {
55 case 2:
56 new_end_pos += i_size_read(inode);
57 expanding_file = 1;
58 break;
59 case 1:
60 new_end_pos += file->f_pos;
61 if (new_end_pos > i_size_read(inode)) {
62 ecryptfs_printk(KERN_DEBUG, "new_end_pos(=[0x%.16x]) "
63 "> i_size_read(inode)(=[0x%.16x])\n",
64 new_end_pos, i_size_read(inode));
65 expanding_file = 1;
66 }
67 break;
68 default:
69 if (new_end_pos > i_size_read(inode)) {
70 ecryptfs_printk(KERN_DEBUG, "new_end_pos(=[0x%.16x]) "
71 "> i_size_read(inode)(=[0x%.16x])\n",
72 new_end_pos, i_size_read(inode));
73 expanding_file = 1;
74 }
75 }
76 ecryptfs_printk(KERN_DEBUG, "new_end_pos = [0x%.16x]\n", new_end_pos);
77 if (expanding_file) {
78 rc = ecryptfs_truncate(file->f_dentry, new_end_pos);
79 if (rc) {
80 rv = rc;
81 ecryptfs_printk(KERN_ERR, "Error on attempt to "
82 "truncate to (higher) offset [0x%.16x];"
83 " rc = [%d]\n", new_end_pos, rc);
84 goto out;
85 }
86 }
87 rv = generic_file_llseek(file, offset, origin);
88out:
89 return rv;
90}
91
92/**
93 * ecryptfs_read_update_atime
94 *
95 * generic_file_read updates the atime of upper layer inode. But, it
96 * doesn't give us a chance to update the atime of the lower layer
97 * inode. This function is a wrapper to generic_file_read. It
98 * updates the atime of the lower level inode if generic_file_read
99 * returns without any errors. This is to be used only for file reads.
100 * The function to be used for directory reads is ecryptfs_read.
101 */
102static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
103 const struct iovec *iov,
104 unsigned long nr_segs, loff_t pos)
105{
106 int rc;
107 struct dentry *lower_dentry;
108 struct vfsmount *lower_vfsmount;
109 struct file *file = iocb->ki_filp;
110
111 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
112 /*
113 * Even though this is a async interface, we need to wait
114 * for IO to finish to update atime
115 */
116 if (-EIOCBQUEUED == rc)
117 rc = wait_on_sync_kiocb(iocb);
118 if (rc >= 0) {
119 lower_dentry = ecryptfs_dentry_to_lower(file->f_dentry);
120 lower_vfsmount = ecryptfs_dentry_to_lower_mnt(file->f_dentry);
121 touch_atime(lower_vfsmount, lower_dentry);
122 }
123 return rc;
124}
125
/* Context threaded through vfs_readdir() to ecryptfs_filldir() */
struct ecryptfs_getdents_callback {
	void *dirent;		/* Opaque cookie for the caller's filldir */
	struct dentry *dentry;	/* The eCryptfs directory being read */
	filldir_t filldir;	/* The caller's original filldir callback */
	int err;		/* Error status of a filldir pass */
	int filldir_called;	/* Invocations of ecryptfs_filldir() this pass */
	int entries_written;	/* Entries the caller's filldir accepted */
};
134
135/* Inspired by generic filldir in fs/readir.c */
136static int
137ecryptfs_filldir(void *dirent, const char *name, int namelen, loff_t offset,
138 u64 ino, unsigned int d_type)
139{
140 struct ecryptfs_crypt_stat *crypt_stat;
141 struct ecryptfs_getdents_callback *buf =
142 (struct ecryptfs_getdents_callback *)dirent;
143 int rc;
144 int decoded_length;
145 char *decoded_name;
146
147 crypt_stat = ecryptfs_dentry_to_private(buf->dentry)->crypt_stat;
148 buf->filldir_called++;
149 decoded_length = ecryptfs_decode_filename(crypt_stat, name, namelen,
150 &decoded_name);
151 if (decoded_length < 0) {
152 rc = decoded_length;
153 goto out;
154 }
155 rc = buf->filldir(buf->dirent, decoded_name, decoded_length, offset,
156 ino, d_type);
157 kfree(decoded_name);
158 if (rc >= 0)
159 buf->entries_written++;
160out:
161 return rc;
162}
163
164/**
165 * ecryptfs_readdir
166 * @file: The ecryptfs file struct
167 * @dirent: Directory entry
168 * @filldir: The filldir callback function
169 */
170static int ecryptfs_readdir(struct file *file, void *dirent, filldir_t filldir)
171{
172 int rc;
173 struct file *lower_file;
174 struct inode *inode;
175 struct ecryptfs_getdents_callback buf;
176
177 lower_file = ecryptfs_file_to_lower(file);
178 lower_file->f_pos = file->f_pos;
179 inode = file->f_dentry->d_inode;
180 memset(&buf, 0, sizeof(buf));
181 buf.dirent = dirent;
182 buf.dentry = file->f_dentry;
183 buf.filldir = filldir;
184retry:
185 buf.filldir_called = 0;
186 buf.entries_written = 0;
187 buf.err = 0;
188 rc = vfs_readdir(lower_file, ecryptfs_filldir, (void *)&buf);
189 if (buf.err)
190 rc = buf.err;
191 if (buf.filldir_called && !buf.entries_written)
192 goto retry;
193 file->f_pos = lower_file->f_pos;
194 if (rc >= 0)
195 ecryptfs_copy_attr_atime(inode, lower_file->f_dentry->d_inode);
196 return rc;
197}
198
/* Slab cache for struct ecryptfs_file_info; allocated in ecryptfs_open() */
struct kmem_cache *ecryptfs_file_info_cache;
200
/**
 * ecryptfs_open
 * @inode: inode specifying file to open
 * @file: Structure to return filled in
 *
 * Opens the file specified by inode: allocates the per-file private
 * state, opens the corresponding lower file, and checks that the
 * lower file either carries valid eCryptfs headers or that plaintext
 * passthrough is enabled on the mount.
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_open(struct inode *inode, struct file *file)
{
	int rc = 0;
	struct ecryptfs_crypt_stat *crypt_stat = NULL;
	struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
	struct dentry *ecryptfs_dentry = file->f_dentry;
	/* Private value of ecryptfs_dentry allocated in
	 * ecryptfs_lookup() */
	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
	struct inode *lower_inode = NULL;
	struct file *lower_file = NULL;
	struct vfsmount *lower_mnt;
	struct ecryptfs_file_info *file_info;
	int lower_flags;

	/* Released in ecryptfs_release or end of function if failure */
	file_info = kmem_cache_alloc(ecryptfs_file_info_cache, SLAB_KERNEL);
	ecryptfs_set_file_private(file, file_info);
	if (!file_info) {
		ecryptfs_printk(KERN_ERR,
				"Error attempting to allocate memory\n");
		rc = -ENOMEM;
		goto out;
	}
	memset(file_info, 0, sizeof(*file_info));
	/* NOTE(review): lower_dentry was already initialized at its
	 * declaration above; this reassignment is redundant */
	lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
	crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
	mount_crypt_stat = &ecryptfs_superblock_to_private(
		ecryptfs_dentry->d_sb)->mount_crypt_stat;
	mutex_lock(&crypt_stat->cs_mutex);
	if (!ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_POLICY_APPLIED)) {
		ecryptfs_printk(KERN_DEBUG, "Setting flags for stat...\n");
		/* Policy code enabled in future release */
		ECRYPTFS_SET_FLAG(crypt_stat->flags, ECRYPTFS_POLICY_APPLIED);
		ECRYPTFS_SET_FLAG(crypt_stat->flags, ECRYPTFS_ENCRYPTED);
	}
	mutex_unlock(&crypt_stat->cs_mutex);
	/* This mntget & dget is undone via fput when the file is released */
	dget(lower_dentry);
	/* Promote a write-only open to read/write at the lower level --
	 * presumably so header pages can still be read back; confirm */
	lower_flags = file->f_flags;
	if ((lower_flags & O_ACCMODE) == O_WRONLY)
		lower_flags = (lower_flags & O_ACCMODE) | O_RDWR;
	/* Strip O_APPEND for the lower open -- presumably because
	 * eCryptfs positions its own writes (headers live at the front
	 * of the lower file); confirm */
	if (file->f_flags & O_APPEND)
		lower_flags &= ~O_APPEND;
	lower_mnt = ecryptfs_dentry_to_lower_mnt(ecryptfs_dentry);
	mntget(lower_mnt);
	/* Corresponding fput() in ecryptfs_release() */
	lower_file = dentry_open(lower_dentry, lower_mnt, lower_flags);
	if (IS_ERR(lower_file)) {
		rc = PTR_ERR(lower_file);
		ecryptfs_printk(KERN_ERR, "Error opening lower file\n");
		goto out_puts;
	}
	ecryptfs_set_file_lower(file, lower_file);
	/* Isn't this check the same as the one in lookup? */
	lower_inode = lower_dentry->d_inode;
	if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
		ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
		ECRYPTFS_CLEAR_FLAG(crypt_stat->flags, ECRYPTFS_ENCRYPTED);
		rc = 0;
		goto out;
	}
	mutex_lock(&crypt_stat->cs_mutex);
	/* A lower file smaller than one header extent cannot contain
	 * valid eCryptfs headers */
	if (i_size_read(lower_inode) < ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) {
		if (!(mount_crypt_stat->flags
		      & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
			rc = -EIO;
			printk(KERN_WARNING "Attempt to read file that is "
			       "not in a valid eCryptfs format, and plaintext "
			       "passthrough mode is not enabled; returning "
			       "-EIO\n");
			mutex_unlock(&crypt_stat->cs_mutex);
			goto out_puts;
		}
		/* Passthrough: treat the file as unencrypted */
		crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
		rc = 0;
		mutex_unlock(&crypt_stat->cs_mutex);
		goto out;
	} else if (!ECRYPTFS_CHECK_FLAG(crypt_stat->flags,
					ECRYPTFS_POLICY_APPLIED)
		   || !ECRYPTFS_CHECK_FLAG(crypt_stat->flags,
					   ECRYPTFS_KEY_VALID)) {
		rc = ecryptfs_read_headers(ecryptfs_dentry, lower_file);
		if (rc) {
			ecryptfs_printk(KERN_DEBUG,
					"Valid headers not found\n");
			if (!(mount_crypt_stat->flags
			      & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
				rc = -EIO;
				printk(KERN_WARNING "Attempt to read file that "
				       "is not in a valid eCryptfs format, "
				       "and plaintext passthrough mode is not "
				       "enabled; returning -EIO\n");
				mutex_unlock(&crypt_stat->cs_mutex);
				goto out_puts;
			}
			ECRYPTFS_CLEAR_FLAG(crypt_stat->flags,
					    ECRYPTFS_ENCRYPTED);
			rc = 0;
			mutex_unlock(&crypt_stat->cs_mutex);
			goto out;
		}
	}
	mutex_unlock(&crypt_stat->cs_mutex);
	ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = [0x%.16x] "
			"size: [0x%.16x]\n", inode, inode->i_ino,
			i_size_read(inode));
	/* NOTE(review): lower_file was already attached right after
	 * dentry_open(); this second call is redundant */
	ecryptfs_set_file_lower(file, lower_file);
	goto out;
out_puts:
	mntput(lower_mnt);
	dput(lower_dentry);
	kmem_cache_free(ecryptfs_file_info_cache,
			ecryptfs_file_to_private(file));
out:
	return rc;
}
327
328static int ecryptfs_flush(struct file *file, fl_owner_t td)
329{
330 int rc = 0;
331 struct file *lower_file = NULL;
332
333 lower_file = ecryptfs_file_to_lower(file);
334 if (lower_file->f_op && lower_file->f_op->flush)
335 rc = lower_file->f_op->flush(lower_file, td);
336 return rc;
337}
338
339static int ecryptfs_release(struct inode *inode, struct file *file)
340{
341 struct file *lower_file = ecryptfs_file_to_lower(file);
342 struct ecryptfs_file_info *file_info = ecryptfs_file_to_private(file);
343 struct inode *lower_inode = ecryptfs_inode_to_lower(inode);
344
345 fput(lower_file);
346 inode->i_blocks = lower_inode->i_blocks;
347 kmem_cache_free(ecryptfs_file_info_cache, file_info);
348 return 0;
349}
350
351static int
352ecryptfs_fsync(struct file *file, struct dentry *dentry, int datasync)
353{
354 struct file *lower_file = ecryptfs_file_to_lower(file);
355 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
356 struct inode *lower_inode = lower_dentry->d_inode;
357 int rc = -EINVAL;
358
359 if (lower_inode->i_fop->fsync) {
360 mutex_lock(&lower_inode->i_mutex);
361 rc = lower_inode->i_fop->fsync(lower_file, lower_dentry,
362 datasync);
363 mutex_unlock(&lower_inode->i_mutex);
364 }
365 return rc;
366}
367
368static int ecryptfs_fasync(int fd, struct file *file, int flag)
369{
370 int rc = 0;
371 struct file *lower_file = NULL;
372
373 lower_file = ecryptfs_file_to_lower(file);
374 if (lower_file->f_op && lower_file->f_op->fasync)
375 rc = lower_file->f_op->fasync(fd, lower_file, flag);
376 return rc;
377}
378
379static ssize_t ecryptfs_sendfile(struct file *file, loff_t * ppos,
380 size_t count, read_actor_t actor, void *target)
381{
382 struct file *lower_file = NULL;
383 int rc = -EINVAL;
384
385 lower_file = ecryptfs_file_to_lower(file);
386 if (lower_file->f_op && lower_file->f_op->sendfile)
387 rc = lower_file->f_op->sendfile(lower_file, ppos, count,
388 actor, target);
389
390 return rc;
391}
392
/* Forward declaration: ecryptfs_ioctl is referenced by the
 * file_operations tables below but defined after them */
static int ecryptfs_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg);
395
/* File operations for directories. Handlers are shared with the
 * regular-file table below; readdir decodes the encrypted names on
 * the way up. NOTE(review): .mmap and .sendfile on a directory table
 * look vestigial -- confirm they are ever reachable. */
const struct file_operations ecryptfs_dir_fops = {
	.readdir = ecryptfs_readdir,
	.ioctl = ecryptfs_ioctl,
	.mmap = generic_file_mmap,
	.open = ecryptfs_open,
	.flush = ecryptfs_flush,
	.release = ecryptfs_release,
	.fsync = ecryptfs_fsync,
	.fasync = ecryptfs_fasync,
	.sendfile = ecryptfs_sendfile,
};
407
/* File operations for regular files. read/write route through the
 * generic sync wrappers into the aio entry points; aio_read uses an
 * eCryptfs-specific handler (which, per its name, presumably also
 * updates atime -- see its definition). */
const struct file_operations ecryptfs_main_fops = {
	.llseek = ecryptfs_llseek,
	.read = do_sync_read,
	.aio_read = ecryptfs_read_update_atime,
	.write = do_sync_write,
	.aio_write = generic_file_aio_write,
	.readdir = ecryptfs_readdir,
	.ioctl = ecryptfs_ioctl,
	.mmap = generic_file_mmap,
	.open = ecryptfs_open,
	.flush = ecryptfs_flush,
	.release = ecryptfs_release,
	.fsync = ecryptfs_fsync,
	.fasync = ecryptfs_fasync,
	.sendfile = ecryptfs_sendfile,
};
424
425static int
426ecryptfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
427 unsigned long arg)
428{
429 int rc = 0;
430 struct file *lower_file = NULL;
431
432 if (ecryptfs_file_to_private(file))
433 lower_file = ecryptfs_file_to_lower(file);
434 if (lower_file && lower_file->f_op && lower_file->f_op->ioctl)
435 rc = lower_file->f_op->ioctl(ecryptfs_inode_to_lower(inode),
436 lower_file, cmd, arg);
437 else
438 rc = -ENOTTY;
439 return rc;
440}
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
new file mode 100644
index 000000000000..efdd2b7b62d7
--- /dev/null
+++ b/fs/ecryptfs/inode.c
@@ -0,0 +1,1079 @@
1/**
2 * eCryptfs: Linux filesystem encryption layer
3 *
4 * Copyright (C) 1997-2004 Erez Zadok
5 * Copyright (C) 2001-2004 Stony Brook University
6 * Copyright (C) 2004-2006 International Business Machines Corp.
7 * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
8 * Michael C. Thompsion <mcthomps@us.ibm.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
23 * 02111-1307, USA.
24 */
25
26#include <linux/file.h>
27#include <linux/vmalloc.h>
28#include <linux/pagemap.h>
29#include <linux/dcache.h>
30#include <linux/namei.h>
31#include <linux/mount.h>
32#include <linux/crypto.h>
33#include "ecryptfs_kernel.h"
34
/* Take a reference on @dentry's parent and lock the parent's inode.
 * The caller releases both via unlock_dir() on the returned dentry
 * (or via unlock_parent() on the child).
 * NOTE(review): d_parent is read without a lock; dget_parent() would
 * be the race-free way to do this -- confirm before changing, since
 * unlock_parent() re-derives d_parent and must stay paired. */
static struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *dir;

	dir = dget(dentry->d_parent);
	mutex_lock(&(dir->d_inode->i_mutex));
	return dir;
}
43
/* Counterpart to lock_parent() for callers that kept only the child:
 * unlocks the parent's inode and drops the reference lock_parent()
 * took. Assumes d_parent has not changed in the meantime. */
static void unlock_parent(struct dentry *dentry)
{
	mutex_unlock(&(dentry->d_parent->d_inode->i_mutex));
	dput(dentry->d_parent);
}
49
/* Unlock a directory dentry returned by lock_parent() and drop the
 * reference taken there. */
static void unlock_dir(struct dentry *dir)
{
	mutex_unlock(&dir->d_inode->i_mutex);
	dput(dir);
}
55
/* Copy i_size and i_blocks from @src to @dst. The const is cast away
 * because i_size_read() takes a non-const inode pointer even though
 * it only reads from it. */
void ecryptfs_copy_inode_size(struct inode *dst, const struct inode *src)
{
	i_size_write(dst, i_size_read((struct inode *)src));
	dst->i_blocks = src->i_blocks;
}
61
/* Mirror the access time from @src (typically a lower inode) onto
 * @dest (the corresponding eCryptfs inode) */
void ecryptfs_copy_attr_atime(struct inode *dest, const struct inode *src)
{
	dest->i_atime = src->i_atime;
}
66
/* Mirror all three timestamps (atime, mtime, ctime) from @src onto
 * @dest */
static void ecryptfs_copy_attr_times(struct inode *dest,
				     const struct inode *src)
{
	dest->i_atime = src->i_atime;
	dest->i_mtime = src->i_mtime;
	dest->i_ctime = src->i_ctime;
}
74
/* Mirror the timestamps plus size/block count from @src onto @dest,
 * composed from the two dedicated helpers above */
static void ecryptfs_copy_attr_timesizes(struct inode *dest,
					 const struct inode *src)
{
	ecryptfs_copy_attr_times(dest, src);
	ecryptfs_copy_inode_size(dest, src);
}
83
84void ecryptfs_copy_attr_all(struct inode *dest, const struct inode *src)
85{
86 dest->i_mode = src->i_mode;
87 dest->i_nlink = src->i_nlink;
88 dest->i_uid = src->i_uid;
89 dest->i_gid = src->i_gid;
90 dest->i_rdev = src->i_rdev;
91 dest->i_atime = src->i_atime;
92 dest->i_mtime = src->i_mtime;
93 dest->i_ctime = src->i_ctime;
94 dest->i_blkbits = src->i_blkbits;
95 dest->i_flags = src->i_flags;
96}
97
/**
 * ecryptfs_create_underlying_file
 * @lower_dir_inode: inode of the parent in the lower fs of the new file
 * @dentry: New file's dentry in eCryptfs (the lower dentry and lower
 *          mount are derived from it)
 * @mode: The mode of the new file
 * @nd: nameidata of eCryptfs' parent's dentry & vfsmount
 *
 * Creates the file in the lower file system. The nameidata is
 * temporarily repointed at the lower dentry/mount around vfs_create()
 * -- presumably so the lower filesystem's create sees its own path in
 * nd; confirm -- and restored afterwards.
 *
 * Returns zero on success; non-zero on error condition
 */
static int
ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
				struct dentry *dentry, int mode,
				struct nameidata *nd)
{
	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
	struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
	struct dentry *dentry_save;
	struct vfsmount *vfsmount_save;
	int rc;

	/* Swap the lower path into nd for the duration of the create */
	dentry_save = nd->dentry;
	vfsmount_save = nd->mnt;
	nd->dentry = lower_dentry;
	nd->mnt = lower_mnt;
	rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
	nd->dentry = dentry_save;
	nd->mnt = vfsmount_save;
	return rc;
}
130
/**
 * ecryptfs_do_create
 * @directory_inode: inode of the new file's dentry's parent in ecryptfs
 * @ecryptfs_dentry: New file's dentry in ecryptfs
 * @mode: The mode of the new file
 * @nd: nameidata of ecryptfs' parent's dentry & vfsmount
 *
 * Creates the underlying file and the eCryptfs inode which will link to
 * it. It will also update the eCryptfs directory inode to mimic the
 * stat of the lower directory inode.
 *
 * Returns zero on success; non-zero on error condition
 */
static int
ecryptfs_do_create(struct inode *directory_inode,
		   struct dentry *ecryptfs_dentry, int mode,
		   struct nameidata *nd)
{
	int rc;
	struct dentry *lower_dentry;
	struct dentry *lower_dir_dentry;

	lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
	lower_dir_dentry = lock_parent(lower_dentry);
	/* NOTE(review): lock_parent() never returns an ERR_PTR (it
	 * returns dget(d_parent)), so this branch looks unreachable */
	if (unlikely(IS_ERR(lower_dir_dentry))) {
		ecryptfs_printk(KERN_ERR, "Error locking directory of "
				"dentry\n");
		rc = PTR_ERR(lower_dir_dentry);
		goto out;
	}
	rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode,
					     ecryptfs_dentry, mode, nd);
	if (unlikely(rc)) {
		ecryptfs_printk(KERN_ERR,
				"Failure to create underlying file\n");
		goto out_lock;
	}
	/* Link a new eCryptfs inode over the freshly created lower file */
	rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
				directory_inode->i_sb, 0);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Failure in ecryptfs_interpose\n");
		goto out_lock;
	}
	/* Propagate the lower directory's times and size upward */
	ecryptfs_copy_attr_timesizes(directory_inode,
				     lower_dir_dentry->d_inode);
out_lock:
	unlock_dir(lower_dir_dentry);
out:
	return rc;
}
181
/**
 * grow_file
 * @ecryptfs_dentry: the ecryptfs dentry
 * @lower_file: The lower file
 * @inode: The ecryptfs inode
 * @lower_inode: The lower inode
 *
 * This is the code which will grow the file to its correct size. A
 * throwaway struct file is assembled on the stack so that
 * ecryptfs_fill_zeros() (which operates on an eCryptfs file) can be
 * run against the new lower file; on success the upper size is reset
 * to zero, the size header is written out, and the inode is flagged
 * as a new eCryptfs file.
 *
 * Returns zero on success; non-zero otherwise
 */
static int grow_file(struct dentry *ecryptfs_dentry, struct file *lower_file,
		     struct inode *inode, struct inode *lower_inode)
{
	int rc = 0;
	struct file fake_file;
	struct ecryptfs_file_info tmp_file_info;

	/* Stack-local stand-in for a real eCryptfs struct file */
	memset(&fake_file, 0, sizeof(fake_file));
	fake_file.f_dentry = ecryptfs_dentry;
	memset(&tmp_file_info, 0, sizeof(tmp_file_info));
	ecryptfs_set_file_private(&fake_file, &tmp_file_info);
	ecryptfs_set_file_lower(&fake_file, lower_file);
	rc = ecryptfs_fill_zeros(&fake_file, 1);
	if (rc) {
		/* Record the failure on the inode's crypt_stat flags */
		ECRYPTFS_SET_FLAG(
			ecryptfs_inode_to_private(inode)->crypt_stat.flags,
			ECRYPTFS_SECURITY_WARNING);
		ecryptfs_printk(KERN_WARNING, "Error attempting to fill zeros "
				"in file; rc = [%d]\n", rc);
		goto out;
	}
	i_size_write(inode, 0);
	ecryptfs_write_inode_size_to_header(lower_file, lower_inode, inode);
	ECRYPTFS_SET_FLAG(ecryptfs_inode_to_private(inode)->crypt_stat.flags,
			  ECRYPTFS_NEW_FILE);
out:
	return rc;
}
219
/**
 * ecryptfs_initialize_file
 * @ecryptfs_dentry: The dentry of the file to initialize
 *
 * Cause the file to be changed from a basic empty file to an ecryptfs
 * file with a header and first data page: opens the lower file, sets
 * up a new crypto context, writes the headers, and grows the file to
 * its initial size. Directories are marked unencrypted and skipped.
 *
 * Returns zero on success
 */
static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
{
	int rc = 0;
	int lower_flags;
	struct ecryptfs_crypt_stat *crypt_stat;
	struct dentry *lower_dentry;
	struct dentry *tlower_dentry = NULL;
	struct file *lower_file;
	struct inode *inode, *lower_inode;
	struct vfsmount *lower_mnt;

	lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
	ecryptfs_printk(KERN_DEBUG, "lower_dentry->d_name.name = [%s]\n",
			lower_dentry->d_name.name);
	inode = ecryptfs_dentry->d_inode;
	crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
	tlower_dentry = dget(lower_dentry);
	/* NOTE(review): dget() returns its argument, so this NULL
	 * check looks unreachable for a non-NULL lower_dentry */
	if (!tlower_dentry) {
		rc = -ENOMEM;
		ecryptfs_printk(KERN_ERR, "Error dget'ing lower_dentry\n");
		goto out;
	}
	/* NOTE(review): this expression reduces to (O_WRONLY | O_RDWR);
	 * plain O_RDWR was probably intended -- confirm */
	lower_flags = ((O_CREAT | O_WRONLY | O_TRUNC) & O_ACCMODE) | O_RDWR;
#if BITS_PER_LONG != 32
	lower_flags |= O_LARGEFILE;
#endif
	lower_mnt = ecryptfs_dentry_to_lower_mnt(ecryptfs_dentry);
	mntget(lower_mnt);
	/* Corresponding fput() at end of this function */
	lower_file = dentry_open(tlower_dentry, lower_mnt, lower_flags);
	if (IS_ERR(lower_file)) {
		rc = PTR_ERR(lower_file);
		ecryptfs_printk(KERN_ERR,
				"Error opening dentry; rc = [%i]\n", rc);
		goto out;
	}
	/* fput(lower_file) should handle the puts if we do this */
	lower_file->f_dentry = tlower_dentry;
	lower_file->f_vfsmnt = lower_mnt;
	lower_inode = tlower_dentry->d_inode;
	if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
		ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
		ECRYPTFS_CLEAR_FLAG(crypt_stat->flags, ECRYPTFS_ENCRYPTED);
		goto out_fput;
	}
	ECRYPTFS_SET_FLAG(crypt_stat->flags, ECRYPTFS_NEW_FILE);
	ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
	rc = ecryptfs_new_file_context(ecryptfs_dentry);
	if (rc) {
		ecryptfs_printk(KERN_DEBUG, "Error creating new file "
				"context\n");
		goto out_fput;
	}
	rc = ecryptfs_write_headers(ecryptfs_dentry, lower_file);
	if (rc) {
		ecryptfs_printk(KERN_DEBUG, "Error writing headers\n");
		goto out_fput;
	}
	rc = grow_file(ecryptfs_dentry, lower_file, inode, lower_inode);
out_fput:
	fput(lower_file);
out:
	return rc;
}
292
293/**
294 * ecryptfs_create
295 * @dir: The inode of the directory in which to create the file.
296 * @dentry: The eCryptfs dentry
297 * @mode: The mode of the new file.
298 * @nd: nameidata
299 *
300 * Creates a new file.
301 *
302 * Returns zero on success; non-zero on error condition
303 */
304static int
305ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
306 int mode, struct nameidata *nd)
307{
308 int rc;
309
310 rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode, nd);
311 if (unlikely(rc)) {
312 ecryptfs_printk(KERN_WARNING, "Failed to create file in"
313 "lower filesystem\n");
314 goto out;
315 }
316 /* At this point, a file exists on "disk"; we need to make sure
317 * that this on disk file is prepared to be an ecryptfs file */
318 rc = ecryptfs_initialize_file(ecryptfs_dentry);
319out:
320 return rc;
321}
322
323/**
324 * ecryptfs_lookup
325 * @dir: inode
326 * @dentry: The dentry
327 * @nd: nameidata, may be NULL
328 *
329 * Find a file on disk. If the file does not exist, then we'll add it to the
330 * dentry cache and continue on to read it from the disk.
331 */
332static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
333 struct nameidata *nd)
334{
335 int rc = 0;
336 struct dentry *lower_dir_dentry;
337 struct dentry *lower_dentry;
338 struct vfsmount *lower_mnt;
339 struct dentry *tlower_dentry = NULL;
340 char *encoded_name;
341 unsigned int encoded_namelen;
342 struct ecryptfs_crypt_stat *crypt_stat = NULL;
343 char *page_virt = NULL;
344 struct inode *lower_inode;
345 u64 file_size;
346
347 lower_dir_dentry = ecryptfs_dentry_to_lower(dentry->d_parent);
348 dentry->d_op = &ecryptfs_dops;
349 if ((dentry->d_name.len == 1 && !strcmp(dentry->d_name.name, "."))
350 || (dentry->d_name.len == 2 && !strcmp(dentry->d_name.name, "..")))
351 goto out_drop;
352 encoded_namelen = ecryptfs_encode_filename(crypt_stat,
353 dentry->d_name.name,
354 dentry->d_name.len,
355 &encoded_name);
356 if (encoded_namelen < 0) {
357 rc = encoded_namelen;
358 goto out_drop;
359 }
360 ecryptfs_printk(KERN_DEBUG, "encoded_name = [%s]; encoded_namelen "
361 "= [%d]\n", encoded_name, encoded_namelen);
362 lower_dentry = lookup_one_len(encoded_name, lower_dir_dentry,
363 encoded_namelen - 1);
364 kfree(encoded_name);
365 lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
366 if (IS_ERR(lower_dentry)) {
367 ecryptfs_printk(KERN_ERR, "ERR from lower_dentry\n");
368 rc = PTR_ERR(lower_dentry);
369 goto out_drop;
370 }
371 ecryptfs_printk(KERN_DEBUG, "lower_dentry = [%p]; lower_dentry->"
372 "d_name.name = [%s]\n", lower_dentry,
373 lower_dentry->d_name.name);
374 lower_inode = lower_dentry->d_inode;
375 ecryptfs_copy_attr_atime(dir, lower_dir_dentry->d_inode);
376 BUG_ON(!atomic_read(&lower_dentry->d_count));
377 ecryptfs_set_dentry_private(dentry,
378 kmem_cache_alloc(ecryptfs_dentry_info_cache,
379 SLAB_KERNEL));
380 if (!ecryptfs_dentry_to_private(dentry)) {
381 rc = -ENOMEM;
382 ecryptfs_printk(KERN_ERR, "Out of memory whilst attempting "
383 "to allocate ecryptfs_dentry_info struct\n");
384 goto out_dput;
385 }
386 ecryptfs_set_dentry_lower(dentry, lower_dentry);
387 ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt);
388 if (!lower_dentry->d_inode) {
389 /* We want to add because we couldn't find in lower */
390 d_add(dentry, NULL);
391 goto out;
392 }
393 rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 1);
394 if (rc) {
395 ecryptfs_printk(KERN_ERR, "Error interposing\n");
396 goto out_dput;
397 }
398 if (S_ISDIR(lower_inode->i_mode)) {
399 ecryptfs_printk(KERN_DEBUG, "Is a directory; returning\n");
400 goto out;
401 }
402 if (S_ISLNK(lower_inode->i_mode)) {
403 ecryptfs_printk(KERN_DEBUG, "Is a symlink; returning\n");
404 goto out;
405 }
406 if (!nd) {
407 ecryptfs_printk(KERN_DEBUG, "We have a NULL nd, just leave"
408 "as we *think* we are about to unlink\n");
409 goto out;
410 }
411 tlower_dentry = dget(lower_dentry);
412 if (!tlower_dentry || IS_ERR(tlower_dentry)) {
413 rc = -ENOMEM;
414 ecryptfs_printk(KERN_ERR, "Cannot dget lower_dentry\n");
415 goto out_dput;
416 }
417 /* Released in this function */
418 page_virt =
419 (char *)kmem_cache_alloc(ecryptfs_header_cache_2,
420 SLAB_USER);
421 if (!page_virt) {
422 rc = -ENOMEM;
423 ecryptfs_printk(KERN_ERR,
424 "Cannot ecryptfs_kmalloc a page\n");
425 goto out_dput;
426 }
427 memset(page_virt, 0, PAGE_CACHE_SIZE);
428 rc = ecryptfs_read_header_region(page_virt, tlower_dentry, nd->mnt);
429 crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
430 if (!ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_POLICY_APPLIED))
431 ecryptfs_set_default_sizes(crypt_stat);
432 if (rc) {
433 rc = 0;
434 ecryptfs_printk(KERN_WARNING, "Error reading header region;"
435 " assuming unencrypted\n");
436 } else {
437 if (!contains_ecryptfs_marker(page_virt
438 + ECRYPTFS_FILE_SIZE_BYTES)) {
439 kmem_cache_free(ecryptfs_header_cache_2, page_virt);
440 goto out;
441 }
442 memcpy(&file_size, page_virt, sizeof(file_size));
443 file_size = be64_to_cpu(file_size);
444 i_size_write(dentry->d_inode, (loff_t)file_size);
445 }
446 kmem_cache_free(ecryptfs_header_cache_2, page_virt);
447 goto out;
448
449out_dput:
450 dput(lower_dentry);
451 if (tlower_dentry)
452 dput(tlower_dentry);
453out_drop:
454 d_drop(dentry);
455out:
456 return ERR_PTR(rc);
457}
458
/**
 * ecryptfs_link
 * @old_dentry: Existing eCryptfs dentry to link from
 * @dir: inode of the directory receiving the new link
 * @new_dentry: New eCryptfs dentry for the link
 *
 * Creates the hard link in the lower filesystem and interposes an
 * eCryptfs inode over it. The upper file size is saved before and
 * rewritten after the interpose -- presumably because interposing
 * would otherwise pick up the lower (encrypted, header-inclusive)
 * size; confirm.
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
			 struct dentry *new_dentry)
{
	struct dentry *lower_old_dentry;
	struct dentry *lower_new_dentry;
	struct dentry *lower_dir_dentry;
	u64 file_size_save;
	int rc;

	file_size_save = i_size_read(old_dentry->d_inode);
	lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
	lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
	dget(lower_old_dentry);
	dget(lower_new_dentry);
	lower_dir_dentry = lock_parent(lower_new_dentry);
	rc = vfs_link(lower_old_dentry, lower_dir_dentry->d_inode,
		      lower_new_dentry);
	if (rc || !lower_new_dentry->d_inode)
		goto out_lock;
	rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0);
	if (rc)
		goto out_lock;
	ecryptfs_copy_attr_timesizes(dir, lower_new_dentry->d_inode);
	/* Mirror the lower link count up to the upper inode */
	old_dentry->d_inode->i_nlink =
		ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink;
	i_size_write(new_dentry->d_inode, file_size_save);
out_lock:
	unlock_dir(lower_dir_dentry);
	dput(lower_new_dentry);
	dput(lower_old_dentry);
	if (!new_dentry->d_inode)
		d_drop(new_dentry);
	return rc;
}
493
/**
 * ecryptfs_unlink
 * @dir: inode of the parent directory
 * @dentry: The eCryptfs dentry of the file to unlink
 *
 * Unlinks the corresponding file in the lower filesystem and mirrors
 * the lower link count and parent times upward.
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
{
	int rc = 0;
	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
	struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);

	/* The return value (dget'd parent) is intentionally dropped;
	 * unlock_parent() below releases the same reference via
	 * d_parent */
	lock_parent(lower_dentry);
	rc = vfs_unlink(lower_dir_inode, lower_dentry);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error in vfs_unlink\n");
		goto out_unlock;
	}
	ecryptfs_copy_attr_times(dir, lower_dir_inode);
	dentry->d_inode->i_nlink =
		ecryptfs_inode_to_lower(dentry->d_inode)->i_nlink;
	dentry->d_inode->i_ctime = dir->i_ctime;
out_unlock:
	unlock_parent(lower_dentry);
	return rc;
}
514
515static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
516 const char *symname)
517{
518 int rc;
519 struct dentry *lower_dentry;
520 struct dentry *lower_dir_dentry;
521 umode_t mode;
522 char *encoded_symname;
523 unsigned int encoded_symlen;
524 struct ecryptfs_crypt_stat *crypt_stat = NULL;
525
526 lower_dentry = ecryptfs_dentry_to_lower(dentry);
527 dget(lower_dentry);
528 lower_dir_dentry = lock_parent(lower_dentry);
529 mode = S_IALLUGO;
530 encoded_symlen = ecryptfs_encode_filename(crypt_stat, symname,
531 strlen(symname),
532 &encoded_symname);
533 if (encoded_symlen < 0) {
534 rc = encoded_symlen;
535 goto out_lock;
536 }
537 rc = vfs_symlink(lower_dir_dentry->d_inode, lower_dentry,
538 encoded_symname, mode);
539 kfree(encoded_symname);
540 if (rc || !lower_dentry->d_inode)
541 goto out_lock;
542 rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 0);
543 if (rc)
544 goto out_lock;
545 ecryptfs_copy_attr_timesizes(dir, lower_dir_dentry->d_inode);
546out_lock:
547 unlock_dir(lower_dir_dentry);
548 dput(lower_dentry);
549 if (!dentry->d_inode)
550 d_drop(dentry);
551 return rc;
552}
553
/**
 * ecryptfs_mkdir
 * @dir: inode of the parent directory
 * @dentry: The eCryptfs dentry of the directory to create
 * @mode: Permission bits for the new directory
 *
 * Creates the directory in the lower filesystem and interposes an
 * eCryptfs inode over the new lower directory inode.
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int rc;
	struct dentry *lower_dentry;
	struct dentry *lower_dir_dentry;

	lower_dentry = ecryptfs_dentry_to_lower(dentry);
	lower_dir_dentry = lock_parent(lower_dentry);
	rc = vfs_mkdir(lower_dir_dentry->d_inode, lower_dentry, mode);
	if (rc || !lower_dentry->d_inode)
		goto out;
	rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 0);
	if (rc)
		goto out;
	ecryptfs_copy_attr_timesizes(dir, lower_dir_dentry->d_inode);
	/* Keep the upper parent's link count in sync with the lower */
	dir->i_nlink = lower_dir_dentry->d_inode->i_nlink;
out:
	unlock_dir(lower_dir_dentry);
	if (!dentry->d_inode)
		d_drop(dentry);
	return rc;
}
576
577static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
578{
579 int rc = 0;
580 struct dentry *tdentry = NULL;
581 struct dentry *lower_dentry;
582 struct dentry *tlower_dentry = NULL;
583 struct dentry *lower_dir_dentry;
584
585 lower_dentry = ecryptfs_dentry_to_lower(dentry);
586 if (!(tdentry = dget(dentry))) {
587 rc = -EINVAL;
588 ecryptfs_printk(KERN_ERR, "Error dget'ing dentry [%p]\n",
589 dentry);
590 goto out;
591 }
592 lower_dir_dentry = lock_parent(lower_dentry);
593 if (!(tlower_dentry = dget(lower_dentry))) {
594 rc = -EINVAL;
595 ecryptfs_printk(KERN_ERR, "Error dget'ing lower_dentry "
596 "[%p]\n", lower_dentry);
597 goto out;
598 }
599 rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
600 if (!rc) {
601 d_delete(tlower_dentry);
602 tlower_dentry = NULL;
603 }
604 ecryptfs_copy_attr_times(dir, lower_dir_dentry->d_inode);
605 dir->i_nlink = lower_dir_dentry->d_inode->i_nlink;
606 unlock_dir(lower_dir_dentry);
607 if (!rc)
608 d_drop(dentry);
609out:
610 if (tdentry)
611 dput(tdentry);
612 if (tlower_dentry)
613 dput(tlower_dentry);
614 return rc;
615}
616
/**
 * ecryptfs_mknod
 * @dir: inode of the parent directory
 * @dentry: The eCryptfs dentry of the node to create
 * @mode: Mode (type and permissions) of the new node
 * @dev: Device number for device nodes
 *
 * Creates the special file in the lower filesystem and interposes an
 * eCryptfs inode over it.
 *
 * Returns zero on success; non-zero otherwise
 */
static int
ecryptfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	int rc;
	struct dentry *lower_dentry;
	struct dentry *lower_dir_dentry;

	lower_dentry = ecryptfs_dentry_to_lower(dentry);
	lower_dir_dentry = lock_parent(lower_dentry);
	rc = vfs_mknod(lower_dir_dentry->d_inode, lower_dentry, mode, dev);
	if (rc || !lower_dentry->d_inode)
		goto out;
	rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 0);
	if (rc)
		goto out;
	ecryptfs_copy_attr_timesizes(dir, lower_dir_dentry->d_inode);
out:
	unlock_dir(lower_dir_dentry);
	if (!dentry->d_inode)
		d_drop(dentry);
	return rc;
}
639
640static int
641ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
642 struct inode *new_dir, struct dentry *new_dentry)
643{
644 int rc;
645 struct dentry *lower_old_dentry;
646 struct dentry *lower_new_dentry;
647 struct dentry *lower_old_dir_dentry;
648 struct dentry *lower_new_dir_dentry;
649
650 lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
651 lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
652 dget(lower_old_dentry);
653 dget(lower_new_dentry);
654 lower_old_dir_dentry = dget_parent(lower_old_dentry);
655 lower_new_dir_dentry = dget_parent(lower_new_dentry);
656 lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
657 rc = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
658 lower_new_dir_dentry->d_inode, lower_new_dentry);
659 if (rc)
660 goto out_lock;
661 ecryptfs_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
662 if (new_dir != old_dir)
663 ecryptfs_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
664out_lock:
665 unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
666 dput(lower_new_dentry);
667 dput(lower_old_dentry);
668 return rc;
669}
670
671static int
672ecryptfs_readlink(struct dentry *dentry, char __user * buf, int bufsiz)
673{
674 int rc;
675 struct dentry *lower_dentry;
676 char *decoded_name;
677 char *lower_buf;
678 mm_segment_t old_fs;
679 struct ecryptfs_crypt_stat *crypt_stat;
680
681 lower_dentry = ecryptfs_dentry_to_lower(dentry);
682 if (!lower_dentry->d_inode->i_op ||
683 !lower_dentry->d_inode->i_op->readlink) {
684 rc = -EINVAL;
685 goto out;
686 }
687 /* Released in this function */
688 lower_buf = kmalloc(bufsiz, GFP_KERNEL);
689 if (lower_buf == NULL) {
690 ecryptfs_printk(KERN_ERR, "Out of memory\n");
691 rc = -ENOMEM;
692 goto out;
693 }
694 old_fs = get_fs();
695 set_fs(get_ds());
696 ecryptfs_printk(KERN_DEBUG, "Calling readlink w/ "
697 "lower_dentry->d_name.name = [%s]\n",
698 lower_dentry->d_name.name);
699 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
700 (char __user *)lower_buf,
701 bufsiz);
702 set_fs(old_fs);
703 if (rc >= 0) {
704 crypt_stat = NULL;
705 rc = ecryptfs_decode_filename(crypt_stat, lower_buf, rc,
706 &decoded_name);
707 if (rc == -ENOMEM)
708 goto out_free_lower_buf;
709 if (rc > 0) {
710 ecryptfs_printk(KERN_DEBUG, "Copying [%d] bytes "
711 "to userspace: [%*s]\n", rc,
712 decoded_name);
713 if (copy_to_user(buf, decoded_name, rc))
714 rc = -EFAULT;
715 }
716 kfree(decoded_name);
717 ecryptfs_copy_attr_atime(dentry->d_inode,
718 lower_dentry->d_inode);
719 }
720out_free_lower_buf:
721 kfree(lower_buf);
722out:
723 return rc;
724}
725
726static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
727{
728 char *buf;
729 int len = PAGE_SIZE, rc;
730 mm_segment_t old_fs;
731
732 /* Released in ecryptfs_put_link(); only release here on error */
733 buf = kmalloc(len, GFP_KERNEL);
734 if (!buf) {
735 rc = -ENOMEM;
736 goto out;
737 }
738 old_fs = get_fs();
739 set_fs(get_ds());
740 ecryptfs_printk(KERN_DEBUG, "Calling readlink w/ "
741 "dentry->d_name.name = [%s]\n", dentry->d_name.name);
742 rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
743 buf[rc] = '\0';
744 set_fs(old_fs);
745 if (rc < 0)
746 goto out_free;
747 rc = 0;
748 nd_set_link(nd, buf);
749 goto out;
750out_free:
751 kfree(buf);
752out:
753 return ERR_PTR(rc);
754}
755
/**
 * ecryptfs_put_link
 * @dentry: The symlink dentry (unused)
 * @nd: nameidata holding the link text set by ecryptfs_follow_link()
 * @ptr: Cookie from follow_link (unused)
 *
 * Frees the buffer allocated in ecryptfs_follow_link().
 */
static void
ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
{
	/* Free the char* */
	kfree(nd_get_link(nd));
}
762
763/**
764 * upper_size_to_lower_size
765 * @crypt_stat: Crypt_stat associated with file
766 * @upper_size: Size of the upper file
767 *
768 * Calculate the requried size of the lower file based on the
769 * specified size of the upper file. This calculation is based on the
770 * number of headers in the underlying file and the extent size.
771 *
772 * Returns Calculated size of the lower file.
773 */
774static loff_t
775upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
776 loff_t upper_size)
777{
778 loff_t lower_size;
779
780 lower_size = ( crypt_stat->header_extent_size
781 * crypt_stat->num_header_extents_at_front );
782 if (upper_size != 0) {
783 loff_t num_extents;
784
785 num_extents = upper_size >> crypt_stat->extent_shift;
786 if (upper_size & ~crypt_stat->extent_mask)
787 num_extents++;
788 lower_size += (num_extents * crypt_stat->extent_size);
789 }
790 return lower_size;
791}
792
/**
 * ecryptfs_truncate
 * @dentry: The ecryptfs layer dentry
 * @new_length: The length to expand the file to
 *
 * Function to handle truncations modifying the size of the file. Note
 * that the file sizes are interpolated. When expanding, we are simply
 * writing strings of 0's out. When truncating, we need to modify the
 * underlying file size according to the page index interpolations.
 *
 * Returns zero on success; non-zero otherwise
 */
int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
{
	int rc = 0;
	struct inode *inode = dentry->d_inode;
	struct dentry *lower_dentry;
	struct vfsmount *lower_mnt;
	struct file fake_ecryptfs_file, *lower_file = NULL;
	struct ecryptfs_crypt_stat *crypt_stat;
	loff_t i_size = i_size_read(inode);
	loff_t lower_size_before_truncate;
	loff_t lower_size_after_truncate;

	/* Nothing to do when the size is unchanged */
	if (unlikely((new_length == i_size)))
		goto out;
	crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
	/* Set up a fake ecryptfs file, this is used to interface with
	 * the file in the underlying filesystem so that the
	 * truncation has an effect there as well. */
	memset(&fake_ecryptfs_file, 0, sizeof(fake_ecryptfs_file));
	fake_ecryptfs_file.f_dentry = dentry;
	/* Released at out_free: label */
	ecryptfs_set_file_private(&fake_ecryptfs_file,
				  kmem_cache_alloc(ecryptfs_file_info_cache,
						   SLAB_KERNEL));
	if (unlikely(!ecryptfs_file_to_private(&fake_ecryptfs_file))) {
		rc = -ENOMEM;
		goto out;
	}
	lower_dentry = ecryptfs_dentry_to_lower(dentry);
	/* This dget & mntget is released through fput at out_fput: */
	dget(lower_dentry);
	lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
	mntget(lower_mnt);
	lower_file = dentry_open(lower_dentry, lower_mnt, O_RDWR);
	if (unlikely(IS_ERR(lower_file))) {
		rc = PTR_ERR(lower_file);
		goto out_free;
	}
	ecryptfs_set_file_lower(&fake_ecryptfs_file, lower_file);
	/* Switch on growing or shrinking file */
	if (new_length > i_size) {
		/* Growing: write zeros through the crypto layer so the
		 * lower file gains properly-encrypted extents */
		rc = ecryptfs_fill_zeros(&fake_ecryptfs_file, new_length);
		if (rc) {
			ecryptfs_printk(KERN_ERR,
					"Problem with fill_zeros\n");
			goto out_fput;
		}
		i_size_write(inode, new_length);
		/* Persist the new upper size in the lower file's header */
		rc = ecryptfs_write_inode_size_to_header(lower_file,
							 lower_dentry->d_inode,
							 inode);
		if (rc) {
			ecryptfs_printk(KERN_ERR,
					"Problem with ecryptfs_write"
					"_inode_size\n");
			goto out_fput;
		}
	} else { /* new_length < i_size_read(inode) */
		vmtruncate(inode, new_length);
		ecryptfs_write_inode_size_to_header(lower_file,
						    lower_dentry->d_inode,
						    inode);
		/* We are reducing the size of the ecryptfs file, and need to
		 * know if we need to reduce the size of the lower file. */
		lower_size_before_truncate =
		    upper_size_to_lower_size(crypt_stat, i_size);
		lower_size_after_truncate =
		    upper_size_to_lower_size(crypt_stat, new_length);
		if (lower_size_after_truncate < lower_size_before_truncate)
			vmtruncate(lower_dentry->d_inode,
				   lower_size_after_truncate);
	}
	/* Update the access times */
	lower_dentry->d_inode->i_mtime = lower_dentry->d_inode->i_ctime
		= CURRENT_TIME;
	mark_inode_dirty_sync(inode);
out_fput:
	fput(lower_file);
out_free:
	if (ecryptfs_file_to_private(&fake_ecryptfs_file))
		kmem_cache_free(ecryptfs_file_info_cache,
				ecryptfs_file_to_private(&fake_ecryptfs_file));
out:
	return rc;
}
890
891static int
892ecryptfs_permission(struct inode *inode, int mask, struct nameidata *nd)
893{
894 int rc;
895
896 if (nd) {
897 struct vfsmount *vfsmnt_save = nd->mnt;
898 struct dentry *dentry_save = nd->dentry;
899
900 nd->mnt = ecryptfs_dentry_to_lower_mnt(nd->dentry);
901 nd->dentry = ecryptfs_dentry_to_lower(nd->dentry);
902 rc = permission(ecryptfs_inode_to_lower(inode), mask, nd);
903 nd->mnt = vfsmnt_save;
904 nd->dentry = dentry_save;
905 } else
906 rc = permission(ecryptfs_inode_to_lower(inode), mask, NULL);
907 return rc;
908}
909
910/**
911 * ecryptfs_setattr
912 * @dentry: dentry handle to the inode to modify
913 * @ia: Structure with flags of what to change and values
914 *
915 * Updates the metadata of an inode. If the update is to the size
916 * i.e. truncation, then ecryptfs_truncate will handle the size modification
917 * of both the ecryptfs inode and the lower inode.
918 *
919 * All other metadata changes will be passed right to the lower filesystem,
920 * and we will just update our inode to look like the lower.
921 */
922static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
923{
924 int rc = 0;
925 struct dentry *lower_dentry;
926 struct inode *inode;
927 struct inode *lower_inode;
928 struct ecryptfs_crypt_stat *crypt_stat;
929
930 crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
931 lower_dentry = ecryptfs_dentry_to_lower(dentry);
932 inode = dentry->d_inode;
933 lower_inode = ecryptfs_inode_to_lower(inode);
934 if (ia->ia_valid & ATTR_SIZE) {
935 ecryptfs_printk(KERN_DEBUG,
936 "ia->ia_valid = [0x%x] ATTR_SIZE" " = [0x%x]\n",
937 ia->ia_valid, ATTR_SIZE);
938 rc = ecryptfs_truncate(dentry, ia->ia_size);
939 /* ecryptfs_truncate handles resizing of the lower file */
940 ia->ia_valid &= ~ATTR_SIZE;
941 ecryptfs_printk(KERN_DEBUG, "ia->ia_valid = [%x]\n",
942 ia->ia_valid);
943 if (rc < 0)
944 goto out;
945 }
946 rc = notify_change(lower_dentry, ia);
947out:
948 ecryptfs_copy_attr_all(inode, lower_inode);
949 return rc;
950}
951
952static int
953ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
954 size_t size, int flags)
955{
956 int rc = 0;
957 struct dentry *lower_dentry;
958
959 lower_dentry = ecryptfs_dentry_to_lower(dentry);
960 if (!lower_dentry->d_inode->i_op->setxattr) {
961 rc = -ENOSYS;
962 goto out;
963 }
964 mutex_lock(&lower_dentry->d_inode->i_mutex);
965 rc = lower_dentry->d_inode->i_op->setxattr(lower_dentry, name, value,
966 size, flags);
967 mutex_unlock(&lower_dentry->d_inode->i_mutex);
968out:
969 return rc;
970}
971
972static ssize_t
973ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value,
974 size_t size)
975{
976 int rc = 0;
977 struct dentry *lower_dentry;
978
979 lower_dentry = ecryptfs_dentry_to_lower(dentry);
980 if (!lower_dentry->d_inode->i_op->getxattr) {
981 rc = -ENOSYS;
982 goto out;
983 }
984 mutex_lock(&lower_dentry->d_inode->i_mutex);
985 rc = lower_dentry->d_inode->i_op->getxattr(lower_dentry, name, value,
986 size);
987 mutex_unlock(&lower_dentry->d_inode->i_mutex);
988out:
989 return rc;
990}
991
992static ssize_t
993ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
994{
995 int rc = 0;
996 struct dentry *lower_dentry;
997
998 lower_dentry = ecryptfs_dentry_to_lower(dentry);
999 if (!lower_dentry->d_inode->i_op->listxattr) {
1000 rc = -ENOSYS;
1001 goto out;
1002 }
1003 mutex_lock(&lower_dentry->d_inode->i_mutex);
1004 rc = lower_dentry->d_inode->i_op->listxattr(lower_dentry, list, size);
1005 mutex_unlock(&lower_dentry->d_inode->i_mutex);
1006out:
1007 return rc;
1008}
1009
1010static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
1011{
1012 int rc = 0;
1013 struct dentry *lower_dentry;
1014
1015 lower_dentry = ecryptfs_dentry_to_lower(dentry);
1016 if (!lower_dentry->d_inode->i_op->removexattr) {
1017 rc = -ENOSYS;
1018 goto out;
1019 }
1020 mutex_lock(&lower_dentry->d_inode->i_mutex);
1021 rc = lower_dentry->d_inode->i_op->removexattr(lower_dentry, name);
1022 mutex_unlock(&lower_dentry->d_inode->i_mutex);
1023out:
1024 return rc;
1025}
1026
/* iget5-style match callback: non-zero iff @inode wraps
 * @candidate_lower_inode. */
int ecryptfs_inode_test(struct inode *inode, void *candidate_lower_inode)
{
	struct inode *lower_inode = candidate_lower_inode;

	return ecryptfs_inode_to_lower(inode) == lower_inode;
}
1035
/* iget5-style init callback: bind a freshly-created @inode to
 * @lower_inode. Always succeeds. */
int ecryptfs_inode_set(struct inode *inode, void *lower_inode)
{
	struct inode *lower = lower_inode;

	ecryptfs_init_inode(inode, lower);
	return 0;
}
1041
/* Inode operations for eCryptfs symlinks */
struct inode_operations ecryptfs_symlink_iops = {
	.readlink = ecryptfs_readlink,
	.follow_link = ecryptfs_follow_link,
	.put_link = ecryptfs_put_link,
	.permission = ecryptfs_permission,
	.setattr = ecryptfs_setattr,
	.setxattr = ecryptfs_setxattr,
	.getxattr = ecryptfs_getxattr,
	.listxattr = ecryptfs_listxattr,
	.removexattr = ecryptfs_removexattr
};
1053
/* Inode operations for eCryptfs directories */
struct inode_operations ecryptfs_dir_iops = {
	.create = ecryptfs_create,
	.lookup = ecryptfs_lookup,
	.link = ecryptfs_link,
	.unlink = ecryptfs_unlink,
	.symlink = ecryptfs_symlink,
	.mkdir = ecryptfs_mkdir,
	.rmdir = ecryptfs_rmdir,
	.mknod = ecryptfs_mknod,
	.rename = ecryptfs_rename,
	.permission = ecryptfs_permission,
	.setattr = ecryptfs_setattr,
	.setxattr = ecryptfs_setxattr,
	.getxattr = ecryptfs_getxattr,
	.listxattr = ecryptfs_listxattr,
	.removexattr = ecryptfs_removexattr
};
1071
/* Inode operations for regular eCryptfs files */
struct inode_operations ecryptfs_main_iops = {
	.permission = ecryptfs_permission,
	.setattr = ecryptfs_setattr,
	.setxattr = ecryptfs_setxattr,
	.getxattr = ecryptfs_getxattr,
	.listxattr = ecryptfs_listxattr,
	.removexattr = ecryptfs_removexattr
};
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
new file mode 100644
index 000000000000..ba454785a0c5
--- /dev/null
+++ b/fs/ecryptfs/keystore.c
@@ -0,0 +1,1061 @@
1/**
2 * eCryptfs: Linux filesystem encryption layer
3 * In-kernel key management code. Includes functions to parse and
4 * write authentication token-related packets with the underlying
5 * file.
6 *
7 * Copyright (C) 2004-2006 International Business Machines Corp.
8 * Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com>
9 * Michael C. Thompson <mcthomps@us.ibm.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2 of the
14 * License, or (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
24 * 02111-1307, USA.
25 */
26
27#include <linux/string.h>
28#include <linux/sched.h>
29#include <linux/syscalls.h>
30#include <linux/pagemap.h>
31#include <linux/key.h>
32#include <linux/random.h>
33#include <linux/crypto.h>
34#include <linux/scatterlist.h>
35#include "ecryptfs_kernel.h"
36
37/**
38 * request_key returned an error instead of a valid key address;
39 * determine the type of error, make appropriate log entries, and
40 * return an error code.
41 */
42int process_request_key_err(long err_code)
43{
44 int rc = 0;
45
46 switch (err_code) {
47 case ENOKEY:
48 ecryptfs_printk(KERN_WARNING, "No key\n");
49 rc = -ENOENT;
50 break;
51 case EKEYEXPIRED:
52 ecryptfs_printk(KERN_WARNING, "Key expired\n");
53 rc = -ETIME;
54 break;
55 case EKEYREVOKED:
56 ecryptfs_printk(KERN_WARNING, "Key revoked\n");
57 rc = -EINVAL;
58 break;
59 default:
60 ecryptfs_printk(KERN_WARNING, "Unknown error code: "
61 "[0x%.16x]\n", err_code);
62 rc = -EINVAL;
63 }
64 return rc;
65}
66
67static void wipe_auth_tok_list(struct list_head *auth_tok_list_head)
68{
69 struct list_head *walker;
70 struct ecryptfs_auth_tok_list_item *auth_tok_list_item;
71
72 walker = auth_tok_list_head->next;
73 while (walker != auth_tok_list_head) {
74 auth_tok_list_item =
75 list_entry(walker, struct ecryptfs_auth_tok_list_item,
76 list);
77 walker = auth_tok_list_item->list.next;
78 memset(auth_tok_list_item, 0,
79 sizeof(struct ecryptfs_auth_tok_list_item));
80 kmem_cache_free(ecryptfs_auth_tok_list_item_cache,
81 auth_tok_list_item);
82 }
83}
84
/* Slab cache backing struct ecryptfs_auth_tok_list_item allocations */
struct kmem_cache *ecryptfs_auth_tok_list_item_cache;
86
87/**
88 * parse_packet_length
89 * @data: Pointer to memory containing length at offset
90 * @size: This function writes the decoded size to this memory
91 * address; zero on error
92 * @length_size: The number of bytes occupied by the encoded length
93 *
94 * Returns Zero on success
95 */
96static int parse_packet_length(unsigned char *data, size_t *size,
97 size_t *length_size)
98{
99 int rc = 0;
100
101 (*length_size) = 0;
102 (*size) = 0;
103 if (data[0] < 192) {
104 /* One-byte length */
105 (*size) = data[0];
106 (*length_size) = 1;
107 } else if (data[0] < 224) {
108 /* Two-byte length */
109 (*size) = ((data[0] - 192) * 256);
110 (*size) += (data[1] + 192);
111 (*length_size) = 2;
112 } else if (data[0] == 255) {
113 /* Five-byte length; we're not supposed to see this */
114 ecryptfs_printk(KERN_ERR, "Five-byte packet length not "
115 "supported\n");
116 rc = -EINVAL;
117 goto out;
118 } else {
119 ecryptfs_printk(KERN_ERR, "Error parsing packet length\n");
120 rc = -EINVAL;
121 goto out;
122 }
123out:
124 return rc;
125}
126
127/**
128 * write_packet_length
129 * @dest: The byte array target into which to write the
130 * length. Must have at least 5 bytes allocated.
131 * @size: The length to write.
132 * @packet_size_length: The number of bytes used to encode the
133 * packet length is written to this address.
134 *
135 * Returns zero on success; non-zero on error.
136 */
137static int write_packet_length(char *dest, size_t size,
138 size_t *packet_size_length)
139{
140 int rc = 0;
141
142 if (size < 192) {
143 dest[0] = size;
144 (*packet_size_length) = 1;
145 } else if (size < 65536) {
146 dest[0] = (((size - 192) / 256) + 192);
147 dest[1] = ((size - 192) % 256);
148 (*packet_size_length) = 2;
149 } else {
150 rc = -EINVAL;
151 ecryptfs_printk(KERN_WARNING,
152 "Unsupported packet size: [%d]\n", size);
153 }
154 return rc;
155}
156
/**
 * parse_tag_3_packet
 * @crypt_stat: The cryptographic context to modify based on packet
 *              contents.
 * @data: The raw bytes of the packet.
 * @auth_tok_list: eCryptfs parses packets into authentication tokens;
 *                 a new authentication token will be placed at the end
 *                 of this list for this packet.
 * @new_auth_tok: Pointer to a pointer to memory that this function
 *                allocates; sets the memory address of the pointer to
 *                NULL on error. This object is added to the
 *                auth_tok_list.
 * @packet_size: This function writes the size of the parsed packet
 *               into this memory location; zero on error.
 * @max_packet_size: maximum number of bytes to parse
 *
 * Returns zero on success; non-zero on error.
 */
static int
parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
		   unsigned char *data, struct list_head *auth_tok_list,
		   struct ecryptfs_auth_tok **new_auth_tok,
		   size_t *packet_size, size_t max_packet_size)
{
	int rc = 0;
	size_t body_size;
	struct ecryptfs_auth_tok_list_item *auth_tok_list_item;
	size_t length_size;

	(*packet_size) = 0;
	(*new_auth_tok) = NULL;

	/* we check that:
	 * one byte for the Tag 3 ID flag
	 * two bytes for the body size
	 * do not exceed the maximum_packet_size
	 */
	if (unlikely((*packet_size) + 3 > max_packet_size)) {
		ecryptfs_printk(KERN_ERR, "Packet size exceeds max\n");
		rc = -EINVAL;
		goto out;
	}

	/* check for Tag 3 identifier - one byte */
	if (data[(*packet_size)++] != ECRYPTFS_TAG_3_PACKET_TYPE) {
		ecryptfs_printk(KERN_ERR, "Enter w/ first byte != 0x%.2x\n",
				ECRYPTFS_TAG_3_PACKET_TYPE);
		rc = -EINVAL;
		goto out;
	}
	/* Released: wipe_auth_tok_list called in ecryptfs_parse_packet_set or
	 * at end of function upon failure */
	auth_tok_list_item =
	    kmem_cache_alloc(ecryptfs_auth_tok_list_item_cache, SLAB_KERNEL);
	if (!auth_tok_list_item) {
		ecryptfs_printk(KERN_ERR, "Unable to allocate memory\n");
		rc = -ENOMEM;
		goto out;
	}
	memset(auth_tok_list_item, 0,
	       sizeof(struct ecryptfs_auth_tok_list_item));
	(*new_auth_tok) = &auth_tok_list_item->auth_tok;

	/* check for body size - one to two bytes */
	rc = parse_packet_length(&data[(*packet_size)], &body_size,
				 &length_size);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error parsing packet length; "
				"rc = [%d]\n", rc);
		goto out_free;
	}
	if (unlikely(body_size < (0x05 + ECRYPTFS_SALT_SIZE))) {
		ecryptfs_printk(KERN_WARNING, "Invalid body size ([%d])\n",
				body_size);
		rc = -EINVAL;
		goto out_free;
	}
	(*packet_size) += length_size;

	/* now we know the length of the remaining Tag 3 packet size:
	 * 5 fix bytes for: version string, cipher, S2K ID, hash algo,
	 * number of hash iterations
	 * ECRYPTFS_SALT_SIZE bytes for salt
	 * body_size bytes minus the stuff above is the encrypted key size
	 */
	if (unlikely((*packet_size) + body_size > max_packet_size)) {
		ecryptfs_printk(KERN_ERR, "Packet size exceeds max\n");
		rc = -EINVAL;
		goto out_free;
	}

	/* There are 5 characters of additional information in the
	 * packet */
	(*new_auth_tok)->session_key.encrypted_key_size =
		body_size - (0x05 + ECRYPTFS_SALT_SIZE);
	ecryptfs_printk(KERN_DEBUG, "Encrypted key size = [%d]\n",
			(*new_auth_tok)->session_key.encrypted_key_size);

	/* Version 4 (from RFC2440) - one byte */
	if (unlikely(data[(*packet_size)++] != 0x04)) {
		ecryptfs_printk(KERN_DEBUG, "Unknown version number "
				"[%d]\n", data[(*packet_size) - 1]);
		rc = -EINVAL;
		goto out_free;
	}

	/* cipher - one byte */
	ecryptfs_cipher_code_to_string(crypt_stat->cipher,
				       (u16)data[(*packet_size)]);
	/* A little extra work to differentiate among the AES key
	 * sizes; see RFC2440 */
	switch(data[(*packet_size)++]) {
	case RFC2440_CIPHER_AES_192:
		crypt_stat->key_size = 24;
		break;
	default:
		crypt_stat->key_size =
			(*new_auth_tok)->session_key.encrypted_key_size;
	}
	ecryptfs_init_crypt_ctx(crypt_stat);
	/* S2K identifier 3 (from RFC2440) */
	if (unlikely(data[(*packet_size)++] != 0x03)) {
		ecryptfs_printk(KERN_ERR, "Only S2K ID 3 is currently "
				"supported\n");
		rc = -ENOSYS;
		goto out_free;
	}

	/* TODO: finish the hash mapping */
	/* hash algorithm - one byte */
	switch (data[(*packet_size)++]) {
	case 0x01: /* See RFC2440 for these numbers and their mappings */
		/* Choose MD5 */
		/* salt - ECRYPTFS_SALT_SIZE bytes */
		memcpy((*new_auth_tok)->token.password.salt,
		       &data[(*packet_size)], ECRYPTFS_SALT_SIZE);
		(*packet_size) += ECRYPTFS_SALT_SIZE;

		/* This conversion was taken straight from RFC2440 */
		/* number of hash iterations - one byte */
		(*new_auth_tok)->token.password.hash_iterations =
			((u32) 16 + (data[(*packet_size)] & 15))
				<< ((data[(*packet_size)] >> 4) + 6);
		(*packet_size)++;

		/* encrypted session key -
		 * (body_size-5-ECRYPTFS_SALT_SIZE) bytes */
		/* NOTE(review): encrypted_key_size derives from untrusted
		 * on-disk data and is only bounded by max_packet_size
		 * above; confirm it cannot exceed the capacity of
		 * session_key.encrypted_key before this memcpy */
		memcpy((*new_auth_tok)->session_key.encrypted_key,
		       &data[(*packet_size)],
		       (*new_auth_tok)->session_key.encrypted_key_size);
		(*packet_size) +=
			(*new_auth_tok)->session_key.encrypted_key_size;
		(*new_auth_tok)->session_key.flags &=
			~ECRYPTFS_CONTAINS_DECRYPTED_KEY;
		(*new_auth_tok)->session_key.flags |=
			ECRYPTFS_CONTAINS_ENCRYPTED_KEY;
		(*new_auth_tok)->token.password.hash_algo = 0x01;
		break;
	default:
		ecryptfs_printk(KERN_ERR, "Unsupported hash algorithm: "
				"[%d]\n", data[(*packet_size) - 1]);
		rc = -ENOSYS;
		goto out_free;
	}
	(*new_auth_tok)->token_type = ECRYPTFS_PASSWORD;
	/* TODO: Parameterize; we might actually want userspace to
	 * decrypt the session key. */
	ECRYPTFS_CLEAR_FLAG((*new_auth_tok)->session_key.flags,
			    ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT);
	ECRYPTFS_CLEAR_FLAG((*new_auth_tok)->session_key.flags,
			    ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT);
	list_add(&auth_tok_list_item->list, auth_tok_list);
	goto out;
out_free:
	(*new_auth_tok) = NULL;
	memset(auth_tok_list_item, 0,
	       sizeof(struct ecryptfs_auth_tok_list_item));
	kmem_cache_free(ecryptfs_auth_tok_list_item_cache,
			auth_tok_list_item);
out:
	if (rc)
		(*packet_size) = 0;
	return rc;
}
341
342/**
343 * parse_tag_11_packet
344 * @data: The raw bytes of the packet
345 * @contents: This function writes the data contents of the literal
346 * packet into this memory location
347 * @max_contents_bytes: The maximum number of bytes that this function
348 * is allowed to write into contents
349 * @tag_11_contents_size: This function writes the size of the parsed
350 * contents into this memory location; zero on
351 * error
352 * @packet_size: This function writes the size of the parsed packet
353 * into this memory location; zero on error
354 * @max_packet_size: maximum number of bytes to parse
355 *
356 * Returns zero on success; non-zero on error.
357 */
358static int
359parse_tag_11_packet(unsigned char *data, unsigned char *contents,
360 size_t max_contents_bytes, size_t *tag_11_contents_size,
361 size_t *packet_size, size_t max_packet_size)
362{
363 int rc = 0;
364 size_t body_size;
365 size_t length_size;
366
367 (*packet_size) = 0;
368 (*tag_11_contents_size) = 0;
369
370 /* check that:
371 * one byte for the Tag 11 ID flag
372 * two bytes for the Tag 11 length
373 * do not exceed the maximum_packet_size
374 */
375 if (unlikely((*packet_size) + 3 > max_packet_size)) {
376 ecryptfs_printk(KERN_ERR, "Packet size exceeds max\n");
377 rc = -EINVAL;
378 goto out;
379 }
380
381 /* check for Tag 11 identifyer - one byte */
382 if (data[(*packet_size)++] != ECRYPTFS_TAG_11_PACKET_TYPE) {
383 ecryptfs_printk(KERN_WARNING,
384 "Invalid tag 11 packet format\n");
385 rc = -EINVAL;
386 goto out;
387 }
388
389 /* get Tag 11 content length - one or two bytes */
390 rc = parse_packet_length(&data[(*packet_size)], &body_size,
391 &length_size);
392 if (rc) {
393 ecryptfs_printk(KERN_WARNING,
394 "Invalid tag 11 packet format\n");
395 goto out;
396 }
397 (*packet_size) += length_size;
398
399 if (body_size < 13) {
400 ecryptfs_printk(KERN_WARNING, "Invalid body size ([%d])\n",
401 body_size);
402 rc = -EINVAL;
403 goto out;
404 }
405 /* We have 13 bytes of surrounding packet values */
406 (*tag_11_contents_size) = (body_size - 13);
407
408 /* now we know the length of the remainting Tag 11 packet size:
409 * 14 fix bytes for: special flag one, special flag two,
410 * 12 skipped bytes
411 * body_size bytes minus the stuff above is the Tag 11 content
412 */
413 /* FIXME why is the body size one byte smaller than the actual
414 * size of the body?
415 * this seems to be an error here as well as in
416 * write_tag_11_packet() */
417 if (unlikely((*packet_size) + body_size + 1 > max_packet_size)) {
418 ecryptfs_printk(KERN_ERR, "Packet size exceeds max\n");
419 rc = -EINVAL;
420 goto out;
421 }
422
423 /* special flag one - one byte */
424 if (data[(*packet_size)++] != 0x62) {
425 ecryptfs_printk(KERN_WARNING, "Unrecognizable packet\n");
426 rc = -EINVAL;
427 goto out;
428 }
429
430 /* special flag two - one byte */
431 if (data[(*packet_size)++] != 0x08) {
432 ecryptfs_printk(KERN_WARNING, "Unrecognizable packet\n");
433 rc = -EINVAL;
434 goto out;
435 }
436
437 /* skip the next 12 bytes */
438 (*packet_size) += 12; /* We don't care about the filename or
439 * the timestamp */
440
441 /* get the Tag 11 contents - tag_11_contents_size bytes */
442 memcpy(contents, &data[(*packet_size)], (*tag_11_contents_size));
443 (*packet_size) += (*tag_11_contents_size);
444
445out:
446 if (rc) {
447 (*packet_size) = 0;
448 (*tag_11_contents_size) = 0;
449 }
450 return rc;
451}
452
453/**
454 * decrypt_session_key - Decrypt the session key with the given auth_tok.
455 *
456 * Returns Zero on success; non-zero error otherwise.
457 */
458static int decrypt_session_key(struct ecryptfs_auth_tok *auth_tok,
459 struct ecryptfs_crypt_stat *crypt_stat)
460{
461 int rc = 0;
462 struct ecryptfs_password *password_s_ptr;
463 struct crypto_tfm *tfm = NULL;
464 struct scatterlist src_sg[2], dst_sg[2];
465 struct mutex *tfm_mutex = NULL;
466 /* TODO: Use virt_to_scatterlist for these */
467 char *encrypted_session_key;
468 char *session_key;
469
470 password_s_ptr = &auth_tok->token.password;
471 if (ECRYPTFS_CHECK_FLAG(password_s_ptr->flags,
472 ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET))
473 ecryptfs_printk(KERN_DEBUG, "Session key encryption key "
474 "set; skipping key generation\n");
475 ecryptfs_printk(KERN_DEBUG, "Session key encryption key (size [%d])"
476 ":\n",
477 password_s_ptr->session_key_encryption_key_bytes);
478 if (ecryptfs_verbosity > 0)
479 ecryptfs_dump_hex(password_s_ptr->session_key_encryption_key,
480 password_s_ptr->
481 session_key_encryption_key_bytes);
482 if (!strcmp(crypt_stat->cipher,
483 crypt_stat->mount_crypt_stat->global_default_cipher_name)
484 && crypt_stat->mount_crypt_stat->global_key_tfm) {
485 tfm = crypt_stat->mount_crypt_stat->global_key_tfm;
486 tfm_mutex = &crypt_stat->mount_crypt_stat->global_key_tfm_mutex;
487 } else {
488 tfm = crypto_alloc_tfm(crypt_stat->cipher,
489 CRYPTO_TFM_REQ_WEAK_KEY);
490 if (!tfm) {
491 printk(KERN_ERR "Error allocating crypto context\n");
492 rc = -ENOMEM;
493 goto out;
494 }
495 }
496 if (password_s_ptr->session_key_encryption_key_bytes
497 < crypto_tfm_alg_min_keysize(tfm)) {
498 printk(KERN_WARNING "Session key encryption key is [%d] bytes; "
499 "minimum keysize for selected cipher is [%d] bytes.\n",
500 password_s_ptr->session_key_encryption_key_bytes,
501 crypto_tfm_alg_min_keysize(tfm));
502 rc = -EINVAL;
503 goto out;
504 }
505 if (tfm_mutex)
506 mutex_lock(tfm_mutex);
507 crypto_cipher_setkey(tfm, password_s_ptr->session_key_encryption_key,
508 crypt_stat->key_size);
509 /* TODO: virt_to_scatterlist */
510 encrypted_session_key = (char *)__get_free_page(GFP_KERNEL);
511 if (!encrypted_session_key) {
512 ecryptfs_printk(KERN_ERR, "Out of memory\n");
513 rc = -ENOMEM;
514 goto out_free_tfm;
515 }
516 session_key = (char *)__get_free_page(GFP_KERNEL);
517 if (!session_key) {
518 kfree(encrypted_session_key);
519 ecryptfs_printk(KERN_ERR, "Out of memory\n");
520 rc = -ENOMEM;
521 goto out_free_tfm;
522 }
523 memcpy(encrypted_session_key, auth_tok->session_key.encrypted_key,
524 auth_tok->session_key.encrypted_key_size);
525 src_sg[0].page = virt_to_page(encrypted_session_key);
526 src_sg[0].offset = 0;
527 BUG_ON(auth_tok->session_key.encrypted_key_size > PAGE_CACHE_SIZE);
528 src_sg[0].length = auth_tok->session_key.encrypted_key_size;
529 dst_sg[0].page = virt_to_page(session_key);
530 dst_sg[0].offset = 0;
531 auth_tok->session_key.decrypted_key_size =
532 auth_tok->session_key.encrypted_key_size;
533 dst_sg[0].length = auth_tok->session_key.encrypted_key_size;
534 /* TODO: Handle error condition */
535 crypto_cipher_decrypt(tfm, dst_sg, src_sg,
536 auth_tok->session_key.encrypted_key_size);
537 auth_tok->session_key.decrypted_key_size =
538 auth_tok->session_key.encrypted_key_size;
539 memcpy(auth_tok->session_key.decrypted_key, session_key,
540 auth_tok->session_key.decrypted_key_size);
541 auth_tok->session_key.flags |= ECRYPTFS_CONTAINS_DECRYPTED_KEY;
542 memcpy(crypt_stat->key, auth_tok->session_key.decrypted_key,
543 auth_tok->session_key.decrypted_key_size);
544 ECRYPTFS_SET_FLAG(crypt_stat->flags, ECRYPTFS_KEY_VALID);
545 ecryptfs_printk(KERN_DEBUG, "Decrypted session key:\n");
546 if (ecryptfs_verbosity > 0)
547 ecryptfs_dump_hex(crypt_stat->key,
548 crypt_stat->key_size);
549 memset(encrypted_session_key, 0, PAGE_CACHE_SIZE);
550 free_page((unsigned long)encrypted_session_key);
551 memset(session_key, 0, PAGE_CACHE_SIZE);
552 free_page((unsigned long)session_key);
553out_free_tfm:
554 if (tfm_mutex)
555 mutex_unlock(tfm_mutex);
556 else
557 crypto_free_tfm(tfm);
558out:
559 return rc;
560}
561
562/**
563 * ecryptfs_parse_packet_set
564 * @dest: The header page in memory
565 * @version: Version of file format, to guide parsing behavior
566 *
567 * Get crypt_stat to have the file's session key if the requisite key
568 * is available to decrypt the session key.
569 *
570 * Returns Zero if a valid authentication token was retrieved and
571 * processed; negative value for file not encrypted or for error
572 * conditions.
573 */
574int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat,
575 unsigned char *src,
576 struct dentry *ecryptfs_dentry)
577{
578 size_t i = 0;
579 int rc = 0;
580 size_t found_auth_tok = 0;
581 size_t next_packet_is_auth_tok_packet;
582 char sig[ECRYPTFS_SIG_SIZE_HEX];
583 struct list_head auth_tok_list;
584 struct list_head *walker;
585 struct ecryptfs_auth_tok *chosen_auth_tok = NULL;
586 struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
587 &ecryptfs_superblock_to_private(
588 ecryptfs_dentry->d_sb)->mount_crypt_stat;
589 struct ecryptfs_auth_tok *candidate_auth_tok = NULL;
590 size_t packet_size;
591 struct ecryptfs_auth_tok *new_auth_tok;
592 unsigned char sig_tmp_space[ECRYPTFS_SIG_SIZE];
593 size_t tag_11_contents_size;
594 size_t tag_11_packet_size;
595
596 INIT_LIST_HEAD(&auth_tok_list);
597 /* Parse the header to find as many packets as we can, these will be
598 * added the our &auth_tok_list */
599 next_packet_is_auth_tok_packet = 1;
600 while (next_packet_is_auth_tok_packet) {
601 size_t max_packet_size = ((PAGE_CACHE_SIZE - 8) - i);
602
603 switch (src[i]) {
604 case ECRYPTFS_TAG_3_PACKET_TYPE:
605 rc = parse_tag_3_packet(crypt_stat,
606 (unsigned char *)&src[i],
607 &auth_tok_list, &new_auth_tok,
608 &packet_size, max_packet_size);
609 if (rc) {
610 ecryptfs_printk(KERN_ERR, "Error parsing "
611 "tag 3 packet\n");
612 rc = -EIO;
613 goto out_wipe_list;
614 }
615 i += packet_size;
616 rc = parse_tag_11_packet((unsigned char *)&src[i],
617 sig_tmp_space,
618 ECRYPTFS_SIG_SIZE,
619 &tag_11_contents_size,
620 &tag_11_packet_size,
621 max_packet_size);
622 if (rc) {
623 ecryptfs_printk(KERN_ERR, "No valid "
624 "(ecryptfs-specific) literal "
625 "packet containing "
626 "authentication token "
627 "signature found after "
628 "tag 3 packet\n");
629 rc = -EIO;
630 goto out_wipe_list;
631 }
632 i += tag_11_packet_size;
633 if (ECRYPTFS_SIG_SIZE != tag_11_contents_size) {
634 ecryptfs_printk(KERN_ERR, "Expected "
635 "signature of size [%d]; "
636 "read size [%d]\n",
637 ECRYPTFS_SIG_SIZE,
638 tag_11_contents_size);
639 rc = -EIO;
640 goto out_wipe_list;
641 }
642 ecryptfs_to_hex(new_auth_tok->token.password.signature,
643 sig_tmp_space, tag_11_contents_size);
644 new_auth_tok->token.password.signature[
645 ECRYPTFS_PASSWORD_SIG_SIZE] = '\0';
646 ECRYPTFS_SET_FLAG(crypt_stat->flags,
647 ECRYPTFS_ENCRYPTED);
648 break;
649 case ECRYPTFS_TAG_11_PACKET_TYPE:
650 ecryptfs_printk(KERN_WARNING, "Invalid packet set "
651 "(Tag 11 not allowed by itself)\n");
652 rc = -EIO;
653 goto out_wipe_list;
654 break;
655 default:
656 ecryptfs_printk(KERN_DEBUG, "No packet at offset "
657 "[%d] of the file header; hex value of "
658 "character is [0x%.2x]\n", i, src[i]);
659 next_packet_is_auth_tok_packet = 0;
660 }
661 }
662 if (list_empty(&auth_tok_list)) {
663 rc = -EINVAL; /* Do not support non-encrypted files in
664 * the 0.1 release */
665 goto out;
666 }
667 /* If we have a global auth tok, then we should try to use
668 * it */
669 if (mount_crypt_stat->global_auth_tok) {
670 memcpy(sig, mount_crypt_stat->global_auth_tok_sig,
671 ECRYPTFS_SIG_SIZE_HEX);
672 chosen_auth_tok = mount_crypt_stat->global_auth_tok;
673 } else
674 BUG(); /* We should always have a global auth tok in
675 * the 0.1 release */
676 /* Scan list to see if our chosen_auth_tok works */
677 list_for_each(walker, &auth_tok_list) {
678 struct ecryptfs_auth_tok_list_item *auth_tok_list_item;
679 auth_tok_list_item =
680 list_entry(walker, struct ecryptfs_auth_tok_list_item,
681 list);
682 candidate_auth_tok = &auth_tok_list_item->auth_tok;
683 if (unlikely(ecryptfs_verbosity > 0)) {
684 ecryptfs_printk(KERN_DEBUG,
685 "Considering cadidate auth tok:\n");
686 ecryptfs_dump_auth_tok(candidate_auth_tok);
687 }
688 /* TODO: Replace ECRYPTFS_SIG_SIZE_HEX w/ dynamic value */
689 if (candidate_auth_tok->token_type == ECRYPTFS_PASSWORD
690 && !strncmp(candidate_auth_tok->token.password.signature,
691 sig, ECRYPTFS_SIG_SIZE_HEX)) {
692 found_auth_tok = 1;
693 goto leave_list;
694 /* TODO: Transfer the common salt into the
695 * crypt_stat salt */
696 }
697 }
698leave_list:
699 if (!found_auth_tok) {
700 ecryptfs_printk(KERN_ERR, "Could not find authentication "
701 "token on temporary list for sig [%.*s]\n",
702 ECRYPTFS_SIG_SIZE_HEX, sig);
703 rc = -EIO;
704 goto out_wipe_list;
705 } else {
706 memcpy(&(candidate_auth_tok->token.password),
707 &(chosen_auth_tok->token.password),
708 sizeof(struct ecryptfs_password));
709 rc = decrypt_session_key(candidate_auth_tok, crypt_stat);
710 if (rc) {
711 ecryptfs_printk(KERN_ERR, "Error decrypting the "
712 "session key\n");
713 goto out_wipe_list;
714 }
715 rc = ecryptfs_compute_root_iv(crypt_stat);
716 if (rc) {
717 ecryptfs_printk(KERN_ERR, "Error computing "
718 "the root IV\n");
719 goto out_wipe_list;
720 }
721 }
722 rc = ecryptfs_init_crypt_ctx(crypt_stat);
723 if (rc) {
724 ecryptfs_printk(KERN_ERR, "Error initializing crypto "
725 "context for cipher [%s]; rc = [%d]\n",
726 crypt_stat->cipher, rc);
727 }
728out_wipe_list:
729 wipe_auth_tok_list(&auth_tok_list);
730out:
731 return rc;
732}
733
734/**
735 * write_tag_11_packet
736 * @dest: Target into which Tag 11 packet is to be written
737 * @max: Maximum packet length
738 * @contents: Byte array of contents to copy in
739 * @contents_length: Number of bytes in contents
740 * @packet_length: Length of the Tag 11 packet written; zero on error
741 *
742 * Returns zero on success; non-zero on error.
743 */
744static int
745write_tag_11_packet(char *dest, int max, char *contents, size_t contents_length,
746 size_t *packet_length)
747{
748 int rc = 0;
749 size_t packet_size_length;
750
751 (*packet_length) = 0;
752 if ((13 + contents_length) > max) {
753 rc = -EINVAL;
754 ecryptfs_printk(KERN_ERR, "Packet length larger than "
755 "maximum allowable\n");
756 goto out;
757 }
758 /* General packet header */
759 /* Packet tag */
760 dest[(*packet_length)++] = ECRYPTFS_TAG_11_PACKET_TYPE;
761 /* Packet length */
762 rc = write_packet_length(&dest[(*packet_length)],
763 (13 + contents_length), &packet_size_length);
764 if (rc) {
765 ecryptfs_printk(KERN_ERR, "Error generating tag 11 packet "
766 "header; cannot generate packet length\n");
767 goto out;
768 }
769 (*packet_length) += packet_size_length;
770 /* Tag 11 specific */
771 /* One-octet field that describes how the data is formatted */
772 dest[(*packet_length)++] = 0x62; /* binary data */
773 /* One-octet filename length followed by filename */
774 dest[(*packet_length)++] = 8;
775 memcpy(&dest[(*packet_length)], "_CONSOLE", 8);
776 (*packet_length) += 8;
777 /* Four-octet number indicating modification date */
778 memset(&dest[(*packet_length)], 0x00, 4);
779 (*packet_length) += 4;
780 /* Remainder is literal data */
781 memcpy(&dest[(*packet_length)], contents, contents_length);
782 (*packet_length) += contents_length;
783 out:
784 if (rc)
785 (*packet_length) = 0;
786 return rc;
787}
788
789/**
790 * write_tag_3_packet
791 * @dest: Buffer into which to write the packet
792 * @max: Maximum number of bytes that can be written
793 * @auth_tok: Authentication token
794 * @crypt_stat: The cryptographic context
795 * @key_rec: encrypted key
796 * @packet_size: This function will write the number of bytes that end
797 * up constituting the packet; set to zero on error
798 *
799 * Returns zero on success; non-zero on error.
800 */
801static int
802write_tag_3_packet(char *dest, size_t max, struct ecryptfs_auth_tok *auth_tok,
803 struct ecryptfs_crypt_stat *crypt_stat,
804 struct ecryptfs_key_record *key_rec, size_t *packet_size)
805{
806 int rc = 0;
807
808 size_t i;
809 size_t signature_is_valid = 0;
810 size_t encrypted_session_key_valid = 0;
811 char session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES];
812 struct scatterlist dest_sg[2];
813 struct scatterlist src_sg[2];
814 struct crypto_tfm *tfm = NULL;
815 struct mutex *tfm_mutex = NULL;
816 size_t key_rec_size;
817 size_t packet_size_length;
818 size_t cipher_code;
819
820 (*packet_size) = 0;
821 /* Check for a valid signature on the auth_tok */
822 for (i = 0; i < ECRYPTFS_SIG_SIZE_HEX; i++)
823 signature_is_valid |= auth_tok->token.password.signature[i];
824 if (!signature_is_valid)
825 BUG();
826 ecryptfs_from_hex((*key_rec).sig, auth_tok->token.password.signature,
827 ECRYPTFS_SIG_SIZE);
828 encrypted_session_key_valid = 0;
829 for (i = 0; i < crypt_stat->key_size; i++)
830 encrypted_session_key_valid |=
831 auth_tok->session_key.encrypted_key[i];
832 if (encrypted_session_key_valid) {
833 memcpy((*key_rec).enc_key,
834 auth_tok->session_key.encrypted_key,
835 auth_tok->session_key.encrypted_key_size);
836 goto encrypted_session_key_set;
837 }
838 if (auth_tok->session_key.encrypted_key_size == 0)
839 auth_tok->session_key.encrypted_key_size =
840 crypt_stat->key_size;
841 if (crypt_stat->key_size == 24
842 && strcmp("aes", crypt_stat->cipher) == 0) {
843 memset((crypt_stat->key + 24), 0, 8);
844 auth_tok->session_key.encrypted_key_size = 32;
845 }
846 (*key_rec).enc_key_size =
847 auth_tok->session_key.encrypted_key_size;
848 if (ECRYPTFS_CHECK_FLAG(auth_tok->token.password.flags,
849 ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET)) {
850 ecryptfs_printk(KERN_DEBUG, "Using previously generated "
851 "session key encryption key of size [%d]\n",
852 auth_tok->token.password.
853 session_key_encryption_key_bytes);
854 memcpy(session_key_encryption_key,
855 auth_tok->token.password.session_key_encryption_key,
856 crypt_stat->key_size);
857 ecryptfs_printk(KERN_DEBUG,
858 "Cached session key " "encryption key: \n");
859 if (ecryptfs_verbosity > 0)
860 ecryptfs_dump_hex(session_key_encryption_key, 16);
861 }
862 if (unlikely(ecryptfs_verbosity > 0)) {
863 ecryptfs_printk(KERN_DEBUG, "Session key encryption key:\n");
864 ecryptfs_dump_hex(session_key_encryption_key, 16);
865 }
866 rc = virt_to_scatterlist(crypt_stat->key,
867 (*key_rec).enc_key_size, src_sg, 2);
868 if (!rc) {
869 ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
870 "for crypt_stat session key\n");
871 rc = -ENOMEM;
872 goto out;
873 }
874 rc = virt_to_scatterlist((*key_rec).enc_key,
875 (*key_rec).enc_key_size, dest_sg, 2);
876 if (!rc) {
877 ecryptfs_printk(KERN_ERR, "Error generating scatterlist "
878 "for crypt_stat encrypted session key\n");
879 rc = -ENOMEM;
880 goto out;
881 }
882 if (!strcmp(crypt_stat->cipher,
883 crypt_stat->mount_crypt_stat->global_default_cipher_name)
884 && crypt_stat->mount_crypt_stat->global_key_tfm) {
885 tfm = crypt_stat->mount_crypt_stat->global_key_tfm;
886 tfm_mutex = &crypt_stat->mount_crypt_stat->global_key_tfm_mutex;
887 } else
888 tfm = crypto_alloc_tfm(crypt_stat->cipher, 0);
889 if (!tfm) {
890 ecryptfs_printk(KERN_ERR, "Could not initialize crypto "
891 "context for cipher [%s]\n",
892 crypt_stat->cipher);
893 rc = -EINVAL;
894 goto out;
895 }
896 if (tfm_mutex)
897 mutex_lock(tfm_mutex);
898 rc = crypto_cipher_setkey(tfm, session_key_encryption_key,
899 crypt_stat->key_size);
900 if (rc < 0) {
901 if (tfm_mutex)
902 mutex_unlock(tfm_mutex);
903 ecryptfs_printk(KERN_ERR, "Error setting key for crypto "
904 "context\n");
905 goto out;
906 }
907 rc = 0;
908 ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes of the key\n",
909 crypt_stat->key_size);
910 crypto_cipher_encrypt(tfm, dest_sg, src_sg,
911 (*key_rec).enc_key_size);
912 if (tfm_mutex)
913 mutex_unlock(tfm_mutex);
914 ecryptfs_printk(KERN_DEBUG, "This should be the encrypted key:\n");
915 if (ecryptfs_verbosity > 0)
916 ecryptfs_dump_hex((*key_rec).enc_key,
917 (*key_rec).enc_key_size);
918encrypted_session_key_set:
919 /* Now we have a valid key_rec. Append it to the
920 * key_rec set. */
921 key_rec_size = (sizeof(struct ecryptfs_key_record)
922 - ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES
923 + ((*key_rec).enc_key_size));
924 /* TODO: Include a packet size limit as a parameter to this
925 * function once we have multi-packet headers (for versions
926 * later than 0.1 */
927 if (key_rec_size >= ECRYPTFS_MAX_KEYSET_SIZE) {
928 ecryptfs_printk(KERN_ERR, "Keyset too large\n");
929 rc = -EINVAL;
930 goto out;
931 }
932 /* TODO: Packet size limit */
933 /* We have 5 bytes of surrounding packet data */
934 if ((0x05 + ECRYPTFS_SALT_SIZE
935 + (*key_rec).enc_key_size) >= max) {
936 ecryptfs_printk(KERN_ERR, "Authentication token is too "
937 "large\n");
938 rc = -EINVAL;
939 goto out;
940 }
941 /* This format is inspired by OpenPGP; see RFC 2440
942 * packet tag 3 */
943 dest[(*packet_size)++] = ECRYPTFS_TAG_3_PACKET_TYPE;
944 /* ver+cipher+s2k+hash+salt+iter+enc_key */
945 rc = write_packet_length(&dest[(*packet_size)],
946 (0x05 + ECRYPTFS_SALT_SIZE
947 + (*key_rec).enc_key_size),
948 &packet_size_length);
949 if (rc) {
950 ecryptfs_printk(KERN_ERR, "Error generating tag 3 packet "
951 "header; cannot generate packet length\n");
952 goto out;
953 }
954 (*packet_size) += packet_size_length;
955 dest[(*packet_size)++] = 0x04; /* version 4 */
956 cipher_code = ecryptfs_code_for_cipher_string(crypt_stat);
957 if (cipher_code == 0) {
958 ecryptfs_printk(KERN_WARNING, "Unable to generate code for "
959 "cipher [%s]\n", crypt_stat->cipher);
960 rc = -EINVAL;
961 goto out;
962 }
963 dest[(*packet_size)++] = cipher_code;
964 dest[(*packet_size)++] = 0x03; /* S2K */
965 dest[(*packet_size)++] = 0x01; /* MD5 (TODO: parameterize) */
966 memcpy(&dest[(*packet_size)], auth_tok->token.password.salt,
967 ECRYPTFS_SALT_SIZE);
968 (*packet_size) += ECRYPTFS_SALT_SIZE; /* salt */
969 dest[(*packet_size)++] = 0x60; /* hash iterations (65536) */
970 memcpy(&dest[(*packet_size)], (*key_rec).enc_key,
971 (*key_rec).enc_key_size);
972 (*packet_size) += (*key_rec).enc_key_size;
973out:
974 if (tfm && !tfm_mutex)
975 crypto_free_tfm(tfm);
976 if (rc)
977 (*packet_size) = 0;
978 return rc;
979}
980
981/**
982 * ecryptfs_generate_key_packet_set
983 * @dest: Virtual address from which to write the key record set
984 * @crypt_stat: The cryptographic context from which the
985 * authentication tokens will be retrieved
986 * @ecryptfs_dentry: The dentry, used to retrieve the mount crypt stat
987 * for the global parameters
988 * @len: The amount written
989 * @max: The maximum amount of data allowed to be written
990 *
991 * Generates a key packet set and writes it to the virtual address
992 * passed in.
993 *
994 * Returns zero on success; non-zero on error.
995 */
996int
997ecryptfs_generate_key_packet_set(char *dest_base,
998 struct ecryptfs_crypt_stat *crypt_stat,
999 struct dentry *ecryptfs_dentry, size_t *len,
1000 size_t max)
1001{
1002 int rc = 0;
1003 struct ecryptfs_auth_tok *auth_tok;
1004 struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
1005 &ecryptfs_superblock_to_private(
1006 ecryptfs_dentry->d_sb)->mount_crypt_stat;
1007 size_t written;
1008 struct ecryptfs_key_record key_rec;
1009
1010 (*len) = 0;
1011 if (mount_crypt_stat->global_auth_tok) {
1012 auth_tok = mount_crypt_stat->global_auth_tok;
1013 if (auth_tok->token_type == ECRYPTFS_PASSWORD) {
1014 rc = write_tag_3_packet((dest_base + (*len)),
1015 max, auth_tok,
1016 crypt_stat, &key_rec,
1017 &written);
1018 if (rc) {
1019 ecryptfs_printk(KERN_WARNING, "Error "
1020 "writing tag 3 packet\n");
1021 goto out;
1022 }
1023 (*len) += written;
1024 /* Write auth tok signature packet */
1025 rc = write_tag_11_packet(
1026 (dest_base + (*len)),
1027 (max - (*len)),
1028 key_rec.sig, ECRYPTFS_SIG_SIZE, &written);
1029 if (rc) {
1030 ecryptfs_printk(KERN_ERR, "Error writing "
1031 "auth tok signature packet\n");
1032 goto out;
1033 }
1034 (*len) += written;
1035 } else {
1036 ecryptfs_printk(KERN_WARNING, "Unsupported "
1037 "authentication token type\n");
1038 rc = -EINVAL;
1039 goto out;
1040 }
1041 if (rc) {
1042 ecryptfs_printk(KERN_WARNING, "Error writing "
1043 "authentication token packet with sig "
1044 "= [%s]\n",
1045 mount_crypt_stat->global_auth_tok_sig);
1046 rc = -EIO;
1047 goto out;
1048 }
1049 } else
1050 BUG();
1051 if (likely((max - (*len)) > 0)) {
1052 dest_base[(*len)] = 0x00;
1053 } else {
1054 ecryptfs_printk(KERN_ERR, "Error writing boundary byte\n");
1055 rc = -EIO;
1056 }
1057out:
1058 if (rc)
1059 (*len) = 0;
1060 return rc;
1061}
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
new file mode 100644
index 000000000000..7a11b8ae6644
--- /dev/null
+++ b/fs/ecryptfs/main.c
@@ -0,0 +1,831 @@
1/**
2 * eCryptfs: Linux filesystem encryption layer
3 *
4 * Copyright (C) 1997-2003 Erez Zadok
5 * Copyright (C) 2001-2003 Stony Brook University
6 * Copyright (C) 2004-2006 International Business Machines Corp.
7 * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
8 * Michael C. Thompson <mcthomps@us.ibm.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
23 * 02111-1307, USA.
24 */
25
26#include <linux/dcache.h>
27#include <linux/file.h>
28#include <linux/module.h>
29#include <linux/namei.h>
30#include <linux/skbuff.h>
31#include <linux/crypto.h>
32#include <linux/netlink.h>
33#include <linux/mount.h>
34#include <linux/dcache.h>
35#include <linux/pagemap.h>
36#include <linux/key.h>
37#include <linux/parser.h>
38#include "ecryptfs_kernel.h"
39
40/**
41 * Module parameter that defines the ecryptfs_verbosity level.
42 */
43int ecryptfs_verbosity = 0;
44
45module_param(ecryptfs_verbosity, int, 0);
46MODULE_PARM_DESC(ecryptfs_verbosity,
47 "Initial verbosity level (0 or 1; defaults to "
48 "0, which is Quiet)");
49
50void __ecryptfs_printk(const char *fmt, ...)
51{
52 va_list args;
53 va_start(args, fmt);
54 if (fmt[1] == '7') { /* KERN_DEBUG */
55 if (ecryptfs_verbosity >= 1)
56 vprintk(fmt, args);
57 } else
58 vprintk(fmt, args);
59 va_end(args);
60}
61
62/**
63 * ecryptfs_interpose
64 * @lower_dentry: Existing dentry in the lower filesystem
65 * @dentry: ecryptfs' dentry
66 * @sb: ecryptfs's super_block
67 * @flag: If set to true, then d_add is called, else d_instantiate is called
68 *
69 * Interposes upper and lower dentries.
70 *
71 * Returns zero on success; non-zero otherwise
72 */
73int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
74 struct super_block *sb, int flag)
75{
76 struct inode *lower_inode;
77 struct inode *inode;
78 int rc = 0;
79
80 lower_inode = lower_dentry->d_inode;
81 if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb)) {
82 rc = -EXDEV;
83 goto out;
84 }
85 if (!igrab(lower_inode)) {
86 rc = -ESTALE;
87 goto out;
88 }
89 inode = iget5_locked(sb, (unsigned long)lower_inode,
90 ecryptfs_inode_test, ecryptfs_inode_set,
91 lower_inode);
92 if (!inode) {
93 rc = -EACCES;
94 iput(lower_inode);
95 goto out;
96 }
97 if (inode->i_state & I_NEW)
98 unlock_new_inode(inode);
99 else
100 iput(lower_inode);
101 if (S_ISLNK(lower_inode->i_mode))
102 inode->i_op = &ecryptfs_symlink_iops;
103 else if (S_ISDIR(lower_inode->i_mode))
104 inode->i_op = &ecryptfs_dir_iops;
105 if (S_ISDIR(lower_inode->i_mode))
106 inode->i_fop = &ecryptfs_dir_fops;
107 /* TODO: Is there a better way to identify if the inode is
108 * special? */
109 if (S_ISBLK(lower_inode->i_mode) || S_ISCHR(lower_inode->i_mode) ||
110 S_ISFIFO(lower_inode->i_mode) || S_ISSOCK(lower_inode->i_mode))
111 init_special_inode(inode, lower_inode->i_mode,
112 lower_inode->i_rdev);
113 dentry->d_op = &ecryptfs_dops;
114 if (flag)
115 d_add(dentry, inode);
116 else
117 d_instantiate(dentry, inode);
118 ecryptfs_copy_attr_all(inode, lower_inode);
119 /* This size will be overwritten for real files w/ headers and
120 * other metadata */
121 ecryptfs_copy_inode_size(inode, lower_inode);
122out:
123 return rc;
124}
125
/* Mount option tokens; the ecryptfs_-prefixed variants are accepted
 * as aliases for the short forms in ecryptfs_parse_options(). */
enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, ecryptfs_opt_debug,
       ecryptfs_opt_ecryptfs_debug, ecryptfs_opt_cipher,
       ecryptfs_opt_ecryptfs_cipher, ecryptfs_opt_ecryptfs_key_bytes,
       ecryptfs_opt_passthrough, ecryptfs_opt_err };

/* match_token() table mapping mount option strings to the tokens
 * above; terminated by the ecryptfs_opt_err catch-all entry. */
static match_table_t tokens = {
	{ecryptfs_opt_sig, "sig=%s"},
	{ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"},
	{ecryptfs_opt_debug, "debug=%u"},
	{ecryptfs_opt_ecryptfs_debug, "ecryptfs_debug=%u"},
	{ecryptfs_opt_cipher, "cipher=%s"},
	{ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"},
	{ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"},
	{ecryptfs_opt_passthrough, "ecryptfs_passthrough"},
	{ecryptfs_opt_err, NULL}
};
142
143/**
144 * ecryptfs_verify_version
145 * @version: The version number to confirm
146 *
147 * Returns zero on good version; non-zero otherwise
148 */
149static int ecryptfs_verify_version(u16 version)
150{
151 int rc = 0;
152 unsigned char major;
153 unsigned char minor;
154
155 major = ((version >> 8) & 0xFF);
156 minor = (version & 0xFF);
157 if (major != ECRYPTFS_VERSION_MAJOR) {
158 ecryptfs_printk(KERN_ERR, "Major version number mismatch. "
159 "Expected [%d]; got [%d]\n",
160 ECRYPTFS_VERSION_MAJOR, major);
161 rc = -EINVAL;
162 goto out;
163 }
164 if (minor != ECRYPTFS_VERSION_MINOR) {
165 ecryptfs_printk(KERN_ERR, "Minor version number mismatch. "
166 "Expected [%d]; got [%d]\n",
167 ECRYPTFS_VERSION_MINOR, minor);
168 rc = -EINVAL;
169 goto out;
170 }
171out:
172 return rc;
173}
174
175/**
176 * ecryptfs_parse_options
177 * @sb: The ecryptfs super block
178 * @options: The options pased to the kernel
179 *
180 * Parse mount options:
181 * debug=N - ecryptfs_verbosity level for debug output
182 * sig=XXX - description(signature) of the key to use
183 *
184 * Returns the dentry object of the lower-level (lower/interposed)
185 * directory; We want to mount our stackable file system on top of
186 * that lower directory.
187 *
188 * The signature of the key to use must be the description of a key
189 * already in the keyring. Mounting will fail if the key can not be
190 * found.
191 *
192 * Returns zero on success; non-zero on error
193 */
194static int ecryptfs_parse_options(struct super_block *sb, char *options)
195{
196 char *p;
197 int rc = 0;
198 int sig_set = 0;
199 int cipher_name_set = 0;
200 int cipher_key_bytes;
201 int cipher_key_bytes_set = 0;
202 struct key *auth_tok_key = NULL;
203 struct ecryptfs_auth_tok *auth_tok = NULL;
204 struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
205 &ecryptfs_superblock_to_private(sb)->mount_crypt_stat;
206 substring_t args[MAX_OPT_ARGS];
207 int token;
208 char *sig_src;
209 char *sig_dst;
210 char *debug_src;
211 char *cipher_name_dst;
212 char *cipher_name_src;
213 char *cipher_key_bytes_src;
214 struct crypto_tfm *tmp_tfm;
215 int cipher_name_len;
216
217 if (!options) {
218 rc = -EINVAL;
219 goto out;
220 }
221 while ((p = strsep(&options, ",")) != NULL) {
222 if (!*p)
223 continue;
224 token = match_token(p, tokens, args);
225 switch (token) {
226 case ecryptfs_opt_sig:
227 case ecryptfs_opt_ecryptfs_sig:
228 sig_src = args[0].from;
229 sig_dst =
230 mount_crypt_stat->global_auth_tok_sig;
231 memcpy(sig_dst, sig_src, ECRYPTFS_SIG_SIZE_HEX);
232 sig_dst[ECRYPTFS_SIG_SIZE_HEX] = '\0';
233 ecryptfs_printk(KERN_DEBUG,
234 "The mount_crypt_stat "
235 "global_auth_tok_sig set to: "
236 "[%s]\n", sig_dst);
237 sig_set = 1;
238 break;
239 case ecryptfs_opt_debug:
240 case ecryptfs_opt_ecryptfs_debug:
241 debug_src = args[0].from;
242 ecryptfs_verbosity =
243 (int)simple_strtol(debug_src, &debug_src,
244 0);
245 ecryptfs_printk(KERN_DEBUG,
246 "Verbosity set to [%d]" "\n",
247 ecryptfs_verbosity);
248 break;
249 case ecryptfs_opt_cipher:
250 case ecryptfs_opt_ecryptfs_cipher:
251 cipher_name_src = args[0].from;
252 cipher_name_dst =
253 mount_crypt_stat->
254 global_default_cipher_name;
255 strncpy(cipher_name_dst, cipher_name_src,
256 ECRYPTFS_MAX_CIPHER_NAME_SIZE);
257 ecryptfs_printk(KERN_DEBUG,
258 "The mount_crypt_stat "
259 "global_default_cipher_name set to: "
260 "[%s]\n", cipher_name_dst);
261 cipher_name_set = 1;
262 break;
263 case ecryptfs_opt_ecryptfs_key_bytes:
264 cipher_key_bytes_src = args[0].from;
265 cipher_key_bytes =
266 (int)simple_strtol(cipher_key_bytes_src,
267 &cipher_key_bytes_src, 0);
268 mount_crypt_stat->global_default_cipher_key_size =
269 cipher_key_bytes;
270 ecryptfs_printk(KERN_DEBUG,
271 "The mount_crypt_stat "
272 "global_default_cipher_key_size "
273 "set to: [%d]\n", mount_crypt_stat->
274 global_default_cipher_key_size);
275 cipher_key_bytes_set = 1;
276 break;
277 case ecryptfs_opt_passthrough:
278 mount_crypt_stat->flags |=
279 ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED;
280 break;
281 case ecryptfs_opt_err:
282 default:
283 ecryptfs_printk(KERN_WARNING,
284 "eCryptfs: unrecognized option '%s'\n",
285 p);
286 }
287 }
288 /* Do not support lack of mount-wide signature in 0.1
289 * release */
290 if (!sig_set) {
291 rc = -EINVAL;
292 ecryptfs_printk(KERN_ERR, "You must supply a valid "
293 "passphrase auth tok signature as a mount "
294 "parameter; see the eCryptfs README\n");
295 goto out;
296 }
297 if (!cipher_name_set) {
298 cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER);
299 if (unlikely(cipher_name_len
300 >= ECRYPTFS_MAX_CIPHER_NAME_SIZE)) {
301 rc = -EINVAL;
302 BUG();
303 goto out;
304 }
305 memcpy(mount_crypt_stat->global_default_cipher_name,
306 ECRYPTFS_DEFAULT_CIPHER, cipher_name_len);
307 mount_crypt_stat->global_default_cipher_name[cipher_name_len]
308 = '\0';
309 }
310 if (!cipher_key_bytes_set) {
311 mount_crypt_stat->global_default_cipher_key_size =
312 ECRYPTFS_DEFAULT_KEY_BYTES;
313 ecryptfs_printk(KERN_DEBUG, "Cipher key size was not "
314 "specified. Defaulting to [%d]\n",
315 mount_crypt_stat->
316 global_default_cipher_key_size);
317 }
318 rc = ecryptfs_process_cipher(
319 &tmp_tfm,
320 &mount_crypt_stat->global_key_tfm,
321 mount_crypt_stat->global_default_cipher_name,
322 mount_crypt_stat->global_default_cipher_key_size);
323 if (tmp_tfm)
324 crypto_free_tfm(tmp_tfm);
325 if (rc) {
326 printk(KERN_ERR "Error attempting to initialize cipher [%s] "
327 "with key size [%Zd] bytes; rc = [%d]\n",
328 mount_crypt_stat->global_default_cipher_name,
329 mount_crypt_stat->global_default_cipher_key_size, rc);
330 rc = -EINVAL;
331 goto out;
332 }
333 mutex_init(&mount_crypt_stat->global_key_tfm_mutex);
334 ecryptfs_printk(KERN_DEBUG, "Requesting the key with description: "
335 "[%s]\n", mount_crypt_stat->global_auth_tok_sig);
336 /* The reference to this key is held until umount is done The
337 * call to key_put is done in ecryptfs_put_super() */
338 auth_tok_key = request_key(&key_type_user,
339 mount_crypt_stat->global_auth_tok_sig,
340 NULL);
341 if (!auth_tok_key || IS_ERR(auth_tok_key)) {
342 ecryptfs_printk(KERN_ERR, "Could not find key with "
343 "description: [%s]\n",
344 mount_crypt_stat->global_auth_tok_sig);
345 process_request_key_err(PTR_ERR(auth_tok_key));
346 rc = -EINVAL;
347 goto out;
348 }
349 auth_tok = ecryptfs_get_key_payload_data(auth_tok_key);
350 if (ecryptfs_verify_version(auth_tok->version)) {
351 ecryptfs_printk(KERN_ERR, "Data structure version mismatch. "
352 "Userspace tools must match eCryptfs kernel "
353 "module with major version [%d] and minor "
354 "version [%d]\n", ECRYPTFS_VERSION_MAJOR,
355 ECRYPTFS_VERSION_MINOR);
356 rc = -EINVAL;
357 goto out;
358 }
359 if (auth_tok->token_type != ECRYPTFS_PASSWORD) {
360 ecryptfs_printk(KERN_ERR, "Invalid auth_tok structure "
361 "returned from key\n");
362 rc = -EINVAL;
363 goto out;
364 }
365 mount_crypt_stat->global_auth_tok_key = auth_tok_key;
366 mount_crypt_stat->global_auth_tok = auth_tok;
367out:
368 return rc;
369}
370
/* Backing cache for per-superblock private data (struct ecryptfs_sb_info). */
struct kmem_cache *ecryptfs_sb_info_cache;

/**
 * ecryptfs_fill_super
 * @sb: The ecryptfs super block
 * @raw_data: The options passed to mount
 * @silent: Not used but required by function prototype
 *
 * Sets up what we can of the sb, rest is done in ecryptfs_read_super:
 * allocates the sb private data and hand-rolls the root dentry (there
 * is no backing device to provide one).
 *
 * Returns zero on success; non-zero otherwise
 */
static int
ecryptfs_fill_super(struct super_block *sb, void *raw_data, int silent)
{
	int rc = 0;

	/* Released in ecryptfs_put_super() */
	ecryptfs_set_superblock_private(sb,
					kmem_cache_alloc(ecryptfs_sb_info_cache,
							 SLAB_KERNEL));
	if (!ecryptfs_superblock_to_private(sb)) {
		ecryptfs_printk(KERN_WARNING, "Out of memory\n");
		rc = -ENOMEM;
		goto out;
	}
	memset(ecryptfs_superblock_to_private(sb), 0,
	       sizeof(struct ecryptfs_sb_info));
	sb->s_op = &ecryptfs_sops;
	/* Released through deactivate_super(sb) from get_sb_nodev */
	/* Allocate a root dentry named "/" by hand; its inode is
	 * attached later by ecryptfs_read_super(). */
	sb->s_root = d_alloc(NULL, &(const struct qstr) {
			     .hash = 0,.name = "/",.len = 1});
	if (!sb->s_root) {
		ecryptfs_printk(KERN_ERR, "d_alloc failed\n");
		rc = -ENOMEM;
		goto out;
	}
	sb->s_root->d_op = &ecryptfs_dops;
	sb->s_root->d_sb = sb;
	sb->s_root->d_parent = sb->s_root;	/* the root is its own parent */
	/* Released in d_release when dput(sb->s_root) is called */
	/* through deactivate_super(sb) from get_sb_nodev() */
	ecryptfs_set_dentry_private(sb->s_root,
				    kmem_cache_alloc(ecryptfs_dentry_info_cache,
						     SLAB_KERNEL));
	if (!ecryptfs_dentry_to_private(sb->s_root)) {
		ecryptfs_printk(KERN_ERR,
				"dentry_info_cache alloc failed\n");
		rc = -ENOMEM;
		goto out;
	}
	memset(ecryptfs_dentry_to_private(sb->s_root), 0,
	       sizeof(struct ecryptfs_dentry_info));
	rc = 0;
out:
	/* Should be able to rely on deactivate_super called from
	 * get_sb_nodev */
	return rc;
}
430
431/**
432 * ecryptfs_read_super
433 * @sb: The ecryptfs super block
434 * @dev_name: The path to mount over
435 *
436 * Read the super block of the lower filesystem, and use
437 * ecryptfs_interpose to create our initial inode and super block
438 * struct.
439 */
440static int ecryptfs_read_super(struct super_block *sb, const char *dev_name)
441{
442 int rc;
443 struct nameidata nd;
444 struct dentry *lower_root;
445 struct vfsmount *lower_mnt;
446
447 memset(&nd, 0, sizeof(struct nameidata));
448 rc = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
449 if (rc) {
450 ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n");
451 goto out_free;
452 }
453 lower_root = nd.dentry;
454 if (!lower_root->d_inode) {
455 ecryptfs_printk(KERN_WARNING,
456 "No directory to interpose on\n");
457 rc = -ENOENT;
458 goto out_free;
459 }
460 lower_mnt = nd.mnt;
461 ecryptfs_set_superblock_lower(sb, lower_root->d_sb);
462 sb->s_maxbytes = lower_root->d_sb->s_maxbytes;
463 ecryptfs_set_dentry_lower(sb->s_root, lower_root);
464 ecryptfs_set_dentry_lower_mnt(sb->s_root, lower_mnt);
465 if ((rc = ecryptfs_interpose(lower_root, sb->s_root, sb, 0)))
466 goto out_free;
467 rc = 0;
468 goto out;
469out_free:
470 path_release(&nd);
471out:
472 return rc;
473}
474
475/**
476 * ecryptfs_get_sb
477 * @fs_type
478 * @flags
479 * @dev_name: The path to mount over
480 * @raw_data: The options passed into the kernel
481 *
482 * The whole ecryptfs_get_sb process is broken into 4 functions:
483 * ecryptfs_parse_options(): handle options passed to ecryptfs, if any
484 * ecryptfs_fill_super(): used by get_sb_nodev, fills out the super_block
485 * with as much information as it can before needing
486 * the lower filesystem.
487 * ecryptfs_read_super(): this accesses the lower filesystem and uses
488 * ecryptfs_interpolate to perform most of the linking
489 * ecryptfs_interpolate(): links the lower filesystem into ecryptfs
490 */
491static int ecryptfs_get_sb(struct file_system_type *fs_type, int flags,
492 const char *dev_name, void *raw_data,
493 struct vfsmount *mnt)
494{
495 int rc;
496 struct super_block *sb;
497
498 rc = get_sb_nodev(fs_type, flags, raw_data, ecryptfs_fill_super, mnt);
499 if (rc < 0) {
500 printk(KERN_ERR "Getting sb failed; rc = [%d]\n", rc);
501 goto out;
502 }
503 sb = mnt->mnt_sb;
504 rc = ecryptfs_parse_options(sb, raw_data);
505 if (rc) {
506 printk(KERN_ERR "Error parsing options; rc = [%d]\n", rc);
507 goto out_abort;
508 }
509 rc = ecryptfs_read_super(sb, dev_name);
510 if (rc) {
511 printk(KERN_ERR "Reading sb failed; rc = [%d]\n", rc);
512 goto out_abort;
513 }
514 goto out;
515out_abort:
516 dput(sb->s_root);
517 up_write(&sb->s_umount);
518 deactivate_super(sb);
519out:
520 return rc;
521}
522
523/**
524 * ecryptfs_kill_block_super
525 * @sb: The ecryptfs super block
526 *
527 * Used to bring the superblock down and free the private data.
528 * Private data is free'd in ecryptfs_put_super()
529 */
530static void ecryptfs_kill_block_super(struct super_block *sb)
531{
532 generic_shutdown_super(sb);
533}
534
/* Registration record for the "ecryptfs" filesystem type; it has no
 * backing block device (superblocks come from get_sb_nodev). */
static struct file_system_type ecryptfs_fs_type = {
	.owner = THIS_MODULE,
	.name = "ecryptfs",
	.get_sb = ecryptfs_get_sb,
	.kill_sb = ecryptfs_kill_block_super,
	.fs_flags = 0
};
542
543/**
544 * inode_info_init_once
545 *
546 * Initializes the ecryptfs_inode_info_cache when it is created
547 */
548static void
549inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags)
550{
551 struct ecryptfs_inode_info *ei = (struct ecryptfs_inode_info *)vptr;
552
553 if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
554 SLAB_CTOR_CONSTRUCTOR)
555 inode_init_once(&ei->vfs_inode);
556}
557
/* Table describing every kmem cache eCryptfs creates at module init.
 * @cache: where the created cache pointer is stored
 * @name:  cache name (visible in /proc/slabinfo)
 * @size:  object size in bytes
 * @ctor:  optional slab constructor */
static struct ecryptfs_cache_info {
	kmem_cache_t **cache;
	const char *name;
	size_t size;
	void (*ctor)(void*, struct kmem_cache *, unsigned long);
} ecryptfs_cache_infos[] = {
	{
		.cache = &ecryptfs_auth_tok_list_item_cache,
		.name = "ecryptfs_auth_tok_list_item",
		.size = sizeof(struct ecryptfs_auth_tok_list_item),
	},
	{
		.cache = &ecryptfs_file_info_cache,
		.name = "ecryptfs_file_cache",
		.size = sizeof(struct ecryptfs_file_info),
	},
	{
		.cache = &ecryptfs_dentry_info_cache,
		.name = "ecryptfs_dentry_info_cache",
		.size = sizeof(struct ecryptfs_dentry_info),
	},
	{
		.cache = &ecryptfs_inode_info_cache,
		.name = "ecryptfs_inode_cache",
		.size = sizeof(struct ecryptfs_inode_info),
		.ctor = inode_info_init_once,
	},
	{
		.cache = &ecryptfs_sb_info_cache,
		.name = "ecryptfs_sb_cache",
		.size = sizeof(struct ecryptfs_sb_info),
	},
	{
		.cache = &ecryptfs_header_cache_0,
		.name = "ecryptfs_headers_0",
		.size = PAGE_CACHE_SIZE,
	},
	{
		.cache = &ecryptfs_header_cache_1,
		.name = "ecryptfs_headers_1",
		.size = PAGE_CACHE_SIZE,
	},
	{
		.cache = &ecryptfs_header_cache_2,
		.name = "ecryptfs_headers_2",
		.size = PAGE_CACHE_SIZE,
	},
	{
		.cache = &ecryptfs_lower_page_cache,
		.name = "ecryptfs_lower_page_cache",
		.size = PAGE_CACHE_SIZE,
	},
};
611
612static void ecryptfs_free_kmem_caches(void)
613{
614 int i;
615
616 for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) {
617 struct ecryptfs_cache_info *info;
618
619 info = &ecryptfs_cache_infos[i];
620 if (*(info->cache))
621 kmem_cache_destroy(*(info->cache));
622 }
623}
624
625/**
626 * ecryptfs_init_kmem_caches
627 *
628 * Returns zero on success; non-zero otherwise
629 */
630static int ecryptfs_init_kmem_caches(void)
631{
632 int i;
633
634 for (i = 0; i < ARRAY_SIZE(ecryptfs_cache_infos); i++) {
635 struct ecryptfs_cache_info *info;
636
637 info = &ecryptfs_cache_infos[i];
638 *(info->cache) = kmem_cache_create(info->name, info->size,
639 0, SLAB_HWCACHE_ALIGN, info->ctor, NULL);
640 if (!*(info->cache)) {
641 ecryptfs_free_kmem_caches();
642 ecryptfs_printk(KERN_WARNING, "%s: "
643 "kmem_cache_create failed\n",
644 info->name);
645 return -ENOMEM;
646 }
647 }
648 return 0;
649}
650
/* An object exposed through the eCryptfs sysfs subsystem; the embedded
 * kobject lets container_of() recover the wrapper in the attr callbacks. */
struct ecryptfs_obj {
	char *name;
	struct list_head slot_list;
	struct kobject kobj;
};

/* A sysfs attribute whose show/store callbacks take an ecryptfs_obj. */
struct ecryptfs_attribute {
	struct attribute attr;
	ssize_t(*show) (struct ecryptfs_obj *, char *);
	ssize_t(*store) (struct ecryptfs_obj *, const char *, size_t);
};
662
663static ssize_t
664ecryptfs_attr_store(struct kobject *kobj,
665 struct attribute *attr, const char *buf, size_t len)
666{
667 struct ecryptfs_obj *obj = container_of(kobj, struct ecryptfs_obj,
668 kobj);
669 struct ecryptfs_attribute *attribute =
670 container_of(attr, struct ecryptfs_attribute, attr);
671
672 return (attribute->store ? attribute->store(obj, buf, len) : 0);
673}
674
675static ssize_t
676ecryptfs_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
677{
678 struct ecryptfs_obj *obj = container_of(kobj, struct ecryptfs_obj,
679 kobj);
680 struct ecryptfs_attribute *attribute =
681 container_of(attr, struct ecryptfs_attribute, attr);
682
683 return (attribute->show ? attribute->show(obj, buf) : 0);
684}
685
/* Route all sysfs reads/writes through the generic dispatchers above. */
static struct sysfs_ops ecryptfs_sysfs_ops = {
	.show = ecryptfs_attr_show,
	.store = ecryptfs_attr_store
};

static struct kobj_type ecryptfs_ktype = {
	.sysfs_ops = &ecryptfs_sysfs_ops
};

/* Declares the "ecryptfs" sysfs subsystem (defines ecryptfs_subsys). */
static decl_subsys(ecryptfs, &ecryptfs_ktype, NULL);

/* "version" attribute: the numeric feature/version bitmask. */
static ssize_t version_show(struct ecryptfs_obj *obj, char *buff)
{
	return snprintf(buff, PAGE_SIZE, "%d\n", ECRYPTFS_VERSIONING_MASK);
}

static struct ecryptfs_attribute sysfs_attr_version = __ATTR_RO(version);
703
/*
 * Maps each ECRYPTFS_VERSIONING_* feature bit to a human-readable
 * name; consumed by version_str_show() below.
 */
struct ecryptfs_version_str_map_elem {
	u32 flag;
	char *str;
} ecryptfs_version_str_map[] = {
	{ECRYPTFS_VERSIONING_PASSPHRASE, "passphrase"},
	{ECRYPTFS_VERSIONING_PUBKEY, "pubkey"},
	{ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH, "plaintext passthrough"},
	{ECRYPTFS_VERSIONING_POLICY, "policy"}
};
713
/*
 * "version_str" attribute: emit the name of every feature enabled in
 * ECRYPTFS_VERSIONING_MASK, one per line, into the PAGE_SIZE sysfs
 * buffer @buff.  Returns the number of bytes written; the terminating
 * NUL is not counted (each entry's NUL is overwritten by the next).
 */
static ssize_t version_str_show(struct ecryptfs_obj *obj, char *buff)
{
	int i;
	int remaining = PAGE_SIZE;
	int total_written = 0;

	buff[0] = '\0';
	for (i = 0; i < ARRAY_SIZE(ecryptfs_version_str_map); i++) {
		int entry_size;

		/* Skip features not enabled in this build */
		if (!(ECRYPTFS_VERSIONING_MASK
		      & ecryptfs_version_str_map[i].flag))
			continue;
		entry_size = strlen(ecryptfs_version_str_map[i].str);
		/* +2 reserves room for the '\n' and the trailing '\0' */
		if ((entry_size + 2) > remaining)
			goto out;
		memcpy(buff, ecryptfs_version_str_map[i].str, entry_size);
		buff[entry_size++] = '\n';	/* entry_size now includes '\n' */
		buff[entry_size] = '\0';
		buff += entry_size;
		total_written += entry_size;
		remaining -= entry_size;
	}
out:
	return total_written;
}
740
741static struct ecryptfs_attribute sysfs_attr_version_str = __ATTR_RO(version_str);
742
743static int do_sysfs_registration(void)
744{
745 int rc;
746
747 if ((rc = subsystem_register(&ecryptfs_subsys))) {
748 printk(KERN_ERR
749 "Unable to register ecryptfs sysfs subsystem\n");
750 goto out;
751 }
752 rc = sysfs_create_file(&ecryptfs_subsys.kset.kobj,
753 &sysfs_attr_version.attr);
754 if (rc) {
755 printk(KERN_ERR
756 "Unable to create ecryptfs version attribute\n");
757 subsystem_unregister(&ecryptfs_subsys);
758 goto out;
759 }
760 rc = sysfs_create_file(&ecryptfs_subsys.kset.kobj,
761 &sysfs_attr_version_str.attr);
762 if (rc) {
763 printk(KERN_ERR
764 "Unable to create ecryptfs version_str attribute\n");
765 sysfs_remove_file(&ecryptfs_subsys.kset.kobj,
766 &sysfs_attr_version.attr);
767 subsystem_unregister(&ecryptfs_subsys);
768 goto out;
769 }
770out:
771 return rc;
772}
773
/**
 * ecryptfs_init - module entry point
 *
 * Validates that the eCryptfs extent size fits within a page, then
 * creates the kmem caches, registers the filesystem type, and sets up
 * the sysfs subsystem.  Each failure path unwinds the steps completed
 * before it.
 *
 * Returns zero on success; negative errno otherwise.
 */
static int __init ecryptfs_init(void)
{
	int rc;

	if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) {
		rc = -EINVAL;
		ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is "
				"larger than the host's page size, and so "
				"eCryptfs cannot run on this system. The "
				"default eCryptfs extent size is [%d] bytes; "
				"the page size is [%d] bytes.\n",
				ECRYPTFS_DEFAULT_EXTENT_SIZE, PAGE_CACHE_SIZE);
		goto out;
	}
	rc = ecryptfs_init_kmem_caches();
	if (rc) {
		printk(KERN_ERR
		       "Failed to allocate one or more kmem_cache objects\n");
		goto out;
	}
	rc = register_filesystem(&ecryptfs_fs_type);
	if (rc) {
		printk(KERN_ERR "Failed to register filesystem\n");
		ecryptfs_free_kmem_caches();
		goto out;
	}
	/* Hang the ecryptfs subsystem off the "fs" subsystem in sysfs */
	kset_set_kset_s(&ecryptfs_subsys, fs_subsys);
	sysfs_attr_version.attr.owner = THIS_MODULE;
	sysfs_attr_version_str.attr.owner = THIS_MODULE;
	rc = do_sysfs_registration();
	if (rc) {
		printk(KERN_ERR "sysfs registration failed\n");
		unregister_filesystem(&ecryptfs_fs_type);
		ecryptfs_free_kmem_caches();
		goto out;
	}
out:
	return rc;
}
813
/**
 * ecryptfs_exit - module teardown
 *
 * Undoes ecryptfs_init() in reverse order: sysfs attributes and
 * subsystem first, then the filesystem registration, then the caches.
 */
static void __exit ecryptfs_exit(void)
{
	sysfs_remove_file(&ecryptfs_subsys.kset.kobj,
			  &sysfs_attr_version.attr);
	sysfs_remove_file(&ecryptfs_subsys.kset.kobj,
			  &sysfs_attr_version_str.attr);
	subsystem_unregister(&ecryptfs_subsys);
	unregister_filesystem(&ecryptfs_fs_type);
	ecryptfs_free_kmem_caches();
}
824
MODULE_AUTHOR("Michael A. Halcrow <mhalcrow@us.ibm.com>");
MODULE_DESCRIPTION("eCryptfs");

MODULE_LICENSE("GPL");

/* Module entry and exit points */
module_init(ecryptfs_init)
module_exit(ecryptfs_exit)
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
new file mode 100644
index 000000000000..924dd90a4cf5
--- /dev/null
+++ b/fs/ecryptfs/mmap.c
@@ -0,0 +1,788 @@
1/**
2 * eCryptfs: Linux filesystem encryption layer
3 * This is where eCryptfs coordinates the symmetric encryption and
4 * decryption of the file data as it passes between the lower
5 * encrypted file and the upper decrypted file.
6 *
7 * Copyright (C) 1997-2003 Erez Zadok
8 * Copyright (C) 2001-2003 Stony Brook University
9 * Copyright (C) 2004-2006 International Business Machines Corp.
10 * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation; either version 2 of the
15 * License, or (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
25 * 02111-1307, USA.
26 */
27
28#include <linux/pagemap.h>
29#include <linux/writeback.h>
30#include <linux/page-flags.h>
31#include <linux/mount.h>
32#include <linux/file.h>
33#include <linux/crypto.h>
34#include <linux/scatterlist.h>
35#include "ecryptfs_kernel.h"
36
37struct kmem_cache *ecryptfs_lower_page_cache;
38
39/**
40 * ecryptfs_get1page
41 *
42 * Get one page from cache or lower f/s, return error otherwise.
43 *
44 * Returns unlocked and up-to-date page (if ok), with increased
45 * refcnt.
46 */
47static struct page *ecryptfs_get1page(struct file *file, int index)
48{
49 struct page *page;
50 struct dentry *dentry;
51 struct inode *inode;
52 struct address_space *mapping;
53
54 dentry = file->f_dentry;
55 inode = dentry->d_inode;
56 mapping = inode->i_mapping;
57 page = read_cache_page(mapping, index,
58 (filler_t *)mapping->a_ops->readpage,
59 (void *)file);
60 if (IS_ERR(page))
61 goto out;
62 wait_on_page_locked(page);
63out:
64 return page;
65}
66
67static
68int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros);
69
/**
 * ecryptfs_fill_zeros
 * @file: The ecryptfs file
 * @new_length: The new length of the data in the underlying file;
 *              everything between the prior end of the file and the
 *              new end of the file will be filled with zero's.
 *              new_length must be greater than current length
 *
 * Function for handling lseek-ing past the end of the file.
 *
 * Works in three steps: (1) zero the tail of the old last page,
 * (2) zero every whole page in between, (3) zero the head of the new
 * last page.  When old and new EOF fall in the same page, only that
 * one region is zeroed.
 *
 * This function does not support shrinking, only growing a file.
 *
 * Returns zero on success; non-zero otherwise.
 */
int ecryptfs_fill_zeros(struct file *file, loff_t new_length)
{
	int rc = 0;
	struct dentry *dentry = file->f_dentry;
	struct inode *inode = dentry->d_inode;
	pgoff_t old_end_page_index = 0;
	pgoff_t index = old_end_page_index;
	int old_end_pos_in_page = -1;	/* -1 => file was empty */
	pgoff_t new_end_page_index;
	int new_end_pos_in_page;
	loff_t cur_length = i_size_read(inode);

	if (cur_length != 0) {
		/* Page index / in-page offset of the last existing byte */
		index = old_end_page_index =
		    ((cur_length - 1) >> PAGE_CACHE_SHIFT);
		old_end_pos_in_page = ((cur_length - 1) & ~PAGE_CACHE_MASK);
	}
	/* Page index / in-page offset of the last byte after growing */
	new_end_page_index = ((new_length - 1) >> PAGE_CACHE_SHIFT);
	new_end_pos_in_page = ((new_length - 1) & ~PAGE_CACHE_MASK);
	ecryptfs_printk(KERN_DEBUG, "old_end_page_index = [0x%.16x]; "
			"old_end_pos_in_page = [%d]; "
			"new_end_page_index = [0x%.16x]; "
			"new_end_pos_in_page = [%d]\n",
			old_end_page_index, old_end_pos_in_page,
			new_end_page_index, new_end_pos_in_page);
	if (old_end_page_index == new_end_page_index) {
		/* Start and end are in the same page; we just need to
		 * set a portion of the existing page to zero's */
		rc = write_zeros(file, index, (old_end_pos_in_page + 1),
				 (new_end_pos_in_page - old_end_pos_in_page));
		if (rc)
			ecryptfs_printk(KERN_ERR, "write_zeros(file=[%p], "
					"index=[0x%.16x], "
					"old_end_pos_in_page=[d], "
					"(PAGE_CACHE_SIZE - new_end_pos_in_page"
					"=[%d]"
					")=[d]) returned [%d]\n", file, index,
					old_end_pos_in_page,
					new_end_pos_in_page,
					(PAGE_CACHE_SIZE - new_end_pos_in_page),
					rc);
		goto out;
	}
	/* Fill the remainder of the previous last page with zeros */
	rc = write_zeros(file, index, (old_end_pos_in_page + 1),
			 ((PAGE_CACHE_SIZE - 1) - old_end_pos_in_page));
	if (rc) {
		ecryptfs_printk(KERN_ERR, "write_zeros(file=[%p], "
				"index=[0x%.16x], old_end_pos_in_page=[d], "
				"(PAGE_CACHE_SIZE - old_end_pos_in_page)=[d]) "
				"returned [%d]\n", file, index,
				old_end_pos_in_page,
				(PAGE_CACHE_SIZE - old_end_pos_in_page), rc);
		goto out;
	}
	index++;
	while (index < new_end_page_index) {
		/* Fill all intermediate pages with zeros */
		rc = write_zeros(file, index, 0, PAGE_CACHE_SIZE);
		if (rc) {
			ecryptfs_printk(KERN_ERR, "write_zeros(file=[%p], "
					"index=[0x%.16x], "
					"old_end_pos_in_page=[d], "
					"(PAGE_CACHE_SIZE - new_end_pos_in_page"
					"=[%d]"
					")=[d]) returned [%d]\n", file, index,
					old_end_pos_in_page,
					new_end_pos_in_page,
					(PAGE_CACHE_SIZE - new_end_pos_in_page),
					rc);
			goto out;
		}
		index++;
	}
	/* Fill the portion at the beginning of the last new page with
	 * zero's */
	rc = write_zeros(file, index, 0, (new_end_pos_in_page + 1));
	if (rc) {
		ecryptfs_printk(KERN_ERR, "write_zeros(file="
				"[%p], index=[0x%.16x], 0, "
				"new_end_pos_in_page=[%d]"
				"returned [%d]\n", file, index,
				new_end_pos_in_page, rc);
		goto out;
	}
out:
	return rc;
}
172
173/**
174 * ecryptfs_writepage
175 * @page: Page that is locked before this call is made
176 *
177 * Returns zero on success; non-zero otherwise
178 */
179static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
180{
181 struct ecryptfs_page_crypt_context ctx;
182 int rc;
183
184 ctx.page = page;
185 ctx.mode = ECRYPTFS_WRITEPAGE_MODE;
186 ctx.param.wbc = wbc;
187 rc = ecryptfs_encrypt_page(&ctx);
188 if (rc) {
189 ecryptfs_printk(KERN_WARNING, "Error encrypting "
190 "page (upper index [0x%.16x])\n", page->index);
191 ClearPageUptodate(page);
192 goto out;
193 }
194 SetPageUptodate(page);
195 unlock_page(page);
196out:
197 return rc;
198}
199
/**
 * ecryptfs_do_readpage
 * @file: The eCryptfs file
 * @page: Page to fill
 * @lower_page_index: Index of the page in the lower file to get
 *
 * Reads the data from the lower file at index @lower_page_index and
 * copies that data into @page (no decryption; a raw byte-for-byte
 * copy of one page).  @page's uptodate flag is set or cleared to match
 * the outcome.
 *
 * Returns zero on success; non-zero otherwise.
 */
int ecryptfs_do_readpage(struct file *file, struct page *page,
			 pgoff_t lower_page_index)
{
	int rc;
	struct dentry *dentry;
	struct file *lower_file;
	struct dentry *lower_dentry;
	struct inode *inode;
	struct inode *lower_inode;
	char *page_data;
	struct page *lower_page = NULL;
	char *lower_page_data;
	const struct address_space_operations *lower_a_ops;

	dentry = file->f_dentry;
	lower_file = ecryptfs_file_to_lower(file);
	lower_dentry = ecryptfs_dentry_to_lower(dentry);
	inode = dentry->d_inode;
	lower_inode = ecryptfs_inode_to_lower(inode);
	lower_a_ops = lower_inode->i_mapping->a_ops;
	/* Pull the lower page through the lower fs's own readpage() */
	lower_page = read_cache_page(lower_inode->i_mapping, lower_page_index,
				     (filler_t *)lower_a_ops->readpage,
				     (void *)lower_file);
	if (IS_ERR(lower_page)) {
		rc = PTR_ERR(lower_page);
		lower_page = NULL;
		ecryptfs_printk(KERN_ERR, "Error reading from page cache\n");
		goto out;
	}
	wait_on_page_locked(lower_page);
	page_data = (char *)kmap(page);
	/* NOTE(review): kmap() does not return NULL, so this error
	 * branch (and the one below) appears to be dead code -- confirm */
	if (!page_data) {
		rc = -ENOMEM;
		ecryptfs_printk(KERN_ERR, "Error mapping page\n");
		goto out;
	}
	lower_page_data = (char *)kmap(lower_page);
	if (!lower_page_data) {
		rc = -ENOMEM;
		ecryptfs_printk(KERN_ERR, "Error mapping page\n");
		kunmap(page);
		goto out;
	}
	memcpy(page_data, lower_page_data, PAGE_CACHE_SIZE);
	kunmap(lower_page);
	kunmap(page);
	rc = 0;
out:
	if (likely(lower_page))
		page_cache_release(lower_page);
	if (rc == 0)
		SetPageUptodate(page);
	else
		ClearPageUptodate(page);
	return rc;
}
263
/**
 * ecryptfs_readpage
 * @file: This is an ecryptfs file
 * @page: ecryptfs associated page to stick the read data into
 *
 * Read in a page, decrypting if necessary: when the file is not
 * encrypted (no crypt_stat, not ENCRYPTED, or still NEW_FILE) the
 * lower page is copied through verbatim; otherwise it is decrypted.
 * The page is always unlocked before returning, per readpage contract.
 *
 * Returns zero on success; non-zero on error.
 */
static int ecryptfs_readpage(struct file *file, struct page *page)
{
	int rc = 0;
	struct ecryptfs_crypt_stat *crypt_stat;

	BUG_ON(!(file && file->f_dentry && file->f_dentry->d_inode));
	crypt_stat =
	    &ecryptfs_inode_to_private(file->f_dentry->d_inode)->crypt_stat;
	if (!crypt_stat
	    || !ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_ENCRYPTED)
	    || ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_NEW_FILE)) {
		/* Plaintext pass-through: raw copy from the lower page */
		ecryptfs_printk(KERN_DEBUG,
				"Passing through unencrypted page\n");
		rc = ecryptfs_do_readpage(file, page, page->index);
		if (rc) {
			ecryptfs_printk(KERN_ERR, "Error reading page; rc = "
					"[%d]\n", rc);
			goto out;
		}
	} else {
		rc = ecryptfs_decrypt_page(file, page);
		if (rc) {

			ecryptfs_printk(KERN_ERR, "Error decrypting page; "
					"rc = [%d]\n", rc);
			goto out;
		}
	}
	SetPageUptodate(page);
out:
	if (rc)
		ClearPageUptodate(page);
	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n",
			page->index);
	unlock_page(page);
	return rc;
}
310
/*
 * fill_zeros_to_end_of_page - zero the tail of the EOF page
 *
 * If @page is the page containing the current end-of-file, zero
 * everything from max(i_size % PAGE_CACHE_SIZE, @to) to the end of the
 * page so that stale bytes past EOF never reach the lower file.
 * Pages other than the EOF page are left untouched.
 */
static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
{
	struct inode *inode = page->mapping->host;
	int end_byte_in_page;
	int rc = 0;
	char *page_virt;

	if ((i_size_read(inode) / PAGE_CACHE_SIZE) == page->index) {
		end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
		/* Don't zero over the region the caller just wrote */
		if (to > end_byte_in_page)
			end_byte_in_page = to;
		page_virt = kmap(page);
		/* NOTE(review): kmap() does not return NULL; this error
		 * branch is likely unreachable -- confirm */
		if (!page_virt) {
			rc = -ENOMEM;
			ecryptfs_printk(KERN_WARNING,
					"Could not map page\n");
			goto out;
		}
		memset((page_virt + end_byte_in_page), 0,
		       (PAGE_CACHE_SIZE - end_byte_in_page));
		kunmap(page);
	}
out:
	return rc;
}
336
/*
 * ecryptfs_prepare_write
 *
 * kmaps the page (the matching kunmap() is performed in
 * ecryptfs_commit_write()) and, unless the caller is about to
 * overwrite the entire page, reads in the existing contents so the
 * untouched portion is valid.
 *
 * Returns zero on success; non-zero otherwise.
 */
static int ecryptfs_prepare_write(struct file *file, struct page *page,
				  unsigned from, unsigned to)
{
	int rc = 0;

	kmap(page);
	if (from == 0 && to == PAGE_CACHE_SIZE)
		goto out;	/* If we are writing a full page, it will be
				   up to date. */
	if (!PageUptodate(page))
		rc = ecryptfs_do_readpage(file, page, page->index);
out:
	return rc;
}
351
352int ecryptfs_grab_and_map_lower_page(struct page **lower_page,
353 char **lower_virt,
354 struct inode *lower_inode,
355 unsigned long lower_page_index)
356{
357 int rc = 0;
358
359 (*lower_page) = grab_cache_page(lower_inode->i_mapping,
360 lower_page_index);
361 if (!(*lower_page)) {
362 ecryptfs_printk(KERN_ERR, "grab_cache_page for "
363 "lower_page_index = [0x%.16x] failed\n",
364 lower_page_index);
365 rc = -EINVAL;
366 goto out;
367 }
368 if (lower_virt)
369 (*lower_virt) = kmap((*lower_page));
370 else
371 kmap((*lower_page));
372out:
373 return rc;
374}
375
/*
 * ecryptfs_writepage_and_release_lower_page
 *
 * Hands @lower_page to the lower filesystem's writepage(), updates the
 * lower inode's timestamps, and drops the page reference.
 *
 * NOTE(review): on writepage() failure the goto skips
 * page_cache_release(), so the reference on @lower_page appears to
 * leak on that path -- confirm whether any caller releases it.
 *
 * Returns zero on success; non-zero otherwise.
 */
int ecryptfs_writepage_and_release_lower_page(struct page *lower_page,
					      struct inode *lower_inode,
					      struct writeback_control *wbc)
{
	int rc = 0;

	rc = lower_inode->i_mapping->a_ops->writepage(lower_page, wbc);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error calling lower writepage(); "
				"rc = [%d]\n", rc);
		goto out;
	}
	lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME;
	page_cache_release(lower_page);
out:
	return rc;
}
393
/* Undo ecryptfs_grab_and_map_lower_page(): kunmap, unlock, and drop
 * the reference on @lower_page. */
static void ecryptfs_unmap_and_release_lower_page(struct page *lower_page)
{
	kunmap(lower_page);
	ecryptfs_printk(KERN_DEBUG, "Unlocking lower page with index = "
			"[0x%.16x]\n", lower_page->index);
	unlock_page(lower_page);
	page_cache_release(lower_page);
}
402
403/**
404 * ecryptfs_write_inode_size_to_header
405 *
406 * Writes the lower file size to the first 8 bytes of the header.
407 *
408 * Returns zero on success; non-zero on error.
409 */
410int
411ecryptfs_write_inode_size_to_header(struct file *lower_file,
412 struct inode *lower_inode,
413 struct inode *inode)
414{
415 int rc = 0;
416 struct page *header_page;
417 char *header_virt;
418 const struct address_space_operations *lower_a_ops;
419 u64 file_size;
420
421 rc = ecryptfs_grab_and_map_lower_page(&header_page, &header_virt,
422 lower_inode, 0);
423 if (rc) {
424 ecryptfs_printk(KERN_ERR, "grab_cache_page for header page "
425 "failed\n");
426 goto out;
427 }
428 lower_a_ops = lower_inode->i_mapping->a_ops;
429 rc = lower_a_ops->prepare_write(lower_file, header_page, 0, 8);
430 file_size = (u64)i_size_read(inode);
431 ecryptfs_printk(KERN_DEBUG, "Writing size: [0x%.16x]\n", file_size);
432 file_size = cpu_to_be64(file_size);
433 memcpy(header_virt, &file_size, sizeof(u64));
434 rc = lower_a_ops->commit_write(lower_file, header_page, 0, 8);
435 if (rc < 0)
436 ecryptfs_printk(KERN_ERR, "Error commiting header page "
437 "write\n");
438 ecryptfs_unmap_and_release_lower_page(header_page);
439 lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME;
440 mark_inode_dirty_sync(inode);
441out:
442 return rc;
443}
444
/*
 * ecryptfs_get_lower_page
 *
 * On success, *lower_page refers to a locked, kmapped lower page with
 * prepare_write() already called for the byte range [byte_offset,
 * byte_offset + region_bytes).  On failure the page (if grabbed) is
 * unmapped and released and *lower_page is set to NULL.
 *
 * Returns zero on success; non-zero otherwise.
 */
int ecryptfs_get_lower_page(struct page **lower_page, struct inode *lower_inode,
			    struct file *lower_file,
			    unsigned long lower_page_index, int byte_offset,
			    int region_bytes)
{
	int rc = 0;

	rc = ecryptfs_grab_and_map_lower_page(lower_page, NULL, lower_inode,
					      lower_page_index);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "Error attempting to grab and map "
				"lower page with index [0x%.16x]\n",
				lower_page_index);
		goto out;
	}
	rc = lower_inode->i_mapping->a_ops->prepare_write(lower_file,
							  (*lower_page),
							  byte_offset,
							  region_bytes);
	if (rc) {
		ecryptfs_printk(KERN_ERR, "prepare_write for "
				"lower_page_index = [0x%.16x] failed; rc = "
				"[%d]\n", lower_page_index, rc);
	}
out:
	/* On any failure after the grab, clean up and NULL the out-param */
	if (rc && (*lower_page)) {
		ecryptfs_unmap_and_release_lower_page(*lower_page);
		(*lower_page) = NULL;
	}
	return rc;
}
476
477/**
478 * ecryptfs_commit_lower_page
479 *
480 * Returns zero on success; non-zero on error
481 */
482int
483ecryptfs_commit_lower_page(struct page *lower_page, struct inode *lower_inode,
484 struct file *lower_file, int byte_offset,
485 int region_size)
486{
487 int rc = 0;
488
489 rc = lower_inode->i_mapping->a_ops->commit_write(
490 lower_file, lower_page, byte_offset, region_size);
491 if (rc < 0) {
492 ecryptfs_printk(KERN_ERR,
493 "Error committing write; rc = [%d]\n", rc);
494 } else
495 rc = 0;
496 ecryptfs_unmap_and_release_lower_page(lower_page);
497 return rc;
498}
499
500/**
501 * ecryptfs_copy_page_to_lower
502 *
503 * Used for plaintext pass-through; no page index interpolation
504 * required.
505 */
506int ecryptfs_copy_page_to_lower(struct page *page, struct inode *lower_inode,
507 struct file *lower_file)
508{
509 int rc = 0;
510 struct page *lower_page;
511
512 rc = ecryptfs_get_lower_page(&lower_page, lower_inode, lower_file,
513 page->index, 0, PAGE_CACHE_SIZE);
514 if (rc) {
515 ecryptfs_printk(KERN_ERR, "Error attempting to get page "
516 "at index [0x%.16x]\n", page->index);
517 goto out;
518 }
519 /* TODO: aops */
520 memcpy((char *)page_address(lower_page), page_address(page),
521 PAGE_CACHE_SIZE);
522 rc = ecryptfs_commit_lower_page(lower_page, lower_inode, lower_file,
523 0, PAGE_CACHE_SIZE);
524 if (rc)
525 ecryptfs_printk(KERN_ERR, "Error attempting to commit page "
526 "at index [0x%.16x]\n", page->index);
527out:
528 return rc;
529}
530
/*
 * process_new_file - write the initial eCryptfs header to the lower file
 *
 * Zero-fills the first header_pages pages of the lower file and writes
 * the generated header into them (the first 8 bytes of page 0 are
 * re-zeroed, reserving them for the file size), resets the upper inode
 * size to 0, and clears the ECRYPTFS_NEW_FILE flag.
 *
 * Returns zero on success; non-zero otherwise.
 */
static int
process_new_file(struct ecryptfs_crypt_stat *crypt_stat,
		 struct file *file, struct inode *inode)
{
	struct page *header_page;
	const struct address_space_operations *lower_a_ops;
	struct inode *lower_inode;
	struct file *lower_file;
	char *header_virt;
	int rc = 0;
	int current_header_page = 0;
	int header_pages;
	int more_header_data_to_be_written = 1;

	lower_inode = ecryptfs_inode_to_lower(inode);
	lower_file = ecryptfs_file_to_lower(file);
	lower_a_ops = lower_inode->i_mapping->a_ops;
	header_pages = ((crypt_stat->header_extent_size
			 * crypt_stat->num_header_extents_at_front)
			/ PAGE_CACHE_SIZE);
	BUG_ON(header_pages < 1);
	while (current_header_page < header_pages) {
		rc = ecryptfs_grab_and_map_lower_page(&header_page,
						      &header_virt,
						      lower_inode,
						      current_header_page);
		if (rc) {
			ecryptfs_printk(KERN_ERR, "grab_cache_page for "
					"header page [%d] failed; rc = [%d]\n",
					current_header_page, rc);
			goto out;
		}
		rc = lower_a_ops->prepare_write(lower_file, header_page, 0,
						PAGE_CACHE_SIZE);
		if (rc) {
			ecryptfs_printk(KERN_ERR, "Error preparing to write "
					"header page out; rc = [%d]\n", rc);
			goto out;
		}
		memset(header_virt, 0, PAGE_CACHE_SIZE);
		/* The header itself is generated once; subsequent header
		 * pages (if any) stay zero-filled */
		if (more_header_data_to_be_written) {
			rc = ecryptfs_write_headers_virt(header_virt,
							 crypt_stat,
							 file->f_dentry);
			if (rc) {
				ecryptfs_printk(KERN_WARNING, "Error "
						"generating header; rc = "
						"[%d]\n", rc);
				rc = -EIO;
				memset(header_virt, 0, PAGE_CACHE_SIZE);
				ecryptfs_unmap_and_release_lower_page(
					header_page);
				goto out;
			}
			/* First 8 bytes are reserved for the file size,
			 * written later by
			 * ecryptfs_write_inode_size_to_header() */
			if (current_header_page == 0)
				memset(header_virt, 0, 8);
			more_header_data_to_be_written = 0;
		}
		rc = lower_a_ops->commit_write(lower_file, header_page, 0,
					       PAGE_CACHE_SIZE);
		ecryptfs_unmap_and_release_lower_page(header_page);
		if (rc < 0) {
			ecryptfs_printk(KERN_ERR,
					"Error commiting header page write; "
					"rc = [%d]\n", rc);
			break;
		}
		current_header_page++;
	}
	if (rc >= 0) {
		rc = 0;
		ecryptfs_printk(KERN_DEBUG, "lower_inode->i_blocks = "
				"[0x%.16x]\n", lower_inode->i_blocks);
		i_size_write(inode, 0);
		lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty_sync(inode);
	}
	ecryptfs_printk(KERN_DEBUG, "Clearing ECRYPTFS_NEW_FILE flag in "
			"crypt_stat at memory location [%p]\n", crypt_stat);
	ECRYPTFS_CLEAR_FLAG(crypt_stat->flags, ECRYPTFS_NEW_FILE);
out:
	return rc;
}
614
/**
 * ecryptfs_commit_write
 * @file: The eCryptfs file object
 * @page: The eCryptfs page
 * @from: Ignored (we rotate the page IV on each write)
 * @to: Ignored
 *
 * This is where we encrypt the data and pass the encrypted data to
 * the lower filesystem.  In OpenPGP-compatible mode, we operate on
 * entire underlying packets.
 *
 * Runs with the lower inode's i_mutex held.  If the file is brand new,
 * the header is written out first (process_new_file()); then the tail
 * of the EOF page is zeroed, the page is encrypted and written to the
 * lower file, and the inode sizes/timestamps are brought up to date.
 * Also performs the kunmap() matching the kmap() done in
 * ecryptfs_prepare_write().
 */
static int ecryptfs_commit_write(struct file *file, struct page *page,
				 unsigned from, unsigned to)
{
	struct ecryptfs_page_crypt_context ctx;
	loff_t pos;
	struct inode *inode;
	struct inode *lower_inode;
	struct file *lower_file;
	struct ecryptfs_crypt_stat *crypt_stat;
	int rc;

	inode = page->mapping->host;
	lower_inode = ecryptfs_inode_to_lower(inode);
	lower_file = ecryptfs_file_to_lower(file);
	mutex_lock(&lower_inode->i_mutex);
	crypt_stat =
	    &ecryptfs_inode_to_private(file->f_dentry->d_inode)->crypt_stat;
	if (ECRYPTFS_CHECK_FLAG(crypt_stat->flags, ECRYPTFS_NEW_FILE)) {
		ecryptfs_printk(KERN_DEBUG, "ECRYPTFS_NEW_FILE flag set in "
			"crypt_stat at memory location [%p]\n", crypt_stat);
		rc = process_new_file(crypt_stat, file, inode);
		if (rc) {
			ecryptfs_printk(KERN_ERR, "Error processing new "
					"file; rc = [%d]\n", rc);
			goto out;
		}
	} else
		ecryptfs_printk(KERN_DEBUG, "Not a new file\n");
	ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page"
			"(page w/ index = [0x%.16x], to = [%d])\n", page->index,
			to);
	rc = fill_zeros_to_end_of_page(page, to);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error attempting to fill "
				"zeros in page with index = [0x%.16x]\n",
				page->index);
		goto out;
	}
	ctx.page = page;
	ctx.mode = ECRYPTFS_PREPARE_COMMIT_MODE;
	ctx.param.lower_file = lower_file;
	rc = ecryptfs_encrypt_page(&ctx);
	if (rc) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper "
				"index [0x%.16x])\n", page->index);
		goto out;
	}
	rc = 0;
	inode->i_blocks = lower_inode->i_blocks;
	/* Grow the upper file size if this write extended it */
	pos = (page->index << PAGE_CACHE_SHIFT) + to;
	if (pos > i_size_read(inode)) {
		i_size_write(inode, pos);
		ecryptfs_printk(KERN_DEBUG, "Expanded file size to "
				"[0x%.16x]\n", i_size_read(inode));
	}
	ecryptfs_write_inode_size_to_header(lower_file, lower_inode, inode);
	lower_inode->i_mtime = lower_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty_sync(inode);
out:
	kunmap(page);		/* mapped in prior call (prepare_write) */
	if (rc < 0)
		ClearPageUptodate(page);
	else
		SetPageUptodate(page);
	mutex_unlock(&lower_inode->i_mutex);
	return rc;
}
693
694/**
695 * write_zeros
696 * @file: The ecryptfs file
697 * @index: The index in which we are writing
698 * @start: The position after the last block of data
699 * @num_zeros: The number of zeros to write
700 *
701 * Write a specified number of zero's to a page.
702 *
703 * (start + num_zeros) must be less than or equal to PAGE_CACHE_SIZE
704 */
705static
706int write_zeros(struct file *file, pgoff_t index, int start, int num_zeros)
707{
708 int rc = 0;
709 struct page *tmp_page;
710
711 tmp_page = ecryptfs_get1page(file, index);
712 if (IS_ERR(tmp_page)) {
713 ecryptfs_printk(KERN_ERR, "Error getting page at index "
714 "[0x%.16x]\n", index);
715 rc = PTR_ERR(tmp_page);
716 goto out;
717 }
718 kmap(tmp_page);
719 rc = ecryptfs_prepare_write(file, tmp_page, start, start + num_zeros);
720 if (rc) {
721 ecryptfs_printk(KERN_ERR, "Error preparing to write zero's "
722 "to remainder of page at index [0x%.16x]\n",
723 index);
724 kunmap(tmp_page);
725 page_cache_release(tmp_page);
726 goto out;
727 }
728 memset(((char *)page_address(tmp_page) + start), 0, num_zeros);
729 rc = ecryptfs_commit_write(file, tmp_page, start, start + num_zeros);
730 if (rc < 0) {
731 ecryptfs_printk(KERN_ERR, "Error attempting to write zero's "
732 "to remainder of page at index [0x%.16x]\n",
733 index);
734 kunmap(tmp_page);
735 page_cache_release(tmp_page);
736 goto out;
737 }
738 rc = 0;
739 kunmap(tmp_page);
740 page_cache_release(tmp_page);
741out:
742 return rc;
743}
744
745static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block)
746{
747 int rc = 0;
748 struct inode *inode;
749 struct inode *lower_inode;
750
751 inode = (struct inode *)mapping->host;
752 lower_inode = ecryptfs_inode_to_lower(inode);
753 if (lower_inode->i_mapping->a_ops->bmap)
754 rc = lower_inode->i_mapping->a_ops->bmap(lower_inode->i_mapping,
755 block);
756 return rc;
757}
758
759static void ecryptfs_sync_page(struct page *page)
760{
761 struct inode *inode;
762 struct inode *lower_inode;
763 struct page *lower_page;
764
765 inode = page->mapping->host;
766 lower_inode = ecryptfs_inode_to_lower(inode);
767 /* NOTE: Recently swapped with grab_cache_page(), since
768 * sync_page() just makes sure that pending I/O gets done. */
769 lower_page = find_lock_page(lower_inode->i_mapping, page->index);
770 if (!lower_page) {
771 ecryptfs_printk(KERN_DEBUG, "find_lock_page failed\n");
772 return;
773 }
774 lower_page->mapping->a_ops->sync_page(lower_page);
775 ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n",
776 lower_page->index);
777 unlock_page(lower_page);
778 page_cache_release(lower_page);
779}
780
/*
 * Address space operations for eCryptfs pages; these hook the
 * encrypt-on-write / decrypt-on-read logic above into the page cache.
 */
struct address_space_operations ecryptfs_aops = {
	.writepage = ecryptfs_writepage,
	.readpage = ecryptfs_readpage,
	.prepare_write = ecryptfs_prepare_write,
	.commit_write = ecryptfs_commit_write,
	.bmap = ecryptfs_bmap,
	.sync_page = ecryptfs_sync_page,
};
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
new file mode 100644
index 000000000000..c337c0410fb1
--- /dev/null
+++ b/fs/ecryptfs/super.c
@@ -0,0 +1,198 @@
1/**
2 * eCryptfs: Linux filesystem encryption layer
3 *
4 * Copyright (C) 1997-2003 Erez Zadok
5 * Copyright (C) 2001-2003 Stony Brook University
6 * Copyright (C) 2004-2006 International Business Machines Corp.
7 * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
8 * Michael C. Thompson <mcthomps@us.ibm.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License as
12 * published by the Free Software Foundation; either version 2 of the
13 * License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
23 * 02111-1307, USA.
24 */
25
26#include <linux/fs.h>
27#include <linux/mount.h>
28#include <linux/key.h>
29#include <linux/seq_file.h>
30#include <linux/crypto.h>
31#include "ecryptfs_kernel.h"
32
33struct kmem_cache *ecryptfs_inode_info_cache;
34
35/**
36 * ecryptfs_alloc_inode - allocate an ecryptfs inode
37 * @sb: Pointer to the ecryptfs super block
38 *
39 * Called to bring an inode into existence.
40 *
41 * Only handle allocation, setting up structures should be done in
42 * ecryptfs_read_inode. This is because the kernel, between now and
43 * then, will 0 out the private data pointer.
44 *
45 * Returns a pointer to a newly allocated inode, NULL otherwise
46 */
47static struct inode *ecryptfs_alloc_inode(struct super_block *sb)
48{
49 struct ecryptfs_inode_info *ecryptfs_inode;
50 struct inode *inode = NULL;
51
52 ecryptfs_inode = kmem_cache_alloc(ecryptfs_inode_info_cache,
53 SLAB_KERNEL);
54 if (unlikely(!ecryptfs_inode))
55 goto out;
56 ecryptfs_init_crypt_stat(&ecryptfs_inode->crypt_stat);
57 inode = &ecryptfs_inode->vfs_inode;
58out:
59 return inode;
60}
61
62/**
63 * ecryptfs_destroy_inode
64 * @inode: The ecryptfs inode
65 *
66 * This is used during the final destruction of the inode.
67 * All allocation of memory related to the inode, including allocated
68 * memory in the crypt_stat struct, will be released here.
69 * There should be no chance that this deallocation will be missed.
70 */
71static void ecryptfs_destroy_inode(struct inode *inode)
72{
73 struct ecryptfs_inode_info *inode_info;
74
75 inode_info = ecryptfs_inode_to_private(inode);
76 ecryptfs_destruct_crypt_stat(&inode_info->crypt_stat);
77 kmem_cache_free(ecryptfs_inode_info_cache, inode_info);
78}
79
80/**
81 * ecryptfs_init_inode
82 * @inode: The ecryptfs inode
83 *
84 * Set up the ecryptfs inode.
85 */
86void ecryptfs_init_inode(struct inode *inode, struct inode *lower_inode)
87{
88 ecryptfs_set_inode_lower(inode, lower_inode);
89 inode->i_ino = lower_inode->i_ino;
90 inode->i_version++;
91 inode->i_op = &ecryptfs_main_iops;
92 inode->i_fop = &ecryptfs_main_fops;
93 inode->i_mapping->a_ops = &ecryptfs_aops;
94}
95
96/**
97 * ecryptfs_put_super
98 * @sb: Pointer to the ecryptfs super block
99 *
100 * Final actions when unmounting a file system.
101 * This will handle deallocation and release of our private data.
102 */
103static void ecryptfs_put_super(struct super_block *sb)
104{
105 struct ecryptfs_sb_info *sb_info = ecryptfs_superblock_to_private(sb);
106
107 ecryptfs_destruct_mount_crypt_stat(&sb_info->mount_crypt_stat);
108 kmem_cache_free(ecryptfs_sb_info_cache, sb_info);
109 ecryptfs_set_superblock_private(sb, NULL);
110}
111
/**
 * ecryptfs_statfs
 * @dentry: The ecryptfs dentry being queried
 * @buf: The struct kstatfs to fill in with stats
 *
 * Delegates filesystem statistics entirely to the lower filesystem;
 * eCryptfs adds nothing of its own here.
 */
static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);

	return vfs_statfs(lower_dentry, buf);
}
124
/**
 * ecryptfs_clear_inode
 * @inode: The ecryptfs inode
 *
 * Invoked by iput() once the reference count hits zero and the inode
 * is unhashed, just before the inode is destroyed. Drops the
 * reference this inode holds on its lower inode.
 */
static void ecryptfs_clear_inode(struct inode *inode)
{
	struct inode *lower_inode = ecryptfs_inode_to_lower(inode);

	iput(lower_inode);
}
139
140/**
141 * ecryptfs_umount_begin
142 *
143 * Called in do_umount().
144 */
145static void ecryptfs_umount_begin(struct vfsmount *vfsmnt, int flags)
146{
147 struct vfsmount *lower_mnt =
148 ecryptfs_dentry_to_lower_mnt(vfsmnt->mnt_sb->s_root);
149 struct super_block *lower_sb;
150
151 mntput(lower_mnt);
152 lower_sb = lower_mnt->mnt_sb;
153 if (lower_sb->s_op->umount_begin)
154 lower_sb->s_op->umount_begin(lower_mnt, flags);
155}
156
157/**
158 * ecryptfs_show_options
159 *
160 * Prints the directory we are currently mounted over.
161 * Returns zero on success; non-zero otherwise
162 */
163static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt)
164{
165 struct super_block *sb = mnt->mnt_sb;
166 struct dentry *lower_root_dentry = ecryptfs_dentry_to_lower(sb->s_root);
167 struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(sb->s_root);
168 char *tmp_page;
169 char *path;
170 int rc = 0;
171
172 tmp_page = (char *)__get_free_page(GFP_KERNEL);
173 if (!tmp_page) {
174 rc = -ENOMEM;
175 goto out;
176 }
177 path = d_path(lower_root_dentry, lower_mnt, tmp_page, PAGE_SIZE);
178 if (IS_ERR(path)) {
179 rc = PTR_ERR(path);
180 goto out;
181 }
182 seq_printf(m, ",dir=%s", path);
183 free_page((unsigned long)tmp_page);
184out:
185 return rc;
186}
187
/* Superblock operations for eCryptfs. Inode lifetime is handled via
 * the ecryptfs_inode_info slab cache; statfs and clear_inode
 * delegate to the lower filesystem. remount is not supported. */
struct super_operations ecryptfs_sops = {
	.alloc_inode = ecryptfs_alloc_inode,
	.destroy_inode = ecryptfs_destroy_inode,
	.drop_inode = generic_delete_inode,
	.put_super = ecryptfs_put_super,
	.statfs = ecryptfs_statfs,
	.remount_fs = NULL,
	.clear_inode = ecryptfs_clear_inode,
	.umount_begin = ecryptfs_umount_begin,
	.show_options = ecryptfs_show_options
};
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
new file mode 100644
index 000000000000..8c27de8b9568
--- /dev/null
+++ b/fs/gfs2/Kconfig
@@ -0,0 +1,44 @@
1config GFS2_FS
2 tristate "GFS2 file system support"
3 depends on EXPERIMENTAL
4 select FS_POSIX_ACL
5 help
6 A cluster filesystem.
7
8 Allows a cluster of computers to simultaneously use a block device
9 that is shared between them (with FC, iSCSI, NBD, etc...). GFS reads
10 and writes to the block device like a local filesystem, but also uses
	  a lock module to allow the computers to coordinate their I/O so
12 filesystem consistency is maintained. One of the nifty features of
13 GFS is perfect consistency -- changes made to the filesystem on one
14 machine show up immediately on all other machines in the cluster.
15
16 To use the GFS2 filesystem, you will need to enable one or more of
17 the below locking modules. Documentation and utilities for GFS2 can
18 be found here: http://sources.redhat.com/cluster
19
20config GFS2_FS_LOCKING_NOLOCK
21 tristate "GFS2 \"nolock\" locking module"
22 depends on GFS2_FS
23 help
24 Single node locking module for GFS2.
25
26 Use this module if you want to use GFS2 on a single node without
27 its clustering features. You can still take advantage of the
28 large file support, and upgrade to running a full cluster later on
29 if required.
30
31 If you will only be using GFS2 in cluster mode, you do not need this
32 module.
33
34config GFS2_FS_LOCKING_DLM
35 tristate "GFS2 DLM locking module"
36 depends on GFS2_FS
37 select DLM
38 help
39 Multiple node locking module for GFS2
40
41 Most users of GFS2 will require this module. It provides the locking
42 interface between GFS2 and the DLM, which is required to use GFS2
43 in a cluster environment.
44
diff --git a/fs/gfs2/Makefile b/fs/gfs2/Makefile
new file mode 100644
index 000000000000..e3f1ada643ac
--- /dev/null
+++ b/fs/gfs2/Makefile
@@ -0,0 +1,10 @@
# Core GFS2 filesystem module.
obj-$(CONFIG_GFS2_FS) += gfs2.o
gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \
	glops.o inode.o lm.o log.o lops.o locking.o main.o meta_io.o \
	mount.o ondisk.o ops_address.o ops_dentry.o ops_export.o ops_file.o \
	ops_fstype.o ops_inode.o ops_super.o ops_vm.o quota.o \
	recovery.o rgrp.o super.o sys.o trans.o util.o

# Locking backends are separate modules selected by Kconfig.
obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += locking/nolock/
obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += locking/dlm/
10
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
new file mode 100644
index 000000000000..5f959b8ce406
--- /dev/null
+++ b/fs/gfs2/acl.c
@@ -0,0 +1,309 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/posix_acl.h>
16#include <linux/posix_acl_xattr.h>
17#include <linux/gfs2_ondisk.h>
18#include <linux/lm_interface.h>
19
20#include "gfs2.h"
21#include "incore.h"
22#include "acl.h"
23#include "eaops.h"
24#include "eattr.h"
25#include "glock.h"
26#include "inode.h"
27#include "meta_io.h"
28#include "trans.h"
29#include "util.h"
30
31#define ACL_ACCESS 1
32#define ACL_DEFAULT 0
33
34int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
35 struct gfs2_ea_request *er,
36 int *remove, mode_t *mode)
37{
38 struct posix_acl *acl;
39 int error;
40
41 error = gfs2_acl_validate_remove(ip, access);
42 if (error)
43 return error;
44
45 if (!er->er_data)
46 return -EINVAL;
47
48 acl = posix_acl_from_xattr(er->er_data, er->er_data_len);
49 if (IS_ERR(acl))
50 return PTR_ERR(acl);
51 if (!acl) {
52 *remove = 1;
53 return 0;
54 }
55
56 error = posix_acl_valid(acl);
57 if (error)
58 goto out;
59
60 if (access) {
61 error = posix_acl_equiv_mode(acl, mode);
62 if (!error)
63 *remove = 1;
64 else if (error > 0)
65 error = 0;
66 }
67
68out:
69 posix_acl_release(acl);
70 return error;
71}
72
73int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
74{
75 if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
76 return -EOPNOTSUPP;
77 if (current->fsuid != ip->i_di.di_uid && !capable(CAP_FOWNER))
78 return -EPERM;
79 if (S_ISLNK(ip->i_di.di_mode))
80 return -EOPNOTSUPP;
81 if (!access && !S_ISDIR(ip->i_di.di_mode))
82 return -EACCES;
83
84 return 0;
85}
86
/**
 * acl_get - read a POSIX ACL extended attribute off an inode
 * @ip: the inode
 * @access: non-zero for the access ACL, zero for the default ACL
 * @acl: if non-NULL, receives the parsed posix_acl (caller releases)
 * @el: if non-NULL, receives the EA location on success (caller must
 *      brelse el->el_bh); if NULL, a stack-local location is used and
 *      released here
 * @data: if non-NULL, receives the raw xattr buffer (caller kfrees)
 * @len: receives the length of @data
 *
 * Returns: errno; 0 with outputs untouched when no such EA exists
 */
static int acl_get(struct gfs2_inode *ip, int access, struct posix_acl **acl,
		   struct gfs2_ea_location *el, char **data, unsigned int *len)
{
	struct gfs2_ea_request er;
	struct gfs2_ea_location el_this;
	int error;

	/* No extended attributes at all -> no ACL to fetch. */
	if (!ip->i_di.di_eattr)
		return 0;

	memset(&er, 0, sizeof(struct gfs2_ea_request));
	if (access) {
		er.er_name = GFS2_POSIX_ACL_ACCESS;
		er.er_name_len = GFS2_POSIX_ACL_ACCESS_LEN;
	} else {
		er.er_name = GFS2_POSIX_ACL_DEFAULT;
		er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN;
	}
	er.er_type = GFS2_EATYPE_SYS;

	if (!el)
		el = &el_this;

	error = gfs2_ea_find(ip, &er, el);
	if (error)
		return error;
	if (!el->el_ea)
		return 0;
	if (!GFS2_EA_DATA_LEN(el->el_ea))
		goto out;

	er.er_data_len = GFS2_EA_DATA_LEN(el->el_ea);
	er.er_data = kmalloc(er.er_data_len, GFP_KERNEL);
	error = -ENOMEM;
	if (!er.er_data)
		goto out;

	error = gfs2_ea_get_copy(ip, el, er.er_data);
	if (error)
		goto out_kfree;

	if (acl) {
		*acl = posix_acl_from_xattr(er.er_data, er.er_data_len);
		if (IS_ERR(*acl))
			error = PTR_ERR(*acl);
	}

out_kfree:
	/* Ownership of the raw buffer transfers to the caller only on
	 * success and only when the caller asked for it. */
	if (error || !data)
		kfree(er.er_data);
	else {
		*data = er.er_data;
		*len = er.er_data_len;
	}
out:
	/* Release the EA buffer unless the caller keeps the location
	 * (caller-supplied @el and no error). */
	if (error || el == &el_this)
		brelse(el->el_bh);
	return error;
}
146
147/**
148 * gfs2_check_acl_locked - Check an ACL to see if we're allowed to do something
149 * @inode: the file we want to do something to
150 * @mask: what we want to do
151 *
152 * Returns: errno
153 */
154
155int gfs2_check_acl_locked(struct inode *inode, int mask)
156{
157 struct posix_acl *acl = NULL;
158 int error;
159
160 error = acl_get(GFS2_I(inode), ACL_ACCESS, &acl, NULL, NULL, NULL);
161 if (error)
162 return error;
163
164 if (acl) {
165 error = posix_acl_permission(inode, acl, mask);
166 posix_acl_release(acl);
167 return error;
168 }
169
170 return -EAGAIN;
171}
172
173int gfs2_check_acl(struct inode *inode, int mask)
174{
175 struct gfs2_inode *ip = GFS2_I(inode);
176 struct gfs2_holder i_gh;
177 int error;
178
179 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
180 if (!error) {
181 error = gfs2_check_acl_locked(inode, mask);
182 gfs2_glock_dq_uninit(&i_gh);
183 }
184
185 return error;
186}
187
188static int munge_mode(struct gfs2_inode *ip, mode_t mode)
189{
190 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
191 struct buffer_head *dibh;
192 int error;
193
194 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
195 if (error)
196 return error;
197
198 error = gfs2_meta_inode_buffer(ip, &dibh);
199 if (!error) {
200 gfs2_assert_withdraw(sdp,
201 (ip->i_di.di_mode & S_IFMT) == (mode & S_IFMT));
202 ip->i_di.di_mode = mode;
203 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
204 gfs2_dinode_out(&ip->i_di, dibh->b_data);
205 brelse(dibh);
206 }
207
208 gfs2_trans_end(sdp);
209
210 return 0;
211}
212
/**
 * gfs2_acl_create - initialize a new inode's ACLs from its parent
 * @dip: the parent directory
 * @ip: the freshly created inode
 *
 * Inherits the parent's default ACL (if any): directories get it as
 * their own default ACL, and all inodes get a masked access ACL or
 * plain mode bits, per POSIX ACL creation semantics.
 *
 * Returns: errno
 */
int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct posix_acl *acl = NULL, *clone;
	struct gfs2_ea_request er;
	mode_t mode = ip->i_di.di_mode;
	int error;

	if (!sdp->sd_args.ar_posix_acl)
		return 0;
	if (S_ISLNK(ip->i_di.di_mode))
		return 0;

	memset(&er, 0, sizeof(struct gfs2_ea_request));
	er.er_type = GFS2_EATYPE_SYS;

	/* Fetch the parent's default ACL; er.er_data also receives
	 * the raw xattr bytes for reuse below. */
	error = acl_get(dip, ACL_DEFAULT, &acl, NULL,
			&er.er_data, &er.er_data_len);
	if (error)
		return error;
	if (!acl) {
		/* No default ACL: classic umask handling. */
		mode &= ~current->fs->umask;
		if (mode != ip->i_di.di_mode)
			error = munge_mode(ip, mode);
		return error;
	}

	clone = posix_acl_clone(acl, GFP_KERNEL);
	error = -ENOMEM;
	if (!clone)
		goto out;
	posix_acl_release(acl);
	acl = clone;

	/* New directories inherit the default ACL verbatim. */
	if (S_ISDIR(ip->i_di.di_mode)) {
		er.er_name = GFS2_POSIX_ACL_DEFAULT;
		er.er_name_len = GFS2_POSIX_ACL_DEFAULT_LEN;
		error = gfs2_system_eaops.eo_set(ip, &er);
		if (error)
			goto out;
	}

	/* Mask the inherited ACL against the requested mode; >0 means
	 * an explicit access ACL is still needed, 0 means mode bits
	 * alone suffice. */
	error = posix_acl_create_masq(acl, &mode);
	if (error < 0)
		goto out;
	if (error > 0) {
		er.er_name = GFS2_POSIX_ACL_ACCESS;
		er.er_name_len = GFS2_POSIX_ACL_ACCESS_LEN;
		posix_acl_to_xattr(acl, er.er_data, er.er_data_len);
		er.er_mode = mode;
		er.er_flags = GFS2_ERF_MODE;
		error = gfs2_system_eaops.eo_set(ip, &er);
		if (error)
			goto out;
	} else
		munge_mode(ip, mode);

out:
	posix_acl_release(acl);
	kfree(er.er_data);
	return error;
}
275
/**
 * gfs2_acl_chmod - propagate a chmod into the inode's access ACL
 * @ip: the inode
 * @attr: the attributes carrying the new ia_mode
 *
 * Rewrites the access ACL to reflect the new mode bits; falls back
 * to a simple setattr when no access ACL exists.
 *
 * Returns: errno
 */
int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
{
	struct posix_acl *acl = NULL, *clone;
	struct gfs2_ea_location el;
	char *data;
	unsigned int len;
	int error;

	error = acl_get(ip, ACL_ACCESS, &acl, &el, &data, &len);
	if (error)
		return error;
	/* NOTE(review): if posix_acl_from_xattr() ever yields NULL
	 * while data/el were populated, this early return would leak
	 * them -- confirm acl_get's guarantees. */
	if (!acl)
		return gfs2_setattr_simple(ip, attr);

	clone = posix_acl_clone(acl, GFP_KERNEL);
	error = -ENOMEM;
	if (!clone)
		goto out;
	posix_acl_release(acl);
	acl = clone;

	error = posix_acl_chmod_masq(acl, attr->ia_mode);
	if (!error) {
		/* Re-serialize into the buffer acl_get handed us and
		 * write it back in place. */
		posix_acl_to_xattr(acl, data, len);
		error = gfs2_ea_acl_chmod(ip, &el, attr, data);
	}

out:
	posix_acl_release(acl);
	brelse(el.el_bh);
	kfree(data);
	return error;
}
309
diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h
new file mode 100644
index 000000000000..05c294fe0d78
--- /dev/null
+++ b/fs/gfs2/acl.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __ACL_DOT_H__
11#define __ACL_DOT_H__
12
13#include "incore.h"
14
/* Extended attribute names (and their lengths) under which the two
 * POSIX ACL flavors are stored. */
#define GFS2_POSIX_ACL_ACCESS		"posix_acl_access"
#define GFS2_POSIX_ACL_ACCESS_LEN	16
#define GFS2_POSIX_ACL_DEFAULT		"posix_acl_default"
#define GFS2_POSIX_ACL_DEFAULT_LEN	17

/* Test whether an EA (name, len) pair names the access ACL. */
#define GFS2_ACL_IS_ACCESS(name, len) \
         ((len) == GFS2_POSIX_ACL_ACCESS_LEN && \
         !memcmp(GFS2_POSIX_ACL_ACCESS, (name), (len)))

/* Test whether an EA (name, len) pair names the default ACL. */
#define GFS2_ACL_IS_DEFAULT(name, len) \
         ((len) == GFS2_POSIX_ACL_DEFAULT_LEN && \
         !memcmp(GFS2_POSIX_ACL_DEFAULT, (name), (len)))

struct gfs2_ea_request;

int gfs2_acl_validate_set(struct gfs2_inode *ip, int access,
			  struct gfs2_ea_request *er,
			  int *remove, mode_t *mode);
int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access);
int gfs2_check_acl_locked(struct inode *inode, int mask);
int gfs2_check_acl(struct inode *inode, int mask);
int gfs2_acl_create(struct gfs2_inode *dip, struct gfs2_inode *ip);
int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr);
38
39#endif /* __ACL_DOT_H__ */
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
new file mode 100644
index 000000000000..cc57f2ecd219
--- /dev/null
+++ b/fs/gfs2/bmap.c
@@ -0,0 +1,1221 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/gfs2_ondisk.h>
16#include <linux/crc32.h>
17#include <linux/lm_interface.h>
18
19#include "gfs2.h"
20#include "incore.h"
21#include "bmap.h"
22#include "glock.h"
23#include "inode.h"
24#include "meta_io.h"
25#include "quota.h"
26#include "rgrp.h"
27#include "trans.h"
28#include "dir.h"
29#include "util.h"
30#include "ops_address.h"
31
32/* This doesn't need to be that large as max 64 bit pointers in a 4k
33 * block is 512, so __u16 is fine for that. It saves stack space to
34 * keep it small.
35 */
36struct metapath {
37 __u16 mp_list[GFS2_MAX_META_HEIGHT];
38};
39
/* Callback invoked for runs of metadata pointers during a recursive
 * walk of the tree; top/bottom presumably bracket the pointer run at
 * the given height -- see recursive_scan() for the exact contract. */
typedef int (*block_call_t) (struct gfs2_inode *ip, struct buffer_head *dibh,
			     struct buffer_head *bh, u64 *top,
			     u64 *bottom, unsigned int height,
			     void *data);

/* State carried through a truncate "strip" walk. */
struct strip_mine {
	int sm_first;			/* first pass of the walk */
	unsigned int sm_height;		/* tree height being stripped */
};
49
/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @page: any locked page held by the caller process (may be NULL)
 *
 * Copies the data held inline in the dinode into the page cache page
 * backing the newly allocated data block, then dirties that block.
 *
 * Returns: errno
 */

static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;

	/* Stuffed data always lives at file offset 0; if the caller
	 * didn't hand us page 0, grab (and later release) it. */
	if (!page || page->index) {
		page = grab_cache_page(inode->i_mapping, 0);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);

		/* Inline data follows the dinode header; zero-fill the
		 * rest of the page. */
		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
		       ip->i_di.di_size);
		memset(kaddr + ip->i_di.di_size, 0,
		       PAGE_CACHE_SIZE - ip->i_di.di_size);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	/* Ordered-data and journaled-data modes need the block in the
	 * transaction; otherwise a plain dirty is enough. */
	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	mark_buffer_dirty(bh);

	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}
108
/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 * @page: any locked page held by the caller (passed to the unstuffer)
 *
 * This routine unstuffs a dinode and returns it to a "normal" state such
 * that the height can be grown in the traditional way: the inline data
 * is moved into a freshly allocated block and the dinode becomes a
 * height-1 tree pointing at it.
 *
 * Returns: errno
 */

int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (ip->i_di.di_size) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		if (isdir) {
			/* Directory data is journaled metadata. */
			block = gfs2_alloc_meta(ip);

			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			block = gfs2_alloc_data(ip);

			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/* Set up the pointer to the new block */

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (ip->i_di.di_size) {
		/* First pointer slot right after the dinode header. */
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		ip->i_di.di_blocks++;
		di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
	}

	ip->i_di.di_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}
178
179/**
180 * calc_tree_height - Calculate the height of a metadata tree
181 * @ip: The GFS2 inode
182 * @size: The proposed size of the file
183 *
184 * Work out how tall a metadata tree needs to be in order to accommodate a
185 * file of a particular size. If size is less than the current size of
186 * the inode, then the current size of the inode is used instead of the
187 * supplied one.
188 *
189 * Returns: the height the tree should be
190 */
191
192static unsigned int calc_tree_height(struct gfs2_inode *ip, u64 size)
193{
194 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
195 u64 *arr;
196 unsigned int max, height;
197
198 if (ip->i_di.di_size > size)
199 size = ip->i_di.di_size;
200
201 if (gfs2_is_dir(ip)) {
202 arr = sdp->sd_jheightsize;
203 max = sdp->sd_max_jheight;
204 } else {
205 arr = sdp->sd_heightsize;
206 max = sdp->sd_max_height;
207 }
208
209 for (height = 0; height < max; height++)
210 if (arr[height] >= size)
211 break;
212
213 return height;
214}
215
/**
 * build_height - Build a metadata tree of the requested height
 * @inode: The inode
 * @height: The height to build to
 *
 * Grows the metadata tree by inserting new indirect blocks between the
 * dinode and the existing tree: the old top of the tree is pushed down
 * under a chain of new indirect blocks, and the dinode ends up pointing
 * at the new top.
 *
 * Returns: errno
 */

static int build_height(struct inode *inode, unsigned height)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned new_height = height - ip->i_di.di_height;
	struct buffer_head *dibh;
	struct buffer_head *blocks[GFS2_MAX_META_HEIGHT];
	struct gfs2_dinode *di;
	int error;
	u64 *bp;
	u64 bn;
	unsigned n;

	/* Already tall enough; nothing to do. */
	if (height <= ip->i_di.di_height)
		return 0;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	/* Allocate all the new indirect blocks up front. */
	for(n = 0; n < new_height; n++) {
		bn = gfs2_alloc_meta(ip);
		blocks[n] = gfs2_meta_new(ip->i_gl, bn);
		gfs2_trans_add_bh(ip->i_gl, blocks[n], 1);
	}

	n = 0;
	bn = blocks[0]->b_blocknr;
	/* Chain the new blocks together: each one's first pointer
	 * refers to the next block down. */
	if (new_height > 1) {
		for(; n < new_height-1; n++) {
			gfs2_metatype_set(blocks[n], GFS2_METATYPE_IN,
					  GFS2_FORMAT_IN);
			gfs2_buffer_clear_tail(blocks[n],
					       sizeof(struct gfs2_meta_header));
			bp = (u64 *)(blocks[n]->b_data +
				     sizeof(struct gfs2_meta_header));
			*bp = cpu_to_be64(blocks[n+1]->b_blocknr);
			brelse(blocks[n]);
			blocks[n] = NULL;
		}
	}
	/* The bottom new block inherits the dinode's old pointers. */
	gfs2_metatype_set(blocks[n], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	gfs2_buffer_copy_tail(blocks[n], sizeof(struct gfs2_meta_header),
			      dibh, sizeof(struct gfs2_dinode));
	brelse(blocks[n]);
	/* Point the dinode at the new top and record the new height. */
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	*(__be64 *)(di + 1) = cpu_to_be64(bn);
	ip->i_di.di_height += new_height;
	ip->i_di.di_blocks += new_height;
	di->di_height = cpu_to_be16(ip->i_di.di_height);
	di->di_blocks = cpu_to_be64(ip->i_di.di_blocks);
	brelse(dibh);
	return error;
}
280
281/**
282 * find_metapath - Find path through the metadata tree
283 * @ip: The inode pointer
284 * @mp: The metapath to return the result in
285 * @block: The disk block to look up
286 *
287 * This routine returns a struct metapath structure that defines a path
288 * through the metadata of inode "ip" to get to block "block".
289 *
290 * Example:
291 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
292 * filesystem with a blocksize of 4096.
293 *
294 * find_metapath() would return a struct metapath structure set to:
295 * mp_offset = 101342453, mp_height = 3, mp_list[0] = 0, mp_list[1] = 48,
296 * and mp_list[2] = 165.
297 *
298 * That means that in order to get to the block containing the byte at
299 * offset 101342453, we would load the indirect block pointed to by pointer
300 * 0 in the dinode. We would then load the indirect block pointed to by
301 * pointer 48 in that indirect block. We would then load the data block
302 * pointed to by pointer 165 in that indirect block.
303 *
304 * ----------------------------------------
305 * | Dinode | |
306 * | | 4|
307 * | |0 1 2 3 4 5 9|
308 * | | 6|
309 * ----------------------------------------
310 * |
311 * |
312 * V
313 * ----------------------------------------
314 * | Indirect Block |
315 * | 5|
316 * | 4 4 4 4 4 5 5 1|
317 * |0 5 6 7 8 9 0 1 2|
318 * ----------------------------------------
319 * |
320 * |
321 * V
322 * ----------------------------------------
323 * | Indirect Block |
324 * | 1 1 1 1 1 5|
325 * | 6 6 6 6 6 1|
326 * |0 3 4 5 6 7 2|
327 * ----------------------------------------
328 * |
329 * |
330 * V
331 * ----------------------------------------
332 * | Data block containing offset |
333 * | 101342453 |
334 * | |
335 * | |
336 * ----------------------------------------
337 *
338 */
339
static void find_metapath(struct gfs2_inode *ip, u64 block,
			  struct metapath *mp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u64 b = block;
	unsigned int i;

	/* Decompose the logical block into base-sd_inptrs "digits",
	 * least significant first: do_div() divides b in place and
	 * returns the remainder, filling mp_list from the bottom of
	 * the tree (highest index) up to the dinode (index 0). */
	for (i = ip->i_di.di_height; i--;)
		mp->mp_list[i] = do_div(b, sdp->sd_inptrs);

}
351
352/**
353 * metapointer - Return pointer to start of metadata in a buffer
354 * @bh: The buffer
355 * @height: The metadata height (0 = dinode)
356 * @mp: The metapath
357 *
358 * Return a pointer to the block number of the next height of the metadata
359 * tree given a buffer containing the pointer to the current height of the
360 * metadata tree.
361 */
362
363static inline u64 *metapointer(struct buffer_head *bh, int *boundary,
364 unsigned int height, const struct metapath *mp)
365{
366 unsigned int head_size = (height > 0) ?
367 sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
368 u64 *ptr;
369 *boundary = 0;
370 ptr = ((u64 *)(bh->b_data + head_size)) + mp->mp_list[height];
371 if (ptr + 1 == (u64 *)(bh->b_data + bh->b_size))
372 *boundary = 1;
373 return ptr;
374}
375
/**
 * lookup_block - Get the next metadata block in metadata tree
 * @ip: The GFS2 inode
 * @bh: Buffer containing the pointers to metadata blocks
 * @height: The height of the tree (0 = dinode)
 * @mp: The metapath
 * @create: Non-zero if we may create a new metadata block
 * @new: Used to indicate if we did create a new metadata block
 * @block: the returned disk block number
 *
 * Given a metatree, complete to a particular height, checks to see if the next
 * height of the tree exists. If not the next height of the tree is created.
 * The block number of the next height of the metadata tree is returned.
 *
 * Returns: 1 when the returned pointer is the last in its buffer
 * (a "boundary"), 0 otherwise.
 */

static int lookup_block(struct gfs2_inode *ip, struct buffer_head *bh,
			unsigned int height, struct metapath *mp, int create,
			int *new, u64 *block)
{
	int boundary;
	u64 *ptr = metapointer(bh, &boundary, height, mp);

	/* Pointer already populated: just decode it. */
	if (*ptr) {
		*block = be64_to_cpu(*ptr);
		return boundary;
	}

	*block = 0;

	if (!create)
		return 0;

	/* At the last level of a regular file's tree the new block is
	 * data; everywhere else (and for directories) it is metadata. */
	if (height == ip->i_di.di_height - 1 && !gfs2_is_dir(ip))
		*block = gfs2_alloc_data(ip);
	else
		*block = gfs2_alloc_meta(ip);

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	*ptr = cpu_to_be64(*block);
	ip->i_di.di_blocks++;

	*new = 1;
	return 0;
}
422
423/**
424 * gfs2_block_pointers - Map a block from an inode to a disk block
425 * @inode: The inode
426 * @lblock: The logical block number
427 * @map_bh: The bh to be mapped
428 * @mp: metapath to use
429 *
430 * Find the block number on the current device which corresponds to an
431 * inode's block. If the block had to be created, "new" will be set.
432 *
433 * Returns: errno
434 */
435
436static int gfs2_block_pointers(struct inode *inode, u64 lblock, int create,
437 struct buffer_head *bh_map, struct metapath *mp,
438 unsigned int maxlen)
439{
440 struct gfs2_inode *ip = GFS2_I(inode);
441 struct gfs2_sbd *sdp = GFS2_SB(inode);
442 struct buffer_head *bh;
443 unsigned int bsize;
444 unsigned int height;
445 unsigned int end_of_metadata;
446 unsigned int x;
447 int error = 0;
448 int new = 0;
449 u64 dblock = 0;
450 int boundary;
451
452 BUG_ON(maxlen == 0);
453
454 if (gfs2_assert_warn(sdp, !gfs2_is_stuffed(ip)))
455 return 0;
456
457 bsize = gfs2_is_dir(ip) ? sdp->sd_jbsize : sdp->sd_sb.sb_bsize;
458
459 height = calc_tree_height(ip, (lblock + 1) * bsize);
460 if (ip->i_di.di_height < height) {
461 if (!create)
462 return 0;
463
464 error = build_height(inode, height);
465 if (error)
466 return error;
467 }
468
469 find_metapath(ip, lblock, mp);
470 end_of_metadata = ip->i_di.di_height - 1;
471
472 error = gfs2_meta_inode_buffer(ip, &bh);
473 if (error)
474 return error;
475
476 for (x = 0; x < end_of_metadata; x++) {
477 lookup_block(ip, bh, x, mp, create, &new, &dblock);
478 brelse(bh);
479 if (!dblock)
480 return 0;
481
482 error = gfs2_meta_indirect_buffer(ip, x+1, dblock, new, &bh);
483 if (error)
484 return error;
485 }
486
487 boundary = lookup_block(ip, bh, end_of_metadata, mp, create, &new, &dblock);
488 clear_buffer_mapped(bh_map);
489 clear_buffer_new(bh_map);
490 clear_buffer_boundary(bh_map);
491
492 if (dblock) {
493 map_bh(bh_map, inode->i_sb, dblock);
494 if (boundary)
495 set_buffer_boundary(bh);
496 if (new) {
497 struct buffer_head *dibh;
498 error = gfs2_meta_inode_buffer(ip, &dibh);
499 if (!error) {
500 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
501 gfs2_dinode_out(&ip->i_di, dibh->b_data);
502 brelse(dibh);
503 }
504 set_buffer_new(bh_map);
505 goto out_brelse;
506 }
507 while(--maxlen && !buffer_boundary(bh_map)) {
508 u64 eblock;
509
510 mp->mp_list[end_of_metadata]++;
511 boundary = lookup_block(ip, bh, end_of_metadata, mp, 0, &new, &eblock);
512 if (eblock != ++dblock)
513 break;
514 bh_map->b_size += (1 << inode->i_blkbits);
515 if (boundary)
516 set_buffer_boundary(bh_map);
517 }
518 }
519out_brelse:
520 brelse(bh);
521 return 0;
522}
523
524
525static inline void bmap_lock(struct inode *inode, int create)
526{
527 struct gfs2_inode *ip = GFS2_I(inode);
528 if (create)
529 down_write(&ip->i_rw_mutex);
530 else
531 down_read(&ip->i_rw_mutex);
532}
533
534static inline void bmap_unlock(struct inode *inode, int create)
535{
536 struct gfs2_inode *ip = GFS2_I(inode);
537 if (create)
538 up_write(&ip->i_rw_mutex);
539 else
540 up_read(&ip->i_rw_mutex);
541}
542
543int gfs2_block_map(struct inode *inode, u64 lblock, int create,
544 struct buffer_head *bh, unsigned int maxlen)
545{
546 struct metapath mp;
547 int ret;
548
549 bmap_lock(inode, create);
550 ret = gfs2_block_pointers(inode, lblock, create, bh, &mp, maxlen);
551 bmap_unlock(inode, create);
552 return ret;
553}
554
555int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
556{
557 struct metapath mp;
558 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0, .b_size = 0 };
559 int ret;
560 int create = *new;
561
562 BUG_ON(!extlen);
563 BUG_ON(!dblock);
564 BUG_ON(!new);
565
566 bmap_lock(inode, create);
567 ret = gfs2_block_pointers(inode, lblock, create, &bh, &mp, 32);
568 bmap_unlock(inode, create);
569 *extlen = bh.b_size >> inode->i_blkbits;
570 *dblock = bh.b_blocknr;
571 if (buffer_new(&bh))
572 *new = 1;
573 else
574 *new = 0;
575 return ret;
576}
577
/**
 * recursive_scan - recursively scan through the end of a file
 * @ip: the inode
 * @dibh: the dinode buffer
 * @mp: the path through the metadata to the point to start
 * @height: the height the recursion is at
 * @block: the indirect block to look at
 * @first: 1 if this is the first block
 * @bc: the call to make for each piece of metadata
 * @data: data opaque to this function to pass to @bc
 *
 * When this is first called @height and @block should be zero and
 * @first should be 1.
 *
 * Walks the metadata tree depth-first, invoking @bc once on each buffer
 * of block pointers, starting with the dinode's own pointer area.
 *
 * Returns: errno
 */

static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
			  struct metapath *mp, unsigned int height,
			  u64 block, int first, block_call_t bc,
			  void *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh = NULL;
	u64 *top, *bottom;
	u64 bn;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (!height) {
		/* At the root the pointer array lives in the dinode block
		   itself, immediately after struct gfs2_dinode. */
		error = gfs2_meta_inode_buffer(ip, &bh);
		if (error)
			return error;
		dibh = bh;

		top = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
		bottom = (u64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
	} else {
		/* Indirect blocks carry only a meta header before the
		   pointer array.  The leftmost block at each height is
		   scanned from the metapath offset; later siblings are
		   scanned from their beginning. */
		error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh);
		if (error)
			return error;

		top = (u64 *)(bh->b_data + mh_size) +
			(first ? mp->mp_list[height] : 0);

		bottom = (u64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
	}

	error = bc(ip, dibh, bh, top, bottom, height, data);
	if (error)
		goto out;

	/* Recurse into children unless this is the last metadata level,
	   whose pointers already reference data blocks. */
	if (height < ip->i_di.di_height - 1)
		for (; top < bottom; top++, first = 0) {
			if (!*top)
				continue;

			bn = be64_to_cpu(*top);

			error = recursive_scan(ip, dibh, mp, height + 1, bn,
					       first, bc, data);
			if (error)
				break;
		}

out:
	brelse(bh);
	return error;
}
647
/**
 * do_strip - Look for a particular layer of the file and strip it off
 * @ip: the inode
 * @dibh: the dinode buffer
 * @bh: A buffer of pointers
 * @top: The first pointer in the buffer
 * @bottom: One more than the last pointer
 * @height: the height this buffer is at
 * @data: a pointer to a struct strip_mine
 *
 * Called back from recursive_scan() once per pointer buffer.  Only
 * buffers at sm->sm_height are acted on; the rest return immediately.
 * Works in two passes over the same pointer range: the first collects
 * the resource groups that will be touched (so their glocks can all be
 * acquired before the transaction starts), the second actually frees
 * the blocks and zeroes the pointers.
 *
 * Returns: errno
 */

static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
		    struct buffer_head *bh, u64 *top, u64 *bottom,
		    unsigned int height, void *data)
{
	struct strip_mine *sm = data;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	u64 bn, bstart;
	u32 blen;
	u64 *p;
	unsigned int rg_blocks = 0;
	int metadata;
	unsigned int revokes = 0;
	int x;
	int error;

	if (!*top)
		sm->sm_first = 0;

	if (height != sm->sm_height)
		return 0;

	/* On the very first buffer, the first pointer is the block being
	   kept (the truncation point), so skip it. */
	if (sm->sm_first) {
		top++;
		sm->sm_first = 0;
	}

	/* Pointers below the last metadata level reference metadata
	   blocks, which need journal revokes when freed. */
	metadata = (height != ip->i_di.di_height - 1);
	if (metadata)
		revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;

	error = gfs2_rindex_hold(sdp, &ip->i_alloc.al_ri_gh);
	if (error)
		return error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
	bstart = 0;
	blen = 0;

	/* Pass 1: coalesce runs of contiguous blocks and record which
	   resource groups they belong to. */
	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(sdp, &rlist, bstart);

			bstart = bn;
			blen = 1;
		}
	}

	if (bstart)
		gfs2_rlist_add(sdp, &rlist, bstart);
	else
		goto out; /* Nothing to do */

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);

	/* Budget enough journal space for every rgrp bitmap we may dirty. */
	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_ri.ri_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
				 RES_INDIRECT + RES_STATFS + RES_QUOTA,
				 revokes);
	if (error)
		goto out_rg_gunlock;

	down_write(&ip->i_rw_mutex);

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	bstart = 0;
	blen = 0;

	/* Pass 2: free the same runs and clear the pointers. */
	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart) {
				if (metadata)
					gfs2_free_meta(ip, bstart, blen);
				else
					gfs2_free_data(ip, bstart, blen);
			}

			bstart = bn;
			blen = 1;
		}

		*p = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
	}
	if (bstart) {
		if (metadata)
			gfs2_free_meta(ip, bstart, blen);
		else
			gfs2_free_data(ip, bstart, blen);
	}

	ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();

	gfs2_dinode_out(&ip->i_di, dibh->b_data);

	up_write(&ip->i_rw_mutex);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
	gfs2_rlist_free(&rlist);
out:
	gfs2_glock_dq_uninit(&ip->i_alloc.al_ri_gh);
	return error;
}
796
/**
 * do_grow - Make a file look bigger than it is
 * @ip: the inode
 * @size: the size to set the file to
 *
 * Called with an exclusive lock on @ip.
 *
 * Reserves quota and allocator resources, unstuffs the dinode and/or
 * adds metadata-tree height if the new size requires it, then updates
 * di_size and the timestamps.  No data blocks are allocated here; the
 * file merely "looks" bigger (sparse growth).
 *
 * Returns: errno
 */

static int do_grow(struct gfs2_inode *ip, u64 size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct buffer_head *dibh;
	unsigned int h;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
	if (error)
		goto out_gunlock_q;

	/* Worst case: one new indirect block per tree level plus data. */
	al->al_requested = sdp->sd_max_height + RES_DATA;

	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_gunlock_q;

	error = gfs2_trans_begin(sdp,
		sdp->sd_max_height + al->al_rgd->rd_ri.ri_length +
		RES_JDATA + RES_DINODE + RES_STATFS + RES_QUOTA, 0);
	if (error)
		goto out_ipres;

	/* Only files too big to remain stuffed need real metadata work. */
	if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
		if (gfs2_is_stuffed(ip)) {
			error = gfs2_unstuff_dinode(ip, NULL);
			if (error)
				goto out_end_trans;
		}

		h = calc_tree_height(ip, size);
		if (ip->i_di.di_height < h) {
			down_write(&ip->i_rw_mutex);
			error = build_height(&ip->i_inode, h);
			up_write(&ip->i_rw_mutex);
			if (error)
				goto out_end_trans;
		}
	}

	ip->i_di.di_size = size;
	ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out_end_trans;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_gunlock_q:
	gfs2_quota_unlock(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}
875
876
877/**
878 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
879 *
880 * This is partly borrowed from ext3.
881 */
882static int gfs2_block_truncate_page(struct address_space *mapping)
883{
884 struct inode *inode = mapping->host;
885 struct gfs2_inode *ip = GFS2_I(inode);
886 struct gfs2_sbd *sdp = GFS2_SB(inode);
887 loff_t from = inode->i_size;
888 unsigned long index = from >> PAGE_CACHE_SHIFT;
889 unsigned offset = from & (PAGE_CACHE_SIZE-1);
890 unsigned blocksize, iblock, length, pos;
891 struct buffer_head *bh;
892 struct page *page;
893 void *kaddr;
894 int err;
895
896 page = grab_cache_page(mapping, index);
897 if (!page)
898 return 0;
899
900 blocksize = inode->i_sb->s_blocksize;
901 length = blocksize - (offset & (blocksize - 1));
902 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
903
904 if (!page_has_buffers(page))
905 create_empty_buffers(page, blocksize, 0);
906
907 /* Find the buffer that contains "offset" */
908 bh = page_buffers(page);
909 pos = blocksize;
910 while (offset >= pos) {
911 bh = bh->b_this_page;
912 iblock++;
913 pos += blocksize;
914 }
915
916 err = 0;
917
918 if (!buffer_mapped(bh)) {
919 gfs2_get_block(inode, iblock, bh, 0);
920 /* unmapped? It's a hole - nothing to do */
921 if (!buffer_mapped(bh))
922 goto unlock;
923 }
924
925 /* Ok, it's mapped. Make sure it's up-to-date */
926 if (PageUptodate(page))
927 set_buffer_uptodate(bh);
928
929 if (!buffer_uptodate(bh)) {
930 err = -EIO;
931 ll_rw_block(READ, 1, &bh);
932 wait_on_buffer(bh);
933 /* Uhhuh. Read error. Complain and punt. */
934 if (!buffer_uptodate(bh))
935 goto unlock;
936 }
937
938 if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip))
939 gfs2_trans_add_bh(ip->i_gl, bh, 0);
940
941 kaddr = kmap_atomic(page, KM_USER0);
942 memset(kaddr + offset, 0, length);
943 flush_dcache_page(page);
944 kunmap_atomic(kaddr, KM_USER0);
945
946unlock:
947 unlock_page(page);
948 page_cache_release(page);
949 return err;
950}
951
/**
 * trunc_start - Begin a shrinking truncate
 * @ip: the inode being truncated
 * @size: the new (smaller) size
 *
 * Handles the in-block part of the shrink: for a stuffed inode the tail
 * of the dinode block is cleared directly; otherwise the last partial
 * block is zeroed and GFS2_DIF_TRUNC_IN_PROG is set so an interrupted
 * truncate can be resumed after a crash.
 *
 * Returns: 1 if the truncate was completed here (stuffed case),
 *          0 if trunc_dealloc()/trunc_end() must follow,
 *          or a negative errno.
 */
static int trunc_start(struct gfs2_inode *ip, u64 size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int journaled = gfs2_is_jdata(ip);
	int error;

	error = gfs2_trans_begin(sdp,
				 RES_DINODE + (journaled ? RES_JDATA : 0), 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (gfs2_is_stuffed(ip)) {
		/* All data lives in the dinode block: just clear the tail. */
		ip->i_di.di_size = size;
		ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + size);
		error = 1;

	} else {
		/* Zero the partial final block only if @size is unaligned. */
		if (size & (u64)(sdp->sd_sb.sb_bsize - 1))
			error = gfs2_block_truncate_page(ip->i_inode.i_mapping);

		if (!error) {
			ip->i_di.di_size = size;
			ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
			ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG;
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			gfs2_dinode_out(&ip->i_di, dibh->b_data);
		}
	}

	brelse(dibh);

out:
	gfs2_trans_end(sdp);
	return error;
}
995
/**
 * trunc_dealloc - Free the blocks beyond a (possibly zero) new size
 * @ip: the inode
 * @size: the new size; 0 means deallocate everything
 *
 * Strips the metadata tree one height level at a time, top level first,
 * using recursive_scan()/do_strip() for each level.
 *
 * Returns: errno
 */
static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
{
	unsigned int height = ip->i_di.di_height;
	u64 lblock;
	struct metapath mp;
	int error;

	if (!size)
		lblock = 0;
	else
		lblock = (size - 1) >> GFS2_SB(&ip->i_inode)->sd_sb.sb_bsize_shift;

	find_metapath(ip, lblock, &mp);
	gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	/* Strip level by level; sm_first tells do_strip to keep the
	   block at the truncation point when size is non-zero. */
	while (height--) {
		struct strip_mine sm;
		sm.sm_first = !!size;
		sm.sm_height = height;

		error = recursive_scan(ip, NULL, &mp, 0, 0, 1, do_strip, &sm);
		if (error)
			break;
	}

	gfs2_quota_unhold(ip);

out:
	gfs2_alloc_put(ip);
	return error;
}
1031
/**
 * trunc_end - Finish a shrinking truncate
 * @ip: the inode
 *
 * Resets tree height and allocation goals when the file is now empty,
 * clears GFS2_DIF_TRUNC_IN_PROG, and writes the dinode back.
 *
 * Returns: errno
 */
static int trunc_end(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (!ip->i_di.di_size) {
		/* Empty file: collapse the tree and point the allocation
		   goals back at the inode's own address. */
		ip->i_di.di_height = 0;
		ip->i_di.di_goal_meta =
			ip->i_di.di_goal_data =
			ip->i_num.no_addr;
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	}
	ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
	ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);

out:
	up_write(&ip->i_rw_mutex);
	gfs2_trans_end(sdp);
	return error;
}
1067
1068/**
1069 * do_shrink - make a file smaller
1070 * @ip: the inode
1071 * @size: the size to make the file
1072 * @truncator: function to truncate the last partial block
1073 *
1074 * Called with an exclusive lock on @ip.
1075 *
1076 * Returns: errno
1077 */
1078
1079static int do_shrink(struct gfs2_inode *ip, u64 size)
1080{
1081 int error;
1082
1083 error = trunc_start(ip, size);
1084 if (error < 0)
1085 return error;
1086 if (error > 0)
1087 return 0;
1088
1089 error = trunc_dealloc(ip, size);
1090 if (!error)
1091 error = trunc_end(ip);
1092
1093 return error;
1094}
1095
1096/**
1097 * gfs2_truncatei - make a file a given size
1098 * @ip: the inode
1099 * @size: the size to make the file
1100 * @truncator: function to truncate the last partial block
1101 *
1102 * The file size can grow, shrink, or stay the same size.
1103 *
1104 * Returns: errno
1105 */
1106
1107int gfs2_truncatei(struct gfs2_inode *ip, u64 size)
1108{
1109 int error;
1110
1111 if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_di.di_mode)))
1112 return -EINVAL;
1113
1114 if (size > ip->i_di.di_size)
1115 error = do_grow(ip, size);
1116 else
1117 error = do_shrink(ip, size);
1118
1119 return error;
1120}
1121
1122int gfs2_truncatei_resume(struct gfs2_inode *ip)
1123{
1124 int error;
1125 error = trunc_dealloc(ip, ip->i_di.di_size);
1126 if (!error)
1127 error = trunc_end(ip);
1128 return error;
1129}
1130
/* Free every data and metadata block of the file (truncate to zero,
   deallocation phase only). */
int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	return trunc_dealloc(ip, 0);
}
1135
1136/**
1137 * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file
1138 * @ip: the file
1139 * @len: the number of bytes to be written to the file
1140 * @data_blocks: returns the number of data blocks required
1141 * @ind_blocks: returns the number of indirect blocks required
1142 *
1143 */
1144
1145void gfs2_write_calc_reserv(struct gfs2_inode *ip, unsigned int len,
1146 unsigned int *data_blocks, unsigned int *ind_blocks)
1147{
1148 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1149 unsigned int tmp;
1150
1151 if (gfs2_is_dir(ip)) {
1152 *data_blocks = DIV_ROUND_UP(len, sdp->sd_jbsize) + 2;
1153 *ind_blocks = 3 * (sdp->sd_max_jheight - 1);
1154 } else {
1155 *data_blocks = (len >> sdp->sd_sb.sb_bsize_shift) + 3;
1156 *ind_blocks = 3 * (sdp->sd_max_height - 1);
1157 }
1158
1159 for (tmp = *data_blocks; tmp > sdp->sd_diptrs;) {
1160 tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
1161 *ind_blocks += tmp;
1162 }
1163}
1164
/**
 * gfs2_write_alloc_required - figure out if a write will require an allocation
 * @ip: the file being written to
 * @offset: the offset to write to
 * @len: the number of bytes being written
 * @alloc_required: set to 1 if an alloc is required, 0 otherwise
 *
 * Returns: errno
 */

int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len, int *alloc_required)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u64 lblock, lblock_stop, dblock;
	u32 extlen;
	int new = 0;
	int error = 0;

	*alloc_required = 0;

	if (!len)
		return 0;

	/* A stuffed inode needs allocation only if the write would no
	   longer fit inside the dinode block. */
	if (gfs2_is_stuffed(ip)) {
		if (offset + len >
		    sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
			*alloc_required = 1;
		return 0;
	}

	/* Work out the logical block range [lblock, lblock_stop) covered
	   by the write, using the jdata block size for directories. */
	if (gfs2_is_dir(ip)) {
		unsigned int bsize = sdp->sd_jbsize;
		lblock = offset;
		do_div(lblock, bsize);
		lblock_stop = offset + len + bsize - 1;
		do_div(lblock_stop, bsize);
	} else {
		unsigned int shift = sdp->sd_sb.sb_bsize_shift;
		lblock = offset >> shift;
		lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
	}

	/* Any unmapped block in the range means allocation is needed. */
	for (; lblock < lblock_stop; lblock += extlen) {
		error = gfs2_extent_map(&ip->i_inode, lblock, &new, &dblock, &extlen);
		if (error)
			return error;

		if (!dblock) {
			*alloc_required = 1;
			return 0;
		}
	}

	return 0;
}
1221
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
new file mode 100644
index 000000000000..0fd379b4cd9e
--- /dev/null
+++ b/fs/gfs2/bmap.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __BMAP_DOT_H__
11#define __BMAP_DOT_H__
12
13struct inode;
14struct gfs2_inode;
15struct page;
16
17int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
18int gfs2_block_map(struct inode *inode, u64 lblock, int create, struct buffer_head *bh, unsigned int maxlen);
19int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen);
20
21int gfs2_truncatei(struct gfs2_inode *ip, u64 size);
22int gfs2_truncatei_resume(struct gfs2_inode *ip);
23int gfs2_file_dealloc(struct gfs2_inode *ip);
24
25void gfs2_write_calc_reserv(struct gfs2_inode *ip, unsigned int len,
26 unsigned int *data_blocks,
27 unsigned int *ind_blocks);
28int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
29 unsigned int len, int *alloc_required);
30
31#endif /* __BMAP_DOT_H__ */
diff --git a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c
new file mode 100644
index 000000000000..cab1f68d4685
--- /dev/null
+++ b/fs/gfs2/daemon.c
@@ -0,0 +1,196 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/kthread.h>
16#include <linux/delay.h>
17#include <linux/gfs2_ondisk.h>
18#include <linux/lm_interface.h>
19
20#include "gfs2.h"
21#include "incore.h"
22#include "daemon.h"
23#include "glock.h"
24#include "log.h"
25#include "quota.h"
26#include "recovery.h"
27#include "super.h"
28#include "util.h"
29
30/* This uses schedule_timeout() instead of msleep() because it's good for
31 the daemons to wake up more often than the timeout when unmounting so
32 the user's unmount doesn't sit there forever.
33
34 The kthread functions used to start these daemons block and flush signals. */
35
36/**
37 * gfs2_scand - Look for cached glocks and inodes to toss from memory
38 * @sdp: Pointer to GFS2 superblock
39 *
40 * One of these daemons runs, finding candidates to add to sd_reclaim_list.
41 * See gfs2_glockd()
42 */
43
44int gfs2_scand(void *data)
45{
46 struct gfs2_sbd *sdp = data;
47 unsigned long t;
48
49 while (!kthread_should_stop()) {
50 gfs2_scand_internal(sdp);
51 t = gfs2_tune_get(sdp, gt_scand_secs) * HZ;
52 schedule_timeout_interruptible(t);
53 }
54
55 return 0;
56}
57
/**
 * gfs2_glockd - Reclaim unused glock structures
 * @data: Pointer to GFS2 superblock
 *
 * One or more of these daemons run, reclaiming glocks on sd_reclaim_list.
 * Number of daemons can be set by user, with num_glockd mount option.
 */

int gfs2_glockd(void *data)
{
	struct gfs2_sbd *sdp = data;

	while (!kthread_should_stop()) {
		/* Drain the reclaim list, then sleep until either more
		   work arrives or we are asked to stop. */
		while (atomic_read(&sdp->sd_reclaim_count))
			gfs2_reclaim_glock(sdp);

		wait_event_interruptible(sdp->sd_reclaim_wq,
					 (atomic_read(&sdp->sd_reclaim_count) ||
					 kthread_should_stop()));
	}

	return 0;
}
81
82/**
83 * gfs2_recoverd - Recover dead machine's journals
84 * @sdp: Pointer to GFS2 superblock
85 *
86 */
87
88int gfs2_recoverd(void *data)
89{
90 struct gfs2_sbd *sdp = data;
91 unsigned long t;
92
93 while (!kthread_should_stop()) {
94 gfs2_check_journals(sdp);
95 t = gfs2_tune_get(sdp, gt_recoverd_secs) * HZ;
96 schedule_timeout_interruptible(t);
97 }
98
99 return 0;
100}
101
/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @data: Pointer to GFS2 superblock
 *
 * Also, periodically check to make sure that we're using the most recent
 * journal index.
 */

int gfs2_logd(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_holder ji_gh;
	unsigned long t;

	while (!kthread_should_stop()) {
		/* Advance the log tail */

		t = sdp->sd_log_flush_time +
		    gfs2_tune_get(sdp, gt_log_flush_secs) * HZ;

		gfs2_ail1_empty(sdp, DIO_ALL);

		/* Flush the log once gt_log_flush_secs have elapsed
		   since the last flush. */
		if (time_after_eq(jiffies, t)) {
			gfs2_log_flush(sdp, NULL);
			sdp->sd_log_flush_time = jiffies;
		}

		/* Check for latest journal index */

		t = sdp->sd_jindex_refresh_time +
		    gfs2_tune_get(sdp, gt_jindex_refresh_secs) * HZ;

		/* Taking and dropping the jindex glock is what forces a
		   re-read of the journal index if it has changed. */
		if (time_after_eq(jiffies, t)) {
			if (!gfs2_jindex_hold(sdp, &ji_gh))
				gfs2_glock_dq_uninit(&ji_gh);
			sdp->sd_jindex_refresh_time = jiffies;
		}

		t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
		schedule_timeout_interruptible(t);
	}

	return 0;
}
146
/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 *
 * Also periodically syncs the master statfs file and scans for stale
 * quota entries.
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	unsigned long t;
	int error;

	while (!kthread_should_stop()) {
		/* Update the master statfs file */

		t = sdp->sd_statfs_sync_time +
		    gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;

		if (time_after_eq(jiffies, t)) {
			error = gfs2_statfs_sync(sdp);
			/* -EROFS and shutdown are expected conditions,
			   not worth logging. */
			if (error &&
			    error != -EROFS &&
			    !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
				fs_err(sdp, "quotad: (1) error=%d\n", error);
			sdp->sd_statfs_sync_time = jiffies;
		}

		/* Update quota file */

		t = sdp->sd_quota_sync_time +
		    gfs2_tune_get(sdp, gt_quota_quantum) * HZ;

		if (time_after_eq(jiffies, t)) {
			error = gfs2_quota_sync(sdp);
			if (error &&
			    error != -EROFS &&
			    !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
				fs_err(sdp, "quotad: (2) error=%d\n", error);
			sdp->sd_quota_sync_time = jiffies;
		}

		gfs2_quota_scan(sdp);

		t = gfs2_tune_get(sdp, gt_quotad_secs) * HZ;
		schedule_timeout_interruptible(t);
	}

	return 0;
}
196
diff --git a/fs/gfs2/daemon.h b/fs/gfs2/daemon.h
new file mode 100644
index 000000000000..801007120fb2
--- /dev/null
+++ b/fs/gfs2/daemon.h
@@ -0,0 +1,19 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __DAEMON_DOT_H__
11#define __DAEMON_DOT_H__
12
13int gfs2_scand(void *data);
14int gfs2_glockd(void *data);
15int gfs2_recoverd(void *data);
16int gfs2_logd(void *data);
17int gfs2_quotad(void *data);
18
19#endif /* __DAEMON_DOT_H__ */
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
new file mode 100644
index 000000000000..459498cac93b
--- /dev/null
+++ b/fs/gfs2/dir.c
@@ -0,0 +1,1961 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10/*
11 * Implements Extendible Hashing as described in:
12 * "Extendible Hashing" by Fagin, et al in
13 * __ACM Trans. on Database Systems__, Sept 1979.
14 *
15 *
16 * Here's the layout of dirents which is essentially the same as that of ext2
17 * within a single block. The field de_name_len is the number of bytes
18 * actually required for the name (no null terminator). The field de_rec_len
19 * is the number of bytes allocated to the dirent. The offset of the next
20 * dirent in the block is (dirent + dirent->de_rec_len). When a dirent is
21 * deleted, the preceding dirent inherits its allocated space, ie
22 * prev->de_rec_len += deleted->de_rec_len. Since the next dirent is obtained
23 * by adding de_rec_len to the current dirent, this essentially causes the
24 * deleted dirent to get jumped over when iterating through all the dirents.
25 *
26 * When deleting the first dirent in a block, there is no previous dirent so
27 * the field de_ino is set to zero to designate it as deleted. When allocating
28 * a dirent, gfs2_dirent_alloc iterates through the dirents in a block. If the
29 * first dirent has (de_ino == 0) and de_rec_len is large enough, this first
30 * dirent is allocated. Otherwise it must go through all the 'used' dirents
31 * searching for one in which the amount of total space minus the amount of
32 * used space will provide enough space for the new dirent.
33 *
34 * There are two types of blocks in which dirents reside. In a stuffed dinode,
35 * the dirents begin at offset sizeof(struct gfs2_dinode) from the beginning of
36 * the block. In leaves, they begin at offset sizeof(struct gfs2_leaf) from the
37 * beginning of the leaf block. The dirents reside in leaves when
38 *
39 * dip->i_di.di_flags & GFS2_DIF_EXHASH is true
40 *
41 * Otherwise, the dirents are "linear", within a single stuffed dinode block.
42 *
43 * When the dirents are in leaves, the actual contents of the directory file are
44 * used as an array of 64-bit block pointers pointing to the leaf blocks. The
45 * dirents are NOT in the directory file itself. There can be more than one
46 * block pointer in the array that points to the same leaf. In fact, when a
47 * directory is first converted from linear to exhash, all of the pointers
48 * point to the same leaf.
49 *
50 * When a leaf is completely full, the size of the hash table can be
51 * doubled unless it is already at the maximum size which is hard coded into
52 * GFS2_DIR_MAX_DEPTH. After that, leaves are chained together in a linked list,
53 * but never before the maximum hash table size has been reached.
54 */
55
56#include <linux/sched.h>
57#include <linux/slab.h>
58#include <linux/spinlock.h>
59#include <linux/buffer_head.h>
60#include <linux/sort.h>
61#include <linux/gfs2_ondisk.h>
62#include <linux/crc32.h>
63#include <linux/vmalloc.h>
64#include <linux/lm_interface.h>
65
66#include "gfs2.h"
67#include "incore.h"
68#include "dir.h"
69#include "glock.h"
70#include "inode.h"
71#include "meta_io.h"
72#include "quota.h"
73#include "rgrp.h"
74#include "trans.h"
75#include "bmap.h"
76#include "util.h"
77
78#define IS_LEAF 1 /* Hashed (leaf) directory */
79#define IS_DINODE 2 /* Linear (stuffed dinode block) directory */
80
81#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
82#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
83
84typedef int (*leaf_call_t) (struct gfs2_inode *dip, u32 index, u32 len,
85 u64 leaf_no, void *data);
86typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
87 const struct qstr *name, void *opaque);
88
89
/* Create a brand-new journaled-data (JD) directory block at @block,
   add it to the current transaction, stamp its metatype header and
   zero the rest.  Always returns 0.
   NOTE(review): gfs2_meta_new() is assumed to block rather than return
   NULL on allocation pressure — confirm against its definition. */
int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
			    struct buffer_head **bhp)
{
	struct buffer_head *bh;

	bh = gfs2_meta_new(ip->i_gl, block);
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_metatype_set(bh, GFS2_METATYPE_JD, GFS2_FORMAT_JD);
	gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
	*bhp = bh;
	return 0;
}
102
/* Read an existing directory block at @block and verify it carries the
   journaled-data (JD) metatype; returns -EIO on a type mismatch. */
static int gfs2_dir_get_existing_buffer(struct gfs2_inode *ip, u64 block,
					struct buffer_head **bhp)
{
	struct buffer_head *bh;
	int error;

	error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, &bh);
	if (error)
		return error;
	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) {
		brelse(bh);
		return -EIO;
	}
	*bhp = bh;
	return 0;
}
119
/* Write @size bytes at @offset directly into a stuffed directory's
   dinode block and update di_size/timestamps.  The caller guarantees
   the data fits inside the dinode block.  Returns @size on success. */
static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
				  unsigned int offset, unsigned int size)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
	if (ip->i_di.di_size < offset + size)
		ip->i_di.di_size = offset + size;
	ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
	gfs2_dinode_out(&ip->i_di, dibh->b_data);

	brelse(dibh);

	return size;
}
141
142
143
/**
 * gfs2_dir_write_data - Write directory information to the inode
 * @ip: The GFS2 inode
 * @buf: The buffer containing information to be written
 * @offset: The file offset to start writing at
 * @size: The amount of data to write
 *
 * Returns: The number of bytes correctly written or error code
 */
static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
			       u64 offset, unsigned int size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	u64 lblock, dblock;
	u32 extlen = 0;
	unsigned int o;
	int copied = 0;
	int error = 0;

	if (!size)
		return 0;

	/* Fast path: data still fits in the stuffed dinode block. */
	if (gfs2_is_stuffed(ip) &&
	    offset + size <= sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
		return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset,
					      size);

	/* Directory content beyond the stuffed case must be jdata. */
	if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
		return -EINVAL;

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			return error;
	}

	/* Split the byte offset into a jdata logical block number and
	   an offset within that block (past the meta header). */
	lblock = offset;
	o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);

	while (copied < size) {
		unsigned int amount;
		struct buffer_head *bh;
		int new;

		amount = size - copied;
		if (amount > sdp->sd_sb.sb_bsize - o)
			amount = sdp->sd_sb.sb_bsize - o;

		/* Map (allocating if needed) the next physical extent. */
		if (!extlen) {
			new = 1;
			error = gfs2_extent_map(&ip->i_inode, lblock, &new,
						&dblock, &extlen);
			if (error)
				goto fail;
			error = -EIO;
			if (gfs2_assert_withdraw(sdp, dblock))
				goto fail;
		}

		/* A freshly-allocated block, or one we will overwrite
		   completely, need not be read from disk first. */
		if (amount == sdp->sd_jbsize || new)
			error = gfs2_dir_get_new_buffer(ip, dblock, &bh);
		else
			error = gfs2_dir_get_existing_buffer(ip, dblock, &bh);

		if (error)
			goto fail;

		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		memcpy(bh->b_data + o, buf, amount);
		brelse(bh);
		/* NOTE(review): error was already checked above and cannot
		   have changed here; this re-check looks redundant. */
		if (error)
			goto fail;

		buf += amount;
		copied += amount;
		lblock++;
		dblock++;
		extlen--;

		/* Subsequent blocks are written from just past the header. */
		o = sizeof(struct gfs2_meta_header);
	}

out:
	/* Update di_size and timestamps for however much was written. */
	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	if (ip->i_di.di_size < offset + copied)
		ip->i_di.di_size = offset + copied;
	ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);

	return copied;
fail:
	/* A partial write still counts as success for the copied bytes. */
	if (copied)
		goto out;
	return error;
}
246
247static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, char *buf,
248 u64 offset, unsigned int size)
249{
250 struct buffer_head *dibh;
251 int error;
252
253 error = gfs2_meta_inode_buffer(ip, &dibh);
254 if (!error) {
255 offset += sizeof(struct gfs2_dinode);
256 memcpy(buf, dibh->b_data + offset, size);
257 brelse(dibh);
258 }
259
260 return (error) ? error : size;
261}
262
263
/**
 * gfs2_dir_read_data - Read data from a directory inode
 * @ip: The GFS2 Inode
 * @buf: The buffer to place the result into
 * @offset: File offset to begin reading from
 * @size: Amount of data to transfer
 * @ra: If non-zero, read ahead the whole mapped extent; otherwise read
 *      one block at a time
 *
 * Returns: The amount of data actually copied or the error
 */
273static int gfs2_dir_read_data(struct gfs2_inode *ip, char *buf, u64 offset,
274 unsigned int size, unsigned ra)
275{
276 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
277 u64 lblock, dblock;
278 u32 extlen = 0;
279 unsigned int o;
280 int copied = 0;
281 int error = 0;
282
283 if (offset >= ip->i_di.di_size)
284 return 0;
285
286 if (offset + size > ip->i_di.di_size)
287 size = ip->i_di.di_size - offset;
288
289 if (!size)
290 return 0;
291
292 if (gfs2_is_stuffed(ip))
293 return gfs2_dir_read_stuffed(ip, buf, offset, size);
294
295 if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
296 return -EINVAL;
297
298 lblock = offset;
299 o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);
300
301 while (copied < size) {
302 unsigned int amount;
303 struct buffer_head *bh;
304 int new;
305
306 amount = size - copied;
307 if (amount > sdp->sd_sb.sb_bsize - o)
308 amount = sdp->sd_sb.sb_bsize - o;
309
310 if (!extlen) {
311 new = 0;
312 error = gfs2_extent_map(&ip->i_inode, lblock, &new,
313 &dblock, &extlen);
314 if (error || !dblock)
315 goto fail;
316 BUG_ON(extlen < 1);
317 if (!ra)
318 extlen = 1;
319 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
320 }
321 if (!bh) {
322 error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, &bh);
323 if (error)
324 goto fail;
325 }
326 error = gfs2_metatype_check(sdp, bh, GFS2_METATYPE_JD);
327 if (error) {
328 brelse(bh);
329 goto fail;
330 }
331 dblock++;
332 extlen--;
333 memcpy(buf, bh->b_data + o, amount);
334 brelse(bh);
335 bh = NULL;
336 buf += amount;
337 copied += amount;
338 lblock++;
339 o = sizeof(struct gfs2_meta_header);
340 }
341
342 return copied;
343fail:
344 return (copied) ? copied : error;
345}
346
347static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
348 const struct qstr *name, int ret)
349{
350 if (dent->de_inum.no_addr != 0 &&
351 be32_to_cpu(dent->de_hash) == name->hash &&
352 be16_to_cpu(dent->de_name_len) == name->len &&
353 memcmp(dent+1, name->name, name->len) == 0)
354 return ret;
355 return 0;
356}
357
/* Scan callback: report 1 (return this entry) when @dent matches. */
static int gfs2_dirent_find(const struct gfs2_dirent *dent,
			    const struct qstr *name, void *opaque)
{
	return __gfs2_dirent_find(dent, name, 1);
}
364
/* Scan callback: report 2 (return the PREVIOUS entry) when @dent
 * matches -- used when the caller needs the predecessor. */
static int gfs2_dirent_prev(const struct gfs2_dirent *dent,
			    const struct qstr *name, void *opaque)
{
	return __gfs2_dirent_find(dent, name, 2);
}
371
372/*
373 * name->name holds ptr to start of block.
374 * name->len holds size of block.
375 */
376static int gfs2_dirent_last(const struct gfs2_dirent *dent,
377 const struct qstr *name,
378 void *opaque)
379{
380 const char *start = name->name;
381 const char *end = (const char *)dent + be16_to_cpu(dent->de_rec_len);
382 if (name->len == (end - start))
383 return 1;
384 return 0;
385}
386
387static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
388 const struct qstr *name,
389 void *opaque)
390{
391 unsigned required = GFS2_DIRENT_SIZE(name->len);
392 unsigned actual = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
393 unsigned totlen = be16_to_cpu(dent->de_rec_len);
394
395 if (!dent->de_inum.no_addr)
396 actual = GFS2_DIRENT_SIZE(0);
397 if (totlen - actual >= required)
398 return 1;
399 return 0;
400}
401
/* State for gfs2_dirent_gather(): collects pointers to every live
   dirent of a block into a caller-supplied array. */
struct dirent_gather {
	const struct gfs2_dirent **pdent;	/* array being filled */
	unsigned offset;			/* next free slot */
};
406
407static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
408 const struct qstr *name,
409 void *opaque)
410{
411 struct dirent_gather *g = opaque;
412 if (dent->de_inum.no_addr) {
413 g->pdent[g->offset++] = dent;
414 }
415 return 0;
416}
417
418/*
419 * Other possible things to check:
420 * - Inode located within filesystem size (and on valid block)
421 * - Valid directory entry type
422 * Not sure how heavy-weight we want to make this... could also check
423 * hash is correct for example, but that would take a lot of extra time.
424 * For now the most important thing is to check that the various sizes
425 * are correct.
426 */
427static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
428 unsigned int size, unsigned int len, int first)
429{
430 const char *msg = "gfs2_dirent too small";
431 if (unlikely(size < sizeof(struct gfs2_dirent)))
432 goto error;
433 msg = "gfs2_dirent misaligned";
434 if (unlikely(offset & 0x7))
435 goto error;
436 msg = "gfs2_dirent points beyond end of block";
437 if (unlikely(offset + size > len))
438 goto error;
439 msg = "zero inode number";
440 if (unlikely(!first && !dent->de_inum.no_addr))
441 goto error;
442 msg = "name length is greater than space in dirent";
443 if (dent->de_inum.no_addr &&
444 unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) >
445 size))
446 goto error;
447 return 0;
448error:
449 printk(KERN_WARNING "gfs2_check_dirent: %s (%s)\n", msg,
450 first ? "first in block" : "not first in block");
451 return -EIO;
452}
453
454static int gfs2_dirent_offset(const void *buf)
455{
456 const struct gfs2_meta_header *h = buf;
457 int offset;
458
459 BUG_ON(buf == NULL);
460
461 switch(be32_to_cpu(h->mh_type)) {
462 case GFS2_METATYPE_LF:
463 offset = sizeof(struct gfs2_leaf);
464 break;
465 case GFS2_METATYPE_DI:
466 offset = sizeof(struct gfs2_dinode);
467 break;
468 default:
469 goto wrong_type;
470 }
471 return offset;
472wrong_type:
473 printk(KERN_WARNING "gfs2_scan_dirent: wrong block type %u\n",
474 be32_to_cpu(h->mh_type));
475 return -1;
476}
477
/*
 * gfs2_dirent_scan - run @scan over every dirent in one block
 * @inode: the directory inode (only used for consistency reporting)
 * @buf: start of the block data (leaf or stuffed dinode)
 * @len: length of the block data
 * @scan: per-entry callback; its return value steers the scan:
 *        0 = keep going, 1 = return the current entry, 2 = return the
 *        previous entry, <0 = abort with that error
 * @name: name argument forwarded to @scan
 * @opaque: opaque argument forwarded to @scan
 *
 * Returns: the selected dirent, NULL if the scan reached the end of
 * the block without @scan claiming anything, or an ERR_PTR.  Any
 * structural damage found marks the inode inconsistent.
 */
static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
					    unsigned int len, gfs2_dscan_t scan,
					    const struct qstr *name,
					    void *opaque)
{
	struct gfs2_dirent *dent, *prev;
	unsigned offset;
	unsigned size;
	int ret = 0;

	/* Where the first dirent lives depends on the block type. */
	ret = gfs2_dirent_offset(buf);
	if (ret < 0)
		goto consist_inode;

	offset = ret;
	prev = NULL;
	dent = buf + offset;
	size = be16_to_cpu(dent->de_rec_len);
	if (gfs2_check_dirent(dent, offset, size, len, 1))
		goto consist_inode;
	do {
		ret = scan(dent, name, opaque);
		if (ret)
			break;
		offset += size;
		/* Records that end exactly at the block end terminate
		   the scan. */
		if (offset == len)
			break;
		prev = dent;
		dent = buf + offset;
		size = be16_to_cpu(dent->de_rec_len);
		if (gfs2_check_dirent(dent, offset, size, len, 0))
			goto consist_inode;
	} while(1);

	switch(ret) {
	case 0:
		return NULL;
	case 1:
		return dent;
	case 2:
		/* No predecessor if the hit was the first entry. */
		return prev ? prev : dent;
	default:
		BUG_ON(ret > 0);
		return ERR_PTR(ret);
	}

consist_inode:
	gfs2_consist_inode(GFS2_I(inode));
	return ERR_PTR(-EIO);
}
528
529
530/**
531 * dirent_first - Return the first dirent
532 * @dip: the directory
533 * @bh: The buffer
534 * @dent: Pointer to list of dirents
535 *
536 * return first dirent whether bh points to leaf or stuffed dinode
537 *
538 * Returns: IS_LEAF, IS_DINODE, or -errno
539 */
540
541static int dirent_first(struct gfs2_inode *dip, struct buffer_head *bh,
542 struct gfs2_dirent **dent)
543{
544 struct gfs2_meta_header *h = (struct gfs2_meta_header *)bh->b_data;
545
546 if (be32_to_cpu(h->mh_type) == GFS2_METATYPE_LF) {
547 if (gfs2_meta_check(GFS2_SB(&dip->i_inode), bh))
548 return -EIO;
549 *dent = (struct gfs2_dirent *)(bh->b_data +
550 sizeof(struct gfs2_leaf));
551 return IS_LEAF;
552 } else {
553 if (gfs2_metatype_check(GFS2_SB(&dip->i_inode), bh, GFS2_METATYPE_DI))
554 return -EIO;
555 *dent = (struct gfs2_dirent *)(bh->b_data +
556 sizeof(struct gfs2_dinode));
557 return IS_DINODE;
558 }
559}
560
561static int dirent_check_reclen(struct gfs2_inode *dip,
562 const struct gfs2_dirent *d, const void *end_p)
563{
564 const void *ptr = d;
565 u16 rec_len = be16_to_cpu(d->de_rec_len);
566
567 if (unlikely(rec_len < sizeof(struct gfs2_dirent)))
568 goto broken;
569 ptr += rec_len;
570 if (ptr < end_p)
571 return rec_len;
572 if (ptr == end_p)
573 return -ENOENT;
574broken:
575 gfs2_consist_inode(dip);
576 return -EIO;
577}
578
579/**
580 * dirent_next - Next dirent
581 * @dip: the directory
582 * @bh: The buffer
583 * @dent: Pointer to list of dirents
584 *
585 * Returns: 0 on success, error code otherwise
586 */
587
588static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh,
589 struct gfs2_dirent **dent)
590{
591 struct gfs2_dirent *cur = *dent, *tmp;
592 char *bh_end = bh->b_data + bh->b_size;
593 int ret;
594
595 ret = dirent_check_reclen(dip, cur, bh_end);
596 if (ret < 0)
597 return ret;
598
599 tmp = (void *)cur + ret;
600 ret = dirent_check_reclen(dip, tmp, bh_end);
601 if (ret == -EIO)
602 return ret;
603
604 /* Only the first dent could ever have de_inum.no_addr == 0 */
605 if (!tmp->de_inum.no_addr) {
606 gfs2_consist_inode(dip);
607 return -EIO;
608 }
609
610 *dent = tmp;
611 return 0;
612}
613
614/**
615 * dirent_del - Delete a dirent
616 * @dip: The GFS2 inode
617 * @bh: The buffer
618 * @prev: The previous dirent
619 * @cur: The current dirent
620 *
621 */
622
623static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
624 struct gfs2_dirent *prev, struct gfs2_dirent *cur)
625{
626 u16 cur_rec_len, prev_rec_len;
627
628 if (!cur->de_inum.no_addr) {
629 gfs2_consist_inode(dip);
630 return;
631 }
632
633 gfs2_trans_add_bh(dip->i_gl, bh, 1);
634
635 /* If there is no prev entry, this is the first entry in the block.
636 The de_rec_len is already as big as it needs to be. Just zero
637 out the inode number and return. */
638
639 if (!prev) {
640 cur->de_inum.no_addr = 0; /* No endianess worries */
641 return;
642 }
643
644 /* Combine this dentry with the previous one. */
645
646 prev_rec_len = be16_to_cpu(prev->de_rec_len);
647 cur_rec_len = be16_to_cpu(cur->de_rec_len);
648
649 if ((char *)prev + prev_rec_len != (char *)cur)
650 gfs2_consist_inode(dip);
651 if ((char *)cur + cur_rec_len > bh->b_data + bh->b_size)
652 gfs2_consist_inode(dip);
653
654 prev_rec_len += cur_rec_len;
655 prev->de_rec_len = cpu_to_be16(prev_rec_len);
656}
657
658/*
659 * Takes a dent from which to grab space as an argument. Returns the
660 * newly created dent.
661 */
662static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
663 struct gfs2_dirent *dent,
664 const struct qstr *name,
665 struct buffer_head *bh)
666{
667 struct gfs2_inode *ip = GFS2_I(inode);
668 struct gfs2_dirent *ndent;
669 unsigned offset = 0, totlen;
670
671 if (dent->de_inum.no_addr)
672 offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
673 totlen = be16_to_cpu(dent->de_rec_len);
674 BUG_ON(offset + name->len > totlen);
675 gfs2_trans_add_bh(ip->i_gl, bh, 1);
676 ndent = (struct gfs2_dirent *)((char *)dent + offset);
677 dent->de_rec_len = cpu_to_be16(offset);
678 gfs2_qstr2dirent(name, totlen - offset, ndent);
679 return ndent;
680}
681
682static struct gfs2_dirent *gfs2_dirent_alloc(struct inode *inode,
683 struct buffer_head *bh,
684 const struct qstr *name)
685{
686 struct gfs2_dirent *dent;
687 dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
688 gfs2_dirent_find_space, name, NULL);
689 if (!dent || IS_ERR(dent))
690 return dent;
691 return gfs2_init_dirent(inode, dent, name, bh);
692}
693
694static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
695 struct buffer_head **bhp)
696{
697 int error;
698
699 error = gfs2_meta_read(dip->i_gl, leaf_no, DIO_WAIT, bhp);
700 if (!error && gfs2_metatype_check(GFS2_SB(&dip->i_inode), *bhp, GFS2_METATYPE_LF)) {
701 /* printk(KERN_INFO "block num=%llu\n", leaf_no); */
702 error = -EIO;
703 }
704
705 return error;
706}
707
708/**
709 * get_leaf_nr - Get a leaf number associated with the index
710 * @dip: The GFS2 inode
711 * @index:
712 * @leaf_out:
713 *
714 * Returns: 0 on success, error code otherwise
715 */
716
717static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
718 u64 *leaf_out)
719{
720 u64 leaf_no;
721 int error;
722
723 error = gfs2_dir_read_data(dip, (char *)&leaf_no,
724 index * sizeof(u64),
725 sizeof(u64), 0);
726 if (error != sizeof(u64))
727 return (error < 0) ? error : -EIO;
728
729 *leaf_out = be64_to_cpu(leaf_no);
730
731 return 0;
732}
733
734static int get_first_leaf(struct gfs2_inode *dip, u32 index,
735 struct buffer_head **bh_out)
736{
737 u64 leaf_no;
738 int error;
739
740 error = get_leaf_nr(dip, index, &leaf_no);
741 if (!error)
742 error = get_leaf(dip, leaf_no, bh_out);
743
744 return error;
745}
746
/*
 * gfs2_dirent_search - apply @scan to the block(s) that could hold @name
 * @inode: the directory inode
 * @name: the name being looked up (its hash selects the leaf chain)
 * @scan: callback deciding which dirent to return
 * @pbh: on a hit, set to the buffer holding the returned dirent (the
 *       caller must brelse it); set to NULL otherwise
 *
 * For an exhash directory the hash of @name selects a leaf chain
 * which is searched leaf by leaf; otherwise the single stuffed dinode
 * block is scanned.
 *
 * Returns: the dirent, NULL if not found, or an ERR_PTR.
 */
static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
					      const struct qstr *name,
					      gfs2_dscan_t scan,
					      struct buffer_head **pbh)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
		struct gfs2_leaf *leaf;
		unsigned hsize = 1 << ip->i_di.di_depth;
		unsigned index;
		u64 ln;
		/* The hash table must be exactly 2^depth pointers. */
		if (hsize * sizeof(u64) != ip->i_di.di_size) {
			gfs2_consist_inode(ip);
			return ERR_PTR(-EIO);
		}

		/* The top di_depth bits of the hash index the table. */
		index = name->hash >> (32 - ip->i_di.di_depth);
		error = get_first_leaf(ip, index, &bh);
		if (error)
			return ERR_PTR(error);
		do {
			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						scan, name, NULL);
			/* Hit (or error): jump to the common exit,
			   which handles buffer ownership. */
			if (dent)
				goto got_dent;
			leaf = (struct gfs2_leaf *)bh->b_data;
			ln = be64_to_cpu(leaf->lf_next);
			brelse(bh);
			if (!ln)
				break;

			error = get_leaf(ip, ln, &bh);
		} while(!error);

		return error ? ERR_PTR(error) : NULL;
	}


	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		return ERR_PTR(error);
	dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, scan, name, NULL);
got_dent:
	/* Only hand the buffer to the caller on a real hit. */
	if (unlikely(dent == NULL || IS_ERR(dent))) {
		brelse(bh);
		bh = NULL;
	}
	*pbh = bh;
	return dent;
}
801
802static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth)
803{
804 struct gfs2_inode *ip = GFS2_I(inode);
805 u64 bn = gfs2_alloc_meta(ip);
806 struct buffer_head *bh = gfs2_meta_new(ip->i_gl, bn);
807 struct gfs2_leaf *leaf;
808 struct gfs2_dirent *dent;
809 struct qstr name = { .name = "", .len = 0, .hash = 0 };
810 if (!bh)
811 return NULL;
812
813 gfs2_trans_add_bh(ip->i_gl, bh, 1);
814 gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
815 leaf = (struct gfs2_leaf *)bh->b_data;
816 leaf->lf_depth = cpu_to_be16(depth);
817 leaf->lf_entries = 0;
818 leaf->lf_dirent_format = cpu_to_be16(GFS2_FORMAT_DE);
819 leaf->lf_next = 0;
820 memset(leaf->lf_reserved, 0, sizeof(leaf->lf_reserved));
821 dent = (struct gfs2_dirent *)(leaf+1);
822 gfs2_qstr2dirent(&name, bh->b_size - sizeof(struct gfs2_leaf), dent);
823 *pbh = bh;
824 return leaf;
825}
826
827/**
828 * dir_make_exhash - Convert a stuffed directory into an ExHash directory
829 * @dip: The GFS2 inode
830 *
831 * Returns: 0 on success, error code otherwise
832 */
833
834static int dir_make_exhash(struct inode *inode)
835{
836 struct gfs2_inode *dip = GFS2_I(inode);
837 struct gfs2_sbd *sdp = GFS2_SB(inode);
838 struct gfs2_dirent *dent;
839 struct qstr args;
840 struct buffer_head *bh, *dibh;
841 struct gfs2_leaf *leaf;
842 int y;
843 u32 x;
844 u64 *lp, bn;
845 int error;
846
847 error = gfs2_meta_inode_buffer(dip, &dibh);
848 if (error)
849 return error;
850
851 /* Turn over a new leaf */
852
853 leaf = new_leaf(inode, &bh, 0);
854 if (!leaf)
855 return -ENOSPC;
856 bn = bh->b_blocknr;
857
858 gfs2_assert(sdp, dip->i_di.di_entries < (1 << 16));
859 leaf->lf_entries = cpu_to_be16(dip->i_di.di_entries);
860
861 /* Copy dirents */
862
863 gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_leaf), dibh,
864 sizeof(struct gfs2_dinode));
865
866 /* Find last entry */
867
868 x = 0;
869 args.len = bh->b_size - sizeof(struct gfs2_dinode) +
870 sizeof(struct gfs2_leaf);
871 args.name = bh->b_data;
872 dent = gfs2_dirent_scan(&dip->i_inode, bh->b_data, bh->b_size,
873 gfs2_dirent_last, &args, NULL);
874 if (!dent) {
875 brelse(bh);
876 brelse(dibh);
877 return -EIO;
878 }
879 if (IS_ERR(dent)) {
880 brelse(bh);
881 brelse(dibh);
882 return PTR_ERR(dent);
883 }
884
885 /* Adjust the last dirent's record length
886 (Remember that dent still points to the last entry.) */
887
888 dent->de_rec_len = cpu_to_be16(be16_to_cpu(dent->de_rec_len) +
889 sizeof(struct gfs2_dinode) -
890 sizeof(struct gfs2_leaf));
891
892 brelse(bh);
893
894 /* We're done with the new leaf block, now setup the new
895 hash table. */
896
897 gfs2_trans_add_bh(dip->i_gl, dibh, 1);
898 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
899
900 lp = (u64 *)(dibh->b_data + sizeof(struct gfs2_dinode));
901
902 for (x = sdp->sd_hash_ptrs; x--; lp++)
903 *lp = cpu_to_be64(bn);
904
905 dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2;
906 dip->i_di.di_blocks++;
907 dip->i_di.di_flags |= GFS2_DIF_EXHASH;
908 dip->i_di.di_payload_format = 0;
909
910 for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
911 dip->i_di.di_depth = y;
912
913 gfs2_dinode_out(&dip->i_di, dibh->b_data);
914
915 brelse(dibh);
916
917 return 0;
918}
919
920/**
921 * dir_split_leaf - Split a leaf block into two
922 * @dip: The GFS2 inode
923 * @index:
924 * @leaf_no:
925 *
926 * Returns: 0 on success, error code on failure
927 */
928
static int dir_split_leaf(struct inode *inode, const struct qstr *name)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct buffer_head *nbh, *obh, *dibh;
	struct gfs2_leaf *nleaf, *oleaf;
	struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new;
	u32 start, len, half_len, divider;
	u64 bn, *lp, leaf_no;
	u32 index;
	int x, moved = 0;
	int error;

	index = name->hash >> (32 - dip->i_di.di_depth);
	error = get_leaf_nr(dip, index, &leaf_no);
	if (error)
		return error;

	/* Get the old leaf block */
	error = get_leaf(dip, leaf_no, &obh);
	if (error)
		return error;

	/* A leaf already at full depth has only one hash-table slot
	   pointing at it and cannot be split further. */
	oleaf = (struct gfs2_leaf *)obh->b_data;
	if (dip->i_di.di_depth == be16_to_cpu(oleaf->lf_depth)) {
		brelse(obh);
		return 1; /* can't split */
	}

	gfs2_trans_add_bh(dip->i_gl, obh, 1);

	/* The new leaf sits one level deeper than the old one. */
	nleaf = new_leaf(inode, &nbh, be16_to_cpu(oleaf->lf_depth) + 1);
	if (!nleaf) {
		brelse(obh);
		return -ENOSPC;
	}
	bn = nbh->b_blocknr;

	/* Compute the start and len of leaf pointers in the hash table. */
	len = 1 << (dip->i_di.di_depth - be16_to_cpu(oleaf->lf_depth));
	half_len = len >> 1;
	if (!half_len) {
		printk(KERN_WARNING "di_depth %u lf_depth %u index %u\n", dip->i_di.di_depth, be16_to_cpu(oleaf->lf_depth), index);
		gfs2_consist_inode(dip);
		error = -EIO;
		goto fail_brelse;
	}

	start = (index & ~(len - 1));

	/* Change the pointers.
	   Don't bother distinguishing stuffed from non-stuffed.
	   This code is complicated enough already. */
	lp = kmalloc(half_len * sizeof(u64), GFP_NOFS | __GFP_NOFAIL);
	/* Change the pointers: the upper half of the old leaf's slot
	   range now points at the new leaf. */
	for (x = 0; x < half_len; x++)
		lp[x] = cpu_to_be64(bn);

	error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64),
				    half_len * sizeof(u64));
	if (error != half_len * sizeof(u64)) {
		if (error >= 0)
			error = -EIO;
		goto fail_lpfree;
	}

	kfree(lp);

	/* Compute the divider: entries whose hash is below it stay
	   associated with the new leaf's slot range. */
	divider = (start + half_len) << (32 - dip->i_di.di_depth);

	/* Copy the entries */
	dirent_first(dip, obh, &dent);

	do {
		/* Grab the successor first, since moving @dent will
		   disturb the record chain. */
		next = dent;
		if (dirent_next(dip, obh, &next))
			next = NULL;

		if (dent->de_inum.no_addr &&
		    be32_to_cpu(dent->de_hash) < divider) {
			struct qstr str;
			str.name = (char*)(dent+1);
			str.len = be16_to_cpu(dent->de_name_len);
			str.hash = be32_to_cpu(dent->de_hash);
			new = gfs2_dirent_alloc(inode, nbh, &str);
			if (IS_ERR(new)) {
				error = PTR_ERR(new);
				break;
			}

			new->de_inum = dent->de_inum; /* No endian worries */
			new->de_type = dent->de_type; /* No endian worries */
			nleaf->lf_entries = cpu_to_be16(be16_to_cpu(nleaf->lf_entries)+1);

			dirent_del(dip, obh, prev, dent);

			if (!oleaf->lf_entries)
				gfs2_consist_inode(dip);
			oleaf->lf_entries = cpu_to_be16(be16_to_cpu(oleaf->lf_entries)-1);

			/* A deleted first entry stays in place as a
			   dead record and becomes the predecessor. */
			if (!prev)
				prev = dent;

			moved = 1;
		} else {
			prev = dent;
		}
		dent = next;
	} while (dent);

	/* Both leaves now cover half the range, one level deeper. */
	oleaf->lf_depth = nleaf->lf_depth;

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
		dip->i_di.di_blocks++;
		gfs2_dinode_out(&dip->i_di, dibh->b_data);
		brelse(dibh);
	}

	brelse(obh);
	brelse(nbh);

	return error;

fail_lpfree:
	kfree(lp);

fail_brelse:
	brelse(obh);
	brelse(nbh);
	return error;
}
1061
1062/**
1063 * dir_double_exhash - Double size of ExHash table
1064 * @dip: The GFS2 dinode
1065 *
1066 * Returns: 0 on success, error code on failure
1067 */
1068
static int dir_double_exhash(struct gfs2_inode *dip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct buffer_head *dibh;
	u32 hsize;
	u64 *buf;
	u64 *from, *to;
	u64 block;
	int x;
	int error = 0;

	hsize = 1 << dip->i_di.di_depth;
	if (hsize * sizeof(u64) != dip->i_di.di_size) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	/* Allocate both the "from" and "to" buffers in one big chunk */
	/* NOTE(review): three hash-block-sized chunks are allocated --
	   one for "from" and apparently two for the doubled "to"
	   (the write below is sb_bsize bytes); confirm that
	   sb_bsize == 2 * sd_hash_bsize. */

	buf = kcalloc(3, sdp->sd_hash_bsize, GFP_KERNEL | __GFP_NOFAIL);

	/* Work backwards so doubled output never overwrites input
	   blocks that have not been read yet. */
	for (block = dip->i_di.di_size >> sdp->sd_hash_bsize_shift; block--;) {
		error = gfs2_dir_read_data(dip, (char *)buf,
					    block * sdp->sd_hash_bsize,
					    sdp->sd_hash_bsize, 1);
		if (error != sdp->sd_hash_bsize) {
			if (error >= 0)
				error = -EIO;
			goto fail;
		}

		from = buf;
		to = (u64 *)((char *)buf + sdp->sd_hash_bsize);

		/* Each pointer is duplicated: both children of a slot
		   initially point at the same leaf. */
		for (x = sdp->sd_hash_ptrs; x--; from++) {
			*to++ = *from;	/* No endianess worries */
			*to++ = *from;
		}

		error = gfs2_dir_write_data(dip,
					     (char *)buf + sdp->sd_hash_bsize,
					     block * sdp->sd_sb.sb_bsize,
					     sdp->sd_sb.sb_bsize);
		if (error != sdp->sd_sb.sb_bsize) {
			if (error >= 0)
				error = -EIO;
			goto fail;
		}
	}

	kfree(buf);

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (!gfs2_assert_withdraw(sdp, !error)) {
		dip->i_di.di_depth++;
		gfs2_dinode_out(&dip->i_di, dibh->b_data);
		brelse(dibh);
	}

	return error;

fail:
	kfree(buf);
	return error;
}
1134
1135/**
1136 * compare_dents - compare directory entries by hash value
1137 * @a: first dent
1138 * @b: second dent
1139 *
1140 * When comparing the hash entries of @a to @b:
1141 * gt: returns 1
1142 * lt: returns -1
1143 * eq: returns 0
1144 */
1145
1146static int compare_dents(const void *a, const void *b)
1147{
1148 const struct gfs2_dirent *dent_a, *dent_b;
1149 u32 hash_a, hash_b;
1150 int ret = 0;
1151
1152 dent_a = *(const struct gfs2_dirent **)a;
1153 hash_a = be32_to_cpu(dent_a->de_hash);
1154
1155 dent_b = *(const struct gfs2_dirent **)b;
1156 hash_b = be32_to_cpu(dent_b->de_hash);
1157
1158 if (hash_a > hash_b)
1159 ret = 1;
1160 else if (hash_a < hash_b)
1161 ret = -1;
1162 else {
1163 unsigned int len_a = be16_to_cpu(dent_a->de_name_len);
1164 unsigned int len_b = be16_to_cpu(dent_b->de_name_len);
1165
1166 if (len_a > len_b)
1167 ret = 1;
1168 else if (len_a < len_b)
1169 ret = -1;
1170 else
1171 ret = memcmp(dent_a + 1, dent_b + 1, len_a);
1172 }
1173
1174 return ret;
1175}
1176
1177/**
1178 * do_filldir_main - read out directory entries
1179 * @dip: The GFS2 inode
1180 * @offset: The offset in the file to read from
1181 * @opaque: opaque data to pass to filldir
1182 * @filldir: The function to pass entries to
1183 * @darr: an array of struct gfs2_dirent pointers to read
1184 * @entries: the number of entries in darr
1185 * @copied: pointer to int that's non-zero if a entry has been copied out
1186 *
1187 * Jump through some hoops to make sure that if there are hash collsions,
1188 * they are read out at the beginning of a buffer. We want to minimize
1189 * the possibility that they will fall into different readdir buffers or
1190 * that someone will want to seek to that location.
1191 *
1192 * Returns: errno, >0 on exception from filldir
1193 */
1194
static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
			   void *opaque, gfs2_filldir_t filldir,
			   const struct gfs2_dirent **darr, u32 entries,
			   int *copied)
{
	const struct gfs2_dirent *dent, *dent_next;
	struct gfs2_inum inum;
	u64 off, off_next;
	unsigned int x, y;
	int run = 0;	/* inside a run of equal-hash entries */
	int error = 0;

	/* Entries are emitted in hash order so *offset is monotonic. */
	sort(darr, entries, sizeof(struct gfs2_dirent *), compare_dents, NULL);

	dent_next = darr[0];
	off_next = be32_to_cpu(dent_next->de_hash);
	off_next = gfs2_disk_hash2offset(off_next);

	for (x = 0, y = 1; x < entries; x++, y++) {
		dent = dent_next;
		off = off_next;

		if (y < entries) {
			/* Peek at the next entry to detect a hash
			   collision starting at this position. */
			dent_next = darr[y];
			off_next = be32_to_cpu(dent_next->de_hash);
			off_next = gfs2_disk_hash2offset(off_next);

			/* Skip entries already returned previously. */
			if (off < *offset)
				continue;
			*offset = off;

			/* Don't start a collision run mid-buffer: if
			   something was already copied, stop so the
			   whole run lands at the start of the next
			   readdir buffer. */
			if (off_next == off) {
				if (*copied && !run)
					return 1;
				run = 1;
			} else
				run = 0;
		} else {
			if (off < *offset)
				continue;
			*offset = off;
		}

		gfs2_inum_in(&inum, (char *)&dent->de_inum);

		error = filldir(opaque, (const char *)(dent + 1),
				be16_to_cpu(dent->de_name_len),
				off, &inum,
				be16_to_cpu(dent->de_type));
		/* Non-zero from filldir means "buffer full": report
		   an exception (>0), not an error. */
		if (error)
			return 1;

		*copied = 1;
	}

	/* Increment the *offset by one, so the next time we come into the
	   do_filldir fxn, we get the next entry instead of the last one in the
	   current leaf */

	(*offset)++;

	return 0;
}
1258
/*
 * gfs2_dir_read_leaf - feed one leaf chain's entries to filldir
 * @inode: the directory inode
 * @offset: current readdir position (updated)
 * @opaque: passed through to @filldir
 * @filldir: entry callback
 * @copied: set non-zero once anything has been copied out
 * @depth: set to the chain's lf_depth (first leaf) so the caller can
 *         step to the next distinct chain in the hash table
 * @leaf_no: block number of the first leaf in the chain
 *
 * Two passes: first count leaves and entries to size one allocation,
 * then gather pointers to every live dirent (keeping the leaf buffers
 * pinned) and hand them to do_filldir_main().
 *
 * Returns: 0 or errno; >0 exceptions from do_filldir_main pass through.
 */
static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
			      gfs2_filldir_t filldir, int *copied,
			      unsigned *depth, u64 leaf_no)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh;
	struct gfs2_leaf *lf;
	unsigned entries = 0;
	unsigned leaves = 0;
	const struct gfs2_dirent **darr, *dent;
	struct dirent_gather g;
	struct buffer_head **larr;
	int leaf = 0;
	int error, i;
	u64 lfn = leaf_no;

	/* Pass 1: size the chain. */
	do {
		error = get_leaf(ip, lfn, &bh);
		if (error)
			goto out;
		lf = (struct gfs2_leaf *)bh->b_data;
		if (leaves == 0)
			*depth = be16_to_cpu(lf->lf_depth);
		entries += be16_to_cpu(lf->lf_entries);
		leaves++;
		lfn = be64_to_cpu(lf->lf_next);
		brelse(bh);
	} while(lfn);

	if (!entries)
		return 0;

	/* One allocation: buffer-head pointers first, then the dirent
	   pointer array right after them. */
	error = -ENOMEM;
	larr = vmalloc((leaves + entries) * sizeof(void *));
	if (!larr)
		goto out;
	darr = (const struct gfs2_dirent **)(larr + leaves);
	g.pdent = darr;
	g.offset = 0;
	lfn = leaf_no;

	/* Pass 2: gather dirent pointers, keeping contributing leaf
	   buffers pinned until filldir is done with them. */
	do {
		error = get_leaf(ip, lfn, &bh);
		if (error)
			goto out_kfree;
		lf = (struct gfs2_leaf *)bh->b_data;
		lfn = be64_to_cpu(lf->lf_next);
		if (lf->lf_entries) {
			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						gfs2_dirent_gather, NULL, &g);
			error = PTR_ERR(dent);
			if (IS_ERR(dent)) {
				goto out_kfree;
			}
			error = 0;
			larr[leaf++] = bh;
		} else {
			brelse(bh);
		}
	} while(lfn);

	error = do_filldir_main(ip, offset, opaque, filldir, darr,
				entries, copied);
out_kfree:
	for(i = 0; i < leaf; i++)
		brelse(larr[i]);
	vfree(larr);
out:
	return error;
}
1329
1330/**
1331 * dir_e_read - Reads the entries from a directory into a filldir buffer
1332 * @dip: dinode pointer
1333 * @offset: the hash of the last entry read shifted to the right once
1334 * @opaque: buffer for the filldir function to fill
1335 * @filldir: points to the filldir function to use
1336 *
1337 * Returns: errno
1338 */
1339
static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
		      gfs2_filldir_t filldir)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u32 hsize, len = 0;
	u32 ht_offset, lp_offset, ht_offset_cur = -1;
	u32 hash, index;
	u64 *lp;
	int copied = 0;
	int error = 0;
	unsigned depth = 0;

	/* The hash table must be exactly 2^depth pointers. */
	hsize = 1 << dip->i_di.di_depth;
	if (hsize * sizeof(u64) != dip->i_di.di_size) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	/* Resume from the hash encoded in the readdir offset. */
	hash = gfs2_dir_offset2hash(*offset);
	index = hash >> (32 - dip->i_di.di_depth);

	/* lp caches one hash-table block at a time. */
	lp = kmalloc(sdp->sd_hash_bsize, GFP_KERNEL);
	if (!lp)
		return -ENOMEM;

	while (index < hsize) {
		lp_offset = index & (sdp->sd_hash_ptrs - 1);
		ht_offset = index - lp_offset;

		/* Only re-read the table block when we cross into a
		   different one. */
		if (ht_offset_cur != ht_offset) {
			error = gfs2_dir_read_data(dip, (char *)lp,
						ht_offset * sizeof(u64),
						sdp->sd_hash_bsize, 1);
			if (error != sdp->sd_hash_bsize) {
				if (error >= 0)
					error = -EIO;
				goto out;
			}
			ht_offset_cur = ht_offset;
		}

		error = gfs2_dir_read_leaf(inode, offset, opaque, filldir,
					   &copied, &depth,
					   be64_to_cpu(lp[lp_offset]));
		if (error)
			break;

		/* Skip the remaining slots that alias the chain just
		   read (a chain at depth d covers 2^(di_depth-d)
		   consecutive slots). */
		len = 1 << (dip->i_di.di_depth - depth);
		index = (index & ~(len - 1)) + len;
	}

out:
	kfree(lp);
	/* >0 means "filldir buffer full" -- not an error. */
	if (error > 0)
		error = 0;
	return error;
}
1398
/*
 * gfs2_dir_read - readdir entry point: emit directory entries
 * @inode: the directory inode
 * @offset: readdir position (hash-derived), updated as entries go out
 * @opaque: passed through to @filldir
 * @filldir: entry callback
 *
 * Exhash directories are delegated to dir_e_read(); otherwise the
 * directory must be stuffed and its single block is gathered and fed
 * to do_filldir_main().
 *
 * Returns: errno (a positive "buffer full" exception is folded to 0)
 */
int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
		  gfs2_filldir_t filldir)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct dirent_gather g;
	const struct gfs2_dirent **darr, *dent;
	struct buffer_head *dibh;
	int copied = 0;
	int error;

	if (!dip->i_di.di_entries)
		return 0;

	if (dip->i_di.di_flags & GFS2_DIF_EXHASH)
		return dir_e_read(inode, offset, opaque, filldir);

	/* A non-exhash directory with unstuffed data is corrupt. */
	if (!gfs2_is_stuffed(dip)) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		return error;

	error = -ENOMEM;
	darr = kmalloc(dip->i_di.di_entries * sizeof(struct gfs2_dirent *),
		       GFP_KERNEL);
	if (darr) {
		g.pdent = darr;
		g.offset = 0;
		dent = gfs2_dirent_scan(inode, dibh->b_data, dibh->b_size,
					gfs2_dirent_gather, NULL, &g);
		if (IS_ERR(dent)) {
			error = PTR_ERR(dent);
			goto out;
		}
		error = do_filldir_main(dip, offset, opaque, filldir, darr,
					dip->i_di.di_entries, &copied);
out:
		kfree(darr);
	}

	if (error > 0)
		error = 0;

	brelse(dibh);

	return error;
}
1449
1450/**
1451 * gfs2_dir_search - Search a directory
1452 * @dip: The GFS2 inode
1453 * @filename:
1454 * @inode:
1455 *
1456 * This routine searches a directory for a file or another directory.
1457 * Assumes a glock is held on dip.
1458 *
1459 * Returns: errno
1460 */
1461
1462int gfs2_dir_search(struct inode *dir, const struct qstr *name,
1463 struct gfs2_inum *inum, unsigned int *type)
1464{
1465 struct buffer_head *bh;
1466 struct gfs2_dirent *dent;
1467
1468 dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
1469 if (dent) {
1470 if (IS_ERR(dent))
1471 return PTR_ERR(dent);
1472 if (inum)
1473 gfs2_inum_in(inum, (char *)&dent->de_inum);
1474 if (type)
1475 *type = be16_to_cpu(dent->de_type);
1476 brelse(bh);
1477 return 0;
1478 }
1479 return -ENOENT;
1480}
1481
/*
 * dir_new_leaf - Append a fresh, empty leaf to the end of a hash
 * slot's leaf chain, then account the new block in the dinode.
 * Returns 0, -ENOSPC if no leaf could be allocated, or another errno.
 */
static int dir_new_leaf(struct inode *inode, const struct qstr *name)
{
	struct buffer_head *bh, *obh;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_leaf *leaf, *oleaf;
	int error;
	u32 index;
	u64 bn;

	/* Hash-table slot that @name maps to */
	index = name->hash >> (32 - ip->i_di.di_depth);
	error = get_first_leaf(ip, index, &obh);
	if (error)
		return error;
	/* Walk lf_next links to the last leaf in the chain */
	do {
		oleaf = (struct gfs2_leaf *)obh->b_data;
		bn = be64_to_cpu(oleaf->lf_next);
		if (!bn)
			break;
		brelse(obh);
		error = get_leaf(ip, bn, &obh);
		if (error)
			return error;
	} while(1);

	gfs2_trans_add_bh(ip->i_gl, obh, 1);

	/* New leaf inherits the old leaf's depth and is chained after it */
	leaf = new_leaf(inode, &bh, be16_to_cpu(oleaf->lf_depth));
	if (!leaf) {
		brelse(obh);
		return -ENOSPC;
	}
	oleaf->lf_next = cpu_to_be64(bh->b_blocknr);
	brelse(bh);
	brelse(obh);

	/* Record the extra block in the dinode */
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		return error;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ip->i_di.di_blocks++;
	gfs2_dinode_out(&ip->i_di, bh->b_data);
	brelse(bh);
	return 0;
}
1526
/**
 * gfs2_dir_add - Add new filename into directory
 * @inode: The directory inode
 * @name: The new name
 * @inum: The inode number of the entry
 * @type: The type of the entry
 *
 * Searches for free space for the new entry; when none is found the
 * directory is grown (stuffed -> exhash conversion, leaf split, hash
 * table doubling, then appending a chained leaf) and the search is
 * retried.
 *
 * Returns: 0 on success, error code on failure
 */

int gfs2_dir_add(struct inode *inode, const struct qstr *name,
		 const struct gfs2_inum *inum, unsigned type)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	struct gfs2_leaf *leaf;
	int error;

	while(1) {
		dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space,
					  &bh);
		if (dent) {
			if (IS_ERR(dent))
				return PTR_ERR(dent);
			/* Space found: write the new entry in place */
			dent = gfs2_init_dirent(inode, dent, name, bh);
			gfs2_inum_out(inum, (char *)&dent->de_inum);
			dent->de_type = cpu_to_be16(type);
			/* Exhash leaves track their own entry count */
			if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
				leaf = (struct gfs2_leaf *)bh->b_data;
				leaf->lf_entries = cpu_to_be16(be16_to_cpu(leaf->lf_entries) + 1);
			}
			brelse(bh);
			/* Bump entry count and timestamps in the dinode */
			error = gfs2_meta_inode_buffer(ip, &bh);
			if (error)
				break;
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			ip->i_di.di_entries++;
			ip->i_di.di_mtime = ip->i_di.di_ctime = get_seconds();
			gfs2_dinode_out(&ip->i_di, bh->b_data);
			brelse(bh);
			error = 0;
			break;
		}
		/* No space: a stuffed directory is first converted to
		   exhash form, then we retry */
		if (!(ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
			error = dir_make_exhash(inode);
			if (error)
				break;
			continue;
		}
		/* Try splitting the target leaf; a positive return
		   apparently means the leaf cannot be split further at
		   the current hash-table depth - TODO confirm */
		error = dir_split_leaf(inode, name);
		if (error == 0)
			continue;
		if (error < 0)
			break;
		/* Double the hash table and retry the split */
		if (ip->i_di.di_depth < GFS2_DIR_MAX_DEPTH) {
			error = dir_double_exhash(ip);
			if (error)
				break;
			error = dir_split_leaf(inode, name);
			if (error < 0)
				break;
			if (error == 0)
				continue;
		}
		/* Last resort: chain a brand-new leaf onto the hash slot */
		error = dir_new_leaf(inode, name);
		if (!error)
			continue;
		error = -ENOSPC;
		break;
	}
	return error;
}
1600
1601
/**
 * gfs2_dir_del - Delete a directory entry
 * @dip: The GFS2 inode of the directory
 * @name: The name of the entry to delete
 *
 * Returns: 0 on success, error code on failure
 */

int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *name)
{
	struct gfs2_dirent *dent, *prev = NULL;
	struct buffer_head *bh;
	int error;

	/* Returns _either_ the entry (if its first in block) or the
	   previous entry otherwise */
	dent = gfs2_dirent_search(&dip->i_inode, name, gfs2_dirent_prev, &bh);
	if (!dent) {
		/* Asked to delete a name that isn't there: corruption */
		gfs2_consist_inode(dip);
		return -EIO;
	}
	if (IS_ERR(dent)) {
		gfs2_consist_inode(dip);
		return PTR_ERR(dent);
	}
	/* If not first in block, adjust pointers accordingly: what we
	   got back was the predecessor, so step forward to the victim */
	if (gfs2_dirent_find(dent, name, NULL) == 0) {
		prev = dent;
		dent = (struct gfs2_dirent *)((char *)dent + be16_to_cpu(prev->de_rec_len));
	}

	dirent_del(dip, bh, prev, dent);
	/* Exhash leaves keep a per-leaf entry count; decrement it */
	if (dip->i_di.di_flags & GFS2_DIF_EXHASH) {
		struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data;
		u16 entries = be16_to_cpu(leaf->lf_entries);
		if (!entries)
			gfs2_consist_inode(dip);
		leaf->lf_entries = cpu_to_be16(--entries);
	}
	brelse(bh);

	/* Update the dinode: entry count and timestamps */
	error = gfs2_meta_inode_buffer(dip, &bh);
	if (error)
		return error;

	if (!dip->i_di.di_entries)
		gfs2_consist_inode(dip);
	gfs2_trans_add_bh(dip->i_gl, bh, 1);
	dip->i_di.di_entries--;
	dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds();
	gfs2_dinode_out(&dip->i_di, bh->b_data);
	brelse(bh);
	mark_inode_dirty(&dip->i_inode);

	return error;
}
1658
/**
 * gfs2_dir_mvino - Change inode number of directory entry
 * @dip: The GFS2 directory inode
 * @filename: The name whose entry is rewritten
 * @inum: The new inode number to store in the entry
 * @new_type: The new inode type to store in the entry
 *
 * This routine changes the inode number of a directory entry. It's used
 * by rename to change ".." when a directory is moved.
 * Assumes a glock is held on dvp.
 *
 * Returns: errno
 */

int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
		   struct gfs2_inum *inum, unsigned int new_type)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	int error;

	dent = gfs2_dirent_search(&dip->i_inode, filename, gfs2_dirent_find, &bh);
	if (!dent) {
		/* The entry must exist; its absence means corruption */
		gfs2_consist_inode(dip);
		return -EIO;
	}
	if (IS_ERR(dent))
		return PTR_ERR(dent);

	gfs2_trans_add_bh(dip->i_gl, bh, 1);
	gfs2_inum_out(inum, (char *)&dent->de_inum);
	dent->de_type = cpu_to_be16(new_type);

	/* For exhash dirs the entry lives in a leaf block, so the
	   dinode must be fetched separately before we can stamp it */
	if (dip->i_di.di_flags & GFS2_DIF_EXHASH) {
		brelse(bh);
		error = gfs2_meta_inode_buffer(dip, &bh);
		if (error)
			return error;
		gfs2_trans_add_bh(dip->i_gl, bh, 1);
	}

	dip->i_di.di_mtime = dip->i_di.di_ctime = get_seconds();
	gfs2_dinode_out(&dip->i_di, bh->b_data);
	brelse(bh);
	return 0;
}
1704
/**
 * foreach_leaf - call a function for each leaf in a directory
 * @dip: the directory
 * @lc: the function to call for each leaf
 * @data: private data to pass to it
 *
 * Returns: errno
 */

static int foreach_leaf(struct gfs2_inode *dip, leaf_call_t lc, void *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct buffer_head *bh;
	struct gfs2_leaf *leaf;
	u32 hsize, len;
	u32 ht_offset, lp_offset, ht_offset_cur = -1;
	u32 index = 0;
	u64 *lp;
	u64 leaf_no;
	int error = 0;

	/* The on-disk hash table must be exactly 2^depth pointers */
	hsize = 1 << dip->i_di.di_depth;
	if (hsize * sizeof(u64) != dip->i_di.di_size) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	/* Scratch buffer caching one block's worth of table pointers */
	lp = kmalloc(sdp->sd_hash_bsize, GFP_KERNEL);
	if (!lp)
		return -ENOMEM;

	while (index < hsize) {
		lp_offset = index & (sdp->sd_hash_ptrs - 1);
		ht_offset = index - lp_offset;

		/* Refill the cache when crossing a block boundary.
		   NOTE: gfs2_dir_read_data() overloads its return with
		   a byte count on success, so anything short of a full
		   block is treated as an error here. */
		if (ht_offset_cur != ht_offset) {
			error = gfs2_dir_read_data(dip, (char *)lp,
						ht_offset * sizeof(u64),
						sdp->sd_hash_bsize, 1);
			if (error != sdp->sd_hash_bsize) {
				if (error >= 0)
					error = -EIO;
				goto out;
			}
			ht_offset_cur = ht_offset;
		}

		leaf_no = be64_to_cpu(lp[lp_offset]);
		if (leaf_no) {
			error = get_leaf(dip, leaf_no, &bh);
			if (error)
				goto out;
			leaf = (struct gfs2_leaf *)bh->b_data;
			/* A leaf of depth d is referenced by
			   2^(di_depth - d) consecutive table entries */
			len = 1 << (dip->i_di.di_depth - be16_to_cpu(leaf->lf_depth));
			brelse(bh);

			error = lc(dip, index, len, leaf_no, data);
			if (error)
				goto out;

			/* Skip the remaining pointers to this leaf */
			index = (index & ~(len - 1)) + len;
		} else
			index++;
	}

	/* Overshooting hsize means an lf_depth was inconsistent */
	if (index != hsize) {
		gfs2_consist_inode(dip);
		error = -EIO;
	}

out:
	kfree(lp);

	return error;
}
1780
/**
 * leaf_dealloc - Deallocate a directory leaf
 * @dip: the directory
 * @index: the hash table offset in the directory
 * @len: the number of pointers to this leaf
 * @leaf_no: the leaf number (first leaf of the chain to free)
 * @data: not used
 *
 * Frees the whole leaf chain starting at @leaf_no and zeroes out the
 * @len hash-table pointers that referenced it.
 *
 * Returns: errno
 */

static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
			u64 leaf_no, void *data)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_leaf *tmp_leaf;
	struct gfs2_rgrp_list rlist;
	struct buffer_head *bh, *dibh;
	u64 blk, nblk;
	unsigned int rg_blocks = 0, l_blocks = 0;
	char *ht;
	unsigned int x, size = len * sizeof(u64);
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	/* Zero-filled buffer used to clear the hash-table pointers */
	ht = kzalloc(size, GFP_KERNEL);
	if (!ht)
		return -ENOMEM;

	gfs2_alloc_get(dip);

	error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	error = gfs2_rindex_hold(sdp, &dip->i_alloc.al_ri_gh);
	if (error)
		goto out_qs;

	/* Count the number of leaves */

	for (blk = leaf_no; blk; blk = nblk) {
		error = get_leaf(dip, blk, &bh);
		if (error)
			goto out_rlist;
		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);
		brelse(bh);

		gfs2_rlist_add(sdp, &rlist, blk);
		l_blocks++;
	}

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);

	/* Total up the rgrp header blocks the transaction may dirty */
	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_ri.ri_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist;

	error = gfs2_trans_begin(sdp,
			rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) +
			RES_DINODE + RES_STATFS + RES_QUOTA, l_blocks);
	if (error)
		goto out_rg_gunlock;

	/* Second pass: actually free each leaf in the chain */
	for (blk = leaf_no; blk; blk = nblk) {
		error = get_leaf(dip, blk, &bh);
		if (error)
			goto out_end_trans;
		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);
		brelse(bh);

		gfs2_free_meta(dip, blk, 1);

		if (!dip->i_di.di_blocks)
			gfs2_consist_inode(dip);
		dip->i_di.di_blocks--;
	}

	/* Clear the hash-table pointers that pointed at this chain.
	   NOTE: gfs2_dir_write_data() returns a byte count on success */
	error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
	if (error != size) {
		if (error >= 0)
			error = -EIO;
		goto out_end_trans;
	}

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		goto out_end_trans;

	gfs2_trans_add_bh(dip->i_gl, dibh, 1);
	gfs2_dinode_out(&dip->i_di, dibh->b_data);
	brelse(dibh);

out_end_trans:
	gfs2_trans_end(sdp);
out_rg_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
	gfs2_rlist_free(&rlist);
	gfs2_glock_dq_uninit(&dip->i_alloc.al_ri_gh);
out_qs:
	gfs2_quota_unhold(dip);
out:
	gfs2_alloc_put(dip);
	kfree(ht);
	return error;
}
1897
/**
 * gfs2_dir_exhash_dealloc - free all the leaf blocks in a directory
 * @dip: the directory
 *
 * Dealloc all on-disk directory leaves to FREEMETA state
 * Change on-disk inode type to "regular file"
 *
 * Returns: errno
 */

int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct buffer_head *bh;
	int error;

	/* Dealloc on-disk leaves to FREEMETA state */
	error = foreach_leaf(dip, leaf_dealloc, NULL);
	if (error)
		return error;

	/* Make this a regular file in case we crash.
	   (We don't want to free these blocks a second time.) */

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	/* Rewrite di_mode directly in the on-disk dinode */
	error = gfs2_meta_inode_buffer(dip, &bh);
	if (!error) {
		gfs2_trans_add_bh(dip->i_gl, bh, 1);
		((struct gfs2_dinode *)bh->b_data)->di_mode =
						cpu_to_be32(S_IFREG);
		brelse(bh);
	}

	gfs2_trans_end(sdp);

	return error;
}
1938
1939/**
1940 * gfs2_diradd_alloc_required - find if adding entry will require an allocation
1941 * @ip: the file being written to
1942 * @filname: the filename that's going to be added
1943 *
1944 * Returns: 1 if alloc required, 0 if not, -ve on error
1945 */
1946
1947int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name)
1948{
1949 struct gfs2_dirent *dent;
1950 struct buffer_head *bh;
1951
1952 dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space, &bh);
1953 if (!dent) {
1954 return 1;
1955 }
1956 if (IS_ERR(dent))
1957 return PTR_ERR(dent);
1958 brelse(bh);
1959 return 0;
1960}
1961
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
new file mode 100644
index 000000000000..371233419b07
--- /dev/null
+++ b/fs/gfs2/dir.h
@@ -0,0 +1,79 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __DIR_DOT_H__
11#define __DIR_DOT_H__
12
13#include <linux/dcache.h>
14
15struct inode;
16struct gfs2_inode;
17struct gfs2_inum;
18
19/**
20 * gfs2_filldir_t - Report a directory entry to the caller of gfs2_dir_read()
21 * @opaque: opaque data used by the function
22 * @name: the name of the directory entry
23 * @length: the length of the name
24 * @offset: the entry's offset in the directory
25 * @inum: the inode number the entry points to
26 * @type: the type of inode the entry points to
27 *
28 * Returns: 0 on success, 1 if buffer full
29 */
30
31typedef int (*gfs2_filldir_t) (void *opaque,
32 const char *name, unsigned int length,
33 u64 offset,
34 struct gfs2_inum *inum, unsigned int type);
35
36int gfs2_dir_search(struct inode *dir, const struct qstr *filename,
37 struct gfs2_inum *inum, unsigned int *type);
38int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
39 const struct gfs2_inum *inum, unsigned int type);
40int gfs2_dir_del(struct gfs2_inode *dip, const struct qstr *filename);
41int gfs2_dir_read(struct inode *inode, u64 * offset, void *opaque,
42 gfs2_filldir_t filldir);
43int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
44 struct gfs2_inum *new_inum, unsigned int new_type);
45
46int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
47
48int gfs2_diradd_alloc_required(struct inode *dir,
49 const struct qstr *filename);
50int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
51 struct buffer_head **bhp);
52
/* On-disk 32-bit name hash: bit-inverted little-endian CRC32 of @data */
static inline u32 gfs2_disk_hash(const char *data, int len)
{
	return crc32_le((u32)~0, data, len) ^ (u32)~0;
}
57
58
/* Initialise a qstr from a NUL-terminated filename, computing its
   on-disk hash. The string is not copied: @fname must outlive @name */
static inline void gfs2_str2qstr(struct qstr *name, const char *fname)
{
	name->name = fname;
	name->len = strlen(fname);
	name->hash = gfs2_disk_hash(name->name, name->len);
}
65
/* N.B. This probably ought to take inum & type as args as well */
/* Fill in an on-disk dirent from @name. The inode number and type are
   zeroed (the caller must set them later); the name bytes are copied
   into the space immediately following the dirent header */
static inline void gfs2_qstr2dirent(const struct qstr *name, u16 reclen, struct gfs2_dirent *dent)
{
	dent->de_inum.no_addr = cpu_to_be64(0);
	dent->de_inum.no_formal_ino = cpu_to_be64(0);
	dent->de_hash = cpu_to_be32(name->hash);
	dent->de_rec_len = cpu_to_be16(reclen);
	dent->de_name_len = cpu_to_be16(name->len);
	dent->de_type = cpu_to_be16(0);
	memset(dent->__pad, 0, sizeof(dent->__pad));
	memcpy(dent + 1, name->name, name->len);
}
78
79#endif /* __DIR_DOT_H__ */
diff --git a/fs/gfs2/eaops.c b/fs/gfs2/eaops.c
new file mode 100644
index 000000000000..92c54e9b0dc3
--- /dev/null
+++ b/fs/gfs2/eaops.c
@@ -0,0 +1,230 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/xattr.h>
16#include <linux/gfs2_ondisk.h>
17#include <linux/lm_interface.h>
18#include <asm/uaccess.h>
19
20#include "gfs2.h"
21#include "incore.h"
22#include "acl.h"
23#include "eaops.h"
24#include "eattr.h"
25#include "util.h"
26
27/**
28 * gfs2_ea_name2type - get the type of the ea, and truncate type from the name
29 * @namep: ea name, possibly with type appended
30 *
31 * Returns: GFS2_EATYPE_XXX
32 */
33
34unsigned int gfs2_ea_name2type(const char *name, const char **truncated_name)
35{
36 unsigned int type;
37
38 if (strncmp(name, "system.", 7) == 0) {
39 type = GFS2_EATYPE_SYS;
40 if (truncated_name)
41 *truncated_name = name + sizeof("system.") - 1;
42 } else if (strncmp(name, "user.", 5) == 0) {
43 type = GFS2_EATYPE_USR;
44 if (truncated_name)
45 *truncated_name = name + sizeof("user.") - 1;
46 } else if (strncmp(name, "security.", 9) == 0) {
47 type = GFS2_EATYPE_SECURITY;
48 if (truncated_name)
49 *truncated_name = name + sizeof("security.") - 1;
50 } else {
51 type = GFS2_EATYPE_UNUSED;
52 if (truncated_name)
53 *truncated_name = NULL;
54 }
55
56 return type;
57}
58
59static int user_eo_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
60{
61 struct inode *inode = &ip->i_inode;
62 int error = permission(inode, MAY_READ, NULL);
63 if (error)
64 return error;
65
66 return gfs2_ea_get_i(ip, er);
67}
68
/* Set a "user." EA: only regular files and non-sticky directories may
   carry one, and the caller needs write permission on the inode
   (same policy as user_eo_remove()) */
static int user_eo_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct inode *inode = &ip->i_inode;

	if (S_ISREG(inode->i_mode) ||
	    (S_ISDIR(inode->i_mode) && !(inode->i_mode & S_ISVTX))) {
		int error = permission(inode, MAY_WRITE, NULL);
		if (error)
			return error;
	} else
		return -EPERM;

	return gfs2_ea_set_i(ip, er);
}
83
/* Remove a "user." EA: identical permission policy to user_eo_set() */
static int user_eo_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct inode *inode = &ip->i_inode;

	if (S_ISREG(inode->i_mode) ||
	    (S_ISDIR(inode->i_mode) && !(inode->i_mode & S_ISVTX))) {
		int error = permission(inode, MAY_WRITE, NULL);
		if (error)
			return error;
	} else
		return -EPERM;

	return gfs2_ea_remove_i(ip, er);
}
98
99static int system_eo_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
100{
101 if (!GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len) &&
102 !GFS2_ACL_IS_DEFAULT(er->er_name, er->er_name_len) &&
103 !capable(CAP_SYS_ADMIN))
104 return -EPERM;
105
106 if (GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl == 0 &&
107 (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len) ||
108 GFS2_ACL_IS_DEFAULT(er->er_name, er->er_name_len)))
109 return -EOPNOTSUPP;
110
111
112
113 return gfs2_ea_get_i(ip, er);
114}
115
/* Set a "system." EA: only POSIX ACL attributes are allowed. Validation
   may decide the ACL is redundant, in which case it is removed instead
   of stored. Anything else in the system namespace is rejected. */
static int system_eo_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	int remove = 0;
	int error;

	if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) {
		/* Access ACL: make sure a mode accompanies the request,
		   since the ACL may fold permission bits back into it */
		if (!(er->er_flags & GFS2_ERF_MODE)) {
			er->er_mode = ip->i_di.di_mode;
			er->er_flags |= GFS2_ERF_MODE;
		}
		error = gfs2_acl_validate_set(ip, 1, er,
					      &remove, &er->er_mode);
		if (error)
			return error;
		error = gfs2_ea_set_i(ip, er);
		if (error)
			return error;
		/* Removal failure is deliberately ignored here */
		if (remove)
			gfs2_ea_remove_i(ip, er);
		return 0;

	} else if (GFS2_ACL_IS_DEFAULT(er->er_name, er->er_name_len)) {
		error = gfs2_acl_validate_set(ip, 0, er,
					      &remove, NULL);
		if (error)
			return error;
		if (!remove)
			error = gfs2_ea_set_i(ip, er);
		else {
			/* Removing an absent default ACL is not an error */
			error = gfs2_ea_remove_i(ip, er);
			if (error == -ENODATA)
				error = 0;
		}
		return error;
	}

	return -EPERM;
}
154
/* Remove a "system." EA: only POSIX ACL attributes may be removed,
   and only after ACL-specific validation succeeds */
static int system_eo_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	if (GFS2_ACL_IS_ACCESS(er->er_name, er->er_name_len)) {
		int error = gfs2_acl_validate_remove(ip, 1);
		if (error)
			return error;

	} else if (GFS2_ACL_IS_DEFAULT(er->er_name, er->er_name_len)) {
		int error = gfs2_acl_validate_remove(ip, 0);
		if (error)
			return error;

	} else
		return -EPERM;

	return gfs2_ea_remove_i(ip, er);
}
172
173static int security_eo_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
174{
175 struct inode *inode = &ip->i_inode;
176 int error = permission(inode, MAY_READ, NULL);
177 if (error)
178 return error;
179
180 return gfs2_ea_get_i(ip, er);
181}
182
183static int security_eo_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
184{
185 struct inode *inode = &ip->i_inode;
186 int error = permission(inode, MAY_WRITE, NULL);
187 if (error)
188 return error;
189
190 return gfs2_ea_set_i(ip, er);
191}
192
/* Remove a "security." EA: requires write permission on the inode */
static int security_eo_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
{
	struct inode *inode = &ip->i_inode;
	int error = permission(inode, MAY_WRITE, NULL);
	if (error)
		return error;

	return gfs2_ea_remove_i(ip, er);
}
202
/* Operations for the "user." EA namespace */
static struct gfs2_eattr_operations gfs2_user_eaops = {
	.eo_get = user_eo_get,
	.eo_set = user_eo_set,
	.eo_remove = user_eo_remove,
	.eo_name = "user",
};

/* Operations for the "system." EA namespace (POSIX ACLs live here);
   non-static because other parts of GFS2 reference it directly */
struct gfs2_eattr_operations gfs2_system_eaops = {
	.eo_get = system_eo_get,
	.eo_set = system_eo_set,
	.eo_remove = system_eo_remove,
	.eo_name = "system",
};

/* Operations for the "security." EA namespace */
static struct gfs2_eattr_operations gfs2_security_eaops = {
	.eo_get = security_eo_get,
	.eo_set = security_eo_set,
	.eo_remove = security_eo_remove,
	.eo_name = "security",
};

/* Dispatch table, presumably indexed by the GFS2_EATYPE_* values
   returned from gfs2_ea_name2type(); slot 0 is the unused type.
   TODO(review): confirm ordering matches GFS2_EATYPE_* in gfs2_ondisk.h */
struct gfs2_eattr_operations *gfs2_ea_ops[] = {
	NULL,
	&gfs2_user_eaops,
	&gfs2_system_eaops,
	&gfs2_security_eaops,
};
230
diff --git a/fs/gfs2/eaops.h b/fs/gfs2/eaops.h
new file mode 100644
index 000000000000..508b4f7a2449
--- /dev/null
+++ b/fs/gfs2/eaops.h
@@ -0,0 +1,30 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __EAOPS_DOT_H__
11#define __EAOPS_DOT_H__
12
13struct gfs2_ea_request;
14struct gfs2_inode;
15
16struct gfs2_eattr_operations {
17 int (*eo_get) (struct gfs2_inode *ip, struct gfs2_ea_request *er);
18 int (*eo_set) (struct gfs2_inode *ip, struct gfs2_ea_request *er);
19 int (*eo_remove) (struct gfs2_inode *ip, struct gfs2_ea_request *er);
20 char *eo_name;
21};
22
23unsigned int gfs2_ea_name2type(const char *name, const char **truncated_name);
24
25extern struct gfs2_eattr_operations gfs2_system_eaops;
26
27extern struct gfs2_eattr_operations *gfs2_ea_ops[];
28
29#endif /* __EAOPS_DOT_H__ */
30
diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c
new file mode 100644
index 000000000000..a65a4ccfd4dd
--- /dev/null
+++ b/fs/gfs2/eattr.c
@@ -0,0 +1,1501 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/xattr.h>
16#include <linux/gfs2_ondisk.h>
17#include <linux/lm_interface.h>
18#include <asm/uaccess.h>
19
20#include "gfs2.h"
21#include "incore.h"
22#include "acl.h"
23#include "eaops.h"
24#include "eattr.h"
25#include "glock.h"
26#include "inode.h"
27#include "meta_io.h"
28#include "quota.h"
29#include "rgrp.h"
30#include "trans.h"
31#include "util.h"
32
33/**
34 * ea_calc_size - returns the acutal number of bytes the request will take up
35 * (not counting any unstuffed data blocks)
36 * @sdp:
37 * @er:
38 * @size:
39 *
40 * Returns: 1 if the EA should be stuffed
41 */
42
43static int ea_calc_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er,
44 unsigned int *size)
45{
46 *size = GFS2_EAREQ_SIZE_STUFFED(er);
47 if (*size <= sdp->sd_jbsize)
48 return 1;
49
50 *size = GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er);
51
52 return 0;
53}
54
55static int ea_check_size(struct gfs2_sbd *sdp, struct gfs2_ea_request *er)
56{
57 unsigned int size;
58
59 if (er->er_data_len > GFS2_EA_MAX_DATA_LEN)
60 return -ERANGE;
61
62 ea_calc_size(sdp, er, &size);
63
64 /* This can only happen with 512 byte blocks */
65 if (size > sdp->sd_jbsize)
66 return -ERANGE;
67
68 return 0;
69}
70
/* Per-EA callback: a non-zero return stops the walk and is propagated
   to the caller (a positive value is used to signal "found" - see
   ea_find_i()/gfs2_ea_find()) */
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev, void *private);

/* Walk every EA header in a single EA block, validating the record
   chain as we go, and invoke @ea_call on each. Returns the callback's
   non-zero value, -EIO on a corrupt chain, or 0. */
static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
			ea_call_t ea_call, void *data)
{
	struct gfs2_ea_header *ea, *prev = NULL;
	int error = 0;

	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
		return -EIO;

	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
		/* Sanity-check the record before trusting its contents:
		   non-zero length, within the block, valid type */
		if (!GFS2_EA_REC_LEN(ea))
			goto fail;
		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
						  bh->b_data + bh->b_size))
			goto fail;
		if (!GFS2_EATYPE_VALID(ea->ea_type))
			goto fail;

		error = ea_call(ip, bh, ea, prev, data);
		if (error)
			return error;

		if (GFS2_EA_IS_LAST(ea)) {
			/* The last record must end exactly at block end */
			if ((char *)GFS2_EA2NEXT(ea) !=
			    bh->b_data + bh->b_size)
				goto fail;
			break;
		}
	}

	return error;

fail:
	gfs2_consist_inode(ip);
	return -EIO;
}
111
/* Walk every EA of @ip, calling @ea_call on each. Handles both the
   direct layout (di_eattr points at a single EA block) and the
   indirect layout (di_eattr points at a block of EA block pointers).
   Returns the callback's non-zero value, or an errno, or 0. */
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
	struct buffer_head *bh, *eabh;
	u64 *eablk, *end;
	int error;

	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
	if (error)
		return error;

	/* Direct case: di_eattr is itself the (only) EA block */
	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
		error = ea_foreach_i(ip, bh, ea_call, data);
		goto out;
	}

	/* Indirect case: bh must be an indirect (pointer) block */
	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
		error = -EIO;
		goto out;
	}

	eablk = (u64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
	end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

	/* Pointer array is packed: the first zero entry terminates it */
	for (; eablk < end; eablk++) {
		u64 bn;

		if (!*eablk)
			break;
		bn = be64_to_cpu(*eablk);

		error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, &eabh);
		if (error)
			break;
		error = ea_foreach_i(ip, eabh, ea_call, data);
		brelse(eabh);
		if (error)
			break;
	}
out:
	brelse(bh);
	return error;
}
154
/* Context for the ea_find_i() callback: the request being matched and
   the location to fill in when a match is found */
struct ea_find {
	struct gfs2_ea_request *ef_er;
	struct gfs2_ea_location *ef_el;
};
159
/* ea_foreach() callback: match an EA by type and name. On a match,
   record its location (taking a reference on the buffer) and return 1
   to stop the walk; otherwise return 0 to continue. */
static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_find *ef = private;
	struct gfs2_ea_request *er = ef->ef_er;

	/* Skip tombstoned records */
	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (ea->ea_type == er->er_type) {
		if (ea->ea_name_len == er->er_name_len &&
		    !memcmp(GFS2_EA2NAME(ea), er->er_name, ea->ea_name_len)) {
			struct gfs2_ea_location *el = ef->ef_el;
			/* el_bh holds a reference; caller must brelse() */
			get_bh(bh);
			el->el_bh = bh;
			el->el_ea = ea;
			el->el_prev = prev;
			return 1;
		}
	}

	return 0;
}
184
185int gfs2_ea_find(struct gfs2_inode *ip, struct gfs2_ea_request *er,
186 struct gfs2_ea_location *el)
187{
188 struct ea_find ef;
189 int error;
190
191 ef.ef_er = er;
192 ef.ef_el = el;
193
194 memset(el, 0, sizeof(struct gfs2_ea_location));
195
196 error = ea_foreach(ip, ea_find_i, &ef);
197 if (error > 0)
198 return 0;
199
200 return error;
201}
202
/**
 * ea_dealloc_unstuffed - free the data blocks of one unstuffed EA
 * @ip: the inode
 * @bh: the EA block the header lives in
 * @ea: the EA header whose data blocks are freed
 * @prev: the preceding EA header in the block (or NULL)
 * @private: non-NULL means "leave the header in place" (see caller)
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG. But watch, this may not always
 * be true.
 *
 * Returns: errno
 */

static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
				struct gfs2_ea_header *ea,
				struct gfs2_ea_header *prev, void *private)
{
	int *leave = private;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder rg_gh;
	struct buffer_head *dibh;
	u64 *dataptrs, bn = 0;
	u64 bstart = 0;
	unsigned int blen = 0;
	unsigned int blks = 0;
	unsigned int x;
	int error;

	/* Stuffed EAs have no separate data blocks to free */
	if (GFS2_EA_IS_STUFFED(ea))
		return 0;

	/* First pass: count the data blocks (bn ends up holding any one
	   of them, used below to find the resource group) */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (*dataptrs) {
			blks++;
			bn = be64_to_cpu(*dataptrs);
		}
	}
	if (!blks)
		return 0;

	rgd = gfs2_blk2rgrpd(sdp, bn);
	if (!rgd) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, rgd->rd_ri.ri_length + RES_DINODE +
				 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
	if (error)
		goto out_gunlock;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	/* Second pass: free the blocks, coalescing contiguous runs into
	   single gfs2_free_meta() calls */
	dataptrs = GFS2_EA2DATAPTRS(ea);
	for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
		if (!*dataptrs)
			break;
		bn = be64_to_cpu(*dataptrs);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_free_meta(ip, bstart, blen);
			bstart = bn;
			blen = 1;
		}

		*dataptrs = 0;
		if (!ip->i_di.di_blocks)
			gfs2_consist_inode(ip);
		ip->i_di.di_blocks--;
	}
	/* Flush the final run */
	if (bstart)
		gfs2_free_meta(ip, bstart, blen);

	/* Remove the header too (unless told to leave it): either merge
	   its record into the previous one, or tombstone it */
	if (prev && !leave) {
		u32 len;

		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
		prev->ea_rec_len = cpu_to_be32(len);

		if (GFS2_EA_IS_LAST(ea))
			prev->ea_flags |= GFS2_EAFLAG_LAST;
	} else {
		ea->ea_type = GFS2_EATYPE_UNUSED;
		ea->ea_num_ptrs = 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		ip->i_di.di_ctime = get_seconds();
		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}

	gfs2_trans_end(sdp);

out_gunlock:
	gfs2_glock_dq_uninit(&rg_gh);
	return error;
}
314
/* Wrapper around ea_dealloc_unstuffed() that acquires the allocation
   context, quota and rindex locks it needs, and releases them after.
   Returns: errno */
static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
			       struct gfs2_ea_header *ea,
			       struct gfs2_ea_header *prev, int leave)
{
	struct gfs2_alloc *al;
	int error;

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_alloc;

	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
	if (error)
		goto out_quota;

	/* The private pointer doubles as the "leave the header" flag;
	   any non-NULL pointer works, &error is just convenient */
	error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

	gfs2_glock_dq_uninit(&al->al_ri_gh);

out_quota:
	gfs2_quota_unhold(ip);
out_alloc:
	gfs2_alloc_put(ip);
	return error;
}
342
/* Context for the ea_list_i() callback: the listing request and the
   number of bytes of output produced so far */
struct ea_list {
	struct gfs2_ea_request *ei_er;
	unsigned int ei_size;
};
347
/* ea_foreach() callback for listing: append "prefix.name\0" for each
   EA to the request buffer (or, when er_data_len is 0, just total up
   the sizes so the caller can size a buffer). Returns -ERANGE when
   the supplied buffer is too small. */
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
		     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
		     void *private)
{
	struct ea_list *ei = private;
	struct gfs2_ea_request *er = ei->ei_er;
	unsigned int ea_size = gfs2_ea_strlen(ea);

	/* Skip tombstoned records */
	if (ea->ea_type == GFS2_EATYPE_UNUSED)
		return 0;

	if (er->er_data_len) {
		char *prefix = NULL;
		unsigned int l = 0;
		char c = 0;

		if (ei->ei_size + ea_size > er->er_data_len)
			return -ERANGE;

		switch (ea->ea_type) {
		case GFS2_EATYPE_USR:
			prefix = "user.";
			l = 5;
			break;
		case GFS2_EATYPE_SYS:
			prefix = "system.";
			l = 7;
			break;
		case GFS2_EATYPE_SECURITY:
			prefix = "security.";
			l = 9;
			break;
		}

		/* Unknown types were filtered by ea_foreach_i()'s
		   GFS2_EATYPE_VALID check, so l must be set here */
		BUG_ON(l == 0);

		memcpy(er->er_data + ei->ei_size, prefix, l);
		memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
		       ea->ea_name_len);
		/* Terminating NUL (ea_size includes it) */
		memcpy(er->er_data + ei->ei_size + ea_size - 1, &c, 1);
	}

	ei->ei_size += ea_size;

	return 0;
}
394
395/**
396 * gfs2_ea_list - list the names of all extended attributes of an inode
397 * @ip: The GFS2 inode
398 * @er: The request structure (er_data/er_data_len describe the buffer;
 *      a NULL buffer means "return the required size only")
399 *
400 * Returns: actual size of data on success, -errno on error
401 */
402
403int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er)
404{
405	struct gfs2_holder i_gh;
406	int error;
407
	/* normalize a degenerate request to the "size query" form */
408	if (!er->er_data || !er->er_data_len) {
409		er->er_data = NULL;
410		er->er_data_len = 0;
411	}
412
413	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
414	if (error)
415		return error;
416
417	if (ip->i_di.di_eattr) {
418		struct ea_list ei = { .ei_er = er, .ei_size = 0 };
419
420		error = ea_foreach(ip, ea_list_i, &ei);
421		if (!error)
422			error = ei.ei_size;
423	}
424
425	gfs2_glock_dq_uninit(&i_gh);
426
427	return error;
428}
429
430/**
431 * ea_get_unstuffed - actually copies the unstuffed data into the
432 * request buffer
433 * @ip: The GFS2 inode
434 * @ea: The extended attribute header structure
435 * @data: The data to be copied
436 *
437 * Returns: errno
438 */
439
440static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
441			    char *data)
442{
443	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
444	struct buffer_head **bh;
445	unsigned int amount = GFS2_EA_DATA_LEN(ea);
446	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
447	u64 *dataptrs = GFS2_EA2DATAPTRS(ea);
448	unsigned int x;
449	int error = 0;
450
451	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
452	if (!bh)
453		return -ENOMEM;
454
	/* issue all the data-block reads asynchronously first ... */
455	for (x = 0; x < nptrs; x++) {
456		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
457				       bh + x);
458		if (error) {
			/* drop the buffers already obtained */
459			while (x--)
460				brelse(bh[x]);
461			goto out;
462		}
463		dataptrs++;
464	}
465
	/* ... then wait for each, verify it, and copy out its payload */
466	for (x = 0; x < nptrs; x++) {
467		error = gfs2_meta_wait(sdp, bh[x]);
468		if (error) {
469			for (; x < nptrs; x++)
470				brelse(bh[x]);
471			goto out;
472		}
473		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
474			for (; x < nptrs; x++)
475				brelse(bh[x]);
476			error = -EIO;
477			goto out;
478		}
479
		/* last block may hold less than a full journaled block */
480		memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header),
481		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
482
483		amount -= sdp->sd_jbsize;
484		data += sdp->sd_jbsize;
485
486		brelse(bh[x]);
487	}
488
489out:
490	kfree(bh);
491	return error;
492}
493
/*
 * gfs2_ea_get_copy - copy an EA's value into @data, handling both the
 * stuffed (in-block) and unstuffed (external data blocks) layouts.
 * Returns: errno
 */
494int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
495		     char *data)
496{
497	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
498		memcpy(data, GFS2_EA2DATA(el->el_ea), GFS2_EA_DATA_LEN(el->el_ea));
499		return 0;
500	} else
501		return ea_get_unstuffed(ip, el->el_ea, data);
502}
503
504/**
505 * gfs2_ea_get_i - look up an EA and copy its value (lock already held)
506 * @ip: The GFS2 inode
507 * @er: The request structure
508 *
509 * Returns: actual size of data on success, -errno on error
510 */
511
512int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
513{
514	struct gfs2_ea_location el;
515	int error;
516
517	if (!ip->i_di.di_eattr)
518		return -ENODATA;
519
520	error = gfs2_ea_find(ip, er, &el);
521	if (error)
522		return error;
523	if (!el.el_ea)
524		return -ENODATA;
525
	/* er_data_len == 0 is a size query: skip the copy entirely */
526	if (er->er_data_len) {
527		if (GFS2_EA_DATA_LEN(el.el_ea) > er->er_data_len)
528			error = -ERANGE;
529		else
530			error = gfs2_ea_get_copy(ip, &el, er->er_data);
531	}
532	if (!error)
533		error = GFS2_EA_DATA_LEN(el.el_ea);
534
535	brelse(el.el_bh);
536
537	return error;
538}
539
540/**
541 * gfs2_ea_get - validate a getxattr request and dispatch it under a
 * shared glock via the per-type operations table
542 * @ip: The GFS2 inode
543 * @er: The request structure
544 *
545 * Returns: actual size of data on success, -errno on error
546 */
547
548int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er)
549{
550	struct gfs2_holder i_gh;
551	int error;
552
553	if (!er->er_name_len ||
554	    er->er_name_len > GFS2_EA_MAX_NAME_LEN)
555		return -EINVAL;
	/* normalize a degenerate buffer to the "size query" form */
556	if (!er->er_data || !er->er_data_len) {
557		er->er_data = NULL;
558		er->er_data_len = 0;
559	}
560
561	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
562	if (error)
563		return error;
564
565	error = gfs2_ea_ops[er->er_type]->eo_get(ip, er);
566
567	gfs2_glock_dq_uninit(&i_gh);
568
569	return error;
570}
571
572/**
573 * ea_alloc_blk - allocates a new block for extended attributes.
574 * @ip: A pointer to the inode that's getting extended attributes
575 * @bhp: Pointer to pointer to a struct buffer_head
576 *
577 * Returns: errno
578 */
579
580static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
581{
582	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
583	struct gfs2_ea_header *ea;
584	u64 block;
585
586	block = gfs2_alloc_meta(ip);
587
	/* add the new block to the journal and stamp it as an EA block */
588	*bhp = gfs2_meta_new(ip->i_gl, block);
589	gfs2_trans_add_bh(ip->i_gl, *bhp, 1);
590	gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
591	gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));
592
	/* one empty EA record spanning the whole journaled block */
593	ea = GFS2_EA_BH2FIRST(*bhp);
594	ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
595	ea->ea_type = GFS2_EATYPE_UNUSED;
596	ea->ea_flags = GFS2_EAFLAG_LAST;
597	ea->ea_num_ptrs = 0;
598
599	ip->i_di.di_blocks++;
600
601	return 0;
602}
603
604/**
605 * ea_write - writes the request info to an ea, creating new blocks if
606 * necessary
607 * @ip: inode that is being modified
608 * @ea: the location of the new ea in a block
609 * @er: the write request
610 *
611 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
612 *
613 * returns : errno
614 */
615
616static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
617		    struct gfs2_ea_request *er)
618{
619	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
620
621	ea->ea_data_len = cpu_to_be32(er->er_data_len);
622	ea->ea_name_len = er->er_name_len;
623	ea->ea_type = er->er_type;
624	ea->__pad = 0;
625
626	memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);
627
	/* small values are stuffed inline after the name ... */
628	if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
629		ea->ea_num_ptrs = 0;
630		memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
631	} else {
		/* ... larger values go into freshly allocated data blocks
		   referenced by an array of block pointers */
632		u64 *dataptr = GFS2_EA2DATAPTRS(ea);
633		const char *data = er->er_data;
634		unsigned int data_len = er->er_data_len;
635		unsigned int copy;
636		unsigned int x;
637
638		ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
639		for (x = 0; x < ea->ea_num_ptrs; x++) {
640			struct buffer_head *bh;
641			u64 block;
642			int mh_size = sizeof(struct gfs2_meta_header);
643
644			block = gfs2_alloc_meta(ip);
645
646			bh = gfs2_meta_new(ip->i_gl, block);
647			gfs2_trans_add_bh(ip->i_gl, bh, 1);
648			gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);
649
650			ip->i_di.di_blocks++;
651
652			copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
653							   data_len;
653			memcpy(bh->b_data + mh_size, data, copy);
			/* zero the unused tail of the final block */
655			if (copy < sdp->sd_jbsize)
656				memset(bh->b_data + mh_size + copy, 0,
657				       sdp->sd_jbsize - copy);
658
659			*dataptr++ = cpu_to_be64(bh->b_blocknr);
660			data += copy;
661			data_len -= copy;
662
663			brelse(bh);
664		}
665
666		gfs2_assert_withdraw(sdp, !data_len);
667	}
668
669	return 0;
670}
671
/* Callback invoked by ea_alloc_skeleton() once the allocation,
   quota and transaction framework is in place. */
672typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
673				   struct gfs2_ea_request *er, void *private);

/*
 * ea_alloc_skeleton - common framing for EA operations that allocate
 * @blks new blocks: quota lock/check, in-place reservation, transaction
 * begin, then @skeleton_call, then dinode update and teardown in
 * reverse order via the goto chain.
 */
675static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
676			     unsigned int blks,
677			     ea_skeleton_call_t skeleton_call, void *private)
678{
679	struct gfs2_alloc *al;
680	struct buffer_head *dibh;
681	int error;
682
683	al = gfs2_alloc_get(ip);
684
685	error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
686	if (error)
687		goto out;
688
689	error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
690	if (error)
691		goto out_gunlock_q;
692
693	al->al_requested = blks;
694
695	error = gfs2_inplace_reserve(ip);
696	if (error)
697		goto out_gunlock_q;
698
699	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
700				 blks + al->al_rgd->rd_ri.ri_length +
701				 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
702	if (error)
703		goto out_ipres;
704
705	error = skeleton_call(ip, er, private);
706	if (error)
707		goto out_end_trans;
708
	/* on success, push the updated dinode through the journal */
709	error = gfs2_meta_inode_buffer(ip, &dibh);
710	if (!error) {
711		if (er->er_flags & GFS2_ERF_MODE) {
			/* mode update must not change the file type bits */
712			gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
713					    (ip->i_di.di_mode & S_IFMT) ==
714					    (er->er_mode & S_IFMT));
715			ip->i_di.di_mode = er->er_mode;
716		}
717		ip->i_di.di_ctime = get_seconds();
718		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
719		gfs2_dinode_out(&ip->i_di, dibh->b_data);
720		brelse(dibh);
721	}
722
723out_end_trans:
724	gfs2_trans_end(GFS2_SB(&ip->i_inode));
725out_ipres:
726	gfs2_inplace_release(ip);
727out_gunlock_q:
728	gfs2_quota_unlock(ip);
729out:
730	gfs2_alloc_put(ip);
731	return error;
732}
733
/*
 * ea_init_i - skeleton callback: allocate the inode's first EA block,
 * point di_eattr at it and write the request into it.
 */
734static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
735		     void *private)
736{
737	struct buffer_head *bh;
738	int error;
739
740	error = ea_alloc_blk(ip, &bh);
741	if (error)
742		return error;
743
744	ip->i_di.di_eattr = bh->b_blocknr;
745	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
746
747	brelse(bh);
748
749	return error;
750}
751
752/**
753 * ea_init - initializes a new eattr block
754 * @ip: The GFS2 inode
755 * @er: The request to be stored in the new block
756 *
757 * Returns: errno
758 */
759
760static int ea_init(struct gfs2_inode *ip, struct gfs2_ea_request *er)
761{
	/* one block for the EA header, plus data blocks if unstuffed */
762	unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
763	unsigned int blks = 1;
764
765	if (GFS2_EAREQ_SIZE_STUFFED(er) > jbsize)
766		blks += DIV_ROUND_UP(er->er_data_len, jbsize);
767
768	return ea_alloc_skeleton(ip, er, blks, ea_init_i, NULL);
769}
770
/*
 * ea_split_ea - split the free tail of an EA record into a new UNUSED
 * record placed directly after it; the LAST flag (if set) migrates to
 * the new record.  Returns the new record.
 */
771static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
772{
773	u32 ea_size = GFS2_EA_SIZE(ea);
774	struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
775						     ea_size);
776	u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
777	int last = ea->ea_flags & GFS2_EAFLAG_LAST;
778
779	ea->ea_rec_len = cpu_to_be32(ea_size);
	/* clears LAST on @ea only when it was set */
780	ea->ea_flags ^= last;
781
782	new->ea_rec_len = cpu_to_be32(new_size);
783	new->ea_flags = last;
784
785	return new;
786}
787
/*
 * ea_set_remove_stuffed - remove a stuffed EA record after a set has
 * replaced it: either mark it UNUSED, or coalesce it into the previous
 * record by extending prev's rec_len (inheriting the LAST flag).
 */
788static void ea_set_remove_stuffed(struct gfs2_inode *ip,
789				  struct gfs2_ea_location *el)
790{
791	struct gfs2_ea_header *ea = el->el_ea;
792	struct gfs2_ea_header *prev = el->el_prev;
793	u32 len;
794
795	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
796
797	if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
798		ea->ea_type = GFS2_EATYPE_UNUSED;
799		return;
800	} else if (GFS2_EA2NEXT(prev) != ea) {
		/* prev may be one record behind; step it forward once */
801		prev = GFS2_EA2NEXT(prev);
802		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
803	}
804
805	len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
806	prev->ea_rec_len = cpu_to_be32(len);
807
808	if (GFS2_EA_IS_LAST(ea))
809		prev->ea_flags |= GFS2_EAFLAG_LAST;
810}
811
/* State shared across the ea_set_simple()/ea_set_block() paths of a
   setxattr: the request, the old EA to remove (if replacing), and the
   block/record chosen to hold the new value. */
812struct ea_set {
813	int ea_split;
814
815	struct gfs2_ea_request *es_er;
816	struct gfs2_ea_location *es_el;
817
818	struct buffer_head *es_bh;
819	struct gfs2_ea_header *es_ea;
820};
821
/*
 * ea_set_simple_noalloc - write a stuffed EA into an existing record
 * (splitting it first if needed) without allocating any blocks; runs
 * its own small transaction and updates the dinode.
 */
822static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
823				 struct gfs2_ea_header *ea, struct ea_set *es)
824{
825	struct gfs2_ea_request *er = es->es_er;
826	struct buffer_head *dibh;
827	int error;
828
829	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
830	if (error)
831		return error;
832
833	gfs2_trans_add_bh(ip->i_gl, bh, 1);
834
835	if (es->ea_split)
836		ea = ea_split_ea(ea);
837
838	ea_write(ip, ea, er);
839
	/* replacing: retire the old stuffed record */
840	if (es->es_el)
841		ea_set_remove_stuffed(ip, es->es_el);
842
843	error = gfs2_meta_inode_buffer(ip, &dibh);
844	if (error)
845		goto out;
846
847	if (er->er_flags & GFS2_ERF_MODE) {
848		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
849			(ip->i_di.di_mode & S_IFMT) == (er->er_mode & S_IFMT));
850		ip->i_di.di_mode = er->er_mode;
851	}
852	ip->i_di.di_ctime = get_seconds();
853	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
854	gfs2_dinode_out(&ip->i_di, dibh->b_data);
855	brelse(dibh);
856out:
857	gfs2_trans_end(GFS2_SB(&ip->i_inode));
858	return error;
859}
860
/*
 * ea_set_simple_alloc - skeleton callback: write an unstuffed EA into
 * the record picked earlier by ea_set_simple() (allocation framing is
 * provided by ea_alloc_skeleton()).
 */
861static int ea_set_simple_alloc(struct gfs2_inode *ip,
862			       struct gfs2_ea_request *er, void *private)
863{
864	struct ea_set *es = private;
865	struct gfs2_ea_header *ea = es->es_ea;
866	int error;
867
868	gfs2_trans_add_bh(ip->i_gl, es->es_bh, 1);
869
870	if (es->ea_split)
871		ea = ea_split_ea(ea);
872
873	error = ea_write(ip, ea, er);
874	if (error)
875		return error;
876
877	if (es->es_el)
878		ea_set_remove_stuffed(ip, es->es_el);
879
880	return 0;
881}
882
/*
 * ea_set_simple - ea_foreach() callback that tries to place the new EA
 * into record @ea: either reuse an UNUSED record that is big enough, or
 * split the free tail off a live record.  Returns 1 when the set was
 * performed (stops iteration), 0 to keep scanning, -errno on error.
 */
883static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
884			 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
885			 void *private)
886{
887	struct ea_set *es = private;
888	unsigned int size;
889	int stuffed;
890	int error;
891
892	stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er, &size);
893
894	if (ea->ea_type == GFS2_EATYPE_UNUSED) {
895		if (GFS2_EA_REC_LEN(ea) < size)
896			return 0;
		/* free any stale unstuffed data still hanging off it */
897		if (!GFS2_EA_IS_STUFFED(ea)) {
898			error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
899			if (error)
900				return error;
901		}
902		es->ea_split = 0;
903	} else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
904		es->ea_split = 1;
905	else
906		return 0;
907
908	if (stuffed) {
909		error = ea_set_simple_noalloc(ip, bh, ea, es);
910		if (error)
911			return error;
912	} else {
		/* unstuffed: needs new data blocks, go through the
		   allocation skeleton */
913		unsigned int blks;
914
915		es->es_bh = bh;
916		es->es_ea = ea;
917		blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
918					GFS2_SB(&ip->i_inode)->sd_jbsize);
919
920		error = ea_alloc_skeleton(ip, es->es_er, blks,
921					  ea_set_simple_alloc, es);
922		if (error)
923			return error;
924	}
925
926	return 1;
927}
928
/*
 * ea_set_block - skeleton callback used when no existing record can
 * hold the new EA: allocate a fresh EA block and link it in, creating
 * or extending the indirect EA pointer block as needed.  @private, if
 * set, is the old gfs2_ea_location to retire afterwards.
 */
929static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
930			void *private)
931{
932	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
933	struct buffer_head *indbh, *newbh;
934	u64 *eablk;
935	int error;
936	int mh_size = sizeof(struct gfs2_meta_header);
937
938	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
		/* already indirect: find the first free slot */
939		u64 *end;
940
941		error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
942				       &indbh);
943		if (error)
944			return error;
945
946		if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
947			error = -EIO;
948			goto out;
949		}
950
951		eablk = (u64 *)(indbh->b_data + mh_size);
952		end = eablk + sdp->sd_inptrs;
953
954		for (; eablk < end; eablk++)
955			if (!*eablk)
956				break;
957
958		if (eablk == end) {
959			error = -ENOSPC;
960			goto out;
961		}
962
963		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
964	} else {
		/* first overflow: create the indirect block, its first
		   slot pointing at the existing direct EA block */
965		u64 blk;
966
967		blk = gfs2_alloc_meta(ip);
968
969		indbh = gfs2_meta_new(ip->i_gl, blk);
970		gfs2_trans_add_bh(ip->i_gl, indbh, 1);
971		gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
972		gfs2_buffer_clear_tail(indbh, mh_size);
973
974		eablk = (u64 *)(indbh->b_data + mh_size);
975		*eablk = cpu_to_be64(ip->i_di.di_eattr);
976		ip->i_di.di_eattr = blk;
977		ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
978		ip->i_di.di_blocks++;
979
980		eablk++;
981	}
982
983	error = ea_alloc_blk(ip, &newbh);
984	if (error)
985		goto out;
986
987	*eablk = cpu_to_be64((u64)newbh->b_blocknr);
988	error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
989	brelse(newbh);
990	if (error)
991		goto out;
992
993	if (private)
994		ea_set_remove_stuffed(ip, private);
995
996out:
997	brelse(indbh);
998	return error;
999}
1000
/*
 * ea_set_i - store an EA: first try to fit it into an existing record
 * (ea_set_simple), otherwise allocate a new EA block via the skeleton
 * (ea_set_block).  @el, if non-NULL, is the old record being replaced.
 */
1001static int ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
1002		    struct gfs2_ea_location *el)
1003{
1004	struct ea_set es;
1005	unsigned int blks = 2;
1006	int error;
1007
1008	memset(&es, 0, sizeof(struct ea_set));
1009	es.es_er = er;
1010	es.es_el = el;
1011
	/* >0 means ea_set_simple placed the EA; done */
1012	error = ea_foreach(ip, ea_set_simple, &es);
1013	if (error > 0)
1014		return 0;
1015	if (error)
1016		return error;
1017
	/* extra block if we must also create the indirect block */
1018	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
1019		blks++;
1020	if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
1021		blks += DIV_ROUND_UP(er->er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);
1022
1023	return ea_alloc_skeleton(ip, er, blks, ea_set_block, el);
1024}
1025
/*
 * ea_set_remove_unstuffed - after a replace, free the data blocks of
 * the old unstuffed EA record (re-syncing el_prev first if necessary).
 */
1026static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
1027				   struct gfs2_ea_location *el)
1028{
1029	if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
1030		el->el_prev = GFS2_EA2NEXT(el->el_prev);
1031		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
1032				     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
1033	}
1034
1035	return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev,0);
1036}
1037
/*
 * gfs2_ea_set_i - setxattr with the inode glock already held: honors
 * XATTR_CREATE/XATTR_REPLACE semantics and the append-only flag.
 * Returns: errno
 */
1038int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1039{
1040	struct gfs2_ea_location el;
1041	int error;
1042
1043	if (!ip->i_di.di_eattr) {
1044		if (er->er_flags & XATTR_REPLACE)
1045			return -ENODATA;
1046		return ea_init(ip, er);
1047	}
1048
1049	error = gfs2_ea_find(ip, er, &el);
1050	if (error)
1051		return error;
1052
1053	if (el.el_ea) {
1054		if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
1055			brelse(el.el_bh);
1056			return -EPERM;
1057		}
1058
		/* EA exists: fail XATTR_CREATE, otherwise replace it */
1059		error = -EEXIST;
1060		if (!(er->er_flags & XATTR_CREATE)) {
1061			int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
1062			error = ea_set_i(ip, er, &el);
			/* old value lived in data blocks: free them now */
1063			if (!error && unstuffed)
1064				ea_set_remove_unstuffed(ip, &el);
1065		}
1066
1067		brelse(el.el_bh);
1068	} else {
		/* EA absent: fail XATTR_REPLACE, otherwise create it */
1069		error = -ENODATA;
1070		if (!(er->er_flags & XATTR_REPLACE))
1071			error = ea_set_i(ip, er, NULL);
1072	}
1073
1074	return error;
1075}
1076
/*
 * gfs2_ea_set - validate a setxattr request and dispatch it under an
 * exclusive glock via the per-type operations table.
 * Returns: errno
 */
1077int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1078{
1079	struct gfs2_holder i_gh;
1080	int error;
1081
1082	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
1083		return -EINVAL;
	/* an empty value is stored as a zero-length EA */
1084	if (!er->er_data || !er->er_data_len) {
1085		er->er_data = NULL;
1086		er->er_data_len = 0;
1087	}
1088	error = ea_check_size(GFS2_SB(&ip->i_inode), er);
1089	if (error)
1090		return error;
1091
1092	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1093	if (error)
1094		return error;
1095
1096	if (IS_IMMUTABLE(&ip->i_inode))
1097		error = -EPERM;
1098	else
1099		error = gfs2_ea_ops[er->er_type]->eo_set(ip, er);
1100
1101	gfs2_glock_dq_uninit(&i_gh);
1102
1103	return error;
1104}
1105
/*
 * ea_remove_stuffed - remove a stuffed EA record in a small
 * transaction: coalesce it into the previous record, or mark it UNUSED
 * when it is the first record; then update the dinode's ctime.
 */
1106static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
1107{
1108	struct gfs2_ea_header *ea = el->el_ea;
1109	struct gfs2_ea_header *prev = el->el_prev;
1110	struct buffer_head *dibh;
1111	int error;
1112
1113	error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
1114	if (error)
1115		return error;
1116
1117	gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
1118
1119	if (prev) {
1120		u32 len;
1121
1122		len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
1123		prev->ea_rec_len = cpu_to_be32(len);
1124
1125		if (GFS2_EA_IS_LAST(ea))
1126			prev->ea_flags |= GFS2_EAFLAG_LAST;
1127	} else
1128		ea->ea_type = GFS2_EATYPE_UNUSED;
1129
1130	error = gfs2_meta_inode_buffer(ip, &dibh);
1131	if (!error) {
1132		ip->i_di.di_ctime = get_seconds();
1133		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1134		gfs2_dinode_out(&ip->i_di, dibh->b_data);
1135		brelse(dibh);
1136	}
1137
1138	gfs2_trans_end(GFS2_SB(&ip->i_inode));
1139
1140	return error;
1141}
1142
/*
 * gfs2_ea_remove_i - removexattr with the inode glock already held:
 * locate the EA and dispatch to the stuffed/unstuffed removal path.
 * Returns: errno (-ENODATA when the attribute does not exist)
 */
1143int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1144{
1145	struct gfs2_ea_location el;
1146	int error;
1147
1148	if (!ip->i_di.di_eattr)
1149		return -ENODATA;
1150
1151	error = gfs2_ea_find(ip, er, &el);
1152	if (error)
1153		return error;
1154	if (!el.el_ea)
1155		return -ENODATA;
1156
1157	if (GFS2_EA_IS_STUFFED(el.el_ea))
1158		error = ea_remove_stuffed(ip, &el);
1159	else
1160		error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev,
1161					    0);
1162
1163	brelse(el.el_bh);
1164
1165	return error;
1166}
1167
1168/**
1169 * gfs2_ea_remove - removes the named extended attribute from a file
1170 * @ip: pointer to the inode of the target file
1171 * @er: request information
1172 *
1173 * Returns: errno
1174 */
1175
1176int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er)
1177{
1178	struct gfs2_holder i_gh;
1179	int error;
1180
1181	if (!er->er_name_len || er->er_name_len > GFS2_EA_MAX_NAME_LEN)
1182		return -EINVAL;
1183
1184	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
1185	if (error)
1186		return error;
1187
1188	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
1189		error = -EPERM;
1190	else
1191		error = gfs2_ea_ops[er->er_type]->eo_remove(ip, er);
1192
1193	gfs2_glock_dq_uninit(&i_gh);
1194
1195	return error;
1196}
1197
/*
 * ea_acl_chmod_unstuffed - rewrite the data blocks of an unstuffed ACL
 * EA in place with @data.  Starts the transaction itself; on SUCCESS
 * the transaction is left open for the caller (gfs2_ea_acl_chmod) to
 * finish, on failure it is ended here.
 */
1198static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip,
1199				  struct gfs2_ea_header *ea, char *data)
1200{
1201	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1202	struct buffer_head **bh;
1203	unsigned int amount = GFS2_EA_DATA_LEN(ea);
1204	unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
1205	u64 *dataptrs = GFS2_EA2DATAPTRS(ea);
1206	unsigned int x;
1207	int error;
1208
1209	bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_KERNEL);
1210	if (!bh)
1211		return -ENOMEM;
1212
1213	error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0);
1214	if (error)
1215		goto out;
1216
	/* issue all reads asynchronously first ... */
1217	for (x = 0; x < nptrs; x++) {
1218		error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0,
1219				       bh + x);
1220		if (error) {
1221			while (x--)
1222				brelse(bh[x]);
1223			goto fail;
1224		}
1225		dataptrs++;
1226	}
1227
	/* ... then wait, verify, journal and overwrite each block */
1228	for (x = 0; x < nptrs; x++) {
1229		error = gfs2_meta_wait(sdp, bh[x]);
1230		if (error) {
1231			for (; x < nptrs; x++)
1232				brelse(bh[x]);
1233			goto fail;
1234		}
1235		if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
1236			for (; x < nptrs; x++)
1237				brelse(bh[x]);
1238			error = -EIO;
1239			goto fail;
1240		}
1241
1242		gfs2_trans_add_bh(ip->i_gl, bh[x], 1);
1243
1244		memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data,
1245		       (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize);
1246
1247		amount -= sdp->sd_jbsize;
1248		data += sdp->sd_jbsize;
1249
1250		brelse(bh[x]);
1251	}
1252
1253out:
1254	kfree(bh);
1255	return error;
1256
1257fail:
1258	gfs2_trans_end(sdp);
1259	kfree(bh);
1260	return error;
1261}
1262
/*
 * gfs2_ea_acl_chmod - overwrite an ACL EA with @data and apply @attr
 * to the inode in the same transaction.  For the unstuffed case the
 * transaction is begun inside ea_acl_chmod_unstuffed() (and ended
 * there on failure); it is ended here on the success path.
 * Returns: errno
 */
1263int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
1264		      struct iattr *attr, char *data)
1265{
1266	struct buffer_head *dibh;
1267	int error;
1268
1269	if (GFS2_EA_IS_STUFFED(el->el_ea)) {
1270		error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
1271		if (error)
1272			return error;
1273
1274		gfs2_trans_add_bh(ip->i_gl, el->el_bh, 1);
1275		memcpy(GFS2_EA2DATA(el->el_ea), data,
1276		      GFS2_EA_DATA_LEN(el->el_ea));
1277	} else
1278		error = ea_acl_chmod_unstuffed(ip, el->el_ea, data);
1279
1280	if (error)
1281		return error;
1282
1283	error = gfs2_meta_inode_buffer(ip, &dibh);
1284	if (!error) {
1285		error = inode_setattr(&ip->i_inode, attr);
1286		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1287		gfs2_inode_attr_out(ip);
1288		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1289		gfs2_dinode_out(&ip->i_di, dibh->b_data);
1290		brelse(dibh);
1291	}
1292
1293	gfs2_trans_end(GFS2_SB(&ip->i_inode));
1294
1295	return error;
1296}
1297
/*
 * ea_dealloc_indirect - free every EA block referenced from the
 * indirect pointer block.  Pass 1 scans the pointers to build the
 * resource-group list (for locking and transaction sizing); pass 2,
 * under the rgrp glocks and a transaction, actually frees the blocks
 * in contiguous runs and clears GFS2_DIF_EA_INDIRECT.
 */
1298static int ea_dealloc_indirect(struct gfs2_inode *ip)
1299{
1300	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1301	struct gfs2_rgrp_list rlist;
1302	struct buffer_head *indbh, *dibh;
1303	u64 *eablk, *end;
1304	unsigned int rg_blocks = 0;
1305	u64 bstart = 0;
1306	unsigned int blen = 0;
1307	unsigned int blks = 0;
1308	unsigned int x;
1309	int error;
1310
1311	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
1312
1313	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &indbh);
1314	if (error)
1315		return error;
1316
1317	if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
1318		error = -EIO;
1319		goto out;
1320	}
1321
1322	eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1323	end = eablk + sdp->sd_inptrs;
1324
	/* pass 1: collect the resource groups the EA blocks live in,
	   grouping physically contiguous blocks into runs */
1325	for (; eablk < end; eablk++) {
1326		u64 bn;
1327
1328		if (!*eablk)
1329			break;
1330		bn = be64_to_cpu(*eablk);
1331
1332		if (bstart + blen == bn)
1333			blen++;
1334		else {
1335			if (bstart)
1336				gfs2_rlist_add(sdp, &rlist, bstart);
1337			bstart = bn;
1338			blen = 1;
1339		}
1340		blks++;
1341	}
1342	if (bstart)
1343		gfs2_rlist_add(sdp, &rlist, bstart);
1344	else
		/* no EA blocks referenced: nothing to free */
1345		goto out;
1346
1347	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, 0);
1348
1349	for (x = 0; x < rlist.rl_rgrps; x++) {
1350		struct gfs2_rgrpd *rgd;
1351		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
1352		rg_blocks += rgd->rd_ri.ri_length;
1353	}
1354
1355	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
1356	if (error)
1357		goto out_rlist_free;
1358
1359	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
1360				 RES_STATFS + RES_QUOTA, blks);
1361	if (error)
1362		goto out_gunlock;
1363
1364	gfs2_trans_add_bh(ip->i_gl, indbh, 1);
1365
	/* pass 2: free the runs and zero the pointer slots */
1366	eablk = (u64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
1367	bstart = 0;
1368	blen = 0;
1369
1370	for (; eablk < end; eablk++) {
1371		u64 bn;
1372
1373		if (!*eablk)
1374			break;
1375		bn = be64_to_cpu(*eablk);
1376
1377		if (bstart + blen == bn)
1378			blen++;
1379		else {
1380			if (bstart)
1381				gfs2_free_meta(ip, bstart, blen);
1382			bstart = bn;
1383			blen = 1;
1384		}
1385
1386		*eablk = 0;
1387		if (!ip->i_di.di_blocks)
1388			gfs2_consist_inode(ip);
1389		ip->i_di.di_blocks--;
1390	}
1391	if (bstart)
1392		gfs2_free_meta(ip, bstart, blen);
1393
1394	ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;
1395
1396	error = gfs2_meta_inode_buffer(ip, &dibh);
1397	if (!error) {
1398		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1399		gfs2_dinode_out(&ip->i_di, dibh->b_data);
1400		brelse(dibh);
1401	}
1402
1403	gfs2_trans_end(sdp);
1404
1405out_gunlock:
1406	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
1407out_rlist_free:
1408	gfs2_rlist_free(&rlist);
1409out:
1410	brelse(indbh);
1411	return error;
1412}
1413
/*
 * ea_dealloc_block - free the inode's (single remaining) EA block,
 * clear di_eattr and write the updated dinode, all under the block's
 * resource-group glock in one small transaction.
 */
1414static int ea_dealloc_block(struct gfs2_inode *ip)
1415{
1416	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1417	struct gfs2_alloc *al = &ip->i_alloc;
1418	struct gfs2_rgrpd *rgd;
1419	struct buffer_head *dibh;
1420	int error;
1421
1422	rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
1423	if (!rgd) {
		/* di_eattr points outside every resource group */
1424		gfs2_consist_inode(ip);
1425		return -EIO;
1426	}
1427
1428	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
1429				   &al->al_rgd_gh);
1430	if (error)
1431		return error;
1432
1433	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
1434				 RES_QUOTA, 1);
1435	if (error)
1436		goto out_gunlock;
1437
1438	gfs2_free_meta(ip, ip->i_di.di_eattr, 1);
1439
1440	ip->i_di.di_eattr = 0;
1441	if (!ip->i_di.di_blocks)
1442		gfs2_consist_inode(ip);
1443	ip->i_di.di_blocks--;
1444
1445	error = gfs2_meta_inode_buffer(ip, &dibh);
1446	if (!error) {
1447		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
1448		gfs2_dinode_out(&ip->i_di, dibh->b_data);
1449		brelse(dibh);
1450	}
1451
1452	gfs2_trans_end(sdp);
1453
1454out_gunlock:
1455	gfs2_glock_dq_uninit(&al->al_rgd_gh);
1456	return error;
1457}
1458
1459/**
1460 * gfs2_ea_dealloc - deallocate the extended attribute fork
1461 * @ip: the inode
1462 *
1463 * Returns: errno
1464 */
1465
1466int gfs2_ea_dealloc(struct gfs2_inode *ip)
1467{
1468	struct gfs2_alloc *al;
1469	int error;
1470
1471	al = gfs2_alloc_get(ip);
1472
1473	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
1474	if (error)
1475		goto out_alloc;
1476
1477	error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
1478	if (error)
1479		goto out_quota;
1480
	/* 1: free data blocks of every unstuffed EA */
1481	error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
1482	if (error)
1483		goto out_rindex;
1484
	/* 2: free the EA blocks behind the indirect block, if any */
1485	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
1486		error = ea_dealloc_indirect(ip);
1487		if (error)
1488			goto out_rindex;
1489	}
1490
	/* 3: free the last remaining block and clear di_eattr */
1491	error = ea_dealloc_block(ip);
1492
1493out_rindex:
1494	gfs2_glock_dq_uninit(&al->al_ri_gh);
1495out_quota:
1496	gfs2_quota_unhold(ip);
1497out_alloc:
1498	gfs2_alloc_put(ip);
1499	return error;
1500}
1501
diff --git a/fs/gfs2/eattr.h b/fs/gfs2/eattr.h
new file mode 100644
index 000000000000..ffa65947d686
--- /dev/null
+++ b/fs/gfs2/eattr.h
@@ -0,0 +1,100 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __EATTR_DOT_H__
11#define __EATTR_DOT_H__
12
13struct gfs2_inode;
14struct iattr;
15
/* Accessors for the on-disk (big-endian) EA record fields. */
16#define GFS2_EA_REC_LEN(ea) be32_to_cpu((ea)->ea_rec_len)
17#define GFS2_EA_DATA_LEN(ea) be32_to_cpu((ea)->ea_data_len)
18
/* Size of an EA record: header + name + either inline data (stuffed)
   or the array of data-block pointers (unstuffed), 8-byte aligned. */
19#define GFS2_EA_SIZE(ea) \
20ALIGN(sizeof(struct gfs2_ea_header) + (ea)->ea_name_len + \
21 ((GFS2_EA_IS_STUFFED(ea)) ? GFS2_EA_DATA_LEN(ea) : \
22 (sizeof(u64) * (ea)->ea_num_ptrs)), 8)
23
/* stuffed == value stored inline (no external data-block pointers) */
24#define GFS2_EA_IS_STUFFED(ea) (!(ea)->ea_num_ptrs)
25#define GFS2_EA_IS_LAST(ea) ((ea)->ea_flags & GFS2_EAFLAG_LAST)
26
/* Record size a request would occupy, in the stuffed layout. */
27#define GFS2_EAREQ_SIZE_STUFFED(er) \
28ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + (er)->er_data_len, 8)
29
/* Record size a request would occupy, in the unstuffed layout. */
30#define GFS2_EAREQ_SIZE_UNSTUFFED(sdp, er) \
31ALIGN(sizeof(struct gfs2_ea_header) + (er)->er_name_len + \
32 sizeof(u64) * DIV_ROUND_UP((er)->er_data_len, (sdp)->sd_jbsize), 8)
33
/* Navigation within an EA block: name, inline data, pointer array,
   next record, and the first record of a buffer. */
34#define GFS2_EA2NAME(ea) ((char *)((struct gfs2_ea_header *)(ea) + 1))
35#define GFS2_EA2DATA(ea) (GFS2_EA2NAME(ea) + (ea)->ea_name_len)
36
37#define GFS2_EA2DATAPTRS(ea) \
38((u64 *)(GFS2_EA2NAME(ea) + ALIGN((ea)->ea_name_len, 8)))
39
40#define GFS2_EA2NEXT(ea) \
41((struct gfs2_ea_header *)((char *)(ea) + GFS2_EA_REC_LEN(ea)))
42
43#define GFS2_EA_BH2FIRST(bh) \
44((struct gfs2_ea_header *)((bh)->b_data + sizeof(struct gfs2_meta_header)))
45
/* er_flags bit: the request also carries a new file mode to apply */
46#define GFS2_ERF_MODE 0x80000000
47
/* A single get/set/remove/list xattr request. */
48struct gfs2_ea_request {
49	const char *er_name;
50	char *er_data;
51	unsigned int er_name_len;
52	unsigned int er_data_len;
53	unsigned int er_type; /* GFS2_EATYPE_... */
54	int er_flags;
55	mode_t er_mode;
56};
57
/* Where an EA record was found: its buffer, the record itself, and
   the preceding record in the same block (NULL if first). */
58struct gfs2_ea_location {
59	struct buffer_head *el_bh;
60	struct gfs2_ea_header *el_ea;
61	struct gfs2_ea_header *el_prev;
62};
63
64int gfs2_ea_get_i(struct gfs2_inode *ip, struct gfs2_ea_request *er);
65int gfs2_ea_set_i(struct gfs2_inode *ip, struct gfs2_ea_request *er);
66int gfs2_ea_remove_i(struct gfs2_inode *ip, struct gfs2_ea_request *er);
67
68int gfs2_ea_list(struct gfs2_inode *ip, struct gfs2_ea_request *er);
69int gfs2_ea_get(struct gfs2_inode *ip, struct gfs2_ea_request *er);
70int gfs2_ea_set(struct gfs2_inode *ip, struct gfs2_ea_request *er);
71int gfs2_ea_remove(struct gfs2_inode *ip, struct gfs2_ea_request *er);
72
73int gfs2_ea_dealloc(struct gfs2_inode *ip);
74
75/* Exported to acl.c */
76
77int gfs2_ea_find(struct gfs2_inode *ip,
78		 struct gfs2_ea_request *er,
79		 struct gfs2_ea_location *el);
80int gfs2_ea_get_copy(struct gfs2_inode *ip,
81		     struct gfs2_ea_location *el,
82		     char *data);
83int gfs2_ea_acl_chmod(struct gfs2_inode *ip, struct gfs2_ea_location *el,
84		      struct iattr *attr, char *data);
85
/* Length of the listxattr entry for @ea: "user."/"system."/"security."
   prefix + name + trailing NUL; 0 for unknown types. */
86static inline unsigned int gfs2_ea_strlen(struct gfs2_ea_header *ea)
87{
88	switch (ea->ea_type) {
89	case GFS2_EATYPE_USR:
90		return 5 + ea->ea_name_len + 1;
91	case GFS2_EATYPE_SYS:
92		return 7 + ea->ea_name_len + 1;
93	case GFS2_EATYPE_SECURITY:
94		return 9 + ea->ea_name_len + 1;
95	default:
96		return 0;
97	}
98}
99
100#endif /* __EATTR_DOT_H__ */
diff --git a/fs/gfs2/gfs2.h b/fs/gfs2/gfs2.h
new file mode 100644
index 000000000000..3bb11c0f8b56
--- /dev/null
+++ b/fs/gfs2/gfs2.h
@@ -0,0 +1,31 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __GFS2_DOT_H__
11#define __GFS2_DOT_H__
12
/* Self-documenting boolean arguments used across the GFS2 sources. */
13enum {
14	NO_CREATE = 0,
15	CREATE = 1,
16};
17
18enum {
19	NO_WAIT = 0,
20	WAIT = 1,
21};
22
23enum {
24	NO_FORCE = 0,
25	FORCE = 1,
26};
27
28#define GFS2_FAST_NAME_SIZE 8
29
30#endif /* __GFS2_DOT_H__ */
31
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
new file mode 100644
index 000000000000..78fe0fae23ff
--- /dev/null
+++ b/fs/gfs2/glock.c
@@ -0,0 +1,2231 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/delay.h>
16#include <linux/sort.h>
17#include <linux/jhash.h>
18#include <linux/kallsyms.h>
19#include <linux/gfs2_ondisk.h>
20#include <linux/list.h>
21#include <linux/lm_interface.h>
22#include <asm/uaccess.h>
23
24#include "gfs2.h"
25#include "incore.h"
26#include "glock.h"
27#include "glops.h"
28#include "inode.h"
29#include "lm.h"
30#include "lops.h"
31#include "meta_io.h"
32#include "quota.h"
33#include "super.h"
34#include "util.h"
35
/* Deferred work item used to drop a glock's "greedy" status: the holder
   keeps a reference on the glock while gr_work is pending. */
struct greedy {
	struct gfs2_holder gr_gh;
	struct work_struct gr_work;
};

/* One chain of the global glock hash table. */
struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

/* Debug-dump helpers defined later in this file. */
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);
static int dump_inode(struct gfs2_inode *ip);

#define GFS2_GL_HASH_SHIFT 15
#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)	/* 32768 buckets */
#define GFS2_GL_HASH_MASK (GFS2_GL_HASH_SIZE - 1)

/* Global (not per-superblock) hash table; gl_hash() mixes in the sbd
   pointer so different mounts spread across buckets. */
static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
56
57/*
58 * Despite what you might think, the numbers below are not arbitrary :-)
59 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
61 * but for now this should be fine.
62 *
63 * The reason for putting the locks in a separate array from the list heads
64 * is that we can have fewer locks than list heads and save memory. We use
65 * the same hash function for both, but with a different hash mask.
66 */
67#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
68 defined(CONFIG_PROVE_LOCKING)
69
70#ifdef CONFIG_LOCKDEP
71# define GL_HASH_LOCK_SZ 256
72#else
73# if NR_CPUS >= 32
74# define GL_HASH_LOCK_SZ 4096
75# elif NR_CPUS >= 16
76# define GL_HASH_LOCK_SZ 2048
77# elif NR_CPUS >= 8
78# define GL_HASH_LOCK_SZ 1024
79# elif NR_CPUS >= 4
80# define GL_HASH_LOCK_SZ 512
81# else
82# define GL_HASH_LOCK_SZ 256
83# endif
84#endif
85
86/* We never want more locks than chains */
87#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
88# undef GL_HASH_LOCK_SZ
89# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
90#endif
91
92static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
93
/* Map a glock hash bucket index to the rwlock guarding its chain; there
   are fewer locks than chains, so several buckets share one lock. */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
98#else /* not SMP, so no spinlocks required */
99static inline rwlock_t *gl_lock_addr(x)
100{
101 return NULL;
102}
103#endif
104
105/**
106 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
107 * @actual: the current state of the lock
108 * @requested: the lock state that was requested by the caller
109 * @flags: the modifier flags passed in by the caller
110 *
111 * Returns: 1 if the locks are compatible, 0 otherwise
112 */
113
114static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
115 int flags)
116{
117 if (actual == requested)
118 return 1;
119
120 if (flags & GL_EXACT)
121 return 0;
122
123 if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
124 return 1;
125
126 if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
127 return 1;
128
129 return 0;
130}
131
132/**
133 * gl_hash() - Turn glock number into hash bucket number
134 * @lock: The glock number
135 *
136 * Returns: The number of the corresponding hash bucket
137 */
138
static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	/* Chain lock number, lock type and superblock pointer through
	   jhash so the same lock name on different mounts hashes to
	   different buckets of the shared global table. */
	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
151
152/**
153 * glock_free() - Perform a few checks and then release struct gfs2_glock
154 * @gl: The glock to release
155 *
156 * Also calls lock module to release its internal structure for this glock.
157 *
158 */
159
static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	/* Let the lock module tear down its per-lock state first. */
	gfs2_lm_put_lock(sdp, gl->gl_lock);

	/* Drop the address space attached for cached data/metadata, if any. */
	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}
172
173/**
174 * gfs2_glock_hold() - increment reference count on glock
175 * @gl: The glock to hold
176 *
177 */
178
void gfs2_glock_hold(struct gfs2_glock *gl)
{
	/* Paired with gfs2_glock_put(), which frees on the last drop. */
	atomic_inc(&gl->gl_ref);
}
183
184/**
185 * gfs2_glock_put() - Decrement reference count on glock
186 * @gl: The glock to put
187 *
188 */
189
190int gfs2_glock_put(struct gfs2_glock *gl)
191{
192 int rv = 0;
193 struct gfs2_sbd *sdp = gl->gl_sbd;
194
195 write_lock(gl_lock_addr(gl->gl_hash));
196 if (atomic_dec_and_test(&gl->gl_ref)) {
197 hlist_del(&gl->gl_list);
198 write_unlock(gl_lock_addr(gl->gl_hash));
199 BUG_ON(spin_is_locked(&gl->gl_spin));
200 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
201 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
202 gfs2_assert(sdp, list_empty(&gl->gl_holders));
203 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
204 gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
205 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
206 glock_free(gl);
207 rv = 1;
208 goto out;
209 }
210 write_unlock(gl_lock_addr(gl->gl_hash));
211out:
212 return rv;
213}
214
215/**
216 * queue_empty - check to see if a glock's queue is empty
217 * @gl: the glock
218 * @head: the head of the queue to check
219 *
220 * This function protects the list in the event that a process already
221 * has a holder on the list and is adding a second holder for itself.
222 * The glmutex lock is what generally prevents processes from working
223 * on the same glock at once, but the special case of adding a second
224 * holder for yourself ("recursive" locking) doesn't involve locking
225 * glmutex, making the spin lock necessary.
226 *
227 * Returns: 1 if the queue is empty
228 */
229
static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
	int empty;
	/* Snapshot taken under gl_spin; the answer may be stale by the
	   time the caller acts on it. */
	spin_lock(&gl->gl_spin);
	empty = list_empty(head);
	spin_unlock(&gl->gl_spin);
	return empty;
}
238
239/**
240 * search_bucket() - Find struct gfs2_glock by lock number
241 * @bucket: the bucket to search
242 * @name: The lock name
243 *
244 * Returns: NULL, or the struct gfs2_glock with the requested number
245 */
246
/* Caller must hold the bucket's gl_lock_addr() lock (read or write). */
static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		/* The table is global, so also match the superblock. */
		if (gl->gl_sbd != sdp)
			continue;

		/* Hand a reference to the caller. */
		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}
267
268/**
269 * gfs2_glock_find() - Find glock by lock number
270 * @sdp: The GFS2 superblock
271 * @name: The lock name
272 *
273 * Returns: NULL, or the struct gfs2_glock with the requested number
274 */
275
static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	/* search_bucket() takes a reference on a match for us. */
	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, name);
	read_unlock(gl_lock_addr(hash));

	return gl;
}
288
289/**
290 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
291 * @sdp: The GFS2 superblock
292 * @number: the lock number
293 * @glops: The glock_operations to use
294 * @create: If 0, don't create the glock if it doesn't exist
295 * @glp: the glock is returned here
296 *
297 * This does not lock a glock, just finds/creates structures for one.
298 *
299 * Returns: errno
300 */
301
int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	/* Fast path: the glock may already exist. */
	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	/* Initialise every field explicitly; the slab gives no guarantees. */
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);	/* reference returned to the caller */
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_hash = hash;
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	lops_init_le(&gl->gl_le, &gfs2_glock_lops);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	/* Re-search under the write lock: another task may have created
	   the same glock while we were allocating. If so, use theirs and
	   discard ours. */
	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}
377
378/**
379 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
380 * @gl: the glock
381 * @state: the state we're requesting
382 * @flags: the modifier flags
383 * @gh: the holder structure
384 *
385 */
386
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	/* Record the caller's address for debug dumps. */
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner = current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	init_completion(&gh->gh_wait);

	/* An exclusive request is implicitly node-local exclusive too. */
	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	/* The holder pins the glock until gfs2_holder_uninit(). */
	gfs2_glock_hold(gl);
}
405
406/**
407 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
408 * @state: the state we're requesting
409 * @flags: the modifier flags
410 * @gh: the holder structure
411 *
412 * Don't mess with the glock.
413 *
414 */
415
void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	/* Clear all internal flags except HIF_ALLOCED, which records that
	   this holder was kmalloc'd and must be kfree'd. */
	gh->gh_iflags &= 1 << HIF_ALLOCED;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}
426
427/**
428 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
429 * @gh: the holder structure
430 *
431 */
432
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	/* Drop the glock reference taken by gfs2_holder_init(). */
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}
439
440/**
441 * gfs2_holder_get - get a struct gfs2_holder structure
442 * @gl: the glock
443 * @state: the state we're requesting
444 * @flags: the modifier flags
445 * @gfp_flags:
446 *
447 * Figure out how big an impact this function has. Either:
448 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
449 * 2) Leave it like it is
450 *
451 * Returns: the holder structure, NULL on ENOMEM
452 */
453
static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
					   unsigned int state,
					   int flags, gfp_t gfp_flags)
{
	struct gfs2_holder *gh;

	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
	if (!gh)
		return NULL;

	gfs2_holder_init(gl, state, flags, gh);
	/* Mark as heap-allocated so gfs2_holder_put() knows to kfree it. */
	set_bit(HIF_ALLOCED, &gh->gh_iflags);
	/* Overwrite the gh_ip recorded by gfs2_holder_init() so it points
	   at our caller rather than at this wrapper. */
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	return gh;
}
469
470/**
471 * gfs2_holder_put - get rid of a struct gfs2_holder structure
472 * @gh: the holder structure
473 *
474 */
475
static void gfs2_holder_put(struct gfs2_holder *gh)
{
	/* Counterpart of gfs2_holder_get(): uninit then free. */
	gfs2_holder_uninit(gh);
	kfree(gh);
}
481
482/**
483 * rq_mutex - process a mutex request in the queue
484 * @gh: the glock holder
485 *
486 * Returns: 1 if the queue is blocked
487 */
488
/* Called by run_queue() with gl->gl_spin held. */
static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	/* Hand the glmutex to the waiter: set GLF_LOCK, then wake the task
	   blocked in gfs2_glmutex_lock(). */
	set_bit(GLF_LOCK, &gl->gl_flags);
	complete(&gh->gh_wait);

	return 1;
}
500
501/**
502 * rq_promote - process a promote request in the queue
503 * @gh: the glock holder
504 *
505 * Acquire a new inter-node lock, or change a lock state to more restrictive.
506 *
507 * Returns: 1 if the queue is blocked
508 */
509
/* Called by run_queue() with gl->gl_spin held; may drop and retake it. */
static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		/* Current mode is insufficient: if no one holds the glock,
		   start an inter-node state change; either way the queue
		   is blocked until it completes. */
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			/* Throttle: shed a couple of cached glocks when the
			   reclaim backlog is over the tunable limit (but not
			   for priority requests). */
			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		/* First holder: also takes the glmutex (GLF_LOCK). */
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		/* Node-local exclusivity: an EXCL-local holder cannot share
		   with existing holders, nor join one that is EXCL-local. */
		if (gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
	}

	/* Grant: move from the waiters3 queue onto the holders list. */
	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	complete(&gh->gh_wait);

	return 0;
}
556
557/**
558 * rq_demote - process a demote request in the queue
559 * @gh: the glock holder
560 *
561 * Returns: 1 if the queue is blocked
562 */
563
/* Called by run_queue() with gl->gl_spin held; may drop and retake it. */
static int rq_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	/* Cannot demote while anyone still holds the glock. */
	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
		/* Already at (or below) the requested state: just retire
		   the demote request. */
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
		spin_lock(&gl->gl_spin);
	} else {
		/* Start the inter-node transition; xmote_bh/drop_bh will
		   finish the request when the lock module calls back. */
		gl->gl_req_gh = gh;
		set_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);

		if (gh->gh_state == LM_ST_UNLOCKED ||
		    gl->gl_state != LM_ST_EXCLUSIVE)
			glops->go_drop_th(gl);
		else
			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

		spin_lock(&gl->gl_spin);
	}

	return 0;
}
597
598/**
599 * rq_greedy - process a queued request to drop greedy status
600 * @gh: the glock holder
601 *
602 * Returns: 1 if the queue is blocked
603 */
604
/* Called by run_queue() with gl->gl_spin held; drops and retakes it. */
static int rq_greedy(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	clear_bit(GLF_GREEDY, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	/* The holder is embedded in a struct greedy; free the whole thing. */
	gfs2_holder_uninit(gh);
	kfree(container_of(gh, struct greedy, gr_gh));

	spin_lock(&gl->gl_spin);

	return 0;
}
621
622/**
623 * run_queue - process holder structures on a glock
624 * @gl: the glock
625 *
626 */
/* Dispatch queued holder requests until one blocks the queue or the
   queues empty. Called with gl->gl_spin held; the rq_* helpers may drop
   and retake it. Priority: waiters1 (mutex) > waiters2 (demote/greedy,
   unless GLF_SKIP_WAITERS2) > waiters3 (promote). */
static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		/* Someone already owns the glmutex: nothing to do. */
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters2) &&
			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
			gh = list_entry(gl->gl_waiters2.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
				blocked = rq_demote(gh);
			else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
				blocked = rq_greedy(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}
673
674/**
675 * gfs2_glmutex_lock - acquire a local lock on a glock
676 * @gl: the glock
677 *
678 * Gives caller exclusive access to manipulate a glock structure.
679 */
680
static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	/* Temporary on-stack holder used purely as a wait token. */
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		/* Contended: queue on waiters1; rq_mutex() completes us. */
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	} else {
		/* Uncontended: record ownership and complete immediately. */
		gl->gl_owner = current;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		complete(&gh.gh_wait);
	}
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}
701
702/**
703 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
704 * @gl: the glock
705 *
706 * Returns: 1 if the glock is acquired
707 */
708
709static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
710{
711 int acquired = 1;
712
713 spin_lock(&gl->gl_spin);
714 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
715 acquired = 0;
716 } else {
717 gl->gl_owner = current;
718 gl->gl_ip = (unsigned long)__builtin_return_address(0);
719 }
720 spin_unlock(&gl->gl_spin);
721
722 return acquired;
723}
724
725/**
726 * gfs2_glmutex_unlock - release a local lock on a glock
727 * @gl: the glock
728 *
729 */
730
static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner = NULL;
	gl->gl_ip = 0;
	/* Releasing the glmutex may unblock queued requests. */
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}
741
742/**
743 * handle_callback - add a demote request to a lock's queue
744 * @gl: the glock
745 * @state: the state the caller wants us to change to
746 *
 * Note: This may fail silently if we are out of memory.
748 */
749
static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	struct gfs2_holder *gh, *new_gh = NULL;

restart:
	spin_lock(&gl->gl_spin);

	/* If a pending (not in-flight) demote request already exists,
	   merge into it; two differing target states collapse to a full
	   demote to UNLOCKED. */
	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
		    gl->gl_req_gh != gh) {
			if (gh->gh_state != state)
				gh->gh_state = LM_ST_UNLOCKED;
			goto out;
		}
	}

	if (new_gh) {
		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
		new_gh = NULL;
	} else {
		/* Allocate outside gl_spin, then retry the merge scan in
		   case a demote request appeared meanwhile. */
		spin_unlock(&gl->gl_spin);

		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_KERNEL);
		if (!new_gh)
			return;
		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

		goto restart;
	}

out:
	spin_unlock(&gl->gl_spin);

	/* Merged into an existing request: discard our allocation. */
	if (new_gh)
		gfs2_holder_put(new_gh);
}
787
/* Queue a demote-to-UNLOCKED request for an inode's glock and wait for
   it to complete. The glock must have no holders. */
void gfs2_glock_inode_squish(struct inode *inode)
{
	struct gfs2_holder gh;
	struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
	gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
	set_bit(HIF_DEMOTE, &gh.gh_iflags);
	spin_lock(&gl->gl_spin);
	gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
	list_add_tail(&gh.gh_list, &gl->gl_waiters2);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}
802
803/**
804 * state_change - record that the glock is now in a different state
805 * @gl: the glock
806 * @new_state the new state
807 *
808 */
809
static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	/* A glock holds a reference on itself for as long as it is in any
	   non-unlocked state; take/drop it across the transition. */
	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}
826
827/**
828 * xmote_bh - Called after the lock module is done acquiring a lock
829 * @gl: The glock in question
830 * @ret: the int returned from the lock module
831 *
832 */
833
static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	/* Invalidate cached data unless the lock module says the previous
	   contents remain cacheable across this transition. */
	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA | DIO_DATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, DIO_DATA);
	}

	/* Deal with each possible exit condition */

	if (!gh)
		/* No requesting holder (e.g. prefetch): just timestamp. */
		gl->gl_stamp = jiffies;
	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		/* Filesystem is being withdrawn: fail the request. */
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		spin_unlock(&gl->gl_spin);
	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
		/* Completion of a demote request. */
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		if (gl->gl_state == gh->gh_state ||
		    gl->gl_state == LM_ST_UNLOCKED) {
			gh->gh_error = 0;
		} else {
			if (gfs2_assert_warn(sdp, gh->gh_flags &
					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
				fs_warn(sdp, "ret = 0x%.8X\n", ret);
			gh->gh_error = GLR_TRYFAILED;
		}
		spin_unlock(&gl->gl_spin);

		if (ret & LM_OUT_CANCELED)
			handle_callback(gl, LM_ST_UNLOCKED);

	} else if (ret & LM_OUT_CANCELED) {
		/* Request was cancelled (see do_cancels()). */
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_CANCELED;
		spin_unlock(&gl->gl_spin);

	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		/* Promote succeeded: the requester becomes the first holder.
		   Leave GLF_LOCK set; glock_wait_internal() releases it. */
		spin_lock(&gl->gl_spin);
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh->gh_error = 0;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		spin_unlock(&gl->gl_spin);

		set_bit(HIF_FIRST, &gh->gh_iflags);

		op_done = 0;

	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		/* Try-lock did not reach the requested state. */
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_TRYFAILED;
		spin_unlock(&gl->gl_spin);

	} else {
		/* Should be impossible: withdraw the filesystem. */
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	/* Drop the reference taken by gfs2_glock_xmote_th(). */
	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}
933
934/**
935 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
936 * @gl: The glock in question
937 * @state: the requested state
938 * @flags: modifier flags to the lock call
939 *
940 */
941
void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	/* Only these modifier flags are meaningful to the lock module. */
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	/* Leaving EXCLUSIVE: push dirty data/metadata out first. */
	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);

	/* Reference held across the async operation; xmote_bh() drops it. */
	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		/* Completed synchronously: run the bottom half ourselves. */
		xmote_bh(gl, lck_ret);
}
972
973/**
974 * drop_bh - Called after a lock module unlock completes
975 * @gl: the glock
976 * @ret: the return status
977 *
978 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
979 * Doesn't drop the reference on the glock the top half took out
980 *
981 */
982
static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	/* Fully unlocked: any cached data/metadata is no longer protected. */
	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA | DIO_DATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_drop_bh)
		glops->go_drop_bh(gl);

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	/* Drop the reference taken by gfs2_glock_drop_th(). */
	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}
1026
1027/**
1028 * gfs2_glock_drop_th - call into the lock module to unlock a lock
1029 * @gl: the glock
1030 *
1031 */
1032
void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	/* Leaving EXCLUSIVE: push dirty data/metadata out first. */
	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
		glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);

	/* Reference held across the async operation; drop_bh() drops it. */
	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		/* Completed synchronously: run the bottom half ourselves. */
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}
1059
1060/**
1061 * do_cancels - cancel requests for locks stuck waiting on an expire flag
1062 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
1063 *
1064 * Don't cancel GL_NOCANCEL requests.
1065 */
1066
static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	/* Poll until our priority request is either in flight, granted, or
	   dequeued; repeatedly cancel whatever non-GL_NOCANCEL request is
	   blocking it, sleeping between attempts. */
	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			/* Nothing cancellable right now; just wait. */
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}
1091
1092/**
1093 * glock_wait_internal - wait on a glock acquisition
1094 * @gh: the glock holder
1095 *
1096 * Returns: 0 on success
1097 */
1098
static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	/* Try-locks don't wait: if the request is still queued and not in
	   flight, dequeue it and fail with GLR_TRYFAILED immediately. */
	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	/* Priority requests try to cancel whatever is in their way. */
	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_for_completion(&gh->gh_wait);

	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	/* First holder after a state change: run the per-type go_lock hook
	   and then release the GLF_LOCK left set by xmote_bh()/rq_promote(). */
	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}
1156
/* Return the first holder on @head queued by @owner, or NULL. */
static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}
1169
1170/**
1171 * add_to_queue - Add a holder to the wait queue (but look for recursion)
1172 * @gh: the holder structure to add
1173 *
1174 */
1175
/* Called with gl->gl_spin held. BUGs on same-task recursive locking. */
static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner);

	/* This task already holds this glock: report both call sites. */
	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
		printk(KERN_INFO "lock type : %d lock state : %d\n",
		       gl->gl_name.ln_type, gl->gl_state);
		BUG();
	}

	/* This task is already waiting on this glock. */
	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	/* Priority requests jump to the head of the promote queue. */
	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}
1208
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error; poll with
 * gfs2_glock_poll() and collect the result with gfs2_glock_wait().
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	/* Refuse new lock requests once the filesystem has shut down. */
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		/* A cancelled request is retried after a short back-off. */
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	/* GL_DUMP: dump the glock's state when a try-lock fails. */
	if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
		dump_glock(gl);

	return error;
}
1252
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * A cancelled request is transparently resubmitted after a back-off.
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		/* Off all queues: either completed or cancelled. */
		if (gh->gh_error == GLR_CANCELED) {
			/* Cancelled: back off and resubmit; if resubmission
			   itself fails, report "ready" so the caller waits
			   and picks up the error. */
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}
1284
/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		/* Cancelled: back off and retry, synchronously this time. */
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}
1305
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	/* GL_NOCACHE: request demotion to UNLOCKED once released. */
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		/* Last holder gone: the spinlock is dropped across the
		   go_unlock callback and retaken afterwards. */
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		/* Record when the glock last became idle. */
		gl->gl_stamp = jiffies;

		spin_lock(&gl->gl_spin);
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}
1340
/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 * Speculatively acquires the lock, but only when the glock is entirely
 * idle (not locked, no holders, no waiters) and not already in a state
 * compatible with @state; otherwise this is a no-op.
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
				int flags)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	spin_lock(&gl->gl_spin);

	/* Bail out unless the glock is idle and would actually change state. */
	if (test_bit(GLF_LOCK, &gl->gl_flags) || !list_empty(&gl->gl_holders) ||
	    !list_empty(&gl->gl_waiters1) || !list_empty(&gl->gl_waiters2) ||
	    !list_empty(&gl->gl_waiters3) ||
	    relaxed_state_ok(gl->gl_state, state, flags)) {
		spin_unlock(&gl->gl_spin);
		return;
	}

	set_bit(GLF_PREFETCH, &gl->gl_flags);
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	glops->go_xmote_th(gl, state, flags);
}
1370
/*
 * greedy_work - delayed-work handler scheduled by gfs2_glock_be_greedy()
 *
 * Ends the glock's "greedy" period: re-enables gl_waiters2 processing,
 * gives the glops layer a chance to react, and either tears the greedy
 * holder down (nobody waiting) or requeues it on gl_waiters2 so
 * run_queue() can process the waiters that accumulated meanwhile.
 */
static void greedy_work(void *data)
{
	struct greedy *gr = data;
	struct gfs2_holder *gh = &gr->gr_gh;
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

	if (glops->go_greedy)
		glops->go_greedy(gl);

	spin_lock(&gl->gl_spin);

	if (list_empty(&gl->gl_waiters2)) {
		/* Nobody queued on gl_waiters2: the greedy period simply
		   ends and the holder is freed. */
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		gfs2_holder_uninit(gh);
		kfree(gr);
	} else {
		/* Waiters accumulated: requeue the greedy holder so
		   run_queue() handles them.  The hold/put pair keeps the
		   glock alive across run_queue(). */
		gfs2_glock_hold(gl);
		list_add_tail(&gh->gh_list, &gl->gl_waiters2);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
		gfs2_glock_put(gl);
	}
}
1398
/**
 * gfs2_glock_be_greedy - hold on to a glock for a while before letting go
 * @gl: the glock
 * @time: delay, in jiffies, before greedy_work() runs
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
	struct greedy *gr;
	struct gfs2_holder *gh;

	/* Nothing to do for a zero delay, for local caching, or when a
	   greedy period is already in progress (test_and_set also claims
	   the GLF_GREEDY bit on success). */
	if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
	    test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
		return 1;

	gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
	if (!gr) {
		/* Allocation failed: release the bit claimed above. */
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		return 1;
	}
	gh = &gr->gr_gh;

	gfs2_holder_init(gl, 0, 0, gh);
	set_bit(HIF_GREEDY, &gh->gh_iflags);
	INIT_WORK(&gr->gr_work, greedy_work, gr);

	/* Defer gl_waiters2 processing until greedy_work() clears this. */
	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
	schedule_delayed_work(&gr->gr_work, time);

	return 0;
}
1432
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 * Convenience wrapper: release the glock, then tear the holder down.
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
1444
1445/**
1446 * gfs2_glock_nq_num - acquire a glock based on lock number
1447 * @sdp: the filesystem
1448 * @number: the lock number
1449 * @glops: the glock operations for the type of glock
1450 * @state: the state to acquire the glock in
1451 * @flags: modifier flags for the aquisition
1452 * @gh: the struct gfs2_holder
1453 *
1454 * Returns: errno
1455 */
1456
1457int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1458 const struct gfs2_glock_operations *glops,
1459 unsigned int state, int flags, struct gfs2_holder *gh)
1460{
1461 struct gfs2_glock *gl;
1462 int error;
1463
1464 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1465 if (!error) {
1466 error = gfs2_glock_nq_init(gl, state, flags, gh);
1467 gfs2_glock_put(gl);
1468 }
1469
1470 return error;
1471}
1472
1473/**
1474 * glock_compare - Compare two struct gfs2_glock structures for sorting
1475 * @arg_a: the first structure
1476 * @arg_b: the second structure
1477 *
1478 */
1479
1480static int glock_compare(const void *arg_a, const void *arg_b)
1481{
1482 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1483 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1484 const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1485 const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1486
1487 if (a->ln_number > b->ln_number)
1488 return 1;
1489 if (a->ln_number < b->ln_number)
1490 return -1;
1491 if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
1492 return 1;
1493 if (!(gh_a->gh_flags & GL_LOCAL_EXCL) && (gh_b->gh_flags & GL_LOCAL_EXCL))
1494 return 1;
1495 return 0;
1496}
1497
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of at least @num_gh holder pointers
 *
 * The holders are sorted (see glock_compare()) so that concurrent
 * callers acquire overlapping sets of glocks in the same global order,
 * avoiding ABBA deadlocks.
 *
 * Returns: 0 on success (all glocks acquired),
 * errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		/* Synchronous fallback: strip the fast-path flags so each
		   request blocks until granted. */
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			/* Back out everything acquired so far. */
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
1531
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * First tries every lock asynchronously with LM_FLAG_TRY; if anything
 * fails, all acquired locks are dropped and the sorted, synchronous
 * slow path (nq_m_sync()) is used instead.
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 * errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	/* A single lock needs no ordering; take it synchronously. */
	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	/* Sized for pointers, not ints, because the buffer does double
	   duty: first as the per-holder status array (e[x] below), then
	   — cast at the bottom — as the holder-pointer scratch array
	   for nq_m_sync().  sizeof(pointer) >= sizeof(int). */
	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	/* Fast path: submit every request as an async try-lock. */
	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;	/* only wait on requests submitted */
			break;
		}
	}

	/* Collect the result of each submitted request. */
	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			/* TRYFAILED/CANCELED are retryable; anything else
			   is a hard error. */
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	/* Drop whatever the fast path did manage to acquire. */
	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		/* Only retryable failures: fall back to the sorted,
		   synchronous slow path. */
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}
1607
1608/**
1609 * gfs2_glock_dq_m - release multiple glocks
1610 * @num_gh: the number of structures
1611 * @ghs: an array of struct gfs2_holder structures
1612 *
1613 */
1614
1615void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1616{
1617 unsigned int x;
1618
1619 for (x = 0; x < num_gh; x++)
1620 gfs2_glock_dq(&ghs[x]);
1621}
1622
1623/**
1624 * gfs2_glock_dq_uninit_m - release multiple glocks
1625 * @num_gh: the number of structures
1626 * @ghs: an array of struct gfs2_holder structures
1627 *
1628 */
1629
1630void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1631{
1632 unsigned int x;
1633
1634 for (x = 0; x < num_gh; x++)
1635 gfs2_glock_dq_uninit(&ghs[x]);
1636}
1637
/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the aquisition
 *
 * Best-effort: silently does nothing if the reclaim list is already
 * over its limit or the glock lookup fails.  (Returns nothing — the
 * old "Returns: errno" note was stale; the function is void.)
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
			     const struct gfs2_glock_operations *glops,
			     unsigned int state, int flags)
{
	struct gfs2_glock *gl;
	int error;

	/* Don't add prefetch pressure while reclaim is backed up. */
	if (atomic_read(&sdp->sd_reclaim_count) <
	    gfs2_tune_get(sdp, gt_reclaim_limit)) {
		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
		if (!error) {
			gfs2_glock_prefetch(gl, state, flags);
			gfs2_glock_put(gl);
		}
	}
}
1665
/**
 * gfs2_lvb_hold - attach a LVB from a glock
 * @gl: The glock in question
 *
 * Counted: the first hold obtains the lock value block from the lock
 * module and takes a glock reference; later holds only bump the count.
 *
 * Returns: 0 on success, or errno from gfs2_lm_hold_lvb()
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		/* Pin the glock for as long as the LVB is attached. */
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}
1692
/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 * Drops one LVB reference; the last unhold releases the LVB back to
 * the lock module and drops the glock reference taken by the first
 * gfs2_lvb_hold().
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	/* Temporary ref: the put inside the if-block must not be the
	   last one while we still hold the glmutex. */
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}
1714
/*
 * blocking_cb - handle a demote request from the lock module
 * @sdp: the filesystem
 * @name: identifies the glock being demoted
 * @state: the state another node needs us to drop to
 *
 * If the glock is still cached, notify the glops layer, record the
 * demote request via handle_callback() and kick the queue.  The
 * reference returned by gfs2_glock_find() is dropped at the end.
 */
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, state);
	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}
1734
/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = cb_data;

	switch (type) {
	/* Another node needs the lock: demote to the requested state. */
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	/* One of our asynchronous requests completed: run the glock's
	   completion handler with the lock module's result. */
	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);
		return;
	}

	/* A dead client's journal needs replaying: wake recoverd. */
	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	/* Lock manager is running out of capacity: shed cached locks. */
	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}
1792
1793/**
1794 * demote_ok - Check to see if it's ok to unlock a glock
1795 * @gl: the glock
1796 *
1797 * Returns: 1 if it's ok
1798 */
1799
1800static int demote_ok(struct gfs2_glock *gl)
1801{
1802 struct gfs2_sbd *sdp = gl->gl_sbd;
1803 const struct gfs2_glock_operations *glops = gl->gl_ops;
1804 int demote = 1;
1805
1806 if (test_bit(GLF_STICKY, &gl->gl_flags))
1807 demote = 0;
1808 else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
1809 demote = time_after_eq(jiffies, gl->gl_stamp +
1810 gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
1811 else if (glops->go_demote_ok)
1812 demote = glops->go_demote_ok(gl);
1813
1814 return demote;
1815}
1816
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * Takes a glock reference on behalf of the reclaim list (dropped by
 * gfs2_reclaim_glock()) and wakes the reclaim daemon.  A glock that is
 * already listed is left alone.
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}
1837
/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	/* Pop the oldest entry; the list's glock reference now belongs
	   to us and is dropped at the bottom. */
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	/* If the glock is idle and demotable, request demotion to
	   UNLOCKED; skip it entirely if the glmutex is contended. */
	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}
1874
/**
 * examine_bucket - Call a function for glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash bucket index
 *
 * The bucket lock cannot be held while @examiner runs, so the walk
 * keeps a reference on the current glock while the lock is dropped
 * ("prev"), releasing it only after stepping to the next entry.
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (gl->gl_sbd == sdp) {
			/* Hold gl so it survives while the bucket lock is
			   dropped around the examiner call. */
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		/* End of the hlist chain. */
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	return has_entries;
}
1917
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
	/* Inode glocks are not reclaimed here. */
	if (gl->gl_ops == &gfs2_inode_glops)
		return;

	/* Idle, locked and demotable glocks go on the reclaim list;
	   contended ones are simply skipped this pass. */
	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			goto out_schedule;
		gfs2_glmutex_unlock(gl);
	}
	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
}
1941
1942/**
1943 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
1944 * @sdp: the filesystem
1945 *
1946 */
1947
1948void gfs2_scand_internal(struct gfs2_sbd *sdp)
1949{
1950 unsigned int x;
1951
1952 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1953 examine_bucket(scan_glock, sdp, x);
1954}
1955
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	/* Pull the glock off the reclaim list first and drop the list's
	   reference; it must not be the last one (the caller,
	   examine_bucket(), holds another). */
	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	/* If the glock is idle but still locked, request demotion to
	   UNLOCKED so it can eventually be freed. */
	if (gfs2_glmutex_trylock(gl)) {
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}
}
1985
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	/* Repeatedly sweep the table until no bucket has entries for
	   this sb (or immediately return after one sweep if !wait). */
	for (;;) {
		cont = 0;
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
			if (examine_bucket(clear_glock, sdp, x))
				cont = 1;
		}

		if (!wait || !cont)
			break;

		/* Still not empty after gt_stall_secs: dump the lock
		   state to aid debugging, then keep trying. */
		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		invalidate_inodes(sdp->sd_vfs);
		msleep(10);
	}
}
2025
2026/*
2027 * Diagnostic routines to help debug distributed deadlock
2028 */
2029
2030/**
2031 * dump_holder - print information about a glock holder
2032 * @str: a string naming the type of holder
2033 * @gh: the glock holder
2034 *
2035 * Returns: 0 on success, -ENOBUFS when we run out of space
2036 */
2037
2038static int dump_holder(char *str, struct gfs2_holder *gh)
2039{
2040 unsigned int x;
2041 int error = -ENOBUFS;
2042
2043 printk(KERN_INFO " %s\n", str);
2044 printk(KERN_INFO " owner = %ld\n",
2045 (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
2046 printk(KERN_INFO " gh_state = %u\n", gh->gh_state);
2047 printk(KERN_INFO " gh_flags =");
2048 for (x = 0; x < 32; x++)
2049 if (gh->gh_flags & (1 << x))
2050 printk(" %u", x);
2051 printk(" \n");
2052 printk(KERN_INFO " error = %d\n", gh->gh_error);
2053 printk(KERN_INFO " gh_iflags =");
2054 for (x = 0; x < 32; x++)
2055 if (test_bit(x, &gh->gh_iflags))
2056 printk(" %u", x);
2057 printk(" \n");
2058 print_symbol(KERN_INFO " initialized at: %s\n", gh->gh_ip);
2059
2060 error = 0;
2061
2062 return error;
2063}
2064
2065/**
2066 * dump_inode - print information about an inode
2067 * @ip: the inode
2068 *
2069 * Returns: 0 on success, -ENOBUFS when we run out of space
2070 */
2071
2072static int dump_inode(struct gfs2_inode *ip)
2073{
2074 unsigned int x;
2075 int error = -ENOBUFS;
2076
2077 printk(KERN_INFO " Inode:\n");
2078 printk(KERN_INFO " num = %llu %llu\n",
2079 (unsigned long long)ip->i_num.no_formal_ino,
2080 (unsigned long long)ip->i_num.no_addr);
2081 printk(KERN_INFO " type = %u\n", IF2DT(ip->i_di.di_mode));
2082 printk(KERN_INFO " i_flags =");
2083 for (x = 0; x < 32; x++)
2084 if (test_bit(x, &ip->i_flags))
2085 printk(" %u", x);
2086 printk(" \n");
2087
2088 error = 0;
2089
2090 return error;
2091}
2092
/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Dumps the glock's name, flags, state, ownership and reference
 * counts, followed by every attached holder (request, granted, and
 * all three waiter queues) and, if idle, the cached inode.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;

	spin_lock(&gl->gl_spin);

	printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
	       (unsigned long long)gl->gl_name.ln_number);
	printk(KERN_INFO " gl_flags =");
	for (x = 0; x < 32; x++) {
		if (test_bit(x, &gl->gl_flags))
			printk(" %u", x);
	}
	printk(" \n");
	printk(KERN_INFO " gl_ref = %d\n", atomic_read(&gl->gl_ref));
	printk(KERN_INFO " gl_state = %u\n", gl->gl_state);
	printk(KERN_INFO " gl_owner = %s\n", gl->gl_owner->comm);
	print_symbol(KERN_INFO " gl_ip = %s\n", gl->gl_ip);
	printk(KERN_INFO " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
	printk(KERN_INFO " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
	printk(KERN_INFO " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
	printk(KERN_INFO " object = %s\n", (gl->gl_object) ? "yes" : "no");
	printk(KERN_INFO " le = %s\n",
	       (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
	printk(KERN_INFO " reclaim = %s\n",
	       (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
	if (gl->gl_aspace)
		printk(KERN_INFO " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
		       gl->gl_aspace->i_mapping->nrpages);
	else
		printk(KERN_INFO " aspace = no\n");
	printk(KERN_INFO " ail = %d\n", atomic_read(&gl->gl_ail_count));
	if (gl->gl_req_gh) {
		error = dump_holder("Request", gl->gl_req_gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder("Holder", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
		error = dump_holder("Waiter1", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		error = dump_holder("Waiter2", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
		error = dump_holder("Waiter3", gh);
		if (error)
			goto out;
	}
	/* Only dump the inode while nothing is mid-transition on it. */
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(gl->gl_object);
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			printk(KERN_INFO " Inode: busy\n");
		}
	}

	error = 0;

out:
	spin_unlock(&gl->gl_spin);
	return error;
}
2178
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps every glock in the global hash table that belongs to @sdp to
 * the console, stopping at the first dump_glock() error.
 *
 * Returns: 0, or the first error from dump_glock()
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

		read_lock(gl_lock_addr(x));

		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
			/* Skip glocks belonging to other mounted filesystems. */
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}


	return error;
}
2217
2218int __init gfs2_glock_init(void)
2219{
2220 unsigned i;
2221 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2222 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
2223 }
2224#ifdef GL_HASH_LOCK_SZ
2225 for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
2226 rwlock_init(&gl_hash_locks[i]);
2227 }
2228#endif
2229 return 0;
2230}
2231
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
new file mode 100644
index 000000000000..2b2a889ee2cc
--- /dev/null
+++ b/fs/gfs2/glock.h
@@ -0,0 +1,153 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __GLOCK_DOT_H__
11#define __GLOCK_DOT_H__
12
13#include "incore.h"
14
/* Flags for lock requests; used in gfs2_holder gh_flag field.
   From lm_interface.h:
#define LM_FLAG_TRY 0x00000001
#define LM_FLAG_TRY_1CB 0x00000002
#define LM_FLAG_NOEXP 0x00000004
#define LM_FLAG_ANY 0x00000008
#define LM_FLAG_PRIORITY 0x00000010 */

#define GL_LOCAL_EXCL 0x00000020
#define GL_ASYNC 0x00000040 /* gfs2_glock_nq() returns without waiting */
#define GL_EXACT 0x00000080
#define GL_SKIP 0x00000100
#define GL_ATIME 0x00000200
#define GL_NOCACHE 0x00000400 /* demote to UNLOCKED when dequeued */
#define GL_NOCANCEL 0x00001000
#define GL_AOP 0x00004000
#define GL_DUMP 0x00008000 /* dump glock state if a try-lock fails */

/* Distinguished non-errno results from gfs2_glock_nq()/gfs2_glock_wait() */
#define GLR_TRYFAILED 13
#define GLR_CANCELED 14
/*
 * gfs2_glock_is_locked_by_me - does the current task hold this glock?
 *
 * Returns: 1 if a granted holder on @gl is owned by the current task,
 * 0 otherwise.
 */
static inline int gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int locked = 0;

	/* Look in glock's list of holders for one with current task as owner */
	spin_lock(&gl->gl_spin);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (gh->gh_owner == current) {
			locked = 1;
			break;
		}
	}
	spin_unlock(&gl->gl_spin);

	return locked;
}
53
/* True if the glock is currently in the exclusive state. */
static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_EXCLUSIVE;
}
58
/* True if the glock is currently in the deferred state. */
static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_DEFERRED;
}
63
/* True if the glock is currently in the shared state. */
static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_SHARED;
}
68
/* True if anyone is queued on gl_waiters2 or gl_waiters3, i.e. the
   glock is blocking other requests in its current state. */
static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = !list_empty(&gl->gl_waiters2) || !list_empty(&gl->gl_waiters3);
	spin_unlock(&gl->gl_spin);
	return ret;
}
77
78int gfs2_glock_get(struct gfs2_sbd *sdp,
79 u64 number, const struct gfs2_glock_operations *glops,
80 int create, struct gfs2_glock **glp);
81void gfs2_glock_hold(struct gfs2_glock *gl);
82int gfs2_glock_put(struct gfs2_glock *gl);
83void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
84 struct gfs2_holder *gh);
85void gfs2_holder_reinit(unsigned int state, unsigned flags,
86 struct gfs2_holder *gh);
87void gfs2_holder_uninit(struct gfs2_holder *gh);
88
89void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags);
90void gfs2_glock_drop_th(struct gfs2_glock *gl);
91
92int gfs2_glock_nq(struct gfs2_holder *gh);
93int gfs2_glock_poll(struct gfs2_holder *gh);
94int gfs2_glock_wait(struct gfs2_holder *gh);
95void gfs2_glock_dq(struct gfs2_holder *gh);
96
97int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time);
98
99void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
100int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
101 u64 number, const struct gfs2_glock_operations *glops,
102 unsigned int state, int flags, struct gfs2_holder *gh);
103
104int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
105void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
106void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
107
108void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
109 const struct gfs2_glock_operations *glops,
110 unsigned int state, int flags);
111void gfs2_glock_inode_squish(struct inode *inode);
112
/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * On failure the holder is uninitialized again, so the caller need not
 * clean up @gh unless the call succeeded.
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, int flags,
				     struct gfs2_holder *gh)
{
	int error;

	gfs2_holder_init(gl, state, flags, gh);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}
137
138/* Lock Value Block functions */
139
140int gfs2_lvb_hold(struct gfs2_glock *gl);
141void gfs2_lvb_unhold(struct gfs2_glock *gl);
142
143void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
144
145void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
146void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
147
148void gfs2_scand_internal(struct gfs2_sbd *sdp);
149void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
150
151int __init gfs2_glock_init(void);
152
153#endif /* __GLOCK_DOT_H__ */
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
new file mode 100644
index 000000000000..41a6b6818a50
--- /dev/null
+++ b/fs/gfs2/glops.c
@@ -0,0 +1,615 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/gfs2_ondisk.h>
16#include <linux/lm_interface.h>
17
18#include "gfs2.h"
19#include "incore.h"
20#include "bmap.h"
21#include "glock.h"
22#include "glops.h"
23#include "inode.h"
24#include "log.h"
25#include "meta_io.h"
26#include "recovery.h"
27#include "rgrp.h"
28#include "util.h"
29#include "trans.h"
30
/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 * A revoke is written into the journal for every block removed, so the
 * replay code will never re-apply them.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	u64 blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

	/* Reserve enough journal space for one revoke per AIL block. */
	error = gfs2_trans_begin(sdp, 0, blocks);
	/* NOTE(review): gfs2_assert_withdraw() appears to evaluate true
	   when the assertion fails, i.e. when gfs2_trans_begin() errored;
	   hence the early return — confirm against util.h. */
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		/* Remember the block number before brelse() below. */
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&gl->gl_ail_count);
		brelse(bh);
		/* Drop the log lock around gfs2_trans_add_revoke();
		   re-take it before re-examining the list head. */
		gfs2_log_unlock(sdp);

		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
81
82/**
83 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
84 * @gl: the glock
85 *
86 */
87
88static void gfs2_pte_inval(struct gfs2_glock *gl)
89{
90 struct gfs2_inode *ip;
91 struct inode *inode;
92
93 ip = gl->gl_object;
94 inode = &ip->i_inode;
95 if (!ip || !S_ISREG(ip->i_di.di_mode))
96 return;
97
98 if (!test_bit(GIF_PAGED, &ip->i_flags))
99 return;
100
101 unmap_shared_mapping_range(inode->i_mapping, 0, 0);
102
103 if (test_bit(GIF_SW_PAGED, &ip->i_flags))
104 set_bit(GLF_DIRTY, &gl->gl_flags);
105
106 clear_bit(GIF_SW_PAGED, &ip->i_flags);
107}
108
109/**
110 * gfs2_page_inval - Invalidate all pages associated with a glock
111 * @gl: the glock
112 *
113 */
114
115static void gfs2_page_inval(struct gfs2_glock *gl)
116{
117 struct gfs2_inode *ip;
118 struct inode *inode;
119
120 ip = gl->gl_object;
121 inode = &ip->i_inode;
122 if (!ip || !S_ISREG(ip->i_di.di_mode))
123 return;
124
125 truncate_inode_pages(inode->i_mapping, 0);
126 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), !inode->i_mapping->nrpages);
127 clear_bit(GIF_PAGED, &ip->i_flags);
128}
129
130/**
131 * gfs2_page_wait - Wait for writeback of data
132 * @gl: the glock
133 *
134 * Syncs data (not metadata) for a regular file.
135 * No-op for all other types.
136 */
137
138static void gfs2_page_wait(struct gfs2_glock *gl)
139{
140 struct gfs2_inode *ip = gl->gl_object;
141 struct inode *inode = &ip->i_inode;
142 struct address_space *mapping = inode->i_mapping;
143 int error;
144
145 if (!S_ISREG(ip->i_di.di_mode))
146 return;
147
148 error = filemap_fdatawait(mapping);
149
150 /* Put back any errors cleared by filemap_fdatawait()
151 so they can be caught by someone who can pass them
152 up to user space. */
153
154 if (error == -ENOSPC)
155 set_bit(AS_ENOSPC, &mapping->flags);
156 else if (error)
157 set_bit(AS_EIO, &mapping->flags);
158
159}
160
161static void gfs2_page_writeback(struct gfs2_glock *gl)
162{
163 struct gfs2_inode *ip = gl->gl_object;
164 struct inode *inode = &ip->i_inode;
165 struct address_space *mapping = inode->i_mapping;
166
167 if (!S_ISREG(ip->i_di.di_mode))
168 return;
169
170 filemap_fdatawrite(mapping);
171}
172
173/**
174 * meta_go_sync - sync out the metadata for this glock
175 * @gl: the glock
176 * @flags: DIO_*
177 *
178 * Called when demoting or unlocking an EX glock. We must flush
179 * to disk all dirty buffers/pages relating to this glock, and must not
180 * not return to caller to demote/unlock the glock until I/O is complete.
181 */
182
183static void meta_go_sync(struct gfs2_glock *gl, int flags)
184{
185 if (!(flags & DIO_METADATA))
186 return;
187
188 if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
189 gfs2_log_flush(gl->gl_sbd, gl);
190 gfs2_meta_sync(gl);
191 if (flags & DIO_RELEASE)
192 gfs2_ail_empty_gl(gl);
193 }
194
195}
196
197/**
198 * meta_go_inval - invalidate the metadata for this glock
199 * @gl: the glock
200 * @flags:
201 *
202 */
203
204static void meta_go_inval(struct gfs2_glock *gl, int flags)
205{
206 if (!(flags & DIO_METADATA))
207 return;
208
209 gfs2_meta_inval(gl);
210 gl->gl_vn++;
211}
212
/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 * @state: the requested state
 * @flags:
 *
 * PTEs are shot down first so any state change is observed by mapped
 * users; then the generic state-change path runs.
 */

static void inode_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
			      int flags)
{
	/* Nothing to invalidate if we don't currently hold the lock. */
	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	gfs2_glock_xmote_th(gl, state, flags);
}
228
229/**
230 * inode_go_xmote_bh - After promoting/demoting a glock
231 * @gl: the glock
232 *
233 */
234
235static void inode_go_xmote_bh(struct gfs2_glock *gl)
236{
237 struct gfs2_holder *gh = gl->gl_req_gh;
238 struct buffer_head *bh;
239 int error;
240
241 if (gl->gl_state != LM_ST_UNLOCKED &&
242 (!gh || !(gh->gh_flags & GL_SKIP))) {
243 error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
244 if (!error)
245 brelse(bh);
246 }
247}
248
/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Another node needs the lock in EXCLUSIVE mode, or lock (unused for too long)
 * is being purged from our node's glock cache; we're dropping lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	/* Unmap PTEs before the lock actually goes away. */
	gfs2_pte_inval(gl);
	gfs2_glock_drop_th(gl);
}
263
264/**
265 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
266 * @gl: the glock protecting the inode
267 * @flags:
268 *
269 */
270
271static void inode_go_sync(struct gfs2_glock *gl, int flags)
272{
273 int meta = (flags & DIO_METADATA);
274 int data = (flags & DIO_DATA);
275
276 if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
277 if (meta && data) {
278 gfs2_page_writeback(gl);
279 gfs2_log_flush(gl->gl_sbd, gl);
280 gfs2_meta_sync(gl);
281 gfs2_page_wait(gl);
282 clear_bit(GLF_DIRTY, &gl->gl_flags);
283 } else if (meta) {
284 gfs2_log_flush(gl->gl_sbd, gl);
285 gfs2_meta_sync(gl);
286 } else if (data) {
287 gfs2_page_writeback(gl);
288 gfs2_page_wait(gl);
289 }
290 if (flags & DIO_RELEASE)
291 gfs2_ail_empty_gl(gl);
292 }
293}
294
295/**
296 * inode_go_inval - prepare a inode glock to be released
297 * @gl: the glock
298 * @flags:
299 *
300 */
301
302static void inode_go_inval(struct gfs2_glock *gl, int flags)
303{
304 int meta = (flags & DIO_METADATA);
305 int data = (flags & DIO_DATA);
306
307 if (meta) {
308 gfs2_meta_inval(gl);
309 gl->gl_vn++;
310 }
311 if (data)
312 gfs2_page_inval(gl);
313}
314
315/**
316 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
317 * @gl: the glock
318 *
319 * Returns: 1 if it's ok
320 */
321
322static int inode_go_demote_ok(struct gfs2_glock *gl)
323{
324 struct gfs2_sbd *sdp = gl->gl_sbd;
325 int demote = 0;
326
327 if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
328 demote = 1;
329 else if (!sdp->sd_args.ar_localcaching &&
330 time_after_eq(jiffies, gl->gl_stamp +
331 gfs2_tune_get(sdp, gt_demote_secs) * HZ))
332 demote = 1;
333
334 return demote;
335}
336
337/**
338 * inode_go_lock - operation done after an inode lock is locked by a process
339 * @gl: the glock
340 * @flags:
341 *
342 * Returns: errno
343 */
344
345static int inode_go_lock(struct gfs2_holder *gh)
346{
347 struct gfs2_glock *gl = gh->gh_gl;
348 struct gfs2_inode *ip = gl->gl_object;
349 int error = 0;
350
351 if (!ip)
352 return 0;
353
354 if (ip->i_vn != gl->gl_vn) {
355 error = gfs2_inode_refresh(ip);
356 if (error)
357 return error;
358 gfs2_inode_attr_in(ip);
359 }
360
361 if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
362 (gl->gl_state == LM_ST_EXCLUSIVE) &&
363 (gh->gh_flags & GL_LOCAL_EXCL))
364 error = gfs2_truncatei_resume(ip);
365
366 return error;
367}
368
369/**
370 * inode_go_unlock - operation done before an inode lock is unlocked by a
371 * process
372 * @gl: the glock
373 * @flags:
374 *
375 */
376
377static void inode_go_unlock(struct gfs2_holder *gh)
378{
379 struct gfs2_glock *gl = gh->gh_gl;
380 struct gfs2_inode *ip = gl->gl_object;
381
382 if (ip == NULL)
383 return;
384 if (test_bit(GLF_DIRTY, &gl->gl_flags))
385 gfs2_inode_attr_in(ip);
386 gfs2_meta_cache_flush(ip);
387}
388
389/**
390 * inode_greedy -
391 * @gl: the glock
392 *
393 */
394
395static void inode_greedy(struct gfs2_glock *gl)
396{
397 struct gfs2_sbd *sdp = gl->gl_sbd;
398 struct gfs2_inode *ip = gl->gl_object;
399 unsigned int quantum = gfs2_tune_get(sdp, gt_greedy_quantum);
400 unsigned int max = gfs2_tune_get(sdp, gt_greedy_max);
401 unsigned int new_time;
402
403 spin_lock(&ip->i_spin);
404
405 if (time_after(ip->i_last_pfault + quantum, jiffies)) {
406 new_time = ip->i_greedy + quantum;
407 if (new_time > max)
408 new_time = max;
409 } else {
410 new_time = ip->i_greedy - quantum;
411 if (!new_time || new_time > max)
412 new_time = 1;
413 }
414
415 ip->i_greedy = new_time;
416
417 spin_unlock(&ip->i_spin);
418
419 iput(&ip->i_inode);
420}
421
422/**
423 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
424 * @gl: the glock
425 *
426 * Returns: 1 if it's ok
427 */
428
429static int rgrp_go_demote_ok(struct gfs2_glock *gl)
430{
431 return !gl->gl_aspace->i_mapping->nrpages;
432}
433
434/**
435 * rgrp_go_lock - operation done after an rgrp lock is locked by
436 * a first holder on this node.
437 * @gl: the glock
438 * @flags:
439 *
440 * Returns: errno
441 */
442
443static int rgrp_go_lock(struct gfs2_holder *gh)
444{
445 return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
446}
447
448/**
449 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
450 * a last holder on this node.
451 * @gl: the glock
452 * @flags:
453 *
454 */
455
456static void rgrp_go_unlock(struct gfs2_holder *gh)
457{
458 gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
459}
460
461/**
462 * trans_go_xmote_th - promote/demote the transaction glock
463 * @gl: the glock
464 * @state: the requested state
465 * @flags:
466 *
467 */
468
469static void trans_go_xmote_th(struct gfs2_glock *gl, unsigned int state,
470 int flags)
471{
472 struct gfs2_sbd *sdp = gl->gl_sbd;
473
474 if (gl->gl_state != LM_ST_UNLOCKED &&
475 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
476 gfs2_meta_syncfs(sdp);
477 gfs2_log_shutdown(sdp);
478 }
479
480 gfs2_glock_xmote_th(gl, state, flags);
481}
482
/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 * On (re)acquiring the transaction glock with a live journal, the
 * journal head is located and checked for a clean (unmounted) state,
 * and the in-core log pointers are initialized from it.
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header head;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		/* Drop every cached view of the journal inode before
		   re-reading its head from disk. */
		gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		/* The log must have been cleanly shut down, or the fs
		   is inconsistent. */
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize some head of the log stuff */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
}
515
/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching.  Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	/* Flush all metadata and shut the log down before the lock is
	   actually released. */
	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}

	gfs2_glock_drop_th(gl);
}
535
536/**
537 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
538 * @gl: the glock
539 *
540 * Returns: 1 if it's ok
541 */
542
543static int quota_go_demote_ok(struct gfs2_glock *gl)
544{
545 return !atomic_read(&gl->gl_lvb_count);
546}
547
/* Metadata (buffer cache) glocks: generic promote/demote only. */
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_META,
};

/* Inode glocks: full sync/invalidate/refresh machinery. */
const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_greedy = inode_greedy,
	.go_type = LM_TYPE_INODE,
};

/* Resource-group glocks: metadata sync/inval plus bitmap get/put. */
const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_sync = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP,
};

/* The single per-sb transaction glock: log shutdown/replay hooks. */
const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

/* Quota glocks: droppable only once the LVB refcount hits zero. */
const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_xmote_th = gfs2_glock_xmote_th,
	.go_drop_th = gfs2_glock_drop_th,
	.go_type = LM_TYPE_JOURNAL,
};
615
diff --git a/fs/gfs2/glops.h b/fs/gfs2/glops.h
new file mode 100644
index 000000000000..a1d9b5b024e6
--- /dev/null
+++ b/fs/gfs2/glops.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
#ifndef __GLOPS_DOT_H__
#define __GLOPS_DOT_H__

#include "incore.h"

/* Per-lock-type operation vectors, defined in glops.c. */
extern const struct gfs2_glock_operations gfs2_meta_glops;
extern const struct gfs2_glock_operations gfs2_inode_glops;
extern const struct gfs2_glock_operations gfs2_rgrp_glops;
extern const struct gfs2_glock_operations gfs2_trans_glops;
extern const struct gfs2_glock_operations gfs2_iopen_glops;
extern const struct gfs2_glock_operations gfs2_flock_glops;
extern const struct gfs2_glock_operations gfs2_nondisk_glops;
extern const struct gfs2_glock_operations gfs2_quota_glops;
extern const struct gfs2_glock_operations gfs2_journal_glops;

#endif /* __GLOPS_DOT_H__ */
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
new file mode 100644
index 000000000000..118dc693d111
--- /dev/null
+++ b/fs/gfs2/incore.h
@@ -0,0 +1,634 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __INCORE_DOT_H__
11#define __INCORE_DOT_H__
12
13#include <linux/fs.h>
14
/* Flag bits passed to the go_sync()/go_inval() glock operations. */
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
#define DIO_DATA 0x00000040
#define DIO_RELEASE 0x00000080
#define DIO_ALL 0x00000100

/* Forward declarations to keep this header self-contained. */
struct gfs2_log_operations;
struct gfs2_log_element;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_ail;
struct gfs2_jdesc;
struct gfs2_sbd;

/* Completion callback stored in gl_req_bh for async lock requests. */
typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);

/*
 * Structure of operations that are associated with each
 * type of element in the log.
 */

struct gfs2_log_operations {
	void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_log_element *le);
	void (*lo_incore_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
	void (*lo_before_commit) (struct gfs2_sbd *sdp);
	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
	void (*lo_before_scan) (struct gfs2_jdesc *jd,
				struct gfs2_log_header *head, int pass);
	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
				 struct gfs2_log_descriptor *ld, __be64 *ptr,
				 int pass);
	void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
	const char *lo_name;
};

/* An entry on one of the superblock's sd_log_le_* lists. */
struct gfs2_log_element {
	struct list_head le_list;
	const struct gfs2_log_operations *le_ops;
};

/* One allocation-bitmap buffer within a resource group. */
struct gfs2_bitmap {
	struct buffer_head *bi_bh;
	char *bi_clone;
	u32 bi_offset;
	u32 bi_start;
	u32 bi_len;
};

/* In-core resource group descriptor. */
struct gfs2_rgrpd {
	struct list_head rd_list;	/* Link with superblock */
	struct list_head rd_list_mru;
	struct list_head rd_recent;	/* Recently used rgrps */
	struct gfs2_glock *rd_gl;	/* Glock for this rgrp */
	struct gfs2_rindex rd_ri;
	struct gfs2_rgrp rd_rg;
	u64 rd_rg_vn;
	struct gfs2_bitmap *rd_bits;
	unsigned int rd_bh_count;
	struct mutex rd_mutex;
	u32 rd_free_clone;
	struct gfs2_log_element rd_le;
	u32 rd_last_alloc_data;
	u32 rd_last_alloc_meta;
	struct gfs2_sbd *rd_sbd;
};

/* Private buffer_head state bits claimed by GFS2. */
enum gfs2_state_bits {
	BH_Pinned = BH_PrivateStart,
	BH_Escaped = BH_PrivateStart + 1,
};

BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)

/* Per-buffer journaling bookkeeping, attached to a buffer_head. */
struct gfs2_bufdata {
	struct buffer_head *bd_bh;
	struct gfs2_glock *bd_gl;

	struct list_head bd_list_tr;
	struct gfs2_log_element bd_le;

	struct gfs2_ail *bd_ail;
	struct list_head bd_ail_st_list;
	struct list_head bd_ail_gl_list;
};
104
/* Per-lock-type operations vector; instances live in glops.c. */
struct gfs2_glock_operations {
	void (*go_xmote_th) (struct gfs2_glock * gl, unsigned int state,
			     int flags);
	void (*go_xmote_bh) (struct gfs2_glock * gl);
	void (*go_drop_th) (struct gfs2_glock * gl);
	void (*go_drop_bh) (struct gfs2_glock * gl);
	void (*go_sync) (struct gfs2_glock * gl, int flags);
	void (*go_inval) (struct gfs2_glock * gl, int flags);
	int (*go_demote_ok) (struct gfs2_glock * gl);
	int (*go_lock) (struct gfs2_holder * gh);
	void (*go_unlock) (struct gfs2_holder * gh);
	void (*go_callback) (struct gfs2_glock * gl, unsigned int state);
	void (*go_greedy) (struct gfs2_glock * gl);
	const int go_type;	/* LM_TYPE_... */
};

/* Bit numbers for gfs2_holder.gh_iflags. */
enum {
	/* Actions */
	HIF_MUTEX = 0,
	HIF_PROMOTE = 1,
	HIF_DEMOTE = 2,
	HIF_GREEDY = 3,

	/* States */
	HIF_ALLOCED = 4,
	HIF_DEALLOC = 5,
	HIF_HOLDER = 6,
	HIF_FIRST = 7,
	HIF_ABORTED = 9,
};

/* One request to hold a glock in a particular state. */
struct gfs2_holder {
	struct list_head gh_list;

	struct gfs2_glock *gh_gl;
	struct task_struct *gh_owner;
	unsigned int gh_state;
	unsigned gh_flags;

	int gh_error;
	unsigned long gh_iflags;	/* HIF_... */
	struct completion gh_wait;
	unsigned long gh_ip;	/* caller address, for debugging */
};
149
/* Bit numbers for gfs2_glock.gl_flags. */
enum {
	GLF_LOCK = 1,
	GLF_STICKY = 2,
	GLF_PREFETCH = 3,
	GLF_DIRTY = 5,
	GLF_SKIP_WAITERS2 = 6,
	GLF_GREEDY = 7,
};

/* The in-core cluster lock.  gl_spin protects the waiter lists. */
struct gfs2_glock {
	struct hlist_node gl_list;
	unsigned long gl_flags;		/* GLF_... */
	struct lm_lockname gl_name;
	atomic_t gl_ref;

	spinlock_t gl_spin;

	unsigned int gl_state;		/* LM_ST_... */
	unsigned int gl_hash;
	struct task_struct *gl_owner;
	unsigned long gl_ip;
	struct list_head gl_holders;
	struct list_head gl_waiters1;	/* HIF_MUTEX */
	struct list_head gl_waiters2;	/* HIF_DEMOTE, HIF_GREEDY */
	struct list_head gl_waiters3;	/* HIF_PROMOTE */

	const struct gfs2_glock_operations *gl_ops;

	struct gfs2_holder *gl_req_gh;	/* request being serviced */
	gfs2_glop_bh_t gl_req_bh;

	void *gl_lock;			/* lock-module private handle */
	char *gl_lvb;
	atomic_t gl_lvb_count;

	u64 gl_vn;			/* version; bumped on invalidate */
	unsigned long gl_stamp;		/* jiffies of last use */
	void *gl_object;		/* inode/rgrp/etc. behind this lock */

	struct list_head gl_reclaim;

	struct gfs2_sbd *gl_sbd;

	struct inode *gl_aspace;
	struct gfs2_log_element gl_le;
	struct list_head gl_ail_list;
	atomic_t gl_ail_count;
};
198
/* State for one in-progress block allocation. */
struct gfs2_alloc {
	/* Quota stuff */

	struct gfs2_quota_data *al_qd[2*MAXQUOTAS];
	struct gfs2_holder al_qd_ghs[2*MAXQUOTAS];
	unsigned int al_qd_num;

	u32 al_requested;	/* Filled in by caller of gfs2_inplace_reserve() */
	u32 al_alloced;		/* Filled in by gfs2_alloc_*() */

	/* Filled in by gfs2_inplace_reserve() */

	unsigned int al_line;
	char *al_file;
	struct gfs2_holder al_ri_gh;
	struct gfs2_holder al_rgd_gh;
	struct gfs2_rgrpd *al_rgd;

};

/* Bit numbers for gfs2_inode.i_flags. */
enum {
	GIF_QD_LOCKED = 1,
	GIF_PAGED = 2,
	GIF_SW_PAGED = 3,
};

/* GFS2's in-core inode; embeds the VFS inode as its first member. */
struct gfs2_inode {
	struct inode i_inode;
	struct gfs2_inum i_num;

	unsigned long i_flags;		/* GIF_... */

	u64 i_vn;			/* compared against i_gl->gl_vn */
	struct gfs2_dinode i_di;	/* To be replaced by ref to block */

	struct gfs2_glock *i_gl;	/* Move into i_gh? */
	struct gfs2_holder i_iopen_gh;
	struct gfs2_holder i_gh;	/* for prepare/commit_write only */
	struct gfs2_alloc i_alloc;
	u64 i_last_rg_alloc;

	spinlock_t i_spin;
	struct rw_semaphore i_rw_mutex;
	unsigned int i_greedy;		/* adaptive hold window, see glops.c */
	unsigned long i_last_pfault;

	struct buffer_head *i_cache[GFS2_MAX_META_HEIGHT];
};
247
/*
 * Since i_inode is the first element of struct gfs2_inode,
 * this is effectively a cast.
 */
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
	return container_of(inode, struct gfs2_inode, i_inode);
}
256
/* Map a VFS inode to its GFS2 superblock data.  To be removed? */
static inline struct gfs2_sbd *GFS2_SB(struct inode *inode)
{
	return inode->i_sb->s_fs_info;
}
262
/* Bit numbers for gfs2_file.f_flags. */
enum {
	GFF_DID_DIRECT_ALLOC = 0,
	GFF_EXLOCK = 1,
};

/* Per-open-file state; presumably hung off file private data — TODO confirm. */
struct gfs2_file {
	unsigned long f_flags;		/* GFF_... */
	struct mutex f_fl_mutex;
	struct gfs2_holder f_fl_gh;
};

/* A block-revoke log element. */
struct gfs2_revoke {
	struct gfs2_log_element rv_le;
	u64 rv_blkno;
};

/* Revoke seen during journal replay. */
struct gfs2_revoke_replay {
	struct list_head rr_list;
	u64 rr_blkno;
	unsigned int rr_where;
};

/* Bit numbers for gfs2_quota_data.qd_flags. */
enum {
	QDF_USER = 0,
	QDF_CHANGE = 1,
	QDF_LOCKED = 2,
};

/* LVB image of a quota's limits and usage (big-endian on the wire). */
struct gfs2_quota_lvb {
	__be32 qb_magic;
	u32 __pad;
	__be64 qb_limit;	/* Hard limit of # blocks to alloc */
	__be64 qb_warn;		/* Warn user when alloc is above this # */
	__be64 qb_value;	/* Current # blocks allocated */
};

/* In-core quota record for one user or group id. */
struct gfs2_quota_data {
	struct list_head qd_list;
	unsigned int qd_count;

	u32 qd_id;
	unsigned long qd_flags;		/* QDF_... */

	s64 qd_change;
	s64 qd_change_sync;

	unsigned int qd_slot;
	unsigned int qd_slot_count;

	struct buffer_head *qd_bh;
	struct gfs2_quota_change *qd_bh_qc;
	unsigned int qd_bh_count;

	struct gfs2_glock *qd_gl;
	struct gfs2_quota_lvb qd_qb;

	u64 qd_sync_gen;
	unsigned long qd_last_warn;
	unsigned long qd_last_touched;
};
323
/* One buffer queued for a log flush. */
struct gfs2_log_buf {
	struct list_head lb_list;
	struct buffer_head *lb_bh;
	struct buffer_head *lb_real;
};

/* An in-flight transaction. */
struct gfs2_trans {
	unsigned long tr_ip;	/* caller address, for debugging */

	unsigned int tr_blocks;
	unsigned int tr_revokes;
	unsigned int tr_reserved;

	struct gfs2_holder tr_t_gh;	/* hold on the transaction glock */

	int tr_touched;

	unsigned int tr_num_buf;
	unsigned int tr_num_buf_new;
	unsigned int tr_num_buf_rm;
	struct list_head tr_list_buf;

	unsigned int tr_num_revoke;
	unsigned int tr_num_revoke_rm;
};

/* Active Items List: blocks committed to the log but not yet in place. */
struct gfs2_ail {
	struct list_head ai_list;

	unsigned int ai_first;
	struct list_head ai_ail1_list;
	struct list_head ai_ail2_list;

	u64 ai_sync_gen;
};

/* Journal descriptor, one per journal in the jindex. */
struct gfs2_jdesc {
	struct list_head jd_list;

	struct inode *jd_inode;
	unsigned int jd_jid;
	int jd_dirty;

	unsigned int jd_blocks;
};
369
/* Bounds for the number of glockd daemon threads. */
#define GFS2_GLOCKD_DEFAULT 1
#define GFS2_GLOCKD_MAX 16

/* Quota enforcement modes (ar_quota). */
#define GFS2_QUOTA_DEFAULT GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF 0
#define GFS2_QUOTA_ACCOUNT 1
#define GFS2_QUOTA_ON 2

/* Data journaling modes (ar_data). */
#define GFS2_DATA_DEFAULT GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK 1
#define GFS2_DATA_ORDERED 2

/* Parsed mount options. */
struct gfs2_args {
	char ar_lockproto[GFS2_LOCKNAME_LEN];	/* Name of the Lock Protocol */
	char ar_locktable[GFS2_LOCKNAME_LEN];	/* Name of the Lock Table */
	char ar_hostdata[GFS2_LOCKNAME_LEN];	/* Host specific data */
	int ar_spectator;	/* Don't get a journal because we're always RO */
	int ar_ignore_local_fs;	/* Don't optimize even if local_fs is 1 */
	int ar_localflocks;	/* Let the VFS do flock|fcntl locks for us */
	int ar_localcaching;	/* Local-style caching (dangerous on multihost) */
	int ar_debug;		/* Oops on errors instead of trying to be graceful */
	int ar_upgrade;		/* Upgrade ondisk/multihost format */
	unsigned int ar_num_glockd;	/* Number of glockd threads */
	int ar_posix_acl;	/* Enable posix acls */
	int ar_quota;		/* off/account/on */
	int ar_suiddir;		/* suiddir support */
	int ar_data;		/* ordered/writeback */
};

/* Runtime tunables, protected by gt_spin. */
struct gfs2_tune {
	spinlock_t gt_spin;

	unsigned int gt_ilimit;
	unsigned int gt_ilimit_tries;
	unsigned int gt_ilimit_min;
	unsigned int gt_demote_secs;	/* Cache retention for unheld glock */
	unsigned int gt_incore_log_blocks;
	unsigned int gt_log_flush_secs;
	unsigned int gt_jindex_refresh_secs;	/* Check for new journal index */

	unsigned int gt_scand_secs;
	unsigned int gt_recoverd_secs;
	unsigned int gt_logd_secs;
	unsigned int gt_quotad_secs;

	unsigned int gt_quota_simul_sync;	/* Max quotavals to sync at once */
	unsigned int gt_quota_warn_period;	/* Secs between quota warn msgs */
	unsigned int gt_quota_scale_num;	/* Numerator */
	unsigned int gt_quota_scale_den;	/* Denominator */
	unsigned int gt_quota_cache_secs;
	unsigned int gt_quota_quantum;	/* Secs between syncs to quota file */
	unsigned int gt_atime_quantum;	/* Min secs between atime updates */
	unsigned int gt_new_files_jdata;
	unsigned int gt_new_files_directio;
	unsigned int gt_max_atomic_write;	/* Split big writes into this size */
	unsigned int gt_max_readahead;	/* Max bytes to read-ahead from disk */
	unsigned int gt_lockdump_size;
	unsigned int gt_stall_secs;	/* Detects trouble! */
	unsigned int gt_complain_secs;
	unsigned int gt_reclaim_limit;	/* Max num of glocks in reclaim list */
	unsigned int gt_entries_per_readdir;
	unsigned int gt_prefetch_secs;	/* Usage window for prefetched glocks */
	unsigned int gt_greedy_default;
	unsigned int gt_greedy_quantum;
	unsigned int gt_greedy_max;
	unsigned int gt_statfs_quantum;
	unsigned int gt_statfs_slow;
};
438
/* Bit numbers for gfs2_sbd.sd_flags. */
enum {
	SDF_JOURNAL_CHECKED = 0,
	SDF_JOURNAL_LIVE = 1,
	SDF_SHUTDOWN = 2,
	SDF_NOATIME = 3,
};

#define GFS2_FSNAME_LEN 256

/* The in-core GFS2 superblock (one per mounted filesystem). */
struct gfs2_sbd {
	struct super_block *sd_vfs;
	struct super_block *sd_vfs_meta;
	struct kobject sd_kobj;
	unsigned long sd_flags;	/* SDF_... */
	struct gfs2_sb sd_sb;

	/* Constants computed on mount */

	u32 sd_fsb2bb;
	u32 sd_fsb2bb_shift;
	u32 sd_diptrs;	/* Number of pointers in a dinode */
	u32 sd_inptrs;	/* Number of pointers in a indirect block */
	u32 sd_jbsize;	/* Size of a journaled data block */
	u32 sd_hash_bsize;	/* sizeof(exhash block) */
	u32 sd_hash_bsize_shift;
	u32 sd_hash_ptrs;	/* Number of pointers in a hash block */
	u32 sd_qc_per_block;
	u32 sd_max_dirres;	/* Max blocks needed to add a directory entry */
	u32 sd_max_height;	/* Max height of a file's metadata tree */
	u64 sd_heightsize[GFS2_MAX_META_HEIGHT];
	u32 sd_max_jheight;	/* Max height of journaled file's meta tree */
	u64 sd_jheightsize[GFS2_MAX_META_HEIGHT];

	struct gfs2_args sd_args;	/* Mount arguments */
	struct gfs2_tune sd_tune;	/* Filesystem tuning structure */

	/* Lock Stuff */

	struct lm_lockstruct sd_lockstruct;
	struct list_head sd_reclaim_list;
	spinlock_t sd_reclaim_lock;
	wait_queue_head_t sd_reclaim_wq;
	atomic_t sd_reclaim_count;
	struct gfs2_holder sd_live_gh;
	struct gfs2_glock *sd_rename_gl;
	struct gfs2_glock *sd_trans_gl;

	/* Inode Stuff */

	struct inode *sd_master_dir;
	struct inode *sd_jindex;
	struct inode *sd_inum_inode;
	struct inode *sd_statfs_inode;
	struct inode *sd_ir_inode;
	struct inode *sd_sc_inode;
	struct inode *sd_qc_inode;
	struct inode *sd_rindex;
	struct inode *sd_quota_inode;

	/* Inum stuff */

	struct mutex sd_inum_mutex;

	/* StatFS stuff */

	spinlock_t sd_statfs_spin;
	struct mutex sd_statfs_mutex;
	struct gfs2_statfs_change sd_statfs_master;
	struct gfs2_statfs_change sd_statfs_local;
	unsigned long sd_statfs_sync_time;

	/* Resource group stuff */

	u64 sd_rindex_vn;
	spinlock_t sd_rindex_spin;
	struct mutex sd_rindex_mutex;
	struct list_head sd_rindex_list;
	struct list_head sd_rindex_mru_list;
	struct list_head sd_rindex_recent_list;
	struct gfs2_rgrpd *sd_rindex_forward;
	unsigned int sd_rgrps;

	/* Journal index stuff */

	struct list_head sd_jindex_list;
	spinlock_t sd_jindex_spin;
	struct mutex sd_jindex_mutex;
	unsigned int sd_journals;
	unsigned long sd_jindex_refresh_time;

	struct gfs2_jdesc *sd_jdesc;	/* this node's own journal */
	struct gfs2_holder sd_journal_gh;
	struct gfs2_holder sd_jinode_gh;

	struct gfs2_holder sd_ir_gh;
	struct gfs2_holder sd_sc_gh;
	struct gfs2_holder sd_qc_gh;

	/* Daemon stuff */

	struct task_struct *sd_scand_process;
	struct task_struct *sd_recoverd_process;
	struct task_struct *sd_logd_process;
	struct task_struct *sd_quotad_process;
	struct task_struct *sd_glockd_process[GFS2_GLOCKD_MAX];
	unsigned int sd_glockd_num;

	/* Quota stuff */

	struct list_head sd_quota_list;
	atomic_t sd_quota_count;
	spinlock_t sd_quota_spin;
	struct mutex sd_quota_mutex;

	unsigned int sd_quota_slots;
	unsigned int sd_quota_chunks;
	unsigned char **sd_quota_bitmap;

	u64 sd_quota_sync_gen;
	unsigned long sd_quota_sync_time;

	/* Log stuff */

	spinlock_t sd_log_lock;

	unsigned int sd_log_blks_reserved;
	unsigned int sd_log_commited_buf;
	unsigned int sd_log_commited_revoke;

	unsigned int sd_log_num_gl;
	unsigned int sd_log_num_buf;
	unsigned int sd_log_num_revoke;
	unsigned int sd_log_num_rg;
	unsigned int sd_log_num_databuf;
	unsigned int sd_log_num_jdata;
	unsigned int sd_log_num_hdrs;

	struct list_head sd_log_le_gl;
	struct list_head sd_log_le_buf;
	struct list_head sd_log_le_revoke;
	struct list_head sd_log_le_rg;
	struct list_head sd_log_le_databuf;

	unsigned int sd_log_blks_free;
	struct mutex sd_log_reserve_mutex;

	u64 sd_log_sequence;
	unsigned int sd_log_head;
	unsigned int sd_log_tail;
	int sd_log_idle;

	unsigned long sd_log_flush_time;
	struct rw_semaphore sd_log_flush_lock;
	struct list_head sd_log_flush_list;

	unsigned int sd_log_flush_head;
	u64 sd_log_flush_wrapped;

	struct list_head sd_ail1_list;
	struct list_head sd_ail2_list;
	u64 sd_ail_sync_gen;

	/* Replay stuff */

	struct list_head sd_revoke_list;
	unsigned int sd_replay_tail;

	unsigned int sd_found_blocks;
	unsigned int sd_found_revokes;
	unsigned int sd_replayed_blocks;

	/* For quiescing the filesystem */

	struct gfs2_holder sd_freeze_gh;
	struct mutex sd_freeze_lock;
	unsigned int sd_freeze_count;

	/* Counters */

	atomic_t sd_glock_count;
	atomic_t sd_glock_held_count;
	atomic_t sd_inode_count;
	atomic_t sd_reclaimed;

	char sd_fsname[GFS2_FSNAME_LEN];
	char sd_table_name[GFS2_FSNAME_LEN];
	char sd_proto_name[GFS2_FSNAME_LEN];

	/* Debugging crud */

	unsigned long sd_last_warning;
	struct vfsmount *sd_gfs2mnt;
};

#endif /* __INCORE_DOT_H__ */
634
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
new file mode 100644
index 000000000000..57c43ac47925
--- /dev/null
+++ b/fs/gfs2/inode.c
@@ -0,0 +1,1379 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/posix_acl.h>
16#include <linux/sort.h>
17#include <linux/gfs2_ondisk.h>
18#include <linux/crc32.h>
19#include <linux/lm_interface.h>
20#include <linux/security.h>
21
22#include "gfs2.h"
23#include "incore.h"
24#include "acl.h"
25#include "bmap.h"
26#include "dir.h"
27#include "eattr.h"
28#include "glock.h"
29#include "glops.h"
30#include "inode.h"
31#include "log.h"
32#include "meta_io.h"
33#include "ops_address.h"
34#include "ops_file.h"
35#include "ops_inode.h"
36#include "quota.h"
37#include "rgrp.h"
38#include "trans.h"
39#include "util.h"
40
41/**
42 * gfs2_inode_attr_in - Copy attributes from the dinode into the VFS inode
43 * @ip: The GFS2 inode (with embedded disk inode data)
44 * @inode: The Linux VFS inode
45 *
46 */
47
48void gfs2_inode_attr_in(struct gfs2_inode *ip)
49{
50 struct inode *inode = &ip->i_inode;
51 struct gfs2_dinode *di = &ip->i_di;
52
53 inode->i_ino = ip->i_num.no_addr;
54
55 switch (di->di_mode & S_IFMT) {
56 case S_IFBLK:
57 case S_IFCHR:
58 inode->i_rdev = MKDEV(di->di_major, di->di_minor);
59 break;
60 default:
61 inode->i_rdev = 0;
62 break;
63 };
64
65 inode->i_mode = di->di_mode;
66 inode->i_nlink = di->di_nlink;
67 inode->i_uid = di->di_uid;
68 inode->i_gid = di->di_gid;
69 i_size_write(inode, di->di_size);
70 inode->i_atime.tv_sec = di->di_atime;
71 inode->i_mtime.tv_sec = di->di_mtime;
72 inode->i_ctime.tv_sec = di->di_ctime;
73 inode->i_atime.tv_nsec = 0;
74 inode->i_mtime.tv_nsec = 0;
75 inode->i_ctime.tv_nsec = 0;
76 inode->i_blocks = di->di_blocks <<
77 (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
78
79 if (di->di_flags & GFS2_DIF_IMMUTABLE)
80 inode->i_flags |= S_IMMUTABLE;
81 else
82 inode->i_flags &= ~S_IMMUTABLE;
83
84 if (di->di_flags & GFS2_DIF_APPENDONLY)
85 inode->i_flags |= S_APPEND;
86 else
87 inode->i_flags &= ~S_APPEND;
88}
89
90/**
91 * gfs2_inode_attr_out - Copy attributes from VFS inode into the dinode
92 * @ip: The GFS2 inode
93 *
94 * Only copy out the attributes that we want the VFS layer
95 * to be able to modify.
96 */
97
98void gfs2_inode_attr_out(struct gfs2_inode *ip)
99{
100 struct inode *inode = &ip->i_inode;
101 struct gfs2_dinode *di = &ip->i_di;
102 gfs2_assert_withdraw(GFS2_SB(inode),
103 (di->di_mode & S_IFMT) == (inode->i_mode & S_IFMT));
104 di->di_mode = inode->i_mode;
105 di->di_uid = inode->i_uid;
106 di->di_gid = inode->i_gid;
107 di->di_atime = inode->i_atime.tv_sec;
108 di->di_mtime = inode->i_mtime.tv_sec;
109 di->di_ctime = inode->i_ctime.tv_sec;
110}
111
112static int iget_test(struct inode *inode, void *opaque)
113{
114 struct gfs2_inode *ip = GFS2_I(inode);
115 struct gfs2_inum *inum = opaque;
116
117 if (ip && ip->i_num.no_addr == inum->no_addr)
118 return 1;
119
120 return 0;
121}
122
123static int iget_set(struct inode *inode, void *opaque)
124{
125 struct gfs2_inode *ip = GFS2_I(inode);
126 struct gfs2_inum *inum = opaque;
127
128 ip->i_num = *inum;
129 return 0;
130}
131
132struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum)
133{
134 return ilookup5(sb, (unsigned long)inum->no_formal_ino,
135 iget_test, inum);
136}
137
138static struct inode *gfs2_iget(struct super_block *sb, struct gfs2_inum *inum)
139{
140 return iget5_locked(sb, (unsigned long)inum->no_formal_ino,
141 iget_test, iget_set, inum);
142}
143
144/**
145 * gfs2_inode_lookup - Lookup an inode
146 * @sb: The super block
147 * @inum: The inode number
148 * @type: The type of the inode
149 *
150 * Returns: A VFS inode, or an error
151 */
152
153struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned int type)
154{
155 struct inode *inode = gfs2_iget(sb, inum);
156 struct gfs2_inode *ip = GFS2_I(inode);
157 struct gfs2_glock *io_gl;
158 int error;
159
160 if (inode->i_state & I_NEW) {
161 struct gfs2_sbd *sdp = GFS2_SB(inode);
162 umode_t mode = DT2IF(type);
163 inode->i_private = ip;
164 inode->i_mode = mode;
165
166 if (S_ISREG(mode)) {
167 inode->i_op = &gfs2_file_iops;
168 inode->i_fop = &gfs2_file_fops;
169 inode->i_mapping->a_ops = &gfs2_file_aops;
170 } else if (S_ISDIR(mode)) {
171 inode->i_op = &gfs2_dir_iops;
172 inode->i_fop = &gfs2_dir_fops;
173 } else if (S_ISLNK(mode)) {
174 inode->i_op = &gfs2_symlink_iops;
175 } else {
176 inode->i_op = &gfs2_dev_iops;
177 }
178
179 error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
180 if (unlikely(error))
181 goto fail;
182 ip->i_gl->gl_object = ip;
183
184 error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
185 if (unlikely(error))
186 goto fail_put;
187
188 ip->i_vn = ip->i_gl->gl_vn - 1;
189 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
190 if (unlikely(error))
191 goto fail_iopen;
192
193 gfs2_glock_put(io_gl);
194 unlock_new_inode(inode);
195 }
196
197 return inode;
198fail_iopen:
199 gfs2_glock_put(io_gl);
200fail_put:
201 ip->i_gl->gl_object = NULL;
202 gfs2_glock_put(ip->i_gl);
203fail:
204 iput(inode);
205 return ERR_PTR(error);
206}
207
/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Re-reads the on-disk dinode into ip->i_di and sanity-checks it against
 * the inode number the block was looked up by.  On success the incore
 * metadata version is synchronised with the glock's version.
 *
 * Returns: errno (-ESTALE if the dinode block has been reused)
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	/* The block must actually contain a dinode, not other metadata */
	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) {
		brelse(dibh);
		return -EIO;
	}

	gfs2_dinode_in(&ip->i_di, dibh->b_data);

	brelse(dibh);

	/* The dinode's self-recorded address must match where we read it */
	if (ip->i_num.no_addr != ip->i_di.di_num.no_addr) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}
	/* Same block but a different formal inode number: the inode was
	   deallocated and the block reused — tell the caller it's stale */
	if (ip->i_num.no_formal_ino != ip->i_di.di_num.no_formal_ino)
		return -ESTALE;

	/* Incore copy now matches the glock's idea of the metadata */
	ip->i_vn = ip->i_gl->gl_vn;

	return 0;
}
245
/**
 * gfs2_dinode_dealloc - Free the on-disk block holding a dinode
 * @ip: The inode whose dinode block is to be deallocated
 *
 * All other blocks of the inode must already have been freed:
 * di_blocks must be exactly 1 (the dinode block itself).
 *
 * Returns: errno
 */

int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al;
	struct gfs2_rgrpd *rgd;
	int error;

	/* Anything other than the lone dinode block means corruption */
	if (ip->i_di.di_blocks != 1) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}

	al = gfs2_alloc_get(ip);

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	/* The resource group index is needed to locate the dinode's rgrp */
	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		goto out_qs;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_rindex_relse;
	}

	/* Exclusive lock on the rgrp so its bitmap can be modified */
	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0,
				   &al->al_rgd_gh);
	if (error)
		goto out_rindex_relse;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1);
	if (error)
		goto out_rg_gunlock;

	gfs2_trans_add_gl(ip->i_gl);

	gfs2_free_di(rgd, ip);

	gfs2_trans_end(sdp);
	clear_bit(GLF_STICKY, &ip->i_gl->gl_flags);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
out_rindex_relse:
	gfs2_glock_dq_uninit(&al->al_ri_gh);
out_qs:
	gfs2_quota_unhold(ip);
out:
	gfs2_alloc_put(ip);
	return error;
}
302
/**
 * gfs2_change_nlink - Change nlink count on inode
 * @ip: The GFS2 inode
 * @diff: The change in the nlink count required
 *
 * NOTE(review): gfs2_trans_add_bh() is called here, so the caller is
 * presumably expected to hold the inode glock and have an active
 * transaction — confirm against callers.  When the count reaches zero
 * the inode is marked unlinked on disk via gfs2_unlink_di().
 *
 * Returns: errno
 */

int gfs2_change_nlink(struct gfs2_inode *ip, int diff)
{
	struct gfs2_sbd *sdp = ip->i_inode.i_sb->s_fs_info;
	struct buffer_head *dibh;
	u32 nlink;
	int error;

	/* The incore dinode and the VFS inode must agree before we start */
	BUG_ON(ip->i_di.di_nlink != ip->i_inode.i_nlink);
	nlink = ip->i_di.di_nlink + diff;

	/* If we are reducing the nlink count, but the new value ends up being
	   bigger than the old one, we must have underflowed. */
	if (diff < 0 && nlink > ip->i_di.di_nlink) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	ip->i_di.di_nlink = nlink;
	ip->i_di.di_ctime = get_seconds();
	ip->i_inode.i_nlink = nlink;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(&ip->i_di, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(&ip->i_inode);

	if (ip->i_di.di_nlink == 0) {
		struct gfs2_rgrpd *rgd;
		struct gfs2_holder ri_gh, rg_gh;

		/* Find and lock the resource group holding the dinode so
		   the inode can be marked unlinked in it */
		error = gfs2_rindex_hold(sdp, &ri_gh);
		if (error)
			goto out;
		error = -EIO;
		rgd = gfs2_blk2rgrpd(sdp, ip->i_num.no_addr);
		if (!rgd)
			goto out_norgrp;
		error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
		if (error)
			goto out_norgrp;

		clear_nlink(&ip->i_inode);
		gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */
		gfs2_glock_dq_uninit(&rg_gh);
out_norgrp:
		gfs2_glock_dq_uninit(&ri_gh);
	}
out:
	return error;
}
366
367struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
368{
369 struct qstr qstr;
370 gfs2_str2qstr(&qstr, name);
371 return gfs2_lookupi(dip, &qstr, 1, NULL);
372}
373
374
/**
 * gfs2_lookupi - Look up a filename in a directory and return its inode
 * @dir: The directory to search in
 * @name: The name of the inode to look for
 * @is_root: If 1, ignore the caller's permissions
 * @nd: Lookup intent data (currently unused here)
 *
 * "." — and ".." when @dir is the filesystem root — resolve to @dir
 * itself without consulting the on-disk directory.
 *
 * Returns: the inode on success, NULL if the name does not exist.
 * NOTE(review): errors other than -ENOENT also surface as a NULL/unset
 * inode here rather than ERR_PTR(error) — confirm callers expect this.
 */

struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root, struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	struct gfs2_inum inum;
	unsigned int type;
	int error = 0;
	struct inode *inode = NULL;

	if (!name->len || name->len > GFS2_FNAMESIZE)
		return ERR_PTR(-ENAMETOOLONG);

	/* "." and ".." at the root: hand back the directory itself */
	if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) ||
	    (name->len == 2 && memcmp(name->name, "..", 2) == 0 &&
	     dir == sb->s_root->d_inode)) {
		igrab(dir);
		return dir;
	}

	/* Shared lock on the directory while searching it */
	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return ERR_PTR(error);

	if (!is_root) {
		error = permission(dir, MAY_EXEC, NULL);
		if (error)
			goto out;
	}

	error = gfs2_dir_search(dir, name, &inum, &type);
	if (error)
		goto out;

	inode = gfs2_inode_lookup(sb, &inum, type);

out:
	gfs2_glock_dq_uninit(&d_gh);
	if (error == -ENOENT)
		return NULL;
	return inode;
}
431
/**
 * pick_formal_ino_1 - Take a formal inode number from this node's range
 * @sdp: the filesystem
 * @formal_ino: filled in with the allocated formal inode number
 *
 * Consumes one number from the node-local inum range file (sd_ir_inode)
 * under sd_inum_mutex, without any cluster-wide locking.
 *
 * Returns: 0 on success, a negative errno on failure, or 1 if the local
 * range is exhausted (caller should fall back to pick_formal_ino_2)
 */

static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct buffer_head *bh;
	struct gfs2_inum_range ir;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error) {
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return error;
	}

	/* The range descriptor lives right after the dinode header */
	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (ir.ir_length) {
		*formal_ino = ir.ir_start++;
		ir.ir_length--;
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_inum_range_out(&ir,
				    bh->b_data + sizeof(struct gfs2_dinode));
		brelse(bh);
		mutex_unlock(&sdp->sd_inum_mutex);
		gfs2_trans_end(sdp);
		return 0;
	}

	brelse(bh);

	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);

	/* Local range empty — signal the caller to refill it */
	return 1;
}
472
/**
 * pick_formal_ino_2 - Refill the local inum range and take a number
 * @sdp: the filesystem
 * @formal_ino: filled in with the allocated formal inode number
 *
 * Takes an exclusive glock on the cluster-wide inum file (sd_inum_inode),
 * refills this node's private range from the global counter if the range
 * is empty, then hands out one number from it.
 *
 * Returns: errno
 */

static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range ir;
	int error;

	/* Cluster-wide exclusive lock on the global inum counter */
	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		u64 x, y;

		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		/* Carve GFS2_INUM_QUANTUM numbers off the global
		   big-endian counter for this node's private range */
		x = *(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(x);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
		if (x < y)
			gfs2_consist_inode(m_ip);
		x = cpu_to_be64(x);
		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(u64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = x;

		brelse(m_bh);
	}

	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
	brelse(bh);
out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
534
535static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum)
536{
537 int error;
538
539 error = pick_formal_ino_1(sdp, inum);
540 if (error <= 0)
541 return error;
542
543 error = pick_formal_ino_2(sdp, inum);
544
545 return error;
546}
547
548/**
549 * create_ok - OK to create a new on-disk inode here?
550 * @dip: Directory in which dinode is to be created
551 * @name: Name of new dinode
552 * @mode:
553 *
554 * Returns: errno
555 */
556
557static int create_ok(struct gfs2_inode *dip, const struct qstr *name,
558 unsigned int mode)
559{
560 int error;
561
562 error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
563 if (error)
564 return error;
565
566 /* Don't create entries in an unlinked directory */
567 if (!dip->i_di.di_nlink)
568 return -EPERM;
569
570 error = gfs2_dir_search(&dip->i_inode, name, NULL, NULL);
571 switch (error) {
572 case -ENOENT:
573 error = 0;
574 break;
575 case 0:
576 return -EEXIST;
577 default:
578 return error;
579 }
580
581 if (dip->i_di.di_entries == (u32)-1)
582 return -EFBIG;
583 if (S_ISDIR(mode) && dip->i_di.di_nlink == (u32)-1)
584 return -EMLINK;
585
586 return 0;
587}
588
/* Choose the mode/uid/gid of a new inode, honouring the "suiddir" mount
 * option (BSD-style owner inheritance from setuid directories) and
 * setgid directories (group inheritance, setgid propagation to subdirs).
 */
static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode,
			       unsigned int *uid, unsigned int *gid)
{
	/* suiddir + setuid parent owned by non-root: new entries take the
	   directory's owner; files made by others lose exec/suid/sgid bits */
	if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir &&
	    (dip->i_di.di_mode & S_ISUID) && dip->i_di.di_uid) {
		if (S_ISDIR(*mode))
			*mode |= S_ISUID;
		else if (dip->i_di.di_uid != current->fsuid)
			*mode &= ~07111;
		*uid = dip->i_di.di_uid;
	} else
		*uid = current->fsuid;

	/* Setgid directory: inherit group; subdirectories inherit setgid */
	if (dip->i_di.di_mode & S_ISGID) {
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
		*gid = dip->i_di.di_gid;
	} else
		*gid = current->fsgid;
}
609
/**
 * alloc_dinode - Allocate a disk block for a new dinode
 * @dip: The directory the inode is being created in (allocation context)
 * @inum: no_addr is filled in with the new dinode's block address
 * @generation: filled in with the new dinode's generation number
 *
 * Returns: errno
 */

static int alloc_dinode(struct gfs2_inode *dip, struct gfs2_inum *inum,
			u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	int error;

	gfs2_alloc_get(dip);

	/* Reserve space for a single dinode block */
	dip->i_alloc.al_requested = RES_DINODE;
	error = gfs2_inplace_reserve(dip);
	if (error)
		goto out;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
	if (error)
		goto out_ipreserv;

	inum->no_addr = gfs2_alloc_di(dip, generation);

	gfs2_trans_end(sdp);

out_ipreserv:
	gfs2_inplace_release(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}
637
/**
 * init_dinode - Fill in a new dinode structure
 * @dip: the directory this inode is being created in
 * @gl: The glock covering the new inode
 * @inum: the inode number
 * @mode: the file permissions
 * @uid: owner uid for the new inode
 * @gid: owner gid for the new inode
 * @generation: the dinode generation number
 *
 * Writes a fully initialised on-disk dinode (big-endian fields) into a
 * fresh metadata buffer, inheriting JDATA/DIRECTIO flags from the parent
 * directory where configured.
 */

static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
			const struct gfs2_inum *inum, unsigned int mode,
			unsigned int uid, unsigned int gid,
			const u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_dinode *di;
	struct buffer_head *dibh;

	dibh = gfs2_meta_new(gl, inum->no_addr);
	gfs2_trans_add_bh(gl, dibh, 1);
	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	di = (struct gfs2_dinode *)dibh->b_data;

	di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino);
	di->di_num.no_addr = cpu_to_be64(inum->no_addr);
	di->di_mode = cpu_to_be32(mode);
	di->di_uid = cpu_to_be32(uid);
	di->di_gid = cpu_to_be32(gid);
	di->di_nlink = cpu_to_be32(0);
	di->di_size = cpu_to_be64(0);
	/* One block allocated so far: the dinode itself */
	di->di_blocks = cpu_to_be64(1);
	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(get_seconds());
	di->di_major = di->di_minor = cpu_to_be32(0);
	/* Start allocation goals at the dinode's own block */
	di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr);
	di->di_generation = cpu_to_be64(*generation);
	di->di_flags = cpu_to_be32(0);

	/* Inherit journaled-data / direct-I/O behaviour from the parent
	   directory (or the filesystem-wide tunables) */
	if (S_ISREG(mode)) {
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) ||
		    gfs2_tune_get(sdp, gt_new_files_jdata))
			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
		if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) ||
		    gfs2_tune_get(sdp, gt_new_files_directio))
			di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO);
	} else if (S_ISDIR(mode)) {
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_DIRECTIO);
		di->di_flags |= cpu_to_be32(dip->i_di.di_flags &
					    GFS2_DIF_INHERIT_JDATA);
	}

	di->__pad1 = 0;
	di->di_payload_format = cpu_to_be32(0);
	di->di_height = cpu_to_be32(0);
	di->__pad2 = 0;
	di->__pad3 = 0;
	di->di_depth = cpu_to_be16(0);
	di->di_entries = cpu_to_be32(0);
	memset(&di->__pad4, 0, sizeof(di->__pad4));
	di->di_eattr = cpu_to_be64(0);
	memset(&di->di_reserved, 0, sizeof(di->di_reserved));

	brelse(dibh);
}
705
/**
 * make_dinode - Write a new dinode to disk under quota control
 * @dip: the directory the inode is being created in
 * @gl: the glock covering the new inode
 * @mode: the requested mode (adjusted for suiddir/setgid inheritance)
 * @inum: the inode number of the new dinode
 * @generation: the generation number for the new dinode
 *
 * Returns: errno
 */

static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
		       unsigned int mode, const struct gfs2_inum *inum,
		       const u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	unsigned int uid, gid;
	int error;

	/* Determine final mode/owner from the parent's inheritance rules */
	munge_mode_uid_gid(dip, &mode, &uid, &gid);
	gfs2_alloc_get(dip);

	error = gfs2_quota_lock(dip, uid, gid);
	if (error)
		goto out;

	/* Check the new owner has quota headroom before writing anything */
	error = gfs2_quota_check(dip, uid, gid);
	if (error)
		goto out_quota;

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
	if (error)
		goto out_quota;

	init_dinode(dip, gl, inum, mode, uid, gid, generation);
	gfs2_quota_change(dip, +1, uid, gid);
	gfs2_trans_end(sdp);

out_quota:
	gfs2_quota_unlock(dip);
out:
	gfs2_alloc_put(dip);
	return error;
}
739
740static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
741 struct gfs2_inode *ip)
742{
743 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
744 struct gfs2_alloc *al;
745 int alloc_required;
746 struct buffer_head *dibh;
747 int error;
748
749 al = gfs2_alloc_get(dip);
750
751 error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
752 if (error)
753 goto fail;
754
755 error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
756 if (alloc_required < 0)
757 goto fail;
758 if (alloc_required) {
759 error = gfs2_quota_check(dip, dip->i_di.di_uid,
760 dip->i_di.di_gid);
761 if (error)
762 goto fail_quota_locks;
763
764 al->al_requested = sdp->sd_max_dirres;
765
766 error = gfs2_inplace_reserve(dip);
767 if (error)
768 goto fail_quota_locks;
769
770 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
771 al->al_rgd->rd_ri.ri_length +
772 2 * RES_DINODE +
773 RES_STATFS + RES_QUOTA, 0);
774 if (error)
775 goto fail_ipreserv;
776 } else {
777 error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0);
778 if (error)
779 goto fail_quota_locks;
780 }
781
782 error = gfs2_dir_add(&dip->i_inode, name, &ip->i_num, IF2DT(ip->i_di.di_mode));
783 if (error)
784 goto fail_end_trans;
785
786 error = gfs2_meta_inode_buffer(ip, &dibh);
787 if (error)
788 goto fail_end_trans;
789 ip->i_di.di_nlink = 1;
790 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
791 gfs2_dinode_out(&ip->i_di, dibh->b_data);
792 brelse(dibh);
793 return 0;
794
795fail_end_trans:
796 gfs2_trans_end(sdp);
797
798fail_ipreserv:
799 if (dip->i_alloc.al_rgd)
800 gfs2_inplace_release(dip);
801
802fail_quota_locks:
803 gfs2_quota_unlock(dip);
804
805fail:
806 gfs2_alloc_put(dip);
807 return error;
808}
809
810static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip)
811{
812 int err;
813 size_t len;
814 void *value;
815 char *name;
816 struct gfs2_ea_request er;
817
818 err = security_inode_init_security(&ip->i_inode, &dip->i_inode,
819 &name, &value, &len);
820
821 if (err) {
822 if (err == -EOPNOTSUPP)
823 return 0;
824 return err;
825 }
826
827 memset(&er, 0, sizeof(struct gfs2_ea_request));
828
829 er.er_type = GFS2_EATYPE_SECURITY;
830 er.er_name = name;
831 er.er_data = value;
832 er.er_name_len = strlen(name);
833 er.er_data_len = len;
834
835 err = gfs2_ea_set_i(ip, &er);
836
837 kfree(value);
838 kfree(name);
839
840 return err;
841}
842
843/**
844 * gfs2_createi - Create a new inode
845 * @ghs: An array of two holders
846 * @name: The name of the new file
847 * @mode: the permissions on the new inode
848 *
849 * @ghs[0] is an initialized holder for the directory
850 * @ghs[1] is the holder for the inode lock
851 *
852 * If the return value is not NULL, the glocks on both the directory and the new
853 * file are held. A transaction has been started and an inplace reservation
854 * is held, as well.
855 *
856 * Returns: An inode
857 */
858
859struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
860 unsigned int mode)
861{
862 struct inode *inode;
863 struct gfs2_inode *dip = ghs->gh_gl->gl_object;
864 struct inode *dir = &dip->i_inode;
865 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
866 struct gfs2_inum inum;
867 int error;
868 u64 generation;
869
870 if (!name->len || name->len > GFS2_FNAMESIZE)
871 return ERR_PTR(-ENAMETOOLONG);
872
873 gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
874 error = gfs2_glock_nq(ghs);
875 if (error)
876 goto fail;
877
878 error = create_ok(dip, name, mode);
879 if (error)
880 goto fail_gunlock;
881
882 error = pick_formal_ino(sdp, &inum.no_formal_ino);
883 if (error)
884 goto fail_gunlock;
885
886 error = alloc_dinode(dip, &inum, &generation);
887 if (error)
888 goto fail_gunlock;
889
890 if (inum.no_addr < dip->i_num.no_addr) {
891 gfs2_glock_dq(ghs);
892
893 error = gfs2_glock_nq_num(sdp, inum.no_addr,
894 &gfs2_inode_glops, LM_ST_EXCLUSIVE,
895 GL_SKIP, ghs + 1);
896 if (error) {
897 return ERR_PTR(error);
898 }
899
900 gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs);
901 error = gfs2_glock_nq(ghs);
902 if (error) {
903 gfs2_glock_dq_uninit(ghs + 1);
904 return ERR_PTR(error);
905 }
906
907 error = create_ok(dip, name, mode);
908 if (error)
909 goto fail_gunlock2;
910 } else {
911 error = gfs2_glock_nq_num(sdp, inum.no_addr,
912 &gfs2_inode_glops, LM_ST_EXCLUSIVE,
913 GL_SKIP, ghs + 1);
914 if (error)
915 goto fail_gunlock;
916 }
917
918 error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation);
919 if (error)
920 goto fail_gunlock2;
921
922 inode = gfs2_inode_lookup(dir->i_sb, &inum, IF2DT(mode));
923 if (IS_ERR(inode))
924 goto fail_gunlock2;
925
926 error = gfs2_inode_refresh(GFS2_I(inode));
927 if (error)
928 goto fail_iput;
929
930 error = gfs2_acl_create(dip, GFS2_I(inode));
931 if (error)
932 goto fail_iput;
933
934 error = gfs2_security_init(dip, GFS2_I(inode));
935 if (error)
936 goto fail_iput;
937
938 error = link_dinode(dip, name, GFS2_I(inode));
939 if (error)
940 goto fail_iput;
941
942 if (!inode)
943 return ERR_PTR(-ENOMEM);
944 return inode;
945
946fail_iput:
947 iput(inode);
948fail_gunlock2:
949 gfs2_glock_dq_uninit(ghs + 1);
950fail_gunlock:
951 gfs2_glock_dq(ghs);
952fail:
953 return ERR_PTR(error);
954}
955
/**
 * gfs2_rmdiri - Remove a directory
 * @dip: The parent directory of the directory to be removed
 * @name: The name of the directory to be removed
 * @ip: The GFS2 inode of the directory to be removed
 *
 * Assumes Glocks on dip and ip are held
 *
 * Returns: errno
 */

int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
		struct gfs2_inode *ip)
{
	struct qstr dotname;
	int error;

	/* An empty directory contains exactly "." and ".." */
	if (ip->i_di.di_entries != 2) {
		if (gfs2_consist_inode(ip))
			gfs2_dinode_print(&ip->i_di);
		return -EIO;
	}

	error = gfs2_dir_del(dip, name);
	if (error)
		return error;

	/* The parent loses the link contributed by the child's ".." */
	error = gfs2_change_nlink(dip, -1);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, ".");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	gfs2_str2qstr(&dotname, "..");
	error = gfs2_dir_del(ip, &dotname);
	if (error)
		return error;

	/* Drop both the "." self-link and the parent's entry link */
	error = gfs2_change_nlink(ip, -2);
	if (error)
		return error;

	return error;
}
1003
/**
 * gfs2_unlink_ok - check to see that a inode is still in a directory
 * @dip: the directory
 * @name: the name of the file
 * @ip: the inode
 *
 * Assumes that the lock on (at least) @dip is held.
 *
 * Returns: 0 if the parent/child relationship is correct, errno if it isn't
 */

int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
		   struct gfs2_inode *ip)
{
	struct gfs2_inum inum;
	unsigned int type;
	int error;

	if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
		return -EPERM;

	/* Sticky directory: only the directory owner, the file owner,
	   or CAP_FOWNER may remove entries */
	if ((dip->i_di.di_mode & S_ISVTX) &&
	    dip->i_di.di_uid != current->fsuid &&
	    ip->i_di.di_uid != current->fsuid && !capable(CAP_FOWNER))
		return -EPERM;

	if (IS_APPEND(&dip->i_inode))
		return -EPERM;

	error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL);
	if (error)
		return error;

	/* Verify @name still refers to @ip and not something else */
	error = gfs2_dir_search(&dip->i_inode, name, &inum, &type);
	if (error)
		return error;

	if (!gfs2_inum_equal(&inum, &ip->i_num))
		return -ENOENT;

	/* Matching inum but mismatched type means on-disk inconsistency */
	if (IF2DT(ip->i_di.di_mode) != type) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	return 0;
}
1051
/**
 * gfs2_ok_to_move - check if it's ok to move a directory to another directory
 * @this: move this
 * @to: to here
 *
 * Follow @to back to the root and make sure we don't encounter @this
 * Assumes we already hold the rename lock.
 *
 * Returns: errno (-EINVAL if @this is an ancestor of @to)
 */

int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to)
{
	struct inode *dir = &to->i_inode;
	struct super_block *sb = dir->i_sb;
	struct inode *tmp;
	struct qstr dotdot;
	int error = 0;

	gfs2_str2qstr(&dotdot, "..");

	/* Take a reference on the starting point; each loop iteration
	   releases the current dir and holds its parent instead */
	igrab(dir);

	for (;;) {
		/* Found @this on the path to root: the move would create
		   a cycle */
		if (dir == &this->i_inode) {
			error = -EINVAL;
			break;
		}
		if (dir == sb->s_root->d_inode) {
			error = 0;
			break;
		}

		tmp = gfs2_lookupi(dir, &dotdot, 1, NULL);
		if (IS_ERR(tmp)) {
			error = PTR_ERR(tmp);
			break;
		}

		iput(dir);
		dir = tmp;
	}

	/* Drop the reference on wherever the walk stopped */
	iput(dir);

	return error;
}
1099
/**
 * gfs2_readlinki - return the contents of a symlink
 * @ip: the symlink's inode
 * @buf: a pointer to the buffer to be filled
 * @len: a pointer to the length of @buf
 *
 * If @buf is too small, a piece of memory is kmalloc()ed and needs
 * to be freed by the caller.  On success *len is set to the number of
 * bytes copied (the symlink target plus one byte).
 *
 * Returns: errno
 */

int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len)
{
	struct gfs2_holder i_gh;
	struct buffer_head *dibh;
	unsigned int x;
	int error;

	/* Shared lock, with deferred atime handling */
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	/* A symlink with no target is corrupt */
	if (!ip->i_di.di_size) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	/* Target plus one extra byte; the link data is stuffed directly
	   after the dinode header */
	x = ip->i_di.di_size + 1;
	if (x > *len) {
		*buf = kmalloc(x, GFP_KERNEL);
		if (!*buf) {
			error = -ENOMEM;
			goto out_brelse;
		}
	}

	memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x);
	*len = x;

out_brelse:
	brelse(dibh);
out:
	gfs2_glock_dq_uninit(&i_gh);
	return error;
}
1154
/**
 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and
 *                       conditionally update the inode's atime
 * @gh: the holder to acquire
 *
 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap
 * Update if the difference between the current time and the inode's current
 * atime is greater than an interval specified at mount.
 *
 * If an update is needed, the lock is temporarily upgraded to exclusive
 * to write the new atime, then (if contended) dropped and reacquired in
 * the caller's originally requested state.
 *
 * Returns: errno
 */

int gfs2_glock_nq_atime(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	s64 curtime, quantum = gfs2_tune_get(sdp, gt_atime_quantum);
	unsigned int state;
	int flags;
	int error;

	/* Only inode glocks with GL_ATIME and without GL_ASYNC are valid */
	if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) ||
	    gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) ||
	    gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops))
		return -EINVAL;

	/* Remember what the caller asked for, in case we must reacquire */
	state = gh->gh_state;
	flags = gh->gh_flags;

	error = gfs2_glock_nq(gh);
	if (error)
		return error;

	/* No atime updates on noatime or read-only mounts */
	if (test_bit(SDF_NOATIME, &sdp->sd_flags) ||
	    (sdp->sd_vfs->s_flags & MS_RDONLY))
		return 0;

	curtime = get_seconds();
	if (curtime - ip->i_di.di_atime >= quantum) {
		/* Upgrade to exclusive so the new atime can be written */
		gfs2_glock_dq(gh);
		gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY,
				   gh);
		error = gfs2_glock_nq(gh);
		if (error)
			return error;

		/* Verify that atime hasn't been updated while we were
		   trying to get exclusive lock. */

		curtime = get_seconds();
		if (curtime - ip->i_di.di_atime >= quantum) {
			struct buffer_head *dibh;
			struct gfs2_dinode *di;

			error = gfs2_trans_begin(sdp, RES_DINODE, 0);
			if (error == -EROFS)
				return 0;
			if (error)
				goto fail;

			error = gfs2_meta_inode_buffer(ip, &dibh);
			if (error)
				goto fail_end_trans;

			ip->i_di.di_atime = curtime;

			/* Write the new atime into the on-disk dinode */
			gfs2_trans_add_bh(ip->i_gl, dibh, 1);
			di = (struct gfs2_dinode *)dibh->b_data;
			di->di_atime = cpu_to_be64(ip->i_di.di_atime);
			brelse(dibh);

			gfs2_trans_end(sdp);
		}

		/* If someone else has asked for the glock,
		   unlock and let them have it. Then reacquire
		   in the original state. */
		if (gfs2_glock_is_blocking(gl)) {
			gfs2_glock_dq(gh);
			gfs2_holder_reinit(state, flags, gh);
			return gfs2_glock_nq(gh);
		}
	}

	return 0;

fail_end_trans:
	gfs2_trans_end(sdp);
fail:
	gfs2_glock_dq(gh);
	return error;
}
1248
/**
 * glock_compare_atime - Compare two struct gfs2_glock structures for sort
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Orders holders primarily by ascending lock number.  For equal numbers,
 * a shared holder is ordered after an exclusive one, and after one that
 * may be upgraded for an atime write (GL_ATIME).
 *
 * NOTE(review): the two equal-number cases return 1 but there is no
 * mirror-image -1 case, so this is not a strict weak ordering; the
 * result of sort() for such pairs depends on input order — confirm this
 * is intentional.
 *
 * Returns: 1 if A > B
 *         -1 if A < B
 *          0 if A == B
 */

static int glock_compare_atime(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
		return 1;
	if (gh_a->gh_state == LM_ST_SHARED && (gh_b->gh_flags & GL_ATIME))
		return 1;

	return 0;
}
1277
/**
 * gfs2_glock_nq_m_atime - acquire multiple glocks where one may need an
 *                         atime update
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Holders are sorted by lock number first so that every node acquires
 * them in the same order, avoiding cluster-wide deadlock.
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder **p;
	unsigned int x;
	int error = 0;

	if (!num_gh)
		return 0;

	/* A single holder needs no ordering: acquire it directly */
	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		if (ghs->gh_flags & GL_ATIME)
			error = gfs2_glock_nq_atime(ghs);
		else
			error = gfs2_glock_nq(ghs);
		return error;
	}

	p = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare_atime,NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		if (p[x]->gh_flags & GL_ATIME)
			error = gfs2_glock_nq_atime(p[x]);
		else
			error = gfs2_glock_nq(p[x]);

		if (error) {
			/* Back out everything acquired so far */
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	kfree(p);
	return error;
}
1333
1334
/* Apply @attr to the inode and write the resulting dinode back to disk.
 * Must be called inside a transaction (see gfs2_setattr_simple).
 */
static int
__gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (!error) {
		/* inode_setattr() is expected to succeed on this path;
		   a failure is only warned about, not handled */
		error = inode_setattr(&ip->i_inode, attr);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
		/* Mirror the updated VFS attributes into the dinode and
		   journal the change */
		gfs2_inode_attr_out(ip);

		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
		gfs2_dinode_out(&ip->i_di, dibh->b_data);
		brelse(dibh);
	}
	return error;
}
1353
1354/**
1355 * gfs2_setattr_simple -
1356 * @ip:
1357 * @attr:
1358 *
1359 * Called with a reference on the vnode.
1360 *
1361 * Returns: errno
1362 */
1363
1364int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr)
1365{
1366 int error;
1367
1368 if (current->journal_info)
1369 return __gfs2_setattr_simple(ip, attr);
1370
1371 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0);
1372 if (error)
1373 return error;
1374
1375 error = __gfs2_setattr_simple(ip, attr);
1376 gfs2_trans_end(GFS2_SB(&ip->i_inode));
1377 return error;
1378}
1379
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
new file mode 100644
index 000000000000..f5d861760579
--- /dev/null
+++ b/fs/gfs2/inode.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
#ifndef __INODE_DOT_H__
#define __INODE_DOT_H__

/* Data is "stuffed" directly in the dinode block (metadata tree height 0). */
static inline int gfs2_is_stuffed(struct gfs2_inode *ip)
{
	return !ip->i_di.di_height;
}

/* Inode has the journaled-data flag set in its dinode. */
static inline int gfs2_is_jdata(struct gfs2_inode *ip)
{
	return ip->i_di.di_flags & GFS2_DIF_JDATA;
}

/* Inode is a directory, judged from the on-disk mode. */
static inline int gfs2_is_dir(struct gfs2_inode *ip)
{
	return S_ISDIR(ip->i_di.di_mode);
}

/* Copy attributes dinode -> VFS inode, and VFS inode -> dinode. */
void gfs2_inode_attr_in(struct gfs2_inode *ip);
void gfs2_inode_attr_out(struct gfs2_inode *ip);
struct inode *gfs2_inode_lookup(struct super_block *sb, struct gfs2_inum *inum, unsigned type);
struct inode *gfs2_ilookup(struct super_block *sb, struct gfs2_inum *inum);

int gfs2_inode_refresh(struct gfs2_inode *ip);

int gfs2_dinode_dealloc(struct gfs2_inode *inode);
int gfs2_change_nlink(struct gfs2_inode *ip, int diff);
struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
			   int is_root, struct nameidata *nd);
struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name,
			   unsigned int mode);
int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name,
		struct gfs2_inode *ip);
int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name,
		   struct gfs2_inode *ip);
int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to);
int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len);

/* Enqueue variants that also update atime (see inode.c). */
int gfs2_glock_nq_atime(struct gfs2_holder *gh);
int gfs2_glock_nq_m_atime(unsigned int num_gh, struct gfs2_holder *ghs);

int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr);

struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);

#endif /* __INODE_DOT_H__ */
56
diff --git a/fs/gfs2/lm.c b/fs/gfs2/lm.c
new file mode 100644
index 000000000000..effe4a337c1d
--- /dev/null
+++ b/fs/gfs2/lm.c
@@ -0,0 +1,217 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/delay.h>
16#include <linux/gfs2_ondisk.h>
17#include <linux/lm_interface.h>
18
19#include "gfs2.h"
20#include "incore.h"
21#include "glock.h"
22#include "lm.h"
23#include "super.h"
24#include "util.h"
25
/**
 * gfs2_lm_mount - mount a locking protocol
 * @sdp: the filesystem
 * @silent: if 1, don't complain if the FS isn't a GFS2 fs
 *          (NOTE(review): @silent is currently unused in this function —
 *          verify against callers)
 *
 * Returns: errno
 */

int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
{
	char *proto = sdp->sd_proto_name;
	char *table = sdp->sd_table_name;
	int flags = 0;
	int error;

	if (sdp->sd_args.ar_spectator)
		flags |= LM_MFLAG_SPECTATOR;

	fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);

	error = gfs2_mount_lockproto(proto, table, sdp->sd_args.ar_hostdata,
				     gfs2_glock_cb, sdp,
				     GFS2_MIN_LVB_SIZE, flags,
				     &sdp->sd_lockstruct, &sdp->sd_kobj);
	if (error) {
		fs_info(sdp, "can't mount proto=%s, table=%s, hostdata=%s\n",
			proto, table, sdp->sd_args.ar_hostdata);
		goto out;
	}

	/* Sanity-check what the lock module gave us back.
	   NOTE(review): on this failure path 'error' is still 0, so the
	   caller would see success after the lockproto was unmounted —
	   confirm this is intended. */
	if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lockspace) ||
	    gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
	    gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >=
				  GFS2_MIN_LVB_SIZE)) {
		gfs2_unmount_lockproto(&sdp->sd_lockstruct);
		goto out;
	}

	/* Spectators get "<table>.s"; normal mounts get "<table>.<jid>". */
	if (sdp->sd_args.ar_spectator)
		snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s", table);
	else
		snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u", table,
			 sdp->sd_lockstruct.ls_jid);

	fs_info(sdp, "Joined cluster. Now mounting FS...\n");

	/* A local (single-node) lockspace implies local flocks/caching
	   unless the user explicitly asked to ignore that. */
	if ((sdp->sd_lockstruct.ls_flags & LM_LSFLAG_LOCAL) &&
	    !sdp->sd_args.ar_ignore_local_fs) {
		sdp->sd_args.ar_localflocks = 1;
		sdp->sd_args.ar_localcaching = 1;
	}

out:
	return error;
}
82
83void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
84{
85 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
86 sdp->sd_lockstruct.ls_ops->lm_others_may_mount(
87 sdp->sd_lockstruct.ls_lockspace);
88}
89
90void gfs2_lm_unmount(struct gfs2_sbd *sdp)
91{
92 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
93 gfs2_unmount_lockproto(&sdp->sd_lockstruct);
94}
95
/*
 * gfs2_lm_withdraw - withdraw this filesystem from the cluster on error
 * @sdp: the filesystem
 * @fmt: printk-style message describing why
 *
 * Only the first caller does the work: SDF_SHUTDOWN is test-and-set, so
 * subsequent callers return 0 immediately.  Returns -1 once withdrawn.
 */
int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
{
	va_list args;

	/* Withdraw only once. */
	if (test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return 0;

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	fs_err(sdp, "about to withdraw from the cluster\n");
	/* With the debug mount option, die loudly instead of withdrawing. */
	BUG_ON(sdp->sd_args.ar_debug);


	fs_err(sdp, "waiting for outstanding I/O\n");

	/* FIXME: suspend dm device so outstanding bio's complete
	   and all further io requests fail */

	fs_err(sdp, "telling LM to withdraw\n");
	gfs2_withdraw_lockproto(&sdp->sd_lockstruct);
	fs_err(sdp, "withdrawn\n");
	dump_stack();

	return -1;
}
123
124int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
125 void **lockp)
126{
127 int error = -EIO;
128 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
129 error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
130 sdp->sd_lockstruct.ls_lockspace, name, lockp);
131 return error;
132}
133
134void gfs2_lm_put_lock(struct gfs2_sbd *sdp, void *lock)
135{
136 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
137 sdp->sd_lockstruct.ls_ops->lm_put_lock(lock);
138}
139
140unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
141 unsigned int cur_state, unsigned int req_state,
142 unsigned int flags)
143{
144 int ret = 0;
145 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
146 ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
147 req_state, flags);
148 return ret;
149}
150
151unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
152 unsigned int cur_state)
153{
154 int ret = 0;
155 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
156 ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
157 return ret;
158}
159
160void gfs2_lm_cancel(struct gfs2_sbd *sdp, void *lock)
161{
162 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
163 sdp->sd_lockstruct.ls_ops->lm_cancel(lock);
164}
165
166int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
167{
168 int error = -EIO;
169 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
170 error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
171 return error;
172}
173
174void gfs2_lm_unhold_lvb(struct gfs2_sbd *sdp, void *lock, char *lvb)
175{
176 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
177 sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(lock, lvb);
178}
179
180int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name,
181 struct file *file, struct file_lock *fl)
182{
183 int error = -EIO;
184 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
185 error = sdp->sd_lockstruct.ls_ops->lm_plock_get(
186 sdp->sd_lockstruct.ls_lockspace, name, file, fl);
187 return error;
188}
189
190int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name,
191 struct file *file, int cmd, struct file_lock *fl)
192{
193 int error = -EIO;
194 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
195 error = sdp->sd_lockstruct.ls_ops->lm_plock(
196 sdp->sd_lockstruct.ls_lockspace, name, file, cmd, fl);
197 return error;
198}
199
200int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name,
201 struct file *file, struct file_lock *fl)
202{
203 int error = -EIO;
204 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
205 error = sdp->sd_lockstruct.ls_ops->lm_punlock(
206 sdp->sd_lockstruct.ls_lockspace, name, file, fl);
207 return error;
208}
209
210void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
211 unsigned int message)
212{
213 if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
214 sdp->sd_lockstruct.ls_ops->lm_recovery_done(
215 sdp->sd_lockstruct.ls_lockspace, jid, message);
216}
217
diff --git a/fs/gfs2/lm.h b/fs/gfs2/lm.h
new file mode 100644
index 000000000000..21cdc30ee08c
--- /dev/null
+++ b/fs/gfs2/lm.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
#ifndef __LM_DOT_H__
#define __LM_DOT_H__

struct gfs2_sbd;

/* Smallest lock value block size a lock module may provide. */
#define GFS2_MIN_LVB_SIZE 32

/* Thin wrappers around sdp->sd_lockstruct.ls_ops (see lm.c); each one
   becomes a no-op (or returns -EIO) once SDF_SHUTDOWN is set. */
int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent);
void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp);
void gfs2_lm_unmount(struct gfs2_sbd *sdp);
int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
	__attribute__ ((format(printf, 2, 3)));
int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
		     void **lockp);
void gfs2_lm_put_lock(struct gfs2_sbd *sdp, void *lock);
unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
			  unsigned int cur_state, unsigned int req_state,
			  unsigned int flags);
unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
			    unsigned int cur_state);
void gfs2_lm_cancel(struct gfs2_sbd *sdp, void *lock);
int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp);
void gfs2_lm_unhold_lvb(struct gfs2_sbd *sdp, void *lock, char *lvb);
int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name,
		      struct file *file, struct file_lock *fl);
int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name,
		  struct file *file, int cmd, struct file_lock *fl);
int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name,
		    struct file *file, struct file_lock *fl);
void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
			   unsigned int message);

#endif /* __LM_DOT_H__ */
diff --git a/fs/gfs2/locking.c b/fs/gfs2/locking.c
new file mode 100644
index 000000000000..663fee728783
--- /dev/null
+++ b/fs/gfs2/locking.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/string.h>
13#include <linux/slab.h>
14#include <linux/wait.h>
15#include <linux/sched.h>
16#include <linux/kmod.h>
17#include <linux/fs.h>
18#include <linux/delay.h>
19#include <linux/lm_interface.h>
20
/* One registered low-level locking protocol. */
struct lmh_wrapper {
	struct list_head lw_list;	/* entry on lmh_list */
	const struct lm_lockops *lw_ops;	/* the protocol's operations */
};

/* List of registered low-level locking protocols.  A file system selects one
   of them by name at mount time, e.g. lock_nolock, lock_dlm. */

static LIST_HEAD(lmh_list);
static DEFINE_MUTEX(lmh_lock);	/* protects lmh_list */
31
32/**
33 * gfs2_register_lockproto - Register a low-level locking protocol
34 * @proto: the protocol definition
35 *
36 * Returns: 0 on success, -EXXX on failure
37 */
38
39int gfs2_register_lockproto(const struct lm_lockops *proto)
40{
41 struct lmh_wrapper *lw;
42
43 mutex_lock(&lmh_lock);
44
45 list_for_each_entry(lw, &lmh_list, lw_list) {
46 if (!strcmp(lw->lw_ops->lm_proto_name, proto->lm_proto_name)) {
47 mutex_unlock(&lmh_lock);
48 printk(KERN_INFO "GFS2: protocol %s already exists\n",
49 proto->lm_proto_name);
50 return -EEXIST;
51 }
52 }
53
54 lw = kzalloc(sizeof(struct lmh_wrapper), GFP_KERNEL);
55 if (!lw) {
56 mutex_unlock(&lmh_lock);
57 return -ENOMEM;
58 }
59
60 lw->lw_ops = proto;
61 list_add(&lw->lw_list, &lmh_list);
62
63 mutex_unlock(&lmh_lock);
64
65 return 0;
66}
67
68/**
69 * gfs2_unregister_lockproto - Unregister a low-level locking protocol
70 * @proto: the protocol definition
71 *
72 */
73
74void gfs2_unregister_lockproto(const struct lm_lockops *proto)
75{
76 struct lmh_wrapper *lw;
77
78 mutex_lock(&lmh_lock);
79
80 list_for_each_entry(lw, &lmh_list, lw_list) {
81 if (!strcmp(lw->lw_ops->lm_proto_name, proto->lm_proto_name)) {
82 list_del(&lw->lw_list);
83 mutex_unlock(&lmh_lock);
84 kfree(lw);
85 return;
86 }
87 }
88
89 mutex_unlock(&lmh_lock);
90
91 printk(KERN_WARNING "GFS2: can't unregister lock protocol %s\n",
92 proto->lm_proto_name);
93}
94
/**
 * gfs2_mount_lockproto - Mount a lock protocol
 * @proto_name - the name of the protocol
 * @table_name - the name of the lock space
 * @host_data - data specific to this host
 * @cb - the callback to the code using the lock module
 * @cb_data - private data passed back through @cb
 * @min_lvb_size - the minimum LVB size that the caller can deal with
 * @flags - LM_MFLAG_*
 * @lockstruct - a structure returned describing the mount
 * @fskobj - the filesystem kobject, handed to the lock module
 *
 * Returns: 0 on success, -EXXX on failure
 */

int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data,
			 lm_callback_t cb, void *cb_data,
			 unsigned int min_lvb_size, int flags,
			 struct lm_lockstruct *lockstruct,
			 struct kobject *fskobj)
{
	struct lmh_wrapper *lw = NULL;
	int try = 0;
	int error, found;

retry:
	mutex_lock(&lmh_lock);

	/* Look for the named protocol among the registered ones. */
	found = 0;
	list_for_each_entry(lw, &lmh_list, lw_list) {
		if (!strcmp(lw->lw_ops->lm_proto_name, proto_name)) {
			found = 1;
			break;
		}
	}

	if (!found) {
		/* Not registered yet: try once to load a module of that
		   name, then search the list again. */
		if (!try && capable(CAP_SYS_MODULE)) {
			try = 1;
			mutex_unlock(&lmh_lock);
			request_module(proto_name);
			goto retry;
		}
		printk(KERN_INFO "GFS2: can't find protocol %s\n", proto_name);
		error = -ENOENT;
		goto out;
	}

	/* try_module_get() fails while the module is being unloaded;
	   back off for a second and start over. */
	if (!try_module_get(lw->lw_ops->lm_owner)) {
		try = 0;
		mutex_unlock(&lmh_lock);
		msleep(1000);
		goto retry;
	}

	error = lw->lw_ops->lm_mount(table_name, host_data, cb, cb_data,
				     min_lvb_size, flags, lockstruct, fskobj);
	if (error)
		module_put(lw->lw_ops->lm_owner);
out:
	mutex_unlock(&lmh_lock);
	return error;
}
157
158void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct)
159{
160 mutex_lock(&lmh_lock);
161 lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace);
162 if (lockstruct->ls_ops->lm_owner)
163 module_put(lockstruct->ls_ops->lm_owner);
164 mutex_unlock(&lmh_lock);
165}
166
/**
 * gfs2_withdraw_lockproto - abnormally unmount a lock module
 * @lockstruct: the lockstruct passed into mount
 *
 * Like gfs2_unmount_lockproto() but uses the protocol's lm_withdraw
 * hook; called when the filesystem withdraws from the cluster.
 */

void gfs2_withdraw_lockproto(struct lm_lockstruct *lockstruct)
{
	mutex_lock(&lmh_lock);
	lockstruct->ls_ops->lm_withdraw(lockstruct->ls_lockspace);
	/* Drop the module reference taken at mount time. */
	if (lockstruct->ls_ops->lm_owner)
		module_put(lockstruct->ls_ops->lm_owner);
	mutex_unlock(&lmh_lock);
}

/* Entry points used by the lock modules (lock_nolock, lock_dlm). */
EXPORT_SYMBOL_GPL(gfs2_register_lockproto);
EXPORT_SYMBOL_GPL(gfs2_unregister_lockproto);
184
diff --git a/fs/gfs2/locking/dlm/Makefile b/fs/gfs2/locking/dlm/Makefile
new file mode 100644
index 000000000000..89b93b6b45cf
--- /dev/null
+++ b/fs/gfs2/locking/dlm/Makefile
@@ -0,0 +1,3 @@
# lock_dlm: DLM-based cluster locking module for GFS2
obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o
lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o plock.o
3
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
new file mode 100644
index 000000000000..b167addf9fd1
--- /dev/null
+++ b/fs/gfs2/locking/dlm/lock.c
@@ -0,0 +1,524 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include "lock_dlm.h"
11
/* Throwaway LVB used by the NL "hold" lock (see hold_null_lock()). */
static char junk_lvb[GDLM_LVB_SIZE];

/* Move @lp onto the lockspace's completion list and wake the lock_dlm
   thread; called when a dlm request has finished (from the ast). */
static void queue_complete(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	clear_bit(LFL_ACTIVE, &lp->flags);

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->clist, &ls->complete);
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}
25
/* dlm completion ast: the request for this lock finished. */
static inline void gdlm_ast(void *astarg)
{
	queue_complete(astarg);
}
30
/* dlm blocking ast: a conflicting request at @mode is waiting on this
   lock.  Queue the lock for the lock_dlm thread, remembering only the
   strongest conflicting mode seen so far. */
static inline void gdlm_bast(void *astarg, int mode)
{
	struct gdlm_lock *lp = astarg;
	struct gdlm_ls *ls = lp->ls;

	if (!mode) {
		printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
			lp->lockname.ln_type,
			(unsigned long long)lp->lockname.ln_number);
		return;
	}

	spin_lock(&ls->async_lock);
	if (!lp->bast_mode) {
		/* Not queued yet: add to the blocking list. */
		list_add_tail(&lp->blist, &ls->blocking);
		lp->bast_mode = mode;
	} else if (lp->bast_mode < mode)
		/* Already queued: just raise the recorded mode. */
		lp->bast_mode = mode;
	spin_unlock(&ls->async_lock);
	wake_up(&ls->thread_wait);
}
52
/* Park @lp on the delayed list; it will be resubmitted once recovery
   finishes (see gdlm_submit_delayed()). */
void gdlm_queue_delayed(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->delay_list, &ls->delayed);
	spin_unlock(&ls->async_lock);
}
61
62/* convert gfs lock-state to dlm lock-mode */
63
64static s16 make_mode(s16 lmstate)
65{
66 switch (lmstate) {
67 case LM_ST_UNLOCKED:
68 return DLM_LOCK_NL;
69 case LM_ST_EXCLUSIVE:
70 return DLM_LOCK_EX;
71 case LM_ST_DEFERRED:
72 return DLM_LOCK_CW;
73 case LM_ST_SHARED:
74 return DLM_LOCK_PR;
75 }
76 gdlm_assert(0, "unknown LM state %d", lmstate);
77 return -1;
78}
79
80/* convert dlm lock-mode to gfs lock-state */
81
82s16 gdlm_make_lmstate(s16 dlmmode)
83{
84 switch (dlmmode) {
85 case DLM_LOCK_IV:
86 case DLM_LOCK_NL:
87 return LM_ST_UNLOCKED;
88 case DLM_LOCK_EX:
89 return LM_ST_EXCLUSIVE;
90 case DLM_LOCK_CW:
91 return LM_ST_DEFERRED;
92 case DLM_LOCK_PR:
93 return LM_ST_SHARED;
94 }
95 gdlm_assert(0, "unknown DLM mode %d", dlmmode);
96 return -1;
97}
98
/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
   DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */

static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
{
	s16 cur = make_mode(cur_state);
	/* A brand-new lock (still IV) has no state to agree on yet. */
	if (lp->cur != DLM_LOCK_IV)
		gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
}
108
/* Translate GFS request flags (LM_FLAG_*) plus the lock's current
   state into dlm request flags (DLM_LKF_*). */
static inline unsigned int make_flags(struct gdlm_lock *lp,
				      unsigned int gfs_flags,
				      s16 cur, s16 req)
{
	unsigned int lkf = 0;

	/* "Try" requests must not block in the dlm. */
	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	/* Try-with-one-callback also suppresses basts to other holders. */
	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	/* Priority requests jump to the head of the queue. */
	if (gfs_flags & LM_FLAG_PRIORITY) {
		lkf |= DLM_LKF_NOORDER;
		lkf |= DLM_LKF_HEADQUE;
	}

	/* "Any" means an alternate compatible mode is acceptable. */
	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
	}

	/* An existing lkid means this is a conversion, not a new lock. */
	if (lp->lksb.sb_lkid != 0) {
		lkf |= DLM_LKF_CONVERT;

		/* Conversion deadlock avoidance by DLM */

		if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) &&
		    !(lkf & DLM_LKF_NOQUEUE) &&
		    cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req)
			lkf |= DLM_LKF_CONVDEADLK;
	}

	if (lp->lvb)
		lkf |= DLM_LKF_VALBLK;

	return lkf;
}
151
152/* make_strname - convert GFS lock numbers to a string */
153
154static inline void make_strname(struct lm_lockname *lockname,
155 struct gdlm_strname *str)
156{
157 sprintf(str->name, "%8x%16llx", lockname->ln_type,
158 (unsigned long long)lockname->ln_number);
159 str->namelen = GDLM_STRNAME_BYTES;
160}
161
/* Allocate and initialise a gdlm_lock for @name and link it onto the
   lockspace's all_locks list.  On success *lpp points at the new lock;
   on -ENOMEM *lpp is left untouched. */
static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
			  struct gdlm_lock **lpp)
{
	struct gdlm_lock *lp;

	lp = kzalloc(sizeof(struct gdlm_lock), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;

	lp->lockname = *name;
	lp->ls = ls;
	lp->cur = DLM_LOCK_IV;	/* "invalid": no dlm lock exists yet */
	lp->lvb = NULL;
	lp->hold_null = NULL;
	init_completion(&lp->ast_wait);
	INIT_LIST_HEAD(&lp->clist);
	INIT_LIST_HEAD(&lp->blist);
	INIT_LIST_HEAD(&lp->delay_list);

	spin_lock(&ls->async_lock);
	list_add(&lp->all_list, &ls->all_locks);
	ls->all_locks_count++;
	spin_unlock(&ls->async_lock);

	*lpp = lp;
	return 0;
}
189
/* Unlink @lp from every list it may be on (complete, blocking, delayed,
   all_locks) and free it.  It must still be on all_locks. */
void gdlm_delete_lp(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->clist))
		list_del_init(&lp->clist);
	if (!list_empty(&lp->blist))
		list_del_init(&lp->blist);
	if (!list_empty(&lp->delay_list))
		list_del_init(&lp->delay_list);
	gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
		    (unsigned long long)lp->lockname.ln_number);
	list_del_init(&lp->all_list);
	ls->all_locks_count--;
	spin_unlock(&ls->async_lock);

	kfree(lp);
}
209
210int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
211 void **lockp)
212{
213 struct gdlm_lock *lp;
214 int error;
215
216 error = gdlm_create_lp(lockspace, name, &lp);
217
218 *lockp = lp;
219 return error;
220}
221
/* lm_put_lock operation: release a lock object from gdlm_get_lock(). */
void gdlm_put_lock(void *lock)
{
	gdlm_delete_lp(lock);
}
226
/* Submit the request already staged in lp->req/lp->lkf to the dlm.
   Returns LM_OUT_ASYNC when the request was queued (the result arrives
   through the ast) or LM_OUT_ERROR on an immediate failure. */
unsigned int gdlm_do_lock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	struct gdlm_strname str;
	int error, bast = 1;

	/*
	 * When recovery is in progress, delay lock requests for submission
	 * once recovery is done.  Requests for recovery (NOEXP) and unlocks
	 * can pass.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
		gdlm_queue_delayed(lp);
		return LM_OUT_ASYNC;
	}

	/*
	 * Submit the actual lock request.
	 */

	if (test_bit(LFL_NOBAST, &lp->flags))
		bast = 0;

	make_strname(&lp->lockname, &str);

	set_bit(LFL_ACTIVE, &lp->flags);

	log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
		  lp->cur, lp->req, lp->lkf);

	error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
			 str.name, str.namelen, 0, gdlm_ast, lp,
			 bast ? gdlm_bast : NULL);

	/* A refused NOQUEUE request is reported to GFS as an async
	   completion with -EAGAIN status, not as a submission error. */
	if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
		lp->lksb.sb_status = -EAGAIN;
		queue_complete(lp);
		error = 0;
	}

	if (error) {
		log_debug("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}
279
/* Submit an unlock for @lp to the dlm.  Returns LM_OUT_ASYNC when the
   unlock was queued, LM_OUT_ERROR otherwise. */
static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	unsigned int lkf = 0;
	int error;

	/* Mark that the pending dlm operation is an unlock so the
	   completion handler can tell it apart. */
	set_bit(LFL_DLM_UNLOCK, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	if (lp->lvb)
		lkf = DLM_LKF_VALBLK;

	log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
		  (unsigned long long)lp->lockname.ln_number,
		  lp->lksb.sb_lkid, lp->cur, lkf);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);

	if (error) {
		log_debug("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
			  "flags=%lx", ls->fsname, lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number, error,
			  lp->cur, lp->req, lp->lkf, lp->flags);
		return LM_OUT_ERROR;
	}
	return LM_OUT_ASYNC;
}
307
/* lm_lock operation: translate the GFS state/flags into a dlm request
   and submit it.  Result is delivered asynchronously via the ast. */
unsigned int gdlm_lock(void *lock, unsigned int cur_state,
		       unsigned int req_state, unsigned int flags)
{
	struct gdlm_lock *lp = lock;

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	/* NOEXP requests are recovery-related and must not be delayed. */
	if (flags & LM_FLAG_NOEXP)
		set_bit(LFL_NOBLOCK, &lp->flags);

	check_cur_state(lp, cur_state);
	lp->req = make_mode(req_state);
	lp->lkf = make_flags(lp, flags, lp->cur, lp->req);

	return gdlm_do_lock(lp);
}
323
/* lm_unlock operation.  A lock that never reached the dlm (still IV)
   has nothing to unlock; return 0 immediately. */
unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
{
	struct gdlm_lock *lp = lock;

	clear_bit(LFL_DLM_CANCEL, &lp->flags);
	if (lp->cur == DLM_LOCK_IV)
		return 0;
	return gdlm_do_unlock(lp);
}
333
/* lm_cancel operation: try to abort a pending request for @lock.
   A lock still parked on the delayed list is completed locally with
   LFL_CANCEL; one actually blocked in the dlm gets DLM_LKF_CANCEL. */
void gdlm_cancel(void *lock)
{
	struct gdlm_lock *lp = lock;
	struct gdlm_ls *ls = lp->ls;
	int error, delay_list = 0;

	/* A cancel is already in flight. */
	if (test_bit(LFL_DLM_CANCEL, &lp->flags))
		return;

	log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	spin_lock(&ls->async_lock);
	if (!list_empty(&lp->delay_list)) {
		list_del_init(&lp->delay_list);
		delay_list = 1;
	}
	spin_unlock(&ls->async_lock);

	/* Never reached the dlm: complete the cancel ourselves. */
	if (delay_list) {
		set_bit(LFL_CANCEL, &lp->flags);
		set_bit(LFL_ACTIVE, &lp->flags);
		queue_complete(lp);
		return;
	}

	/* Nothing outstanding, or the pending operation is an unlock
	   (which cannot be cancelled): skip. */
	if (!test_bit(LFL_ACTIVE, &lp->flags) ||
	    test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		log_info("gdlm_cancel skip %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number, lp->flags);
		return;
	}

	/* the lock is blocked in the dlm */

	set_bit(LFL_DLM_CANCEL, &lp->flags);
	set_bit(LFL_ACTIVE, &lp->flags);

	error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
			   NULL, lp);

	log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
		 lp->lockname.ln_type,
		 (unsigned long long)lp->lockname.ln_number, lp->flags);

	/* -EBUSY: the request completed before we could cancel it. */
	if (error == -EBUSY)
		clear_bit(LFL_DLM_CANCEL, &lp->flags);
}
383
384static int gdlm_add_lvb(struct gdlm_lock *lp)
385{
386 char *lvb;
387
388 lvb = kzalloc(GDLM_LVB_SIZE, GFP_KERNEL);
389 if (!lvb)
390 return -ENOMEM;
391
392 lp->lksb.sb_lvbptr = lvb;
393 lp->lvb = lvb;
394 return 0;
395}
396
397static void gdlm_del_lvb(struct gdlm_lock *lp)
398{
399 kfree(lp->lvb);
400 lp->lvb = NULL;
401 lp->lksb.sb_lvbptr = NULL;
402}
403
/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs won't call hold_lvb() during a callback (from
   the context of a lock_dlm thread). */

static int hold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = NULL;
	int error;

	if (lp->hold_null) {
		printk(KERN_INFO "lock_dlm: lvb already held\n");
		return 0;
	}

	/* Create a sibling lock on the same resource, held at NL, so the
	   resource (and its value block) persists in the dlm. */
	error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
	if (error)
		goto out;

	/* The NL lock's own lvb contents don't matter; point it at a
	   shared scratch buffer rather than allocating one. */
	lpn->lksb.sb_lvbptr = junk_lvb;
	lpn->lvb = junk_lvb;

	lpn->req = DLM_LOCK_NL;
	lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
	set_bit(LFL_NOBAST, &lpn->flags);
	set_bit(LFL_INLOCK, &lpn->flags);

	/* Wait synchronously for the lock_dlm thread to signal completion. */
	init_completion(&lpn->ast_wait);
	gdlm_do_lock(lpn);
	wait_for_completion(&lpn->ast_wait);
	error = lpn->lksb.sb_status;
	if (error) {
		printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
		       error);
		gdlm_delete_lp(lpn);
		lpn = NULL;
	}
out:
	lp->hold_null = lpn;
	return error;
}
444
/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
   the completion) because gfs may call unhold_lvb() during a callback (from
   the context of a lock_dlm thread) which could cause a deadlock since the
   other lock_dlm thread could be engaged in recovery. */

static void unhold_null_lock(struct gdlm_lock *lp)
{
	struct gdlm_lock *lpn = lp->hold_null;

	gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
		    (unsigned long long)lp->lockname.ln_number);
	/* Drop the junk lvb reference before unlocking; the completion
	   handler will delete the lock (LFL_UNLOCK_DELETE). */
	lpn->lksb.sb_lvbptr = NULL;
	lpn->lvb = NULL;
	set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
	gdlm_do_unlock(lpn);
	lp->hold_null = NULL;
}
462
/* Acquire a NL lock because gfs requires the value block to remain
   intact on the resource while the lvb is "held" even if it's holding no locks
   on the resource. */

int gdlm_hold_lvb(void *lock, char **lvbp)
{
	struct gdlm_lock *lp = lock;
	int error;

	error = gdlm_add_lvb(lp);
	if (error)
		return error;

	*lvbp = lp->lvb;

	/* Undo the lvb allocation if the NL lock can't be obtained. */
	error = hold_null_lock(lp);
	if (error)
		gdlm_del_lvb(lp);

	return error;
}
484
/* lm_unhold_lvb operation: drop the NL hold lock, then free the lvb. */
void gdlm_unhold_lvb(void *lock, char *lvb)
{
	struct gdlm_lock *lp = lock;

	unhold_null_lock(lp);
	gdlm_del_lvb(lp);
}
492
493void gdlm_submit_delayed(struct gdlm_ls *ls)
494{
495 struct gdlm_lock *lp, *safe;
496
497 spin_lock(&ls->async_lock);
498 list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
499 list_del_init(&lp->delay_list);
500 list_add_tail(&lp->delay_list, &ls->submit);
501 }
502 spin_unlock(&ls->async_lock);
503 wake_up(&ls->thread_wait);
504}
505
/* Free every lock still on the lockspace's all_locks list (used at
   unmount/withdraw).  Returns the number of locks released. */
int gdlm_release_all_locks(struct gdlm_ls *ls)
{
	struct gdlm_lock *lp, *safe;
	int count = 0;

	spin_lock(&ls->async_lock);
	list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) {
		list_del_init(&lp->all_list);

		/* junk_lvb is the shared static buffer; never kfree it. */
		if (lp->lvb && lp->lvb != junk_lvb)
			kfree(lp->lvb);
		kfree(lp);
		count++;
	}
	spin_unlock(&ls->async_lock);

	return count;
}
524
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
new file mode 100644
index 000000000000..33af707a4d3f
--- /dev/null
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -0,0 +1,187 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef LOCK_DLM_DOT_H
11#define LOCK_DLM_DOT_H
12
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/spinlock.h>
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/string.h>
19#include <linux/list.h>
20#include <linux/socket.h>
21#include <linux/delay.h>
22#include <linux/kthread.h>
23#include <linux/kobject.h>
24#include <linux/fcntl.h>
25#include <linux/wait.h>
26#include <net/sock.h>
27
28#include <linux/dlm.h>
29#include <linux/lm_interface.h>
30
31/*
32 * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
33 * prefix of lock_dlm_ gets awkward. Externally, GFS refers to this module
34 * as "lock_dlm".
35 */
36
37#define GDLM_STRNAME_BYTES 24
38#define GDLM_LVB_SIZE 32
39#define GDLM_DROP_COUNT 50000
40#define GDLM_DROP_PERIOD 60
41#define GDLM_NAME_LEN 128
42
43/* GFS uses 12 bytes to identify a resource (32 bit type + 64 bit number).
44 We sprintf these numbers into a 24 byte string of hex values to make them
45 human-readable (to make debugging simpler.) */
46
47struct gdlm_strname {
48 unsigned char name[GDLM_STRNAME_BYTES];
49 unsigned short namelen;
50};
51
52enum {
53 DFL_BLOCK_LOCKS = 0,
54 DFL_SPECTATOR = 1,
55 DFL_WITHDRAW = 2,
56};
57
58struct gdlm_ls {
59 u32 id;
60 int jid;
61 int first;
62 int first_done;
63 unsigned long flags;
64 struct kobject kobj;
65 char clustername[GDLM_NAME_LEN];
66 char fsname[GDLM_NAME_LEN];
67 int fsflags;
68 dlm_lockspace_t *dlm_lockspace;
69 lm_callback_t fscb;
70 struct gfs2_sbd *sdp;
71 int recover_jid;
72 int recover_jid_done;
73 int recover_jid_status;
74 spinlock_t async_lock;
75 struct list_head complete;
76 struct list_head blocking;
77 struct list_head delayed;
78 struct list_head submit;
79 struct list_head all_locks;
80 u32 all_locks_count;
81 wait_queue_head_t wait_control;
82 struct task_struct *thread1;
83 struct task_struct *thread2;
84 wait_queue_head_t thread_wait;
85 unsigned long drop_time;
86 int drop_locks_count;
87 int drop_locks_period;
88};
89
90enum {
91 LFL_NOBLOCK = 0,
92 LFL_NOCACHE = 1,
93 LFL_DLM_UNLOCK = 2,
94 LFL_DLM_CANCEL = 3,
95 LFL_SYNC_LVB = 4,
96 LFL_FORCE_PROMOTE = 5,
97 LFL_REREQUEST = 6,
98 LFL_ACTIVE = 7,
99 LFL_INLOCK = 8,
100 LFL_CANCEL = 9,
101 LFL_NOBAST = 10,
102 LFL_HEADQUE = 11,
103 LFL_UNLOCK_DELETE = 12,
104};
105
106struct gdlm_lock {
107 struct gdlm_ls *ls;
108 struct lm_lockname lockname;
109 char *lvb;
110 struct dlm_lksb lksb;
111
112 s16 cur;
113 s16 req;
114 s16 prev_req;
115 u32 lkf; /* dlm flags DLM_LKF_ */
116 unsigned long flags; /* lock_dlm flags LFL_ */
117
118 int bast_mode; /* protected by async_lock */
119 struct completion ast_wait;
120
121 struct list_head clist; /* complete */
122 struct list_head blist; /* blocking */
123 struct list_head delay_list; /* delayed */
124 struct list_head all_list; /* all locks for the fs */
125 struct gdlm_lock *hold_null; /* NL lock for hold_lvb */
126};
127
128#define gdlm_assert(assertion, fmt, args...) \
129do { \
130 if (unlikely(!(assertion))) { \
131 printk(KERN_EMERG "lock_dlm: fatal assertion failed \"%s\"\n" \
132 "lock_dlm: " fmt "\n", \
133 #assertion, ##args); \
134 BUG(); \
135 } \
136} while (0)
137
138#define log_print(lev, fmt, arg...) printk(lev "lock_dlm: " fmt "\n" , ## arg)
139#define log_info(fmt, arg...) log_print(KERN_INFO , fmt , ## arg)
140#define log_error(fmt, arg...) log_print(KERN_ERR , fmt , ## arg)
141#ifdef LOCK_DLM_LOG_DEBUG
142#define log_debug(fmt, arg...) log_print(KERN_DEBUG , fmt , ## arg)
143#else
144#define log_debug(fmt, arg...)
145#endif
146
147/* sysfs.c */
148
149int gdlm_sysfs_init(void);
150void gdlm_sysfs_exit(void);
151int gdlm_kobject_setup(struct gdlm_ls *, struct kobject *);
152void gdlm_kobject_release(struct gdlm_ls *);
153
154/* thread.c */
155
156int gdlm_init_threads(struct gdlm_ls *);
157void gdlm_release_threads(struct gdlm_ls *);
158
159/* lock.c */
160
161s16 gdlm_make_lmstate(s16);
162void gdlm_queue_delayed(struct gdlm_lock *);
163void gdlm_submit_delayed(struct gdlm_ls *);
164int gdlm_release_all_locks(struct gdlm_ls *);
165void gdlm_delete_lp(struct gdlm_lock *);
166unsigned int gdlm_do_lock(struct gdlm_lock *);
167
168int gdlm_get_lock(void *, struct lm_lockname *, void **);
169void gdlm_put_lock(void *);
170unsigned int gdlm_lock(void *, unsigned int, unsigned int, unsigned int);
171unsigned int gdlm_unlock(void *, unsigned int);
172void gdlm_cancel(void *);
173int gdlm_hold_lvb(void *, char **);
174void gdlm_unhold_lvb(void *, char *);
175
176/* plock.c */
177
178int gdlm_plock_init(void);
179void gdlm_plock_exit(void);
180int gdlm_plock(void *, struct lm_lockname *, struct file *, int,
181 struct file_lock *);
182int gdlm_plock_get(void *, struct lm_lockname *, struct file *,
183 struct file_lock *);
184int gdlm_punlock(void *, struct lm_lockname *, struct file *,
185 struct file_lock *);
186#endif
187
diff --git a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c
new file mode 100644
index 000000000000..2194b1d5b5ec
--- /dev/null
+++ b/fs/gfs2/locking/dlm/main.c
@@ -0,0 +1,64 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/init.h>
11
12#include "lock_dlm.h"
13
/* Defined in mount.c; defaults are (re)set in init_lock_dlm() below. */
extern int gdlm_drop_count;
extern int gdlm_drop_period;

/* NOTE(review): mount.c defines gdlm_ops as "const struct lm_lockops";
   declaring it without const here is a qualifier mismatch across
   translation units — confirm and make the declarations agree. */
extern struct lm_lockops gdlm_ops;
18
19static int __init init_lock_dlm(void)
20{
21 int error;
22
23 error = gfs2_register_lockproto(&gdlm_ops);
24 if (error) {
25 printk(KERN_WARNING "lock_dlm: can't register protocol: %d\n",
26 error);
27 return error;
28 }
29
30 error = gdlm_sysfs_init();
31 if (error) {
32 gfs2_unregister_lockproto(&gdlm_ops);
33 return error;
34 }
35
36 error = gdlm_plock_init();
37 if (error) {
38 gdlm_sysfs_exit();
39 gfs2_unregister_lockproto(&gdlm_ops);
40 return error;
41 }
42
43 gdlm_drop_count = GDLM_DROP_COUNT;
44 gdlm_drop_period = GDLM_DROP_PERIOD;
45
46 printk(KERN_INFO
47 "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
48 return 0;
49}
50
/* Module exit: undo init_lock_dlm() in reverse order. */
static void __exit exit_lock_dlm(void)
{
	gdlm_plock_exit();
	gdlm_sysfs_exit();
	gfs2_unregister_lockproto(&gdlm_ops);
}
57
/* Standard module hooks and metadata. */
module_init(init_lock_dlm);
module_exit(exit_lock_dlm);

MODULE_DESCRIPTION("GFS DLM Locking Module");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
64
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
new file mode 100644
index 000000000000..1f94dd35a943
--- /dev/null
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -0,0 +1,255 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include "lock_dlm.h"
11
/* Module-wide defaults for the lock-drop heuristic; set by main.c at
   module init and copied into each new lockspace in init_gdlm(). */
int gdlm_drop_count;
int gdlm_drop_period;
/* Tentative definition; the initialized table is at the end of this file. */
const struct lm_lockops gdlm_ops;
15
16
17static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
18 int flags, char *table_name)
19{
20 struct gdlm_ls *ls;
21 char buf[256], *p;
22
23 ls = kzalloc(sizeof(struct gdlm_ls), GFP_KERNEL);
24 if (!ls)
25 return NULL;
26
27 ls->drop_locks_count = gdlm_drop_count;
28 ls->drop_locks_period = gdlm_drop_period;
29 ls->fscb = cb;
30 ls->sdp = sdp;
31 ls->fsflags = flags;
32 spin_lock_init(&ls->async_lock);
33 INIT_LIST_HEAD(&ls->complete);
34 INIT_LIST_HEAD(&ls->blocking);
35 INIT_LIST_HEAD(&ls->delayed);
36 INIT_LIST_HEAD(&ls->submit);
37 INIT_LIST_HEAD(&ls->all_locks);
38 init_waitqueue_head(&ls->thread_wait);
39 init_waitqueue_head(&ls->wait_control);
40 ls->thread1 = NULL;
41 ls->thread2 = NULL;
42 ls->drop_time = jiffies;
43 ls->jid = -1;
44
45 strncpy(buf, table_name, 256);
46 buf[255] = '\0';
47
48 p = strstr(buf, ":");
49 if (!p) {
50 log_info("invalid table_name \"%s\"", table_name);
51 kfree(ls);
52 return NULL;
53 }
54 *p = '\0';
55 p++;
56
57 strncpy(ls->clustername, buf, GDLM_NAME_LEN);
58 strncpy(ls->fsname, p, GDLM_NAME_LEN);
59
60 return ls;
61}
62
63static int make_args(struct gdlm_ls *ls, char *data_arg, int *nodir)
64{
65 char data[256];
66 char *options, *x, *y;
67 int error = 0;
68
69 memset(data, 0, 256);
70 strncpy(data, data_arg, 255);
71
72 for (options = data; (x = strsep(&options, ":")); ) {
73 if (!*x)
74 continue;
75
76 y = strchr(x, '=');
77 if (y)
78 *y++ = 0;
79
80 if (!strcmp(x, "jid")) {
81 if (!y) {
82 log_error("need argument to jid");
83 error = -EINVAL;
84 break;
85 }
86 sscanf(y, "%u", &ls->jid);
87
88 } else if (!strcmp(x, "first")) {
89 if (!y) {
90 log_error("need argument to first");
91 error = -EINVAL;
92 break;
93 }
94 sscanf(y, "%u", &ls->first);
95
96 } else if (!strcmp(x, "id")) {
97 if (!y) {
98 log_error("need argument to id");
99 error = -EINVAL;
100 break;
101 }
102 sscanf(y, "%u", &ls->id);
103
104 } else if (!strcmp(x, "nodir")) {
105 if (!y) {
106 log_error("need argument to nodir");
107 error = -EINVAL;
108 break;
109 }
110 sscanf(y, "%u", nodir);
111
112 } else {
113 log_error("unkonwn option: %s", x);
114 error = -EINVAL;
115 break;
116 }
117 }
118
119 return error;
120}
121
122static int gdlm_mount(char *table_name, char *host_data,
123 lm_callback_t cb, void *cb_data,
124 unsigned int min_lvb_size, int flags,
125 struct lm_lockstruct *lockstruct,
126 struct kobject *fskobj)
127{
128 struct gdlm_ls *ls;
129 int error = -ENOMEM, nodir = 0;
130
131 if (min_lvb_size > GDLM_LVB_SIZE)
132 goto out;
133
134 ls = init_gdlm(cb, cb_data, flags, table_name);
135 if (!ls)
136 goto out;
137
138 error = make_args(ls, host_data, &nodir);
139 if (error)
140 goto out;
141
142 error = gdlm_init_threads(ls);
143 if (error)
144 goto out_free;
145
146 error = gdlm_kobject_setup(ls, fskobj);
147 if (error)
148 goto out_thread;
149
150 error = dlm_new_lockspace(ls->fsname, strlen(ls->fsname),
151 &ls->dlm_lockspace,
152 nodir ? DLM_LSFL_NODIR : 0,
153 GDLM_LVB_SIZE);
154 if (error) {
155 log_error("dlm_new_lockspace error %d", error);
156 goto out_kobj;
157 }
158
159 lockstruct->ls_jid = ls->jid;
160 lockstruct->ls_first = ls->first;
161 lockstruct->ls_lockspace = ls;
162 lockstruct->ls_ops = &gdlm_ops;
163 lockstruct->ls_flags = 0;
164 lockstruct->ls_lvb_size = GDLM_LVB_SIZE;
165 return 0;
166
167out_kobj:
168 gdlm_kobject_release(ls);
169out_thread:
170 gdlm_release_threads(ls);
171out_free:
172 kfree(ls);
173out:
174 return error;
175}
176
/*
 * gdlm_unmount - tear down a lockspace at unmount time
 *
 * If a withdraw already happened, gdlm_withdraw() has released
 * everything except the gdlm_ls itself, so only kfree() remains.
 */
static void gdlm_unmount(void *lockspace)
{
	struct gdlm_ls *ls = lockspace;
	int rv;

	log_debug("unmount flags %lx", ls->flags);

	/* FIXME: serialize unmount and withdraw in case they
	   happen at once.  Also, if unmount follows withdraw,
	   wait for withdraw to finish. */

	if (test_bit(DFL_WITHDRAW, &ls->flags))
		goto out;

	/* sysfs first, then the dlm lockspace, then our threads and locks */
	gdlm_kobject_release(ls);
	dlm_release_lockspace(ls->dlm_lockspace, 2);
	gdlm_release_threads(ls);
	rv = gdlm_release_all_locks(ls);
	if (rv)
		log_info("gdlm_unmount: %d stray locks freed", rv);
out:
	kfree(ls);
}
200
/* Record the journal id and status of a completed recovery and poke
   userspace via a sysfs uevent so it can read recover_done/recover_status. */
static void gdlm_recovery_done(void *lockspace, unsigned int jid,
			       unsigned int message)
{
	struct gdlm_ls *ls = lockspace;
	ls->recover_jid_done = jid;
	ls->recover_jid_status = message;
	kobject_uevent(&ls->kobj, KOBJ_CHANGE);
}
209
/* First mounter is done; flag it and notify userspace so other nodes
   may now mount this filesystem. */
static void gdlm_others_may_mount(void *lockspace)
{
	struct gdlm_ls *ls = lockspace;
	ls->first_done = 1;
	kobject_uevent(&ls->kobj, KOBJ_CHANGE);
}
216
/* Userspace gets the offline uevent, blocks new gfs locks on
   other mounters, and lets us know (sets WITHDRAW flag).  Then,
   userspace leaves the mount group while we leave the lockspace. */

static void gdlm_withdraw(void *lockspace)
{
	struct gdlm_ls *ls = lockspace;

	kobject_uevent(&ls->kobj, KOBJ_OFFLINE);

	/* woken by withdraw_store() in sysfs.c once userspace acks */
	wait_event_interruptible(ls->wait_control,
				 test_bit(DFL_WITHDRAW, &ls->flags));

	dlm_release_lockspace(ls->dlm_lockspace, 2);
	gdlm_release_threads(ls);
	gdlm_release_all_locks(ls);
	gdlm_kobject_release(ls);
}
235
/* The lm_lockops table GFS2 calls into; lock/LVB entry points live in
   lock.c, plock entry points in plock.c, the rest in this file. */
const struct lm_lockops gdlm_ops = {
	.lm_proto_name = "lock_dlm",
	.lm_mount = gdlm_mount,
	.lm_others_may_mount = gdlm_others_may_mount,
	.lm_unmount = gdlm_unmount,
	.lm_withdraw = gdlm_withdraw,
	.lm_get_lock = gdlm_get_lock,
	.lm_put_lock = gdlm_put_lock,
	.lm_lock = gdlm_lock,
	.lm_unlock = gdlm_unlock,
	.lm_plock = gdlm_plock,
	.lm_punlock = gdlm_punlock,
	.lm_plock_get = gdlm_plock_get,
	.lm_cancel = gdlm_cancel,
	.lm_hold_lvb = gdlm_hold_lvb,
	.lm_unhold_lvb = gdlm_unhold_lvb,
	.lm_recovery_done = gdlm_recovery_done,
	.lm_owner = THIS_MODULE,
};
255
diff --git a/fs/gfs2/locking/dlm/plock.c b/fs/gfs2/locking/dlm/plock.c
new file mode 100644
index 000000000000..7365aec9511b
--- /dev/null
+++ b/fs/gfs2/locking/dlm/plock.c
@@ -0,0 +1,301 @@
1/*
2 * Copyright (C) 2005 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License version 2.
7 */
8
9#include <linux/miscdevice.h>
10#include <linux/lock_dlm_plock.h>
11
12#include "lock_dlm.h"
13
14
/* Single module-wide channel between kernel plock requests and the
   userspace daemon that services them through the misc device below. */
static spinlock_t ops_lock;		/* protects send_list and recv_list */
static struct list_head send_list;	/* ops waiting to be read by userspace */
static struct list_head recv_list;	/* ops read, awaiting a result write */
static wait_queue_head_t send_wq;	/* dev_read()/dev_poll() wait for work */
static wait_queue_head_t recv_wq;	/* requesters wait here for op->done */

struct plock_op {
	struct list_head list;
	int done;			/* set by dev_write() when result arrives */
	struct gdlm_plock_info info;	/* request out / result back in */
};
26
/* Stamp an outgoing op with the kernel's plock protocol version so
   userspace can detect incompatibility. */
static inline void set_version(struct gdlm_plock_info *info)
{
	info->version[0] = GDLM_PLOCK_VERSION_MAJOR;
	info->version[1] = GDLM_PLOCK_VERSION_MINOR;
	info->version[2] = GDLM_PLOCK_VERSION_PATCH;
}
33
34static int check_version(struct gdlm_plock_info *info)
35{
36 if ((GDLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
37 (GDLM_PLOCK_VERSION_MINOR < info->version[1])) {
38 log_error("plock device version mismatch: "
39 "kernel (%u.%u.%u), user (%u.%u.%u)",
40 GDLM_PLOCK_VERSION_MAJOR,
41 GDLM_PLOCK_VERSION_MINOR,
42 GDLM_PLOCK_VERSION_PATCH,
43 info->version[0],
44 info->version[1],
45 info->version[2]);
46 return -EINVAL;
47 }
48 return 0;
49}
50
/* Queue an op for userspace: version-stamp it, append it to send_list
   under ops_lock, then wake any reader/poller of the misc device. */
static void send_op(struct plock_op *op)
{
	set_version(&op->info);
	INIT_LIST_HEAD(&op->list);
	spin_lock(&ops_lock);
	list_add_tail(&op->list, &send_list);
	spin_unlock(&ops_lock);
	wake_up(&send_wq);
}
60
/*
 * gdlm_plock - acquire or test a POSIX lock via the userspace daemon
 * @lockspace: the gdlm_ls for this fs; @name: lock name (inode number)
 * @file, @cmd, @fl: as passed down from the VFS fcntl path
 *
 * Queues a GDLM_PLOCK_OP_LOCK op and sleeps until userspace writes the
 * result back (dev_write() sets op->done).  On success the lock is also
 * recorded in the local VFS lock table.  Returns the daemon's rv.
 */
int gdlm_plock(void *lockspace, struct lm_lockname *name,
	       struct file *file, int cmd, struct file_lock *fl)
{
	struct gdlm_ls *ls = lockspace;
	struct plock_op *op;
	int rv;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	op->info.optype = GDLM_PLOCK_OP_LOCK;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.wait = IS_SETLKW(cmd);
	op->info.fsid = ls->id;
	op->info.number = name->ln_number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	/* owner identifies the holder across the user/kernel boundary */
	op->info.owner = (__u64)(long) fl->fl_owner;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	/* dev_write() normally unlinks the op; still being listed here is
	   unexpected, so log it and recover. */
	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		printk(KERN_INFO "plock op on list\n");
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	if (!rv) {
		if (posix_lock_file_wait(file, fl) < 0)
			log_error("gdlm_plock: vfs lock error %x,%llx",
				  name->ln_type,
				  (unsigned long long)name->ln_number);
	}

	kfree(op);
	return rv;
}
104
/*
 * gdlm_punlock - release a POSIX lock via the userspace daemon
 *
 * The local VFS lock is dropped first, then a GDLM_PLOCK_OP_UNLOCK op
 * is queued and we sleep until userspace writes the result back.
 */
int gdlm_punlock(void *lockspace, struct lm_lockname *name,
		 struct file *file, struct file_lock *fl)
{
	struct gdlm_ls *ls = lockspace;
	struct plock_op *op;
	int rv;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	if (posix_lock_file_wait(file, fl) < 0)
		log_error("gdlm_punlock: vfs unlock error %x,%llx",
			  name->ln_type, (unsigned long long)name->ln_number);

	op->info.optype = GDLM_PLOCK_OP_UNLOCK;
	op->info.pid = fl->fl_pid;
	op->info.fsid = ls->id;
	op->info.number = name->ln_number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	op->info.owner = (__u64)(long) fl->fl_owner;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	/* see gdlm_plock(): the op should already be off the lists */
	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		printk(KERN_INFO "punlock op on list\n");
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	kfree(op);
	return rv;
}
143
/*
 * gdlm_plock_get - query (F_GETLK-style) for a conflicting POSIX lock
 *
 * Returns 0 and sets fl_type = F_UNLCK when no conflict exists; a
 * positive rv means a conflict, and the conflicting lock's details are
 * copied back into @fl.  A negative rv is an error from the daemon.
 */
int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
		   struct file *file, struct file_lock *fl)
{
	struct gdlm_ls *ls = lockspace;
	struct plock_op *op;
	int rv;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	op->info.optype = GDLM_PLOCK_OP_GET;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.fsid = ls->id;
	op->info.number = name->ln_number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	/* see gdlm_plock(): the op should already be off the lists */
	spin_lock(&ops_lock);
	if (!list_empty(&op->list)) {
		printk(KERN_INFO "plock_get op on list\n");
		list_del(&op->list);
	}
	spin_unlock(&ops_lock);

	rv = op->info.rv;

	if (rv == 0)
		fl->fl_type = F_UNLCK;
	else if (rv > 0) {
		/* conflict: report the blocking lock back to the caller */
		fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
		fl->fl_pid = op->info.pid;
		fl->fl_start = op->info.start;
		fl->fl_end = op->info.end;
	}

	kfree(op);
	return rv;
}
187
188/* a read copies out one plock request from the send list */
189static ssize_t dev_read(struct file *file, char __user *u, size_t count,
190 loff_t *ppos)
191{
192 struct gdlm_plock_info info;
193 struct plock_op *op = NULL;
194
195 if (count < sizeof(info))
196 return -EINVAL;
197
198 spin_lock(&ops_lock);
199 if (!list_empty(&send_list)) {
200 op = list_entry(send_list.next, struct plock_op, list);
201 list_move(&op->list, &recv_list);
202 memcpy(&info, &op->info, sizeof(info));
203 }
204 spin_unlock(&ops_lock);
205
206 if (!op)
207 return -EAGAIN;
208
209 if (copy_to_user(u, &info, sizeof(info)))
210 return -EFAULT;
211 return sizeof(info);
212}
213
/* a write copies in one plock result that should match a plock_op
   on the recv list */
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
			 loff_t *ppos)
{
	struct gdlm_plock_info info;
	struct plock_op *op;
	int found = 0;

	if (count != sizeof(info))
		return -EINVAL;

	if (copy_from_user(&info, u, sizeof(info)))
		return -EFAULT;

	if (check_version(&info))
		return -EINVAL;

	/* Match the result to a waiting op by (fsid, inode number, owner),
	   copy the result in, and mark it done under the lock. */
	spin_lock(&ops_lock);
	list_for_each_entry(op, &recv_list, list) {
		if (op->info.fsid == info.fsid && op->info.number == info.number &&
		    op->info.owner == info.owner) {
			list_del_init(&op->list);
			found = 1;
			op->done = 1;
			memcpy(&op->info, &info, sizeof(info));
			break;
		}
	}
	spin_unlock(&ops_lock);

	if (found)
		wake_up(&recv_wq);
	else
		printk(KERN_INFO "gdlm dev_write no op %x %llx\n", info.fsid,
		       (unsigned long long)info.number);
	return count;
}
252
253static unsigned int dev_poll(struct file *file, poll_table *wait)
254{
255 poll_wait(file, &send_wq, wait);
256
257 spin_lock(&ops_lock);
258 if (!list_empty(&send_list)) {
259 spin_unlock(&ops_lock);
260 return POLLIN | POLLRDNORM;
261 }
262 spin_unlock(&ops_lock);
263 return 0;
264}
265
/* File operations for the plock misc device used by the daemon. */
static struct file_operations dev_fops = {
	.read = dev_read,
	.write = dev_write,
	.poll = dev_poll,
	.owner = THIS_MODULE
};
272
/* Misc device the userspace plock daemon opens; minor is dynamic. */
static struct miscdevice plock_dev_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = GDLM_PLOCK_MISC_NAME,
	.fops = &dev_fops
};
278
279int gdlm_plock_init(void)
280{
281 int rv;
282
283 spin_lock_init(&ops_lock);
284 INIT_LIST_HEAD(&send_list);
285 INIT_LIST_HEAD(&recv_list);
286 init_waitqueue_head(&send_wq);
287 init_waitqueue_head(&recv_wq);
288
289 rv = misc_register(&plock_dev_misc);
290 if (rv)
291 printk(KERN_INFO "gdlm_plock_init: misc_register failed %d",
292 rv);
293 return rv;
294}
295
296void gdlm_plock_exit(void)
297{
298 if (misc_deregister(&plock_dev_misc) < 0)
299 printk(KERN_INFO "gdlm_plock_exit: misc_deregister failed");
300}
301
diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c
new file mode 100644
index 000000000000..29ae06f94944
--- /dev/null
+++ b/fs/gfs2/locking/dlm/sysfs.c
@@ -0,0 +1,226 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/ctype.h>
11#include <linux/stat.h>
12
13#include "lock_dlm.h"
14
15extern struct lm_lockops gdlm_ops;
16
/* sysfs: report the protocol name ("lock_dlm") from the ops table. */
static ssize_t proto_name_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%s\n", gdlm_ops.lm_proto_name);
}
21
22static ssize_t block_show(struct gdlm_ls *ls, char *buf)
23{
24 ssize_t ret;
25 int val = 0;
26
27 if (test_bit(DFL_BLOCK_LOCKS, &ls->flags))
28 val = 1;
29 ret = sprintf(buf, "%d\n", val);
30 return ret;
31}
32
33static ssize_t block_store(struct gdlm_ls *ls, const char *buf, size_t len)
34{
35 ssize_t ret = len;
36 int val;
37
38 val = simple_strtol(buf, NULL, 0);
39
40 if (val == 1)
41 set_bit(DFL_BLOCK_LOCKS, &ls->flags);
42 else if (val == 0) {
43 clear_bit(DFL_BLOCK_LOCKS, &ls->flags);
44 gdlm_submit_delayed(ls);
45 } else {
46 ret = -EINVAL;
47 }
48 return ret;
49}
50
51static ssize_t withdraw_show(struct gdlm_ls *ls, char *buf)
52{
53 ssize_t ret;
54 int val = 0;
55
56 if (test_bit(DFL_WITHDRAW, &ls->flags))
57 val = 1;
58 ret = sprintf(buf, "%d\n", val);
59 return ret;
60}
61
62static ssize_t withdraw_store(struct gdlm_ls *ls, const char *buf, size_t len)
63{
64 ssize_t ret = len;
65 int val;
66
67 val = simple_strtol(buf, NULL, 0);
68
69 if (val == 1)
70 set_bit(DFL_WITHDRAW, &ls->flags);
71 else
72 ret = -EINVAL;
73 wake_up(&ls->wait_control);
74 return ret;
75}
76
/* Simple one-value sysfs accessors for lockspace state. */

static ssize_t id_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%u\n", ls->id);
}

static ssize_t jid_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->jid);
}

static ssize_t first_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->first);
}

static ssize_t first_done_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->first_done);
}

static ssize_t recover_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->recover_jid);
}

/* Writing a journal id here asks GFS2 (via the fs callback) to recover
   that journal. */
static ssize_t recover_store(struct gdlm_ls *ls, const char *buf, size_t len)
{
	ls->recover_jid = simple_strtol(buf, NULL, 0);
	ls->fscb(ls->sdp, LM_CB_NEED_RECOVERY, &ls->recover_jid);
	return len;
}

static ssize_t recover_done_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->recover_jid_done);
}

static ssize_t recover_status_show(struct gdlm_ls *ls, char *buf)
{
	return sprintf(buf, "%d\n", ls->recover_jid_status);
}
118
/* One sysfs attribute: name/mode plus typed show/store operating on the
   owning gdlm_ls (recovered via container_of in the dispatchers below). */
struct gdlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct gdlm_ls *, char *);
	ssize_t (*store)(struct gdlm_ls *, const char *, size_t);
};

#define GDLM_ATTR(_name,_mode,_show,_store) \
static struct gdlm_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)

GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
GDLM_ATTR(id, 0444, id_show, NULL);
GDLM_ATTR(jid, 0444, jid_show, NULL);
GDLM_ATTR(first, 0444, first_show, NULL);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);
GDLM_ATTR(recover, 0644, recover_show, recover_store);
GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);

/* Default attributes created for every lockspace kobject. */
static struct attribute *gdlm_attrs[] = {
	&gdlm_attr_proto_name.attr,
	&gdlm_attr_block.attr,
	&gdlm_attr_withdraw.attr,
	&gdlm_attr_id.attr,
	&gdlm_attr_jid.attr,
	&gdlm_attr_first.attr,
	&gdlm_attr_first_done.attr,
	&gdlm_attr_recover.attr,
	&gdlm_attr_recover_done.attr,
	&gdlm_attr_recover_status.attr,
	NULL,
};
152
153static ssize_t gdlm_attr_show(struct kobject *kobj, struct attribute *attr,
154 char *buf)
155{
156 struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
157 struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr);
158 return a->show ? a->show(ls, buf) : 0;
159}
160
161static ssize_t gdlm_attr_store(struct kobject *kobj, struct attribute *attr,
162 const char *buf, size_t len)
163{
164 struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
165 struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr);
166 return a->store ? a->store(ls, buf, len) : len;
167}
168
/* Glue routing sysfs reads/writes to the gdlm_attr show/store hooks. */
static struct sysfs_ops gdlm_attr_ops = {
	.show = gdlm_attr_show,
	.store = gdlm_attr_store,
};

static struct kobj_type gdlm_ktype = {
	.default_attrs = gdlm_attrs,
	.sysfs_ops = &gdlm_attr_ops,
};

/* /sys/kernel/lock_dlm: kset every per-fs "lock_module" kobject joins. */
static struct kset gdlm_kset = {
	.subsys = &kernel_subsys,
	.kobj = {.name = "lock_dlm",},
	.ktype = &gdlm_ktype,
};
184
/*
 * gdlm_kobject_setup - register this lockspace's "lock_module" kobject
 * @ls: the lockspace; @fskobj: the filesystem's kobject (parent)
 *
 * Returns 0 on success or a negative errno.
 */
int gdlm_kobject_setup(struct gdlm_ls *ls, struct kobject *fskobj)
{
	int error;

	error = kobject_set_name(&ls->kobj, "%s", "lock_module");
	if (error) {
		log_error("can't set kobj name %d", error);
		return error;
	}

	ls->kobj.kset = &gdlm_kset;
	ls->kobj.ktype = &gdlm_ktype;
	ls->kobj.parent = fskobj;

	error = kobject_register(&ls->kobj);
	if (error)
		log_error("can't register kobj %d", error);

	return error;
}
205
/* Remove the lockspace's sysfs directory; inverse of gdlm_kobject_setup(). */
void gdlm_kobject_release(struct gdlm_ls *ls)
{
	kobject_unregister(&ls->kobj);
}
210
211int gdlm_sysfs_init(void)
212{
213 int error;
214
215 error = kset_register(&gdlm_kset);
216 if (error)
217 printk("lock_dlm: cannot register kset %d\n", error);
218
219 return error;
220}
221
/* Unregister the module-wide kset; inverse of gdlm_sysfs_init(). */
void gdlm_sysfs_exit(void)
{
	kset_unregister(&gdlm_kset);
}
226
diff --git a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c
new file mode 100644
index 000000000000..9cf1f168eaf8
--- /dev/null
+++ b/fs/gfs2/locking/dlm/thread.c
@@ -0,0 +1,359 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include "lock_dlm.h"
11
/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
   thread gets to it. */

static void queue_submit(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;

	spin_lock(&ls->async_lock);
	list_add_tail(&lp->delay_list, &ls->submit);
	spin_unlock(&ls->async_lock);
	/* gdlm_thread sleeps on thread_wait when it has no work */
	wake_up(&ls->thread_wait);
}
24
25static void process_blocking(struct gdlm_lock *lp, int bast_mode)
26{
27 struct gdlm_ls *ls = lp->ls;
28 unsigned int cb = 0;
29
30 switch (gdlm_make_lmstate(bast_mode)) {
31 case LM_ST_EXCLUSIVE:
32 cb = LM_CB_NEED_E;
33 break;
34 case LM_ST_DEFERRED:
35 cb = LM_CB_NEED_D;
36 break;
37 case LM_ST_SHARED:
38 cb = LM_CB_NEED_S;
39 break;
40 default:
41 gdlm_assert(0, "unknown bast mode %u", lp->bast_mode);
42 }
43
44 ls->fscb(ls->sdp, cb, &lp->lockname);
45}
46
/*
 * process_complete - handle a dlm completion AST for one lock
 *
 * Runs in lock_dlm thread context.  Interprets lp->lksb status/flags,
 * updates the lock's cur/req state, and then either resubmits the lock,
 * completes an internal waiter, or (at "out:") delivers an LM_CB_ASYNC
 * callback to GFS2 with the result encoded in acb.lc_ret.
 */
static void process_complete(struct gdlm_lock *lp)
{
	struct gdlm_ls *ls = lp->ls;
	struct lm_async_cb acb;
	s16 prev_mode = lp->cur;	/* mode before this AST, for caching test */

	memset(&acb, 0, sizeof(acb));

	if (lp->lksb.sb_status == -DLM_ECANCEL) {
		log_info("complete dlm cancel %x,%llx flags %lx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);

		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		if (lp->cur == DLM_LOCK_IV)
			lp->lksb.sb_lkid = 0;
		goto out;
	}

	if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
		if (lp->lksb.sb_status != -DLM_EUNLOCK) {
			log_info("unlock sb_status %d %x,%llx flags %lx",
				 lp->lksb.sb_status, lp->lockname.ln_type,
				 (unsigned long long)lp->lockname.ln_number,
				 lp->flags);
			return;
		}

		lp->cur = DLM_LOCK_IV;
		lp->req = DLM_LOCK_IV;
		lp->lksb.sb_lkid = 0;

		if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
			gdlm_delete_lp(lp);
			return;
		}
		goto out;
	}

	/* dlm says the LVB contents are no longer trustworthy */
	if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
		memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	/* dlm granted the alternate mode we allowed; record it as req */
	if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (lp->req == DLM_LOCK_PR)
			lp->req = DLM_LOCK_CW;
		else if (lp->req == DLM_LOCK_CW)
			lp->req = DLM_LOCK_PR;
	}

	/*
	 * A canceled lock request.  The lock was just taken off the delayed
	 * list and was never even submitted to dlm.
	 */

	if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
		log_info("complete internal cancel %x,%llx",
			 lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number);
		lp->req = lp->cur;
		acb.lc_ret |= LM_OUT_CANCELED;
		goto out;
	}

	/*
	 * An error occured.
	 */

	if (lp->lksb.sb_status) {
		/* a "normal" error */
		if ((lp->lksb.sb_status == -EAGAIN) &&
		    (lp->lkf & DLM_LKF_NOQUEUE)) {
			lp->req = lp->cur;
			if (lp->cur == DLM_LOCK_IV)
				lp->lksb.sb_lkid = 0;
			goto out;
		}

		/* this could only happen with cancels I think */
		log_info("ast sb_status %d %x,%llx flags %lx",
			 lp->lksb.sb_status, lp->lockname.ln_type,
			 (unsigned long long)lp->lockname.ln_number,
			 lp->flags);
		return;
	}

	/*
	 * This is an AST for an EX->EX conversion for sync_lvb from GFS.
	 */

	if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
		complete(&lp->ast_wait);
		return;
	}

	/*
	 * A lock has been demoted to NL because it initially completed during
	 * BLOCK_LOCKS.  Now it must be requested in the originally requested
	 * mode.
	 */

	if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
		gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);
		gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
			    lp->lockname.ln_type,
			    (unsigned long long)lp->lockname.ln_number);

		lp->cur = DLM_LOCK_NL;
		lp->req = lp->prev_req;
		lp->prev_req = DLM_LOCK_IV;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		set_bit(LFL_NOCACHE, &lp->flags);

		if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
		    !test_bit(LFL_NOBLOCK, &lp->flags))
			gdlm_queue_delayed(lp);
		else
			queue_submit(lp);
		return;
	}

	/*
	 * A request is granted during dlm recovery.  It may be granted
	 * because the locks of a failed node were cleared.  In that case,
	 * there may be inconsistent data beneath this lock and we must wait
	 * for recovery to complete to use it.  When gfs recovery is done this
	 * granted lock will be converted to NL and then reacquired in this
	 * granted state.
	 */

	if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
	    !test_bit(LFL_NOBLOCK, &lp->flags) &&
	    lp->req != DLM_LOCK_NL) {

		lp->cur = lp->req;
		lp->prev_req = lp->req;
		lp->req = DLM_LOCK_NL;
		lp->lkf |= DLM_LKF_CONVERT;
		lp->lkf &= ~DLM_LKF_CONVDEADLK;

		log_debug("rereq %x,%llx id %x %d,%d",
			  lp->lockname.ln_type,
			  (unsigned long long)lp->lockname.ln_number,
			  lp->lksb.sb_lkid, lp->cur, lp->req);

		set_bit(LFL_REREQUEST, &lp->flags);
		queue_submit(lp);
		return;
	}

	/*
	 * DLM demoted the lock to NL before it was granted so GFS must be
	 * told it cannot cache data for this lock.
	 */

	if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
		set_bit(LFL_NOCACHE, &lp->flags);

out:
	/*
	 * This is an internal lock_dlm lock
	 */

	if (test_bit(LFL_INLOCK, &lp->flags)) {
		clear_bit(LFL_NOBLOCK, &lp->flags);
		lp->cur = lp->req;
		complete(&lp->ast_wait);
		return;
	}

	/*
	 * Normal completion of a lock request.  Tell GFS it now has the lock.
	 */

	clear_bit(LFL_NOBLOCK, &lp->flags);
	lp->cur = lp->req;

	acb.lc_name = lp->lockname;
	acb.lc_ret |= gdlm_make_lmstate(lp->cur);

	/* caching is allowed only when both old and new modes held data */
	if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
	    (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
		acb.lc_ret |= LM_OUT_CACHEABLE;

	ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
}
237
238static inline int no_work(struct gdlm_ls *ls, int blocking)
239{
240 int ret;
241
242 spin_lock(&ls->async_lock);
243 ret = list_empty(&ls->complete) && list_empty(&ls->submit);
244 if (ret && blocking)
245 ret = list_empty(&ls->blocking);
246 spin_unlock(&ls->async_lock);
247
248 return ret;
249}
250
251static inline int check_drop(struct gdlm_ls *ls)
252{
253 if (!ls->drop_locks_count)
254 return 0;
255
256 if (time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ)) {
257 ls->drop_time = jiffies;
258 if (ls->all_locks_count >= ls->drop_locks_count)
259 return 1;
260 }
261 return 0;
262}
263
/*
 * gdlm_thread - main loop for the two lock_dlm kernel threads
 *
 * Drains one item per iteration from the lockspace's blocking,
 * complete, or submit list (in that priority order), sleeping on
 * thread_wait when all relevant lists are empty.
 */
static int gdlm_thread(void *data)
{
	struct gdlm_ls *ls = (struct gdlm_ls *) data;
	struct gdlm_lock *lp = NULL;
	int blist = 0;
	uint8_t complete, blocking, submit, drop;
	DECLARE_WAITQUEUE(wait, current);

	/* Only thread1 is allowed to do blocking callbacks since gfs
	   may wait for a completion callback within a blocking cb. */

	if (current == ls->thread1)
		blist = 1;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ls->thread_wait, &wait);
		if (no_work(ls, blist))
			schedule();
		remove_wait_queue(&ls->thread_wait, &wait);
		set_current_state(TASK_RUNNING);

		complete = blocking = submit = drop = 0;

		spin_lock(&ls->async_lock);

		/* pick one piece of work, unlinking it under the lock */
		if (blist && !list_empty(&ls->blocking)) {
			lp = list_entry(ls->blocking.next, struct gdlm_lock,
					blist);
			list_del_init(&lp->blist);
			blocking = lp->bast_mode;
			lp->bast_mode = 0;
		} else if (!list_empty(&ls->complete)) {
			lp = list_entry(ls->complete.next, struct gdlm_lock,
					clist);
			list_del_init(&lp->clist);
			complete = 1;
		} else if (!list_empty(&ls->submit)) {
			lp = list_entry(ls->submit.next, struct gdlm_lock,
					delay_list);
			list_del_init(&lp->delay_list);
			submit = 1;
		}

		drop = check_drop(ls);
		spin_unlock(&ls->async_lock);

		if (complete)
			process_complete(lp);

		else if (blocking)
			process_blocking(lp, blocking);

		else if (submit)
			gdlm_do_lock(lp);

		/* ask gfs to shed cached locks when we hold too many */
		if (drop)
			ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL);

		schedule();
	}

	return 0;
}
328
329int gdlm_init_threads(struct gdlm_ls *ls)
330{
331 struct task_struct *p;
332 int error;
333
334 p = kthread_run(gdlm_thread, ls, "lock_dlm1");
335 error = IS_ERR(p);
336 if (error) {
337 log_error("can't start lock_dlm1 thread %d", error);
338 return error;
339 }
340 ls->thread1 = p;
341
342 p = kthread_run(gdlm_thread, ls, "lock_dlm2");
343 error = IS_ERR(p);
344 if (error) {
345 log_error("can't start lock_dlm2 thread %d", error);
346 kthread_stop(ls->thread1);
347 return error;
348 }
349 ls->thread2 = p;
350
351 return 0;
352}
353
/* Stop both worker threads; inverse of gdlm_init_threads(). */
void gdlm_release_threads(struct gdlm_ls *ls)
{
	kthread_stop(ls->thread1);
	kthread_stop(ls->thread2);
}
359
diff --git a/fs/gfs2/locking/nolock/Makefile b/fs/gfs2/locking/nolock/Makefile
new file mode 100644
index 000000000000..35e9730bc3a8
--- /dev/null
+++ b/fs/gfs2/locking/nolock/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += lock_nolock.o
2lock_nolock-y := main.o
3
diff --git a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c
new file mode 100644
index 000000000000..acfbc941f319
--- /dev/null
+++ b/fs/gfs2/locking/nolock/main.c
@@ -0,0 +1,246 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/types.h>
15#include <linux/fs.h>
16#include <linux/smp_lock.h>
17#include <linux/lm_interface.h>
18
/* Per-mount state for the no-op lock module: only the LVB size is kept */
struct nolock_lockspace {
	unsigned int nl_lvb_size;	/* bytes allocated per lock value block */
};
22
23static const struct lm_lockops nolock_ops;
24
25static int nolock_mount(char *table_name, char *host_data,
26 lm_callback_t cb, void *cb_data,
27 unsigned int min_lvb_size, int flags,
28 struct lm_lockstruct *lockstruct,
29 struct kobject *fskobj)
30{
31 char *c;
32 unsigned int jid;
33 struct nolock_lockspace *nl;
34
35 c = strstr(host_data, "jid=");
36 if (!c)
37 jid = 0;
38 else {
39 c += 4;
40 sscanf(c, "%u", &jid);
41 }
42
43 nl = kzalloc(sizeof(struct nolock_lockspace), GFP_KERNEL);
44 if (!nl)
45 return -ENOMEM;
46
47 nl->nl_lvb_size = min_lvb_size;
48
49 lockstruct->ls_jid = jid;
50 lockstruct->ls_first = 1;
51 lockstruct->ls_lvb_size = min_lvb_size;
52 lockstruct->ls_lockspace = nl;
53 lockstruct->ls_ops = &nolock_ops;
54 lockstruct->ls_flags = LM_LSFLAG_LOCAL;
55
56 return 0;
57}
58
/* No-op: there are no other nodes to notify in local locking */
static void nolock_others_may_mount(void *lockspace)
{
}
62
/* Free the lockspace allocated by nolock_mount() */
static void nolock_unmount(void *lockspace)
{
	struct nolock_lockspace *nl = lockspace;
	kfree(nl);
}
68
/* No-op: nothing to tell other nodes when this fs withdraws */
static void nolock_withdraw(void *lockspace)
{
}
72
/**
 * nolock_get_lock - get a lock handle given a description of the lock
 * @lockspace: the lockspace the lock lives in
 * @name: the name of the lock (ignored here)
 * @lockp: return the lock handle here
 *
 * No per-lock object is allocated: the lockspace pointer itself is
 * handed back as the lock handle.
 *
 * Returns: 0 on success, -EXXX on failure
 */

static int nolock_get_lock(void *lockspace, struct lm_lockname *name,
			   void **lockp)
{
	*lockp = lockspace;
	return 0;
}
88
/**
 * nolock_put_lock - get rid of a lock structure
 * @lock: the lock to throw away
 *
 * No-op: nolock_get_lock() allocated nothing, so nothing to free.
 */

static void nolock_put_lock(void *lock)
{
}
98
/**
 * nolock_lock - acquire a lock
 * @lock: the lock to manipulate
 * @cur_state: the current state
 * @req_state: the requested state
 * @flags: modifier flags
 *
 * Always grants the requested state immediately and marks the result
 * cacheable, since no other node can conflict.
 *
 * Returns: A bitmap of LM_OUT_*
 */

static unsigned int nolock_lock(void *lock, unsigned int cur_state,
				unsigned int req_state, unsigned int flags)
{
	return req_state | LM_OUT_CACHEABLE;
}
114
/**
 * nolock_unlock - unlock a lock
 * @lock: the lock to manipulate
 * @cur_state: the current state
 *
 * Nothing to release locally; always succeeds.
 *
 * Returns: 0
 */

static unsigned int nolock_unlock(void *lock, unsigned int cur_state)
{
	return 0;
}
127
/* No-op: requests are granted synchronously, nothing to cancel */
static void nolock_cancel(void *lock)
{
}
131
132/**
133 * nolock_hold_lvb - hold on to a lock value block
134 * @lock: the lock the LVB is associated with
135 * @lvbp: return the lm_lvb_t here
136 *
137 * Returns: 0 on success, -EXXX on failure
138 */
139
140static int nolock_hold_lvb(void *lock, char **lvbp)
141{
142 struct nolock_lockspace *nl = lock;
143 int error = 0;
144
145 *lvbp = kzalloc(nl->nl_lvb_size, GFP_KERNEL);
146 if (!*lvbp)
147 error = -ENOMEM;
148
149 return error;
150}
151
/**
 * nolock_unhold_lvb - release a LVB
 * @lock: the lock the LVB is associated with
 * @lvb: the lock value block allocated by nolock_hold_lvb()
 *
 */

static void nolock_unhold_lvb(void *lock, char *lvb)
{
	kfree(lvb);
}
163
/*
 * nolock_plock_get - test for a conflicting posix lock
 *
 * Delegates to the VFS posix_test_lock(); when a conflicting lock is
 * found its description is copied into @fl, otherwise @fl is marked
 * F_UNLCK.  Always returns 0.
 */
static int nolock_plock_get(void *lockspace, struct lm_lockname *name,
			    struct file *file, struct file_lock *fl)
{
	struct file_lock tmp;
	int ret;

	ret = posix_test_lock(file, fl, &tmp);
	fl->fl_type = F_UNLCK;
	if (ret)
		memcpy(fl, &tmp, sizeof(struct file_lock));

	return 0;
}
177
/*
 * nolock_plock - set a posix lock
 *
 * Local locking only: hand the request straight to the VFS posix
 * lock code and return its result.
 */
static int nolock_plock(void *lockspace, struct lm_lockname *name,
			struct file *file, int cmd, struct file_lock *fl)
{
	return posix_lock_file_wait(file, fl);
}
185
/*
 * nolock_punlock - remove a posix lock
 *
 * @fl arrives describing the unlock; the VFS posix lock code does
 * the actual removal.
 */
static int nolock_punlock(void *lockspace, struct lm_lockname *name,
			  struct file *file, struct file_lock *fl)
{
	return posix_lock_file_wait(file, fl);
}
193
/* No-op: no remote journals to recover in local locking */
static void nolock_recovery_done(void *lockspace, unsigned int jid,
				 unsigned int message)
{
}
198
/* Operations table registered with the GFS2 lock harness; every hook
   either succeeds trivially or defers to the VFS posix-lock code. */
static const struct lm_lockops nolock_ops = {
	.lm_proto_name = "lock_nolock",
	.lm_mount = nolock_mount,
	.lm_others_may_mount = nolock_others_may_mount,
	.lm_unmount = nolock_unmount,
	.lm_withdraw = nolock_withdraw,
	.lm_get_lock = nolock_get_lock,
	.lm_put_lock = nolock_put_lock,
	.lm_lock = nolock_lock,
	.lm_unlock = nolock_unlock,
	.lm_cancel = nolock_cancel,
	.lm_hold_lvb = nolock_hold_lvb,
	.lm_unhold_lvb = nolock_unhold_lvb,
	.lm_plock_get = nolock_plock_get,
	.lm_plock = nolock_plock,
	.lm_punlock = nolock_punlock,
	.lm_recovery_done = nolock_recovery_done,
	.lm_owner = THIS_MODULE,
};
218
/* Module init: register the nolock protocol with the GFS2 harness */
static int __init init_nolock(void)
{
	int error;

	error = gfs2_register_lockproto(&nolock_ops);
	if (error) {
		printk(KERN_WARNING
		       "lock_nolock: can't register protocol: %d\n", error);
		return error;
	}

	printk(KERN_INFO
	       "Lock_Nolock (built %s %s) installed\n", __DATE__, __TIME__);
	return 0;
}
234
/* Module exit: unregister the protocol registered in init_nolock() */
static void __exit exit_nolock(void)
{
	gfs2_unregister_lockproto(&nolock_ops);
}
239
240module_init(init_nolock);
241module_exit(exit_nolock);
242
243MODULE_DESCRIPTION("GFS Nolock Locking Module");
244MODULE_AUTHOR("Red Hat, Inc.");
245MODULE_LICENSE("GPL");
246
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
new file mode 100644
index 000000000000..554fe5bd1b72
--- /dev/null
+++ b/fs/gfs2/log.c
@@ -0,0 +1,687 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/gfs2_ondisk.h>
16#include <linux/crc32.h>
17#include <linux/lm_interface.h>
18
19#include "gfs2.h"
20#include "incore.h"
21#include "bmap.h"
22#include "glock.h"
23#include "log.h"
24#include "lops.h"
25#include "meta_io.h"
26#include "util.h"
27#include "dir.h"
28
29#define PULL 1
30
31/**
32 * gfs2_struct2blk - compute stuff
33 * @sdp: the filesystem
34 * @nstruct: the number of structures
35 * @ssize: the size of the structures
36 *
37 * Compute the number of log descriptor blocks needed to hold a certain number
38 * of structures of a certain size.
39 *
40 * Returns: the number of blocks needed (minimum is always 1)
41 */
42
43unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
44 unsigned int ssize)
45{
46 unsigned int blks;
47 unsigned int first, second;
48
49 blks = 1;
50 first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
51
52 if (nstruct > first) {
53 second = (sdp->sd_sb.sb_bsize -
54 sizeof(struct gfs2_meta_header)) / ssize;
55 blks += DIV_ROUND_UP(nstruct - first, second);
56 }
57
58 return blks;
59}
60
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the AIL transaction whose ail1 list is pushed toward disk
 *
 * Walks ai_ail1_list in reverse (oldest first).  Buffers whose I/O has
 * completed are retired to the ail2 list; dirty buffers are written.
 * The log spinlock must be held on entry and is held on exit, but it
 * is dropped around the sleeping calls; after every drop the scan is
 * restarted from the beginning (retry loop), since the list may have
 * changed while unlocked.
 */

static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;
	int retry;

	BUG_ON(!spin_is_locked(&sdp->sd_log_lock));

	do {
		retry = 0;

		list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
						 bd_ail_st_list) {
			bh = bd->bd_bh;

			gfs2_assert(sdp, bd->bd_ail == ai);

			if (!buffer_busy(bh)) {
				/* I/O finished: report any failure, then
				   retire the buffer to the ail2 list */
				if (!buffer_uptodate(bh)) {
					gfs2_log_unlock(sdp);
					gfs2_io_error_bh(sdp, bh);
					gfs2_log_lock(sdp);
				}
				list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
				continue;
			}

			if (!buffer_dirty(bh))
				continue;

			/* move to list head so the reverse walk does not
			   revisit this entry immediately after restart */
			list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);

			/* sleeping calls: must drop the spinlock */
			gfs2_log_unlock(sdp);
			wait_on_buffer(bh);
			ll_rw_block(WRITE, 1, &bh);
			gfs2_log_lock(sdp);

			/* lock was dropped; rescan the whole list */
			retry = 1;
			break;
		}
	} while (retry);
}
110
/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 * @flags: DIO_ALL keeps scanning past busy buffers instead of stopping
 *
 * Retires every buffer whose I/O has completed to the ail2 list.
 * Returns: nonzero if the ail1 list is now empty (transaction synced)
 */

static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_ail == ai);

		if (buffer_busy(bh)) {
			/* I/O still in flight: either skip just this
			   buffer (DIO_ALL) or give up on the list */
			if (flags & DIO_ALL)
				continue;
			else
				break;
		}

		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);

		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
	}

	return list_empty(&ai->ai_ail1_list);
}
144
/*
 * gfs2_ail1_start - start writeback of the AIL1 transactions
 * @sdp: the filesystem
 * @flags: DIO_ALL pushes every transaction, otherwise the walk may
 *         stop once the oldest transaction has synced
 *
 * Each transaction is tagged with the current sync generation so that
 * it is started at most once per call, even though
 * gfs2_ail1_start_one() can drop the log lock and force a rescan.
 */
void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
	struct list_head *head = &sdp->sd_ail1_list;
	u64 sync_gen;
	struct list_head *first;
	struct gfs2_ail *first_ai, *ai, *tmp;
	int done = 0;

	gfs2_log_lock(sdp);
	if (list_empty(head)) {
		gfs2_log_unlock(sdp);
		return;
	}
	sync_gen = sdp->sd_ail_sync_gen++;

	/* oldest transaction sits at the list tail */
	first = head->prev;
	first_ai = list_entry(first, struct gfs2_ail, ai_list);
	first_ai->ai_sync_gen = sync_gen;
	gfs2_ail1_start_one(sdp, first_ai); /* This may drop log lock */

	if (flags & DIO_ALL)
		first = NULL;

	while(!done) {
		/* without DIO_ALL, stop as soon as the oldest entry has
		   either moved or fully synced */
		if (first && (head->prev != first ||
			      gfs2_ail1_empty_one(sdp, first_ai, 0)))
			break;

		done = 1;
		list_for_each_entry_safe_reverse(ai, tmp, head, ai_list) {
			if (ai->ai_sync_gen >= sync_gen)
				continue;
			ai->ai_sync_gen = sync_gen;
			gfs2_ail1_start_one(sdp, ai); /* This may drop log lock */
			done = 0;
			break;
		}
	}

	gfs2_log_unlock(sdp);
}
186
/*
 * gfs2_ail1_empty - move fully-synced AIL1 transactions to the AIL2 list
 * @sdp: the filesystem
 * @flags: DIO_ALL checks every transaction, otherwise stop at the
 *         first one that is not yet synced
 *
 * Returns: nonzero if the AIL1 list is now empty
 */
int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
{
	struct gfs2_ail *ai, *s;
	int ret;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
		if (gfs2_ail1_empty_one(sdp, ai, flags))
			list_move(&ai->ai_list, &sdp->sd_ail2_list);
		else if (!(flags & DIO_ALL))
			break;
	}

	ret = list_empty(&sdp->sd_ail1_list);

	gfs2_log_unlock(sdp);

	return ret;
}
207
208
/**
 * gfs2_ail2_empty_one - release all buffers of one synced AIL transaction
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 * Detaches every bufdata from both the state list and its glock's
 * ail list, dropping the glock's ail count and the buffer reference
 * taken while the buffer was in the log.
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &ai->ai_ail2_list;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->prev, struct gfs2_bufdata,
				bd_ail_st_list);
		gfs2_assert(sdp, bd->bd_ail == ai);
		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&bd->bd_gl->gl_ail_count);
		brelse(bd->bd_bh);
	}
}
232
/*
 * ail2_empty - free AIL2 transactions that fall behind the new log tail
 * @sdp: the filesystem
 * @new_tail: the journal block the tail is being advanced to
 *
 * A transaction is reclaimable when its first block lies in the range
 * [old_tail, new_tail); because the journal is circular the range test
 * changes form when the tail wraps past the end of the journal.
 */
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_ail *ai, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	gfs2_log_lock(sdp);

	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
		a = (old_tail <= ai->ai_first);
		b = (ai->ai_first < new_tail);
		/* unwrapped: both bounds apply; wrapped: either side of
		   the journal-end counts as inside the range */
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, ai);
		list_del(&ai->ai_list);
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
		kfree(ai);
	}

	gfs2_log_unlock(sdp);
}
258
/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Waits until @blks free journal blocks are available, flushing the
 * log and pushing the AIL to make space if necessary.  On success the
 * caller holds sd_log_flush_lock for read, released later by
 * gfs2_log_release() or gfs2_log_commit().
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	unsigned int try = 0;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;

	/* the mutex serializes waiters so one large reservation cannot
	   be starved by many small ones */
	mutex_lock(&sdp->sd_log_reserve_mutex);
	gfs2_log_lock(sdp);
	while(sdp->sd_log_blks_free <= blks) {
		gfs2_log_unlock(sdp);
		gfs2_ail1_empty(sdp, 0);
		gfs2_log_flush(sdp, NULL);

		/* still short after a flush: push AIL writeback too */
		if (try++)
			gfs2_ail1_start(sdp, 0);
		gfs2_log_lock(sdp);
	}
	sdp->sd_log_blks_free -= blks;
	gfs2_log_unlock(sdp);
	mutex_unlock(&sdp->sd_log_reserve_mutex);

	down_read(&sdp->sd_log_flush_lock);

	return 0;
}
294
/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 * Returns the blocks reserved by gfs2_log_reserve() and drops the
 * read-side hold on sd_log_flush_lock taken there.
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{

	gfs2_log_lock(sdp);
	sdp->sd_log_blks_free += blks;
	gfs2_assert_withdraw(sdp,
			     sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
	gfs2_log_unlock(sdp);
	up_read(&sdp->sd_log_flush_lock);
}
312
313static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
314{
315 int error;
316 struct buffer_head bh_map;
317
318 error = gfs2_block_map(sdp->sd_jdesc->jd_inode, lbn, 0, &bh_map, 1);
319 if (error || !bh_map.b_blocknr)
320 printk(KERN_INFO "error=%d, dbn=%llu lbn=%u", error, bh_map.b_blocknr, lbn);
321 gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);
322
323 return bh_map.b_blocknr;
324}
325
326/**
327 * log_distance - Compute distance between two journal blocks
328 * @sdp: The GFS2 superblock
329 * @newer: The most recent journal block of the pair
330 * @older: The older journal block of the pair
331 *
332 * Compute the distance (in the journal direction) between two
333 * blocks in the journal
334 *
335 * Returns: the distance in blocks
336 */
337
338static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
339 unsigned int older)
340{
341 int dist;
342
343 dist = newer - older;
344 if (dist < 0)
345 dist += sdp->sd_jdesc->jd_blocks;
346
347 return dist;
348}
349
/*
 * current_tail - compute the furthest the log tail may be advanced
 *
 * The tail is bounded by the oldest AIL1 transaction still awaiting
 * writeback; with no AIL1 entries the whole log up to the head is
 * reclaimable.
 */
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_ail *ai;
	unsigned int tail;

	gfs2_log_lock(sdp);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		/* oldest transaction is at the list tail */
		ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
		tail = ai->ai_first;
	}

	gfs2_log_unlock(sdp);

	return tail;
}
368
/*
 * log_incr_head - advance the flush head one block, wrapping at the
 * journal end.  Catching up with the tail is only legal when the log
 * is empty (flush head == head), hence the withdraw assertion.
 */
static inline void log_incr_head(struct gfs2_sbd *sdp)
{
	if (sdp->sd_log_flush_head == sdp->sd_log_tail)
		gfs2_assert_withdraw(sdp, sdp->sd_log_flush_head == sdp->sd_log_head);

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
		sdp->sd_log_flush_head = 0;
		sdp->sd_log_flush_wrapped = 1;
	}
}
379
/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Allocates a tracking struct (queued on sd_log_flush_list for
 * log_flush_commit() to wait on), maps the current flush-head block,
 * zeroes it, and advances the flush head.
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	/* __GFP_NOFAIL: allocation cannot fail, no error path needed */
	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
	list_add(&lb->lb_list, &sdp->sd_log_flush_list);

	bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	unlock_buffer(bh);

	log_incr_head(sdp);

	return bh;
}
407
/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the in-place metadata buffer whose data is to be logged
 *
 * Builds a throwaway buffer_head that shares @real's page but is
 * addressed at the current journal flush-head block, so the same data
 * can be written to the log without copying.
 *
 * Returns: the log buffer descriptor
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
				      struct buffer_head *real)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
	list_add(&lb->lb_list, &sdp->sd_log_flush_list);
	lb->lb_real = real;	/* marks this as a fake for log_flush_commit */

	bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&bh->b_count, 1);
	bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
	/* point at the real buffer's data, but at the journal block */
	set_bh_page(bh, real->b_page, bh_offset(real));
	bh->b_blocknr = blkno;
	bh->b_size = sdp->sd_sb.sb_bsize;
	bh->b_bdev = sdp->sd_vfs->s_bdev;

	log_incr_head(sdp);

	return bh;
}
439
/*
 * log_pull_tail - advance the log tail to @new_tail
 * @sdp: the filesystem
 * @new_tail: the new tail position
 * @pull: nonzero when one block was consumed for the header that
 *        forced this pull, so it is not returned to the free count
 *
 * Frees the AIL2 transactions now behind the tail and credits the
 * reclaimed blocks back to sd_log_blks_free.
 */
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail, int pull)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	ail2_empty(sdp, new_tail);

	gfs2_log_lock(sdp);
	sdp->sd_log_blks_free += dist - (pull ? 1 : 0);
	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
	gfs2_log_unlock(sdp);

	sdp->sd_log_tail = new_tail;
}
453
/**
 * log_write_header - Get and initialize a journal header buffer
 * @sdp: The GFS2 superblock
 * @flags: GFS2_LOG_HEAD_* flags to record in the header
 * @pull: nonzero when this header itself forces a tail pull
 *
 * Builds a log header at the flush head recording the current tail,
 * writes it synchronously (the header is the commit point, so it must
 * be on disk before the transaction counts), then advances the tail
 * and the flush head.
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
{
	u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
	struct buffer_head *bh;
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;

	bh = sb_getblk(sdp->sd_vfs, blkno);
	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
	unlock_buffer(bh);

	/* retire synced AIL entries so the tail is as fresh as possible */
	gfs2_ail1_empty(sdp, 0);
	tail = current_tail(sdp);

	lh = (struct gfs2_log_header *)bh->b_data;
	memset(lh, 0, sizeof(struct gfs2_log_header));
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	/* hash is computed with lh_hash still zero from the memset */
	hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	/* synchronous write: the header must hit disk before we proceed */
	set_buffer_dirty(bh);
	if (sync_dirty_buffer(bh))
		gfs2_io_error_bh(sdp, bh);
	brelse(bh);

	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail, pull);
	else
		gfs2_assert_withdraw(sdp, !pull);

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	log_incr_head(sdp);
}
504
/*
 * log_flush_commit - wait for all queued log writes, then commit
 *
 * Drains sd_log_flush_list, waiting for each buffer's I/O.  Fake
 * buffers (lb_real set) share their page with a real metadata buffer
 * and are freed with free_buffer_head(); real log buffers are simply
 * released.  Once everything is on disk, the commit header is written.
 */
static void log_flush_commit(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_log_flush_list;
	struct gfs2_log_buf *lb;
	struct buffer_head *bh;

	while (!list_empty(head)) {
		lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
		list_del(&lb->lb_list);
		bh = lb->lb_bh;

		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);
		if (lb->lb_real) {
			/* busy-wait for stray references before freeing
			   the fake head */
			while (atomic_read(&bh->b_count) != 1) /* Grrrr... */
				schedule();
			free_buffer_head(bh);
		} else
			brelse(bh);
		kfree(lb);
	}

	/* data is stable on disk: now the header makes it official */
	log_write_header(sdp, 0, 0);
}
530
/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush. If NULL, flush the whole incore log
 *
 * Takes sd_log_flush_lock for write, excluding transaction commits.
 * When flushing for a specific glock that has nothing in the log, the
 * call is a no-op.  A new AIL transaction is built for the buffers
 * written by this flush; if it ends up with pending writeback it is
 * queued on sd_ail1_list, otherwise it is freed.
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
	struct gfs2_ail *ai;

	down_write(&sdp->sd_log_flush_lock);

	if (gl) {
		/* nothing logged under this glock: nothing to flush */
		gfs2_log_lock(sdp);
		if (list_empty(&gl->gl_le.le_list)) {
			gfs2_log_unlock(sdp);
			up_write(&sdp->sd_log_flush_lock);
			return;
		}
		gfs2_log_unlock(sdp);
	}

	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
	INIT_LIST_HEAD(&ai->ai_ail1_list);
	INIT_LIST_HEAD(&ai->ai_ail2_list);

	gfs2_assert_withdraw(sdp, sdp->sd_log_num_buf == sdp->sd_log_commited_buf);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;
	ai->ai_first = sdp->sd_log_flush_head;

	lops_before_commit(sdp);
	if (!list_empty(&sdp->sd_log_flush_list))
		log_flush_commit(sdp);
	else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle)
		/* nothing to write, but the tail can still be advanced */
		log_write_header(sdp, 0, PULL);
	lops_after_commit(sdp, ai);
	sdp->sd_log_head = sdp->sd_log_flush_head;

	/* header blocks consumed during the flush come out of the pool */
	sdp->sd_log_blks_free -= sdp->sd_log_num_hdrs;

	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_buf = 0;
	sdp->sd_log_num_hdrs = 0;
	sdp->sd_log_commited_revoke = 0;

	gfs2_log_lock(sdp);
	if (!list_empty(&ai->ai_ail1_list)) {
		/* writeback still pending: hand the transaction to the AIL */
		list_add(&ai->ai_list, &sdp->sd_ail1_list);
		ai = NULL;
	}
	gfs2_log_unlock(sdp);

	sdp->sd_vfs->s_dirt = 0;
	up_write(&sdp->sd_log_flush_lock);

	/* NULL when the AIL took ownership; kfree(NULL) is a no-op */
	kfree(ai);
}
593
/*
 * log_refund - return a transaction's unused reservation to the log
 * @sdp: the filesystem
 * @tr: the transaction being committed
 *
 * Folds @tr's buffer and revoke counts into the running commit
 * totals, recomputes how many blocks those totals actually need, and
 * credits back the difference between what @tr reserved and what the
 * increase in need really was.
 */
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int reserved = 0;
	unsigned int old;

	gfs2_log_lock(sdp);

	sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_buf) >= 0);
	sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
	gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);

	if (sdp->sd_log_commited_buf)
		reserved += sdp->sd_log_commited_buf;
	if (sdp->sd_log_commited_revoke)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					    sizeof(u64));
	/* one extra block for the log header */
	if (reserved)
		reserved++;

	old = sdp->sd_log_blks_free;
	sdp->sd_log_blks_free += tr->tr_reserved -
				 (reserved - sdp->sd_log_blks_reserved);

	/* a refund can only grow the free count */
	gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free >= old);
	gfs2_assert_withdraw(sdp,
			     sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks +
			     sdp->sd_log_num_hdrs);

	sdp->sd_log_blks_reserved = reserved;

	gfs2_log_unlock(sdp);
}
627
628/**
629 * gfs2_log_commit - Commit a transaction to the log
630 * @sdp: the filesystem
631 * @tr: the transaction
632 *
633 * Returns: errno
634 */
635
636void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
637{
638 log_refund(sdp, tr);
639 lops_incore_commit(sdp, tr);
640
641 sdp->sd_vfs->s_dirt = 1;
642 up_read(&sdp->sd_log_flush_lock);
643
644 gfs2_log_lock(sdp);
645 if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks)) {
646 gfs2_log_unlock(sdp);
647 gfs2_log_flush(sdp, NULL);
648 } else {
649 gfs2_log_unlock(sdp);
650 }
651}
652
/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 * Asserts that the incore log is completely empty, writes an UNMOUNT
 * header, and leaves head == tail so the journal is clean for the
 * next mount.
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
	down_write(&sdp->sd_log_flush_lock);

	/* every log element type must already be flushed out */
	gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_hdrs);
	gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

	sdp->sd_log_flush_head = sdp->sd_log_head;
	sdp->sd_log_flush_wrapped = 0;

	log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT, 0);

	gfs2_assert_warn(sdp, sdp->sd_log_blks_free == sdp->sd_jdesc->jd_blocks);
	gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
	gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

	/* empty journal: head and tail both at the shutdown header */
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_tail = sdp->sd_log_head;

	up_write(&sdp->sd_log_flush_lock);
}
687
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
new file mode 100644
index 000000000000..7f5737d55612
--- /dev/null
+++ b/fs/gfs2/log.h
@@ -0,0 +1,65 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __LOG_DOT_H__
11#define __LOG_DOT_H__
12
13#include <linux/list.h>
14#include <linux/spinlock.h>
15#include "incore.h"
16
/**
 * gfs2_log_lock - acquire the right to mess with the log manager
 * @sdp: the filesystem
 *
 * Thin wrapper around the sd_log_lock spinlock.
 */

static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
{
	spin_lock(&sdp->sd_log_lock);
}
27
/**
 * gfs2_log_unlock - release the right to mess with the log manager
 * @sdp: the filesystem
 *
 * Counterpart of gfs2_log_lock().
 */

static inline void gfs2_log_unlock(struct gfs2_sbd *sdp)
{
	spin_unlock(&sdp->sd_log_lock);
}
38
39static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
40 unsigned int value)
41{
42 if (++value == sdp->sd_jdesc->jd_blocks) {
43 value = 0;
44 }
45 sdp->sd_log_head = sdp->sd_log_tail = value;
46}
47
48unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
49 unsigned int ssize);
50
51void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags);
52int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags);
53
54int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks);
55void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
56
57struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp);
58struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
59 struct buffer_head *real);
60void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl);
61void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
62
63void gfs2_log_shutdown(struct gfs2_sbd *sdp);
64
65#endif /* __LOG_DOT_H__ */
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
new file mode 100644
index 000000000000..881e337b6a70
--- /dev/null
+++ b/fs/gfs2/lops.c
@@ -0,0 +1,809 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/gfs2_ondisk.h>
16#include <linux/lm_interface.h>
17
18#include "gfs2.h"
19#include "incore.h"
20#include "glock.h"
21#include "log.h"
22#include "lops.h"
23#include "meta_io.h"
24#include "recovery.h"
25#include "rgrp.h"
26#include "trans.h"
27#include "util.h"
28
/*
 * glock_lo_add - add a glock to the current transaction's log elements
 *
 * Holds a reference on the glock and marks it dirty; the reference is
 * dropped in glock_lo_after_commit().  A glock already on the list is
 * only counted as touching the transaction.
 */
static void glock_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_glock *gl;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	if (!list_empty(&le->le_list))
		return;

	gl = container_of(le, struct gfs2_glock, gl_le);
	/* only an exclusively-held glock may be logged */
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl)))
		return;
	gfs2_glock_hold(gl);
	set_bit(GLF_DIRTY, &gl->gl_flags);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_gl++;
	list_add(&le->le_list, &sdp->sd_log_le_gl);
	gfs2_log_unlock(sdp);
}
50
/*
 * glock_lo_after_commit - drop the glock references taken by
 * glock_lo_add() once the commit has gone to the log.
 */
static void glock_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_gl;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		gl = list_entry(head->next, struct gfs2_glock, gl_le.le_list);
		list_del_init(&gl->gl_le.le_list);
		sdp->sd_log_num_gl--;

		gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(gl));
		gfs2_glock_put(gl);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_gl);
}
66
/*
 * buf_lo_add - add a metadata buffer to the current transaction
 *
 * First attaches the bufdata to the transaction's own list; if the
 * buffer is not yet in the incore log it is also pinned (kept in
 * memory until written to the journal) and queued on sd_log_le_buf.
 */
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	/* already part of this transaction */
	if (!list_empty(&bd->bd_list_tr))
		return;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);

	/* already in the incore log from an earlier transaction */
	if (!list_empty(&le->le_list))
		return;

	gfs2_trans_add_gl(bd->bd_gl);

	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);

	gfs2_log_lock(sdp);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	gfs2_log_unlock(sdp);

	tr->tr_num_buf_new++;
}
95
/*
 * buf_lo_incore_commit - detach the transaction's private buffer list;
 * the buffers stay queued on the incore log (sd_log_le_buf).
 */
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}
108
/*
 * buf_lo_before_commit - write the incore metadata buffers to the journal
 *
 * Emits one or more log descriptor blocks, each listing up to `limit`
 * block numbers, followed by the corresponding metadata blocks written
 * through fake buffer heads.  Two cursors walk sd_log_le_buf in step:
 * bd1 fills the descriptor's block-number array, bd2 then emits the
 * data for the same entries.
 */
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total = sdp->sd_log_num_buf;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	unsigned int limit;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	/* round the array start up to __be64 alignment */
	offset += sizeof(__be64) - 1;
	offset &= ~(sizeof(__be64) - 1);
	limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
	/* for 4k blocks, limit = 503 */

	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		bh = gfs2_log_get_buf(sdp);
		sdp->sd_log_num_hdrs++;
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		ptr = (__be64 *)(bh->b_data + offset);
		ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
		ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
		ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
		ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_METADATA);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);
		ld->ld_data2 = cpu_to_be32(0);
		memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));

		/* pass 1: record the real block numbers in the descriptor */
		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		set_buffer_dirty(bh);
		ll_rw_block(WRITE, 1, &bh);

		/* pass 2: write the metadata itself via fake buffers */
		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			set_buffer_dirty(bh);
			ll_rw_block(WRITE, 1, &bh);
			if (++n >= num)
				break;
		}

		total -= num;
	}
}
168
/*
 * buf_lo_after_commit - unpin the metadata buffers that were written
 * to the journal; each lands on @ai's ail1 list for in-place writeback.
 */
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}
183
184static void buf_lo_before_scan(struct gfs2_jdesc *jd,
185 struct gfs2_log_header *head, int pass)
186{
187 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
188
189 if (pass != 0)
190 return;
191
192 sdp->sd_found_blocks = 0;
193 sdp->sd_replayed_blocks = 0;
194}
195
/*
 * buf_lo_scan_elements - replay a metadata log descriptor
 * @jd: the journal being recovered
 * @start: the journal block of the descriptor itself
 * @ld: the descriptor
 * @ptr: the array of real block numbers recorded in the descriptor
 * @pass: replay happens on pass 1 only
 *
 * Copies each journaled metadata block back to its real location,
 * skipping blocks that a later revoke invalidated.
 *
 * Returns: 0 on success, errno on failure
 */
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	/* step past the descriptor block to the first data block */
	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		/* revoked after being journaled: do not replay */
		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}
244
245static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
246{
247 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
248 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
249
250 if (error) {
251 gfs2_meta_sync(ip->i_gl);
252 return;
253 }
254 if (pass != 1)
255 return;
256
257 gfs2_meta_sync(ip->i_gl);
258
259 fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
260 jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
261}
262
263static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
264{
265 struct gfs2_trans *tr;
266
267 tr = current->journal_info;
268 tr->tr_touched = 1;
269 tr->tr_num_revoke++;
270
271 gfs2_log_lock(sdp);
272 sdp->sd_log_num_revoke++;
273 list_add(&le->le_list, &sdp->sd_log_le_revoke);
274 gfs2_log_unlock(sdp);
275}
276
277static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
278{
279 struct gfs2_log_descriptor *ld;
280 struct gfs2_meta_header *mh;
281 struct buffer_head *bh;
282 unsigned int offset;
283 struct list_head *head = &sdp->sd_log_le_revoke;
284 struct gfs2_revoke *rv;
285
286 if (!sdp->sd_log_num_revoke)
287 return;
288
289 bh = gfs2_log_get_buf(sdp);
290 ld = (struct gfs2_log_descriptor *)bh->b_data;
291 ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
292 ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
293 ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
294 ld->ld_type = cpu_to_be32(GFS2_LOG_DESC_REVOKE);
295 ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
296 sizeof(u64)));
297 ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
298 ld->ld_data2 = cpu_to_be32(0);
299 memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
300 offset = sizeof(struct gfs2_log_descriptor);
301
302 while (!list_empty(head)) {
303 rv = list_entry(head->next, struct gfs2_revoke, rv_le.le_list);
304 list_del_init(&rv->rv_le.le_list);
305 sdp->sd_log_num_revoke--;
306
307 if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
308 set_buffer_dirty(bh);
309 ll_rw_block(WRITE, 1, &bh);
310
311 bh = gfs2_log_get_buf(sdp);
312 mh = (struct gfs2_meta_header *)bh->b_data;
313 mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
314 mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
315 mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
316 offset = sizeof(struct gfs2_meta_header);
317 }
318
319 *(__be64 *)(bh->b_data + offset) = cpu_to_be64(rv->rv_blkno);
320 kfree(rv);
321
322 offset += sizeof(u64);
323 }
324 gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
325
326 set_buffer_dirty(bh);
327 ll_rw_block(WRITE, 1, &bh);
328}
329
330static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
331 struct gfs2_log_header *head, int pass)
332{
333 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
334
335 if (pass != 0)
336 return;
337
338 sdp->sd_found_revokes = 0;
339 sdp->sd_replay_tail = head->lh_tail;
340}
341
342static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
343 struct gfs2_log_descriptor *ld, __be64 *ptr,
344 int pass)
345{
346 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
347 unsigned int blks = be32_to_cpu(ld->ld_length);
348 unsigned int revokes = be32_to_cpu(ld->ld_data1);
349 struct buffer_head *bh;
350 unsigned int offset;
351 u64 blkno;
352 int first = 1;
353 int error;
354
355 if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
356 return 0;
357
358 offset = sizeof(struct gfs2_log_descriptor);
359
360 for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
361 error = gfs2_replay_read_block(jd, start, &bh);
362 if (error)
363 return error;
364
365 if (!first)
366 gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
367
368 while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
369 blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
370
371 error = gfs2_revoke_add(sdp, blkno, start);
372 if (error < 0)
373 return error;
374 else if (error)
375 sdp->sd_found_revokes++;
376
377 if (!--revokes)
378 break;
379 offset += sizeof(u64);
380 }
381
382 brelse(bh);
383 offset = sizeof(struct gfs2_meta_header);
384 first = 0;
385 }
386
387 return 0;
388}
389
390static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
391{
392 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
393
394 if (error) {
395 gfs2_revoke_clean(sdp);
396 return;
397 }
398 if (pass != 1)
399 return;
400
401 fs_info(sdp, "jid=%u: Found %u revoke tags\n",
402 jd->jd_jid, sdp->sd_found_revokes);
403
404 gfs2_revoke_clean(sdp);
405}
406
407static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
408{
409 struct gfs2_rgrpd *rgd;
410 struct gfs2_trans *tr = current->journal_info;
411
412 tr->tr_touched = 1;
413
414 if (!list_empty(&le->le_list))
415 return;
416
417 rgd = container_of(le, struct gfs2_rgrpd, rd_le);
418 gfs2_rgrp_bh_hold(rgd);
419
420 gfs2_log_lock(sdp);
421 sdp->sd_log_num_rg++;
422 list_add(&le->le_list, &sdp->sd_log_le_rg);
423 gfs2_log_unlock(sdp);
424}
425
426static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
427{
428 struct list_head *head = &sdp->sd_log_le_rg;
429 struct gfs2_rgrpd *rgd;
430
431 while (!list_empty(head)) {
432 rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
433 list_del_init(&rgd->rd_le.le_list);
434 sdp->sd_log_num_rg--;
435
436 gfs2_rgrp_repolish_clones(rgd);
437 gfs2_rgrp_bh_put(rgd);
438 }
439 gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
440}
441
442/**
443 * databuf_lo_add - Add a databuf to the transaction.
444 *
445 * This is used in two distinct cases:
446 * i) In ordered write mode
447 * We put the data buffer on a list so that we can ensure that its
448 * synced to disk at the right time
449 * ii) In journaled data mode
450 * We need to journal the data block in the same way as metadata in
451 * the functions above. The difference is that here we have a tag
452 * which is two __be64's being the block number (as per meta data)
453 * and a flag which says whether the data block needs escaping or
454 * not. This means we need a new log entry for each 251 or so data
455 * blocks, which isn't an enormous overhead but twice as much as
456 * for normal metadata blocks.
457 */
458static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
459{
460 struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
461 struct gfs2_trans *tr = current->journal_info;
462 struct address_space *mapping = bd->bd_bh->b_page->mapping;
463 struct gfs2_inode *ip = GFS2_I(mapping->host);
464
465 tr->tr_touched = 1;
466 if (list_empty(&bd->bd_list_tr) &&
467 (ip->i_di.di_flags & GFS2_DIF_JDATA)) {
468 tr->tr_num_buf++;
469 list_add(&bd->bd_list_tr, &tr->tr_list_buf);
470 gfs2_pin(sdp, bd->bd_bh);
471 tr->tr_num_buf_new++;
472 }
473 gfs2_trans_add_gl(bd->bd_gl);
474 gfs2_log_lock(sdp);
475 if (list_empty(&le->le_list)) {
476 if (ip->i_di.di_flags & GFS2_DIF_JDATA)
477 sdp->sd_log_num_jdata++;
478 sdp->sd_log_num_databuf++;
479 list_add(&le->le_list, &sdp->sd_log_le_databuf);
480 }
481 gfs2_log_unlock(sdp);
482}
483
484static int gfs2_check_magic(struct buffer_head *bh)
485{
486 struct page *page = bh->b_page;
487 void *kaddr;
488 __be32 *ptr;
489 int rv = 0;
490
491 kaddr = kmap_atomic(page, KM_USER0);
492 ptr = kaddr + bh_offset(bh);
493 if (*ptr == cpu_to_be32(GFS2_MAGIC))
494 rv = 1;
495 kunmap_atomic(page, KM_USER0);
496
497 return rv;
498}
499
500/**
501 * databuf_lo_before_commit - Scan the data buffers, writing as we go
502 *
503 * Here we scan through the lists of buffers and make the assumption
504 * that any buffer thats been pinned is being journaled, and that
505 * any unpinned buffer is an ordered write data buffer and therefore
506 * will be written back rather than journaled.
507 */
508static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
509{
510 LIST_HEAD(started);
511 struct gfs2_bufdata *bd1 = NULL, *bd2, *bdt;
512 struct buffer_head *bh = NULL;
513 unsigned int offset = sizeof(struct gfs2_log_descriptor);
514 struct gfs2_log_descriptor *ld;
515 unsigned int limit;
516 unsigned int total_dbuf = sdp->sd_log_num_databuf;
517 unsigned int total_jdata = sdp->sd_log_num_jdata;
518 unsigned int num, n;
519 __be64 *ptr = NULL;
520
521 offset += 2*sizeof(__be64) - 1;
522 offset &= ~(2*sizeof(__be64) - 1);
523 limit = (sdp->sd_sb.sb_bsize - offset)/sizeof(__be64);
524
525 /*
526 * Start writing ordered buffers, write journaled buffers
527 * into the log along with a header
528 */
529 gfs2_log_lock(sdp);
530 bd2 = bd1 = list_prepare_entry(bd1, &sdp->sd_log_le_databuf,
531 bd_le.le_list);
532 while(total_dbuf) {
533 num = total_jdata;
534 if (num > limit)
535 num = limit;
536 n = 0;
537 list_for_each_entry_safe_continue(bd1, bdt,
538 &sdp->sd_log_le_databuf,
539 bd_le.le_list) {
540 /* An ordered write buffer */
541 if (bd1->bd_bh && !buffer_pinned(bd1->bd_bh)) {
542 list_move(&bd1->bd_le.le_list, &started);
543 if (bd1 == bd2) {
544 bd2 = NULL;
545 bd2 = list_prepare_entry(bd2,
546 &sdp->sd_log_le_databuf,
547 bd_le.le_list);
548 }
549 total_dbuf--;
550 if (bd1->bd_bh) {
551 get_bh(bd1->bd_bh);
552 if (buffer_dirty(bd1->bd_bh)) {
553 gfs2_log_unlock(sdp);
554 wait_on_buffer(bd1->bd_bh);
555 ll_rw_block(WRITE, 1,
556 &bd1->bd_bh);
557 gfs2_log_lock(sdp);
558 }
559 brelse(bd1->bd_bh);
560 continue;
561 }
562 continue;
563 } else if (bd1->bd_bh) { /* A journaled buffer */
564 int magic;
565 gfs2_log_unlock(sdp);
566 if (!bh) {
567 bh = gfs2_log_get_buf(sdp);
568 sdp->sd_log_num_hdrs++;
569 ld = (struct gfs2_log_descriptor *)
570 bh->b_data;
571 ptr = (__be64 *)(bh->b_data + offset);
572 ld->ld_header.mh_magic =
573 cpu_to_be32(GFS2_MAGIC);
574 ld->ld_header.mh_type =
575 cpu_to_be32(GFS2_METATYPE_LD);
576 ld->ld_header.mh_format =
577 cpu_to_be32(GFS2_FORMAT_LD);
578 ld->ld_type =
579 cpu_to_be32(GFS2_LOG_DESC_JDATA);
580 ld->ld_length = cpu_to_be32(num + 1);
581 ld->ld_data1 = cpu_to_be32(num);
582 ld->ld_data2 = cpu_to_be32(0);
583 memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
584 }
585 magic = gfs2_check_magic(bd1->bd_bh);
586 *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
587 *ptr++ = cpu_to_be64((__u64)magic);
588 clear_buffer_escaped(bd1->bd_bh);
589 if (unlikely(magic != 0))
590 set_buffer_escaped(bd1->bd_bh);
591 gfs2_log_lock(sdp);
592 if (n++ > num)
593 break;
594 } else if (!bd1->bd_bh) {
595 total_dbuf--;
596 sdp->sd_log_num_databuf--;
597 list_del_init(&bd1->bd_le.le_list);
598 if (bd1 == bd2) {
599 bd2 = NULL;
600 bd2 = list_prepare_entry(bd2,
601 &sdp->sd_log_le_databuf,
602 bd_le.le_list);
603 }
604 kmem_cache_free(gfs2_bufdata_cachep, bd1);
605 }
606 }
607 gfs2_log_unlock(sdp);
608 if (bh) {
609 set_buffer_dirty(bh);
610 ll_rw_block(WRITE, 1, &bh);
611 bh = NULL;
612 }
613 n = 0;
614 gfs2_log_lock(sdp);
615 list_for_each_entry_continue(bd2, &sdp->sd_log_le_databuf,
616 bd_le.le_list) {
617 if (!bd2->bd_bh)
618 continue;
619 /* copy buffer if it needs escaping */
620 gfs2_log_unlock(sdp);
621 if (unlikely(buffer_escaped(bd2->bd_bh))) {
622 void *kaddr;
623 struct page *page = bd2->bd_bh->b_page;
624 bh = gfs2_log_get_buf(sdp);
625 kaddr = kmap_atomic(page, KM_USER0);
626 memcpy(bh->b_data,
627 kaddr + bh_offset(bd2->bd_bh),
628 sdp->sd_sb.sb_bsize);
629 kunmap_atomic(page, KM_USER0);
630 *(__be32 *)bh->b_data = 0;
631 } else {
632 bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
633 }
634 set_buffer_dirty(bh);
635 ll_rw_block(WRITE, 1, &bh);
636 gfs2_log_lock(sdp);
637 if (++n >= num)
638 break;
639 }
640 bh = NULL;
641 total_dbuf -= num;
642 total_jdata -= num;
643 }
644 gfs2_log_unlock(sdp);
645
646 /* Wait on all ordered buffers */
647 while (!list_empty(&started)) {
648 gfs2_log_lock(sdp);
649 bd1 = list_entry(started.next, struct gfs2_bufdata,
650 bd_le.le_list);
651 list_del_init(&bd1->bd_le.le_list);
652 sdp->sd_log_num_databuf--;
653 bh = bd1->bd_bh;
654 if (bh) {
655 bh->b_private = NULL;
656 get_bh(bh);
657 gfs2_log_unlock(sdp);
658 wait_on_buffer(bh);
659 brelse(bh);
660 } else
661 gfs2_log_unlock(sdp);
662
663 kmem_cache_free(gfs2_bufdata_cachep, bd1);
664 }
665
666 /* We've removed all the ordered write bufs here, so only jdata left */
667 gfs2_assert_warn(sdp, sdp->sd_log_num_databuf == sdp->sd_log_num_jdata);
668}
669
670static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
671 struct gfs2_log_descriptor *ld,
672 __be64 *ptr, int pass)
673{
674 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
675 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
676 struct gfs2_glock *gl = ip->i_gl;
677 unsigned int blks = be32_to_cpu(ld->ld_data1);
678 struct buffer_head *bh_log, *bh_ip;
679 u64 blkno;
680 u64 esc;
681 int error = 0;
682
683 if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
684 return 0;
685
686 gfs2_replay_incr_blk(sdp, &start);
687 for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
688 blkno = be64_to_cpu(*ptr++);
689 esc = be64_to_cpu(*ptr++);
690
691 sdp->sd_found_blocks++;
692
693 if (gfs2_revoke_check(sdp, blkno, start))
694 continue;
695
696 error = gfs2_replay_read_block(jd, start, &bh_log);
697 if (error)
698 return error;
699
700 bh_ip = gfs2_meta_new(gl, blkno);
701 memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
702
703 /* Unescape */
704 if (esc) {
705 __be32 *eptr = (__be32 *)bh_ip->b_data;
706 *eptr = cpu_to_be32(GFS2_MAGIC);
707 }
708 mark_buffer_dirty(bh_ip);
709
710 brelse(bh_log);
711 brelse(bh_ip);
712 if (error)
713 break;
714
715 sdp->sd_replayed_blocks++;
716 }
717
718 return error;
719}
720
721/* FIXME: sort out accounting for log blocks etc. */
722
723static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
724{
725 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
726 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
727
728 if (error) {
729 gfs2_meta_sync(ip->i_gl);
730 return;
731 }
732 if (pass != 1)
733 return;
734
735 /* data sync? */
736 gfs2_meta_sync(ip->i_gl);
737
738 fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
739 jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
740}
741
742static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
743{
744 struct list_head *head = &sdp->sd_log_le_databuf;
745 struct gfs2_bufdata *bd;
746
747 while (!list_empty(head)) {
748 bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
749 list_del_init(&bd->bd_le.le_list);
750 sdp->sd_log_num_databuf--;
751 sdp->sd_log_num_jdata--;
752 gfs2_unpin(sdp, bd->bd_bh, ai);
753 }
754 gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
755 gfs2_assert_warn(sdp, !sdp->sd_log_num_jdata);
756}
757
758
/* Log operations for glocks: add/commit only, nothing to replay. */
759const struct gfs2_log_operations gfs2_glock_lops = {
760 .lo_add = glock_lo_add,
761 .lo_after_commit = glock_lo_after_commit,
762 .lo_name = "glock",
763};
764
/* Log operations for journaled metadata buffers (full replay support). */
765const struct gfs2_log_operations gfs2_buf_lops = {
766 .lo_add = buf_lo_add,
767 .lo_incore_commit = buf_lo_incore_commit,
768 .lo_before_commit = buf_lo_before_commit,
769 .lo_after_commit = buf_lo_after_commit,
770 .lo_before_scan = buf_lo_before_scan,
771 .lo_scan_elements = buf_lo_scan_elements,
772 .lo_after_scan = buf_lo_after_scan,
773 .lo_name = "buf",
774};
775
/* Log operations for revoke tags. */
776const struct gfs2_log_operations gfs2_revoke_lops = {
777 .lo_add = revoke_lo_add,
778 .lo_before_commit = revoke_lo_before_commit,
779 .lo_before_scan = revoke_lo_before_scan,
780 .lo_scan_elements = revoke_lo_scan_elements,
781 .lo_after_scan = revoke_lo_after_scan,
782 .lo_name = "revoke",
783};
784
/* Log operations for resource groups: commit-side only. */
785const struct gfs2_log_operations gfs2_rg_lops = {
786 .lo_add = rg_lo_add,
787 .lo_after_commit = rg_lo_after_commit,
788 .lo_name = "rg",
789};
790
/* Log operations for data buffers; shares buf_lo_incore_commit. */
791const struct gfs2_log_operations gfs2_databuf_lops = {
792 .lo_add = databuf_lo_add,
793 .lo_incore_commit = buf_lo_incore_commit,
794 .lo_before_commit = databuf_lo_before_commit,
795 .lo_after_commit = databuf_lo_after_commit,
796 .lo_scan_elements = databuf_lo_scan_elements,
797 .lo_after_scan = databuf_lo_after_scan,
798 .lo_name = "databuf",
799};
800
/* NULL-terminated table iterated by the lops_*() dispatch helpers. */
801const struct gfs2_log_operations *gfs2_log_ops[] = {
802 &gfs2_glock_lops,
803 &gfs2_buf_lops,
804 &gfs2_revoke_lops,
805 &gfs2_rg_lops,
806 &gfs2_databuf_lops,
807 NULL,
808};
809
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
new file mode 100644
index 000000000000..5839c05ae6be
--- /dev/null
+++ b/fs/gfs2/lops.h
@@ -0,0 +1,99 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __LOPS_DOT_H__
11#define __LOPS_DOT_H__
12
13#include <linux/list.h>
14#include "incore.h"
15
16extern const struct gfs2_log_operations gfs2_glock_lops;
17extern const struct gfs2_log_operations gfs2_buf_lops;
18extern const struct gfs2_log_operations gfs2_revoke_lops;
19extern const struct gfs2_log_operations gfs2_rg_lops;
20extern const struct gfs2_log_operations gfs2_databuf_lops;
21
22extern const struct gfs2_log_operations *gfs2_log_ops[];
23
24static inline void lops_init_le(struct gfs2_log_element *le,
25 const struct gfs2_log_operations *lops)
26{
27 INIT_LIST_HEAD(&le->le_list);
28 le->le_ops = lops;
29}
30
31static inline void lops_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
32{
33 if (le->le_ops->lo_add)
34 le->le_ops->lo_add(sdp, le);
35}
36
37static inline void lops_incore_commit(struct gfs2_sbd *sdp,
38 struct gfs2_trans *tr)
39{
40 int x;
41 for (x = 0; gfs2_log_ops[x]; x++)
42 if (gfs2_log_ops[x]->lo_incore_commit)
43 gfs2_log_ops[x]->lo_incore_commit(sdp, tr);
44}
45
46static inline void lops_before_commit(struct gfs2_sbd *sdp)
47{
48 int x;
49 for (x = 0; gfs2_log_ops[x]; x++)
50 if (gfs2_log_ops[x]->lo_before_commit)
51 gfs2_log_ops[x]->lo_before_commit(sdp);
52}
53
54static inline void lops_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
55{
56 int x;
57 for (x = 0; gfs2_log_ops[x]; x++)
58 if (gfs2_log_ops[x]->lo_after_commit)
59 gfs2_log_ops[x]->lo_after_commit(sdp, ai);
60}
61
62static inline void lops_before_scan(struct gfs2_jdesc *jd,
63 struct gfs2_log_header *head,
64 unsigned int pass)
65{
66 int x;
67 for (x = 0; gfs2_log_ops[x]; x++)
68 if (gfs2_log_ops[x]->lo_before_scan)
69 gfs2_log_ops[x]->lo_before_scan(jd, head, pass);
70}
71
72static inline int lops_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
73 struct gfs2_log_descriptor *ld,
74 __be64 *ptr,
75 unsigned int pass)
76{
77 int x, error;
78 for (x = 0; gfs2_log_ops[x]; x++)
79 if (gfs2_log_ops[x]->lo_scan_elements) {
80 error = gfs2_log_ops[x]->lo_scan_elements(jd, start,
81 ld, ptr, pass);
82 if (error)
83 return error;
84 }
85
86 return 0;
87}
88
89static inline void lops_after_scan(struct gfs2_jdesc *jd, int error,
90 unsigned int pass)
91{
92 int x;
93 for (x = 0; gfs2_log_ops[x]; x++)
94 if (gfs2_log_ops[x]->lo_before_scan)
95 gfs2_log_ops[x]->lo_after_scan(jd, error, pass);
96}
97
98#endif /* __LOPS_DOT_H__ */
99
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
new file mode 100644
index 000000000000..21508a13bb78
--- /dev/null
+++ b/fs/gfs2/main.c
@@ -0,0 +1,150 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/gfs2_ondisk.h>
18#include <linux/lm_interface.h>
19#include <asm/atomic.h>
20
21#include "gfs2.h"
22#include "incore.h"
23#include "ops_fstype.h"
24#include "sys.h"
25#include "util.h"
26#include "glock.h"
27
28static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
29{
30 struct gfs2_inode *ip = foo;
31 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
32 SLAB_CTOR_CONSTRUCTOR) {
33 inode_init_once(&ip->i_inode);
34 spin_lock_init(&ip->i_spin);
35 init_rwsem(&ip->i_rw_mutex);
36 memset(ip->i_cache, 0, sizeof(ip->i_cache));
37 }
38}
39
40static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
41{
42 struct gfs2_glock *gl = foo;
43 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
44 SLAB_CTOR_CONSTRUCTOR) {
45 INIT_HLIST_NODE(&gl->gl_list);
46 spin_lock_init(&gl->gl_spin);
47 INIT_LIST_HEAD(&gl->gl_holders);
48 INIT_LIST_HEAD(&gl->gl_waiters1);
49 INIT_LIST_HEAD(&gl->gl_waiters2);
50 INIT_LIST_HEAD(&gl->gl_waiters3);
51 gl->gl_lvb = NULL;
52 atomic_set(&gl->gl_lvb_count, 0);
53 INIT_LIST_HEAD(&gl->gl_reclaim);
54 INIT_LIST_HEAD(&gl->gl_ail_list);
55 atomic_set(&gl->gl_ail_count, 0);
56 }
57}
58
59/**
60 * init_gfs2_fs - Register GFS2 as a filesystem
61 *
62 * Returns: 0 on success, error code on failure
63 */
64
65static int __init init_gfs2_fs(void)
66{
67 int error;
68
69 error = gfs2_sys_init();
70 if (error)
71 return error;
72
73 error = gfs2_glock_init();
74 if (error)
75 goto fail;
76
77 error = -ENOMEM;
78 gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
79 sizeof(struct gfs2_glock),
80 0, 0,
81 gfs2_init_glock_once, NULL);
82 if (!gfs2_glock_cachep)
83 goto fail;
84
85 gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
86 sizeof(struct gfs2_inode),
87 0, (SLAB_RECLAIM_ACCOUNT|
88 SLAB_PANIC|SLAB_MEM_SPREAD),
89 gfs2_init_inode_once, NULL);
90 if (!gfs2_inode_cachep)
91 goto fail;
92
93 gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
94 sizeof(struct gfs2_bufdata),
95 0, 0, NULL, NULL);
96 if (!gfs2_bufdata_cachep)
97 goto fail;
98
99 error = register_filesystem(&gfs2_fs_type);
100 if (error)
101 goto fail;
102
103 error = register_filesystem(&gfs2meta_fs_type);
104 if (error)
105 goto fail_unregister;
106
107 printk("GFS2 (built %s %s) installed\n", __DATE__, __TIME__);
108
109 return 0;
110
111fail_unregister:
112 unregister_filesystem(&gfs2_fs_type);
113fail:
114 if (gfs2_bufdata_cachep)
115 kmem_cache_destroy(gfs2_bufdata_cachep);
116
117 if (gfs2_inode_cachep)
118 kmem_cache_destroy(gfs2_inode_cachep);
119
120 if (gfs2_glock_cachep)
121 kmem_cache_destroy(gfs2_glock_cachep);
122
123 gfs2_sys_uninit();
124 return error;
125}
126
127/**
128 * exit_gfs2_fs - Unregister the file system
129 *
130 */
131
132static void __exit exit_gfs2_fs(void)
133{
134 unregister_filesystem(&gfs2_fs_type);
135 unregister_filesystem(&gfs2meta_fs_type);
136
137 kmem_cache_destroy(gfs2_bufdata_cachep);
138 kmem_cache_destroy(gfs2_inode_cachep);
139 kmem_cache_destroy(gfs2_glock_cachep);
140
141 gfs2_sys_uninit();
142}
143
/* Module metadata and the init/exit entry points for the GFS2 module. */
144MODULE_DESCRIPTION("Global File System");
145MODULE_AUTHOR("Red Hat, Inc.");
146MODULE_LICENSE("GPL");
147
148module_init(init_gfs2_fs);
149module_exit(exit_gfs2_fs);
150
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
new file mode 100644
index 000000000000..3912d6a4b1e6
--- /dev/null
+++ b/fs/gfs2/meta_io.c
@@ -0,0 +1,590 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/mm.h>
16#include <linux/pagemap.h>
17#include <linux/writeback.h>
18#include <linux/swap.h>
19#include <linux/delay.h>
20#include <linux/bio.h>
21#include <linux/gfs2_ondisk.h>
22#include <linux/lm_interface.h>
23
24#include "gfs2.h"
25#include "incore.h"
26#include "glock.h"
27#include "glops.h"
28#include "inode.h"
29#include "log.h"
30#include "lops.h"
31#include "meta_io.h"
32#include "rgrp.h"
33#include "trans.h"
34#include "util.h"
35#include "ops_address.h"
36
37static int aspace_get_block(struct inode *inode, sector_t lblock,
38 struct buffer_head *bh_result, int create)
39{
40 gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
41 return -EOPNOTSUPP;
42}
43
/* Writepage for a glock aspace; aspace_get_block() will warn if asked
   to map anything. */
44static int gfs2_aspace_writepage(struct page *page,
45 struct writeback_control *wbc)
46{
47 return block_write_full_page(page, aspace_get_block, wbc);
48}
49
/* Address space operations shared by all glock aspace inodes. */
50static const struct address_space_operations aspace_aops = {
51 .writepage = gfs2_aspace_writepage,
52 .releasepage = gfs2_releasepage,
53};
54
55/**
56 * gfs2_aspace_get - Create and initialize a struct inode structure
57 * @sdp: the filesystem the aspace is in
58 *
59 * Right now a struct inode is just a struct inode. Maybe Linux
60 * will supply a more lightweight address space construct (that works)
61 * in the future.
62 *
63 * Make sure pages/buffers in this aspace aren't in high memory.
64 *
65 * Returns: the aspace
66 */
67
68struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
69{
70 struct inode *aspace;
71
72 aspace = new_inode(sdp->sd_vfs);
73 if (aspace) {
74 mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
75 aspace->i_mapping->a_ops = &aspace_aops;
76 aspace->i_size = ~0ULL;
77 aspace->i_private = NULL;
78 insert_inode_hash(aspace);
79 }
80 return aspace;
81}
82
/* Release an aspace inode obtained from gfs2_aspace_get(). */
83void gfs2_aspace_put(struct inode *aspace)
84{
85 remove_inode_hash(aspace);
86 iput(aspace);
87}
88
89/**
90 * gfs2_meta_inval - Invalidate all buffers associated with a glock
91 * @gl: the glock
92 *
93 */
94
95void gfs2_meta_inval(struct gfs2_glock *gl)
96{
97 struct gfs2_sbd *sdp = gl->gl_sbd;
98 struct inode *aspace = gl->gl_aspace;
99 struct address_space *mapping = gl->gl_aspace->i_mapping;
100
101 gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
102
103 atomic_inc(&aspace->i_writecount);
104 truncate_inode_pages(mapping, 0);
105 atomic_dec(&aspace->i_writecount);
106
107 gfs2_assert_withdraw(sdp, !mapping->nrpages);
108}
109
110/**
111 * gfs2_meta_sync - Sync all buffers associated with a glock
112 * @gl: The glock
113 *
114 */
115
116void gfs2_meta_sync(struct gfs2_glock *gl)
117{
118 struct address_space *mapping = gl->gl_aspace->i_mapping;
119 int error;
120
121 filemap_fdatawrite(mapping);
122 error = filemap_fdatawait(mapping);
123
124 if (error)
125 gfs2_io_error(gl->gl_sbd);
126}
127
128/**
129 * getbuf - Get a buffer with a given address space
130 * @sdp: the filesystem
131 * @aspace: the address space
132 * @blkno: the block number (filesystem scope)
133 * @create: 1 if the buffer should be created
134 *
135 * Returns: the buffer
136 */
137
138static struct buffer_head *getbuf(struct gfs2_sbd *sdp, struct inode *aspace,
139 u64 blkno, int create)
140{
141 struct page *page;
142 struct buffer_head *bh;
143 unsigned int shift;
144 unsigned long index;
145 unsigned int bufnum;
146
147 shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
148 index = blkno >> shift; /* convert block to page */
149 bufnum = blkno - (index << shift); /* block buf index within page */
150
151 if (create) {
152 for (;;) {
153 page = grab_cache_page(aspace->i_mapping, index);
154 if (page)
155 break;
156 yield();
157 }
158 } else {
159 page = find_lock_page(aspace->i_mapping, index);
160 if (!page)
161 return NULL;
162 }
163
164 if (!page_has_buffers(page))
165 create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);
166
167 /* Locate header for our buffer within our page */
168 for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
169 /* Do nothing */;
170 get_bh(bh);
171
172 if (!buffer_mapped(bh))
173 map_bh(bh, sdp->sd_vfs, blkno);
174
175 unlock_page(page);
176 mark_page_accessed(page);
177 page_cache_release(page);
178
179 return bh;
180}
181
182static void meta_prep_new(struct buffer_head *bh)
183{
184 struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
185
186 lock_buffer(bh);
187 clear_buffer_dirty(bh);
188 set_buffer_uptodate(bh);
189 unlock_buffer(bh);
190
191 mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
192}
193
194/**
195 * gfs2_meta_new - Get a block
196 * @gl: The glock associated with this block
197 * @blkno: The block number
198 *
199 * Returns: The buffer
200 */
201
202struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
203{
204 struct buffer_head *bh;
205 bh = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
206 meta_prep_new(bh);
207 return bh;
208}
209
210/**
211 * gfs2_meta_read - Read a block from disk
212 * @gl: The glock covering the block
213 * @blkno: The block number
214 * @flags: flags
215 * @bhp: the place where the buffer is returned (NULL on failure)
216 *
217 * Returns: errno
218 */
219
220int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
221 struct buffer_head **bhp)
222{
223 *bhp = getbuf(gl->gl_sbd, gl->gl_aspace, blkno, CREATE);
224 if (!buffer_uptodate(*bhp))
225 ll_rw_block(READ_META, 1, bhp);
226 if (flags & DIO_WAIT) {
227 int error = gfs2_meta_wait(gl->gl_sbd, *bhp);
228 if (error) {
229 brelse(*bhp);
230 return error;
231 }
232 }
233
234 return 0;
235}
236
237/**
238 * gfs2_meta_wait - Reread a block from disk
239 * @sdp: the filesystem
240 * @bh: The block to wait for
241 *
242 * Returns: errno
243 */
244
245int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
246{
247 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
248 return -EIO;
249
250 wait_on_buffer(bh);
251
252 if (!buffer_uptodate(bh)) {
253 struct gfs2_trans *tr = current->journal_info;
254 if (tr && tr->tr_touched)
255 gfs2_io_error_bh(sdp, bh);
256 return -EIO;
257 }
258 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
259 return -EIO;
260
261 return 0;
262}
263
264/**
265 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
266 * @gl: the glock the buffer belongs to
267 * @bh: The buffer to be attached to
268 * @meta: Flag to indicate whether its metadata or not
269 */
270
271void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
272 int meta)
273{
274 struct gfs2_bufdata *bd;
275
276 if (meta)
277 lock_page(bh->b_page);
278
279 if (bh->b_private) {
280 if (meta)
281 unlock_page(bh->b_page);
282 return;
283 }
284
285 bd = kmem_cache_alloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL),
286 memset(bd, 0, sizeof(struct gfs2_bufdata));
287 bd->bd_bh = bh;
288 bd->bd_gl = gl;
289
290 INIT_LIST_HEAD(&bd->bd_list_tr);
291 if (meta)
292 lops_init_le(&bd->bd_le, &gfs2_buf_lops);
293 else
294 lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
295 bh->b_private = bd;
296
297 if (meta)
298 unlock_page(bh->b_page);
299}
300
301/**
302 * gfs2_pin - Pin a buffer in memory
303 * @sdp: the filesystem the buffer belongs to
304 * @bh: The buffer to be pinned
305 *
306 */
307
308void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
309{
310 struct gfs2_bufdata *bd = bh->b_private;
311
312 gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
313
314 if (test_set_buffer_pinned(bh))
315 gfs2_assert_withdraw(sdp, 0);
316
317 wait_on_buffer(bh);
318
319 /* If this buffer is in the AIL and it has already been written
320 to in-place disk block, remove it from the AIL. */
321
322 gfs2_log_lock(sdp);
323 if (bd->bd_ail && !buffer_in_io(bh))
324 list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
325 gfs2_log_unlock(sdp);
326
327 clear_buffer_dirty(bh);
328 wait_on_buffer(bh);
329
330 if (!buffer_uptodate(bh))
331 gfs2_io_error_bh(sdp, bh);
332
333 get_bh(bh);
334}
335
/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai: the AIL entry the buffer is being moved onto
 *
 * Re-dirties the buffer, clears the pinned flag and places it on the
 * new AIL's ail1 list.  If the buffer was already on an AIL the extra
 * pin reference is dropped; otherwise it is transferred to the glock's
 * AIL list (gl_ail_count tracks it).
 */

void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd = bh->b_private;

	gfs2_assert_withdraw(sdp, buffer_uptodate(bh));

	/* Unpinning a buffer that was never pinned is a logic error */
	if (!buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);

	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	gfs2_log_lock(sdp);
	if (bd->bd_ail) {
		/* Moving between AILs: drop the ref taken by gfs2_pin() */
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		/* First time on an AIL: account it against the glock */
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_ail = ai;
	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
	gfs2_log_unlock(sdp);
}
370
/**
 * gfs2_meta_wipe - make inode's buffers so they aren't dirty/pinned anymore
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 * For each block in the run that is present in the address space cache,
 * unpin it, pull it off any AIL (issuing a revoke for AIL buffers) and
 * clear its dirty/uptodate state.  Must be called inside a transaction
 * when pinned buffers may be present (current->journal_info is used).
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *aspace = ip->i_gl->gl_aspace;
	struct buffer_head *bh;

	while (blen) {
		/* NO_CREATE: only touch buffers already in the cache */
		bh = getbuf(sdp, aspace, bstart, NO_CREATE);
		if (bh) {
			struct gfs2_bufdata *bd = bh->b_private;

			/* NOTE(review): a pinned buffer is assumed to always
			   have a bufdata attached — bd is not NULL-checked
			   in this branch.  Confirm against gfs2_pin(). */
			if (test_clear_buffer_pinned(bh)) {
				struct gfs2_trans *tr = current->journal_info;
				gfs2_log_lock(sdp);
				list_del_init(&bd->bd_le.le_list);
				gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
				sdp->sd_log_num_buf--;
				gfs2_log_unlock(sdp);
				tr->tr_num_buf_rm++;
				/* Drop the ref that gfs2_pin() took */
				brelse(bh);
			}
			if (bd) {
				gfs2_log_lock(sdp);
				if (bd->bd_ail) {
					u64 blkno = bh->b_blocknr;
					bd->bd_ail = NULL;
					list_del(&bd->bd_ail_st_list);
					list_del(&bd->bd_ail_gl_list);
					atomic_dec(&bd->bd_gl->gl_ail_count);
					brelse(bh);
					gfs2_log_unlock(sdp);
					/* The in-place block must never be
					   replayed from the log now */
					gfs2_trans_add_revoke(sdp, blkno);
				} else
					gfs2_log_unlock(sdp);
			}

			lock_buffer(bh);
			clear_buffer_dirty(bh);
			clear_buffer_uptodate(bh);
			unlock_buffer(bh);

			/* Ref taken by getbuf() above */
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}
427
428/**
429 * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
430 * @ip: The GFS2 inode
431 *
432 * This releases buffers that are in the most-recently-used array of
433 * blocks used for indirect block addressing for this inode.
434 */
435
436void gfs2_meta_cache_flush(struct gfs2_inode *ip)
437{
438 struct buffer_head **bh_slot;
439 unsigned int x;
440
441 spin_lock(&ip->i_spin);
442
443 for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
444 bh_slot = &ip->i_cache[x];
445 if (!*bh_slot)
446 break;
447 brelse(*bh_slot);
448 *bh_slot = NULL;
449 }
450
451 spin_unlock(&ip->i_spin);
452}
453
/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Try to use the gfs2_inode's MRU metadata tree cache.
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
			      int new, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
	int in_cache = 0;

	/* Fast path: the per-height cache slot may already hold this block */
	spin_lock(&ip->i_spin);
	if (*bh_slot && (*bh_slot)->b_blocknr == num) {
		bh = *bh_slot;
		get_bh(bh);
		in_cache = 1;
	}
	spin_unlock(&ip->i_spin);

	if (!bh)
		bh = getbuf(gl->gl_sbd, gl->gl_aspace, num, CREATE);

	if (!bh)
		return -ENOBUFS;

	if (new) {
		/* New buffers are only valid at indirect levels (height != 0) */
		if (gfs2_assert_warn(sdp, height))
			goto err;
		meta_prep_new(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
	} else {
		/* Height 0 is the dinode itself; anything above is indirect */
		u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
		if (!buffer_uptodate(bh)) {
			ll_rw_block(READ_META, 1, &bh);
			if (gfs2_meta_wait(sdp, bh))
				goto err;
		}
		/* Verify the on-disk metadata type matches expectations */
		if (gfs2_metatype_check(sdp, bh, mtype))
			goto err;
	}

	/* Install into the MRU slot; the slot holds its own reference */
	if (!in_cache) {
		spin_lock(&ip->i_spin);
		if (*bh_slot)
			brelse(*bh_slot);
		*bh_slot = bh;
		get_bh(bh);
		spin_unlock(&ip->i_spin);
	}

	*bhp = bh;
	return 0;
err:
	brelse(bh);
	return -EIO;
}
522
/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Reads the first block synchronously (READ_META) and kicks off
 * asynchronous readahead (READA) on the rest of the extent, capped by
 * the gt_max_readahead tunable.  The readahead loop bails out as soon
 * as the first buffer becomes available.
 *
 * returns: the first buffer in the extent (with a reference held)
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;
	struct buffer_head *first_bh, *bh;
	/* Convert the byte-based tunable into a block count */
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
		     sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = getbuf(sdp, aspace, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(READ_META, 1, &first_bh);

	dblock++;
	extlen--;

	while (extlen) {
		bh = getbuf(sdp, aspace, dblock, CREATE);

		/* READA is best-effort: the request may be silently dropped */
		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(READA, 1, &bh);
		brelse(bh);
		dblock++;
		extlen--;
		/* Stop issuing readahead once the first block has landed */
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}
573
574/**
575 * gfs2_meta_syncfs - sync all the buffers in a filesystem
576 * @sdp: the filesystem
577 *
578 */
579
580void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
581{
582 gfs2_log_flush(sdp, NULL);
583 for (;;) {
584 gfs2_ail1_start(sdp, DIO_ALL);
585 if (gfs2_ail1_empty(sdp, DIO_ALL))
586 break;
587 msleep(10);
588 }
589}
590
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
new file mode 100644
index 000000000000..3ec939e20dff
--- /dev/null
+++ b/fs/gfs2/meta_io.h
@@ -0,0 +1,78 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __DIO_DOT_H__
11#define __DIO_DOT_H__
12
13#include <linux/buffer_head.h>
14#include <linux/string.h>
15#include "incore.h"
16
/* Zero the entire data area of a buffer. */
static inline void gfs2_buffer_clear(struct buffer_head *bh)
{
	memset(bh->b_data, 0, bh->b_size);
}
21
22static inline void gfs2_buffer_clear_tail(struct buffer_head *bh, int head)
23{
24 BUG_ON(head > bh->b_size);
25 memset(bh->b_data + head, 0, bh->b_size - head);
26}
27
/*
 * Copy the tail of @from_bh (everything past @from_head) into @to_bh at
 * offset @to_head, then zero the (from_head - to_head) bytes at the end
 * of @to_bh that the shorter source tail does not cover.  Requires
 * from_head >= to_head.
 */
static inline void gfs2_buffer_copy_tail(struct buffer_head *to_bh,
					 int to_head,
					 struct buffer_head *from_bh,
					 int from_head)
{
	BUG_ON(from_head < to_head);
	memcpy(to_bh->b_data + to_head, from_bh->b_data + from_head,
	       from_bh->b_size - from_head);
	/* NOTE(review): assumes to_bh->b_size == from_bh->b_size so the
	   copied tail ends exactly here — confirm with callers */
	memset(to_bh->b_data + to_bh->b_size + to_head - from_head,
	       0, from_head - to_head);
}
39
40struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp);
41void gfs2_aspace_put(struct inode *aspace);
42
43void gfs2_meta_inval(struct gfs2_glock *gl);
44void gfs2_meta_sync(struct gfs2_glock *gl);
45
46struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
47int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno,
48 int flags, struct buffer_head **bhp);
49int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
50
51void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
52 int meta);
53void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
54void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
55 struct gfs2_ail *ai);
56
57void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
58
59void gfs2_meta_cache_flush(struct gfs2_inode *ip);
60int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
61 int new, struct buffer_head **bhp);
62
63static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
64 struct buffer_head **bhp)
65{
66 return gfs2_meta_indirect_buffer(ip, 0, ip->i_num.no_addr, 0, bhp);
67}
68
69struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
70void gfs2_meta_syncfs(struct gfs2_sbd *sdp);
71
72#define buffer_busy(bh) \
73((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock) | (1ul << BH_Pinned)))
74#define buffer_in_io(bh) \
75((bh)->b_state & ((1ul << BH_Dirty) | (1ul << BH_Lock)))
76
77#endif /* __DIO_DOT_H__ */
78
diff --git a/fs/gfs2/mount.c b/fs/gfs2/mount.c
new file mode 100644
index 000000000000..ef3092e29607
--- /dev/null
+++ b/fs/gfs2/mount.c
@@ -0,0 +1,214 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/gfs2_ondisk.h>
16#include <linux/lm_interface.h>
17
18#include "gfs2.h"
19#include "incore.h"
20#include "mount.h"
21#include "sys.h"
22#include "util.h"
23
24/**
25 * gfs2_mount_args - Parse mount options
26 * @sdp:
27 * @data:
28 *
29 * Return: errno
30 */
31
32int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
33{
34 struct gfs2_args *args = &sdp->sd_args;
35 char *data = data_arg;
36 char *options, *o, *v;
37 int error = 0;
38
39 if (!remount) {
40 /* If someone preloaded options, use those instead */
41 spin_lock(&gfs2_sys_margs_lock);
42 if (gfs2_sys_margs) {
43 data = gfs2_sys_margs;
44 gfs2_sys_margs = NULL;
45 }
46 spin_unlock(&gfs2_sys_margs_lock);
47
48 /* Set some defaults */
49 args->ar_num_glockd = GFS2_GLOCKD_DEFAULT;
50 args->ar_quota = GFS2_QUOTA_DEFAULT;
51 args->ar_data = GFS2_DATA_DEFAULT;
52 }
53
54 /* Split the options into tokens with the "," character and
55 process them */
56
57 for (options = data; (o = strsep(&options, ",")); ) {
58 if (!*o)
59 continue;
60
61 v = strchr(o, '=');
62 if (v)
63 *v++ = 0;
64
65 if (!strcmp(o, "lockproto")) {
66 if (!v)
67 goto need_value;
68 if (remount && strcmp(v, args->ar_lockproto))
69 goto cant_remount;
70 strncpy(args->ar_lockproto, v, GFS2_LOCKNAME_LEN);
71 args->ar_lockproto[GFS2_LOCKNAME_LEN - 1] = 0;
72 }
73
74 else if (!strcmp(o, "locktable")) {
75 if (!v)
76 goto need_value;
77 if (remount && strcmp(v, args->ar_locktable))
78 goto cant_remount;
79 strncpy(args->ar_locktable, v, GFS2_LOCKNAME_LEN);
80 args->ar_locktable[GFS2_LOCKNAME_LEN - 1] = 0;
81 }
82
83 else if (!strcmp(o, "hostdata")) {
84 if (!v)
85 goto need_value;
86 if (remount && strcmp(v, args->ar_hostdata))
87 goto cant_remount;
88 strncpy(args->ar_hostdata, v, GFS2_LOCKNAME_LEN);
89 args->ar_hostdata[GFS2_LOCKNAME_LEN - 1] = 0;
90 }
91
92 else if (!strcmp(o, "spectator")) {
93 if (remount && !args->ar_spectator)
94 goto cant_remount;
95 args->ar_spectator = 1;
96 sdp->sd_vfs->s_flags |= MS_RDONLY;
97 }
98
99 else if (!strcmp(o, "ignore_local_fs")) {
100 if (remount && !args->ar_ignore_local_fs)
101 goto cant_remount;
102 args->ar_ignore_local_fs = 1;
103 }
104
105 else if (!strcmp(o, "localflocks")) {
106 if (remount && !args->ar_localflocks)
107 goto cant_remount;
108 args->ar_localflocks = 1;
109 }
110
111 else if (!strcmp(o, "localcaching")) {
112 if (remount && !args->ar_localcaching)
113 goto cant_remount;
114 args->ar_localcaching = 1;
115 }
116
117 else if (!strcmp(o, "debug"))
118 args->ar_debug = 1;
119
120 else if (!strcmp(o, "nodebug"))
121 args->ar_debug = 0;
122
123 else if (!strcmp(o, "upgrade")) {
124 if (remount && !args->ar_upgrade)
125 goto cant_remount;
126 args->ar_upgrade = 1;
127 }
128
129 else if (!strcmp(o, "num_glockd")) {
130 unsigned int x;
131 if (!v)
132 goto need_value;
133 sscanf(v, "%u", &x);
134 if (remount && x != args->ar_num_glockd)
135 goto cant_remount;
136 if (!x || x > GFS2_GLOCKD_MAX) {
137 fs_info(sdp, "0 < num_glockd <= %u (not %u)\n",
138 GFS2_GLOCKD_MAX, x);
139 error = -EINVAL;
140 break;
141 }
142 args->ar_num_glockd = x;
143 }
144
145 else if (!strcmp(o, "acl")) {
146 args->ar_posix_acl = 1;
147 sdp->sd_vfs->s_flags |= MS_POSIXACL;
148 }
149
150 else if (!strcmp(o, "noacl")) {
151 args->ar_posix_acl = 0;
152 sdp->sd_vfs->s_flags &= ~MS_POSIXACL;
153 }
154
155 else if (!strcmp(o, "quota")) {
156 if (!v)
157 goto need_value;
158 if (!strcmp(v, "off"))
159 args->ar_quota = GFS2_QUOTA_OFF;
160 else if (!strcmp(v, "account"))
161 args->ar_quota = GFS2_QUOTA_ACCOUNT;
162 else if (!strcmp(v, "on"))
163 args->ar_quota = GFS2_QUOTA_ON;
164 else {
165 fs_info(sdp, "invalid value for quota\n");
166 error = -EINVAL;
167 break;
168 }
169 }
170
171 else if (!strcmp(o, "suiddir"))
172 args->ar_suiddir = 1;
173
174 else if (!strcmp(o, "nosuiddir"))
175 args->ar_suiddir = 0;
176
177 else if (!strcmp(o, "data")) {
178 if (!v)
179 goto need_value;
180 if (!strcmp(v, "writeback"))
181 args->ar_data = GFS2_DATA_WRITEBACK;
182 else if (!strcmp(v, "ordered"))
183 args->ar_data = GFS2_DATA_ORDERED;
184 else {
185 fs_info(sdp, "invalid value for data\n");
186 error = -EINVAL;
187 break;
188 }
189 }
190
191 else {
192 fs_info(sdp, "unknown option: %s\n", o);
193 error = -EINVAL;
194 break;
195 }
196 }
197
198 if (error)
199 fs_info(sdp, "invalid mount option(s)\n");
200
201 if (data != data_arg)
202 kfree(data);
203
204 return error;
205
206need_value:
207 fs_info(sdp, "need value for option %s\n", o);
208 return -EINVAL;
209
210cant_remount:
211 fs_info(sdp, "can't remount with option %s\n", o);
212 return -EINVAL;
213}
214
diff --git a/fs/gfs2/mount.h b/fs/gfs2/mount.h
new file mode 100644
index 000000000000..401288acfdf3
--- /dev/null
+++ b/fs/gfs2/mount.h
@@ -0,0 +1,17 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __MOUNT_DOT_H__
11#define __MOUNT_DOT_H__
12
13struct gfs2_sbd;
14
15int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount);
16
17#endif /* __MOUNT_DOT_H__ */
diff --git a/fs/gfs2/ondisk.c b/fs/gfs2/ondisk.c
new file mode 100644
index 000000000000..1025960b0e6e
--- /dev/null
+++ b/fs/gfs2/ondisk.c
@@ -0,0 +1,308 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15
16#include "gfs2.h"
17#include <linux/gfs2_ondisk.h>
18
/* Print one struct member as "  name = value".  The argument and the
   expansion are parenthesised, the stray trailing semicolon is gone
   (call sites supply their own), and the parameter is no longer named
   after the `struct` keyword. */
#define pv(s, member, fmt) \
	printk(KERN_INFO "  "#member" = "fmt"\n", (s)->member)
21
22/*
23 * gfs2_xxx_in - read in an xxx struct
24 * first arg: the cpu-order structure
25 * buf: the disk-order buffer
26 *
27 * gfs2_xxx_out - write out an xxx struct
28 * first arg: the cpu-order structure
29 * buf: the disk-order buffer
30 *
31 * gfs2_xxx_print - print out an xxx struct
32 * first arg: the cpu-order structure
33 */
34
35void gfs2_inum_in(struct gfs2_inum *no, const void *buf)
36{
37 const struct gfs2_inum *str = buf;
38
39 no->no_formal_ino = be64_to_cpu(str->no_formal_ino);
40 no->no_addr = be64_to_cpu(str->no_addr);
41}
42
43void gfs2_inum_out(const struct gfs2_inum *no, void *buf)
44{
45 struct gfs2_inum *str = buf;
46
47 str->no_formal_ino = cpu_to_be64(no->no_formal_ino);
48 str->no_addr = cpu_to_be64(no->no_addr);
49}
50
/* Dump a cpu-order gfs2_inum to the kernel log (debugging aid). */
static void gfs2_inum_print(const struct gfs2_inum *no)
{
	printk(KERN_INFO "  no_formal_ino = %llu\n", (unsigned long long)no->no_formal_ino);
	printk(KERN_INFO "  no_addr = %llu\n", (unsigned long long)no->no_addr);
}
56
57static void gfs2_meta_header_in(struct gfs2_meta_header *mh, const void *buf)
58{
59 const struct gfs2_meta_header *str = buf;
60
61 mh->mh_magic = be32_to_cpu(str->mh_magic);
62 mh->mh_type = be32_to_cpu(str->mh_type);
63 mh->mh_format = be32_to_cpu(str->mh_format);
64}
65
66static void gfs2_meta_header_out(const struct gfs2_meta_header *mh, void *buf)
67{
68 struct gfs2_meta_header *str = buf;
69
70 str->mh_magic = cpu_to_be32(mh->mh_magic);
71 str->mh_type = cpu_to_be32(mh->mh_type);
72 str->mh_format = cpu_to_be32(mh->mh_format);
73}
74
/* Dump a cpu-order metadata header to the kernel log (debugging aid). */
static void gfs2_meta_header_print(const struct gfs2_meta_header *mh)
{
	pv(mh, mh_magic, "0x%.8X");
	pv(mh, mh_type, "%u");
	pv(mh, mh_format, "%u");
}
81
82void gfs2_sb_in(struct gfs2_sb *sb, const void *buf)
83{
84 const struct gfs2_sb *str = buf;
85
86 gfs2_meta_header_in(&sb->sb_header, buf);
87
88 sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
89 sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
90 sb->sb_bsize = be32_to_cpu(str->sb_bsize);
91 sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
92
93 gfs2_inum_in(&sb->sb_master_dir, (char *)&str->sb_master_dir);
94 gfs2_inum_in(&sb->sb_root_dir, (char *)&str->sb_root_dir);
95
96 memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
97 memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
98}
99
100void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf)
101{
102 const struct gfs2_rindex *str = buf;
103
104 ri->ri_addr = be64_to_cpu(str->ri_addr);
105 ri->ri_length = be32_to_cpu(str->ri_length);
106 ri->ri_data0 = be64_to_cpu(str->ri_data0);
107 ri->ri_data = be32_to_cpu(str->ri_data);
108 ri->ri_bitbytes = be32_to_cpu(str->ri_bitbytes);
109
110}
111
/* Dump a cpu-order resource-group index entry to the kernel log. */
void gfs2_rindex_print(const struct gfs2_rindex *ri)
{
	printk(KERN_INFO "  ri_addr = %llu\n", (unsigned long long)ri->ri_addr);
	pv(ri, ri_length, "%u");

	printk(KERN_INFO "  ri_data0 = %llu\n", (unsigned long long)ri->ri_data0);
	pv(ri, ri_data, "%u");

	pv(ri, ri_bitbytes, "%u");
}
122
123void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf)
124{
125 const struct gfs2_rgrp *str = buf;
126
127 gfs2_meta_header_in(&rg->rg_header, buf);
128 rg->rg_flags = be32_to_cpu(str->rg_flags);
129 rg->rg_free = be32_to_cpu(str->rg_free);
130 rg->rg_dinodes = be32_to_cpu(str->rg_dinodes);
131 rg->rg_igeneration = be64_to_cpu(str->rg_igeneration);
132}
133
/* Convert a cpu-order resource-group header into on-disk (big-endian)
   order.  Padding and reserved areas are explicitly zeroed so stale
   memory never reaches the disk. */
void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf)
{
	struct gfs2_rgrp *str = buf;

	gfs2_meta_header_out(&rg->rg_header, buf);
	str->rg_flags = cpu_to_be32(rg->rg_flags);
	str->rg_free = cpu_to_be32(rg->rg_free);
	str->rg_dinodes = cpu_to_be32(rg->rg_dinodes);
	str->__pad = cpu_to_be32(0);
	str->rg_igeneration = cpu_to_be64(rg->rg_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}
146
147void gfs2_quota_in(struct gfs2_quota *qu, const void *buf)
148{
149 const struct gfs2_quota *str = buf;
150
151 qu->qu_limit = be64_to_cpu(str->qu_limit);
152 qu->qu_warn = be64_to_cpu(str->qu_warn);
153 qu->qu_value = be64_to_cpu(str->qu_value);
154}
155
/* Convert an on-disk (big-endian) dinode into cpu order. */
void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf)
{
	const struct gfs2_dinode *str = buf;

	gfs2_meta_header_in(&di->di_header, buf);
	gfs2_inum_in(&di->di_num, &str->di_num);

	di->di_mode = be32_to_cpu(str->di_mode);
	di->di_uid = be32_to_cpu(str->di_uid);
	di->di_gid = be32_to_cpu(str->di_gid);
	di->di_nlink = be32_to_cpu(str->di_nlink);
	di->di_size = be64_to_cpu(str->di_size);
	di->di_blocks = be64_to_cpu(str->di_blocks);
	di->di_atime = be64_to_cpu(str->di_atime);
	di->di_mtime = be64_to_cpu(str->di_mtime);
	di->di_ctime = be64_to_cpu(str->di_ctime);
	di->di_major = be32_to_cpu(str->di_major);
	di->di_minor = be32_to_cpu(str->di_minor);

	/* Block allocation goals and inode generation */
	di->di_goal_meta = be64_to_cpu(str->di_goal_meta);
	di->di_goal_data = be64_to_cpu(str->di_goal_data);
	di->di_generation = be64_to_cpu(str->di_generation);

	di->di_flags = be32_to_cpu(str->di_flags);
	di->di_payload_format = be32_to_cpu(str->di_payload_format);
	di->di_height = be16_to_cpu(str->di_height);

	/* Directory hash-tree depth and entry count */
	di->di_depth = be16_to_cpu(str->di_depth);
	di->di_entries = be32_to_cpu(str->di_entries);

	di->di_eattr = be64_to_cpu(str->di_eattr);
}
189
190void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf)
191{
192 struct gfs2_dinode *str = buf;
193
194 gfs2_meta_header_out(&di->di_header, buf);
195 gfs2_inum_out(&di->di_num, (char *)&str->di_num);
196
197 str->di_mode = cpu_to_be32(di->di_mode);
198 str->di_uid = cpu_to_be32(di->di_uid);
199 str->di_gid = cpu_to_be32(di->di_gid);
200 str->di_nlink = cpu_to_be32(di->di_nlink);
201 str->di_size = cpu_to_be64(di->di_size);
202 str->di_blocks = cpu_to_be64(di->di_blocks);
203 str->di_atime = cpu_to_be64(di->di_atime);
204 str->di_mtime = cpu_to_be64(di->di_mtime);
205 str->di_ctime = cpu_to_be64(di->di_ctime);
206 str->di_major = cpu_to_be32(di->di_major);
207 str->di_minor = cpu_to_be32(di->di_minor);
208
209 str->di_goal_meta = cpu_to_be64(di->di_goal_meta);
210 str->di_goal_data = cpu_to_be64(di->di_goal_data);
211 str->di_generation = cpu_to_be64(di->di_generation);
212
213 str->di_flags = cpu_to_be32(di->di_flags);
214 str->di_payload_format = cpu_to_be32(di->di_payload_format);
215 str->di_height = cpu_to_be16(di->di_height);
216
217 str->di_depth = cpu_to_be16(di->di_depth);
218 str->di_entries = cpu_to_be32(di->di_entries);
219
220 str->di_eattr = cpu_to_be64(di->di_eattr);
221
222}
223
/* Dump a cpu-order dinode to the kernel log (debugging aid). */
void gfs2_dinode_print(const struct gfs2_dinode *di)
{
	gfs2_meta_header_print(&di->di_header);
	gfs2_inum_print(&di->di_num);

	pv(di, di_mode, "0%o");
	pv(di, di_uid, "%u");
	pv(di, di_gid, "%u");
	pv(di, di_nlink, "%u");
	printk(KERN_INFO "  di_size = %llu\n", (unsigned long long)di->di_size);
	printk(KERN_INFO "  di_blocks = %llu\n", (unsigned long long)di->di_blocks);
	printk(KERN_INFO "  di_atime = %lld\n", (long long)di->di_atime);
	printk(KERN_INFO "  di_mtime = %lld\n", (long long)di->di_mtime);
	printk(KERN_INFO "  di_ctime = %lld\n", (long long)di->di_ctime);
	pv(di, di_major, "%u");
	pv(di, di_minor, "%u");

	printk(KERN_INFO "  di_goal_meta = %llu\n", (unsigned long long)di->di_goal_meta);
	printk(KERN_INFO "  di_goal_data = %llu\n", (unsigned long long)di->di_goal_data);

	pv(di, di_flags, "0x%.8X");
	pv(di, di_payload_format, "%u");
	pv(di, di_height, "%u");

	pv(di, di_depth, "%u");
	pv(di, di_entries, "%u");

	printk(KERN_INFO "  di_eattr = %llu\n", (unsigned long long)di->di_eattr);
}
253
254void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf)
255{
256 const struct gfs2_log_header *str = buf;
257
258 gfs2_meta_header_in(&lh->lh_header, buf);
259 lh->lh_sequence = be64_to_cpu(str->lh_sequence);
260 lh->lh_flags = be32_to_cpu(str->lh_flags);
261 lh->lh_tail = be32_to_cpu(str->lh_tail);
262 lh->lh_blkno = be32_to_cpu(str->lh_blkno);
263 lh->lh_hash = be32_to_cpu(str->lh_hash);
264}
265
266void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf)
267{
268 const struct gfs2_inum_range *str = buf;
269
270 ir->ir_start = be64_to_cpu(str->ir_start);
271 ir->ir_length = be64_to_cpu(str->ir_length);
272}
273
274void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf)
275{
276 struct gfs2_inum_range *str = buf;
277
278 str->ir_start = cpu_to_be64(ir->ir_start);
279 str->ir_length = cpu_to_be64(ir->ir_length);
280}
281
282void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf)
283{
284 const struct gfs2_statfs_change *str = buf;
285
286 sc->sc_total = be64_to_cpu(str->sc_total);
287 sc->sc_free = be64_to_cpu(str->sc_free);
288 sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
289}
290
291void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf)
292{
293 struct gfs2_statfs_change *str = buf;
294
295 str->sc_total = cpu_to_be64(sc->sc_total);
296 str->sc_free = cpu_to_be64(sc->sc_free);
297 str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
298}
299
300void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf)
301{
302 const struct gfs2_quota_change *str = buf;
303
304 qc->qc_change = be64_to_cpu(str->qc_change);
305 qc->qc_flags = be32_to_cpu(str->qc_flags);
306 qc->qc_id = be32_to_cpu(str->qc_id);
307}
308
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
new file mode 100644
index 000000000000..4fb743f4e4a4
--- /dev/null
+++ b/fs/gfs2/ops_address.c
@@ -0,0 +1,790 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/pagemap.h>
16#include <linux/pagevec.h>
17#include <linux/mpage.h>
18#include <linux/fs.h>
19#include <linux/gfs2_ondisk.h>
20#include <linux/lm_interface.h>
21
22#include "gfs2.h"
23#include "incore.h"
24#include "bmap.h"
25#include "glock.h"
26#include "inode.h"
27#include "log.h"
28#include "meta_io.h"
29#include "ops_address.h"
30#include "quota.h"
31#include "trans.h"
32#include "rgrp.h"
33#include "ops_file.h"
34#include "util.h"
35#include "glops.h"
36
37
38static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
39 unsigned int from, unsigned int to)
40{
41 struct buffer_head *head = page_buffers(page);
42 unsigned int bsize = head->b_size;
43 struct buffer_head *bh;
44 unsigned int start, end;
45
46 for (bh = head, start = 0; bh != head || !start;
47 bh = bh->b_this_page, start = end) {
48 end = start + bsize;
49 if (end <= from || start >= to)
50 continue;
51 gfs2_trans_add_bh(ip->i_gl, bh, 0);
52 }
53}
54
/**
 * gfs2_get_block - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Thin wrapper around gfs2_block_map() mapping up to 32 blocks at once.
 *
 * Returns: errno
 */

int gfs2_get_block(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, create, bh_result, 32);
}
70
71/**
72 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
73 * @inode: The inode
74 * @lblock: The block number to look up
75 * @bh_result: The buffer head to return the result in
76 * @create: Non-zero if we may add block to the file
77 *
78 * Returns: errno
79 */
80
81static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
82 struct buffer_head *bh_result, int create)
83{
84 int error;
85
86 error = gfs2_block_map(inode, lblock, 0, bh_result, 1);
87 if (error)
88 return error;
89 if (bh_result->b_blocknr == 0)
90 return -EIO;
91 return 0;
92}
93
/* get_block callback for direct I/O: never allocates, maps up to 32 blocks. */
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, 0, bh_result, 32);
}
99
/**
 * gfs2_writepage - Write complete page
 * @page: Page to write
 * @wbc: writeback control
 *
 * Returns: errno
 *
 * Some of this is copied from block_write_full_page() although we still
 * call it to do most of the work.
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;
	int error;
	int done_trans = 0;

	/* Writeback requires the glock to be held exclusively */
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) {
		unlock_page(page);
		return -EIO;
	}
	/* Already inside a transaction: cannot nest one, just redirty */
	if (current->journal_info)
		goto out_ignore;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		unlock_page(page);
		return 0; /* don't care */
	}

	/* Ordered/journaled data: buffers must join a transaction first */
	if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED || gfs2_is_jdata(ip)) {
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		/* NOTE(review): the "to" argument is s_blocksize-1, not the
		   page size, so only the first buffer is added — confirm
		   whether this is intentional */
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
		done_trans = 1;
	}
	error = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	gfs2_meta_cache_flush(ip);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
158
159static int zero_readpage(struct page *page)
160{
161 void *kaddr;
162
163 kaddr = kmap_atomic(page, KM_USER0);
164 memset(kaddr, 0, PAGE_CACHE_SIZE);
165 kunmap_atomic(page, KM_USER0);
166
167 SetPageUptodate(page);
168
169 return 0;
170}
171
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Copies the inline ("stuffed") file data out of the dinode block into
 * the page and zeroes the remainder.  NOTE(review): assumes
 * ip->i_di.di_size <= PAGE_CACHE_SIZE for a stuffed file — confirm.
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/* Only the first page of a stuffed file might contain data */
	if (unlikely(page->index))
		return zero_readpage(page);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	/* Inline data sits immediately after the dinode header */
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_di.di_size);
	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
	kunmap_atomic(page, KM_USER0);

	brelse(dibh);

	SetPageUptodate(page);

	return 0;
}
206
207
/**
 * gfs2_readpage - readpage with locking
 * @file: The file to read a page for. N.B. This may be NULL if we are
 *        reading an internal file.
 * @page: The page to read
 *
 * Takes the inode glock shared (with atime update) unless the caller
 * already holds it exclusively (GFF_EXLOCK) or this is the internal
 * file sentinel.  Stuffed files are served from the dinode; everything
 * else goes through mpage_readpage().
 *
 * Returns: errno
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct gfs2_file *gf = NULL;
	struct gfs2_holder gh;
	int error;
	int do_unlock = 0;

	if (likely(file != &gfs2_internal_file_sentinel)) {
		if (file) {
			gf = file->private_data;
			if (test_bit(GFF_EXLOCK, &gf->f_flags))
				/* gfs2_sharewrite_nopage has grabbed the ip->i_gl already */
				goto skip_lock;
		}
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh);
		do_unlock = 1;
		error = gfs2_glock_nq_m_atime(1, &gh);
		if (unlikely(error))
			goto out_unlock;
	}

skip_lock:
	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else
		error = mpage_readpage(page, gfs2_get_block);

	/* A withdrawn filesystem turns any result into an I/O error */
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		error = -EIO;

	if (do_unlock) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	return error;
out_unlock:
	/* Glock acquisition failed: page is still locked, release it */
	unlock_page(page);
	if (do_unlock)
		gfs2_holder_uninit(&gh);
	goto out;
}
262
/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: the file being read (may be NULL or the internal-file sentinel)
 * @mapping: the address space the pages belong to
 * @pages: list of pages to read; consumed as pages enter the page cache
 * @nr_pages: number of pages on @pages
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. Its
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We have to handle stuffed files here too.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
 * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
 *    well as read-ahead.
 */
static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	unsigned page_idx;
	int ret;
	int do_unlock = 0;

	if (likely(file != &gfs2_internal_file_sentinel)) {
		if (file) {
			struct gfs2_file *gf = file->private_data;
			if (test_bit(GFF_EXLOCK, &gf->f_flags))
				/* caller already holds ip->i_gl */
				goto skip_lock;
		}
		gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
				 LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh);
		do_unlock = 1;
		ret = gfs2_glock_nq_m_atime(1, &gh);
		/* Try-lock failed: this is only readahead, so just bail out
		   cleanly; readpage will pick these pages up later */
		if (ret == GLR_TRYFAILED)
			goto out_noerror;
		if (unlikely(ret))
			goto out_unlock;
	}
skip_lock:
	if (gfs2_is_stuffed(ip)) {
		/* Inline data: insert each page into the page cache by hand
		   and fill it from the dinode block */
		struct pagevec lru_pvec;
		pagevec_init(&lru_pvec, 0);
		for (page_idx = 0; page_idx < nr_pages; page_idx++) {
			struct page *page = list_entry(pages->prev, struct page, lru);
			prefetchw(&page->flags);
			list_del(&page->lru);
			if (!add_to_page_cache(page, mapping,
					       page->index, GFP_KERNEL)) {
				ret = stuffed_readpage(ip, page);
				unlock_page(page);
				if (!pagevec_add(&lru_pvec, page))
					__pagevec_lru_add(&lru_pvec);
			} else {
				/* Someone beat us to it; drop our reference */
				page_cache_release(page);
			}
		}
		pagevec_lru_add(&lru_pvec);
		ret = 0;
	} else {
		/* What we really want to do .... */
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
	}

	if (do_unlock) {
		gfs2_glock_dq_m(1, &gh);
		gfs2_holder_uninit(&gh);
	}
out:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
out_noerror:
	ret = 0;
out_unlock:
	/* unlock all pages, we can't do any I/O right now */
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);
		unlock_page(page);
		page_cache_release(page);
	}
	if (do_unlock)
		gfs2_holder_uninit(&gh);
	goto out;
}
351
352/**
353 * gfs2_prepare_write - Prepare to write a page to a file
354 * @file: The file to write to
355 * @page: The page which is to be prepared for writing
356 * @from: From (byte range within page)
357 * @to: To (byte range within page)
358 *
359 * Returns: errno
360 */
361
362static int gfs2_prepare_write(struct file *file, struct page *page,
363 unsigned from, unsigned to)
364{
365 struct gfs2_inode *ip = GFS2_I(page->mapping->host);
366 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
367 unsigned int data_blocks, ind_blocks, rblocks;
368 int alloc_required;
369 int error = 0;
370 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + from;
371 loff_t end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
372 struct gfs2_alloc *al;
373
374 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME|GL_AOP, &ip->i_gh);
375 error = gfs2_glock_nq_m_atime(1, &ip->i_gh);
376 if (error)
377 goto out_uninit;
378
379 gfs2_write_calc_reserv(ip, to - from, &data_blocks, &ind_blocks);
380
381 error = gfs2_write_alloc_required(ip, pos, from - to, &alloc_required);
382 if (error)
383 goto out_unlock;
384
385
386 if (alloc_required) {
387 al = gfs2_alloc_get(ip);
388
389 error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
390 if (error)
391 goto out_alloc_put;
392
393 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
394 if (error)
395 goto out_qunlock;
396
397 al->al_requested = data_blocks + ind_blocks;
398 error = gfs2_inplace_reserve(ip);
399 if (error)
400 goto out_qunlock;
401 }
402
403 rblocks = RES_DINODE + ind_blocks;
404 if (gfs2_is_jdata(ip))
405 rblocks += data_blocks ? data_blocks : 1;
406 if (ind_blocks || data_blocks)
407 rblocks += RES_STATFS + RES_QUOTA;
408
409 error = gfs2_trans_begin(sdp, rblocks, 0);
410 if (error)
411 goto out;
412
413 if (gfs2_is_stuffed(ip)) {
414 if (end > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
415 error = gfs2_unstuff_dinode(ip, page);
416 if (error == 0)
417 goto prepare_write;
418 } else if (!PageUptodate(page))
419 error = stuffed_readpage(ip, page);
420 goto out;
421 }
422
423prepare_write:
424 error = block_prepare_write(page, from, to, gfs2_get_block);
425
426out:
427 if (error) {
428 gfs2_trans_end(sdp);
429 if (alloc_required) {
430 gfs2_inplace_release(ip);
431out_qunlock:
432 gfs2_quota_unlock(ip);
433out_alloc_put:
434 gfs2_alloc_put(ip);
435 }
436out_unlock:
437 gfs2_glock_dq_m(1, &ip->i_gh);
438out_uninit:
439 gfs2_holder_uninit(&ip->i_gh);
440 }
441
442 return error;
443}
444
445/**
446 * gfs2_commit_write - Commit write to a file
447 * @file: The file to write to
448 * @page: The page containing the data
449 * @from: From (byte range within page)
450 * @to: To (byte range within page)
451 *
452 * Returns: errno
453 */
454
455static int gfs2_commit_write(struct file *file, struct page *page,
456 unsigned from, unsigned to)
457{
458 struct inode *inode = page->mapping->host;
459 struct gfs2_inode *ip = GFS2_I(inode);
460 struct gfs2_sbd *sdp = GFS2_SB(inode);
461 int error = -EOPNOTSUPP;
462 struct buffer_head *dibh;
463 struct gfs2_alloc *al = &ip->i_alloc;
464 struct gfs2_dinode *di;
465
466 if (gfs2_assert_withdraw(sdp, gfs2_glock_is_locked_by_me(ip->i_gl)))
467 goto fail_nounlock;
468
469 error = gfs2_meta_inode_buffer(ip, &dibh);
470 if (error)
471 goto fail_endtrans;
472
473 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
474 di = (struct gfs2_dinode *)dibh->b_data;
475
476 if (gfs2_is_stuffed(ip)) {
477 u64 file_size;
478 void *kaddr;
479
480 file_size = ((u64)page->index << PAGE_CACHE_SHIFT) + to;
481
482 kaddr = kmap_atomic(page, KM_USER0);
483 memcpy(dibh->b_data + sizeof(struct gfs2_dinode) + from,
484 kaddr + from, to - from);
485 kunmap_atomic(page, KM_USER0);
486
487 SetPageUptodate(page);
488
489 if (inode->i_size < file_size)
490 i_size_write(inode, file_size);
491 } else {
492 if (sdp->sd_args.ar_data == GFS2_DATA_ORDERED ||
493 gfs2_is_jdata(ip))
494 gfs2_page_add_databufs(ip, page, from, to);
495 error = generic_commit_write(file, page, from, to);
496 if (error)
497 goto fail;
498 }
499
500 if (ip->i_di.di_size < inode->i_size) {
501 ip->i_di.di_size = inode->i_size;
502 di->di_size = cpu_to_be64(inode->i_size);
503 }
504
505 di->di_mode = cpu_to_be32(inode->i_mode);
506 di->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
507 di->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
508 di->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);
509
510 brelse(dibh);
511 gfs2_trans_end(sdp);
512 if (al->al_requested) {
513 gfs2_inplace_release(ip);
514 gfs2_quota_unlock(ip);
515 gfs2_alloc_put(ip);
516 }
517 gfs2_glock_dq_m(1, &ip->i_gh);
518 gfs2_holder_uninit(&ip->i_gh);
519 return 0;
520
521fail:
522 brelse(dibh);
523fail_endtrans:
524 gfs2_trans_end(sdp);
525 if (al->al_requested) {
526 gfs2_inplace_release(ip);
527 gfs2_quota_unlock(ip);
528 gfs2_alloc_put(ip);
529 }
530 gfs2_glock_dq_m(1, &ip->i_gh);
531 gfs2_holder_uninit(&ip->i_gh);
532fail_nounlock:
533 ClearPageUptodate(page);
534 return error;
535}
536
537/**
538 * gfs2_bmap - Block map function
539 * @mapping: Address space info
540 * @lblock: The block to map
541 *
542 * Returns: The disk address for the block or 0 on hole or error
543 */
544
545static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
546{
547 struct gfs2_inode *ip = GFS2_I(mapping->host);
548 struct gfs2_holder i_gh;
549 sector_t dblock = 0;
550 int error;
551
552 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
553 if (error)
554 return 0;
555
556 if (!gfs2_is_stuffed(ip))
557 dblock = generic_block_bmap(mapping, lblock, gfs2_get_block);
558
559 gfs2_glock_dq_uninit(&i_gh);
560
561 return dblock;
562}
563
564static void discard_buffer(struct gfs2_sbd *sdp, struct buffer_head *bh)
565{
566 struct gfs2_bufdata *bd;
567
568 gfs2_log_lock(sdp);
569 bd = bh->b_private;
570 if (bd) {
571 bd->bd_bh = NULL;
572 bh->b_private = NULL;
573 }
574 gfs2_log_unlock(sdp);
575
576 lock_buffer(bh);
577 clear_buffer_dirty(bh);
578 bh->b_bdev = NULL;
579 clear_buffer_mapped(bh);
580 clear_buffer_req(bh);
581 clear_buffer_new(bh);
582 clear_buffer_delay(bh);
583 unlock_buffer(bh);
584}
585
586static void gfs2_invalidatepage(struct page *page, unsigned long offset)
587{
588 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
589 struct buffer_head *head, *bh, *next;
590 unsigned int curr_off = 0;
591
592 BUG_ON(!PageLocked(page));
593 if (!page_has_buffers(page))
594 return;
595
596 bh = head = page_buffers(page);
597 do {
598 unsigned int next_off = curr_off + bh->b_size;
599 next = bh->b_this_page;
600
601 if (offset <= curr_off)
602 discard_buffer(sdp, bh);
603
604 curr_off = next_off;
605 bh = next;
606 } while (bh != head);
607
608 if (!offset)
609 try_to_release_page(page, 0);
610
611 return;
612}
613
/**
 * gfs2_direct_IO - perform O_DIRECT I/O on a GFS2 file
 * @rw: READ or WRITE
 * @iocb: the kiocb for this request
 * @iov: the iovec array
 * @offset: file offset of the transfer
 * @nr_segs: number of iovec segments
 *
 * Journaled-data and stuffed files silently fall back to buffered I/O
 * (rv stays 0 in those cases after a successful lock).
 *
 * Returns: number of bytes transferred, or errno
 */
static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/* For reads we take i_mutex ourselves; for writes the VFS is
	   presumed to hold it already -- TODO confirm against caller */
	if (rw == READ)
		mutex_lock(&inode->i_mutex);
	/*
	 * Shared lock, even if its a write, since we do no allocation
	 * on this path. All we need change is atime.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	rv = gfs2_glock_nq_m_atime(1, &gh);
	/* NOTE(review): on failure this still reaches gfs2_glock_dq_m()
	   below on a holder that was never queued -- looks suspect,
	   confirm against the glock API */
	if (rv)
		goto out;

	/* Nothing to transfer beyond EOF */
	if (offset > i_size_read(inode))
		goto out;

	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a journaled file makes any sense. For now we'll silently fall
	 * back to buffered I/O, likewise we do the same for stuffed
	 * files since they are (a) small and (b) unaligned.
	 */
	if (gfs2_is_jdata(ip))
		goto out;

	if (gfs2_is_stuffed(ip))
		goto out;

	rv = blockdev_direct_IO_own_locking(rw, iocb, inode,
					    inode->i_sb->s_bdev,
					    iov, offset, nr_segs,
					    gfs2_get_block_direct, NULL);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	if (rw == READ)
		mutex_unlock(&inode->i_mutex);

	return rv;
}
662
/**
 * stuck_releasepage - We're stuck in gfs2_releasepage().  Print stuff out.
 * @bh: the buffer we're stuck on
 *
 * Best-effort debug dump of the buffer, its bufdata, its glock and (for
 * inode glocks) the inode's metadata cache.  Output is capped at a few
 * reports via the static counter below.
 */

static void stuck_releasepage(struct buffer_head *bh)
{
	struct inode *inode = bh->b_page->mapping->host;
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_bufdata *bd = bh->b_private;
	struct gfs2_glock *gl;
/* Unsynchronised rate limit -- races are harmless for debug output */
static unsigned limit = 0;

	if (limit > 3)
		return;
	limit++;

	fs_warn(sdp, "stuck in gfs2_releasepage() %p\n", inode);
	fs_warn(sdp, "blkno = %llu, bh->b_count = %d\n",
		(unsigned long long)bh->b_blocknr, atomic_read(&bh->b_count));
	fs_warn(sdp, "pinned = %u\n", buffer_pinned(bh));
	fs_warn(sdp, "bh->b_private = %s\n", (bd) ? "!NULL" : "NULL");

	if (!bd)
		return;

	gl = bd->bd_gl;

	fs_warn(sdp, "gl = (%u, %llu)\n",
		gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number);

	fs_warn(sdp, "bd_list_tr = %s, bd_le.le_list = %s\n",
		(list_empty(&bd->bd_list_tr)) ? "no" : "yes",
		(list_empty(&bd->bd_le.le_list)) ? "no" : "yes");

	if (gl->gl_ops == &gfs2_inode_glops) {
		struct gfs2_inode *ip = gl->gl_object;
		unsigned int x;

		if (!ip)
			return;

		fs_warn(sdp, "ip = %llu %llu\n",
			(unsigned long long)ip->i_num.no_formal_ino,
			(unsigned long long)ip->i_num.no_addr);

		for (x = 0; x < GFS2_MAX_META_HEIGHT; x++)
			fs_warn(sdp, "ip->i_cache[%u] = %s\n",
				x, (ip->i_cache[x]) ? "!NULL" : "NULL");
	}
}
715
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0 if a buffer stayed busy, otherwise the result of
 *          try_to_free_buffers()
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;
	/* Deadline after which we assume we're stuck and give up */
	unsigned long t = jiffies + gfs2_tune_get(sdp, gt_stall_secs) * HZ;

	if (!page_has_buffers(page))
		goto out;

	head = bh = page_buffers(page);
	do {
		/* Busy-wait (yielding) for the buffer's refcount to drop */
		while (atomic_read(&bh->b_count)) {
			if (!atomic_read(&aspace->i_writecount))
				return 0;

			if (time_after_eq(jiffies, t)) {
				stuck_releasepage(bh);
				/* should we withdraw here? */
				return 0;
			}

			yield();
		}

		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));

		/* Detach the bufdata under the log lock; only free it when
		   it is no longer on a log element list */
		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			gfs2_assert_warn(sdp, !bd->bd_ail);
			bd->bd_bh = NULL;
			if (!list_empty(&bd->bd_le.le_list))
				bd = NULL;	/* still logged: don't free */
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

out:
	return try_to_free_buffers(page);
}
777
/* Address space operations for regular GFS2 files */
const struct address_space_operations gfs2_file_aops = {
	.writepage = gfs2_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.prepare_write = gfs2_prepare_write,
	.commit_write = gfs2_commit_write,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
};
790
diff --git a/fs/gfs2/ops_address.h b/fs/gfs2/ops_address.h
new file mode 100644
index 000000000000..35aaee4aa7e1
--- /dev/null
+++ b/fs/gfs2/ops_address.h
@@ -0,0 +1,22 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __OPS_ADDRESS_DOT_H__
#define __OPS_ADDRESS_DOT_H__

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>

/* Address space operations for regular files (defined in ops_address.c) */
extern const struct address_space_operations gfs2_file_aops;
/* Block mapping callback handed to the generic mpage/buffer helpers */
extern int gfs2_get_block(struct inode *inode, sector_t lblock,
			  struct buffer_head *bh_result, int create);
/* Strip GFS2 per-buffer metadata before the VM releases a page */
extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);

#endif /* __OPS_ADDRESS_DOT_H__ */
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
new file mode 100644
index 000000000000..00041b1b8025
--- /dev/null
+++ b/fs/gfs2/ops_dentry.c
@@ -0,0 +1,119 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/smp_lock.h>
16#include <linux/gfs2_ondisk.h>
17#include <linux/crc32.h>
18#include <linux/lm_interface.h>
19
20#include "gfs2.h"
21#include "incore.h"
22#include "dir.h"
23#include "glock.h"
24#include "ops_dentry.h"
25#include "util.h"
26
/**
 * gfs2_drevalidate - Check directory lookup consistency
 * @dentry: the mapping to check
 * @nd: nameidata from the VFS (unused here)
 *
 * Check to make sure the lookup necessary to arrive at this inode from its
 * parent is still good.  The parent directory is locked shared and the
 * on-disk directory entry is re-read and compared with the dcache.
 *
 * Returns: 1 if the dentry is ok, 0 if it isn't
 */

static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct dentry *parent = dget_parent(dentry);
	struct gfs2_sbd *sdp = GFS2_SB(parent->d_inode);
	struct gfs2_inode *dip = GFS2_I(parent->d_inode);
	struct inode *inode = dentry->d_inode;
	struct gfs2_holder d_gh;
	struct gfs2_inode *ip;
	struct gfs2_inum inum;
	unsigned int type;
	int error;

	if (inode && is_bad_inode(inode))
		goto invalid;

	/* With local caching the dcache can be trusted without re-reading
	   the directory from disk */
	if (sdp->sd_args.ar_localcaching)
		goto valid;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		goto fail;

	error = gfs2_dir_search(parent->d_inode, &dentry->d_name, &inum, &type);
	switch (error) {
	case 0:
		/* Entry exists on disk but our dentry is negative: stale */
		if (!inode)
			goto invalid_gunlock;
		break;
	case -ENOENT:
		/* No such entry: a negative dentry is (still) correct */
		if (!inode)
			goto valid_gunlock;
		goto invalid_gunlock;
	default:
		goto fail_gunlock;
	}

	ip = GFS2_I(inode);

	/* The name now refers to a different inode: stale */
	if (!gfs2_inum_equal(&ip->i_num, &inum))
		goto invalid_gunlock;

	/* Same inum but a different file type: on-disk inconsistency */
	if (IF2DT(ip->i_di.di_mode) != type) {
		gfs2_consist_inode(dip);
		goto fail_gunlock;
	}

valid_gunlock:
	gfs2_glock_dq_uninit(&d_gh);
valid:
	dput(parent);
	return 1;

invalid_gunlock:
	gfs2_glock_dq_uninit(&d_gh);
invalid:
	/* Never drop a directory that still has mountpoints beneath it */
	if (inode && S_ISDIR(inode->i_mode)) {
		if (have_submounts(dentry))
			goto valid;
		shrink_dcache_parent(dentry);
	}
	d_drop(dentry);
	dput(parent);
	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&d_gh);
fail:
	dput(parent);
	return 0;
}
108
109static int gfs2_dhash(struct dentry *dentry, struct qstr *str)
110{
111 str->hash = gfs2_disk_hash(str->name, str->len);
112 return 0;
113}
114
/* Dentry operations for GFS2: on-disk revalidation plus the GFS2
 * directory hash for name hashing */
struct dentry_operations gfs2_dops = {
	.d_revalidate = gfs2_drevalidate,
	.d_hash = gfs2_dhash,
};
119
diff --git a/fs/gfs2/ops_dentry.h b/fs/gfs2/ops_dentry.h
new file mode 100644
index 000000000000..5caa3db4d3f5
--- /dev/null
+++ b/fs/gfs2/ops_dentry.h
@@ -0,0 +1,17 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __OPS_DENTRY_DOT_H__
#define __OPS_DENTRY_DOT_H__

#include <linux/dcache.h>

/* GFS2 dentry operations (defined in ops_dentry.c) */
extern struct dentry_operations gfs2_dops;

#endif /* __OPS_DENTRY_DOT_H__ */
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
new file mode 100644
index 000000000000..86127d93bd35
--- /dev/null
+++ b/fs/gfs2/ops_export.c
@@ -0,0 +1,298 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/gfs2_ondisk.h>
16#include <linux/crc32.h>
17#include <linux/lm_interface.h>
18
19#include "gfs2.h"
20#include "incore.h"
21#include "dir.h"
22#include "glock.h"
23#include "glops.h"
24#include "inode.h"
25#include "ops_export.h"
26#include "rgrp.h"
27#include "util.h"
28
/**
 * gfs2_decode_fh - Decode an NFS file handle
 * @sb: the superblock
 * @fh: the handle as an array of big-endian 32-bit words
 * @fh_len: number of words in @fh
 * @fh_type: the handle type (must equal @fh_len here)
 * @acceptable: callback passed through to find_exported_dentry()
 * @context: opaque argument for @acceptable
 *
 * Returns: the dentry from find_exported_dentry(), or NULL if the
 *          handle is malformed
 */
static struct dentry *gfs2_decode_fh(struct super_block *sb,
				     __u32 *fh,
				     int fh_len,
				     int fh_type,
				     int (*acceptable)(void *context,
						       struct dentry *dentry),
				     void *context)
{
	struct gfs2_fh_obj fh_obj;
	struct gfs2_inum *this, parent;

	if (fh_type != fh_len)
		return NULL;

	this = &fh_obj.this;
	fh_obj.imode = DT_UNKNOWN;
	memset(&parent, 0, sizeof(struct gfs2_inum));

	switch (fh_type) {
	case GFS2_LARGE_FH_SIZE:
		/* Large handles additionally carry the parent inum and the
		   child's mode (words 4..8) */
		parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
		parent.no_formal_ino |= be32_to_cpu(fh[5]);
		parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
		parent.no_addr |= be32_to_cpu(fh[7]);
		fh_obj.imode = be32_to_cpu(fh[8]);
		/* fall through -- both sizes carry the target inum */
	case GFS2_SMALL_FH_SIZE:
		this->no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
		this->no_formal_ino |= be32_to_cpu(fh[1]);
		this->no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
		this->no_addr |= be32_to_cpu(fh[3]);
		break;
	default:
		return NULL;
	}

	return gfs2_export_ops.find_exported_dentry(sb, &fh_obj, &parent,
						    acceptable, context);
}
67
/**
 * gfs2_encode_fh - Encode an NFS file handle for a dentry
 * @dentry: the object to encode
 * @fh: output buffer of 32-bit words
 * @len: in: capacity of @fh in words; out: number of words used
 * @connectable: nonzero if the parent must be encoded too
 *
 * Each inum is stored as four big-endian words (formal ino then block
 * address, high word first); connectable handles append the parent's
 * inum, the child's mode, and a pad word.
 *
 * Returns: the handle size used, or 255 on a too-small buffer
 *          (255 appears to be the exportfs failure convention --
 *          confirm against fs/exportfs)
 */
static int gfs2_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
			  int connectable)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (*len < GFS2_SMALL_FH_SIZE ||
	    (connectable && *len < GFS2_LARGE_FH_SIZE))
		return 255;

	fh[0] = ip->i_num.no_formal_ino >> 32;
	fh[0] = cpu_to_be32(fh[0]);
	fh[1] = ip->i_num.no_formal_ino & 0xFFFFFFFF;
	fh[1] = cpu_to_be32(fh[1]);
	fh[2] = ip->i_num.no_addr >> 32;
	fh[2] = cpu_to_be32(fh[2]);
	fh[3] = ip->i_num.no_addr & 0xFFFFFFFF;
	fh[3] = cpu_to_be32(fh[3]);
	*len = GFS2_SMALL_FH_SIZE;

	if (!connectable || inode == sb->s_root->d_inode)
		return *len;

	/* Pin the parent inode under d_lock so it cannot go away while
	   we copy its inode number out */
	spin_lock(&dentry->d_lock);
	inode = dentry->d_parent->d_inode;
	ip = GFS2_I(inode);
	igrab(inode);
	spin_unlock(&dentry->d_lock);

	fh[4] = ip->i_num.no_formal_ino >> 32;
	fh[4] = cpu_to_be32(fh[4]);
	fh[5] = ip->i_num.no_formal_ino & 0xFFFFFFFF;
	fh[5] = cpu_to_be32(fh[5]);
	fh[6] = ip->i_num.no_addr >> 32;
	fh[6] = cpu_to_be32(fh[6]);
	fh[7] = ip->i_num.no_addr & 0xFFFFFFFF;
	fh[7] = cpu_to_be32(fh[7]);

	fh[8] = cpu_to_be32(inode->i_mode);
	fh[9] = 0;	/* pad to double word */
	*len = GFS2_LARGE_FH_SIZE;

	iput(inode);

	return *len;
}
115
/* State for gfs2_get_name()'s directory scan */
struct get_name_filldir {
	struct gfs2_inum inum;	/* inode number being searched for */
	char *name;		/* output buffer for the matching name */
};
120
121static int get_name_filldir(void *opaque, const char *name, unsigned int length,
122 u64 offset, struct gfs2_inum *inum,
123 unsigned int type)
124{
125 struct get_name_filldir *gnfd = (struct get_name_filldir *)opaque;
126
127 if (!gfs2_inum_equal(inum, &gnfd->inum))
128 return 0;
129
130 memcpy(gnfd->name, name, length);
131 gnfd->name[length] = 0;
132
133 return 1;
134}
135
/**
 * gfs2_get_name - Find the name of @child within directory @parent
 * @parent: the directory dentry
 * @name: output buffer, NUL-terminated on success
 * @child: the dentry whose name is wanted
 *
 * Scans the directory under a shared glock, matching entries by inode
 * number via get_name_filldir().
 *
 * Returns: errno; -ENOENT if the scan completed with no match
 */
static int gfs2_get_name(struct dentry *parent, char *name,
			 struct dentry *child)
{
	struct inode *dir = parent->d_inode;
	struct inode *inode = child->d_inode;
	struct gfs2_inode *dip, *ip;
	struct get_name_filldir gnfd;
	struct gfs2_holder gh;
	u64 offset = 0;
	int error;

	if (!dir)
		return -EINVAL;

	if (!S_ISDIR(dir->i_mode) || !inode)
		return -EINVAL;

	dip = GFS2_I(dir);
	ip = GFS2_I(inode);

	/* Empty the buffer so "no match" is detectable afterwards */
	*name = 0;
	gnfd.inum = ip->i_num;
	gnfd.name = name;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, &offset, &gnfd, get_name_filldir);

	gfs2_glock_dq_uninit(&gh);

	/* Clean read but buffer never filled: no entry matched */
	if (!error && !*name)
		error = -ENOENT;

	return error;
}
173
174static struct dentry *gfs2_get_parent(struct dentry *child)
175{
176 struct qstr dotdot;
177 struct inode *inode;
178 struct dentry *dentry;
179
180 gfs2_str2qstr(&dotdot, "..");
181 inode = gfs2_lookupi(child->d_inode, &dotdot, 1, NULL);
182
183 if (!inode)
184 return ERR_PTR(-ENOENT);
185 /*
186 * In case of an error, @inode carries the error value, and we
187 * have to return that as a(n invalid) pointer to dentry.
188 */
189 if (IS_ERR(inode))
190 return ERR_PTR(PTR_ERR(inode));
191
192 dentry = d_alloc_anon(inode);
193 if (!dentry) {
194 iput(inode);
195 return ERR_PTR(-ENOMEM);
196 }
197
198 return dentry;
199}
200
/**
 * gfs2_get_dentry - Obtain a dentry for a decoded NFS file handle
 * @sb: the superblock
 * @inum_obj: a struct gfs2_fh_obj holding the target inum and mode
 *
 * Tries the inode cache first; on a miss, verifies against the resource
 * group bitmap that the block really holds a dinode before instantiating
 * a new inode.  System (internal) files are never exported.
 *
 * Returns: the dentry, or ERR_PTR(errno)
 */
static struct dentry *gfs2_get_dentry(struct super_block *sb, void *inum_obj)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_fh_obj *fh_obj = (struct gfs2_fh_obj *)inum_obj;
	struct gfs2_inum *inum = &fh_obj->this;
	struct gfs2_holder i_gh, ri_gh, rgd_gh;
	struct gfs2_rgrpd *rgd;
	struct inode *inode;
	struct dentry *dentry;
	int error;

	/* System files? */

	inode = gfs2_ilookup(sb, inum);
	if (inode) {
		/* Same block but a different generation: the handle refers
		   to a deleted, since-reused inode -- stale */
		if (GFS2_I(inode)->i_num.no_formal_ino != inum->no_formal_ino) {
			iput(inode);
			return ERR_PTR(-ESTALE);
		}
		goto out_inode;
	}

	error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops,
				  LM_ST_SHARED, LM_FLAG_ANY | GL_LOCAL_EXCL,
				  &i_gh);
	if (error)
		return ERR_PTR(error);

	/* Check the block's state in its resource group bitmap to make
	   sure it actually contains a dinode */
	error = gfs2_rindex_hold(sdp, &ri_gh);
	if (error)
		goto fail;

	error = -EINVAL;
	rgd = gfs2_blk2rgrpd(sdp, inum->no_addr);
	if (!rgd)
		goto fail_rindex;

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
	if (error)
		goto fail_rindex;

	error = -ESTALE;
	if (gfs2_get_block_type(rgd, inum->no_addr) != GFS2_BLKST_DINODE)
		goto fail_rgd;

	gfs2_glock_dq_uninit(&rgd_gh);
	gfs2_glock_dq_uninit(&ri_gh);

	inode = gfs2_inode_lookup(sb, inum, fh_obj->imode);
	if (!inode)
		goto fail;
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		goto fail;
	}

	error = gfs2_inode_refresh(GFS2_I(inode));
	if (error) {
		iput(inode);
		goto fail;
	}

	/* Refuse to export internal/system files */
	error = -EIO;
	if (GFS2_I(inode)->i_di.di_flags & GFS2_DIF_SYSTEM) {
		iput(inode);
		goto fail;
	}

	gfs2_glock_dq_uninit(&i_gh);

out_inode:
	dentry = d_alloc_anon(inode);
	if (!dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}

	return dentry;

fail_rgd:
	gfs2_glock_dq_uninit(&rgd_gh);

fail_rindex:
	gfs2_glock_dq_uninit(&ri_gh);

fail:
	gfs2_glock_dq_uninit(&i_gh);
	return ERR_PTR(error);
}
290
/* Hooks used by exportfs/knfsd to serve GFS2 over NFS */
struct export_operations gfs2_export_ops = {
	.decode_fh = gfs2_decode_fh,
	.encode_fh = gfs2_encode_fh,
	.get_name = gfs2_get_name,
	.get_parent = gfs2_get_parent,
	.get_dentry = gfs2_get_dentry,
};
298
diff --git a/fs/gfs2/ops_export.h b/fs/gfs2/ops_export.h
new file mode 100644
index 000000000000..09aca5046fb1
--- /dev/null
+++ b/fs/gfs2/ops_export.h
@@ -0,0 +1,22 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __OPS_EXPORT_DOT_H__
#define __OPS_EXPORT_DOT_H__

/* NFS file handle sizes in 32-bit words: small = target inum only;
 * large = target inum + parent inum + mode + pad (see gfs2_encode_fh) */
#define GFS2_SMALL_FH_SIZE 4
#define GFS2_LARGE_FH_SIZE 10

extern struct export_operations gfs2_export_ops;
/* A decoded handle: the target's inode number plus its mode
 * (DT_UNKNOWN when the handle did not carry one) */
struct gfs2_fh_obj {
	struct gfs2_inum this;
	__u32 imode;
};

#endif /* __OPS_EXPORT_DOT_H__ */
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
new file mode 100644
index 000000000000..3064f133bf3c
--- /dev/null
+++ b/fs/gfs2/ops_file.c
@@ -0,0 +1,661 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/pagemap.h>
16#include <linux/uio.h>
17#include <linux/blkdev.h>
18#include <linux/mm.h>
19#include <linux/smp_lock.h>
20#include <linux/fs.h>
21#include <linux/gfs2_ondisk.h>
22#include <linux/ext2_fs.h>
23#include <linux/crc32.h>
24#include <linux/lm_interface.h>
25#include <asm/uaccess.h>
26
27#include "gfs2.h"
28#include "incore.h"
29#include "bmap.h"
30#include "dir.h"
31#include "glock.h"
32#include "glops.h"
33#include "inode.h"
34#include "lm.h"
35#include "log.h"
36#include "meta_io.h"
37#include "ops_file.h"
38#include "ops_vm.h"
39#include "quota.h"
40#include "rgrp.h"
41#include "trans.h"
42#include "util.h"
43#include "eaops.h"
44
/* For regular, non-NFS readdir: wraps the VFS filldir callback so that
   filldir_func() can also prefetch glocks for the entries it reports. */
struct filldir_reg {
	struct gfs2_sbd *fdr_sbd;	/* superblock, needed for prefetching */
	int fdr_prefetch;		/* nonzero: prefetch glocks for entries */

	filldir_t fdr_filldir;		/* the caller's filldir callback */
	void *fdr_opaque;		/* opaque argument for fdr_filldir */
};
53
/*
 * Most fields left uninitialised to catch anybody who tries to
 * use them. f_flags set to prevent file_accessed() from touching
 * any other part of this. Its use is purely as a flag so that we
 * know (in readpage()) whether or not do to locking.
 *
 * Passed to do_generic_mapping_read() by gfs2_internal_read() below.
 */
struct file gfs2_internal_file_sentinel = {
	.f_flags = O_NOATIME|O_RDONLY,
};
63
64static int gfs2_read_actor(read_descriptor_t *desc, struct page *page,
65 unsigned long offset, unsigned long size)
66{
67 char *kaddr;
68 unsigned long count = desc->count;
69
70 if (size > count)
71 size = count;
72
73 kaddr = kmap(page);
74 memcpy(desc->arg.buf, kaddr + offset, size);
75 kunmap(page);
76
77 desc->count = count - size;
78 desc->written += size;
79 desc->arg.buf += size;
80 return size;
81}
82
/**
 * gfs2_internal_read - read data from an inode for internal (kernel) use
 * @ip: the inode to read from
 * @ra_state: readahead state for the mapping
 * @buf: kernel buffer to copy into
 * @pos: file position, updated on return
 * @size: number of bytes requested
 *
 * Uses the sentinel file above so the generic read path skips the usual
 * per-file locking/accounting.
 *
 * Returns: bytes read, or a negative errno if nothing was read
 */
int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
		       char *buf, loff_t *pos, unsigned size)
{
	struct inode *inode = &ip->i_inode;
	read_descriptor_t desc;
	desc.written = 0;
	desc.arg.buf = buf;
	desc.count = size;
	desc.error = 0;
	do_generic_mapping_read(inode->i_mapping, ra_state,
				&gfs2_internal_file_sentinel, pos, &desc,
				gfs2_read_actor);
	/* Partial success wins over a recorded error. */
	return desc.written ? desc.written : desc.error;
}
97
/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	if (origin == 2) {	/* 2 == SEEK_END: need i_size under glock */
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = remote_llseek(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
	} else
		error = remote_llseek(file, offset, origin);

	return error;
}
128
/**
 * filldir_func - Report a directory entry to the caller of gfs2_dir_read()
 * @opaque: opaque data used by the function (a struct filldir_reg)
 * @name: the name of the directory entry
 * @length: the length of the name
 * @offset: the entry's offset in the directory
 * @inum: the inode number the entry points to
 * @type: the type of inode the entry points to
 *
 * Forwards the entry to the real filldir callback and, if requested,
 * prefetches the entry's inode and iopen glocks to warm up subsequent
 * stat()/open() calls.
 *
 * Returns: 0 on success, 1 if buffer full
 */

static int filldir_func(void *opaque, const char *name, unsigned int length,
			u64 offset, struct gfs2_inum *inum,
			unsigned int type)
{
	struct filldir_reg *fdr = (struct filldir_reg *)opaque;
	struct gfs2_sbd *sdp = fdr->fdr_sbd;
	int error;

	error = fdr->fdr_filldir(fdr->fdr_opaque, name, length, offset,
				 inum->no_addr, type);
	if (error)
		return 1;

	/* Skip "." — prefetching the directory's own glocks is pointless. */
	if (fdr->fdr_prefetch && !(length == 1 && *name == '.')) {
		gfs2_glock_prefetch_num(sdp, inum->no_addr, &gfs2_inode_glops,
				       LM_ST_SHARED, LM_FLAG_TRY | LM_FLAG_ANY);
		gfs2_glock_prefetch_num(sdp, inum->no_addr, &gfs2_iopen_glops,
				       LM_ST_SHARED, LM_FLAG_TRY);
	}

	return 0;
}
163
/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Takes the directory glock (shared, with atime update) for the
 * duration of the walk; entries are relayed through filldir_func().
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct filldir_reg fdr;
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	fdr.fdr_sbd = GFS2_SB(dir);
	fdr.fdr_prefetch = 1;
	fdr.fdr_filldir = filldir;
	fdr.fdr_opaque = dirent;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, GL_ATIME, &d_gh);
	error = gfs2_glock_nq_atime(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, &fdr, filldir_func);

	gfs2_glock_dq_uninit(&d_gh);

	/* gfs2_dir_read() advanced offset; remember it for the next call. */
	file->f_pos = offset;

	return error;
}
202
203/**
204 * fsflags_cvt
205 * @table: A table of 32 u32 flags
206 * @val: a 32 bit value to convert
207 *
208 * This function can be used to convert between fsflags values and
209 * GFS2's own flags values.
210 *
211 * Returns: the converted flags
212 */
213static u32 fsflags_cvt(const u32 *table, u32 val)
214{
215 u32 res = 0;
216 while(val) {
217 if (val & 1)
218 res |= *table;
219 table++;
220 val >>= 1;
221 }
222 return res;
223}
224
/* Maps FS_*_FL bit positions (the array indices) to GFS2_DIF_* flags,
   for use with fsflags_cvt() above. */
static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_JDATA,
	[20] = GFS2_DIF_DIRECTIO,
};

/* Inverse direction: GFS2 flag bit positions to FS_*_FL values.  The two
   Inherit* bits intentionally report the same FS_* flag as their non-inherit
   counterparts. */
static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_Jdata] = FS_JOURNAL_DATA_FL,
	[gfs2fl_Directio] = FS_DIRECTIO_FL,
	[gfs2fl_InheritDirectio] = FS_DIRECTIO_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
246
/**
 * gfs2_get_flags - implement FS_IOC_GETFLAGS
 * @filp: the file
 * @ptr: userspace destination for the FS_*_FL flag word
 *
 * Reads the dinode flags under a shared glock and translates them to
 * the generic FS_*_FL representation.
 *
 * Returns: errno
 */
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
	error = gfs2_glock_nq_m_atime(1, &gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_di.di_flags);
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return error;
}
268
/* Flags that can be set by user space via FS_IOC_SETFLAGS; any attempt to
   change a bit outside this mask is rejected in do_gfs2_set_flags(). */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_DIRECTIO|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_INHERIT_DIRECTIO|		\
			     GFS2_DIF_INHERIT_JDATA)
279
/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer for the inode being changed
 * @reqflags: The flags to set (GFS2_DIF_* representation)
 * @mask: Indicates which flags are valid
 *
 * Validates permissions, then writes the new flag word to the dinode
 * inside a transaction, all under an exclusive glock.
 *
 * Returns: errno
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	flags = ip->i_di.di_flags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	/* Nothing to do if no bit actually changes. */
	if ((new_flags ^ flags) == 0)
		goto out;

	/* On directories, JDATA/DIRECTIO changes really toggle the
	   corresponding INHERIT_* bits instead. */
	if (S_ISDIR(inode->i_mode)) {
		if ((new_flags ^ flags) & GFS2_DIF_JDATA)
			new_flags ^= (GFS2_DIF_JDATA|GFS2_DIF_INHERIT_JDATA);
		if ((new_flags ^ flags) & GFS2_DIF_DIRECTIO)
			new_flags ^= (GFS2_DIF_DIRECTIO|GFS2_DIF_INHERIT_DIRECTIO);
	}

	/* Reject any change outside the user-settable mask. */
	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	/* Toggling immutability requires CAP_LINUX_IMMUTABLE. */
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = permission(inode, MAY_WRITE, NULL);
		if (error)
			goto out;
	}

	/* Journal the dinode update. */
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ip->i_di.di_flags = new_flags;
	gfs2_dinode_out(&ip->i_di, bh->b_data);
	brelse(bh);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
347
348static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
349{
350 u32 fsflags, gfsflags;
351 if (get_user(fsflags, ptr))
352 return -EFAULT;
353 gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
354 return do_gfs2_set_flags(filp, gfsflags, ~0);
355}
356
357static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
358{
359 switch(cmd) {
360 case FS_IOC_GETFLAGS:
361 return gfs2_get_flags(filp, (u32 __user *)arg);
362 case FS_IOC_SETFLAGS:
363 return gfs2_set_flags(filp, (u32 __user *)arg);
364 }
365 return -ENOTTY;
366}
367
368
/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which described the mapping
 *
 * Takes the inode glock briefly (shared, with atime) before choosing
 * the vm_ops; shared-writable mappings need the write-capable ops.
 *
 * Returns: 0 or error code
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh);
	error = gfs2_glock_nq_atime(&i_gh);
	if (error) {
		gfs2_holder_uninit(&i_gh);
		return error;
	}

	/* This is VM_MAYWRITE instead of VM_WRITE because a call
	   to mprotect() can turn on VM_WRITE later. */

	if ((vma->vm_flags & (VM_MAYSHARE | VM_MAYWRITE)) ==
	    (VM_MAYSHARE | VM_MAYWRITE))
		vma->vm_ops = &gfs2_vm_ops_sharewrite;
	else
		vma->vm_ops = &gfs2_vm_ops_private;

	gfs2_glock_dq_uninit(&i_gh);

	/* error is 0 here (nonzero paths returned above). */
	return error;
}
403
/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Allocates the per-open struct gfs2_file; for regular files also
 * enforces the O_LARGEFILE limit and propagates the on-disk Direct
 * I/O flag into f_flags, under a shared glock.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	/* private_data must not already be set for a fresh open. */
	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_di.di_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    ip->i_di.di_size > MAX_NON_LFS) {
			error = -EFBIG;
			goto fail_gunlock;
		}

		/* Listen to the Direct I/O flag */

		if (ip->i_di.di_flags & GFS2_DIF_DIRECTIO)
			file->f_flags |= O_DIRECT;

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}
457
458/**
459 * gfs2_close - called to close a struct file
460 * @inode: the inode the struct file belongs to
461 * @file: the struct file being closed
462 *
463 * Returns: errno
464 */
465
466static int gfs2_close(struct inode *inode, struct file *file)
467{
468 struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
469 struct gfs2_file *fp;
470
471 fp = file->private_data;
472 file->private_data = NULL;
473
474 if (gfs2_assert_warn(sdp, fp))
475 return -EIO;
476
477 kfree(fp);
478
479 return 0;
480}
481
/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry (we ignore this)
 * @dentry: the dentry that points to the inode to sync
 * @datasync: ignored — a full log flush is done either way
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct gfs2_inode *ip = GFS2_I(dentry->d_inode);

	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);

	return 0;
}
498
/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * With the "localflocks" mount option the lock is handled locally by
 * the VFS; otherwise it is forwarded to the cluster lock manager.
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockname name =
		{ .ln_number = ip->i_num.no_addr,
		  .ln_type = LM_TYPE_PLOCK };

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	/* SGID without group-exec marks mandatory locking — not supported. */
	if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
		return -ENOLCK;

	if (sdp->sd_args.ar_localflocks) {
		if (IS_GETLK(cmd)) {
			struct file_lock tmp;
			int ret;
			ret = posix_test_lock(file, fl, &tmp);
			fl->fl_type = F_UNLCK;
			if (ret)
				memcpy(fl, &tmp, sizeof(struct file_lock));
			return 0;
		} else {
			return posix_lock_file_wait(file, fl);
		}
	}

	/* Cluster-wide plocks go through the lock module. */
	if (IS_GETLK(cmd))
		return gfs2_lm_plock_get(sdp, &name, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return gfs2_lm_punlock(sdp, &name, file, fl);
	else
		return gfs2_lm_plock(sdp, &name, file, cmd, fl);
}
542
/*
 * do_flock - acquire or convert a cluster-wide flock
 * @file: the file pointer
 * @cmd: SETLK or SETLKW (decides whether we may block)
 * @fl: requested lock (F_RDLCK or F_WRLCK)
 *
 * Each open file carries at most one flock glock holder (fp->f_fl_gh),
 * serialized by fp->f_fl_mutex.  Converting between shared and
 * exclusive drops the old holder (and the local VFS lock) first.
 *
 * Returns: errno; -EAGAIN when a non-blocking try fails
 */
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file->f_dentry->d_inode);
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	/* Only SETLKW may block on the glock request. */
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		/* Already hold the glock in the wanted state: done. */
		if (fl_gh->gh_state == state)
			goto out;
		/* Converting: keep a ref on the glock across the dq/uninit,
		   and release the local VFS flock first. */
		gfs2_glock_hold(gl);
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_uninit(fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode),
				       ip->i_num.no_addr, &gfs2_flock_glops,
				       CREATE, &gl);
		if (error)
			goto out;
	}

	/* The holder takes its own reference; drop ours. */
	gfs2_holder_init(gl, state, flags, fl_gh);
	gfs2_glock_put(gl);

	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		/* Glock granted: record the flock locally too. */
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
591
592static void do_unflock(struct file *file, struct file_lock *fl)
593{
594 struct gfs2_file *fp = file->private_data;
595 struct gfs2_holder *fl_gh = &fp->f_fl_gh;
596
597 mutex_lock(&fp->f_fl_mutex);
598 flock_lock_file_wait(file, fl);
599 if (fl_gh->gh_gl)
600 gfs2_glock_dq_uninit(fl_gh);
601 mutex_unlock(&fp->f_fl_mutex);
602}
603
604/**
605 * gfs2_flock - acquire/release a flock lock on a file
606 * @file: the file pointer
607 * @cmd: either modify or retrieve lock state, possibly wait
608 * @fl: type and range of lock
609 *
610 * Returns: errno
611 */
612
613static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
614{
615 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
616 struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
617
618 if (!(fl->fl_flags & FL_FLOCK))
619 return -ENOLCK;
620 if ((ip->i_di.di_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
621 return -ENOLCK;
622
623 if (sdp->sd_args.ar_localflocks)
624 return flock_lock_file_wait(file, fl);
625
626 if (fl->fl_type == F_UNLCK) {
627 do_unflock(file, fl);
628 return 0;
629 } else {
630 return do_flock(file, cmd, fl);
631 }
632}
633
/* File operations for regular files: generic read/write/splice paths plus
   GFS2-specific seek, mmap, open/close and cluster locking. */
const struct file_operations gfs2_file_fops = {
	.llseek = gfs2_llseek,
	.read = do_sync_read,
	.aio_read = generic_file_aio_read,
	.write = do_sync_write,
	.aio_write = generic_file_aio_write,
	.unlocked_ioctl = gfs2_ioctl,
	.mmap = gfs2_mmap,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.sendfile = generic_file_sendfile,
	.flock = gfs2_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = generic_file_splice_write,
};
651
/* File operations for directories: readdir plus the shared GFS2 ioctl,
   open/close and locking entry points. */
const struct file_operations gfs2_dir_fops = {
	.readdir = gfs2_readdir,
	.unlocked_ioctl = gfs2_ioctl,
	.open = gfs2_open,
	.release = gfs2_close,
	.fsync = gfs2_fsync,
	.lock = gfs2_lock,
	.flock = gfs2_flock,
};
661
diff --git a/fs/gfs2/ops_file.h b/fs/gfs2/ops_file.h
new file mode 100644
index 000000000000..ce319f89ec8e
--- /dev/null
+++ b/fs/gfs2/ops_file.h
@@ -0,0 +1,24 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __OPS_FILE_DOT_H__
#define __OPS_FILE_DOT_H__

#include <linux/fs.h>
struct gfs2_inode;

/* Sentinel struct file used by gfs2_internal_read() to bypass the usual
   per-file accounting in the generic read path. */
extern struct file gfs2_internal_file_sentinel;
/* Read from an inode into a kernel buffer; returns bytes read or -errno. */
extern int gfs2_internal_read(struct gfs2_inode *ip,
			      struct file_ra_state *ra_state,
			      char *buf, loff_t *pos, unsigned size);

extern const struct file_operations gfs2_file_fops;
extern const struct file_operations gfs2_dir_fops;

#endif /* __OPS_FILE_DOT_H__ */
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
new file mode 100644
index 000000000000..178b33911843
--- /dev/null
+++ b/fs/gfs2/ops_fstype.c
@@ -0,0 +1,928 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/blkdev.h>
16#include <linux/kthread.h>
17#include <linux/namei.h>
18#include <linux/mount.h>
19#include <linux/gfs2_ondisk.h>
20#include <linux/lm_interface.h>
21
22#include "gfs2.h"
23#include "incore.h"
24#include "daemon.h"
25#include "glock.h"
26#include "glops.h"
27#include "inode.h"
28#include "lm.h"
29#include "mount.h"
30#include "ops_export.h"
31#include "ops_fstype.h"
32#include "ops_super.h"
33#include "recovery.h"
34#include "rgrp.h"
35#include "super.h"
36#include "sys.h"
37#include "util.h"
38
/* Passed as the 'undo' argument of the init_* helpers below: DO performs
   the setup, UNDO tears a previously successful setup down. */
#define DO 0
#define UNDO 1

/* Dentry operations for GFS2 — presumably defined in ops_dentry.c;
   TODO confirm. */
extern struct dentry_operations gfs2_dops;
43
/*
 * init_sbd - allocate and initialise the in-core superblock
 * @sb: the VFS superblock to attach it to
 *
 * Zero-allocates a struct gfs2_sbd, links it to @sb both ways, and
 * initialises all of its locks, mutexes and list heads.
 *
 * Returns: the new gfs2_sbd, or NULL on allocation failure
 */
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
	struct gfs2_sbd *sdp;

	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
	if (!sdp)
		return NULL;

	sb->s_fs_info = sdp;
	sdp->sd_vfs = sb;

	gfs2_tune_init(&sdp->sd_tune);

	/* Glock reclaim machinery */
	INIT_LIST_HEAD(&sdp->sd_reclaim_list);
	spin_lock_init(&sdp->sd_reclaim_lock);
	init_waitqueue_head(&sdp->sd_reclaim_wq);

	mutex_init(&sdp->sd_inum_mutex);
	spin_lock_init(&sdp->sd_statfs_spin);
	mutex_init(&sdp->sd_statfs_mutex);

	/* Resource-group index */
	spin_lock_init(&sdp->sd_rindex_spin);
	mutex_init(&sdp->sd_rindex_mutex);
	INIT_LIST_HEAD(&sdp->sd_rindex_list);
	INIT_LIST_HEAD(&sdp->sd_rindex_mru_list);
	INIT_LIST_HEAD(&sdp->sd_rindex_recent_list);

	/* Journal index */
	INIT_LIST_HEAD(&sdp->sd_jindex_list);
	spin_lock_init(&sdp->sd_jindex_spin);
	mutex_init(&sdp->sd_jindex_mutex);

	/* Quotas */
	INIT_LIST_HEAD(&sdp->sd_quota_list);
	spin_lock_init(&sdp->sd_quota_spin);
	mutex_init(&sdp->sd_quota_mutex);

	/* Log / journal state */
	spin_lock_init(&sdp->sd_log_lock);

	INIT_LIST_HEAD(&sdp->sd_log_le_gl);
	INIT_LIST_HEAD(&sdp->sd_log_le_buf);
	INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
	INIT_LIST_HEAD(&sdp->sd_log_le_rg);
	INIT_LIST_HEAD(&sdp->sd_log_le_databuf);

	mutex_init(&sdp->sd_log_reserve_mutex);
	INIT_LIST_HEAD(&sdp->sd_ail1_list);
	INIT_LIST_HEAD(&sdp->sd_ail2_list);

	init_rwsem(&sdp->sd_log_flush_lock);
	INIT_LIST_HEAD(&sdp->sd_log_flush_list);

	INIT_LIST_HEAD(&sdp->sd_revoke_list);

	mutex_init(&sdp->sd_freeze_lock);

	return sdp;
}
100
/*
 * init_vfs - fill in the VFS-visible superblock fields
 * @sb: the superblock
 * @noatime: bit number to set in sd_flags if the mount asked for noatime
 *
 * Installs the GFS2 super/export operations and takes over atime
 * handling from the VFS.
 */
static void init_vfs(struct super_block *sb, unsigned noatime)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	sb->s_magic = GFS2_MAGIC;
	sb->s_op = &gfs2_super_ops;
	sb->s_export_op = &gfs2_export_ops;
	sb->s_maxbytes = MAX_LFS_FILESIZE;

	/* Remember the caller's noatime request before we clobber s_flags. */
	if (sb->s_flags & (MS_NOATIME | MS_NODIRATIME))
		set_bit(noatime, &sdp->sd_flags);

	/* Don't let the VFS update atimes.  GFS2 handles this itself. */
	sb->s_flags |= MS_NOATIME | MS_NODIRATIME;
}
116
/*
 * init_names - determine the lock protocol and lock table names
 * @sdp: the filesystem
 * @silent: don't complain if the on-disk superblock is invalid
 *
 * Mount options take precedence; missing values are autodetected from
 * the on-disk superblock, and an empty table name falls back to the
 * block device id.
 *
 * Returns: errno
 */
static int init_names(struct gfs2_sbd *sdp, int silent)
{
	struct page *page;
	char *proto, *table;
	int error = 0;

	proto = sdp->sd_args.ar_lockproto;
	table = sdp->sd_args.ar_locktable;

	/* Try to autodetect */

	if (!proto[0] || !table[0]) {
		struct gfs2_sb *sb;
		page = gfs2_read_super(sdp->sd_vfs, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift);
		if (!page)
			return -ENOBUFS;
		sb = kmap(page);
		gfs2_sb_in(&sdp->sd_sb, sb);
		kunmap(page);
		__free_page(page);

		error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
		if (error)
			goto out;

		if (!proto[0])
			proto = sdp->sd_sb.sb_lockproto;
		if (!table[0])
			table = sdp->sd_sb.sb_locktable;
	}

	/* Last resort: use the device name as the lock table name. */
	if (!table[0])
		table = sdp->sd_vfs->s_id;

	snprintf(sdp->sd_proto_name, GFS2_FSNAME_LEN, "%s", proto);
	snprintf(sdp->sd_table_name, GFS2_FSNAME_LEN, "%s", table);

out:
	return error;
}
157
/*
 * init_locking - start glock daemons and take the mount-time glocks
 * @sdp: the filesystem
 * @mount_gh: holder for the mount glock (held exclusively during mount)
 * @undo: if true, tear down a previously successful call instead
 *
 * Starts the scand and glockd threads, then acquires the mount, live,
 * rename and transaction glocks.  The fail_* ladder below doubles as
 * the undo path: undo enters at fail_trans and unwinds everything.
 *
 * Returns: errno
 */
static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
			int undo)
{
	struct task_struct *p;
	int error = 0;

	if (undo)
		goto fail_trans;

	p = kthread_run(gfs2_scand, sdp, "gfs2_scand");
	error = IS_ERR(p);
	if (error) {
		fs_err(sdp, "can't start scand thread: %d\n", error);
		return error;
	}
	sdp->sd_scand_process = p;

	/* One glockd per "num_glockd" mount option. */
	for (sdp->sd_glockd_num = 0;
	     sdp->sd_glockd_num < sdp->sd_args.ar_num_glockd;
	     sdp->sd_glockd_num++) {
		p = kthread_run(gfs2_glockd, sdp, "gfs2_glockd");
		error = IS_ERR(p);
		if (error) {
			fs_err(sdp, "can't start glockd thread: %d\n", error);
			goto fail;
		}
		sdp->sd_glockd_process[sdp->sd_glockd_num] = p;
	}

	error = gfs2_glock_nq_num(sdp,
				  GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
				  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
				  mount_gh);
	if (error) {
		fs_err(sdp, "can't acquire mount glock: %d\n", error);
		goto fail;
	}

	error = gfs2_glock_nq_num(sdp,
				  GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
				  LM_ST_SHARED,
				  LM_FLAG_NOEXP | GL_EXACT,
				  &sdp->sd_live_gh);
	if (error) {
		fs_err(sdp, "can't acquire live glock: %d\n", error);
		goto fail_mount;
	}

	error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
			       CREATE, &sdp->sd_rename_gl);
	if (error) {
		fs_err(sdp, "can't create rename glock: %d\n", error);
		goto fail_live;
	}

	error = gfs2_glock_get(sdp, GFS2_TRANS_LOCK, &gfs2_trans_glops,
			       CREATE, &sdp->sd_trans_gl);
	if (error) {
		fs_err(sdp, "can't create transaction glock: %d\n", error);
		goto fail_rename;
	}
	set_bit(GLF_STICKY, &sdp->sd_trans_gl->gl_flags);

	return 0;

fail_trans:
	gfs2_glock_put(sdp->sd_trans_gl);
fail_rename:
	gfs2_glock_put(sdp->sd_rename_gl);
fail_live:
	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
fail_mount:
	gfs2_glock_dq_uninit(mount_gh);
fail:
	while (sdp->sd_glockd_num--)
		kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]);

	kthread_stop(sdp->sd_scand_process);
	return error;
}
238
239static struct inode *gfs2_lookup_root(struct super_block *sb,
240 struct gfs2_inum *inum)
241{
242 return gfs2_inode_lookup(sb, inum, DT_DIR);
243}
244
245static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
246{
247 struct super_block *sb = sdp->sd_vfs;
248 struct gfs2_holder sb_gh;
249 struct gfs2_inum *inum;
250 struct inode *inode;
251 int error = 0;
252
253 if (undo) {
254 if (sb->s_root) {
255 dput(sb->s_root);
256 sb->s_root = NULL;
257 }
258 return 0;
259 }
260
261 error = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
262 LM_ST_SHARED, 0, &sb_gh);
263 if (error) {
264 fs_err(sdp, "can't acquire superblock glock: %d\n", error);
265 return error;
266 }
267
268 error = gfs2_read_sb(sdp, sb_gh.gh_gl, silent);
269 if (error) {
270 fs_err(sdp, "can't read superblock: %d\n", error);
271 goto out;
272 }
273
274 /* Set up the buffer cache and SB for real */
275 if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) {
276 error = -EINVAL;
277 fs_err(sdp, "FS block size (%u) is too small for device "
278 "block size (%u)\n",
279 sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev));
280 goto out;
281 }
282 if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
283 error = -EINVAL;
284 fs_err(sdp, "FS block size (%u) is too big for machine "
285 "page size (%u)\n",
286 sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
287 goto out;
288 }
289 sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);
290
291 /* Get the root inode */
292 inum = &sdp->sd_sb.sb_root_dir;
293 if (sb->s_type == &gfs2meta_fs_type)
294 inum = &sdp->sd_sb.sb_master_dir;
295 inode = gfs2_lookup_root(sb, inum);
296 if (IS_ERR(inode)) {
297 error = PTR_ERR(inode);
298 fs_err(sdp, "can't read in root inode: %d\n", error);
299 goto out;
300 }
301
302 sb->s_root = d_alloc_root(inode);
303 if (!sb->s_root) {
304 fs_err(sdp, "can't get root dentry\n");
305 error = -ENOMEM;
306 iput(inode);
307 }
308 sb->s_root->d_op = &gfs2_dops;
309out:
310 gfs2_glock_dq_uninit(&sb_gh);
311 return error;
312}
313
314static int init_journal(struct gfs2_sbd *sdp, int undo)
315{
316 struct gfs2_holder ji_gh;
317 struct task_struct *p;
318 struct gfs2_inode *ip;
319 int jindex = 1;
320 int error = 0;
321
322 if (undo) {
323 jindex = 0;
324 goto fail_recoverd;
325 }
326
327 sdp->sd_jindex = gfs2_lookup_simple(sdp->sd_master_dir, "jindex");
328 if (IS_ERR(sdp->sd_jindex)) {
329 fs_err(sdp, "can't lookup journal index: %d\n", error);
330 return PTR_ERR(sdp->sd_jindex);
331 }
332 ip = GFS2_I(sdp->sd_jindex);
333 set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
334
335 /* Load in the journal index special file */
336
337 error = gfs2_jindex_hold(sdp, &ji_gh);
338 if (error) {
339 fs_err(sdp, "can't read journal index: %d\n", error);
340 goto fail;
341 }
342
343 error = -EINVAL;
344 if (!gfs2_jindex_size(sdp)) {
345 fs_err(sdp, "no journals!\n");
346 goto fail_jindex;
347 }
348
349 if (sdp->sd_args.ar_spectator) {
350 sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
351 sdp->sd_log_blks_free = sdp->sd_jdesc->jd_blocks;
352 } else {
353 if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
354 fs_err(sdp, "can't mount journal #%u\n",
355 sdp->sd_lockstruct.ls_jid);
356 fs_err(sdp, "there are only %u journals (0 - %u)\n",
357 gfs2_jindex_size(sdp),
358 gfs2_jindex_size(sdp) - 1);
359 goto fail_jindex;
360 }
361 sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);
362
363 error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
364 &gfs2_journal_glops,
365 LM_ST_EXCLUSIVE, LM_FLAG_NOEXP,
366 &sdp->sd_journal_gh);
367 if (error) {
368 fs_err(sdp, "can't acquire journal glock: %d\n", error);
369 goto fail_jindex;
370 }
371
372 ip = GFS2_I(sdp->sd_jdesc->jd_inode);
373 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
374 LM_FLAG_NOEXP | GL_EXACT,
375 &sdp->sd_jinode_gh);
376 if (error) {
377 fs_err(sdp, "can't acquire journal inode glock: %d\n",
378 error);
379 goto fail_journal_gh;
380 }
381
382 error = gfs2_jdesc_check(sdp->sd_jdesc);
383 if (error) {
384 fs_err(sdp, "my journal (%u) is bad: %d\n",
385 sdp->sd_jdesc->jd_jid, error);
386 goto fail_jinode_gh;
387 }
388 sdp->sd_log_blks_free = sdp->sd_jdesc->jd_blocks;
389 }
390
391 if (sdp->sd_lockstruct.ls_first) {
392 unsigned int x;
393 for (x = 0; x < sdp->sd_journals; x++) {
394 error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x));
395 if (error) {
396 fs_err(sdp, "error recovering journal %u: %d\n",
397 x, error);
398 goto fail_jinode_gh;
399 }
400 }
401
402 gfs2_lm_others_may_mount(sdp);
403 } else if (!sdp->sd_args.ar_spectator) {
404 error = gfs2_recover_journal(sdp->sd_jdesc);
405 if (error) {
406 fs_err(sdp, "error recovering my journal: %d\n", error);
407 goto fail_jinode_gh;
408 }
409 }
410
411 set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
412 gfs2_glock_dq_uninit(&ji_gh);
413 jindex = 0;
414
415 p = kthread_run(gfs2_recoverd, sdp, "gfs2_recoverd");
416 error = IS_ERR(p);
417 if (error) {
418 fs_err(sdp, "can't start recoverd thread: %d\n", error);
419 goto fail_jinode_gh;
420 }
421 sdp->sd_recoverd_process = p;
422
423 return 0;
424
425fail_recoverd:
426 kthread_stop(sdp->sd_recoverd_process);
427fail_jinode_gh:
428 if (!sdp->sd_args.ar_spectator)
429 gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
430fail_journal_gh:
431 if (!sdp->sd_args.ar_spectator)
432 gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
433fail_jindex:
434 gfs2_jindex_free(sdp);
435 if (jindex)
436 gfs2_glock_dq_uninit(&ji_gh);
437fail:
438 iput(sdp->sd_jindex);
439 return error;
440}
441
442
/*
 * init_inodes - look up the master directory and its system inodes
 * @sdp: the filesystem
 * @undo: if true, tear down a previously successful call instead
 *
 * Reads the master directory, then the journal machinery (via
 * init_journal), the inum, statfs, rindex and quota system inodes.
 * The fail_* ladder below doubles as the undo path, entered at
 * fail_qinode.
 *
 * Returns: errno
 */
static int init_inodes(struct gfs2_sbd *sdp, int undo)
{
	int error = 0;
	struct gfs2_inode *ip;
	struct inode *inode;

	if (undo)
		goto fail_qinode;

	inode = gfs2_lookup_root(sdp->sd_vfs, &sdp->sd_sb.sb_master_dir);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		fs_err(sdp, "can't read in master directory: %d\n", error);
		goto fail;
	}
	sdp->sd_master_dir = inode;

	error = init_journal(sdp, undo);
	if (error)
		goto fail_master;

	/* Read in the master inode number inode */
	sdp->sd_inum_inode = gfs2_lookup_simple(sdp->sd_master_dir, "inum");
	if (IS_ERR(sdp->sd_inum_inode)) {
		error = PTR_ERR(sdp->sd_inum_inode);
		fs_err(sdp, "can't read in inum inode: %d\n", error);
		goto fail_journal;
	}


	/* Read in the master statfs inode */
	sdp->sd_statfs_inode = gfs2_lookup_simple(sdp->sd_master_dir, "statfs");
	if (IS_ERR(sdp->sd_statfs_inode)) {
		error = PTR_ERR(sdp->sd_statfs_inode);
		fs_err(sdp, "can't read in statfs inode: %d\n", error);
		goto fail_inum;
	}

	/* Read in the resource index inode */
	sdp->sd_rindex = gfs2_lookup_simple(sdp->sd_master_dir, "rindex");
	if (IS_ERR(sdp->sd_rindex)) {
		error = PTR_ERR(sdp->sd_rindex);
		fs_err(sdp, "can't get resource index inode: %d\n", error);
		goto fail_statfs;
	}
	ip = GFS2_I(sdp->sd_rindex);
	set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
	/* Force a rindex re-read on first use: mark the cached copy stale. */
	sdp->sd_rindex_vn = ip->i_gl->gl_vn - 1;

	/* Read in the quota inode */
	sdp->sd_quota_inode = gfs2_lookup_simple(sdp->sd_master_dir, "quota");
	if (IS_ERR(sdp->sd_quota_inode)) {
		error = PTR_ERR(sdp->sd_quota_inode);
		fs_err(sdp, "can't get quota file inode: %d\n", error);
		goto fail_rindex;
	}
	return 0;

fail_qinode:
	iput(sdp->sd_quota_inode);
fail_rindex:
	gfs2_clear_rgrpd(sdp);
	iput(sdp->sd_rindex);
fail_statfs:
	iput(sdp->sd_statfs_inode);
fail_inum:
	iput(sdp->sd_inum_inode);
fail_journal:
	init_journal(sdp, UNDO);
fail_master:
	iput(sdp->sd_master_dir);
fail:
	return error;
}
517
/**
 * init_per_node - Look up and lock this node's per-journal system files
 * @sdp: The GFS2 superblock
 * @undo: When set, tear everything down in reverse order instead
 *
 * Each journal id has its own "inum_range", "statfs_change" and
 * "quota_change" file under the per_node directory.  This looks them up
 * by jid and takes an exclusive glock on each.  Spectator mounts have no
 * journal and therefore no per-node state.
 *
 * Returns: errno
 */

static int init_per_node(struct gfs2_sbd *sdp, int undo)
{
	struct inode *pn = NULL;
	char buf[30];
	int error = 0;
	struct gfs2_inode *ip;

	if (sdp->sd_args.ar_spectator)
		return 0;

	if (undo)
		goto fail_qc_gh;

	pn = gfs2_lookup_simple(sdp->sd_master_dir, "per_node");
	if (IS_ERR(pn)) {
		error = PTR_ERR(pn);
		fs_err(sdp, "can't find per_node directory: %d\n", error);
		return error;
	}

	sprintf(buf, "inum_range%u", sdp->sd_jdesc->jd_jid);
	sdp->sd_ir_inode = gfs2_lookup_simple(pn, buf);
	if (IS_ERR(sdp->sd_ir_inode)) {
		error = PTR_ERR(sdp->sd_ir_inode);
		fs_err(sdp, "can't find local \"ir\" file: %d\n", error);
		goto fail;
	}

	sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
	sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
	if (IS_ERR(sdp->sd_sc_inode)) {
		error = PTR_ERR(sdp->sd_sc_inode);
		fs_err(sdp, "can't find local \"sc\" file: %d\n", error);
		goto fail_ir_i;
	}

	sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
	sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
	if (IS_ERR(sdp->sd_qc_inode)) {
		error = PTR_ERR(sdp->sd_qc_inode);
		fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
		goto fail_ut_i;
	}

	/* All lookups done; drop the per_node directory reference */
	iput(pn);
	pn = NULL;

	ip = GFS2_I(sdp->sd_ir_inode);
	error = gfs2_glock_nq_init(ip->i_gl,
				   LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_ir_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"ir\" file: %d\n", error);
		goto fail_qc_i;
	}

	ip = GFS2_I(sdp->sd_sc_inode);
	error = gfs2_glock_nq_init(ip->i_gl,
				   LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_sc_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
		goto fail_ir_gh;
	}

	ip = GFS2_I(sdp->sd_qc_inode);
	error = gfs2_glock_nq_init(ip->i_gl,
				   LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_qc_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
		goto fail_ut_gh;
	}

	return 0;

	/* Unwind ladder: each label undoes the step acquired just above
	   its corresponding goto.  ("ut" labels are legacy names for the
	   sc/statfs-change entries — TODO confirm.) */
fail_qc_gh:
	gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
fail_ut_gh:
	gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
fail_ir_gh:
	gfs2_glock_dq_uninit(&sdp->sd_ir_gh);
fail_qc_i:
	iput(sdp->sd_qc_inode);
fail_ut_i:
	iput(sdp->sd_sc_inode);
fail_ir_i:
	iput(sdp->sd_ir_inode);
fail:
	if (pn)
		iput(pn);
	return error;
}
611
612static int init_threads(struct gfs2_sbd *sdp, int undo)
613{
614 struct task_struct *p;
615 int error = 0;
616
617 if (undo)
618 goto fail_quotad;
619
620 sdp->sd_log_flush_time = jiffies;
621 sdp->sd_jindex_refresh_time = jiffies;
622
623 p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
624 error = IS_ERR(p);
625 if (error) {
626 fs_err(sdp, "can't start logd thread: %d\n", error);
627 return error;
628 }
629 sdp->sd_logd_process = p;
630
631 sdp->sd_statfs_sync_time = jiffies;
632 sdp->sd_quota_sync_time = jiffies;
633
634 p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
635 error = IS_ERR(p);
636 if (error) {
637 fs_err(sdp, "can't start quotad thread: %d\n", error);
638 goto fail;
639 }
640 sdp->sd_quotad_process = p;
641
642 return 0;
643
644
645fail_quotad:
646 kthread_stop(sdp->sd_quotad_process);
647fail:
648 kthread_stop(sdp->sd_logd_process);
649 return error;
650}
651
652/**
653 * fill_super - Read in superblock
654 * @sb: The VFS superblock
655 * @data: Mount options
656 * @silent: Don't complain if it's not a GFS2 filesystem
657 *
658 * Returns: errno
659 */
660
661static int fill_super(struct super_block *sb, void *data, int silent)
662{
663 struct gfs2_sbd *sdp;
664 struct gfs2_holder mount_gh;
665 int error;
666
667 sdp = init_sbd(sb);
668 if (!sdp) {
669 printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n");
670 return -ENOMEM;
671 }
672
673 error = gfs2_mount_args(sdp, (char *)data, 0);
674 if (error) {
675 printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
676 goto fail;
677 }
678
679 init_vfs(sb, SDF_NOATIME);
680
681 /* Set up the buffer cache and fill in some fake block size values
682 to allow us to read-in the on-disk superblock. */
683 sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
684 sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
685 sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
686 GFS2_BASIC_BLOCK_SHIFT;
687 sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
688
689 error = init_names(sdp, silent);
690 if (error)
691 goto fail;
692
693 error = gfs2_sys_fs_add(sdp);
694 if (error)
695 goto fail;
696
697 error = gfs2_lm_mount(sdp, silent);
698 if (error)
699 goto fail_sys;
700
701 error = init_locking(sdp, &mount_gh, DO);
702 if (error)
703 goto fail_lm;
704
705 error = init_sb(sdp, silent, DO);
706 if (error)
707 goto fail_locking;
708
709 error = init_inodes(sdp, DO);
710 if (error)
711 goto fail_sb;
712
713 error = init_per_node(sdp, DO);
714 if (error)
715 goto fail_inodes;
716
717 error = gfs2_statfs_init(sdp);
718 if (error) {
719 fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
720 goto fail_per_node;
721 }
722
723 error = init_threads(sdp, DO);
724 if (error)
725 goto fail_per_node;
726
727 if (!(sb->s_flags & MS_RDONLY)) {
728 error = gfs2_make_fs_rw(sdp);
729 if (error) {
730 fs_err(sdp, "can't make FS RW: %d\n", error);
731 goto fail_threads;
732 }
733 }
734
735 gfs2_glock_dq_uninit(&mount_gh);
736
737 return 0;
738
739fail_threads:
740 init_threads(sdp, UNDO);
741fail_per_node:
742 init_per_node(sdp, UNDO);
743fail_inodes:
744 init_inodes(sdp, UNDO);
745fail_sb:
746 init_sb(sdp, 0, UNDO);
747fail_locking:
748 init_locking(sdp, &mount_gh, UNDO);
749fail_lm:
750 gfs2_gl_hash_clear(sdp, WAIT);
751 gfs2_lm_unmount(sdp);
752 while (invalidate_inodes(sb))
753 yield();
754fail_sys:
755 gfs2_sys_fs_del(sdp);
756fail:
757 kfree(sdp);
758 sb->s_fs_info = NULL;
759 return error;
760}
761
762static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
763 const char *dev_name, void *data, struct vfsmount *mnt)
764{
765 struct super_block *sb;
766 struct gfs2_sbd *sdp;
767 int error = get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt);
768 if (error)
769 goto out;
770 sb = mnt->mnt_sb;
771 sdp = sb->s_fs_info;
772 sdp->sd_gfs2mnt = mnt;
773out:
774 return error;
775}
776
777static int fill_super_meta(struct super_block *sb, struct super_block *new,
778 void *data, int silent)
779{
780 struct gfs2_sbd *sdp = sb->s_fs_info;
781 struct inode *inode;
782 int error = 0;
783
784 new->s_fs_info = sdp;
785 sdp->sd_vfs_meta = sb;
786
787 init_vfs(new, SDF_NOATIME);
788
789 /* Get the master inode */
790 inode = igrab(sdp->sd_master_dir);
791
792 new->s_root = d_alloc_root(inode);
793 if (!new->s_root) {
794 fs_err(sdp, "can't get root dentry\n");
795 error = -ENOMEM;
796 iput(inode);
797 }
798 new->s_root->d_op = &gfs2_dops;
799
800 return error;
801}
802
803static int set_bdev_super(struct super_block *s, void *data)
804{
805 s->s_bdev = data;
806 s->s_dev = s->s_bdev->bd_dev;
807 return 0;
808}
809
810static int test_bdev_super(struct super_block *s, void *data)
811{
812 return s->s_bdev == data;
813}
814
/**
 * get_gfs2_sb - Find the superblock of an already-mounted gfs2 filesystem
 * @dev_name: path of either the block device or a directory on the mount
 *
 * Walks the list of gfs2 superblocks looking for one whose device matches
 * @dev_name (if it resolves to a block device) or whose filesystem
 * contains @dev_name (if it resolves to a directory).
 *
 * Returns: the matching superblock, or NULL if none was found
 */
static struct super_block* get_gfs2_sb(const char *dev_name)
{
	struct kstat stat;
	struct nameidata nd;
	struct file_system_type *fstype;
	struct super_block *sb = NULL, *s;
	struct list_head *l;
	int error;

	error = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
	if (error) {
		printk(KERN_WARNING "GFS2: path_lookup on %s returned error\n",
		       dev_name);
		goto out;
	}
	/* NOTE(review): the return value of vfs_getattr() is ignored; on
	   failure "stat" is read uninitialized below — consider checking */
	error = vfs_getattr(nd.mnt, nd.dentry, &stat);

	/* NOTE(review): get_fs_type() is not NULL-checked and the module
	   reference it takes is never dropped — verify */
	fstype = get_fs_type("gfs2");
	list_for_each(l, &fstype->fs_supers) {
		s = list_entry(l, struct super_block, s_instances);
		if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) ||
		    (S_ISDIR(stat.mode) && s == nd.dentry->d_inode->i_sb)) {
			sb = s;
			goto free_nd;
		}
	}

	printk(KERN_WARNING "GFS2: Unrecognized block device or "
	       "mount point %s", dev_name);

free_nd:
	path_release(&nd);
out:
	return sb;
}
850
851static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
852 const char *dev_name, void *data, struct vfsmount *mnt)
853{
854 int error = 0;
855 struct super_block *sb = NULL, *new;
856 struct gfs2_sbd *sdp;
857 char *gfs2mnt = NULL;
858
859 sb = get_gfs2_sb(dev_name);
860 if (!sb) {
861 printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");
862 error = -ENOENT;
863 goto error;
864 }
865 sdp = (struct gfs2_sbd*) sb->s_fs_info;
866 if (sdp->sd_vfs_meta) {
867 printk(KERN_WARNING "GFS2: gfs2meta mount already exists\n");
868 error = -EBUSY;
869 goto error;
870 }
871 mutex_lock(&sb->s_bdev->bd_mount_mutex);
872 new = sget(fs_type, test_bdev_super, set_bdev_super, sb->s_bdev);
873 mutex_unlock(&sb->s_bdev->bd_mount_mutex);
874 if (IS_ERR(new)) {
875 error = PTR_ERR(new);
876 goto error;
877 }
878 module_put(fs_type->owner);
879 new->s_flags = flags;
880 strlcpy(new->s_id, sb->s_id, sizeof(new->s_id));
881 sb_set_blocksize(new, sb->s_blocksize);
882 error = fill_super_meta(sb, new, data, flags & MS_SILENT ? 1 : 0);
883 if (error) {
884 up_write(&new->s_umount);
885 deactivate_super(new);
886 goto error;
887 }
888
889 new->s_flags |= MS_ACTIVE;
890
891 /* Grab a reference to the gfs2 mount point */
892 atomic_inc(&sdp->sd_gfs2mnt->mnt_count);
893 return simple_set_mnt(mnt, new);
894error:
895 if (gfs2mnt)
896 kfree(gfs2mnt);
897 return error;
898}
899
/* Tear down a normal gfs2 mount. */
static void gfs2_kill_sb(struct super_block *sb)
{
	kill_block_super(sb);
}
904
/* Tear down a gfs2meta mount and release its hold on the gfs2 mount. */
static void gfs2_kill_sb_meta(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	generic_shutdown_super(sb);
	/* Allow a new meta mount and drop the vfsmount reference taken
	   in gfs2_get_sb_meta() */
	sdp->sd_vfs_meta = NULL;
	atomic_dec(&sdp->sd_gfs2mnt->mnt_count);
}
912
/* The normal gfs2 filesystem, mounted on a block device. */
struct file_system_type gfs2_fs_type = {
	.name = "gfs2",
	.fs_flags = FS_REQUIRES_DEV,
	.get_sb = gfs2_get_sb,
	.kill_sb = gfs2_kill_sb,
	.owner = THIS_MODULE,
};
920
/* The gfs2meta filesystem: a second view of an existing gfs2 mount,
   rooted at the hidden master directory of system files. */
struct file_system_type gfs2meta_fs_type = {
	.name = "gfs2meta",
	.fs_flags = FS_REQUIRES_DEV,
	.get_sb = gfs2_get_sb_meta,
	.kill_sb = gfs2_kill_sb_meta,
	.owner = THIS_MODULE,
};
928
diff --git a/fs/gfs2/ops_fstype.h b/fs/gfs2/ops_fstype.h
new file mode 100644
index 000000000000..7cc2c296271b
--- /dev/null
+++ b/fs/gfs2/ops_fstype.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __OPS_FSTYPE_DOT_H__
11#define __OPS_FSTYPE_DOT_H__
12
13#include <linux/fs.h>
14
15extern struct file_system_type gfs2_fs_type;
16extern struct file_system_type gfs2meta_fs_type;
17
18#endif /* __OPS_FSTYPE_DOT_H__ */
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
new file mode 100644
index 000000000000..ef6e5ed70e94
--- /dev/null
+++ b/fs/gfs2/ops_inode.c
@@ -0,0 +1,1151 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/namei.h>
16#include <linux/utsname.h>
17#include <linux/mm.h>
18#include <linux/xattr.h>
19#include <linux/posix_acl.h>
20#include <linux/gfs2_ondisk.h>
21#include <linux/crc32.h>
22#include <linux/lm_interface.h>
23#include <asm/uaccess.h>
24
25#include "gfs2.h"
26#include "incore.h"
27#include "acl.h"
28#include "bmap.h"
29#include "dir.h"
30#include "eaops.h"
31#include "eattr.h"
32#include "glock.h"
33#include "inode.h"
34#include "meta_io.h"
35#include "ops_dentry.h"
36#include "ops_inode.h"
37#include "quota.h"
38#include "rgrp.h"
39#include "trans.h"
40#include "util.h"
41
42/**
43 * gfs2_create - Create a file
44 * @dir: The directory in which to create the file
45 * @dentry: The dentry of the new file
46 * @mode: The mode of the new file
47 *
48 * Returns: errno
49 */
50
51static int gfs2_create(struct inode *dir, struct dentry *dentry,
52 int mode, struct nameidata *nd)
53{
54 struct gfs2_inode *dip = GFS2_I(dir);
55 struct gfs2_sbd *sdp = GFS2_SB(dir);
56 struct gfs2_holder ghs[2];
57 struct inode *inode;
58
59 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
60
61 for (;;) {
62 inode = gfs2_createi(ghs, &dentry->d_name, S_IFREG | mode);
63 if (!IS_ERR(inode)) {
64 gfs2_trans_end(sdp);
65 if (dip->i_alloc.al_rgd)
66 gfs2_inplace_release(dip);
67 gfs2_quota_unlock(dip);
68 gfs2_alloc_put(dip);
69 gfs2_glock_dq_uninit_m(2, ghs);
70 mark_inode_dirty(inode);
71 break;
72 } else if (PTR_ERR(inode) != -EEXIST ||
73 (nd->intent.open.flags & O_EXCL)) {
74 gfs2_holder_uninit(ghs);
75 return PTR_ERR(inode);
76 }
77
78 inode = gfs2_lookupi(dir, &dentry->d_name, 0, nd);
79 if (inode) {
80 if (!IS_ERR(inode)) {
81 gfs2_holder_uninit(ghs);
82 break;
83 } else {
84 gfs2_holder_uninit(ghs);
85 return PTR_ERR(inode);
86 }
87 }
88 }
89
90 d_instantiate(dentry, inode);
91
92 return 0;
93}
94
95/**
96 * gfs2_lookup - Look up a filename in a directory and return its inode
97 * @dir: The directory inode
98 * @dentry: The dentry of the new inode
99 * @nd: passed from Linux VFS, ignored by us
100 *
101 * Called by the VFS layer. Lock dir and call gfs2_lookupi()
102 *
103 * Returns: errno
104 */
105
106static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
107 struct nameidata *nd)
108{
109 struct inode *inode = NULL;
110
111 dentry->d_op = &gfs2_dops;
112
113 inode = gfs2_lookupi(dir, &dentry->d_name, 0, nd);
114 if (inode && IS_ERR(inode))
115 return ERR_PTR(PTR_ERR(inode));
116
117 if (inode)
118 return d_splice_alias(inode, dentry);
119 d_add(dentry, inode);
120
121 return NULL;
122}
123
124/**
125 * gfs2_link - Link to a file
126 * @old_dentry: The inode to link
127 * @dir: Add link to this directory
128 * @dentry: The name of the link
129 *
130 * Link the inode in "old_dentry" into the directory "dir" with the
131 * name in "dentry".
132 *
133 * Returns: errno
134 */
135
136static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
137 struct dentry *dentry)
138{
139 struct gfs2_inode *dip = GFS2_I(dir);
140 struct gfs2_sbd *sdp = GFS2_SB(dir);
141 struct inode *inode = old_dentry->d_inode;
142 struct gfs2_inode *ip = GFS2_I(inode);
143 struct gfs2_holder ghs[2];
144 int alloc_required;
145 int error;
146
147 if (S_ISDIR(ip->i_di.di_mode))
148 return -EPERM;
149
150 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
151 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
152
153 error = gfs2_glock_nq_m(2, ghs);
154 if (error)
155 goto out;
156
157 error = permission(dir, MAY_WRITE | MAY_EXEC, NULL);
158 if (error)
159 goto out_gunlock;
160
161 error = gfs2_dir_search(dir, &dentry->d_name, NULL, NULL);
162 switch (error) {
163 case -ENOENT:
164 break;
165 case 0:
166 error = -EEXIST;
167 default:
168 goto out_gunlock;
169 }
170
171 error = -EINVAL;
172 if (!dip->i_di.di_nlink)
173 goto out_gunlock;
174 error = -EFBIG;
175 if (dip->i_di.di_entries == (u32)-1)
176 goto out_gunlock;
177 error = -EPERM;
178 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
179 goto out_gunlock;
180 error = -EINVAL;
181 if (!ip->i_di.di_nlink)
182 goto out_gunlock;
183 error = -EMLINK;
184 if (ip->i_di.di_nlink == (u32)-1)
185 goto out_gunlock;
186
187 alloc_required = error = gfs2_diradd_alloc_required(dir, &dentry->d_name);
188 if (error < 0)
189 goto out_gunlock;
190 error = 0;
191
192 if (alloc_required) {
193 struct gfs2_alloc *al = gfs2_alloc_get(dip);
194
195 error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
196 if (error)
197 goto out_alloc;
198
199 error = gfs2_quota_check(dip, dip->i_di.di_uid,
200 dip->i_di.di_gid);
201 if (error)
202 goto out_gunlock_q;
203
204 al->al_requested = sdp->sd_max_dirres;
205
206 error = gfs2_inplace_reserve(dip);
207 if (error)
208 goto out_gunlock_q;
209
210 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
211 al->al_rgd->rd_ri.ri_length +
212 2 * RES_DINODE + RES_STATFS +
213 RES_QUOTA, 0);
214 if (error)
215 goto out_ipres;
216 } else {
217 error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0);
218 if (error)
219 goto out_ipres;
220 }
221
222 error = gfs2_dir_add(dir, &dentry->d_name, &ip->i_num,
223 IF2DT(ip->i_di.di_mode));
224 if (error)
225 goto out_end_trans;
226
227 error = gfs2_change_nlink(ip, +1);
228
229out_end_trans:
230 gfs2_trans_end(sdp);
231out_ipres:
232 if (alloc_required)
233 gfs2_inplace_release(dip);
234out_gunlock_q:
235 if (alloc_required)
236 gfs2_quota_unlock(dip);
237out_alloc:
238 if (alloc_required)
239 gfs2_alloc_put(dip);
240out_gunlock:
241 gfs2_glock_dq_m(2, ghs);
242out:
243 gfs2_holder_uninit(ghs);
244 gfs2_holder_uninit(ghs + 1);
245 if (!error) {
246 atomic_inc(&inode->i_count);
247 d_instantiate(dentry, inode);
248 mark_inode_dirty(inode);
249 }
250 return error;
251}
252
253/**
254 * gfs2_unlink - Unlink a file
255 * @dir: The inode of the directory containing the file to unlink
256 * @dentry: The file itself
257 *
258 * Unlink a file. Call gfs2_unlinki()
259 *
260 * Returns: errno
261 */
262
263static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
264{
265 struct gfs2_inode *dip = GFS2_I(dir);
266 struct gfs2_sbd *sdp = GFS2_SB(dir);
267 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
268 struct gfs2_holder ghs[2];
269 int error;
270
271 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
272 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
273
274 error = gfs2_glock_nq_m(2, ghs);
275 if (error)
276 goto out;
277
278 error = gfs2_unlink_ok(dip, &dentry->d_name, ip);
279 if (error)
280 goto out_gunlock;
281
282 error = gfs2_trans_begin(sdp, 2*RES_DINODE + RES_LEAF + RES_RG_BIT, 0);
283 if (error)
284 goto out_gunlock;
285
286 error = gfs2_dir_del(dip, &dentry->d_name);
287 if (error)
288 goto out_end_trans;
289
290 error = gfs2_change_nlink(ip, -1);
291
292out_end_trans:
293 gfs2_trans_end(sdp);
294out_gunlock:
295 gfs2_glock_dq_m(2, ghs);
296out:
297 gfs2_holder_uninit(ghs);
298 gfs2_holder_uninit(ghs + 1);
299 return error;
300}
301
302/**
303 * gfs2_symlink - Create a symlink
304 * @dir: The directory to create the symlink in
305 * @dentry: The dentry to put the symlink in
306 * @symname: The thing which the link points to
307 *
308 * Returns: errno
309 */
310
311static int gfs2_symlink(struct inode *dir, struct dentry *dentry,
312 const char *symname)
313{
314 struct gfs2_inode *dip = GFS2_I(dir), *ip;
315 struct gfs2_sbd *sdp = GFS2_SB(dir);
316 struct gfs2_holder ghs[2];
317 struct inode *inode;
318 struct buffer_head *dibh;
319 int size;
320 int error;
321
322 /* Must be stuffed with a null terminator for gfs2_follow_link() */
323 size = strlen(symname);
324 if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode) - 1)
325 return -ENAMETOOLONG;
326
327 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
328
329 inode = gfs2_createi(ghs, &dentry->d_name, S_IFLNK | S_IRWXUGO);
330 if (IS_ERR(inode)) {
331 gfs2_holder_uninit(ghs);
332 return PTR_ERR(inode);
333 }
334
335 ip = ghs[1].gh_gl->gl_object;
336
337 ip->i_di.di_size = size;
338
339 error = gfs2_meta_inode_buffer(ip, &dibh);
340
341 if (!gfs2_assert_withdraw(sdp, !error)) {
342 gfs2_dinode_out(&ip->i_di, dibh->b_data);
343 memcpy(dibh->b_data + sizeof(struct gfs2_dinode), symname,
344 size);
345 brelse(dibh);
346 }
347
348 gfs2_trans_end(sdp);
349 if (dip->i_alloc.al_rgd)
350 gfs2_inplace_release(dip);
351 gfs2_quota_unlock(dip);
352 gfs2_alloc_put(dip);
353
354 gfs2_glock_dq_uninit_m(2, ghs);
355
356 d_instantiate(dentry, inode);
357 mark_inode_dirty(inode);
358
359 return 0;
360}
361
362/**
363 * gfs2_mkdir - Make a directory
364 * @dir: The parent directory of the new one
365 * @dentry: The dentry of the new directory
366 * @mode: The mode of the new directory
367 *
368 * Returns: errno
369 */
370
371static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode)
372{
373 struct gfs2_inode *dip = GFS2_I(dir), *ip;
374 struct gfs2_sbd *sdp = GFS2_SB(dir);
375 struct gfs2_holder ghs[2];
376 struct inode *inode;
377 struct buffer_head *dibh;
378 int error;
379
380 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
381
382 inode = gfs2_createi(ghs, &dentry->d_name, S_IFDIR | mode);
383 if (IS_ERR(inode)) {
384 gfs2_holder_uninit(ghs);
385 return PTR_ERR(inode);
386 }
387
388 ip = ghs[1].gh_gl->gl_object;
389
390 ip->i_di.di_nlink = 2;
391 ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
392 ip->i_di.di_flags |= GFS2_DIF_JDATA;
393 ip->i_di.di_payload_format = GFS2_FORMAT_DE;
394 ip->i_di.di_entries = 2;
395
396 error = gfs2_meta_inode_buffer(ip, &dibh);
397
398 if (!gfs2_assert_withdraw(sdp, !error)) {
399 struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
400 struct gfs2_dirent *dent = (struct gfs2_dirent *)(di+1);
401 struct qstr str;
402
403 gfs2_str2qstr(&str, ".");
404 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
405 gfs2_qstr2dirent(&str, GFS2_DIRENT_SIZE(str.len), dent);
406 dent->de_inum = di->di_num; /* already GFS2 endian */
407 dent->de_type = cpu_to_be16(DT_DIR);
408 di->di_entries = cpu_to_be32(1);
409
410 gfs2_str2qstr(&str, "..");
411 dent = (struct gfs2_dirent *)((char*)dent + GFS2_DIRENT_SIZE(1));
412 gfs2_qstr2dirent(&str, dibh->b_size - GFS2_DIRENT_SIZE(1) - sizeof(struct gfs2_dinode), dent);
413
414 gfs2_inum_out(&dip->i_num, &dent->de_inum);
415 dent->de_type = cpu_to_be16(DT_DIR);
416
417 gfs2_dinode_out(&ip->i_di, di);
418
419 brelse(dibh);
420 }
421
422 error = gfs2_change_nlink(dip, +1);
423 gfs2_assert_withdraw(sdp, !error); /* dip already pinned */
424
425 gfs2_trans_end(sdp);
426 if (dip->i_alloc.al_rgd)
427 gfs2_inplace_release(dip);
428 gfs2_quota_unlock(dip);
429 gfs2_alloc_put(dip);
430
431 gfs2_glock_dq_uninit_m(2, ghs);
432
433 d_instantiate(dentry, inode);
434 mark_inode_dirty(inode);
435
436 return 0;
437}
438
439/**
440 * gfs2_rmdir - Remove a directory
441 * @dir: The parent directory of the directory to be removed
442 * @dentry: The dentry of the directory to remove
443 *
444 * Remove a directory. Call gfs2_rmdiri()
445 *
446 * Returns: errno
447 */
448
449static int gfs2_rmdir(struct inode *dir, struct dentry *dentry)
450{
451 struct gfs2_inode *dip = GFS2_I(dir);
452 struct gfs2_sbd *sdp = GFS2_SB(dir);
453 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
454 struct gfs2_holder ghs[2];
455 int error;
456
457 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
458 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
459
460 error = gfs2_glock_nq_m(2, ghs);
461 if (error)
462 goto out;
463
464 error = gfs2_unlink_ok(dip, &dentry->d_name, ip);
465 if (error)
466 goto out_gunlock;
467
468 if (ip->i_di.di_entries < 2) {
469 if (gfs2_consist_inode(ip))
470 gfs2_dinode_print(&ip->i_di);
471 error = -EIO;
472 goto out_gunlock;
473 }
474 if (ip->i_di.di_entries > 2) {
475 error = -ENOTEMPTY;
476 goto out_gunlock;
477 }
478
479 error = gfs2_trans_begin(sdp, 2 * RES_DINODE + 3 * RES_LEAF + RES_RG_BIT, 0);
480 if (error)
481 goto out_gunlock;
482
483 error = gfs2_rmdiri(dip, &dentry->d_name, ip);
484
485 gfs2_trans_end(sdp);
486
487out_gunlock:
488 gfs2_glock_dq_m(2, ghs);
489out:
490 gfs2_holder_uninit(ghs);
491 gfs2_holder_uninit(ghs + 1);
492 return error;
493}
494
495/**
496 * gfs2_mknod - Make a special file
497 * @dir: The directory in which the special file will reside
498 * @dentry: The dentry of the special file
499 * @mode: The mode of the special file
500 * @rdev: The device specification of the special file
501 *
502 */
503
504static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode,
505 dev_t dev)
506{
507 struct gfs2_inode *dip = GFS2_I(dir), *ip;
508 struct gfs2_sbd *sdp = GFS2_SB(dir);
509 struct gfs2_holder ghs[2];
510 struct inode *inode;
511 struct buffer_head *dibh;
512 u32 major = 0, minor = 0;
513 int error;
514
515 switch (mode & S_IFMT) {
516 case S_IFBLK:
517 case S_IFCHR:
518 major = MAJOR(dev);
519 minor = MINOR(dev);
520 break;
521 case S_IFIFO:
522 case S_IFSOCK:
523 break;
524 default:
525 return -EOPNOTSUPP;
526 };
527
528 gfs2_holder_init(dip->i_gl, 0, 0, ghs);
529
530 inode = gfs2_createi(ghs, &dentry->d_name, mode);
531 if (IS_ERR(inode)) {
532 gfs2_holder_uninit(ghs);
533 return PTR_ERR(inode);
534 }
535
536 ip = ghs[1].gh_gl->gl_object;
537
538 ip->i_di.di_major = major;
539 ip->i_di.di_minor = minor;
540
541 error = gfs2_meta_inode_buffer(ip, &dibh);
542
543 if (!gfs2_assert_withdraw(sdp, !error)) {
544 gfs2_dinode_out(&ip->i_di, dibh->b_data);
545 brelse(dibh);
546 }
547
548 gfs2_trans_end(sdp);
549 if (dip->i_alloc.al_rgd)
550 gfs2_inplace_release(dip);
551 gfs2_quota_unlock(dip);
552 gfs2_alloc_put(dip);
553
554 gfs2_glock_dq_uninit_m(2, ghs);
555
556 d_instantiate(dentry, inode);
557 mark_inode_dirty(inode);
558
559 return 0;
560}
561
562/**
563 * gfs2_rename - Rename a file
564 * @odir: Parent directory of old file name
565 * @odentry: The old dentry of the file
566 * @ndir: Parent directory of new file name
567 * @ndentry: The new dentry of the file
568 *
569 * Returns: errno
570 */
571
572static int gfs2_rename(struct inode *odir, struct dentry *odentry,
573 struct inode *ndir, struct dentry *ndentry)
574{
575 struct gfs2_inode *odip = GFS2_I(odir);
576 struct gfs2_inode *ndip = GFS2_I(ndir);
577 struct gfs2_inode *ip = GFS2_I(odentry->d_inode);
578 struct gfs2_inode *nip = NULL;
579 struct gfs2_sbd *sdp = GFS2_SB(odir);
580 struct gfs2_holder ghs[4], r_gh;
581 unsigned int num_gh;
582 int dir_rename = 0;
583 int alloc_required;
584 unsigned int x;
585 int error;
586
587 if (ndentry->d_inode) {
588 nip = GFS2_I(ndentry->d_inode);
589 if (ip == nip)
590 return 0;
591 }
592
593 /* Make sure we aren't trying to move a dirctory into it's subdir */
594
595 if (S_ISDIR(ip->i_di.di_mode) && odip != ndip) {
596 dir_rename = 1;
597
598 error = gfs2_glock_nq_init(sdp->sd_rename_gl,
599 LM_ST_EXCLUSIVE, 0,
600 &r_gh);
601 if (error)
602 goto out;
603
604 error = gfs2_ok_to_move(ip, ndip);
605 if (error)
606 goto out_gunlock_r;
607 }
608
609 num_gh = 1;
610 gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
611 if (odip != ndip) {
612 gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
613 num_gh++;
614 }
615 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
616 num_gh++;
617
618 if (nip) {
619 gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh);
620 num_gh++;
621 }
622
623 error = gfs2_glock_nq_m(num_gh, ghs);
624 if (error)
625 goto out_uninit;
626
627 /* Check out the old directory */
628
629 error = gfs2_unlink_ok(odip, &odentry->d_name, ip);
630 if (error)
631 goto out_gunlock;
632
633 /* Check out the new directory */
634
635 if (nip) {
636 error = gfs2_unlink_ok(ndip, &ndentry->d_name, nip);
637 if (error)
638 goto out_gunlock;
639
640 if (S_ISDIR(nip->i_di.di_mode)) {
641 if (nip->i_di.di_entries < 2) {
642 if (gfs2_consist_inode(nip))
643 gfs2_dinode_print(&nip->i_di);
644 error = -EIO;
645 goto out_gunlock;
646 }
647 if (nip->i_di.di_entries > 2) {
648 error = -ENOTEMPTY;
649 goto out_gunlock;
650 }
651 }
652 } else {
653 error = permission(ndir, MAY_WRITE | MAY_EXEC, NULL);
654 if (error)
655 goto out_gunlock;
656
657 error = gfs2_dir_search(ndir, &ndentry->d_name, NULL, NULL);
658 switch (error) {
659 case -ENOENT:
660 error = 0;
661 break;
662 case 0:
663 error = -EEXIST;
664 default:
665 goto out_gunlock;
666 };
667
668 if (odip != ndip) {
669 if (!ndip->i_di.di_nlink) {
670 error = -EINVAL;
671 goto out_gunlock;
672 }
673 if (ndip->i_di.di_entries == (u32)-1) {
674 error = -EFBIG;
675 goto out_gunlock;
676 }
677 if (S_ISDIR(ip->i_di.di_mode) &&
678 ndip->i_di.di_nlink == (u32)-1) {
679 error = -EMLINK;
680 goto out_gunlock;
681 }
682 }
683 }
684
685 /* Check out the dir to be renamed */
686
687 if (dir_rename) {
688 error = permission(odentry->d_inode, MAY_WRITE, NULL);
689 if (error)
690 goto out_gunlock;
691 }
692
693 alloc_required = error = gfs2_diradd_alloc_required(ndir, &ndentry->d_name);
694 if (error < 0)
695 goto out_gunlock;
696 error = 0;
697
698 if (alloc_required) {
699 struct gfs2_alloc *al = gfs2_alloc_get(ndip);
700
701 error = gfs2_quota_lock(ndip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
702 if (error)
703 goto out_alloc;
704
705 error = gfs2_quota_check(ndip, ndip->i_di.di_uid,
706 ndip->i_di.di_gid);
707 if (error)
708 goto out_gunlock_q;
709
710 al->al_requested = sdp->sd_max_dirres;
711
712 error = gfs2_inplace_reserve(ndip);
713 if (error)
714 goto out_gunlock_q;
715
716 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres +
717 al->al_rgd->rd_ri.ri_length +
718 4 * RES_DINODE + 4 * RES_LEAF +
719 RES_STATFS + RES_QUOTA, 0);
720 if (error)
721 goto out_ipreserv;
722 } else {
723 error = gfs2_trans_begin(sdp, 4 * RES_DINODE +
724 5 * RES_LEAF, 0);
725 if (error)
726 goto out_gunlock;
727 }
728
729 /* Remove the target file, if it exists */
730
731 if (nip) {
732 if (S_ISDIR(nip->i_di.di_mode))
733 error = gfs2_rmdiri(ndip, &ndentry->d_name, nip);
734 else {
735 error = gfs2_dir_del(ndip, &ndentry->d_name);
736 if (error)
737 goto out_end_trans;
738 error = gfs2_change_nlink(nip, -1);
739 }
740 if (error)
741 goto out_end_trans;
742 }
743
744 if (dir_rename) {
745 struct qstr name;
746 gfs2_str2qstr(&name, "..");
747
748 error = gfs2_change_nlink(ndip, +1);
749 if (error)
750 goto out_end_trans;
751 error = gfs2_change_nlink(odip, -1);
752 if (error)
753 goto out_end_trans;
754
755 error = gfs2_dir_mvino(ip, &name, &ndip->i_num, DT_DIR);
756 if (error)
757 goto out_end_trans;
758 } else {
759 struct buffer_head *dibh;
760 error = gfs2_meta_inode_buffer(ip, &dibh);
761 if (error)
762 goto out_end_trans;
763 ip->i_di.di_ctime = get_seconds();
764 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
765 gfs2_dinode_out(&ip->i_di, dibh->b_data);
766 brelse(dibh);
767 }
768
769 error = gfs2_dir_del(odip, &odentry->d_name);
770 if (error)
771 goto out_end_trans;
772
773 error = gfs2_dir_add(ndir, &ndentry->d_name, &ip->i_num,
774 IF2DT(ip->i_di.di_mode));
775 if (error)
776 goto out_end_trans;
777
778out_end_trans:
779 gfs2_trans_end(sdp);
780out_ipreserv:
781 if (alloc_required)
782 gfs2_inplace_release(ndip);
783out_gunlock_q:
784 if (alloc_required)
785 gfs2_quota_unlock(ndip);
786out_alloc:
787 if (alloc_required)
788 gfs2_alloc_put(ndip);
789out_gunlock:
790 gfs2_glock_dq_m(num_gh, ghs);
791out_uninit:
792 for (x = 0; x < num_gh; x++)
793 gfs2_holder_uninit(ghs + x);
794out_gunlock_r:
795 if (dir_rename)
796 gfs2_glock_dq_uninit(&r_gh);
797out:
798 return error;
799}
800
801/**
802 * gfs2_readlink - Read the value of a symlink
803 * @dentry: the symlink
804 * @buf: the buffer to read the symlink data into
805 * @size: the size of the buffer
806 *
807 * Returns: errno
808 */
809
810static int gfs2_readlink(struct dentry *dentry, char __user *user_buf,
811 int user_size)
812{
813 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
814 char array[GFS2_FAST_NAME_SIZE], *buf = array;
815 unsigned int len = GFS2_FAST_NAME_SIZE;
816 int error;
817
818 error = gfs2_readlinki(ip, &buf, &len);
819 if (error)
820 return error;
821
822 if (user_size > len - 1)
823 user_size = len - 1;
824
825 if (copy_to_user(user_buf, buf, user_size))
826 error = -EFAULT;
827 else
828 error = user_size;
829
830 if (buf != array)
831 kfree(buf);
832
833 return error;
834}
835
836/**
837 * gfs2_follow_link - Follow a symbolic link
838 * @dentry: The dentry of the link
839 * @nd: Data that we pass to vfs_follow_link()
840 *
841 * This can handle symlinks of any size. It is optimised for symlinks
842 * under GFS2_FAST_NAME_SIZE.
843 *
844 * Returns: 0 on success or error code
845 */
846
847static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
848{
849 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
850 char array[GFS2_FAST_NAME_SIZE], *buf = array;
851 unsigned int len = GFS2_FAST_NAME_SIZE;
852 int error;
853
854 error = gfs2_readlinki(ip, &buf, &len);
855 if (!error) {
856 error = vfs_follow_link(nd, buf);
857 if (buf != array)
858 kfree(buf);
859 }
860
861 return ERR_PTR(error);
862}
863
864/**
865 * gfs2_permission -
866 * @inode:
867 * @mask:
868 * @nd: passed from Linux VFS, ignored by us
869 *
870 * Returns: errno
871 */
872
873static int gfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
874{
875 struct gfs2_inode *ip = GFS2_I(inode);
876 struct gfs2_holder i_gh;
877 int error;
878
879 if (ip->i_vn == ip->i_gl->gl_vn)
880 return generic_permission(inode, mask, gfs2_check_acl);
881
882 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
883 if (!error) {
884 error = generic_permission(inode, mask, gfs2_check_acl_locked);
885 gfs2_glock_dq_uninit(&i_gh);
886 }
887
888 return error;
889}
890
891static int setattr_size(struct inode *inode, struct iattr *attr)
892{
893 struct gfs2_inode *ip = GFS2_I(inode);
894 int error;
895
896 if (attr->ia_size != ip->i_di.di_size) {
897 error = vmtruncate(inode, attr->ia_size);
898 if (error)
899 return error;
900 }
901
902 error = gfs2_truncatei(ip, attr->ia_size);
903 if (error)
904 return error;
905
906 return error;
907}
908
/*
 * setattr_chown - change the owner and/or group of an inode.
 *
 * Transfers the quota accounting for the inode's blocks from the old
 * (uid,gid) to the new one.  Resources are acquired in the order
 * alloc -> quota lock -> transaction and unwound in reverse through
 * the goto labels at the bottom.
 */
909static int setattr_chown(struct inode *inode, struct iattr *attr)
910{
911	struct gfs2_inode *ip = GFS2_I(inode);
912	struct gfs2_sbd *sdp = GFS2_SB(inode);
913	struct buffer_head *dibh;
914	u32 ouid, ogid, nuid, ngid;
915	int error;
916
917	ouid = ip->i_di.di_uid;
918	ogid = ip->i_di.di_gid;
919	nuid = attr->ia_uid;
920	ngid = attr->ia_gid;
921
	/* Ids that aren't actually changing are marked NO_QUOTA_CHANGE so
	   the quota code skips them. */
922	if (!(attr->ia_valid & ATTR_UID) || ouid == nuid)
923		ouid = nuid = NO_QUOTA_CHANGE;
924	if (!(attr->ia_valid & ATTR_GID) || ogid == ngid)
925		ogid = ngid = NO_QUOTA_CHANGE;
926
927	gfs2_alloc_get(ip);
928
929	error = gfs2_quota_lock(ip, nuid, ngid);
930	if (error)
931		goto out_alloc;
932
	/* Only check quota headroom when ownership actually changes. */
933	if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
934		error = gfs2_quota_check(ip, nuid, ngid);
935		if (error)
936			goto out_gunlock_q;
937	}
938
939	error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_QUOTA, 0);
940	if (error)
941		goto out_gunlock_q;
942
943	error = gfs2_meta_inode_buffer(ip, &dibh);
944	if (error)
945		goto out_end_trans;
946
	/* Apply the change to the VFS inode, mirror it into the in-core
	   dinode, and journal the dinode block. */
947	error = inode_setattr(inode, attr);
948	gfs2_assert_warn(sdp, !error);
949	gfs2_inode_attr_out(ip);
950
951	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
952	gfs2_dinode_out(&ip->i_di, dibh->b_data);
953	brelse(dibh);
954
	/* Transfer block usage: debit the old ids, credit the new. */
955	if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
956		gfs2_quota_change(ip, -ip->i_di.di_blocks, ouid, ogid);
957		gfs2_quota_change(ip, ip->i_di.di_blocks, nuid, ngid);
958	}
959
960out_end_trans:
961	gfs2_trans_end(sdp);
962out_gunlock_q:
963	gfs2_quota_unlock(ip);
964out_alloc:
965	gfs2_alloc_put(ip);
966	return error;
967}
968
969/**
970 * gfs2_setattr - Change attributes on an inode
971 * @dentry: The dentry which is changing
972 * @attr: The structure describing the change
973 *
974 * The VFS layer wants to change one or more of an inodes attributes. Write
975 * that change out to disk.
976 *
977 * Returns: errno
978 */
979
980static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
981{
982 struct inode *inode = dentry->d_inode;
983 struct gfs2_inode *ip = GFS2_I(inode);
984 struct gfs2_holder i_gh;
985 int error;
986
987 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
988 if (error)
989 return error;
990
991 error = -EPERM;
992 if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
993 goto out;
994
995 error = inode_change_ok(inode, attr);
996 if (error)
997 goto out;
998
999 if (attr->ia_valid & ATTR_SIZE)
1000 error = setattr_size(inode, attr);
1001 else if (attr->ia_valid & (ATTR_UID | ATTR_GID))
1002 error = setattr_chown(inode, attr);
1003 else if ((attr->ia_valid & ATTR_MODE) && IS_POSIXACL(inode))
1004 error = gfs2_acl_chmod(ip, attr);
1005 else
1006 error = gfs2_setattr_simple(ip, attr);
1007
1008out:
1009 gfs2_glock_dq_uninit(&i_gh);
1010 if (!error)
1011 mark_inode_dirty(inode);
1012 return error;
1013}
1014
1015/**
1016 * gfs2_getattr - Read out an inode's attributes
1017 * @mnt: The vfsmount the inode is being accessed from
1018 * @dentry: The dentry to stat
1019 * @stat: The inode's stats
1020 *
1021 * Returns: errno
1022 */
1023
1024static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
1025 struct kstat *stat)
1026{
1027 struct inode *inode = dentry->d_inode;
1028 struct gfs2_inode *ip = GFS2_I(inode);
1029 struct gfs2_holder gh;
1030 int error;
1031
1032 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
1033 if (!error) {
1034 generic_fillattr(inode, stat);
1035 gfs2_glock_dq_uninit(&gh);
1036 }
1037
1038 return error;
1039}
1040
1041static int gfs2_setxattr(struct dentry *dentry, const char *name,
1042 const void *data, size_t size, int flags)
1043{
1044 struct inode *inode = dentry->d_inode;
1045 struct gfs2_ea_request er;
1046
1047 memset(&er, 0, sizeof(struct gfs2_ea_request));
1048 er.er_type = gfs2_ea_name2type(name, &er.er_name);
1049 if (er.er_type == GFS2_EATYPE_UNUSED)
1050 return -EOPNOTSUPP;
1051 er.er_data = (char *)data;
1052 er.er_name_len = strlen(er.er_name);
1053 er.er_data_len = size;
1054 er.er_flags = flags;
1055
1056 gfs2_assert_warn(GFS2_SB(inode), !(er.er_flags & GFS2_ERF_MODE));
1057
1058 return gfs2_ea_set(GFS2_I(inode), &er);
1059}
1060
1061static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name,
1062 void *data, size_t size)
1063{
1064 struct gfs2_ea_request er;
1065
1066 memset(&er, 0, sizeof(struct gfs2_ea_request));
1067 er.er_type = gfs2_ea_name2type(name, &er.er_name);
1068 if (er.er_type == GFS2_EATYPE_UNUSED)
1069 return -EOPNOTSUPP;
1070 er.er_data = data;
1071 er.er_name_len = strlen(er.er_name);
1072 er.er_data_len = size;
1073
1074 return gfs2_ea_get(GFS2_I(dentry->d_inode), &er);
1075}
1076
1077static ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
1078{
1079 struct gfs2_ea_request er;
1080
1081 memset(&er, 0, sizeof(struct gfs2_ea_request));
1082 er.er_data = (size) ? buffer : NULL;
1083 er.er_data_len = size;
1084
1085 return gfs2_ea_list(GFS2_I(dentry->d_inode), &er);
1086}
1087
1088static int gfs2_removexattr(struct dentry *dentry, const char *name)
1089{
1090 struct gfs2_ea_request er;
1091
1092 memset(&er, 0, sizeof(struct gfs2_ea_request));
1093 er.er_type = gfs2_ea_name2type(name, &er.er_name);
1094 if (er.er_type == GFS2_EATYPE_UNUSED)
1095 return -EOPNOTSUPP;
1096 er.er_name_len = strlen(er.er_name);
1097
1098 return gfs2_ea_remove(GFS2_I(dentry->d_inode), &er);
1099}
1100
/* Operations for regular files. */
1101struct inode_operations gfs2_file_iops = {
1102	.permission = gfs2_permission,
1103	.setattr = gfs2_setattr,
1104	.getattr = gfs2_getattr,
1105	.setxattr = gfs2_setxattr,
1106	.getxattr = gfs2_getxattr,
1107	.listxattr = gfs2_listxattr,
1108	.removexattr = gfs2_removexattr,
1109};
1110
/* Operations for special files (device nodes, fifos, sockets). */
1111struct inode_operations gfs2_dev_iops = {
1112	.permission = gfs2_permission,
1113	.setattr = gfs2_setattr,
1114	.getattr = gfs2_getattr,
1115	.setxattr = gfs2_setxattr,
1116	.getxattr = gfs2_getxattr,
1117	.listxattr = gfs2_listxattr,
1118	.removexattr = gfs2_removexattr,
1119};
1120
/* Operations for directories: adds the namespace ops (create, lookup,
   link, rename, ...) on top of the common attribute/xattr ops. */
1121struct inode_operations gfs2_dir_iops = {
1122	.create = gfs2_create,
1123	.lookup = gfs2_lookup,
1124	.link = gfs2_link,
1125	.unlink = gfs2_unlink,
1126	.symlink = gfs2_symlink,
1127	.mkdir = gfs2_mkdir,
1128	.rmdir = gfs2_rmdir,
1129	.mknod = gfs2_mknod,
1130	.rename = gfs2_rename,
1131	.permission = gfs2_permission,
1132	.setattr = gfs2_setattr,
1133	.getattr = gfs2_getattr,
1134	.setxattr = gfs2_setxattr,
1135	.getxattr = gfs2_getxattr,
1136	.listxattr = gfs2_listxattr,
1137	.removexattr = gfs2_removexattr,
1138};
1139
/* Operations for symlinks: adds readlink/follow_link. */
1140struct inode_operations gfs2_symlink_iops = {
1141	.readlink = gfs2_readlink,
1142	.follow_link = gfs2_follow_link,
1143	.permission = gfs2_permission,
1144	.setattr = gfs2_setattr,
1145	.getattr = gfs2_getattr,
1146	.setxattr = gfs2_setxattr,
1147	.getxattr = gfs2_getxattr,
1148	.listxattr = gfs2_listxattr,
1149	.removexattr = gfs2_removexattr,
1150};
1151
diff --git a/fs/gfs2/ops_inode.h b/fs/gfs2/ops_inode.h
new file mode 100644
index 000000000000..b15acb4fd34c
--- /dev/null
+++ b/fs/gfs2/ops_inode.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __OPS_INODE_DOT_H__
11#define __OPS_INODE_DOT_H__
12
13#include <linux/fs.h>
14
15extern struct inode_operations gfs2_file_iops;
16extern struct inode_operations gfs2_dir_iops;
17extern struct inode_operations gfs2_symlink_iops;
18extern struct inode_operations gfs2_dev_iops;
19
20#endif /* __OPS_INODE_DOT_H__ */
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
new file mode 100644
index 000000000000..06f06f7773d0
--- /dev/null
+++ b/fs/gfs2/ops_super.c
@@ -0,0 +1,468 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/statfs.h>
16#include <linux/seq_file.h>
17#include <linux/mount.h>
18#include <linux/kthread.h>
19#include <linux/delay.h>
20#include <linux/gfs2_ondisk.h>
21#include <linux/crc32.h>
22#include <linux/lm_interface.h>
23
24#include "gfs2.h"
25#include "incore.h"
26#include "glock.h"
27#include "inode.h"
28#include "lm.h"
29#include "log.h"
30#include "mount.h"
31#include "ops_super.h"
32#include "quota.h"
33#include "recovery.h"
34#include "rgrp.h"
35#include "super.h"
36#include "sys.h"
37#include "util.h"
38#include "trans.h"
39#include "dir.h"
40#include "eattr.h"
41#include "bmap.h"
42
43/**
44 * gfs2_write_inode - Make sure the inode is stable on the disk
45 * @inode: The inode
46 * @sync: synchronous write flag
47 *
48 * Returns: errno
49 */
50
51static int gfs2_write_inode(struct inode *inode, int sync)
52{
53 struct gfs2_inode *ip = GFS2_I(inode);
54
55 /* Check this is a "normal" inode */
56 if (inode->i_private) {
57 if (current->flags & PF_MEMALLOC)
58 return 0;
59 if (sync)
60 gfs2_log_flush(GFS2_SB(inode), ip->i_gl);
61 }
62
63 return 0;
64}
65
66/**
67 * gfs2_put_super - Unmount the filesystem
68 * @sb: The VFS superblock
69 *
70 */
71
72static void gfs2_put_super(struct super_block *sb)
73{
74	struct gfs2_sbd *sdp = sb->s_fs_info;
75	int error;
76
77	if (!sdp)
78		return;
79
	/* The gfs2meta mount type shares state with the primary mount;
	   all teardown happens on the primary gfs2 mount only. */
80	if (!strncmp(sb->s_type->name, "gfs2meta", 8))
81		return; /* Nothing to do */
82
83	/* Unfreeze the filesystem, if we need to */
84
85	mutex_lock(&sdp->sd_freeze_lock);
86	if (sdp->sd_freeze_count)
87		gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
88	mutex_unlock(&sdp->sd_freeze_lock);
89
	/* Stop all daemon threads before touching on-disk state. */
90	kthread_stop(sdp->sd_quotad_process);
91	kthread_stop(sdp->sd_logd_process);
92	kthread_stop(sdp->sd_recoverd_process);
93	while (sdp->sd_glockd_num--)
94		kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]);
95	kthread_stop(sdp->sd_scand_process);
96
97	if (!(sb->s_flags & MS_RDONLY)) {
98		error = gfs2_make_fs_ro(sdp);
99		if (error)
100			gfs2_io_error(sdp);
101	}
102	/* At this point, we're through modifying the disk */
103
104	/* Release stuff */
105
106	iput(sdp->sd_master_dir);
107	iput(sdp->sd_jindex);
108	iput(sdp->sd_inum_inode);
109	iput(sdp->sd_statfs_inode);
110	iput(sdp->sd_rindex);
111	iput(sdp->sd_quota_inode);
112
113	gfs2_glock_put(sdp->sd_rename_gl);
114	gfs2_glock_put(sdp->sd_trans_gl);
115
	/* Spectator mounts never acquired a journal or the per-journal
	   inodes, so they have none of these to release. */
116	if (!sdp->sd_args.ar_spectator) {
117		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
118		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
119		gfs2_glock_dq_uninit(&sdp->sd_ir_gh);
120		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
121		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
122		iput(sdp->sd_ir_inode);
123		iput(sdp->sd_sc_inode);
124		iput(sdp->sd_qc_inode);
125	}
126
127	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
128	gfs2_clear_rgrpd(sdp);
129	gfs2_jindex_free(sdp);
130	/* Take apart glock structures and buffer lists */
131	gfs2_gl_hash_clear(sdp, WAIT);
132	/* Unmount the locking protocol */
133	gfs2_lm_unmount(sdp);
134
135	/* At this point, we're through participating in the lockspace */
136	gfs2_sys_fs_del(sdp);
137	kfree(sdp);
138}
139
140/**
141 * gfs2_write_super - disk commit all incore transactions
142 * @sb: the filesystem
143 *
144 * This function is called every time sync(2) is called.
145 * After this exits, all dirty buffers are synced.
146 */
147
148static void gfs2_write_super(struct super_block *sb)
149{
150 gfs2_log_flush(sb->s_fs_info, NULL);
151}
152
153/**
154 * gfs2_write_super_lockfs - prevent further writes to the filesystem
155 * @sb: the VFS structure for the filesystem
156 *
157 */
158
159static void gfs2_write_super_lockfs(struct super_block *sb)
160{
161 struct gfs2_sbd *sdp = sb->s_fs_info;
162 int error;
163
164 for (;;) {
165 error = gfs2_freeze_fs(sdp);
166 if (!error)
167 break;
168
169 switch (error) {
170 case -EBUSY:
171 fs_err(sdp, "waiting for recovery before freeze\n");
172 break;
173
174 default:
175 fs_err(sdp, "error freezing FS: %d\n", error);
176 break;
177 }
178
179 fs_err(sdp, "retrying...\n");
180 msleep(1000);
181 }
182}
183
184/**
185 * gfs2_unlockfs - reallow writes to the filesystem
186 * @sb: the VFS structure for the filesystem
187 *
188 */
189
190static void gfs2_unlockfs(struct super_block *sb)
191{
192 gfs2_unfreeze_fs(sb->s_fs_info);
193}
194
195/**
196 * gfs2_statfs - Gather and return stats about the filesystem
197 * @sb: The superblock
198 * @statfsbuf: The buffer
199 *
200 * Returns: 0 on success or error code
201 */
202
203static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
204{
205 struct super_block *sb = dentry->d_inode->i_sb;
206 struct gfs2_sbd *sdp = sb->s_fs_info;
207 struct gfs2_statfs_change sc;
208 int error;
209
210 if (gfs2_tune_get(sdp, gt_statfs_slow))
211 error = gfs2_statfs_slow(sdp, &sc);
212 else
213 error = gfs2_statfs_i(sdp, &sc);
214
215 if (error)
216 return error;
217
218 buf->f_type = GFS2_MAGIC;
219 buf->f_bsize = sdp->sd_sb.sb_bsize;
220 buf->f_blocks = sc.sc_total;
221 buf->f_bfree = sc.sc_free;
222 buf->f_bavail = sc.sc_free;
223 buf->f_files = sc.sc_dinodes + sc.sc_free;
224 buf->f_ffree = sc.sc_free;
225 buf->f_namelen = GFS2_FNAMESIZE;
226
227 return 0;
228}
229
230/**
231 * gfs2_remount_fs - called when the FS is remounted
232 * @sb: the filesystem
233 * @flags: the remount flags
234 * @data: extra data passed in (not used right now)
235 *
236 * Returns: errno
237 */
238
239static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
240{
241 struct gfs2_sbd *sdp = sb->s_fs_info;
242 int error;
243
244 error = gfs2_mount_args(sdp, data, 1);
245 if (error)
246 return error;
247
248 if (sdp->sd_args.ar_spectator)
249 *flags |= MS_RDONLY;
250 else {
251 if (*flags & MS_RDONLY) {
252 if (!(sb->s_flags & MS_RDONLY))
253 error = gfs2_make_fs_ro(sdp);
254 } else if (!(*flags & MS_RDONLY) &&
255 (sb->s_flags & MS_RDONLY)) {
256 error = gfs2_make_fs_rw(sdp);
257 }
258 }
259
260 if (*flags & (MS_NOATIME | MS_NODIRATIME))
261 set_bit(SDF_NOATIME, &sdp->sd_flags);
262 else
263 clear_bit(SDF_NOATIME, &sdp->sd_flags);
264
265 /* Don't let the VFS update atimes. GFS2 handles this itself. */
266 *flags |= MS_NOATIME | MS_NODIRATIME;
267
268 return error;
269}
270
271/**
272 * gfs2_clear_inode - Deallocate an inode when VFS is done with it
273 * @inode: The VFS inode
274 *
275 */
276
277static void gfs2_clear_inode(struct inode *inode)
278{
279	/* This tells us its a "real" inode and not one which only
280	 * serves to contain an address space (see rgrp.c, meta_io.c)
281	 * which therefore doesn't have its own glocks.
282	 */
283	if (inode->i_private) {
284		struct gfs2_inode *ip = GFS2_I(inode);
		/* Push cached pages out, assert the glock is already
		   unlocked, then sever the glock<->inode link before
		   dropping our reference to it. */
285		gfs2_glock_inode_squish(inode);
286		gfs2_assert(inode->i_sb->s_fs_info, ip->i_gl->gl_state == LM_ST_UNLOCKED);
287		ip->i_gl->gl_object = NULL;
288		gfs2_glock_schedule_for_reclaim(ip->i_gl);
289		gfs2_glock_put(ip->i_gl);
290		ip->i_gl = NULL;
		/* Drop the iopen glock holder, if this inode ever had one */
291		if (ip->i_iopen_gh.gh_gl)
292			gfs2_glock_dq_uninit(&ip->i_iopen_gh);
293	}
294}
295
296/**
297 * gfs2_show_options - Show mount options for /proc/mounts
298 * @s: seq_file structure
299 * @mnt: vfsmount
300 *
301 * Returns: 0 on success or error code
302 */
303
304static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
305{
306 struct gfs2_sbd *sdp = mnt->mnt_sb->s_fs_info;
307 struct gfs2_args *args = &sdp->sd_args;
308
309 if (args->ar_lockproto[0])
310 seq_printf(s, ",lockproto=%s", args->ar_lockproto);
311 if (args->ar_locktable[0])
312 seq_printf(s, ",locktable=%s", args->ar_locktable);
313 if (args->ar_hostdata[0])
314 seq_printf(s, ",hostdata=%s", args->ar_hostdata);
315 if (args->ar_spectator)
316 seq_printf(s, ",spectator");
317 if (args->ar_ignore_local_fs)
318 seq_printf(s, ",ignore_local_fs");
319 if (args->ar_localflocks)
320 seq_printf(s, ",localflocks");
321 if (args->ar_localcaching)
322 seq_printf(s, ",localcaching");
323 if (args->ar_debug)
324 seq_printf(s, ",debug");
325 if (args->ar_upgrade)
326 seq_printf(s, ",upgrade");
327 if (args->ar_num_glockd != GFS2_GLOCKD_DEFAULT)
328 seq_printf(s, ",num_glockd=%u", args->ar_num_glockd);
329 if (args->ar_posix_acl)
330 seq_printf(s, ",acl");
331 if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
332 char *state;
333 switch (args->ar_quota) {
334 case GFS2_QUOTA_OFF:
335 state = "off";
336 break;
337 case GFS2_QUOTA_ACCOUNT:
338 state = "account";
339 break;
340 case GFS2_QUOTA_ON:
341 state = "on";
342 break;
343 default:
344 state = "unknown";
345 break;
346 }
347 seq_printf(s, ",quota=%s", state);
348 }
349 if (args->ar_suiddir)
350 seq_printf(s, ",suiddir");
351 if (args->ar_data != GFS2_DATA_DEFAULT) {
352 char *state;
353 switch (args->ar_data) {
354 case GFS2_DATA_WRITEBACK:
355 state = "writeback";
356 break;
357 case GFS2_DATA_ORDERED:
358 state = "ordered";
359 break;
360 default:
361 state = "unknown";
362 break;
363 }
364 seq_printf(s, ",data=%s", state);
365 }
366
367 return 0;
368}
369
370/*
371 * We have to (at the moment) hold the inodes main lock to cover
372 * the gap between unlocking the shared lock on the iopen lock and
373 * taking the exclusive lock. I'd rather do a shared -> exclusive
374 * conversion on the iopen lock, but we can change that later. This
375 * is safe, just less efficient.
376 */
377static void gfs2_delete_inode(struct inode *inode)
378{
379	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
380	struct gfs2_inode *ip = GFS2_I(inode);
381	struct gfs2_holder gh;
382	int error;
383
	/* Internal (address-space-only) inodes have no on-disk presence
	   to deallocate. */
384	if (!inode->i_private)
385		goto out;
386
387	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &gh);
388	if (unlikely(error)) {
389		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
390		goto out;
391	}
392
	/* Trade the shared iopen hold for an exclusive one; failure here
	   means some other node still holds the inode open. */
393	gfs2_glock_dq(&ip->i_iopen_gh);
394	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
395	error = gfs2_glock_nq(&ip->i_iopen_gh);
396	if (error)
397		goto out_uninit;
398
	/* Deallocate in order: directory hash table, extended attributes,
	   data blocks, and finally the dinode itself. */
399	if (S_ISDIR(ip->i_di.di_mode) &&
400	    (ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
401		error = gfs2_dir_exhash_dealloc(ip);
402		if (error)
403			goto out_unlock;
404	}
405
406	if (ip->i_di.di_eattr) {
407		error = gfs2_ea_dealloc(ip);
408		if (error)
409			goto out_unlock;
410	}
411
412	if (!gfs2_is_stuffed(ip)) {
413		error = gfs2_file_dealloc(ip);
414		if (error)
415			goto out_unlock;
416	}
417
418	error = gfs2_dinode_dealloc(ip);
419
420out_unlock:
421	gfs2_glock_dq(&ip->i_iopen_gh);
422out_uninit:
423	gfs2_holder_uninit(&ip->i_iopen_gh);
424	gfs2_glock_dq_uninit(&gh);
425	if (error)
426		fs_warn(sdp, "gfs2_delete_inode: %d\n", error);
427out:
428	truncate_inode_pages(&inode->i_data, 0);
429	clear_inode(inode);
430}
431
432
433
434static struct inode *gfs2_alloc_inode(struct super_block *sb)
435{
436 struct gfs2_sbd *sdp = sb->s_fs_info;
437 struct gfs2_inode *ip;
438
439 ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
440 if (ip) {
441 ip->i_flags = 0;
442 ip->i_gl = NULL;
443 ip->i_greedy = gfs2_tune_get(sdp, gt_greedy_default);
444 ip->i_last_pfault = jiffies;
445 }
446 return &ip->i_inode;
447}
448
449static void gfs2_destroy_inode(struct inode *inode)
450{
451 kmem_cache_free(gfs2_inode_cachep, inode);
452}
453
/* GFS2 super_operations: wires the handlers above into the VFS. */
454struct super_operations gfs2_super_ops = {
455	.alloc_inode = gfs2_alloc_inode,
456	.destroy_inode = gfs2_destroy_inode,
457	.write_inode = gfs2_write_inode,
458	.delete_inode = gfs2_delete_inode,
459	.put_super = gfs2_put_super,
460	.write_super = gfs2_write_super,
461	.write_super_lockfs = gfs2_write_super_lockfs,
462	.unlockfs = gfs2_unlockfs,
463	.statfs = gfs2_statfs,
464	.remount_fs = gfs2_remount_fs,
465	.clear_inode = gfs2_clear_inode,
466	.show_options = gfs2_show_options,
467};
468
diff --git a/fs/gfs2/ops_super.h b/fs/gfs2/ops_super.h
new file mode 100644
index 000000000000..9de73f042f78
--- /dev/null
+++ b/fs/gfs2/ops_super.h
@@ -0,0 +1,17 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __OPS_SUPER_DOT_H__
11#define __OPS_SUPER_DOT_H__
12
13#include <linux/fs.h>
14
15extern struct super_operations gfs2_super_ops;
16
17#endif /* __OPS_SUPER_DOT_H__ */
diff --git a/fs/gfs2/ops_vm.c b/fs/gfs2/ops_vm.c
new file mode 100644
index 000000000000..5453d2947ab3
--- /dev/null
+++ b/fs/gfs2/ops_vm.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/mm.h>
16#include <linux/pagemap.h>
17#include <linux/gfs2_ondisk.h>
18#include <linux/lm_interface.h>
19
20#include "gfs2.h"
21#include "incore.h"
22#include "bmap.h"
23#include "glock.h"
24#include "inode.h"
25#include "ops_vm.h"
26#include "quota.h"
27#include "rgrp.h"
28#include "trans.h"
29#include "util.h"
30
/* Ask the glock layer to hold the inode's glock a bit longer after a
 * page fault, pinning the inode for the duration.  A nonzero return
 * from gfs2_glock_be_greedy() appears to mean it did NOT take over the
 * igrab reference, so it is dropped here -- NOTE(review): confirm
 * against the gfs2_glock_be_greedy() contract in glock.c.
 */
31static void pfault_be_greedy(struct gfs2_inode *ip)
32{
33	unsigned int time;
34
	/* i_greedy and i_last_pfault are protected by i_spin. */
35	spin_lock(&ip->i_spin);
36	time = ip->i_greedy;
37	ip->i_last_pfault = jiffies;
38	spin_unlock(&ip->i_spin);
39
40	igrab(&ip->i_inode);
41	if (gfs2_glock_be_greedy(ip->i_gl, time))
42		iput(&ip->i_inode);
43}
44
45static struct page *gfs2_private_nopage(struct vm_area_struct *area,
46 unsigned long address, int *type)
47{
48 struct gfs2_inode *ip = GFS2_I(area->vm_file->f_mapping->host);
49 struct page *result;
50
51 set_bit(GIF_PAGED, &ip->i_flags);
52
53 result = filemap_nopage(area, address, type);
54
55 if (result && result != NOPAGE_OOM)
56 pfault_be_greedy(ip);
57
58 return result;
59}
60
61static int alloc_page_backing(struct gfs2_inode *ip, struct page *page)
62{
63 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
64 unsigned long index = page->index;
65 u64 lblock = index << (PAGE_CACHE_SHIFT -
66 sdp->sd_sb.sb_bsize_shift);
67 unsigned int blocks = PAGE_CACHE_SIZE >> sdp->sd_sb.sb_bsize_shift;
68 struct gfs2_alloc *al;
69 unsigned int data_blocks, ind_blocks;
70 unsigned int x;
71 int error;
72
73 al = gfs2_alloc_get(ip);
74
75 error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
76 if (error)
77 goto out;
78
79 error = gfs2_quota_check(ip, ip->i_di.di_uid, ip->i_di.di_gid);
80 if (error)
81 goto out_gunlock_q;
82
83 gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
84
85 al->al_requested = data_blocks + ind_blocks;
86
87 error = gfs2_inplace_reserve(ip);
88 if (error)
89 goto out_gunlock_q;
90
91 error = gfs2_trans_begin(sdp, al->al_rgd->rd_ri.ri_length +
92 ind_blocks + RES_DINODE +
93 RES_STATFS + RES_QUOTA, 0);
94 if (error)
95 goto out_ipres;
96
97 if (gfs2_is_stuffed(ip)) {
98 error = gfs2_unstuff_dinode(ip, NULL);
99 if (error)
100 goto out_trans;
101 }
102
103 for (x = 0; x < blocks; ) {
104 u64 dblock;
105 unsigned int extlen;
106 int new = 1;
107
108 error = gfs2_extent_map(&ip->i_inode, lblock, &new, &dblock, &extlen);
109 if (error)
110 goto out_trans;
111
112 lblock += extlen;
113 x += extlen;
114 }
115
116 gfs2_assert_warn(sdp, al->al_alloced);
117
118out_trans:
119 gfs2_trans_end(sdp);
120out_ipres:
121 gfs2_inplace_release(ip);
122out_gunlock_q:
123 gfs2_quota_unlock(ip);
124out:
125 gfs2_alloc_put(ip);
126 return error;
127}
128
/* nopage handler for writable MAP_SHARED mappings.  Takes the inode
 * glock exclusively around the fault so block allocation for pages not
 * yet backed by disk blocks is atomic with respect to other nodes.
 */
129static struct page *gfs2_sharewrite_nopage(struct vm_area_struct *area,
130					   unsigned long address, int *type)
131{
132	struct file *file = area->vm_file;
133	struct gfs2_file *gf = file->private_data;
134	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
135	struct gfs2_holder i_gh;
136	struct page *result = NULL;
	/* Page index of the faulting address within the file. */
137	unsigned long index = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) +
138			      area->vm_pgoff;
139	int alloc_required;
140	int error;
141
142	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
143	if (error)
144		return NULL;
145
146	set_bit(GIF_PAGED, &ip->i_flags);
147	set_bit(GIF_SW_PAGED, &ip->i_flags);
148
149	error = gfs2_write_alloc_required(ip, (u64)index << PAGE_CACHE_SHIFT,
150					  PAGE_CACHE_SIZE, &alloc_required);
151	if (error)
152		goto out;
153
	/* GFF_EXLOCK tells the read path we already hold the glock. */
154	set_bit(GFF_EXLOCK, &gf->f_flags);
155	result = filemap_nopage(area, address, type);
156	clear_bit(GFF_EXLOCK, &gf->f_flags);
157	if (!result || result == NOPAGE_OOM)
158		goto out;
159
	/* Back the faulted page with real disk blocks before it can be
	   dirtied; drop the page on failure so the fault is retried. */
160	if (alloc_required) {
161		error = alloc_page_backing(ip, result);
162		if (error) {
163			page_cache_release(result);
164			result = NULL;
165			goto out;
166		}
167		set_page_dirty(result);
168	}
169
170	pfault_be_greedy(ip);
171out:
172	gfs2_glock_dq_uninit(&i_gh);
173
174	return result;
175}
176
/* VM ops for MAP_PRIVATE (and read-only shared) mappings. */
177struct vm_operations_struct gfs2_vm_ops_private = {
178	.nopage = gfs2_private_nopage,
179};
180
/* VM ops for writable MAP_SHARED mappings. */
181struct vm_operations_struct gfs2_vm_ops_sharewrite = {
182	.nopage = gfs2_sharewrite_nopage,
183};
184
diff --git a/fs/gfs2/ops_vm.h b/fs/gfs2/ops_vm.h
new file mode 100644
index 000000000000..4ae8f43ed5e3
--- /dev/null
+++ b/fs/gfs2/ops_vm.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __OPS_VM_DOT_H__
11#define __OPS_VM_DOT_H__
12
13#include <linux/mm.h>
14
15extern struct vm_operations_struct gfs2_vm_ops_private;
16extern struct vm_operations_struct gfs2_vm_ops_sharewrite;
17
18#endif /* __OPS_VM_DOT_H__ */
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
new file mode 100644
index 000000000000..c69b94a55588
--- /dev/null
+++ b/fs/gfs2/quota.c
@@ -0,0 +1,1227 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10/*
11 * Quota change tags are associated with each transaction that allocates or
12 * deallocates space. Those changes are accumulated locally to each node (in a
13 * per-node file) and then are periodically synced to the quota file. This
14 * avoids the bottleneck of constantly touching the quota file, but introduces
15 * fuzziness in the current usage value of IDs that are being used on different
16 * nodes in the cluster simultaneously. So, it is possible for a user on
17 * multiple nodes to overrun their quota, but that overrun is controllable.
18 * Since quota tags are part of transactions, there is no need for a quota-check
19 * program to be run on node crashes or anything like that.
20 *
21 * There are couple of knobs that let the administrator manage the quota
22 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
23 * sitting on one node before being synced to the quota file. (The default is
24 * 60 seconds.) Another knob, "quota_scale" controls how quickly the frequency
25 * of quota file syncs increases as the user moves closer to their limit. The
26 * more frequent the syncs, the more accurate the quota enforcement, but that
27 * means that there is more contention between the nodes for the quota file.
28 * The default value is one. This sets the maximum theoretical quota overrun
30 * (with an infinite number of nodes with infinite bandwidth) to twice the user's limit. (In
30 * practice, the maximum overrun you see should be much less.) A "quota_scale"
31 * number greater than one makes quota syncs more frequent and reduces the
32 * maximum overrun. Numbers less than one (but greater than zero) make quota
33 * syncs less frequent.
34 *
35 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
36 * the quota file, so it is not being constantly read.
37 */
38
39#include <linux/sched.h>
40#include <linux/slab.h>
41#include <linux/spinlock.h>
42#include <linux/completion.h>
43#include <linux/buffer_head.h>
44#include <linux/sort.h>
45#include <linux/fs.h>
46#include <linux/bio.h>
47#include <linux/gfs2_ondisk.h>
48#include <linux/lm_interface.h>
49
50#include "gfs2.h"
51#include "incore.h"
52#include "bmap.h"
53#include "glock.h"
54#include "glops.h"
55#include "log.h"
56#include "meta_io.h"
57#include "quota.h"
58#include "rgrp.h"
59#include "super.h"
60#include "trans.h"
61#include "inode.h"
62#include "ops_file.h"
63#include "ops_address.h"
64#include "util.h"
65
66#define QUOTA_USER 1
67#define QUOTA_GROUP 0
68
69static u64 qd2offset(struct gfs2_quota_data *qd)
70{
71 u64 offset;
72
73 offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
74 offset *= sizeof(struct gfs2_quota);
75
76 return offset;
77}
78
79static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
80 struct gfs2_quota_data **qdp)
81{
82 struct gfs2_quota_data *qd;
83 int error;
84
85 qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
86 if (!qd)
87 return -ENOMEM;
88
89 qd->qd_count = 1;
90 qd->qd_id = id;
91 if (user)
92 set_bit(QDF_USER, &qd->qd_flags);
93 qd->qd_slot = -1;
94
95 error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
96 &gfs2_quota_glops, CREATE, &qd->qd_gl);
97 if (error)
98 goto fail;
99
100 error = gfs2_lvb_hold(qd->qd_gl);
101 gfs2_glock_put(qd->qd_gl);
102 if (error)
103 goto fail;
104
105 *qdp = qd;
106
107 return 0;
108
109fail:
110 kfree(qd);
111 return error;
112}
113
114static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
115 struct gfs2_quota_data **qdp)
116{
117 struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
118 int error, found;
119
120 *qdp = NULL;
121
122 for (;;) {
123 found = 0;
124 spin_lock(&sdp->sd_quota_spin);
125 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
126 if (qd->qd_id == id &&
127 !test_bit(QDF_USER, &qd->qd_flags) == !user) {
128 qd->qd_count++;
129 found = 1;
130 break;
131 }
132 }
133
134 if (!found)
135 qd = NULL;
136
137 if (!qd && new_qd) {
138 qd = new_qd;
139 list_add(&qd->qd_list, &sdp->sd_quota_list);
140 atomic_inc(&sdp->sd_quota_count);
141 new_qd = NULL;
142 }
143
144 spin_unlock(&sdp->sd_quota_spin);
145
146 if (qd || !create) {
147 if (new_qd) {
148 gfs2_lvb_unhold(new_qd->qd_gl);
149 kfree(new_qd);
150 }
151 *qdp = qd;
152 return 0;
153 }
154
155 error = qd_alloc(sdp, user, id, &new_qd);
156 if (error)
157 return error;
158 }
159}
160
161static void qd_hold(struct gfs2_quota_data *qd)
162{
163 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
164
165 spin_lock(&sdp->sd_quota_spin);
166 gfs2_assert(sdp, qd->qd_count);
167 qd->qd_count++;
168 spin_unlock(&sdp->sd_quota_spin);
169}
170
171static void qd_put(struct gfs2_quota_data *qd)
172{
173 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
174 spin_lock(&sdp->sd_quota_spin);
175 gfs2_assert(sdp, qd->qd_count);
176 if (!--qd->qd_count)
177 qd->qd_last_touched = jiffies;
178 spin_unlock(&sdp->sd_quota_spin);
179}
180
181static int slot_get(struct gfs2_quota_data *qd)
182{
183 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
184 unsigned int c, o = 0, b;
185 unsigned char byte = 0;
186
187 spin_lock(&sdp->sd_quota_spin);
188
189 if (qd->qd_slot_count++) {
190 spin_unlock(&sdp->sd_quota_spin);
191 return 0;
192 }
193
194 for (c = 0; c < sdp->sd_quota_chunks; c++)
195 for (o = 0; o < PAGE_SIZE; o++) {
196 byte = sdp->sd_quota_bitmap[c][o];
197 if (byte != 0xFF)
198 goto found;
199 }
200
201 goto fail;
202
203found:
204 for (b = 0; b < 8; b++)
205 if (!(byte & (1 << b)))
206 break;
207 qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
208
209 if (qd->qd_slot >= sdp->sd_quota_slots)
210 goto fail;
211
212 sdp->sd_quota_bitmap[c][o] |= 1 << b;
213
214 spin_unlock(&sdp->sd_quota_spin);
215
216 return 0;
217
218fail:
219 qd->qd_slot_count--;
220 spin_unlock(&sdp->sd_quota_spin);
221 return -ENOSPC;
222}
223
224static void slot_hold(struct gfs2_quota_data *qd)
225{
226 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
227
228 spin_lock(&sdp->sd_quota_spin);
229 gfs2_assert(sdp, qd->qd_slot_count);
230 qd->qd_slot_count++;
231 spin_unlock(&sdp->sd_quota_spin);
232}
233
234static void slot_put(struct gfs2_quota_data *qd)
235{
236 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
237
238 spin_lock(&sdp->sd_quota_spin);
239 gfs2_assert(sdp, qd->qd_slot_count);
240 if (!--qd->qd_slot_count) {
241 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
242 qd->qd_slot = -1;
243 }
244 spin_unlock(&sdp->sd_quota_spin);
245}
246
247static int bh_get(struct gfs2_quota_data *qd)
248{
249 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
250 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
251 unsigned int block, offset;
252 struct buffer_head *bh;
253 int error;
254 struct buffer_head bh_map;
255
256 mutex_lock(&sdp->sd_quota_mutex);
257
258 if (qd->qd_bh_count++) {
259 mutex_unlock(&sdp->sd_quota_mutex);
260 return 0;
261 }
262
263 block = qd->qd_slot / sdp->sd_qc_per_block;
264 offset = qd->qd_slot % sdp->sd_qc_per_block;;
265
266 error = gfs2_block_map(&ip->i_inode, block, 0, &bh_map, 1);
267 if (error)
268 goto fail;
269 error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
270 if (error)
271 goto fail;
272 error = -EIO;
273 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
274 goto fail_brelse;
275
276 qd->qd_bh = bh;
277 qd->qd_bh_qc = (struct gfs2_quota_change *)
278 (bh->b_data + sizeof(struct gfs2_meta_header) +
279 offset * sizeof(struct gfs2_quota_change));
280
281 mutex_lock(&sdp->sd_quota_mutex);
282
283 return 0;
284
285fail_brelse:
286 brelse(bh);
287fail:
288 qd->qd_bh_count--;
289 mutex_unlock(&sdp->sd_quota_mutex);
290 return error;
291}
292
293static void bh_put(struct gfs2_quota_data *qd)
294{
295 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
296
297 mutex_lock(&sdp->sd_quota_mutex);
298 gfs2_assert(sdp, qd->qd_bh_count);
299 if (!--qd->qd_bh_count) {
300 brelse(qd->qd_bh);
301 qd->qd_bh = NULL;
302 qd->qd_bh_qc = NULL;
303 }
304 mutex_unlock(&sdp->sd_quota_mutex);
305}
306
307static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
308{
309 struct gfs2_quota_data *qd = NULL;
310 int error;
311 int found = 0;
312
313 *qdp = NULL;
314
315 if (sdp->sd_vfs->s_flags & MS_RDONLY)
316 return 0;
317
318 spin_lock(&sdp->sd_quota_spin);
319
320 list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
321 if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
322 !test_bit(QDF_CHANGE, &qd->qd_flags) ||
323 qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
324 continue;
325
326 list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
327
328 set_bit(QDF_LOCKED, &qd->qd_flags);
329 gfs2_assert_warn(sdp, qd->qd_count);
330 qd->qd_count++;
331 qd->qd_change_sync = qd->qd_change;
332 gfs2_assert_warn(sdp, qd->qd_slot_count);
333 qd->qd_slot_count++;
334 found = 1;
335
336 break;
337 }
338
339 if (!found)
340 qd = NULL;
341
342 spin_unlock(&sdp->sd_quota_spin);
343
344 if (qd) {
345 gfs2_assert_warn(sdp, qd->qd_change_sync);
346 error = bh_get(qd);
347 if (error) {
348 clear_bit(QDF_LOCKED, &qd->qd_flags);
349 slot_put(qd);
350 qd_put(qd);
351 return error;
352 }
353 }
354
355 *qdp = qd;
356
357 return 0;
358}
359
360static int qd_trylock(struct gfs2_quota_data *qd)
361{
362 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
363
364 if (sdp->sd_vfs->s_flags & MS_RDONLY)
365 return 0;
366
367 spin_lock(&sdp->sd_quota_spin);
368
369 if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
370 !test_bit(QDF_CHANGE, &qd->qd_flags)) {
371 spin_unlock(&sdp->sd_quota_spin);
372 return 0;
373 }
374
375 list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
376
377 set_bit(QDF_LOCKED, &qd->qd_flags);
378 gfs2_assert_warn(sdp, qd->qd_count);
379 qd->qd_count++;
380 qd->qd_change_sync = qd->qd_change;
381 gfs2_assert_warn(sdp, qd->qd_slot_count);
382 qd->qd_slot_count++;
383
384 spin_unlock(&sdp->sd_quota_spin);
385
386 gfs2_assert_warn(sdp, qd->qd_change_sync);
387 if (bh_get(qd)) {
388 clear_bit(QDF_LOCKED, &qd->qd_flags);
389 slot_put(qd);
390 qd_put(qd);
391 return 0;
392 }
393
394 return 1;
395}
396
397static void qd_unlock(struct gfs2_quota_data *qd)
398{
399 gfs2_assert_warn(qd->qd_gl->gl_sbd,
400 test_bit(QDF_LOCKED, &qd->qd_flags));
401 clear_bit(QDF_LOCKED, &qd->qd_flags);
402 bh_put(qd);
403 slot_put(qd);
404 qd_put(qd);
405}
406
407static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
408 struct gfs2_quota_data **qdp)
409{
410 int error;
411
412 error = qd_get(sdp, user, id, create, qdp);
413 if (error)
414 return error;
415
416 error = slot_get(*qdp);
417 if (error)
418 goto fail;
419
420 error = bh_get(*qdp);
421 if (error)
422 goto fail_slot;
423
424 return 0;
425
426fail_slot:
427 slot_put(*qdp);
428fail:
429 qd_put(*qdp);
430 return error;
431}
432
/* Inverse of qdsb_get(): release buffer, slot, then the qd reference. */
static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
439
440int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
441{
442 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
443 struct gfs2_alloc *al = &ip->i_alloc;
444 struct gfs2_quota_data **qd = al->al_qd;
445 int error;
446
447 if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
448 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
449 return -EIO;
450
451 if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
452 return 0;
453
454 error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd);
455 if (error)
456 goto out;
457 al->al_qd_num++;
458 qd++;
459
460 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd);
461 if (error)
462 goto out;
463 al->al_qd_num++;
464 qd++;
465
466 if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) {
467 error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
468 if (error)
469 goto out;
470 al->al_qd_num++;
471 qd++;
472 }
473
474 if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) {
475 error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
476 if (error)
477 goto out;
478 al->al_qd_num++;
479 qd++;
480 }
481
482out:
483 if (error)
484 gfs2_quota_unhold(ip);
485 return error;
486}
487
488void gfs2_quota_unhold(struct gfs2_inode *ip)
489{
490 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
491 struct gfs2_alloc *al = &ip->i_alloc;
492 unsigned int x;
493
494 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
495
496 for (x = 0; x < al->al_qd_num; x++) {
497 qdsb_put(al->al_qd[x]);
498 al->al_qd[x] = NULL;
499 }
500 al->al_qd_num = 0;
501}
502
503static int sort_qd(const void *a, const void *b)
504{
505 const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
506 const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
507
508 if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
509 !test_bit(QDF_USER, &qd_b->qd_flags)) {
510 if (test_bit(QDF_USER, &qd_a->qd_flags))
511 return -1;
512 else
513 return 1;
514 }
515 if (qd_a->qd_id < qd_b->qd_id)
516 return -1;
517 if (qd_a->qd_id > qd_b->qd_id)
518 return 1;
519
520 return 0;
521}
522
523static void do_qc(struct gfs2_quota_data *qd, s64 change)
524{
525 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
526 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
527 struct gfs2_quota_change *qc = qd->qd_bh_qc;
528 s64 x;
529
530 mutex_lock(&sdp->sd_quota_mutex);
531 gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);
532
533 if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
534 qc->qc_change = 0;
535 qc->qc_flags = 0;
536 if (test_bit(QDF_USER, &qd->qd_flags))
537 qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
538 qc->qc_id = cpu_to_be32(qd->qd_id);
539 }
540
541 x = qc->qc_change;
542 x = be64_to_cpu(x) + change;
543 qc->qc_change = cpu_to_be64(x);
544
545 spin_lock(&sdp->sd_quota_spin);
546 qd->qd_change = x;
547 spin_unlock(&sdp->sd_quota_spin);
548
549 if (!x) {
550 gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
551 clear_bit(QDF_CHANGE, &qd->qd_flags);
552 qc->qc_flags = 0;
553 qc->qc_id = 0;
554 slot_put(qd);
555 qd_put(qd);
556 } else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
557 qd_hold(qd);
558 slot_hold(qd);
559 }
560
561 mutex_unlock(&sdp->sd_quota_mutex);
562}
563
564/**
565 * gfs2_adjust_quota
566 *
567 * This function was mostly borrowed from gfs2_block_truncate_page which was
568 * in turn mostly borrowed from ext3
569 */
570static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
571 s64 change, struct gfs2_quota_data *qd)
572{
573 struct inode *inode = &ip->i_inode;
574 struct address_space *mapping = inode->i_mapping;
575 unsigned long index = loc >> PAGE_CACHE_SHIFT;
576 unsigned offset = loc & (PAGE_CACHE_SHIFT - 1);
577 unsigned blocksize, iblock, pos;
578 struct buffer_head *bh;
579 struct page *page;
580 void *kaddr;
581 __be64 *ptr;
582 s64 value;
583 int err = -EIO;
584
585 page = grab_cache_page(mapping, index);
586 if (!page)
587 return -ENOMEM;
588
589 blocksize = inode->i_sb->s_blocksize;
590 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
591
592 if (!page_has_buffers(page))
593 create_empty_buffers(page, blocksize, 0);
594
595 bh = page_buffers(page);
596 pos = blocksize;
597 while (offset >= pos) {
598 bh = bh->b_this_page;
599 iblock++;
600 pos += blocksize;
601 }
602
603 if (!buffer_mapped(bh)) {
604 gfs2_get_block(inode, iblock, bh, 1);
605 if (!buffer_mapped(bh))
606 goto unlock;
607 }
608
609 if (PageUptodate(page))
610 set_buffer_uptodate(bh);
611
612 if (!buffer_uptodate(bh)) {
613 ll_rw_block(READ_META, 1, &bh);
614 wait_on_buffer(bh);
615 if (!buffer_uptodate(bh))
616 goto unlock;
617 }
618
619 gfs2_trans_add_bh(ip->i_gl, bh, 0);
620
621 kaddr = kmap_atomic(page, KM_USER0);
622 ptr = kaddr + offset;
623 value = (s64)be64_to_cpu(*ptr) + change;
624 *ptr = cpu_to_be64(value);
625 flush_dcache_page(page);
626 kunmap_atomic(kaddr, KM_USER0);
627 err = 0;
628 qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
629 qd->qd_qb.qb_value = cpu_to_be64(value);
630unlock:
631 unlock_page(page);
632 page_cache_release(page);
633 return err;
634}
635
636static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
637{
638 struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
639 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
640 unsigned int data_blocks, ind_blocks;
641 struct gfs2_holder *ghs, i_gh;
642 unsigned int qx, x;
643 struct gfs2_quota_data *qd;
644 loff_t offset;
645 unsigned int nalloc = 0;
646 struct gfs2_alloc *al = NULL;
647 int error;
648
649 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
650 &data_blocks, &ind_blocks);
651
652 ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);
653 if (!ghs)
654 return -ENOMEM;
655
656 sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
657 for (qx = 0; qx < num_qd; qx++) {
658 error = gfs2_glock_nq_init(qda[qx]->qd_gl,
659 LM_ST_EXCLUSIVE,
660 GL_NOCACHE, &ghs[qx]);
661 if (error)
662 goto out;
663 }
664
665 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
666 if (error)
667 goto out;
668
669 for (x = 0; x < num_qd; x++) {
670 int alloc_required;
671
672 offset = qd2offset(qda[x]);
673 error = gfs2_write_alloc_required(ip, offset,
674 sizeof(struct gfs2_quota),
675 &alloc_required);
676 if (error)
677 goto out_gunlock;
678 if (alloc_required)
679 nalloc++;
680 }
681
682 if (nalloc) {
683 al = gfs2_alloc_get(ip);
684
685 al->al_requested = nalloc * (data_blocks + ind_blocks);
686
687 error = gfs2_inplace_reserve(ip);
688 if (error)
689 goto out_alloc;
690
691 error = gfs2_trans_begin(sdp,
692 al->al_rgd->rd_ri.ri_length +
693 num_qd * data_blocks +
694 nalloc * ind_blocks +
695 RES_DINODE + num_qd +
696 RES_STATFS, 0);
697 if (error)
698 goto out_ipres;
699 } else {
700 error = gfs2_trans_begin(sdp,
701 num_qd * data_blocks +
702 RES_DINODE + num_qd, 0);
703 if (error)
704 goto out_gunlock;
705 }
706
707 for (x = 0; x < num_qd; x++) {
708 qd = qda[x];
709 offset = qd2offset(qd);
710 error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
711 (struct gfs2_quota_data *)
712 qd->qd_gl->gl_lvb);
713 if (error)
714 goto out_end_trans;
715
716 do_qc(qd, -qd->qd_change_sync);
717 }
718
719 error = 0;
720
721out_end_trans:
722 gfs2_trans_end(sdp);
723out_ipres:
724 if (nalloc)
725 gfs2_inplace_release(ip);
726out_alloc:
727 if (nalloc)
728 gfs2_alloc_put(ip);
729out_gunlock:
730 gfs2_glock_dq_uninit(&i_gh);
731out:
732 while (qx--)
733 gfs2_glock_dq_uninit(&ghs[qx]);
734 kfree(ghs);
735 gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
736 return error;
737}
738
739static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
740 struct gfs2_holder *q_gh)
741{
742 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
743 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
744 struct gfs2_holder i_gh;
745 struct gfs2_quota q;
746 char buf[sizeof(struct gfs2_quota)];
747 struct file_ra_state ra_state;
748 int error;
749 struct gfs2_quota_lvb *qlvb;
750
751 file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);
752restart:
753 error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
754 if (error)
755 return error;
756
757 qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
758
759 if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
760 loff_t pos;
761 gfs2_glock_dq_uninit(q_gh);
762 error = gfs2_glock_nq_init(qd->qd_gl,
763 LM_ST_EXCLUSIVE, GL_NOCACHE,
764 q_gh);
765 if (error)
766 return error;
767
768 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
769 if (error)
770 goto fail;
771
772 memset(buf, 0, sizeof(struct gfs2_quota));
773 pos = qd2offset(qd);
774 error = gfs2_internal_read(ip, &ra_state, buf,
775 &pos, sizeof(struct gfs2_quota));
776 if (error < 0)
777 goto fail_gunlock;
778
779 gfs2_glock_dq_uninit(&i_gh);
780
781
782 gfs2_quota_in(&q, buf);
783 qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
784 qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
785 qlvb->__pad = 0;
786 qlvb->qb_limit = cpu_to_be64(q.qu_limit);
787 qlvb->qb_warn = cpu_to_be64(q.qu_warn);
788 qlvb->qb_value = cpu_to_be64(q.qu_value);
789 qd->qd_qb = *qlvb;
790
791 if (gfs2_glock_is_blocking(qd->qd_gl)) {
792 gfs2_glock_dq_uninit(q_gh);
793 force_refresh = 0;
794 goto restart;
795 }
796 }
797
798 return 0;
799
800fail_gunlock:
801 gfs2_glock_dq_uninit(&i_gh);
802fail:
803 gfs2_glock_dq_uninit(q_gh);
804 return error;
805}
806
807int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
808{
809 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
810 struct gfs2_alloc *al = &ip->i_alloc;
811 unsigned int x;
812 int error = 0;
813
814 gfs2_quota_hold(ip, uid, gid);
815
816 if (capable(CAP_SYS_RESOURCE) ||
817 sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
818 return 0;
819
820 sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
821 sort_qd, NULL);
822
823 for (x = 0; x < al->al_qd_num; x++) {
824 error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
825 if (error)
826 break;
827 }
828
829 if (!error)
830 set_bit(GIF_QD_LOCKED, &ip->i_flags);
831 else {
832 while (x--)
833 gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
834 gfs2_quota_unhold(ip);
835 }
836
837 return error;
838}
839
840static int need_sync(struct gfs2_quota_data *qd)
841{
842 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
843 struct gfs2_tune *gt = &sdp->sd_tune;
844 s64 value;
845 unsigned int num, den;
846 int do_sync = 1;
847
848 if (!qd->qd_qb.qb_limit)
849 return 0;
850
851 spin_lock(&sdp->sd_quota_spin);
852 value = qd->qd_change;
853 spin_unlock(&sdp->sd_quota_spin);
854
855 spin_lock(&gt->gt_spin);
856 num = gt->gt_quota_scale_num;
857 den = gt->gt_quota_scale_den;
858 spin_unlock(&gt->gt_spin);
859
860 if (value < 0)
861 do_sync = 0;
862 else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
863 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
864 do_sync = 0;
865 else {
866 value *= gfs2_jindex_size(sdp) * num;
867 do_div(value, den);
868 value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
869 if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
870 do_sync = 0;
871 }
872
873 return do_sync;
874}
875
876void gfs2_quota_unlock(struct gfs2_inode *ip)
877{
878 struct gfs2_alloc *al = &ip->i_alloc;
879 struct gfs2_quota_data *qda[4];
880 unsigned int count = 0;
881 unsigned int x;
882
883 if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
884 goto out;
885
886 for (x = 0; x < al->al_qd_num; x++) {
887 struct gfs2_quota_data *qd;
888 int sync;
889
890 qd = al->al_qd[x];
891 sync = need_sync(qd);
892
893 gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
894
895 if (sync && qd_trylock(qd))
896 qda[count++] = qd;
897 }
898
899 if (count) {
900 do_sync(count, qda);
901 for (x = 0; x < count; x++)
902 qd_unlock(qda[x]);
903 }
904
905out:
906 gfs2_quota_unhold(ip);
907}
908
909#define MAX_LINE 256
910
911static int print_message(struct gfs2_quota_data *qd, char *type)
912{
913 struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
914
915 printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n",
916 sdp->sd_fsname, type,
917 (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
918 qd->qd_id);
919
920 return 0;
921}
922
923int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
924{
925 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
926 struct gfs2_alloc *al = &ip->i_alloc;
927 struct gfs2_quota_data *qd;
928 s64 value;
929 unsigned int x;
930 int error = 0;
931
932 if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
933 return 0;
934
935 if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
936 return 0;
937
938 for (x = 0; x < al->al_qd_num; x++) {
939 qd = al->al_qd[x];
940
941 if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
942 (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
943 continue;
944
945 value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
946 spin_lock(&sdp->sd_quota_spin);
947 value += qd->qd_change;
948 spin_unlock(&sdp->sd_quota_spin);
949
950 if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
951 print_message(qd, "exceeded");
952 error = -EDQUOT;
953 break;
954 } else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
955 (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
956 time_after_eq(jiffies, qd->qd_last_warn +
957 gfs2_tune_get(sdp,
958 gt_quota_warn_period) * HZ)) {
959 error = print_message(qd, "warning");
960 qd->qd_last_warn = jiffies;
961 }
962 }
963
964 return error;
965}
966
967void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
968 u32 uid, u32 gid)
969{
970 struct gfs2_alloc *al = &ip->i_alloc;
971 struct gfs2_quota_data *qd;
972 unsigned int x;
973 unsigned int found = 0;
974
975 if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
976 return;
977 if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
978 return;
979
980 for (x = 0; x < al->al_qd_num; x++) {
981 qd = al->al_qd[x];
982
983 if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
984 (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
985 do_qc(qd, change);
986 found++;
987 }
988 }
989}
990
991int gfs2_quota_sync(struct gfs2_sbd *sdp)
992{
993 struct gfs2_quota_data **qda;
994 unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
995 unsigned int num_qd;
996 unsigned int x;
997 int error = 0;
998
999 sdp->sd_quota_sync_gen++;
1000
1001 qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
1002 if (!qda)
1003 return -ENOMEM;
1004
1005 do {
1006 num_qd = 0;
1007
1008 for (;;) {
1009 error = qd_fish(sdp, qda + num_qd);
1010 if (error || !qda[num_qd])
1011 break;
1012 if (++num_qd == max_qd)
1013 break;
1014 }
1015
1016 if (num_qd) {
1017 if (!error)
1018 error = do_sync(num_qd, qda);
1019 if (!error)
1020 for (x = 0; x < num_qd; x++)
1021 qda[x]->qd_sync_gen =
1022 sdp->sd_quota_sync_gen;
1023
1024 for (x = 0; x < num_qd; x++)
1025 qd_unlock(qda[x]);
1026 }
1027 } while (!error && num_qd == max_qd);
1028
1029 kfree(qda);
1030
1031 return error;
1032}
1033
1034int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
1035{
1036 struct gfs2_quota_data *qd;
1037 struct gfs2_holder q_gh;
1038 int error;
1039
1040 error = qd_get(sdp, user, id, CREATE, &qd);
1041 if (error)
1042 return error;
1043
1044 error = do_glock(qd, FORCE, &q_gh);
1045 if (!error)
1046 gfs2_glock_dq_uninit(&q_gh);
1047
1048 qd_put(qd);
1049
1050 return error;
1051}
1052
1053int gfs2_quota_init(struct gfs2_sbd *sdp)
1054{
1055 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1056 unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
1057 unsigned int x, slot = 0;
1058 unsigned int found = 0;
1059 u64 dblock;
1060 u32 extlen = 0;
1061 int error;
1062
1063 if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) ||
1064 ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
1065 gfs2_consist_inode(ip);
1066 return -EIO;
1067 }
1068 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1069 sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
1070
1071 error = -ENOMEM;
1072
1073 sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
1074 sizeof(unsigned char *), GFP_KERNEL);
1075 if (!sdp->sd_quota_bitmap)
1076 return error;
1077
1078 for (x = 0; x < sdp->sd_quota_chunks; x++) {
1079 sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
1080 if (!sdp->sd_quota_bitmap[x])
1081 goto fail;
1082 }
1083
1084 for (x = 0; x < blocks; x++) {
1085 struct buffer_head *bh;
1086 unsigned int y;
1087
1088 if (!extlen) {
1089 int new = 0;
1090 error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
1091 if (error)
1092 goto fail;
1093 }
1094 error = -EIO;
1095 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
1096 if (!bh)
1097 goto fail;
1098 if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
1099 brelse(bh);
1100 goto fail;
1101 }
1102
1103 for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
1104 y++, slot++) {
1105 struct gfs2_quota_change qc;
1106 struct gfs2_quota_data *qd;
1107
1108 gfs2_quota_change_in(&qc, bh->b_data +
1109 sizeof(struct gfs2_meta_header) +
1110 y * sizeof(struct gfs2_quota_change));
1111 if (!qc.qc_change)
1112 continue;
1113
1114 error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
1115 qc.qc_id, &qd);
1116 if (error) {
1117 brelse(bh);
1118 goto fail;
1119 }
1120
1121 set_bit(QDF_CHANGE, &qd->qd_flags);
1122 qd->qd_change = qc.qc_change;
1123 qd->qd_slot = slot;
1124 qd->qd_slot_count = 1;
1125 qd->qd_last_touched = jiffies;
1126
1127 spin_lock(&sdp->sd_quota_spin);
1128 gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
1129 list_add(&qd->qd_list, &sdp->sd_quota_list);
1130 atomic_inc(&sdp->sd_quota_count);
1131 spin_unlock(&sdp->sd_quota_spin);
1132
1133 found++;
1134 }
1135
1136 brelse(bh);
1137 dblock++;
1138 extlen--;
1139 }
1140
1141 if (found)
1142 fs_info(sdp, "found %u quota changes\n", found);
1143
1144 return 0;
1145
1146fail:
1147 gfs2_quota_cleanup(sdp);
1148 return error;
1149}
1150
1151void gfs2_quota_scan(struct gfs2_sbd *sdp)
1152{
1153 struct gfs2_quota_data *qd, *safe;
1154 LIST_HEAD(dead);
1155
1156 spin_lock(&sdp->sd_quota_spin);
1157 list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
1158 if (!qd->qd_count &&
1159 time_after_eq(jiffies, qd->qd_last_touched +
1160 gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
1161 list_move(&qd->qd_list, &dead);
1162 gfs2_assert_warn(sdp,
1163 atomic_read(&sdp->sd_quota_count) > 0);
1164 atomic_dec(&sdp->sd_quota_count);
1165 }
1166 }
1167 spin_unlock(&sdp->sd_quota_spin);
1168
1169 while (!list_empty(&dead)) {
1170 qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
1171 list_del(&qd->qd_list);
1172
1173 gfs2_assert_warn(sdp, !qd->qd_change);
1174 gfs2_assert_warn(sdp, !qd->qd_slot_count);
1175 gfs2_assert_warn(sdp, !qd->qd_bh_count);
1176
1177 gfs2_lvb_unhold(qd->qd_gl);
1178 kfree(qd);
1179 }
1180}
1181
1182void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
1183{
1184 struct list_head *head = &sdp->sd_quota_list;
1185 struct gfs2_quota_data *qd;
1186 unsigned int x;
1187
1188 spin_lock(&sdp->sd_quota_spin);
1189 while (!list_empty(head)) {
1190 qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
1191
1192 if (qd->qd_count > 1 ||
1193 (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
1194 list_move(&qd->qd_list, head);
1195 spin_unlock(&sdp->sd_quota_spin);
1196 schedule();
1197 spin_lock(&sdp->sd_quota_spin);
1198 continue;
1199 }
1200
1201 list_del(&qd->qd_list);
1202 atomic_dec(&sdp->sd_quota_count);
1203 spin_unlock(&sdp->sd_quota_spin);
1204
1205 if (!qd->qd_count) {
1206 gfs2_assert_warn(sdp, !qd->qd_change);
1207 gfs2_assert_warn(sdp, !qd->qd_slot_count);
1208 } else
1209 gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
1210 gfs2_assert_warn(sdp, !qd->qd_bh_count);
1211
1212 gfs2_lvb_unhold(qd->qd_gl);
1213 kfree(qd);
1214
1215 spin_lock(&sdp->sd_quota_spin);
1216 }
1217 spin_unlock(&sdp->sd_quota_spin);
1218
1219 gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
1220
1221 if (sdp->sd_quota_bitmap) {
1222 for (x = 0; x < sdp->sd_quota_chunks; x++)
1223 kfree(sdp->sd_quota_bitmap[x]);
1224 kfree(sdp->sd_quota_bitmap);
1225 }
1226}
1227
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
new file mode 100644
index 000000000000..a8be1417051f
--- /dev/null
+++ b/fs/gfs2/quota.h
@@ -0,0 +1,35 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __QUOTA_DOT_H__
11#define __QUOTA_DOT_H__
12
13struct gfs2_inode;
14struct gfs2_sbd;
15
16#define NO_QUOTA_CHANGE ((u32)-1)
17
18int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid);
19void gfs2_quota_unhold(struct gfs2_inode *ip);
20
21int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid);
22void gfs2_quota_unlock(struct gfs2_inode *ip);
23
24int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
25void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
26 u32 uid, u32 gid);
27
28int gfs2_quota_sync(struct gfs2_sbd *sdp);
29int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
30
31int gfs2_quota_init(struct gfs2_sbd *sdp);
32void gfs2_quota_scan(struct gfs2_sbd *sdp);
33void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
34
35#endif /* __QUOTA_DOT_H__ */
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
new file mode 100644
index 000000000000..0a8a4b87dcc6
--- /dev/null
+++ b/fs/gfs2/recovery.c
@@ -0,0 +1,570 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/gfs2_ondisk.h>
16#include <linux/crc32.h>
17#include <linux/lm_interface.h>
18
19#include "gfs2.h"
20#include "incore.h"
21#include "bmap.h"
22#include "glock.h"
23#include "glops.h"
24#include "lm.h"
25#include "lops.h"
26#include "meta_io.h"
27#include "recovery.h"
28#include "super.h"
29#include "util.h"
30#include "dir.h"
31
32int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
33 struct buffer_head **bh)
34{
35 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
36 struct gfs2_glock *gl = ip->i_gl;
37 int new = 0;
38 u64 dblock;
39 u32 extlen;
40 int error;
41
42 error = gfs2_extent_map(&ip->i_inode, blk, &new, &dblock, &extlen);
43 if (error)
44 return error;
45 if (!dblock) {
46 gfs2_consist_inode(ip);
47 return -EIO;
48 }
49
50 *bh = gfs2_meta_ra(gl, dblock, extlen);
51
52 return error;
53}
54
55int gfs2_revoke_add(struct gfs2_sbd *sdp, u64 blkno, unsigned int where)
56{
57 struct list_head *head = &sdp->sd_revoke_list;
58 struct gfs2_revoke_replay *rr;
59 int found = 0;
60
61 list_for_each_entry(rr, head, rr_list) {
62 if (rr->rr_blkno == blkno) {
63 found = 1;
64 break;
65 }
66 }
67
68 if (found) {
69 rr->rr_where = where;
70 return 0;
71 }
72
73 rr = kmalloc(sizeof(struct gfs2_revoke_replay), GFP_KERNEL);
74 if (!rr)
75 return -ENOMEM;
76
77 rr->rr_blkno = blkno;
78 rr->rr_where = where;
79 list_add(&rr->rr_list, head);
80
81 return 1;
82}
83
84int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where)
85{
86 struct gfs2_revoke_replay *rr;
87 int wrap, a, b, revoke;
88 int found = 0;
89
90 list_for_each_entry(rr, &sdp->sd_revoke_list, rr_list) {
91 if (rr->rr_blkno == blkno) {
92 found = 1;
93 break;
94 }
95 }
96
97 if (!found)
98 return 0;
99
100 wrap = (rr->rr_where < sdp->sd_replay_tail);
101 a = (sdp->sd_replay_tail < where);
102 b = (where < rr->rr_where);
103 revoke = (wrap) ? (a || b) : (a && b);
104
105 return revoke;
106}
107
108void gfs2_revoke_clean(struct gfs2_sbd *sdp)
109{
110 struct list_head *head = &sdp->sd_revoke_list;
111 struct gfs2_revoke_replay *rr;
112
113 while (!list_empty(head)) {
114 rr = list_entry(head->next, struct gfs2_revoke_replay, rr_list);
115 list_del(&rr->rr_list);
116 kfree(rr);
117 }
118}
119
/**
 * get_log_header - read the log header for a given segment
 * @jd: the journal
 * @blk: the block to look at
 * @head: the log header to return
 *
 * Read the log header for a given segment in a given journal.  Do a few
 * sanity checks on it.
 *
 * Returns: 0 on success,
 *          1 if the header was invalid or incomplete,
 *          errno on error
 */

static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
			  struct gfs2_log_header *head)
{
	struct buffer_head *bh;
	struct gfs2_log_header lh;
	u32 hash;
	int error;

	error = gfs2_replay_read_block(jd, blk, &bh);
	if (error)
		return error;

	/*
	 * The on-disk hash covers the header with lh_hash zeroed, so take
	 * a raw copy, clear that field and hash it, then byte-swap the
	 * buffer into host order for the comparisons below.
	 */
	memcpy(&lh, bh->b_data, sizeof(struct gfs2_log_header));
	lh.lh_hash = 0;
	hash = gfs2_disk_hash((char *)&lh, sizeof(struct gfs2_log_header));
	gfs2_log_header_in(&lh, bh->b_data);

	brelse(bh);

	/* Reject anything that is not a valid header for this block. */
	if (lh.lh_header.mh_magic != GFS2_MAGIC ||
	    lh.lh_header.mh_type != GFS2_METATYPE_LH ||
	    lh.lh_blkno != blk || lh.lh_hash != hash)
		return 1;

	*head = lh;

	return 0;
}
162
163/**
164 * find_good_lh - find a good log header
165 * @jd: the journal
166 * @blk: the segment to start searching from
167 * @lh: the log header to fill in
168 * @forward: if true search forward in the log, else search backward
169 *
170 * Call get_log_header() to get a log header for a segment, but if the
171 * segment is bad, either scan forward or backward until we find a good one.
172 *
173 * Returns: errno
174 */
175
176static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
177 struct gfs2_log_header *head)
178{
179 unsigned int orig_blk = *blk;
180 int error;
181
182 for (;;) {
183 error = get_log_header(jd, *blk, head);
184 if (error <= 0)
185 return error;
186
187 if (++*blk == jd->jd_blocks)
188 *blk = 0;
189
190 if (*blk == orig_blk) {
191 gfs2_consist_inode(GFS2_I(jd->jd_inode));
192 return -EIO;
193 }
194 }
195}
196
/**
 * jhead_scan - make sure we've found the head of the log
 * @jd: the journal
 * @head: on entry the best header found so far; updated to the true head
 *
 * At this point, seg and lh should be either the head of the log or just
 * before.  Scan forward until we find the head.
 *
 * Returns: errno
 */

static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
{
	unsigned int blk = head->lh_blkno;
	struct gfs2_log_header lh;
	int error;

	for (;;) {
		/* Step forward one block, wrapping at the journal end. */
		if (++blk == jd->jd_blocks)
			blk = 0;

		error = get_log_header(jd, blk, &lh);
		if (error < 0)
			return error;
		if (error == 1)
			/* Not a valid header: skip it and keep scanning. */
			continue;

		if (lh.lh_sequence == head->lh_sequence) {
			/* Two headers with the same sequence: corruption. */
			gfs2_consist_inode(GFS2_I(jd->jd_inode));
			return -EIO;
		}
		if (lh.lh_sequence < head->lh_sequence)
			/* Sequence dropped: previous header was the head. */
			break;

		*head = lh;
	}

	return 0;
}
236
/**
 * gfs2_find_jhead - find the head of a log
 * @jd: the journal
 * @head: the log descriptor for the head of the log is returned here
 *
 * Do a binary search of a journal and find the valid log entry with the
 * highest sequence number.  (i.e. the log head)
 *
 * Returns: errno
 */

int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
{
	struct gfs2_log_header lh_1, lh_m;
	u32 blk_1, blk_2, blk_m;
	int error;

	blk_1 = 0;
	blk_2 = jd->jd_blocks - 1;

	/*
	 * Binary search: sequence numbers increase from the log tail
	 * toward the head, so narrow [blk_1, blk_2] around the highest
	 * sequence.  find_good_lh() may slide a probe point forward
	 * past bad headers.
	 */
	for (;;) {
		blk_m = (blk_1 + blk_2) / 2;

		error = find_good_lh(jd, &blk_1, &lh_1);
		if (error)
			return error;

		error = find_good_lh(jd, &blk_m, &lh_m);
		if (error)
			return error;

		if (blk_1 == blk_m || blk_m == blk_2)
			break;

		if (lh_1.lh_sequence <= lh_m.lh_sequence)
			blk_1 = blk_m;
		else
			blk_2 = blk_m;
	}

	/* Walk forward from the best candidate to the true head. */
	error = jhead_scan(jd, &lh_1);
	if (error)
		return error;

	*head = lh_1;

	return error;
}
285
/**
 * foreach_descriptor - go through the active part of the log
 * @jd: the journal
 * @start: the first log header in the active region
 * @end: the last log header (don't process the contents of this entry))
 * @pass: pass number handed through to the log operations
 *
 * Call a given function once for every log descriptor in the active
 * portion of the log.
 *
 * Returns: errno
 */

static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
			      unsigned int end, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	int error = 0;
	u32 length;
	__be64 *ptr;
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	/* Round the element-array offset up to a __be64 boundary. */
	offset += sizeof(__be64) - 1;
	offset &= ~(sizeof(__be64) - 1);

	while (start != end) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;
		if (gfs2_meta_check(sdp, bh)) {
			brelse(bh);
			return -EIO;
		}
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		length = be32_to_cpu(ld->ld_length);

		if (be32_to_cpu(ld->ld_header.mh_type) == GFS2_METATYPE_LH) {
			/* A log header inside the region: verify and skip it. */
			struct gfs2_log_header lh;
			error = get_log_header(jd, start, &lh);
			if (!error) {
				gfs2_replay_incr_blk(sdp, &start);
				brelse(bh);
				continue;
			}
			if (error == 1) {
				/* Claimed to be a header but failed checks. */
				gfs2_consist_inode(GFS2_I(jd->jd_inode));
				error = -EIO;
			}
			brelse(bh);
			return error;
		} else if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LD)) {
			brelse(bh);
			return -EIO;
		}
		/* Hand the descriptor's element array to the log ops. */
		ptr = (__be64 *)(bh->b_data + offset);
		error = lops_scan_elements(jd, start, ld, ptr, pass);
		if (error) {
			brelse(bh);
			return error;
		}

		/* Advance past the descriptor and the blocks it covers. */
		while (length--)
			gfs2_replay_incr_blk(sdp, &start);

		brelse(bh);
	}

	return 0;
}
355
356/**
357 * clean_journal - mark a dirty journal as being clean
358 * @sdp: the filesystem
359 * @jd: the journal
360 * @gl: the journal's glock
361 * @head: the head journal to start from
362 *
363 * Returns: errno
364 */
365
366static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header *head)
367{
368 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
369 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
370 unsigned int lblock;
371 struct gfs2_log_header *lh;
372 u32 hash;
373 struct buffer_head *bh;
374 int error;
375 struct buffer_head bh_map;
376
377 lblock = head->lh_blkno;
378 gfs2_replay_incr_blk(sdp, &lblock);
379 error = gfs2_block_map(&ip->i_inode, lblock, 0, &bh_map, 1);
380 if (error)
381 return error;
382 if (!bh_map.b_blocknr) {
383 gfs2_consist_inode(ip);
384 return -EIO;
385 }
386
387 bh = sb_getblk(sdp->sd_vfs, bh_map.b_blocknr);
388 lock_buffer(bh);
389 memset(bh->b_data, 0, bh->b_size);
390 set_buffer_uptodate(bh);
391 clear_buffer_dirty(bh);
392 unlock_buffer(bh);
393
394 lh = (struct gfs2_log_header *)bh->b_data;
395 memset(lh, 0, sizeof(struct gfs2_log_header));
396 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
397 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
398 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
399 lh->lh_sequence = cpu_to_be64(head->lh_sequence + 1);
400 lh->lh_flags = cpu_to_be32(GFS2_LOG_HEAD_UNMOUNT);
401 lh->lh_blkno = cpu_to_be32(lblock);
402 hash = gfs2_disk_hash((const char *)lh, sizeof(struct gfs2_log_header));
403 lh->lh_hash = cpu_to_be32(hash);
404
405 set_buffer_dirty(bh);
406 if (sync_dirty_buffer(bh))
407 gfs2_io_error_bh(sdp, bh);
408 brelse(bh);
409
410 return error;
411}
412
/**
 * gfs2_recover_journal - recover a given journal
 * @jd: the struct gfs2_jdesc describing the journal
 *
 * Acquire the journal's lock, check to see if the journal is clean, and
 * do recovery if necessary.
 *
 * Returns: errno
 */

int gfs2_recover_journal(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header head;
	struct gfs2_holder j_gh, ji_gh, t_gh;
	unsigned long t;
	int ro = 0;
	unsigned int pass;
	int error;

	if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
		/* Recovering another node's journal: we must lock it. */
		fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
			jd->jd_jid);

		/* Acquire the journal lock so we can do recovery */

		error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
					  LM_ST_EXCLUSIVE,
					  LM_FLAG_NOEXP | LM_FLAG_TRY | GL_NOCACHE,
					  &j_gh);
		switch (error) {
		case 0:
			break;

		case GLR_TRYFAILED:
			/* Busy: another node holds the lock and will do the
			   recovery itself; report success. */
			fs_info(sdp, "jid=%u: Busy\n", jd->jd_jid);
			error = 0;
			/* fall through */

		default:
			goto fail;
		};

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
					   LM_FLAG_NOEXP, &ji_gh);
		if (error)
			goto fail_gunlock_j;
	} else {
		/* Our own journal is already locked for use. */
		fs_info(sdp, "jid=%u, already locked for use\n", jd->jd_jid);
	}

	fs_info(sdp, "jid=%u: Looking at journal...\n", jd->jd_jid);

	error = gfs2_jdesc_check(jd);
	if (error)
		goto fail_gunlock_ji;

	error = gfs2_find_jhead(jd, &head);
	if (error)
		goto fail_gunlock_ji;

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		/* Journal was not cleanly unmounted: replay is needed. */
		fs_info(sdp, "jid=%u: Acquiring the transaction lock...\n",
			jd->jd_jid);

		t = jiffies;

		/* Acquire a shared hold on the transaction lock */

		error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
					   LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
					   GL_NOCANCEL | GL_NOCACHE, &t_gh);
		if (error)
			goto fail_gunlock_ji;

		/* Determine whether the filesystem is effectively
		   read-only (no replay possible in that case). */
		if (test_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags)) {
			if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
				ro = 1;
		} else {
			if (sdp->sd_vfs->s_flags & MS_RDONLY)
				ro = 1;
		}

		if (ro) {
			fs_warn(sdp, "jid=%u: Can't replay: read-only FS\n",
				jd->jd_jid);
			error = -EROFS;
			goto fail_gunlock_tr;
		}

		fs_info(sdp, "jid=%u: Replaying journal...\n", jd->jd_jid);

		/* Two passes over the active region; the log operations
		   decide what each pass does. */
		for (pass = 0; pass < 2; pass++) {
			lops_before_scan(jd, &head, pass);
			error = foreach_descriptor(jd, head.lh_tail,
						   head.lh_blkno, pass);
			lops_after_scan(jd, error, pass);
			if (error)
				goto fail_gunlock_tr;
		}

		error = clean_journal(jd, &head);
		if (error)
			goto fail_gunlock_tr;

		gfs2_glock_dq_uninit(&t_gh);
		t = DIV_ROUND_UP(jiffies - t, HZ);
		fs_info(sdp, "jid=%u: Journal replayed in %lus\n",
			jd->jd_jid, t);
	}

	if (jd->jd_jid != sdp->sd_lockstruct.ls_jid)
		gfs2_glock_dq_uninit(&ji_gh);

	gfs2_lm_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);

	if (jd->jd_jid != sdp->sd_lockstruct.ls_jid)
		gfs2_glock_dq_uninit(&j_gh);

	fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
	return 0;

fail_gunlock_tr:
	gfs2_glock_dq_uninit(&t_gh);
fail_gunlock_ji:
	if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
		gfs2_glock_dq_uninit(&ji_gh);
		/* Note: this label sits inside the conditional so entry via
		   fail_gunlock_j drops only the journal glock. */
fail_gunlock_j:
		gfs2_glock_dq_uninit(&j_gh);
	}

	fs_info(sdp, "jid=%u: %s\n", jd->jd_jid, (error) ? "Failed" : "Done");

fail:
	gfs2_lm_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP);
	return error;
}
550
551/**
552 * gfs2_check_journals - Recover any dirty journals
553 * @sdp: the filesystem
554 *
555 */
556
557void gfs2_check_journals(struct gfs2_sbd *sdp)
558{
559 struct gfs2_jdesc *jd;
560
561 for (;;) {
562 jd = gfs2_jdesc_find_dirty(sdp);
563 if (!jd)
564 break;
565
566 if (jd != sdp->sd_jdesc)
567 gfs2_recover_journal(jd);
568 }
569}
570
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
new file mode 100644
index 000000000000..961feedf4d8b
--- /dev/null
+++ b/fs/gfs2/recovery.h
@@ -0,0 +1,34 @@
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __RECOVERY_DOT_H__
#define __RECOVERY_DOT_H__

#include "incore.h"

/* Advance *blk by one journal block, wrapping at the end of the journal. */
static inline void gfs2_replay_incr_blk(struct gfs2_sbd *sdp, unsigned int *blk)
{
	if (++*blk == sdp->sd_jdesc->jd_blocks)
		*blk = 0;
}

int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
			   struct buffer_head **bh);

/* Revoke tracking used while replaying a journal (see recovery.c) */
int gfs2_revoke_add(struct gfs2_sbd *sdp, u64 blkno, unsigned int where);
int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where);
void gfs2_revoke_clean(struct gfs2_sbd *sdp);

int gfs2_find_jhead(struct gfs2_jdesc *jd,
		    struct gfs2_log_header *head);
int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd);
void gfs2_check_journals(struct gfs2_sbd *sdp);

#endif /* __RECOVERY_DOT_H__ */
34
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
new file mode 100644
index 000000000000..b261385c0065
--- /dev/null
+++ b/fs/gfs2/rgrp.c
@@ -0,0 +1,1513 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/fs.h>
16#include <linux/gfs2_ondisk.h>
17#include <linux/lm_interface.h>
18
19#include "gfs2.h"
20#include "incore.h"
21#include "glock.h"
22#include "glops.h"
23#include "lops.h"
24#include "meta_io.h"
25#include "quota.h"
26#include "rgrp.h"
27#include "super.h"
28#include "trans.h"
29#include "ops_file.h"
30#include "util.h"
31
/* Returned by gfs2_bitfit() when no block in the sought state exists. */
#define BFITNOENT ((u32)~0)

/*
 * These routines are used by the resource group routines (rgrp.c)
 * to keep track of block allocation.  Each block is represented by two
 * bits.  So, each byte represents GFS2_NBBY (i.e. 4) blocks.
 *
 * 0 = Free
 * 1 = Used (not metadata)
 * 2 = Unlinked (still in use) inode
 * 3 = Used (metadata)
 */

/*
 * Legal state transitions, indexed as [new_state * 4 + current_state];
 * gfs2_setbit() treats any pair marked 0 as filesystem corruption.
 */
static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};
52
53/**
54 * gfs2_setbit - Set a bit in the bitmaps
55 * @buffer: the buffer that holds the bitmaps
56 * @buflen: the length (in bytes) of the buffer
57 * @block: the block to set
58 * @new_state: the new state of the block
59 *
60 */
61
62static void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
63 unsigned int buflen, u32 block,
64 unsigned char new_state)
65{
66 unsigned char *byte, *end, cur_state;
67 unsigned int bit;
68
69 byte = buffer + (block / GFS2_NBBY);
70 bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
71 end = buffer + buflen;
72
73 gfs2_assert(rgd->rd_sbd, byte < end);
74
75 cur_state = (*byte >> bit) & GFS2_BIT_MASK;
76
77 if (valid_change[new_state * 4 + cur_state]) {
78 *byte ^= cur_state << bit;
79 *byte |= new_state << bit;
80 } else
81 gfs2_consist_rgrpd(rgd);
82}
83
84/**
85 * gfs2_testbit - test a bit in the bitmaps
86 * @buffer: the buffer that holds the bitmaps
87 * @buflen: the length (in bytes) of the buffer
88 * @block: the block to read
89 *
90 */
91
92static unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
93 unsigned int buflen, u32 block)
94{
95 unsigned char *byte, *end, cur_state;
96 unsigned int bit;
97
98 byte = buffer + (block / GFS2_NBBY);
99 bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
100 end = buffer + buflen;
101
102 gfs2_assert(rgd->rd_sbd, byte < end);
103
104 cur_state = (*byte >> bit) & GFS2_BIT_MASK;
105
106 return cur_state;
107}
108
/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *               a block in a given allocation state.
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buffer)
 * @old_state: GFS2_BLKST_XXX the state of the block we're looking for;
 *             bit 0 = alloc(1)/free(0), bit 1 = meta(1)/data(0)
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem.  @buffer will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures.
 *
 * Return: the block number (bitmap buffer scope) that was found
 */

static u32 gfs2_bitfit(struct gfs2_rgrpd *rgd, unsigned char *buffer,
		       unsigned int buflen, u32 goal,
		       unsigned char old_state)
{
	unsigned char *byte, *end, alloc;
	u32 blk = goal;
	unsigned int bit;

	byte = buffer + (goal / GFS2_NBBY);
	bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
	end = buffer + buflen;
	/*
	 * 0x55 masks the low bit of each 2-bit pair, which distinguishes
	 * free-type states (low bit 0) from allocated ones (low bit 1).
	 * A byte whose masked value equals @alloc has no pair whose low
	 * bit matches the target state, so it can be skipped whole.
	 */
	alloc = (old_state & 1) ? 0 : 0x55;

	while (byte < end) {
		if ((*byte & 0x55) == alloc) {
			/* No candidate in this byte: skip the remaining
			   (8 - bit) / 2 block pairs it covers. */
			blk += (8 - bit) >> 1;

			bit = 0;
			byte++;

			continue;
		}

		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state)
			return blk;

		/* Advance one 2-bit pair; move to the next byte after
		   GFS2_NBBY pairs. */
		bit += GFS2_BIT_SIZE;
		if (bit >= 8) {
			bit = 0;
			byte++;
		}

		blk++;
	}

	return BFITNOENT;
}
162
163/**
164 * gfs2_bitcount - count the number of bits in a certain state
165 * @buffer: the buffer that holds the bitmaps
166 * @buflen: the length (in bytes) of the buffer
167 * @state: the state of the block we're looking for
168 *
169 * Returns: The number of bits
170 */
171
172static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, unsigned char *buffer,
173 unsigned int buflen, unsigned char state)
174{
175 unsigned char *byte = buffer;
176 unsigned char *end = buffer + buflen;
177 unsigned char state1 = state << 2;
178 unsigned char state2 = state << 4;
179 unsigned char state3 = state << 6;
180 u32 count = 0;
181
182 for (; byte < end; byte++) {
183 if (((*byte) & 0x03) == state)
184 count++;
185 if (((*byte) & 0x0C) == state1)
186 count++;
187 if (((*byte) & 0x30) == state2)
188 count++;
189 if (((*byte) & 0xC0) == state3)
190 count++;
191 }
192
193 return count;
194}
195
196/**
197 * gfs2_rgrp_verify - Verify that a resource group is consistent
198 * @sdp: the filesystem
199 * @rgd: the rgrp
200 *
201 */
202
203void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
204{
205 struct gfs2_sbd *sdp = rgd->rd_sbd;
206 struct gfs2_bitmap *bi = NULL;
207 u32 length = rgd->rd_ri.ri_length;
208 u32 count[4], tmp;
209 int buf, x;
210
211 memset(count, 0, 4 * sizeof(u32));
212
213 /* Count # blocks in each of 4 possible allocation states */
214 for (buf = 0; buf < length; buf++) {
215 bi = rgd->rd_bits + buf;
216 for (x = 0; x < 4; x++)
217 count[x] += gfs2_bitcount(rgd,
218 bi->bi_bh->b_data +
219 bi->bi_offset,
220 bi->bi_len, x);
221 }
222
223 if (count[0] != rgd->rd_rg.rg_free) {
224 if (gfs2_consist_rgrpd(rgd))
225 fs_err(sdp, "free data mismatch: %u != %u\n",
226 count[0], rgd->rd_rg.rg_free);
227 return;
228 }
229
230 tmp = rgd->rd_ri.ri_data -
231 rgd->rd_rg.rg_free -
232 rgd->rd_rg.rg_dinodes;
233 if (count[1] + count[2] != tmp) {
234 if (gfs2_consist_rgrpd(rgd))
235 fs_err(sdp, "used data mismatch: %u != %u\n",
236 count[1], tmp);
237 return;
238 }
239
240 if (count[3] != rgd->rd_rg.rg_dinodes) {
241 if (gfs2_consist_rgrpd(rgd))
242 fs_err(sdp, "used metadata mismatch: %u != %u\n",
243 count[3], rgd->rd_rg.rg_dinodes);
244 return;
245 }
246
247 if (count[2] > count[3]) {
248 if (gfs2_consist_rgrpd(rgd))
249 fs_err(sdp, "unlinked inodes > inodes: %u\n",
250 count[2]);
251 return;
252 }
253
254}
255
256static inline int rgrp_contains_block(struct gfs2_rindex *ri, u64 block)
257{
258 u64 first = ri->ri_data0;
259 u64 last = first + ri->ri_data;
260 return first <= block && block < last;
261}
262
263/**
264 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
265 * @sdp: The GFS2 superblock
266 * @n: The data block number
267 *
268 * Returns: The resource group, or NULL if not found
269 */
270
271struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk)
272{
273 struct gfs2_rgrpd *rgd;
274
275 spin_lock(&sdp->sd_rindex_spin);
276
277 list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
278 if (rgrp_contains_block(&rgd->rd_ri, blk)) {
279 list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
280 spin_unlock(&sdp->sd_rindex_spin);
281 return rgd;
282 }
283 }
284
285 spin_unlock(&sdp->sd_rindex_spin);
286
287 return NULL;
288}
289
290/**
291 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
292 * @sdp: The GFS2 superblock
293 *
294 * Returns: The first rgrp in the filesystem
295 */
296
297struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
298{
299 gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
300 return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
301}
302
303/**
304 * gfs2_rgrpd_get_next - get the next RG
305 * @rgd: A RG
306 *
307 * Returns: The next rgrp
308 */
309
310struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
311{
312 if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
313 return NULL;
314 return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
315}
316
/* Tear down all in-core resource group descriptors for @sdp. */
static void clear_rgrpdi(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	/* Empty the "recent" list under the spinlock; entries are only
	   unlinked here — the rgrps themselves are freed below. */
	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = NULL;
	head = &sdp->sd_rindex_recent_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);
		list_del(&rgd->rd_recent);
	}
	spin_unlock(&sdp->sd_rindex_spin);

	/* Now free every descriptor and drop its glock reference. */
	head = &sdp->sd_rindex_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
		gl = rgd->rd_gl;

		list_del(&rgd->rd_list);
		list_del(&rgd->rd_list_mru);

		if (gl) {
			/* Break the glock -> rgrp back-pointer first. */
			gl->gl_object = NULL;
			gfs2_glock_put(gl);
		}

		kfree(rgd->rd_bits);
		kfree(rgd);
	}
}
349
/* Public teardown entry point; serialised by the rindex mutex. */
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_rindex_mutex);
	clear_rgrpdi(sdp);
	mutex_unlock(&sdp->sd_rindex_mutex);
}
356
/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_ri.ri_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_ri.ri_bitbytes;

	/* Each bitmap block's usable payload starts after its header:
	   a gfs2_rgrp header in block 0, a gfs2_meta_header otherwise. */
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
			bi->bi_len = bytes;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_ri.ri_bitbytes - bytes_left;
			bi->bi_len = bytes;
		}

		bytes_left -= bytes;
	}

	/* Every bitmap byte must be accounted for... */
	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	/* ...and the total bit-pair count must match ri_data. */
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_ri.ri_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(&rgd->rd_ri);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}
432
/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;
	struct gfs2_rgrpd *rgd;
	char buf[sizeof(struct gfs2_rindex)];
	struct file_ra_state ra_state;
	u64 junk = ip->i_di.di_size;
	int error;

	/* The rindex file must be a whole number of entries. */
	if (do_div(junk, sizeof(struct gfs2_rindex))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	/* Throw away the stale descriptors before re-reading. */
	clear_rgrpdi(sdp);

	file_ra_state_init(&ra_state, inode->i_mapping);
	for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
		loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
		error = gfs2_internal_read(ip, &ra_state, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (!error)
			/* Clean EOF: all entries have been read. */
			break;
		if (error != sizeof(struct gfs2_rindex)) {
			/* Short read means a truncated entry. */
			if (error > 0)
				error = -EIO;
			goto fail;
		}

		rgd = kzalloc(sizeof(struct gfs2_rgrpd), GFP_NOFS);
		error = -ENOMEM;
		if (!rgd)
			goto fail;

		mutex_init(&rgd->rd_mutex);
		lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
		rgd->rd_sbd = sdp;

		/* Link in before the fallible steps so the fail path's
		   clear_rgrpdi() frees this rgd too. */
		list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
		list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);

		gfs2_rindex_in(&rgd->rd_ri, buf);
		error = compute_bitstructs(rgd);
		if (error)
			goto fail;

		error = gfs2_glock_get(sdp, rgd->rd_ri.ri_addr,
				       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
		if (error)
			goto fail;

		rgd->rd_gl->gl_object = rgd;
		/* Force a header re-read on the first gfs2_rgrp_bh_get(). */
		rgd->rd_rg_vn = rgd->rd_gl->gl_vn - 1;
	}

	/* Record which glock version this copy corresponds to. */
	sdp->sd_rindex_vn = ip->i_gl->gl_vn;
	return 0;

fail:
	clear_rgrpdi(sdp);
	return error;
}
503
/**
 * gfs2_rindex_hold - Grab a lock on the rindex
 * @sdp: The GFS2 superblock
 * @ri_gh: the glock holder
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation.  We keep this lock
 * for quite long periods of time compared to other locks.  This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	int error;

	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
	if (error)
		return error;

	/* Read new copy from disk if we don't have the latest */
	if (sdp->sd_rindex_vn != gl->gl_vn) {
		mutex_lock(&sdp->sd_rindex_mutex);
		/* Re-check under the mutex: another task may have done
		   the update while we waited for it. */
		if (sdp->sd_rindex_vn != gl->gl_vn) {
			error = gfs2_ri_update(ip);
			if (error)
				/* On failure, the caller gets no lock. */
				gfs2_glock_dq_uninit(ri_gh);
		}
		mutex_unlock(&sdp->sd_rindex_mutex);
	}

	return error;
}
545
/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
 *
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_ri.ri_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	mutex_lock(&rgd->rd_mutex);

	/* Already read in by someone else: just bump the refcount. */
	spin_lock(&sdp->sd_rindex_spin);
	if (rgd->rd_bh_count) {
		rgd->rd_bh_count++;
		spin_unlock(&sdp->sd_rindex_spin);
		mutex_unlock(&rgd->rd_mutex);
		return 0;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	/* Issue reads for all the bitmap blocks... */
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_ri.ri_addr + x, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	/* ...then wait for them and verify the metadata types: the
	   first block is an RG header, the rest are RB blocks. */
	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	/* Refresh the in-core rgrp header if the glock version moved on. */
	if (rgd->rd_rg_vn != gl->gl_vn) {
		gfs2_rgrp_in(&rgd->rd_rg, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_rg_vn = gl->gl_vn;
	}

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone = rgd->rd_rg.rg_free;
	rgd->rd_bh_count++;
	spin_unlock(&sdp->sd_rindex_spin);

	mutex_unlock(&rgd->rd_mutex);

	return 0;

fail:
	/* Release whatever buffers made it in before the error.  x covers
	   both loops: the y loop only touches already-read blocks. */
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}
	mutex_unlock(&rgd->rd_mutex);

	return error;
}
620
621void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
622{
623 struct gfs2_sbd *sdp = rgd->rd_sbd;
624
625 spin_lock(&sdp->sd_rindex_spin);
626 gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
627 rgd->rd_bh_count++;
628 spin_unlock(&sdp->sd_rindex_spin);
629}
630
/**
 * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Drops one reference; the last put frees the bitmap buffers and any
 * clone bitmaps.
 */

void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	int x, length = rgd->rd_ri.ri_length;

	spin_lock(&sdp->sd_rindex_spin);
	gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
	if (--rgd->rd_bh_count) {
		/* Still referenced by someone else. */
		spin_unlock(&sdp->sd_rindex_spin);
		return;
	}

	/* Last reference: release buffers and clone bitmaps. */
	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
	}

	spin_unlock(&sdp->sd_rindex_spin);
}
659
660void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
661{
662 struct gfs2_sbd *sdp = rgd->rd_sbd;
663 unsigned int length = rgd->rd_ri.ri_length;
664 unsigned int x;
665
666 for (x = 0; x < length; x++) {
667 struct gfs2_bitmap *bi = rgd->rd_bits + x;
668 if (!bi->bi_clone)
669 continue;
670 memcpy(bi->bi_clone + bi->bi_offset,
671 bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
672 }
673
674 spin_lock(&sdp->sd_rindex_spin);
675 rgd->rd_free_clone = rgd->rd_rg.rg_free;
676 spin_unlock(&sdp->sd_rindex_spin);
677}
678
/**
 * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
 * @ip: the incore GFS2 inode structure
 *
 * Zeroes the inode's embedded allocation context before handing it back,
 * so each reservation starts from a clean slate.
 *
 * Returns: the struct gfs2_alloc
 */

struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = &ip->i_alloc;

	/* FIXME: Should assert that the correct locks are held here... */
	memset(al, 0, sizeof(*al));
	return al;
}
694
695/**
696 * try_rgrp_fit - See if a given reservation will fit in a given RG
697 * @rgd: the RG data
698 * @al: the struct gfs2_alloc structure describing the reservation
699 *
700 * If there's room for the requested blocks to be allocated from the RG:
701 * Sets the $al_reserved_data field in @al.
702 * Sets the $al_reserved_meta field in @al.
703 * Sets the $al_rgd field in @al.
704 *
705 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
706 */
707
708static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
709{
710 struct gfs2_sbd *sdp = rgd->rd_sbd;
711 int ret = 0;
712
713 spin_lock(&sdp->sd_rindex_spin);
714 if (rgd->rd_free_clone >= al->al_requested) {
715 al->al_rgd = rgd;
716 ret = 1;
717 }
718 spin_unlock(&sdp->sd_rindex_spin);
719
720 return ret;
721}
722
/**
 * recent_rgrp_first - get first RG from "recent" list
 * @sdp: The GFS2 superblock
 * @rglast: address of the rgrp used last (0 if none)
 *
 * If @rglast is still on the recent list, start from it; otherwise fall
 * back to the head of the list.
 *
 * Returns: The first rgrp in the recent list, or NULL if the list is empty
 */

static struct gfs2_rgrpd *recent_rgrp_first(struct gfs2_sbd *sdp,
					    u64 rglast)
{
	struct gfs2_rgrpd *rgd = NULL;

	spin_lock(&sdp->sd_rindex_spin);

	if (list_empty(&sdp->sd_rindex_recent_list))
		goto out;

	if (!rglast)
		goto first;

	/* Look for the rgrp we allocated from last time */
	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
		if (rgd->rd_ri.ri_addr == rglast)
			goto out;
	}

first:
	/* Not found (or no last rgrp): take the head of the recent list */
	rgd = list_entry(sdp->sd_rindex_recent_list.next, struct gfs2_rgrpd,
			 rd_recent);
out:
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}
756
/**
 * recent_rgrp_next - get next RG from "recent" list
 * @cur_rgd: current rgrp
 * @remove: if true, also unlink @cur_rgd from the recent list
 *
 * Returns: The next rgrp in the recent list, or NULL if @cur_rgd was last
 */

static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd,
					   int remove)
{
	struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
	struct list_head *head;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);

	head = &sdp->sd_rindex_recent_list;

	list_for_each_entry(rgd, head, rd_recent) {
		if (rgd == cur_rgd) {
			/* Found current entry: answer is its successor,
			   or NULL when it is the tail of the list */
			if (cur_rgd->rd_recent.next != head)
				rgd = list_entry(cur_rgd->rd_recent.next,
						 struct gfs2_rgrpd, rd_recent);
			else
				rgd = NULL;

			if (remove)
				list_del(&cur_rgd->rd_recent);

			goto out;
		}
	}

	/* @cur_rgd is no longer on the list; restart from the head */
	rgd = NULL;
	if (!list_empty(head))
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_recent);

out:
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}
799
/**
 * recent_rgrp_add - add an RG to tail of "recent" list
 * @new_rgd: The rgrp to add
 *
 * The list length is capped at roughly (total rgrps / journals), so each
 * node keeps its own working set of recently-used resource groups.  The
 * rgrp is not added if it is already on the list or the list is full.
 */

static void recent_rgrp_add(struct gfs2_rgrpd *new_rgd)
{
	struct gfs2_sbd *sdp = new_rgd->rd_sbd;
	struct gfs2_rgrpd *rgd;
	unsigned int count = 0;
	unsigned int max = sdp->sd_rgrps / gfs2_jindex_size(sdp);

	spin_lock(&sdp->sd_rindex_spin);

	list_for_each_entry(rgd, &sdp->sd_rindex_recent_list, rd_recent) {
		if (rgd == new_rgd)
			goto out;	/* already present */

		if (++count >= max)
			goto out;	/* list is full */
	}
	list_add_tail(&new_rgd->rd_recent, &sdp->sd_rindex_recent_list);

out:
	spin_unlock(&sdp->sd_rindex_spin);
}
827
/**
 * forward_rgrp_get - get an rgrp to try next from full list
 * @sdp: The GFS2 superblock
 *
 * Lazily initializes the forward pointer: each journal (node) starts its
 * full-list scan at a different offset (rgrps * jid / journals) to spread
 * contention across the filesystem.
 *
 * Returns: The rgrp to try next
 */

static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd;
	unsigned int journals = gfs2_jindex_size(sdp);
	unsigned int rg = 0, x;

	spin_lock(&sdp->sd_rindex_spin);

	rgd = sdp->sd_rindex_forward;
	if (!rgd) {
		if (sdp->sd_rgrps >= journals)
			rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;

		/* Walk forward to the rg'th resource group */
		for (x = 0, rgd = gfs2_rgrpd_get_first(sdp); x < rg;
		     x++, rgd = gfs2_rgrpd_get_next(rgd))
			/* Do Nothing */;

		sdp->sd_rindex_forward = rgd;
	}

	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}
859
/**
 * forward_rgrp_set - set the forward rgrp pointer
 * @sdp: the filesystem
 * @rgd: The new forward rgrp
 *
 * Records where the next full-list scan should begin, under the
 * rindex spinlock that protects sd_rindex_forward.
 */

static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
{
	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = rgd;
	spin_unlock(&sdp->sd_rindex_spin);
}
873
/**
 * get_local_rgrp - Choose and lock a rgrp for allocation
 * @ip: the inode to reserve space for
 * @rgp: the chosen and locked rgrp
 *
 * Try to acquire rgrp in way which avoids contending with others.
 *
 * Strategy: first try recently-successful rgrps with LM_FLAG_TRY (never
 * block); then sweep the full rgrp list, and only drop the TRY flag
 * (i.e. allow blocking) on the second pass if some rgrps were skipped
 * due to lock contention.
 *
 * Returns: errno (0 with al->al_rgd_gh held on success, -ENOSPC when
 * no rgrp can satisfy the reservation)
 */

static int get_local_rgrp(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd, *begin = NULL;
	struct gfs2_alloc *al = &ip->i_alloc;
	int flags = LM_FLAG_TRY;
	int skipped = 0;
	int loops = 0;
	int error;

	/* Try recently successful rgrps */

	rgd = recent_rgrp_first(sdp, ip->i_last_rg_alloc);

	while (rgd) {
		error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
					   LM_FLAG_TRY, &al->al_rgd_gh);
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			/* Locked but too full: drop lock and remove the
			   rgrp from the recent list (remove == 1) */
			gfs2_glock_dq_uninit(&al->al_rgd_gh);
			rgd = recent_rgrp_next(rgd, 1);
			break;

		case GLR_TRYFAILED:
			/* Contended: leave it on the list, try the next */
			rgd = recent_rgrp_next(rgd, 0);
			break;

		default:
			return error;
		}
	}

	/* Go through full list of rgrps */

	begin = rgd = forward_rgrp_get(sdp);

	for (;;) {
		error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, flags,
					   &al->al_rgd_gh);
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			gfs2_glock_dq_uninit(&al->al_rgd_gh);
			break;

		case GLR_TRYFAILED:
			skipped++;
			break;

		default:
			return error;
		}

		/* Advance, wrapping from the last rgrp back to the first */
		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);

		if (rgd == begin) {
			/* Completed a full sweep.  Give up after two loops,
			   or after one if nothing was skipped; otherwise
			   retry in blocking mode (clear LM_FLAG_TRY) */
			if (++loops >= 2 || !skipped)
				return -ENOSPC;
			flags = 0;
		}
	}

out:
	/* Remember where we allocated, so the next reservation starts here */
	ip->i_last_rg_alloc = rgd->rd_ri.ri_addr;

	if (begin) {
		/* Success came from the full-list sweep: promote this rgrp
		   to the recent list and advance the forward pointer */
		recent_rgrp_add(rgd);
		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);
		forward_rgrp_set(sdp, rgd);
	}

	return 0;
}
964
/**
 * gfs2_inplace_reserve_i - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @file: source file of the caller (for debugging, via the
 *        gfs2_inplace_reserve() macro)
 * @line: source line of the caller
 *
 * The caller must have set al->al_requested (via gfs2_alloc_get()) before
 * calling.  On success, holds the rindex glock and an exclusive glock on
 * the chosen rgrp; release both with gfs2_inplace_release().
 *
 * Returns: errno
 */

int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	int error;

	/* A zero-block reservation is a caller bug */
	if (gfs2_assert_warn(sdp, al->al_requested))
		return -EINVAL;

	error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	if (error)
		return error;

	error = get_local_rgrp(ip);
	if (error) {
		gfs2_glock_dq_uninit(&al->al_ri_gh);
		return error;
	}

	/* Record the call site so over-allocation warnings can name it */
	al->al_file = file;
	al->al_line = line;

	return 0;
}
996
/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().
 * Warns (naming the reserving call site) if more blocks were allocated
 * than were reserved, then drops the rgrp and rindex glocks.
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;

	if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
		fs_warn(sdp, "al_alloced = %u, al_requested = %u "
			     "al_file = %s, al_line = %u\n",
			     al->al_alloced, al->al_requested, al->al_file,
			     al->al_line);

	al->al_rgd = NULL;
	gfs2_glock_dq_uninit(&al->al_rgd_gh);
	gfs2_glock_dq_uninit(&al->al_ri_gh);
}
1019
/**
 * gfs2_get_block_type - Check a block in a RG is of given type
 * @rgd: the resource group holding the block
 * @block: the block number (filesystem-wide)
 *
 * Returns: The block type (GFS2_BLKST_*)
 */

unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_bitmap *bi = NULL;
	u32 length, rgrp_block, buf_block;
	unsigned int buf;
	unsigned char type;

	length = rgd->rd_ri.ri_length;
	/* Translate to an rgrp-relative block number */
	rgrp_block = block - rgd->rd_ri.ri_data0;

	/* Find the bitmap buffer covering this block; each bitmap byte
	   holds GFS2_NBBY (4) two-bit block states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}

	gfs2_assert(rgd->rd_sbd, buf < length);
	/* Offset of the block within the found bitmap buffer */
	buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;

	type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, buf_block);

	return type;
}
1052
/**
 * rgblk_search - find a block in @old_state, change allocation
 *                state to @new_state
 * @rgd: the resource group descriptor
 * @goal: the goal block within the RG (start here to search for avail block)
 * @old_state: GFS2_BLKST_XXX the before-allocation state to find
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Walk rgrp's bitmap to find bits that represent a block in @old_state.
 * Add the found bitmap buffer to the transaction.
 * Set the found bits to @new_state to change block's allocation state.
 *
 * This function never fails, because we wouldn't call it unless we
 * know (from reservation results, etc.) that a block is available.
 *
 * Scope of @goal and returned block is just within rgrp, not the whole
 * filesystem.
 *
 * Returns: the block number allocated
 */

static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
			unsigned char old_state, unsigned char new_state)
{
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_ri.ri_length;
	u32 blk = 0;
	unsigned int buf, x;

	/* Find bitmap block that contains bits for goal block */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}

	gfs2_assert(rgd->rd_sbd, buf < length);

	/* Convert scope of "goal" from rgrp-wide to within found bit block */
	goal -= bi->bi_start * GFS2_NBBY;

	/* Search (up to entire) bitmap in this rgrp for allocatable block.
	   "x <= length", instead of "x < length", because we typically start
	   the search in the middle of a bit block, but if we can't find an
	   allocatable block anywhere else, we want to be able wrap around and
	   search in the first part of our first-searched bit block. */
	for (x = 0; x <= length; x++) {
		/* Search the clone bitmap if one exists (it holds the
		   state including not-yet-committed frees); otherwise
		   search the live buffer data */
		if (bi->bi_clone)
			blk = gfs2_bitfit(rgd, bi->bi_clone + bi->bi_offset,
					  bi->bi_len, goal, old_state);
		else
			blk = gfs2_bitfit(rgd,
					  bi->bi_bh->b_data + bi->bi_offset,
					  bi->bi_len, goal, old_state);
		if (blk != BFITNOENT)
			break;

		/* Try next bitmap block (wrap back to rgrp header if at end) */
		buf = (buf + 1) % length;
		bi = rgd->rd_bits + buf;
		goal = 0;
	}

	/* "Never fails" assumption violated: fall back to block 0 after
	   withdrawing (or warning about) the filesystem */
	if (gfs2_assert_withdraw(rgd->rd_sbd, x <= length))
		blk = 0;

	/* Flip the bits in the live bitmap (journaled) and keep any clone
	   bitmap in step so subsequent searches see the allocation */
	gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
	gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
		    bi->bi_len, blk, new_state);
	if (bi->bi_clone)
		gfs2_setbit(rgd, bi->bi_clone + bi->bi_offset,
			    bi->bi_len, blk, new_state);

	return bi->bi_start * GFS2_NBBY + blk;
}
1128
/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s), or NULL (after
 * flagging a filesystem inconsistency) if no rgrp covers @bstart
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_bitmap *bi = NULL;
	u32 length, rgrp_blk, buf_blk;
	unsigned int buf;

	rgd = gfs2_blk2rgrpd(sdp, bstart);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	length = rgd->rd_ri.ri_length;

	/* Translate to an rgrp-relative block number */
	rgrp_blk = bstart - rgd->rd_ri.ri_data0;

	while (blen--) {
		/* Locate the bitmap buffer covering the current block */
		for (buf = 0; buf < length; buf++) {
			bi = rgd->rd_bits + buf;
			if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
				break;
		}

		gfs2_assert(rgd->rd_sbd, buf < length);

		buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
		rgrp_blk++;

		/* Lazily create a clone of the bitmap on first free, so
		   allocation searches keep seeing the block as in-use
		   until the free is committed */
		if (!bi->bi_clone) {
			bi->bi_clone = kmalloc(bi->bi_bh->b_size,
					       GFP_NOFS | __GFP_NOFAIL);
			memcpy(bi->bi_clone + bi->bi_offset,
			       bi->bi_bh->b_data + bi->bi_offset,
			       bi->bi_len);
		}
		/* Journal the live bitmap change; the clone keeps the
		   pre-free state on purpose */
		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
		gfs2_setbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
			    bi->bi_len, buf_blk, new_state);
	}

	return rgd;
}
1184
/**
 * gfs2_alloc_data - Allocate a data block
 * @ip: the inode to allocate the data block for
 *
 * Caller must hold an inplace reservation (al->al_rgd set).  Updates the
 * rgrp header, statfs and quota accounting within the current transaction.
 *
 * Returns: the allocated block (filesystem-wide block number)
 */

u64 gfs2_alloc_data(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 goal, blk;
	u64 block;

	/* Prefer the inode's data goal block if it lies in this rgrp,
	   otherwise continue from the rgrp's last data allocation */
	if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_data))
		goal = ip->i_di.di_goal_data - rgd->rd_ri.ri_data0;
	else
		goal = rgd->rd_last_alloc_data;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_data = blk;

	/* Convert back to a filesystem-wide block number */
	block = rgd->rd_ri.ri_data0 + blk;
	ip->i_di.di_goal_data = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	/* Write the updated rgrp header back through the journal */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
1228
/**
 * gfs2_alloc_meta - Allocate a metadata block
 * @ip: the inode to allocate the metadata block for
 *
 * Like gfs2_alloc_data(), but uses the metadata goal block and also
 * removes the block from any pending revoke (it is being reused).
 *
 * Returns: the allocated block (filesystem-wide block number)
 */

u64 gfs2_alloc_meta(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 goal, blk;
	u64 block;

	/* Prefer the inode's metadata goal block if it lies in this rgrp */
	if (rgrp_contains_block(&rgd->rd_ri, ip->i_di.di_goal_meta))
		goal = ip->i_di.di_goal_meta - rgd->rd_ri.ri_data0;
	else
		goal = rgd->rd_last_alloc_meta;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED);
	rgd->rd_last_alloc_meta = blk;

	block = rgd->rd_ri.ri_data0 + blk;
	ip->i_di.di_goal_meta = block;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;

	/* Write the updated rgrp header back through the journal */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	gfs2_statfs_change(sdp, 0, -1, 0);
	gfs2_quota_change(ip, +1, ip->i_di.di_uid, ip->i_di.di_gid);
	/* Reusing a freed block: cancel any outstanding revoke for it */
	gfs2_trans_add_unrevoke(sdp, block);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
1273
/**
 * gfs2_alloc_di - Allocate a dinode
 * @dip: the directory that the inode is going in
 * @generation: filled in with the new dinode's generation number
 *
 * Marks the block GFS2_BLKST_DINODE (not USED) and updates the rgrp's
 * dinode count as well as its free count.
 *
 * Returns: the block allocated
 */

u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al = &dip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 blk;
	u64 block;

	blk = rgblk_search(rgd, rgd->rd_last_alloc_meta,
			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE);

	rgd->rd_last_alloc_meta = blk;

	block = rgd->rd_ri.ri_data0 + blk;

	gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free);
	rgd->rd_rg.rg_free--;
	rgd->rd_rg.rg_dinodes++;
	/* Hand out the next generation number for this rgrp */
	*generation = rgd->rd_rg.rg_igeneration++;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	/* One fewer free block, one more dinode */
	gfs2_statfs_change(sdp, 0, -1, +1);
	gfs2_trans_add_unrevoke(sdp, block);

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
1314
/**
 * gfs2_free_data - free a contiguous run of data block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run (must lie within one rgrp)
 *
 * Marks the blocks free in the bitmap, updates the rgrp header, and
 * adjusts statfs/quota accounting within the current transaction.
 */

void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen,
			 ip->i_di.di_uid, ip->i_di.di_gid);
}
1343
/**
 * gfs2_free_meta - free a contiguous run of metadata block(s)
 * @ip: the inode these blocks are being freed from
 * @bstart: first block of a run of contiguous blocks
 * @blen: the length of the block run (must lie within one rgrp)
 *
 * Like gfs2_free_data(), but additionally wipes the blocks from the
 * in-memory metadata cache (gfs2_meta_wipe) since they held metadata.
 */

void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;

	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
	if (!rgd)
		return;

	rgd->rd_rg.rg_free += blen;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	gfs2_trans_add_rg(rgd);

	gfs2_statfs_change(sdp, 0, +blen, 0);
	gfs2_quota_change(ip, -(s64)blen, ip->i_di.di_uid, ip->i_di.di_gid);
	gfs2_meta_wipe(ip, bstart, blen);
}
1372
/*
 * gfs2_unlink_di - mark an inode's dinode block as unlinked
 * @inode: the inode whose dinode is being unlinked
 *
 * Transitions the dinode block to GFS2_BLKST_UNLINKED (not FREE); the
 * block is actually freed later, once the inode is deallocated.
 */
void gfs2_unlink_di(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_rgrpd *rgd;
	u64 blkno = ip->i_num.no_addr;

	rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
	if (!rgd)
		return;
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);
	gfs2_trans_add_rg(rgd);
}
1387
/*
 * gfs2_free_uninit_di - free a dinode block, without quota/cache updates
 * @rgd: the resource group expected to contain the dinode
 * @blkno: the dinode's block number
 *
 * Marks the block free, decrements the rgrp's dinode count and updates
 * statfs accounting.  Quota adjustment and metadata-cache wiping are left
 * to the caller (see gfs2_free_di()).
 */
static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	/* The block must belong to the rgrp the caller passed in */
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	if (!rgd->rd_rg.rg_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_rg.rg_dinodes--;
	rgd->rd_rg.rg_free++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(&rgd->rd_rg, rgd->rd_bits[0].bi_bh->b_data);

	/* One more free block, one fewer dinode */
	gfs2_statfs_change(sdp, 0, +1, -1);
	gfs2_trans_add_rg(rgd);
}
1409
1410
/*
 * gfs2_free_di - free an inode's dinode block
 * @rgd: the resource group containing the dinode
 * @ip: the inode being deallocated
 *
 * Frees the dinode block, then charges the quota change back to the
 * inode's owner and wipes the block from the metadata cache.
 */
void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
	gfs2_free_uninit_di(rgd, ip->i_num.no_addr);
	gfs2_quota_change(ip, -1, ip->i_di.di_uid, ip->i_di.di_gid);
	gfs2_meta_wipe(ip, ip->i_num.no_addr, 1);
}
1417
/**
 * gfs2_rlist_add - add a RG to a list of RGs
 * @sdp: the filesystem
 * @rlist: the list of resource groups
 * @block: the block
 *
 * Figure out what RG a block belongs to and add that RG to the list
 *
 * Must be called before gfs2_rlist_alloc() (i.e. before rl_ghs is
 * allocated); duplicates are silently ignored.
 *
 * FIXME: Don't use NOFAIL
 *
 */

void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
		    u64 block)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrpd **tmp;
	unsigned int new_space;
	unsigned int x;

	/* Adding after the holders were allocated is a caller bug */
	if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
		return;

	rgd = gfs2_blk2rgrpd(sdp, block);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)block);
		return;
	}

	/* Already on the list? */
	for (x = 0; x < rlist->rl_rgrps; x++)
		if (rlist->rl_rgd[x] == rgd)
			return;

	/* Grow the array in chunks of 10 entries */
	if (rlist->rl_rgrps == rlist->rl_space) {
		new_space = rlist->rl_space + 10;

		tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
			      GFP_NOFS | __GFP_NOFAIL);

		if (rlist->rl_rgd) {
			memcpy(tmp, rlist->rl_rgd,
			       rlist->rl_space * sizeof(struct gfs2_rgrpd *));
			kfree(rlist->rl_rgd);
		}

		rlist->rl_space = new_space;
		rlist->rl_rgd = tmp;
	}

	rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
}
1470
1471/**
1472 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
1473 * and initialize an array of glock holders for them
1474 * @rlist: the list of resource groups
1475 * @state: the lock state to acquire the RG lock in
1476 * @flags: the modifier flags for the holder structures
1477 *
1478 * FIXME: Don't use NOFAIL
1479 *
1480 */
1481
1482void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
1483 int flags)
1484{
1485 unsigned int x;
1486
1487 rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
1488 GFP_NOFS | __GFP_NOFAIL);
1489 for (x = 0; x < rlist->rl_rgrps; x++)
1490 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
1491 state, flags,
1492 &rlist->rl_ghs[x]);
1493}
1494
1495/**
1496 * gfs2_rlist_free - free a resource group list
1497 * @list: the list of resource groups
1498 *
1499 */
1500
1501void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
1502{
1503 unsigned int x;
1504
1505 kfree(rlist->rl_rgd);
1506
1507 if (rlist->rl_ghs) {
1508 for (x = 0; x < rlist->rl_rgrps; x++)
1509 gfs2_holder_uninit(&rlist->rl_ghs[x]);
1510 kfree(rlist->rl_ghs);
1511 }
1512}
1513
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
new file mode 100644
index 000000000000..9eedfd12bfff
--- /dev/null
+++ b/fs/gfs2/rgrp.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __RGRP_DOT_H__
11#define __RGRP_DOT_H__
12
13struct gfs2_rgrpd;
14struct gfs2_sbd;
15struct gfs2_holder;
16
17void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);
18
19struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk);
20struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
21struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
22
23void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
24int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh);
25
26int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd);
27void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd);
28void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd);
29
30void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd);
31
32struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
static inline void gfs2_alloc_put(struct gfs2_inode *ip)
{
	return; /* So we can see where ip->i_alloc is used */
}
37
38int gfs2_inplace_reserve_i(struct gfs2_inode *ip,
39 char *file, unsigned int line);
40#define gfs2_inplace_reserve(ip) \
41gfs2_inplace_reserve_i((ip), __FILE__, __LINE__)
42
43void gfs2_inplace_release(struct gfs2_inode *ip);
44
45unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block);
46
47u64 gfs2_alloc_data(struct gfs2_inode *ip);
48u64 gfs2_alloc_meta(struct gfs2_inode *ip);
49u64 gfs2_alloc_di(struct gfs2_inode *ip, u64 *generation);
50
51void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen);
52void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
53void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
54void gfs2_unlink_di(struct inode *inode);
55
struct gfs2_rgrp_list {
	unsigned int rl_rgrps;		/* number of rgrps on the list */
	unsigned int rl_space;		/* allocated capacity of rl_rgd */
	struct gfs2_rgrpd **rl_rgd;	/* array of rgrp pointers */
	struct gfs2_holder *rl_ghs;	/* glock holders, one per rgrp */
};
62
63void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
64 u64 block);
65void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state,
66 int flags);
67void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
68
69#endif /* __RGRP_DOT_H__ */
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
new file mode 100644
index 000000000000..6a78b1b32e25
--- /dev/null
+++ b/fs/gfs2/super.c
@@ -0,0 +1,976 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/crc32.h>
16#include <linux/gfs2_ondisk.h>
17#include <linux/bio.h>
18#include <linux/lm_interface.h>
19
20#include "gfs2.h"
21#include "incore.h"
22#include "bmap.h"
23#include "dir.h"
24#include "glock.h"
25#include "glops.h"
26#include "inode.h"
27#include "log.h"
28#include "meta_io.h"
29#include "quota.h"
30#include "recovery.h"
31#include "rgrp.h"
32#include "super.h"
33#include "trans.h"
34#include "util.h"
35
36static const u32 gfs2_old_fs_formats[] = {
37 0
38};
39
40static const u32 gfs2_old_multihost_formats[] = {
41 0
42};
43
/**
 * gfs2_tune_init - Fill a gfs2_tune structure with default values
 * @gt: tune
 *
 * Time-based values are in seconds unless noted otherwise (the gt_greedy_*
 * values are in jiffies); size values are in bytes or counts.
 */

void gfs2_tune_init(struct gfs2_tune *gt)
{
	spin_lock_init(&gt->gt_spin);

	gt->gt_ilimit = 100;
	gt->gt_ilimit_tries = 3;
	gt->gt_ilimit_min = 1;
	gt->gt_demote_secs = 300;
	gt->gt_incore_log_blocks = 1024;
	gt->gt_log_flush_secs = 60;
	gt->gt_jindex_refresh_secs = 60;
	gt->gt_scand_secs = 15;
	gt->gt_recoverd_secs = 60;
	gt->gt_logd_secs = 1;
	gt->gt_quotad_secs = 5;
	gt->gt_quota_simul_sync = 64;
	gt->gt_quota_warn_period = 10;
	gt->gt_quota_scale_num = 1;
	gt->gt_quota_scale_den = 1;
	gt->gt_quota_cache_secs = 300;
	gt->gt_quota_quantum = 60;
	gt->gt_atime_quantum = 3600;
	gt->gt_new_files_jdata = 0;
	gt->gt_new_files_directio = 0;
	gt->gt_max_atomic_write = 4 << 20;	/* 4 MB */
	gt->gt_max_readahead = 1 << 18;		/* 256 KB */
	gt->gt_lockdump_size = 131072;
	gt->gt_stall_secs = 600;
	gt->gt_complain_secs = 10;
	gt->gt_reclaim_limit = 5000;
	gt->gt_entries_per_readdir = 32;
	gt->gt_prefetch_secs = 10;
	gt->gt_greedy_default = HZ / 10;
	gt->gt_greedy_quantum = HZ / 40;
	gt->gt_greedy_max = HZ / 4;
	gt->gt_statfs_quantum = 30;
	gt->gt_statfs_slow = 0;
}
88
/**
 * gfs2_check_sb - Check superblock
 * @sdp: the filesystem
 * @sb: The superblock
 * @silent: Don't print a message if the check fails
 *
 * Checks the version code of the FS is one that we understand how to
 * read and that the sizes of the various on-disk structures have not
 * changed.
 *
 * Returns: 0 on success, -EINVAL if the superblock is invalid or the
 * on-disk format is incompatible (and not upgradable here)
 */

int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent)
{
	unsigned int x;

	if (sb->sb_header.mh_magic != GFS2_MAGIC ||
	    sb->sb_header.mh_type != GFS2_METATYPE_SB) {
		if (!silent)
			printk(KERN_WARNING "GFS2: not a GFS2 filesystem\n");
		return -EINVAL;
	}

	/* If format numbers match exactly, we're done. */

	if (sb->sb_fs_format == GFS2_FORMAT_FS &&
	    sb->sb_multihost_format == GFS2_FORMAT_MULTI)
		return 0;

	/* Otherwise, the format must be one of the known old formats
	   (the tables are zero-terminated) and the upgrade option set */

	if (sb->sb_fs_format != GFS2_FORMAT_FS) {
		for (x = 0; gfs2_old_fs_formats[x]; x++)
			if (gfs2_old_fs_formats[x] == sb->sb_fs_format)
				break;

		if (!gfs2_old_fs_formats[x]) {
			printk(KERN_WARNING
			       "GFS2: code version (%u, %u) is incompatible "
			       "with ondisk format (%u, %u)\n",
			       GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
			       sb->sb_fs_format, sb->sb_multihost_format);
			printk(KERN_WARNING
			       "GFS2: I don't know how to upgrade this FS\n");
			return -EINVAL;
		}
	}

	if (sb->sb_multihost_format != GFS2_FORMAT_MULTI) {
		for (x = 0; gfs2_old_multihost_formats[x]; x++)
			if (gfs2_old_multihost_formats[x] ==
			    sb->sb_multihost_format)
				break;

		if (!gfs2_old_multihost_formats[x]) {
			printk(KERN_WARNING
			       "GFS2: code version (%u, %u) is incompatible "
			       "with ondisk format (%u, %u)\n",
			       GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
			       sb->sb_fs_format, sb->sb_multihost_format);
			printk(KERN_WARNING
			       "GFS2: I don't know how to upgrade this FS\n");
			return -EINVAL;
		}
	}

	/* Old but known format: only mountable with the "upgrade" option */
	if (!sdp->sd_args.ar_upgrade) {
		printk(KERN_WARNING
		       "GFS2: code version (%u, %u) is incompatible "
		       "with ondisk format (%u, %u)\n",
		       GFS2_FORMAT_FS, GFS2_FORMAT_MULTI,
		       sb->sb_fs_format, sb->sb_multihost_format);
		printk(KERN_INFO
		       "GFS2: Use the \"upgrade\" mount option to upgrade "
		       "the FS\n");
		printk(KERN_INFO "GFS2: See the manual for more details\n");
		return -EINVAL;
	}

	return 0;
}
167
168
/*
 * end_bio_io_page - bio completion handler for gfs2_read_super()
 *
 * Marks the page up-to-date on success and unlocks it so the submitter's
 * wait_on_page_locked() returns.  Returning 1 while bi_size is nonzero
 * tells the block layer the bio is not yet fully complete (old bio
 * end_io protocol).
 */
static int end_bio_io_page(struct bio *bio, unsigned int bytes_done, int error)
{
	struct page *page = bio->bi_private;
	if (bio->bi_size)
		return 1;

	if (!error)
		SetPageUptodate(page);
	else
		printk(KERN_WARNING "gfs2: error %d reading superblock\n", error);
	unlock_page(page);
	return 0;
}
182
/*
 * gfs2_read_super - read one page of superblock data from disk
 * @sb: the VFS super block (for the block device)
 * @sector: the disk sector to read
 *
 * Reads synchronously via a one-page bio, bypassing the page cache.
 * Returns a locked-then-unlocked, up-to-date page the caller must free
 * with __free_page(), or NULL on allocation or I/O failure.
 */
struct page *gfs2_read_super(struct super_block *sb, sector_t sector)
{
	struct page *page;
	struct bio *bio;

	page = alloc_page(GFP_KERNEL);
	if (unlikely(!page))
		return NULL;

	/* The completion handler sets PG_uptodate and unlocks the page;
	   lock it now so we can wait for that below */
	ClearPageUptodate(page);
	ClearPageDirty(page);
	lock_page(page);

	bio = bio_alloc(GFP_KERNEL, 1);
	if (unlikely(!bio)) {
		__free_page(page);
		return NULL;
	}

	bio->bi_sector = sector;
	bio->bi_bdev = sb->s_bdev;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	bio->bi_end_io = end_bio_io_page;
	bio->bi_private = page;
	submit_bio(READ_SYNC | (1 << BIO_RW_META), bio);
	wait_on_page_locked(page);
	bio_put(bio);
	/* Page still not up to date means the read failed */
	if (!PageUptodate(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}
217
/**
 * gfs2_read_sb - Read and validate the super block, derive fs geometry
 * @sdp: The GFS2 superblock
 * @gl: the glock for the superblock (assumed to be held)
 * @silent: Don't print a message if the read or check fails
 *
 * Reads the on-disk super block into sd_sb, validates it, and then
 * computes the derived in-core geometry: block-size shifts, pointers
 * per dinode/indirect block, directory hash sizes, the worst-case
 * directory-insert reservation, and the metadata-tree height tables.
 *
 * Returns: errno
 */

int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent)
{
	u32 hash_blocks, ind_blocks, leaf_blocks;
	u32 tmp_blocks;
	unsigned int x;
	int error;
	struct page *page;
	char *sb;

	page = gfs2_read_super(sdp->sd_vfs, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift);
	if (!page) {
		if (!silent)
			fs_err(sdp, "can't read superblock\n");
		return -EIO;
	}
	sb = kmap(page);
	gfs2_sb_in(&sdp->sd_sb, sb);
	kunmap(page);
	__free_page(page);

	error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
	if (error)
		return error;

	/* Conversion between fs blocks and basic (512-byte) blocks */
	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
			       GFS2_BASIC_BLOCK_SHIFT;
	sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;
	/* How many block pointers fit in a dinode / an indirect block */
	sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_dinode)) / sizeof(u64);
	sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
			  sizeof(struct gfs2_meta_header)) / sizeof(u64);
	/* Usable payload of a journaled metadata block */
	sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
	/* Directory hash table occupies half a block */
	sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
	sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
	sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
	sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header)) /
			       sizeof(struct gfs2_quota_change);

	/* Compute maximum reservation required to add a entry to a directory */

	hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH),
				   sdp->sd_jbsize);

	/* Indirect blocks needed to map the full-depth hash table */
	ind_blocks = 0;
	for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
		tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
		ind_blocks += tmp_blocks;
	}

	leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;

	sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;

	/*
	 * Build the table of maximum file size representable at each
	 * metadata tree height.  The loop stops when multiplying by
	 * sd_inptrs would overflow a u64, detected by dividing back
	 * and checking the result still matches.
	 */
	sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_dinode);
	sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
	for (x = 2;; x++) {
		u64 space, d;
		u32 m;

		space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		if (d != sdp->sd_heightsize[x - 1] || m)
			break;
		sdp->sd_heightsize[x] = space;
	}
	sdp->sd_max_height = x;
	gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);

	/* Same table for journaled-data files (smaller per-block payload) */
	sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
				 sizeof(struct gfs2_dinode);
	sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;
	for (x = 2;; x++) {
		u64 space, d;
		u32 m;

		space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
		d = space;
		m = do_div(d, sdp->sd_inptrs);

		if (d != sdp->sd_jheightsize[x - 1] || m)
			break;
		sdp->sd_jheightsize[x] = space;
	}
	sdp->sd_max_jheight = x;
	gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);

	return 0;
}
318
/**
 * gfs2_jindex_hold - Grab a lock on the jindex
 * @sdp: The GFS2 superblock
 * @ji_gh: the holder for the jindex glock
 *
 * This is very similar to the gfs2_rindex_hold() function, except that
 * in general we hold the jindex lock for longer periods of time and
 * we grab it far less frequently (in general) then the rgrp lock.
 *
 * Each pass of the loop takes the jindex glock shared, probes for the
 * next "journal%u" entry and, if one exists, drops the glock, builds a
 * gfs2_jdesc for it and retries.  When the lookup finally returns
 * -ENOENT, the loop exits *while still holding* the glock in @ji_gh —
 * on success (return 0) the caller owns that hold and must release it.
 *
 * Returns: errno
 */

int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
{
	struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
	struct qstr name;
	char buf[20];	/* big enough for "journal" + a u32 in decimal */
	struct gfs2_jdesc *jd;
	int error;

	name.name = buf;

	mutex_lock(&sdp->sd_jindex_mutex);

	for (;;) {
		error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED,
					   GL_LOCAL_EXCL, ji_gh);
		if (error)
			break;

		/* Probe for the next journal after the ones already known */
		name.len = sprintf(buf, "journal%u", sdp->sd_journals);
		name.hash = gfs2_disk_hash(name.name, name.len);

		error = gfs2_dir_search(sdp->sd_jindex, &name, NULL, NULL);
		if (error == -ENOENT) {
			/* All journals found: exit holding ji_gh */
			error = 0;
			break;
		}

		gfs2_glock_dq_uninit(ji_gh);

		if (error)
			break;

		error = -ENOMEM;
		jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
		if (!jd)
			break;

		jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1, NULL);
		if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
			if (!jd->jd_inode)
				error = -ENOENT;
			else
				error = PTR_ERR(jd->jd_inode);
			kfree(jd);
			break;
		}

		/* Publish the new descriptor; jids are assigned in order */
		spin_lock(&sdp->sd_jindex_spin);
		jd->jd_jid = sdp->sd_journals++;
		list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
		spin_unlock(&sdp->sd_jindex_spin);
	}

	mutex_unlock(&sdp->sd_jindex_mutex);

	return error;
}
388
/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 * Splices the whole jindex list onto a private list head while holding
 * the spinlock, then tears the descriptors down (including iput(), which
 * may sleep) without the lock held.
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	/* Steal the list: "list" becomes the head of the old entries */
	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	/* Free each descriptor outside the spinlock */
	while (!list_empty(&list)) {
		jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		kfree(jd);
	}
}
413
414static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
415{
416 struct gfs2_jdesc *jd;
417 int found = 0;
418
419 list_for_each_entry(jd, head, jd_list) {
420 if (jd->jd_jid == jid) {
421 found = 1;
422 break;
423 }
424 }
425
426 if (!found)
427 jd = NULL;
428
429 return jd;
430}
431
432struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
433{
434 struct gfs2_jdesc *jd;
435
436 spin_lock(&sdp->sd_jindex_spin);
437 jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
438 spin_unlock(&sdp->sd_jindex_spin);
439
440 return jd;
441}
442
443void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid)
444{
445 struct gfs2_jdesc *jd;
446
447 spin_lock(&sdp->sd_jindex_spin);
448 jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
449 if (jd)
450 jd->jd_dirty = 1;
451 spin_unlock(&sdp->sd_jindex_spin);
452}
453
454struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp)
455{
456 struct gfs2_jdesc *jd;
457 int found = 0;
458
459 spin_lock(&sdp->sd_jindex_spin);
460
461 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
462 if (jd->jd_dirty) {
463 jd->jd_dirty = 0;
464 found = 1;
465 break;
466 }
467 }
468 spin_unlock(&sdp->sd_jindex_spin);
469
470 if (!found)
471 jd = NULL;
472
473 return jd;
474}
475
/*
 * gfs2_jdesc_check - sanity-check a journal's size and allocation
 * @jd: the journal descriptor
 *
 * The journal inode must be between 8MB and 1GB, a whole multiple of
 * the filesystem block size, and fully allocated (no holes).  Any
 * violation marks the inode inconsistent and returns -EIO.
 *
 * Side effect: sets jd->jd_blocks from the inode size.
 *
 * Returns: errno
 */
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	int ar;
	int error;

	/* 8MB minimum, 1GB maximum, block-size aligned */
	if (ip->i_di.di_size < (8 << 20) || ip->i_di.di_size > (1 << 30) ||
	    (ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	jd->jd_blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;

	/* ar != 0 means part of the journal is unallocated: inconsistent */
	error = gfs2_write_alloc_required(ip, 0, ip->i_di.di_size, &ar);
	if (!error && ar) {
		gfs2_consist_inode(ip);
		error = -EIO;
	}

	return error;
}
498
/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Takes the transaction glock shared, invalidates our cached view of
 * the journal, verifies the journal head carries a clean-unmount mark,
 * initializes the log and quota state, and finally marks the journal
 * live.
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_holder t_gh;
	struct gfs2_log_header head;
	int error;

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
				   GL_LOCAL_EXCL, &t_gh);
	if (error)
		return error;

	/* Drop any stale cached journal metadata before reading the head */
	gfs2_meta_cache_flush(ip);
	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA | DIO_DATA);

	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
	if (error)
		goto fail;

	/* The journal must have been cleanly unmounted to go rw safely */
	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		error = -EIO;
		goto fail;
	}

	/* Initialize some head of the log stuff */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (error)
		goto fail;

	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_glock_dq_uninit(&t_gh);

	return 0;

fail:
	/* Don't cache the transaction glock after a failed transition */
	t_gh.gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_uninit(&t_gh);

	return error;
}
552
/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Syncs quota and statfs data, takes the transaction glock, flushes all
 * dirty metadata and shuts the log down, then marks the journal dead.
 *
 * Returns: errno
 */

int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	struct gfs2_holder t_gh;
	int error;

	gfs2_quota_sync(sdp);
	gfs2_statfs_sync(sdp);

	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
				   GL_LOCAL_EXCL | GL_NOCACHE,
				   &t_gh);
	/* If the fs is already withdrawn, press on regardless of the error */
	if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		return error;

	gfs2_meta_syncfs(sdp);
	gfs2_log_shutdown(sdp);

	clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	/* gh_gl is only set if the lock request above actually succeeded */
	if (t_gh.gh_gl)
		gfs2_glock_dq_uninit(&t_gh);

	gfs2_quota_cleanup(sdp);

	return error;
}
586
587int gfs2_statfs_init(struct gfs2_sbd *sdp)
588{
589 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
590 struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
591 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
592 struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
593 struct buffer_head *m_bh, *l_bh;
594 struct gfs2_holder gh;
595 int error;
596
597 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
598 &gh);
599 if (error)
600 return error;
601
602 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
603 if (error)
604 goto out;
605
606 if (sdp->sd_args.ar_spectator) {
607 spin_lock(&sdp->sd_statfs_spin);
608 gfs2_statfs_change_in(m_sc, m_bh->b_data +
609 sizeof(struct gfs2_dinode));
610 spin_unlock(&sdp->sd_statfs_spin);
611 } else {
612 error = gfs2_meta_inode_buffer(l_ip, &l_bh);
613 if (error)
614 goto out_m_bh;
615
616 spin_lock(&sdp->sd_statfs_spin);
617 gfs2_statfs_change_in(m_sc, m_bh->b_data +
618 sizeof(struct gfs2_dinode));
619 gfs2_statfs_change_in(l_sc, l_bh->b_data +
620 sizeof(struct gfs2_dinode));
621 spin_unlock(&sdp->sd_statfs_spin);
622
623 brelse(l_bh);
624 }
625
626out_m_bh:
627 brelse(m_bh);
628out:
629 gfs2_glock_dq_uninit(&gh);
630 return 0;
631}
632
/*
 * gfs2_statfs_change - apply block-count deltas to this node's local
 *                      statfs change file
 * @sdp: the filesystem
 * @total: delta to total block count
 * @free: delta to free block count
 * @dinodes: delta to dinode count
 *
 * The deltas are accumulated in the node-local change file (under
 * sd_statfs_spin) and folded into the master copy later by
 * gfs2_statfs_sync().  A failure to read the local buffer is silently
 * ignored; this function is best-effort by design.
 */
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *l_bh;
	int error;

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		return;

	/* sd_statfs_mutex serializes adding the buffer to the transaction */
	mutex_lock(&sdp->sd_statfs_mutex);
	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
	mutex_unlock(&sdp->sd_statfs_mutex);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);

	brelse(l_bh);
}
658
/*
 * gfs2_statfs_sync - fold this node's local statfs deltas into the
 *                    cluster-wide master copy
 * @sdp: the filesystem
 *
 * Takes the master statfs glock exclusively, re-reads the master copy,
 * and — if the local change file holds any non-zero deltas — adds them
 * to the master and zeroes the local copy, all within one transaction.
 *
 * Returns: errno
 */
int gfs2_statfs_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh, *l_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	/* Refresh the master copy; other nodes may have updated it */
	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	/* Nothing to fold in?  Then skip the transaction entirely. */
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
	if (error)
		goto out_bh;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh2;

	mutex_lock(&sdp->sd_statfs_mutex);
	gfs2_trans_add_bh(l_ip->i_gl, l_bh, 1);
	mutex_unlock(&sdp->sd_statfs_mutex);

	/* Move the local deltas into the master and clear them */
	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	spin_unlock(&sdp->sd_statfs_spin);

	gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));

	gfs2_trans_end(sdp);

out_bh2:
	brelse(l_bh);
out_bh:
	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
721
722/**
723 * gfs2_statfs_i - Do a statfs
724 * @sdp: the filesystem
725 * @sg: the sg structure
726 *
727 * Returns: errno
728 */
729
730int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
731{
732 struct gfs2_statfs_change *m_sc = &sdp->sd_statfs_master;
733 struct gfs2_statfs_change *l_sc = &sdp->sd_statfs_local;
734
735 spin_lock(&sdp->sd_statfs_spin);
736
737 *sc = *m_sc;
738 sc->sc_total += l_sc->sc_total;
739 sc->sc_free += l_sc->sc_free;
740 sc->sc_dinodes += l_sc->sc_dinodes;
741
742 spin_unlock(&sdp->sd_statfs_spin);
743
744 if (sc->sc_free < 0)
745 sc->sc_free = 0;
746 if (sc->sc_free > sc->sc_total)
747 sc->sc_free = sc->sc_total;
748 if (sc->sc_dinodes < 0)
749 sc->sc_dinodes = 0;
750
751 return 0;
752}
753
/**
 * statfs_slow_fill - accumulate one resource group's counters into @sc
 * @rgd: the resource group (its glock is held by the caller)
 * @sc: the statfs structure being accumulated
 *
 * Returns: 0 (always; the verify call handles inconsistencies itself)
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change *sc)
{
	/* Validate the rgrp's bitmaps before trusting its counters */
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_ri.ri_data;
	sc->sc_free += rgd->rd_rg.rg_free;
	sc->sc_dinodes += rgd->rd_rg.rg_dinodes;
	return 0;
}
771
/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Walks every resource group, issuing up to 64 asynchronous shared
 * glock requests at a time and folding each rgrp's counters into @sc
 * as its lock completes.  Any error (other than a signal) is recorded
 * in @error but the loop keeps draining outstanding requests so all
 * holders are cleaned up.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc)
{
	struct gfs2_holder ri_gh;
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;	/* max in-flight async lock requests */
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change));
	gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;

	error = gfs2_rindex_hold(sdp, &ri_gh);
	if (error)
		goto out;

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			/* Harvest any slot whose lock request has completed */
			if (gh->gh_gl && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error)
						error = statfs_slow_fill(
							gh->gh_gl->gl_object, sc);
					gfs2_glock_dq_uninit(gh);
				}
			}

			/* Slot still busy, or refill it with the next rgrp */
			if (gh->gh_gl)
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		/* done only when every slot is idle and no rgrps remain */
		if (done)
			break;

		yield();
	}

	gfs2_glock_dq_uninit(&ri_gh);

out:
	kfree(gha);
	return error;
}
852
/* One held journal glock, tracked while gfs2_lock_fs_check_clean()
   verifies that every journal is clean ("lock for check clean"). */
struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};
857
858/**
859 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
860 * journals are clean
861 * @sdp: the file system
862 * @state: the state to put the transaction lock into
863 * @t_gh: the hold on the transaction lock
864 *
865 * Returns: errno
866 */
867
868static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
869 struct gfs2_holder *t_gh)
870{
871 struct gfs2_inode *ip;
872 struct gfs2_holder ji_gh;
873 struct gfs2_jdesc *jd;
874 struct lfcc *lfcc;
875 LIST_HEAD(list);
876 struct gfs2_log_header lh;
877 int error;
878
879 error = gfs2_jindex_hold(sdp, &ji_gh);
880 if (error)
881 return error;
882
883 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
884 lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
885 if (!lfcc) {
886 error = -ENOMEM;
887 goto out;
888 }
889 ip = GFS2_I(jd->jd_inode);
890 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
891 if (error) {
892 kfree(lfcc);
893 goto out;
894 }
895 list_add(&lfcc->list, &list);
896 }
897
898 error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
899 LM_FLAG_PRIORITY | GL_NOCACHE,
900 t_gh);
901
902 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
903 error = gfs2_jdesc_check(jd);
904 if (error)
905 break;
906 error = gfs2_find_jhead(jd, &lh);
907 if (error)
908 break;
909 if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
910 error = -EBUSY;
911 break;
912 }
913 }
914
915 if (error)
916 gfs2_glock_dq_uninit(t_gh);
917
918out:
919 while (!list_empty(&list)) {
920 lfcc = list_entry(list.next, struct lfcc, list);
921 list_del(&lfcc->list);
922 gfs2_glock_dq_uninit(&lfcc->gh);
923 kfree(lfcc);
924 }
925 gfs2_glock_dq_uninit(&ji_gh);
926 return error;
927}
928
929/**
930 * gfs2_freeze_fs - freezes the file system
931 * @sdp: the file system
932 *
933 * This function flushes data and meta data for all machines by
934 * aquiring the transaction log exclusively. All journals are
935 * ensured to be in a clean state as well.
936 *
937 * Returns: errno
938 */
939
940int gfs2_freeze_fs(struct gfs2_sbd *sdp)
941{
942 int error = 0;
943
944 mutex_lock(&sdp->sd_freeze_lock);
945
946 if (!sdp->sd_freeze_count++) {
947 error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
948 if (error)
949 sdp->sd_freeze_count--;
950 }
951
952 mutex_unlock(&sdp->sd_freeze_lock);
953
954 return error;
955}
956
957/**
958 * gfs2_unfreeze_fs - unfreezes the file system
959 * @sdp: the file system
960 *
961 * This function allows the file system to proceed by unlocking
962 * the exclusively held transaction lock. Other GFS2 nodes are
963 * now free to acquire the lock shared and go on with their lives.
964 *
965 */
966
967void gfs2_unfreeze_fs(struct gfs2_sbd *sdp)
968{
969 mutex_lock(&sdp->sd_freeze_lock);
970
971 if (sdp->sd_freeze_count && !--sdp->sd_freeze_count)
972 gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
973
974 mutex_unlock(&sdp->sd_freeze_lock);
975}
976
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
new file mode 100644
index 000000000000..5bb443ae0f59
--- /dev/null
+++ b/fs/gfs2/super.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __SUPER_DOT_H__
11#define __SUPER_DOT_H__
12
13#include "incore.h"
14
15void gfs2_tune_init(struct gfs2_tune *gt);
16
17int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb *sb, int silent);
18int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent);
19struct page *gfs2_read_super(struct super_block *sb, sector_t sector);
20
/* Number of journals currently in the jindex.  The snapshot is taken
   under sd_jindex_spin but may be stale as soon as the lock drops. */
static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
{
	unsigned int x;
	spin_lock(&sdp->sd_jindex_spin);
	x = sdp->sd_journals;
	spin_unlock(&sdp->sd_jindex_spin);
	return x;
}
29
30int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh);
31void gfs2_jindex_free(struct gfs2_sbd *sdp);
32
33struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
34void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid);
35struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp);
36int gfs2_jdesc_check(struct gfs2_jdesc *jd);
37
38int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
39 struct gfs2_inode **ipp);
40
41int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
42int gfs2_make_fs_ro(struct gfs2_sbd *sdp);
43
44int gfs2_statfs_init(struct gfs2_sbd *sdp);
45void gfs2_statfs_change(struct gfs2_sbd *sdp,
46 s64 total, s64 free, s64 dinodes);
47int gfs2_statfs_sync(struct gfs2_sbd *sdp);
48int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc);
49int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change *sc);
50
51int gfs2_freeze_fs(struct gfs2_sbd *sdp);
52void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
53
54#endif /* __SUPER_DOT_H__ */
55
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
new file mode 100644
index 000000000000..0e0ec988f731
--- /dev/null
+++ b/fs/gfs2/sys.c
@@ -0,0 +1,583 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/module.h>
16#include <linux/kobject.h>
17#include <linux/gfs2_ondisk.h>
18#include <linux/lm_interface.h>
19#include <asm/uaccess.h>
20
21#include "gfs2.h"
22#include "incore.h"
23#include "lm.h"
24#include "sys.h"
25#include "super.h"
26#include "glock.h"
27#include "quota.h"
28#include "util.h"
29
30char *gfs2_sys_margs;
31spinlock_t gfs2_sys_margs_lock;
32
/* sysfs "id" attribute: the block device name (s_id) of this mount */
static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_vfs->s_id);
}
37
/* sysfs "fsname" attribute: the filesystem's lock-table name */
static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
}
42
43static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
44{
45 unsigned int count;
46
47 mutex_lock(&sdp->sd_freeze_lock);
48 count = sdp->sd_freeze_count;
49 mutex_unlock(&sdp->sd_freeze_lock);
50
51 return snprintf(buf, PAGE_SIZE, "%u\n", count);
52}
53
54static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
55{
56 ssize_t ret = len;
57 int error = 0;
58 int n = simple_strtol(buf, NULL, 0);
59
60 if (!capable(CAP_SYS_ADMIN))
61 return -EACCES;
62
63 switch (n) {
64 case 0:
65 gfs2_unfreeze_fs(sdp);
66 break;
67 case 1:
68 error = gfs2_freeze_fs(sdp);
69 break;
70 default:
71 ret = -EINVAL;
72 }
73
74 if (error)
75 fs_warn(sdp, "freeze %d error %d", n, error);
76
77 return ret;
78}
79
/* sysfs "withdraw" read: 1 if the fs has shut itself down */
static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
	unsigned int b = test_bit(SDF_SHUTDOWN, &sdp->sd_flags);
	return snprintf(buf, PAGE_SIZE, "%u\n", b);
}
85
/* sysfs "withdraw" write: writing 1 forces the node out of the cluster */
static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (simple_strtol(buf, NULL, 0) != 1)
		return -EINVAL;

	gfs2_lm_withdraw(sdp,
		"GFS2: fsid=%s: withdrawing from cluster at user's request\n",
		sdp->sd_fsname);
	return len;
}
99
100static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
101 size_t len)
102{
103 if (!capable(CAP_SYS_ADMIN))
104 return -EACCES;
105
106 if (simple_strtol(buf, NULL, 0) != 1)
107 return -EINVAL;
108
109 gfs2_statfs_sync(sdp);
110 return len;
111}
112
113static ssize_t shrink_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
114{
115 if (!capable(CAP_SYS_ADMIN))
116 return -EACCES;
117
118 if (simple_strtol(buf, NULL, 0) != 1)
119 return -EINVAL;
120
121 gfs2_gl_hash_clear(sdp, NO_WAIT);
122 return len;
123}
124
125static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
126 size_t len)
127{
128 if (!capable(CAP_SYS_ADMIN))
129 return -EACCES;
130
131 if (simple_strtol(buf, NULL, 0) != 1)
132 return -EINVAL;
133
134 gfs2_quota_sync(sdp);
135 return len;
136}
137
138static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
139 size_t len)
140{
141 u32 id;
142
143 if (!capable(CAP_SYS_ADMIN))
144 return -EACCES;
145
146 id = simple_strtoul(buf, NULL, 0);
147
148 gfs2_quota_refresh(sdp, 1, id);
149 return len;
150}
151
152static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
153 size_t len)
154{
155 u32 id;
156
157 if (!capable(CAP_SYS_ADMIN))
158 return -EACCES;
159
160 id = simple_strtoul(buf, NULL, 0);
161
162 gfs2_quota_refresh(sdp, 0, id);
163 return len;
164}
165
166struct gfs2_attr {
167 struct attribute attr;
168 ssize_t (*show)(struct gfs2_sbd *, char *);
169 ssize_t (*store)(struct gfs2_sbd *, const char *, size_t);
170};
171
172#define GFS2_ATTR(name, mode, show, store) \
173static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)
174
175GFS2_ATTR(id, 0444, id_show, NULL);
176GFS2_ATTR(fsname, 0444, fsname_show, NULL);
177GFS2_ATTR(freeze, 0644, freeze_show, freeze_store);
178GFS2_ATTR(shrink, 0200, NULL, shrink_store);
179GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
180GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store);
181GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store);
182GFS2_ATTR(quota_refresh_user, 0200, NULL, quota_refresh_user_store);
183GFS2_ATTR(quota_refresh_group, 0200, NULL, quota_refresh_group_store);
184
185static struct attribute *gfs2_attrs[] = {
186 &gfs2_attr_id.attr,
187 &gfs2_attr_fsname.attr,
188 &gfs2_attr_freeze.attr,
189 &gfs2_attr_shrink.attr,
190 &gfs2_attr_withdraw.attr,
191 &gfs2_attr_statfs_sync.attr,
192 &gfs2_attr_quota_sync.attr,
193 &gfs2_attr_quota_refresh_user.attr,
194 &gfs2_attr_quota_refresh_group.attr,
195 NULL,
196};
197
/* Generic sysfs show: dispatch to the gfs2_attr's show method, if any */
static ssize_t gfs2_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->show ? a->show(sdp, buf) : 0;
}
205
/* Generic sysfs store: dispatch to the gfs2_attr's store method, if any */
static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t len)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->store ? a->store(sdp, buf, len) : len;
}
213
214static struct sysfs_ops gfs2_attr_ops = {
215 .show = gfs2_attr_show,
216 .store = gfs2_attr_store,
217};
218
219static struct kobj_type gfs2_ktype = {
220 .default_attrs = gfs2_attrs,
221 .sysfs_ops = &gfs2_attr_ops,
222};
223
224static struct kset gfs2_kset = {
225 .subsys = &fs_subsys,
226 .kobj = {.name = "gfs2"},
227 .ktype = &gfs2_ktype,
228};
229
230/*
231 * display struct lm_lockstruct fields
232 */
233
234struct lockstruct_attr {
235 struct attribute attr;
236 ssize_t (*show)(struct gfs2_sbd *, char *);
237};
238
239#define LOCKSTRUCT_ATTR(name, fmt) \
240static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
241{ \
242 return snprintf(buf, PAGE_SIZE, fmt, sdp->sd_lockstruct.ls_##name); \
243} \
244static struct lockstruct_attr lockstruct_attr_##name = __ATTR_RO(name)
245
246LOCKSTRUCT_ATTR(jid, "%u\n");
247LOCKSTRUCT_ATTR(first, "%u\n");
248LOCKSTRUCT_ATTR(lvb_size, "%u\n");
249LOCKSTRUCT_ATTR(flags, "%d\n");
250
251static struct attribute *lockstruct_attrs[] = {
252 &lockstruct_attr_jid.attr,
253 &lockstruct_attr_first.attr,
254 &lockstruct_attr_lvb_size.attr,
255 &lockstruct_attr_flags.attr,
256 NULL,
257};
258
259/*
260 * display struct gfs2_args fields
261 */
262
263struct args_attr {
264 struct attribute attr;
265 ssize_t (*show)(struct gfs2_sbd *, char *);
266};
267
268#define ARGS_ATTR(name, fmt) \
269static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
270{ \
271 return snprintf(buf, PAGE_SIZE, fmt, sdp->sd_args.ar_##name); \
272} \
273static struct args_attr args_attr_##name = __ATTR_RO(name)
274
275ARGS_ATTR(lockproto, "%s\n");
276ARGS_ATTR(locktable, "%s\n");
277ARGS_ATTR(hostdata, "%s\n");
278ARGS_ATTR(spectator, "%d\n");
279ARGS_ATTR(ignore_local_fs, "%d\n");
280ARGS_ATTR(localcaching, "%d\n");
281ARGS_ATTR(localflocks, "%d\n");
282ARGS_ATTR(debug, "%d\n");
283ARGS_ATTR(upgrade, "%d\n");
284ARGS_ATTR(num_glockd, "%u\n");
285ARGS_ATTR(posix_acl, "%d\n");
286ARGS_ATTR(quota, "%u\n");
287ARGS_ATTR(suiddir, "%d\n");
288ARGS_ATTR(data, "%d\n");
289
/* one oddball doesn't fit the macro mold: noatime lives in sd_flags,
   not in sd_args, so it needs a hand-written show function */
static ssize_t noatime_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			!!test_bit(SDF_NOATIME, &sdp->sd_flags));
}
static struct args_attr args_attr_noatime = __ATTR_RO(noatime);
297
298static struct attribute *args_attrs[] = {
299 &args_attr_lockproto.attr,
300 &args_attr_locktable.attr,
301 &args_attr_hostdata.attr,
302 &args_attr_spectator.attr,
303 &args_attr_ignore_local_fs.attr,
304 &args_attr_localcaching.attr,
305 &args_attr_localflocks.attr,
306 &args_attr_debug.attr,
307 &args_attr_upgrade.attr,
308 &args_attr_num_glockd.attr,
309 &args_attr_posix_acl.attr,
310 &args_attr_quota.attr,
311 &args_attr_suiddir.attr,
312 &args_attr_data.attr,
313 &args_attr_noatime.attr,
314 NULL,
315};
316
317/*
318 * display counters from superblock
319 */
320
321struct counters_attr {
322 struct attribute attr;
323 ssize_t (*show)(struct gfs2_sbd *, char *);
324};
325
326#define COUNTERS_ATTR(name, fmt) \
327static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
328{ \
329 return snprintf(buf, PAGE_SIZE, fmt, \
330 (unsigned int)atomic_read(&sdp->sd_##name)); \
331} \
332static struct counters_attr counters_attr_##name = __ATTR_RO(name)
333
334COUNTERS_ATTR(glock_count, "%u\n");
335COUNTERS_ATTR(glock_held_count, "%u\n");
336COUNTERS_ATTR(inode_count, "%u\n");
337COUNTERS_ATTR(reclaimed, "%u\n");
338
339static struct attribute *counters_attrs[] = {
340 &counters_attr_glock_count.attr,
341 &counters_attr_glock_held_count.attr,
342 &counters_attr_inode_count.attr,
343 &counters_attr_reclaimed.attr,
344 NULL,
345};
346
347/*
348 * get and set struct gfs2_tune fields
349 */
350
/* sysfs "quota_scale" read: "numerator denominator" pair */
static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u %u\n",
			sdp->sd_tune.gt_quota_scale_num,
			sdp->sd_tune.gt_quota_scale_den);
}
357
358static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
359 size_t len)
360{
361 struct gfs2_tune *gt = &sdp->sd_tune;
362 unsigned int x, y;
363
364 if (!capable(CAP_SYS_ADMIN))
365 return -EACCES;
366
367 if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
368 return -EINVAL;
369
370 spin_lock(&gt->gt_spin);
371 gt->gt_quota_scale_num = x;
372 gt->gt_quota_scale_den = y;
373 spin_unlock(&gt->gt_spin);
374 return len;
375}
376
/*
 * tune_set - common store helper for the TUNE_ATTR sysfs attributes
 * @sdp: the filesystem
 * @field: the gfs2_tune member to update
 * @check_zero: reject a value of zero when non-zero
 * @buf: user-supplied text holding the new value
 * @len: length of @buf
 *
 * Returns: @len on success, -EACCES / -EINVAL on failure
 */
static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
			int check_zero, const char *buf, size_t len)
{
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int x;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	x = simple_strtoul(buf, NULL, 0);

	if (check_zero && !x)
		return -EINVAL;

	/* gt_spin keeps readers from seeing a torn update */
	spin_lock(&gt->gt_spin);
	*field = x;
	spin_unlock(&gt->gt_spin);
	return len;
}
396
397struct tune_attr {
398 struct attribute attr;
399 ssize_t (*show)(struct gfs2_sbd *, char *);
400 ssize_t (*store)(struct gfs2_sbd *, const char *, size_t);
401};
402
403#define TUNE_ATTR_3(name, show, store) \
404static struct tune_attr tune_attr_##name = __ATTR(name, 0644, show, store)
405
406#define TUNE_ATTR_2(name, store) \
407static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
408{ \
409 return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
410} \
411TUNE_ATTR_3(name, name##_show, store)
412
413#define TUNE_ATTR(name, check_zero) \
414static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
415{ \
416 return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len); \
417} \
418TUNE_ATTR_2(name, name##_store)
419
420#define TUNE_ATTR_DAEMON(name, process) \
421static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
422{ \
423 ssize_t r = tune_set(sdp, &sdp->sd_tune.gt_##name, 1, buf, len); \
424 wake_up_process(sdp->sd_##process); \
425 return r; \
426} \
427TUNE_ATTR_2(name, name##_store)
428
429TUNE_ATTR(ilimit, 0);
430TUNE_ATTR(ilimit_tries, 0);
431TUNE_ATTR(ilimit_min, 0);
432TUNE_ATTR(demote_secs, 0);
433TUNE_ATTR(incore_log_blocks, 0);
434TUNE_ATTR(log_flush_secs, 0);
435TUNE_ATTR(jindex_refresh_secs, 0);
436TUNE_ATTR(quota_warn_period, 0);
437TUNE_ATTR(quota_quantum, 0);
438TUNE_ATTR(atime_quantum, 0);
439TUNE_ATTR(max_readahead, 0);
440TUNE_ATTR(complain_secs, 0);
441TUNE_ATTR(reclaim_limit, 0);
442TUNE_ATTR(prefetch_secs, 0);
443TUNE_ATTR(statfs_slow, 0);
444TUNE_ATTR(new_files_jdata, 0);
445TUNE_ATTR(new_files_directio, 0);
446TUNE_ATTR(quota_simul_sync, 1);
447TUNE_ATTR(quota_cache_secs, 1);
448TUNE_ATTR(max_atomic_write, 1);
449TUNE_ATTR(stall_secs, 1);
450TUNE_ATTR(entries_per_readdir, 1);
451TUNE_ATTR(greedy_default, 1);
452TUNE_ATTR(greedy_quantum, 1);
453TUNE_ATTR(greedy_max, 1);
454TUNE_ATTR(statfs_quantum, 1);
455TUNE_ATTR_DAEMON(scand_secs, scand_process);
456TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process);
457TUNE_ATTR_DAEMON(logd_secs, logd_process);
458TUNE_ATTR_DAEMON(quotad_secs, quotad_process);
459TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
460
461static struct attribute *tune_attrs[] = {
462 &tune_attr_ilimit.attr,
463 &tune_attr_ilimit_tries.attr,
464 &tune_attr_ilimit_min.attr,
465 &tune_attr_demote_secs.attr,
466 &tune_attr_incore_log_blocks.attr,
467 &tune_attr_log_flush_secs.attr,
468 &tune_attr_jindex_refresh_secs.attr,
469 &tune_attr_quota_warn_period.attr,
470 &tune_attr_quota_quantum.attr,
471 &tune_attr_atime_quantum.attr,
472 &tune_attr_max_readahead.attr,
473 &tune_attr_complain_secs.attr,
474 &tune_attr_reclaim_limit.attr,
475 &tune_attr_prefetch_secs.attr,
476 &tune_attr_statfs_slow.attr,
477 &tune_attr_quota_simul_sync.attr,
478 &tune_attr_quota_cache_secs.attr,
479 &tune_attr_max_atomic_write.attr,
480 &tune_attr_stall_secs.attr,
481 &tune_attr_entries_per_readdir.attr,
482 &tune_attr_greedy_default.attr,
483 &tune_attr_greedy_quantum.attr,
484 &tune_attr_greedy_max.attr,
485 &tune_attr_statfs_quantum.attr,
486 &tune_attr_scand_secs.attr,
487 &tune_attr_recoverd_secs.attr,
488 &tune_attr_logd_secs.attr,
489 &tune_attr_quotad_secs.attr,
490 &tune_attr_quota_scale.attr,
491 &tune_attr_new_files_jdata.attr,
492 &tune_attr_new_files_directio.attr,
493 NULL,
494};
495
496static struct attribute_group lockstruct_group = {
497 .name = "lockstruct",
498 .attrs = lockstruct_attrs,
499};
500
501static struct attribute_group counters_group = {
502 .name = "counters",
503 .attrs = counters_attrs,
504};
505
506static struct attribute_group args_group = {
507 .name = "args",
508 .attrs = args_attrs,
509};
510
511static struct attribute_group tune_group = {
512 .name = "tune",
513 .attrs = tune_attrs,
514};
515
516int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
517{
518 int error;
519
520 sdp->sd_kobj.kset = &gfs2_kset;
521 sdp->sd_kobj.ktype = &gfs2_ktype;
522
523 error = kobject_set_name(&sdp->sd_kobj, "%s", sdp->sd_table_name);
524 if (error)
525 goto fail;
526
527 error = kobject_register(&sdp->sd_kobj);
528 if (error)
529 goto fail;
530
531 error = sysfs_create_group(&sdp->sd_kobj, &lockstruct_group);
532 if (error)
533 goto fail_reg;
534
535 error = sysfs_create_group(&sdp->sd_kobj, &counters_group);
536 if (error)
537 goto fail_lockstruct;
538
539 error = sysfs_create_group(&sdp->sd_kobj, &args_group);
540 if (error)
541 goto fail_counters;
542
543 error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
544 if (error)
545 goto fail_args;
546
547 return 0;
548
549fail_args:
550 sysfs_remove_group(&sdp->sd_kobj, &args_group);
551fail_counters:
552 sysfs_remove_group(&sdp->sd_kobj, &counters_group);
553fail_lockstruct:
554 sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group);
555fail_reg:
556 kobject_unregister(&sdp->sd_kobj);
557fail:
558 fs_err(sdp, "error %d adding sysfs files", error);
559 return error;
560}
561
562void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
563{
564 sysfs_remove_group(&sdp->sd_kobj, &tune_group);
565 sysfs_remove_group(&sdp->sd_kobj, &args_group);
566 sysfs_remove_group(&sdp->sd_kobj, &counters_group);
567 sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group);
568 kobject_unregister(&sdp->sd_kobj);
569}
570
571int gfs2_sys_init(void)
572{
573 gfs2_sys_margs = NULL;
574 spin_lock_init(&gfs2_sys_margs_lock);
575 return kset_register(&gfs2_kset);
576}
577
578void gfs2_sys_uninit(void)
579{
580 kfree(gfs2_sys_margs);
581 kset_unregister(&gfs2_kset);
582}
583
diff --git a/fs/gfs2/sys.h b/fs/gfs2/sys.h
new file mode 100644
index 000000000000..1ca8cdac5304
--- /dev/null
+++ b/fs/gfs2/sys.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __SYS_DOT_H__
11#define __SYS_DOT_H__
12
13#include <linux/spinlock.h>
14struct gfs2_sbd;
15
16/* Allow args to be passed to GFS2 when using an initial ram disk */
17extern char *gfs2_sys_margs;
18extern spinlock_t gfs2_sys_margs_lock;
19
20int gfs2_sys_fs_add(struct gfs2_sbd *sdp);
21void gfs2_sys_fs_del(struct gfs2_sbd *sdp);
22
23int gfs2_sys_init(void);
24void gfs2_sys_uninit(void);
25
26#endif /* __SYS_DOT_H__ */
27
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
new file mode 100644
index 000000000000..f8dabf8446bb
--- /dev/null
+++ b/fs/gfs2/trans.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/gfs2_ondisk.h>
16#include <linux/kallsyms.h>
17#include <linux/lm_interface.h>
18
19#include "gfs2.h"
20#include "incore.h"
21#include "glock.h"
22#include "log.h"
23#include "lops.h"
24#include "meta_io.h"
25#include "trans.h"
26#include "util.h"
27
28int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
29 unsigned int revokes)
30{
31 struct gfs2_trans *tr;
32 int error;
33
34 BUG_ON(current->journal_info);
35 BUG_ON(blocks == 0 && revokes == 0);
36
37 tr = kzalloc(sizeof(struct gfs2_trans), GFP_NOFS);
38 if (!tr)
39 return -ENOMEM;
40
41 tr->tr_ip = (unsigned long)__builtin_return_address(0);
42 tr->tr_blocks = blocks;
43 tr->tr_revokes = revokes;
44 tr->tr_reserved = 1;
45 if (blocks)
46 tr->tr_reserved += 6 + blocks;
47 if (revokes)
48 tr->tr_reserved += gfs2_struct2blk(sdp, revokes,
49 sizeof(u64));
50 INIT_LIST_HEAD(&tr->tr_list_buf);
51
52 gfs2_holder_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &tr->tr_t_gh);
53
54 error = gfs2_glock_nq(&tr->tr_t_gh);
55 if (error)
56 goto fail_holder_uninit;
57
58 if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
59 tr->tr_t_gh.gh_flags |= GL_NOCACHE;
60 error = -EROFS;
61 goto fail_gunlock;
62 }
63
64 error = gfs2_log_reserve(sdp, tr->tr_reserved);
65 if (error)
66 goto fail_gunlock;
67
68 current->journal_info = tr;
69
70 return 0;
71
72fail_gunlock:
73 gfs2_glock_dq(&tr->tr_t_gh);
74
75fail_holder_uninit:
76 gfs2_holder_uninit(&tr->tr_t_gh);
77 kfree(tr);
78
79 return error;
80}
81
82void gfs2_trans_end(struct gfs2_sbd *sdp)
83{
84 struct gfs2_trans *tr = current->journal_info;
85
86 BUG_ON(!tr);
87 current->journal_info = NULL;
88
89 if (!tr->tr_touched) {
90 gfs2_log_release(sdp, tr->tr_reserved);
91 gfs2_glock_dq(&tr->tr_t_gh);
92 gfs2_holder_uninit(&tr->tr_t_gh);
93 kfree(tr);
94 return;
95 }
96
97 if (gfs2_assert_withdraw(sdp, tr->tr_num_buf <= tr->tr_blocks)) {
98 fs_err(sdp, "tr_num_buf = %u, tr_blocks = %u ",
99 tr->tr_num_buf, tr->tr_blocks);
100 print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n", tr->tr_ip);
101 }
102 if (gfs2_assert_withdraw(sdp, tr->tr_num_revoke <= tr->tr_revokes)) {
103 fs_err(sdp, "tr_num_revoke = %u, tr_revokes = %u ",
104 tr->tr_num_revoke, tr->tr_revokes);
105 print_symbol(KERN_WARNING "GFS2: Transaction created at: %s\n", tr->tr_ip);
106 }
107
108 gfs2_log_commit(sdp, tr);
109 gfs2_glock_dq(&tr->tr_t_gh);
110 gfs2_holder_uninit(&tr->tr_t_gh);
111 kfree(tr);
112
113 if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
114 gfs2_log_flush(sdp, NULL);
115}
116
117void gfs2_trans_add_gl(struct gfs2_glock *gl)
118{
119 lops_add(gl->gl_sbd, &gl->gl_le);
120}
121
122/**
123 * gfs2_trans_add_bh - Add a to-be-modified buffer to the current transaction
124 * @gl: the glock the buffer belongs to
125 * @bh: The buffer to add
126 * @meta: True in the case of adding metadata
127 *
128 */
129
130void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta)
131{
132 struct gfs2_sbd *sdp = gl->gl_sbd;
133 struct gfs2_bufdata *bd;
134
135 bd = bh->b_private;
136 if (bd)
137 gfs2_assert(sdp, bd->bd_gl == gl);
138 else {
139 gfs2_attach_bufdata(gl, bh, meta);
140 bd = bh->b_private;
141 }
142 lops_add(sdp, &bd->bd_le);
143}
144
145void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, u64 blkno)
146{
147 struct gfs2_revoke *rv = kmalloc(sizeof(struct gfs2_revoke),
148 GFP_NOFS | __GFP_NOFAIL);
149 lops_init_le(&rv->rv_le, &gfs2_revoke_lops);
150 rv->rv_blkno = blkno;
151 lops_add(sdp, &rv->rv_le);
152}
153
154void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno)
155{
156 struct gfs2_revoke *rv;
157 int found = 0;
158
159 gfs2_log_lock(sdp);
160
161 list_for_each_entry(rv, &sdp->sd_log_le_revoke, rv_le.le_list) {
162 if (rv->rv_blkno == blkno) {
163 list_del(&rv->rv_le.le_list);
164 gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
165 sdp->sd_log_num_revoke--;
166 found = 1;
167 break;
168 }
169 }
170
171 gfs2_log_unlock(sdp);
172
173 if (found) {
174 struct gfs2_trans *tr = current->journal_info;
175 kfree(rv);
176 tr->tr_num_revoke_rm++;
177 }
178}
179
180void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd)
181{
182 lops_add(rgd->rd_sbd, &rgd->rd_le);
183}
184
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
new file mode 100644
index 000000000000..23d4cbe1de5b
--- /dev/null
+++ b/fs/gfs2/trans.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __TRANS_DOT_H__
11#define __TRANS_DOT_H__
12
13#include <linux/buffer_head.h>
14struct gfs2_sbd;
15struct gfs2_rgrpd;
16struct gfs2_glock;
17
18#define RES_DINODE 1
19#define RES_INDIRECT 1
20#define RES_JDATA 1
21#define RES_DATA 1
22#define RES_LEAF 1
23#define RES_RG_BIT 2
24#define RES_EATTR 1
25#define RES_STATFS 1
26#define RES_QUOTA 2
27
28int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
29 unsigned int revokes);
30
31void gfs2_trans_end(struct gfs2_sbd *sdp);
32
33void gfs2_trans_add_gl(struct gfs2_glock *gl);
34void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta);
35void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, u64 blkno);
36void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno);
37void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd);
38
39#endif /* __TRANS_DOT_H__ */
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
new file mode 100644
index 000000000000..196c604faadc
--- /dev/null
+++ b/fs/gfs2/util.c
@@ -0,0 +1,245 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/sched.h>
11#include <linux/slab.h>
12#include <linux/spinlock.h>
13#include <linux/completion.h>
14#include <linux/buffer_head.h>
15#include <linux/crc32.h>
16#include <linux/gfs2_ondisk.h>
17#include <linux/lm_interface.h>
18#include <asm/uaccess.h>
19
20#include "gfs2.h"
21#include "incore.h"
22#include "glock.h"
23#include "lm.h"
24#include "util.h"
25
26kmem_cache_t *gfs2_glock_cachep __read_mostly;
27kmem_cache_t *gfs2_inode_cachep __read_mostly;
28kmem_cache_t *gfs2_bufdata_cachep __read_mostly;
29
30void gfs2_assert_i(struct gfs2_sbd *sdp)
31{
32 printk(KERN_EMERG "GFS2: fsid=%s: fatal assertion failed\n",
33 sdp->sd_fsname);
34}
35
36/**
37 * gfs2_assert_withdraw_i - Cause the machine to withdraw if @assertion is false
38 * Returns: -1 if this call withdrew the machine,
39 * -2 if it was already withdrawn
40 */
41
42int gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
43 const char *function, char *file, unsigned int line)
44{
45 int me;
46 me = gfs2_lm_withdraw(sdp,
47 "GFS2: fsid=%s: fatal: assertion \"%s\" failed\n"
48 "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
49 sdp->sd_fsname, assertion,
50 sdp->sd_fsname, function, file, line);
51 dump_stack();
52 return (me) ? -1 : -2;
53}
54
55/**
56 * gfs2_assert_warn_i - Print a message to the console if @assertion is false
57 * Returns: -1 if we printed something
58 * -2 if we didn't
59 */
60
61int gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
62 const char *function, char *file, unsigned int line)
63{
64 if (time_before(jiffies,
65 sdp->sd_last_warning +
66 gfs2_tune_get(sdp, gt_complain_secs) * HZ))
67 return -2;
68
69 printk(KERN_WARNING
70 "GFS2: fsid=%s: warning: assertion \"%s\" failed\n"
71 "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
72 sdp->sd_fsname, assertion,
73 sdp->sd_fsname, function, file, line);
74
75 if (sdp->sd_args.ar_debug)
76 BUG();
77 else
78 dump_stack();
79
80 sdp->sd_last_warning = jiffies;
81
82 return -1;
83}
84
85/**
86 * gfs2_consist_i - Flag a filesystem consistency error and withdraw
87 * Returns: -1 if this call withdrew the machine,
88 * 0 if it was already withdrawn
89 */
90
91int gfs2_consist_i(struct gfs2_sbd *sdp, int cluster_wide, const char *function,
92 char *file, unsigned int line)
93{
94 int rv;
95 rv = gfs2_lm_withdraw(sdp,
96 "GFS2: fsid=%s: fatal: filesystem consistency error\n"
97 "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
98 sdp->sd_fsname,
99 sdp->sd_fsname, function, file, line);
100 return rv;
101}
102
103/**
104 * gfs2_consist_inode_i - Flag an inode consistency error and withdraw
105 * Returns: -1 if this call withdrew the machine,
106 * 0 if it was already withdrawn
107 */
108
109int gfs2_consist_inode_i(struct gfs2_inode *ip, int cluster_wide,
110 const char *function, char *file, unsigned int line)
111{
112 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
113 int rv;
114 rv = gfs2_lm_withdraw(sdp,
115 "GFS2: fsid=%s: fatal: filesystem consistency error\n"
116 "GFS2: fsid=%s: inode = %llu %llu\n"
117 "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
118 sdp->sd_fsname,
119 sdp->sd_fsname, (unsigned long long)ip->i_num.no_formal_ino,
120 (unsigned long long)ip->i_num.no_addr,
121 sdp->sd_fsname, function, file, line);
122 return rv;
123}
124
125/**
126 * gfs2_consist_rgrpd_i - Flag a RG consistency error and withdraw
127 * Returns: -1 if this call withdrew the machine,
128 * 0 if it was already withdrawn
129 */
130
131int gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, int cluster_wide,
132 const char *function, char *file, unsigned int line)
133{
134 struct gfs2_sbd *sdp = rgd->rd_sbd;
135 int rv;
136 rv = gfs2_lm_withdraw(sdp,
137 "GFS2: fsid=%s: fatal: filesystem consistency error\n"
138 "GFS2: fsid=%s: RG = %llu\n"
139 "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
140 sdp->sd_fsname,
141 sdp->sd_fsname, (unsigned long long)rgd->rd_ri.ri_addr,
142 sdp->sd_fsname, function, file, line);
143 return rv;
144}
145
146/**
147 * gfs2_meta_check_ii - Flag a magic number consistency error and withdraw
148 * Returns: -1 if this call withdrew the machine,
149 * -2 if it was already withdrawn
150 */
151
152int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
153 const char *type, const char *function, char *file,
154 unsigned int line)
155{
156 int me;
157 me = gfs2_lm_withdraw(sdp,
158 "GFS2: fsid=%s: fatal: invalid metadata block\n"
159 "GFS2: fsid=%s: bh = %llu (%s)\n"
160 "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
161 sdp->sd_fsname,
162 sdp->sd_fsname, (unsigned long long)bh->b_blocknr, type,
163 sdp->sd_fsname, function, file, line);
164 return (me) ? -1 : -2;
165}
166
167/**
168 * gfs2_metatype_check_ii - Flag a metadata type consistency error and withdraw
169 * Returns: -1 if this call withdrew the machine,
170 * -2 if it was already withdrawn
171 */
172
173int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
174 u16 type, u16 t, const char *function,
175 char *file, unsigned int line)
176{
177 int me;
178 me = gfs2_lm_withdraw(sdp,
179 "GFS2: fsid=%s: fatal: invalid metadata block\n"
180 "GFS2: fsid=%s: bh = %llu (type: exp=%u, found=%u)\n"
181 "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
182 sdp->sd_fsname,
183 sdp->sd_fsname, (unsigned long long)bh->b_blocknr, type, t,
184 sdp->sd_fsname, function, file, line);
185 return (me) ? -1 : -2;
186}
187
188/**
189 * gfs2_io_error_i - Flag an I/O error and withdraw
190 * Returns: -1 if this call withdrew the machine,
191 * 0 if it was already withdrawn
192 */
193
194int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
195 unsigned int line)
196{
197 int rv;
198 rv = gfs2_lm_withdraw(sdp,
199 "GFS2: fsid=%s: fatal: I/O error\n"
200 "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
201 sdp->sd_fsname,
202 sdp->sd_fsname, function, file, line);
203 return rv;
204}
205
206/**
207 * gfs2_io_error_bh_i - Flag a buffer I/O error and withdraw
208 * Returns: -1 if this call withdrew the machine,
209 * 0 if it was already withdrawn
210 */
211
212int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
213 const char *function, char *file, unsigned int line)
214{
215 int rv;
216 rv = gfs2_lm_withdraw(sdp,
217 "GFS2: fsid=%s: fatal: I/O error\n"
218 "GFS2: fsid=%s: block = %llu\n"
219 "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
220 sdp->sd_fsname,
221 sdp->sd_fsname, (unsigned long long)bh->b_blocknr,
222 sdp->sd_fsname, function, file, line);
223 return rv;
224}
225
226void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
227 unsigned int bit, int new_value)
228{
229 unsigned int c, o, b = bit;
230 int old_value;
231
232 c = b / (8 * PAGE_SIZE);
233 b %= 8 * PAGE_SIZE;
234 o = b / 8;
235 b %= 8;
236
237 old_value = (bitmap[c][o] & (1 << b));
238 gfs2_assert_withdraw(sdp, !old_value != !new_value);
239
240 if (new_value)
241 bitmap[c][o] |= 1 << b;
242 else
243 bitmap[c][o] &= ~(1 << b);
244}
245
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
new file mode 100644
index 000000000000..76a50899fe9e
--- /dev/null
+++ b/fs/gfs2/util.h
@@ -0,0 +1,170 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __UTIL_DOT_H__
11#define __UTIL_DOT_H__
12
13#include "incore.h"
14
15#define fs_printk(level, fs, fmt, arg...) \
16 printk(level "GFS2: fsid=%s: " fmt , (fs)->sd_fsname , ## arg)
17
18#define fs_info(fs, fmt, arg...) \
19 fs_printk(KERN_INFO , fs , fmt , ## arg)
20
21#define fs_warn(fs, fmt, arg...) \
22 fs_printk(KERN_WARNING , fs , fmt , ## arg)
23
24#define fs_err(fs, fmt, arg...) \
25 fs_printk(KERN_ERR, fs , fmt , ## arg)
26
27
28void gfs2_assert_i(struct gfs2_sbd *sdp);
29
30#define gfs2_assert(sdp, assertion) \
31do { \
32 if (unlikely(!(assertion))) { \
33 gfs2_assert_i(sdp); \
34 BUG(); \
35 } \
36} while (0)
37
38
39int gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
40 const char *function, char *file, unsigned int line);
41
42#define gfs2_assert_withdraw(sdp, assertion) \
43((likely(assertion)) ? 0 : gfs2_assert_withdraw_i((sdp), #assertion, \
44 __FUNCTION__, __FILE__, __LINE__))
45
46
47int gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
48 const char *function, char *file, unsigned int line);
49
50#define gfs2_assert_warn(sdp, assertion) \
51((likely(assertion)) ? 0 : gfs2_assert_warn_i((sdp), #assertion, \
52 __FUNCTION__, __FILE__, __LINE__))
53
54
55int gfs2_consist_i(struct gfs2_sbd *sdp, int cluster_wide,
56 const char *function, char *file, unsigned int line);
57
58#define gfs2_consist(sdp) \
59gfs2_consist_i((sdp), 0, __FUNCTION__, __FILE__, __LINE__)
60
61
62int gfs2_consist_inode_i(struct gfs2_inode *ip, int cluster_wide,
63 const char *function, char *file, unsigned int line);
64
65#define gfs2_consist_inode(ip) \
66gfs2_consist_inode_i((ip), 0, __FUNCTION__, __FILE__, __LINE__)
67
68
69int gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, int cluster_wide,
70 const char *function, char *file, unsigned int line);
71
72#define gfs2_consist_rgrpd(rgd) \
73gfs2_consist_rgrpd_i((rgd), 0, __FUNCTION__, __FILE__, __LINE__)
74
75
76int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
77 const char *type, const char *function,
78 char *file, unsigned int line);
79
80static inline int gfs2_meta_check_i(struct gfs2_sbd *sdp,
81 struct buffer_head *bh,
82 const char *function,
83 char *file, unsigned int line)
84{
85 struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
86 u32 magic = mh->mh_magic;
87 magic = be32_to_cpu(magic);
88 if (unlikely(magic != GFS2_MAGIC))
89 return gfs2_meta_check_ii(sdp, bh, "magic number", function,
90 file, line);
91 return 0;
92}
93
94#define gfs2_meta_check(sdp, bh) \
95gfs2_meta_check_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__)
96
97
98int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
99 u16 type, u16 t,
100 const char *function,
101 char *file, unsigned int line);
102
103static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
104 struct buffer_head *bh,
105 u16 type,
106 const char *function,
107 char *file, unsigned int line)
108{
109 struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
110 u32 magic = mh->mh_magic;
111 u16 t = be32_to_cpu(mh->mh_type);
112 magic = be32_to_cpu(magic);
113 if (unlikely(magic != GFS2_MAGIC))
114 return gfs2_meta_check_ii(sdp, bh, "magic number", function,
115 file, line);
116 if (unlikely(t != type))
117 return gfs2_metatype_check_ii(sdp, bh, type, t, function,
118 file, line);
119 return 0;
120}
121
122#define gfs2_metatype_check(sdp, bh, type) \
123gfs2_metatype_check_i((sdp), (bh), (type), __FUNCTION__, __FILE__, __LINE__)
124
125static inline void gfs2_metatype_set(struct buffer_head *bh, u16 type,
126 u16 format)
127{
128 struct gfs2_meta_header *mh;
129 mh = (struct gfs2_meta_header *)bh->b_data;
130 mh->mh_type = cpu_to_be32(type);
131 mh->mh_format = cpu_to_be32(format);
132}
133
134
135int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
136 char *file, unsigned int line);
137
138#define gfs2_io_error(sdp) \
139gfs2_io_error_i((sdp), __FUNCTION__, __FILE__, __LINE__);
140
141
142int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
143 const char *function, char *file, unsigned int line);
144
145#define gfs2_io_error_bh(sdp, bh) \
146gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__);
147
148
149extern kmem_cache_t *gfs2_glock_cachep;
150extern kmem_cache_t *gfs2_inode_cachep;
151extern kmem_cache_t *gfs2_bufdata_cachep;
152
153static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
154 unsigned int *p)
155{
156 unsigned int x;
157 spin_lock(&gt->gt_spin);
158 x = *p;
159 spin_unlock(&gt->gt_spin);
160 return x;
161}
162
163#define gfs2_tune_get(sdp, field) \
164gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
165
166void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
167 unsigned int bit, int new_value);
168
169#endif /* __UTIL_DOT_H__ */
170
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 87e1d03e8267..e8c7765419e8 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -144,42 +144,12 @@ u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock)
144 */ 144 */
145 145
146/* 146/*
147 * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number,
148 * that we mark locks for reclaiming, and that we bump the pseudo NSM state.
149 */
150static void nlmclnt_prepare_reclaim(struct nlm_host *host)
151{
152 down_write(&host->h_rwsem);
153 host->h_monitored = 0;
154 host->h_state++;
155 host->h_nextrebind = 0;
156 nlm_rebind_host(host);
157
158 /*
159 * Mark the locks for reclaiming.
160 */
161 list_splice_init(&host->h_granted, &host->h_reclaim);
162
163 dprintk("NLM: reclaiming locks for host %s\n", host->h_name);
164}
165
166static void nlmclnt_finish_reclaim(struct nlm_host *host)
167{
168 host->h_reclaiming = 0;
169 up_write(&host->h_rwsem);
170 dprintk("NLM: done reclaiming locks for host %s", host->h_name);
171}
172
173/*
174 * Reclaim all locks on server host. We do this by spawning a separate 147 * Reclaim all locks on server host. We do this by spawning a separate
175 * reclaimer thread. 148 * reclaimer thread.
176 */ 149 */
177void 150void
178nlmclnt_recovery(struct nlm_host *host, u32 newstate) 151nlmclnt_recovery(struct nlm_host *host)
179{ 152{
180 if (host->h_nsmstate == newstate)
181 return;
182 host->h_nsmstate = newstate;
183 if (!host->h_reclaiming++) { 153 if (!host->h_reclaiming++) {
184 nlm_get_host(host); 154 nlm_get_host(host);
185 __module_get(THIS_MODULE); 155 __module_get(THIS_MODULE);
@@ -199,18 +169,30 @@ reclaimer(void *ptr)
199 daemonize("%s-reclaim", host->h_name); 169 daemonize("%s-reclaim", host->h_name);
200 allow_signal(SIGKILL); 170 allow_signal(SIGKILL);
201 171
172 down_write(&host->h_rwsem);
173
202 /* This one ensures that our parent doesn't terminate while the 174 /* This one ensures that our parent doesn't terminate while the
203 * reclaim is in progress */ 175 * reclaim is in progress */
204 lock_kernel(); 176 lock_kernel();
205 lockd_up(0); /* note: this cannot fail as lockd is already running */ 177 lockd_up(0); /* note: this cannot fail as lockd is already running */
206 178
207 nlmclnt_prepare_reclaim(host); 179 dprintk("lockd: reclaiming locks for host %s", host->h_name);
208 /* First, reclaim all locks that have been marked. */ 180
209restart: 181restart:
210 nsmstate = host->h_nsmstate; 182 nsmstate = host->h_nsmstate;
183
184 /* Force a portmap getport - the peer's lockd will
185 * most likely end up on a different port.
186 */
187 host->h_nextrebind = jiffies;
188 nlm_rebind_host(host);
189
190 /* First, reclaim all locks that have been granted. */
191 list_splice_init(&host->h_granted, &host->h_reclaim);
211 list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) { 192 list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
212 list_del_init(&fl->fl_u.nfs_fl.list); 193 list_del_init(&fl->fl_u.nfs_fl.list);
213 194
195 /* Why are we leaking memory here? --okir */
214 if (signalled()) 196 if (signalled())
215 continue; 197 continue;
216 if (nlmclnt_reclaim(host, fl) != 0) 198 if (nlmclnt_reclaim(host, fl) != 0)
@@ -218,11 +200,13 @@ restart:
218 list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted); 200 list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
219 if (host->h_nsmstate != nsmstate) { 201 if (host->h_nsmstate != nsmstate) {
220 /* Argh! The server rebooted again! */ 202 /* Argh! The server rebooted again! */
221 list_splice_init(&host->h_granted, &host->h_reclaim);
222 goto restart; 203 goto restart;
223 } 204 }
224 } 205 }
225 nlmclnt_finish_reclaim(host); 206
207 host->h_reclaiming = 0;
208 up_write(&host->h_rwsem);
209 dprintk("NLM: done reclaiming locks for host %s", host->h_name);
226 210
227 /* Now, wake up all processes that sleep on a blocked lock */ 211 /* Now, wake up all processes that sleep on a blocked lock */
228 list_for_each_entry(block, &nlm_blocked, b_list) { 212 list_for_each_entry(block, &nlm_blocked, b_list) {
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 0116729cec5f..3d84f600b633 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -36,14 +36,14 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
36/* 36/*
37 * Cookie counter for NLM requests 37 * Cookie counter for NLM requests
38 */ 38 */
39static u32 nlm_cookie = 0x1234; 39static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
40 40
41static inline void nlmclnt_next_cookie(struct nlm_cookie *c) 41void nlmclnt_next_cookie(struct nlm_cookie *c)
42{ 42{
43 memcpy(c->data, &nlm_cookie, 4); 43 u32 cookie = atomic_inc_return(&nlm_cookie);
44 memset(c->data+4, 0, 4); 44
45 memcpy(c->data, &cookie, 4);
45 c->len=4; 46 c->len=4;
46 nlm_cookie++;
47} 47}
48 48
49static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner) 49static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
@@ -153,6 +153,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
153{ 153{
154 struct rpc_clnt *client = NFS_CLIENT(inode); 154 struct rpc_clnt *client = NFS_CLIENT(inode);
155 struct sockaddr_in addr; 155 struct sockaddr_in addr;
156 struct nfs_server *nfssrv = NFS_SERVER(inode);
156 struct nlm_host *host; 157 struct nlm_host *host;
157 struct nlm_rqst *call; 158 struct nlm_rqst *call;
158 sigset_t oldset; 159 sigset_t oldset;
@@ -166,7 +167,9 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
166 } 167 }
167 168
168 rpc_peeraddr(client, (struct sockaddr *) &addr, sizeof(addr)); 169 rpc_peeraddr(client, (struct sockaddr *) &addr, sizeof(addr));
169 host = nlmclnt_lookup_host(&addr, client->cl_xprt->prot, vers); 170 host = nlmclnt_lookup_host(&addr, client->cl_xprt->prot, vers,
171 nfssrv->nfs_client->cl_hostname,
172 strlen(nfssrv->nfs_client->cl_hostname));
170 if (host == NULL) 173 if (host == NULL)
171 return -ENOLCK; 174 return -ENOLCK;
172 175
@@ -499,7 +502,7 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
499 unsigned char fl_flags = fl->fl_flags; 502 unsigned char fl_flags = fl->fl_flags;
500 int status = -ENOLCK; 503 int status = -ENOLCK;
501 504
502 if (!host->h_monitored && nsm_monitor(host) < 0) { 505 if (nsm_monitor(host) < 0) {
503 printk(KERN_NOTICE "lockd: failed to monitor %s\n", 506 printk(KERN_NOTICE "lockd: failed to monitor %s\n",
504 host->h_name); 507 host->h_name);
505 goto out; 508 goto out;
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index a0d0b58ce7a4..fb24a9730345 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -27,46 +27,60 @@
27#define NLM_HOST_EXPIRE ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ) 27#define NLM_HOST_EXPIRE ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
28#define NLM_HOST_COLLECT ((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ) 28#define NLM_HOST_COLLECT ((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
29 29
30static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH]; 30static struct hlist_head nlm_hosts[NLM_HOST_NRHASH];
31static unsigned long next_gc; 31static unsigned long next_gc;
32static int nrhosts; 32static int nrhosts;
33static DEFINE_MUTEX(nlm_host_mutex); 33static DEFINE_MUTEX(nlm_host_mutex);
34 34
35 35
36static void nlm_gc_hosts(void); 36static void nlm_gc_hosts(void);
37static struct nsm_handle * __nsm_find(const struct sockaddr_in *,
38 const char *, int, int);
37 39
38/* 40/*
39 * Find an NLM server handle in the cache. If there is none, create it. 41 * Find an NLM server handle in the cache. If there is none, create it.
40 */ 42 */
41struct nlm_host * 43struct nlm_host *
42nlmclnt_lookup_host(struct sockaddr_in *sin, int proto, int version) 44nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
45 const char *hostname, int hostname_len)
43{ 46{
44 return nlm_lookup_host(0, sin, proto, version); 47 return nlm_lookup_host(0, sin, proto, version,
48 hostname, hostname_len);
45} 49}
46 50
47/* 51/*
48 * Find an NLM client handle in the cache. If there is none, create it. 52 * Find an NLM client handle in the cache. If there is none, create it.
49 */ 53 */
50struct nlm_host * 54struct nlm_host *
51nlmsvc_lookup_host(struct svc_rqst *rqstp) 55nlmsvc_lookup_host(struct svc_rqst *rqstp,
56 const char *hostname, int hostname_len)
52{ 57{
53 return nlm_lookup_host(1, &rqstp->rq_addr, 58 return nlm_lookup_host(1, &rqstp->rq_addr,
54 rqstp->rq_prot, rqstp->rq_vers); 59 rqstp->rq_prot, rqstp->rq_vers,
60 hostname, hostname_len);
55} 61}
56 62
57/* 63/*
58 * Common host lookup routine for server & client 64 * Common host lookup routine for server & client
59 */ 65 */
60struct nlm_host * 66struct nlm_host *
61nlm_lookup_host(int server, struct sockaddr_in *sin, 67nlm_lookup_host(int server, const struct sockaddr_in *sin,
62 int proto, int version) 68 int proto, int version,
69 const char *hostname,
70 int hostname_len)
63{ 71{
64 struct nlm_host *host, **hp; 72 struct hlist_head *chain;
65 u32 addr; 73 struct hlist_node *pos;
74 struct nlm_host *host;
75 struct nsm_handle *nsm = NULL;
66 int hash; 76 int hash;
67 77
68 dprintk("lockd: nlm_lookup_host(%08x, p=%d, v=%d)\n", 78 dprintk("lockd: nlm_lookup_host(%u.%u.%u.%u, p=%d, v=%d, my role=%s, name=%.*s)\n",
69 (unsigned)(sin? ntohl(sin->sin_addr.s_addr) : 0), proto, version); 79 NIPQUAD(sin->sin_addr.s_addr), proto, version,
80 server? "server" : "client",
81 hostname_len,
82 hostname? hostname : "<none>");
83
70 84
71 hash = NLM_ADDRHASH(sin->sin_addr.s_addr); 85 hash = NLM_ADDRHASH(sin->sin_addr.s_addr);
72 86
@@ -76,7 +90,22 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
76 if (time_after_eq(jiffies, next_gc)) 90 if (time_after_eq(jiffies, next_gc))
77 nlm_gc_hosts(); 91 nlm_gc_hosts();
78 92
79 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { 93 /* We may keep several nlm_host objects for a peer, because each
94 * nlm_host is identified by
95 * (address, protocol, version, server/client)
96 * We could probably simplify this a little by putting all those
97 * different NLM rpc_clients into one single nlm_host object.
98 * This would allow us to have one nlm_host per address.
99 */
100 chain = &nlm_hosts[hash];
101 hlist_for_each_entry(host, pos, chain, h_hash) {
102 if (!nlm_cmp_addr(&host->h_addr, sin))
103 continue;
104
105 /* See if we have an NSM handle for this client */
106 if (!nsm)
107 nsm = host->h_nsmhandle;
108
80 if (host->h_proto != proto) 109 if (host->h_proto != proto)
81 continue; 110 continue;
82 if (host->h_version != version) 111 if (host->h_version != version)
@@ -84,28 +113,30 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
84 if (host->h_server != server) 113 if (host->h_server != server)
85 continue; 114 continue;
86 115
87 if (nlm_cmp_addr(&host->h_addr, sin)) { 116 /* Move to head of hash chain. */
88 if (hp != nlm_hosts + hash) { 117 hlist_del(&host->h_hash);
89 *hp = host->h_next; 118 hlist_add_head(&host->h_hash, chain);
90 host->h_next = nlm_hosts[hash];
91 nlm_hosts[hash] = host;
92 }
93 nlm_get_host(host);
94 mutex_unlock(&nlm_host_mutex);
95 return host;
96 }
97 }
98 119
99 /* Ooops, no host found, create it */ 120 nlm_get_host(host);
100 dprintk("lockd: creating host entry\n"); 121 goto out;
122 }
123 if (nsm)
124 atomic_inc(&nsm->sm_count);
101 125
102 host = kzalloc(sizeof(*host), GFP_KERNEL); 126 host = NULL;
103 if (!host)
104 goto nohost;
105 127
106 addr = sin->sin_addr.s_addr; 128 /* Sadly, the host isn't in our hash table yet. See if
107 sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr)); 129 * we have an NSM handle for it. If not, create one.
130 */
131 if (!nsm && !(nsm = nsm_find(sin, hostname, hostname_len)))
132 goto out;
108 133
134 host = kzalloc(sizeof(*host), GFP_KERNEL);
135 if (!host) {
136 nsm_release(nsm);
137 goto out;
138 }
139 host->h_name = nsm->sm_name;
109 host->h_addr = *sin; 140 host->h_addr = *sin;
110 host->h_addr.sin_port = 0; /* ouch! */ 141 host->h_addr.sin_port = 0; /* ouch! */
111 host->h_version = version; 142 host->h_version = version;
@@ -119,9 +150,9 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
119 init_rwsem(&host->h_rwsem); 150 init_rwsem(&host->h_rwsem);
120 host->h_state = 0; /* pseudo NSM state */ 151 host->h_state = 0; /* pseudo NSM state */
121 host->h_nsmstate = 0; /* real NSM state */ 152 host->h_nsmstate = 0; /* real NSM state */
153 host->h_nsmhandle = nsm;
122 host->h_server = server; 154 host->h_server = server;
123 host->h_next = nlm_hosts[hash]; 155 hlist_add_head(&host->h_hash, chain);
124 nlm_hosts[hash] = host;
125 INIT_LIST_HEAD(&host->h_lockowners); 156 INIT_LIST_HEAD(&host->h_lockowners);
126 spin_lock_init(&host->h_lock); 157 spin_lock_init(&host->h_lock);
127 INIT_LIST_HEAD(&host->h_granted); 158 INIT_LIST_HEAD(&host->h_granted);
@@ -130,35 +161,39 @@ nlm_lookup_host(int server, struct sockaddr_in *sin,
130 if (++nrhosts > NLM_HOST_MAX) 161 if (++nrhosts > NLM_HOST_MAX)
131 next_gc = 0; 162 next_gc = 0;
132 163
133nohost: 164out:
134 mutex_unlock(&nlm_host_mutex); 165 mutex_unlock(&nlm_host_mutex);
135 return host; 166 return host;
136} 167}
137 168
138struct nlm_host * 169/*
139nlm_find_client(void) 170 * Destroy a host
171 */
172static void
173nlm_destroy_host(struct nlm_host *host)
140{ 174{
141 /* find a nlm_host for a client for which h_killed == 0. 175 struct rpc_clnt *clnt;
142 * and return it 176
177 BUG_ON(!list_empty(&host->h_lockowners));
178 BUG_ON(atomic_read(&host->h_count));
179
180 /*
181 * Release NSM handle and unmonitor host.
143 */ 182 */
144 int hash; 183 nsm_unmonitor(host);
145 mutex_lock(&nlm_host_mutex); 184
146 for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) { 185 if ((clnt = host->h_rpcclnt) != NULL) {
147 struct nlm_host *host, **hp; 186 if (atomic_read(&clnt->cl_users)) {
148 for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { 187 printk(KERN_WARNING
149 if (host->h_server && 188 "lockd: active RPC handle\n");
150 host->h_killed == 0) { 189 clnt->cl_dead = 1;
151 nlm_get_host(host); 190 } else {
152 mutex_unlock(&nlm_host_mutex); 191 rpc_destroy_client(host->h_rpcclnt);
153 return host;
154 }
155 } 192 }
156 } 193 }
157 mutex_unlock(&nlm_host_mutex); 194 kfree(host);
158 return NULL;
159} 195}
160 196
161
162/* 197/*
163 * Create the NLM RPC client for an NLM peer 198 * Create the NLM RPC client for an NLM peer
164 */ 199 */
@@ -260,22 +295,82 @@ void nlm_release_host(struct nlm_host *host)
260} 295}
261 296
262/* 297/*
298 * We were notified that the host indicated by address &sin
299 * has rebooted.
300 * Release all resources held by that peer.
301 */
302void nlm_host_rebooted(const struct sockaddr_in *sin,
303 const char *hostname, int hostname_len,
304 u32 new_state)
305{
306 struct hlist_head *chain;
307 struct hlist_node *pos;
308 struct nsm_handle *nsm;
309 struct nlm_host *host;
310
311 dprintk("lockd: nlm_host_rebooted(%s, %u.%u.%u.%u)\n",
312 hostname, NIPQUAD(sin->sin_addr));
313
314 /* Find the NSM handle for this peer */
315 if (!(nsm = __nsm_find(sin, hostname, hostname_len, 0)))
316 return;
317
318 /* When reclaiming locks on this peer, make sure that
319 * we set up a new notification */
320 nsm->sm_monitored = 0;
321
322 /* Mark all hosts tied to this NSM state as having rebooted.
323 * We run the loop repeatedly, because we drop the host table
324 * lock for this.
325 * To avoid processing a host several times, we match the nsmstate.
326 */
327again: mutex_lock(&nlm_host_mutex);
328 for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
329 hlist_for_each_entry(host, pos, chain, h_hash) {
330 if (host->h_nsmhandle == nsm
331 && host->h_nsmstate != new_state) {
332 host->h_nsmstate = new_state;
333 host->h_state++;
334
335 nlm_get_host(host);
336 mutex_unlock(&nlm_host_mutex);
337
338 if (host->h_server) {
339 /* We're server for this guy, just ditch
340 * all the locks he held. */
341 nlmsvc_free_host_resources(host);
342 } else {
343 /* He's the server, initiate lock recovery. */
344 nlmclnt_recovery(host);
345 }
346
347 nlm_release_host(host);
348 goto again;
349 }
350 }
351 }
352
353 mutex_unlock(&nlm_host_mutex);
354}
355
356/*
263 * Shut down the hosts module. 357 * Shut down the hosts module.
264 * Note that this routine is called only at server shutdown time. 358 * Note that this routine is called only at server shutdown time.
265 */ 359 */
266void 360void
267nlm_shutdown_hosts(void) 361nlm_shutdown_hosts(void)
268{ 362{
363 struct hlist_head *chain;
364 struct hlist_node *pos;
269 struct nlm_host *host; 365 struct nlm_host *host;
270 int i;
271 366
272 dprintk("lockd: shutting down host module\n"); 367 dprintk("lockd: shutting down host module\n");
273 mutex_lock(&nlm_host_mutex); 368 mutex_lock(&nlm_host_mutex);
274 369
275 /* First, make all hosts eligible for gc */ 370 /* First, make all hosts eligible for gc */
276 dprintk("lockd: nuking all hosts...\n"); 371 dprintk("lockd: nuking all hosts...\n");
277 for (i = 0; i < NLM_HOST_NRHASH; i++) { 372 for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
278 for (host = nlm_hosts[i]; host; host = host->h_next) 373 hlist_for_each_entry(host, pos, chain, h_hash)
279 host->h_expires = jiffies - 1; 374 host->h_expires = jiffies - 1;
280 } 375 }
281 376
@@ -287,8 +382,8 @@ nlm_shutdown_hosts(void)
287 if (nrhosts) { 382 if (nrhosts) {
288 printk(KERN_WARNING "lockd: couldn't shutdown host module!\n"); 383 printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
289 dprintk("lockd: %d hosts left:\n", nrhosts); 384 dprintk("lockd: %d hosts left:\n", nrhosts);
290 for (i = 0; i < NLM_HOST_NRHASH; i++) { 385 for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
291 for (host = nlm_hosts[i]; host; host = host->h_next) { 386 hlist_for_each_entry(host, pos, chain, h_hash) {
292 dprintk(" %s (cnt %d use %d exp %ld)\n", 387 dprintk(" %s (cnt %d use %d exp %ld)\n",
293 host->h_name, atomic_read(&host->h_count), 388 host->h_name, atomic_read(&host->h_count),
294 host->h_inuse, host->h_expires); 389 host->h_inuse, host->h_expires);
@@ -305,45 +400,32 @@ nlm_shutdown_hosts(void)
305static void 400static void
306nlm_gc_hosts(void) 401nlm_gc_hosts(void)
307{ 402{
308 struct nlm_host **q, *host; 403 struct hlist_head *chain;
309 struct rpc_clnt *clnt; 404 struct hlist_node *pos, *next;
310 int i; 405 struct nlm_host *host;
311 406
312 dprintk("lockd: host garbage collection\n"); 407 dprintk("lockd: host garbage collection\n");
313 for (i = 0; i < NLM_HOST_NRHASH; i++) { 408 for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
314 for (host = nlm_hosts[i]; host; host = host->h_next) 409 hlist_for_each_entry(host, pos, chain, h_hash)
315 host->h_inuse = 0; 410 host->h_inuse = 0;
316 } 411 }
317 412
318 /* Mark all hosts that hold locks, blocks or shares */ 413 /* Mark all hosts that hold locks, blocks or shares */
319 nlmsvc_mark_resources(); 414 nlmsvc_mark_resources();
320 415
321 for (i = 0; i < NLM_HOST_NRHASH; i++) { 416 for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
322 q = &nlm_hosts[i]; 417 hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
323 while ((host = *q) != NULL) {
324 if (atomic_read(&host->h_count) || host->h_inuse 418 if (atomic_read(&host->h_count) || host->h_inuse
325 || time_before(jiffies, host->h_expires)) { 419 || time_before(jiffies, host->h_expires)) {
326 dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n", 420 dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
327 host->h_name, atomic_read(&host->h_count), 421 host->h_name, atomic_read(&host->h_count),
328 host->h_inuse, host->h_expires); 422 host->h_inuse, host->h_expires);
329 q = &host->h_next;
330 continue; 423 continue;
331 } 424 }
332 dprintk("lockd: delete host %s\n", host->h_name); 425 dprintk("lockd: delete host %s\n", host->h_name);
333 *q = host->h_next; 426 hlist_del_init(&host->h_hash);
334 /* Don't unmonitor hosts that have been invalidated */ 427
335 if (host->h_monitored && !host->h_killed) 428 nlm_destroy_host(host);
336 nsm_unmonitor(host);
337 if ((clnt = host->h_rpcclnt) != NULL) {
338 if (atomic_read(&clnt->cl_users)) {
339 printk(KERN_WARNING
340 "lockd: active RPC handle\n");
341 clnt->cl_dead = 1;
342 } else {
343 rpc_destroy_client(host->h_rpcclnt);
344 }
345 }
346 kfree(host);
347 nrhosts--; 429 nrhosts--;
348 } 430 }
349 } 431 }
@@ -351,3 +433,88 @@ nlm_gc_hosts(void)
351 next_gc = jiffies + NLM_HOST_COLLECT; 433 next_gc = jiffies + NLM_HOST_COLLECT;
352} 434}
353 435
436
437/*
438 * Manage NSM handles
439 */
440static LIST_HEAD(nsm_handles);
441static DEFINE_MUTEX(nsm_mutex);
442
443static struct nsm_handle *
444__nsm_find(const struct sockaddr_in *sin,
445 const char *hostname, int hostname_len,
446 int create)
447{
448 struct nsm_handle *nsm = NULL;
449 struct list_head *pos;
450
451 if (!sin)
452 return NULL;
453
454 if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
455 if (printk_ratelimit()) {
456 printk(KERN_WARNING "Invalid hostname \"%.*s\" "
457 "in NFS lock request\n",
458 hostname_len, hostname);
459 }
460 return NULL;
461 }
462
463 mutex_lock(&nsm_mutex);
464 list_for_each(pos, &nsm_handles) {
465 nsm = list_entry(pos, struct nsm_handle, sm_link);
466
467 if (hostname && nsm_use_hostnames) {
468 if (strlen(nsm->sm_name) != hostname_len
469 || memcmp(nsm->sm_name, hostname, hostname_len))
470 continue;
471 } else if (!nlm_cmp_addr(&nsm->sm_addr, sin))
472 continue;
473 atomic_inc(&nsm->sm_count);
474 goto out;
475 }
476
477 if (!create) {
478 nsm = NULL;
479 goto out;
480 }
481
482 nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
483 if (nsm != NULL) {
484 nsm->sm_addr = *sin;
485 nsm->sm_name = (char *) (nsm + 1);
486 memcpy(nsm->sm_name, hostname, hostname_len);
487 nsm->sm_name[hostname_len] = '\0';
488 atomic_set(&nsm->sm_count, 1);
489
490 list_add(&nsm->sm_link, &nsm_handles);
491 }
492
493out:
494 mutex_unlock(&nsm_mutex);
495 return nsm;
496}
497
498struct nsm_handle *
499nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len)
500{
501 return __nsm_find(sin, hostname, hostname_len, 1);
502}
503
504/*
505 * Release an NSM handle
506 */
507void
508nsm_release(struct nsm_handle *nsm)
509{
510 if (!nsm)
511 return;
512 if (atomic_dec_and_test(&nsm->sm_count)) {
513 mutex_lock(&nsm_mutex);
514 if (atomic_read(&nsm->sm_count) == 0) {
515 list_del(&nsm->sm_link);
516 kfree(nsm);
517 }
518 mutex_unlock(&nsm_mutex);
519 }
520}
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index a816b920d431..e0179f8c327f 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -24,13 +24,13 @@ static struct rpc_program nsm_program;
24/* 24/*
25 * Local NSM state 25 * Local NSM state
26 */ 26 */
27u32 nsm_local_state; 27int nsm_local_state;
28 28
29/* 29/*
30 * Common procedure for SM_MON/SM_UNMON calls 30 * Common procedure for SM_MON/SM_UNMON calls
31 */ 31 */
32static int 32static int
33nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res) 33nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
34{ 34{
35 struct rpc_clnt *clnt; 35 struct rpc_clnt *clnt;
36 int status; 36 int status;
@@ -46,10 +46,11 @@ nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res)
46 goto out; 46 goto out;
47 } 47 }
48 48
49 args.addr = host->h_addr.sin_addr.s_addr; 49 memset(&args, 0, sizeof(args));
50 args.proto= (host->h_proto<<1) | host->h_server; 50 args.mon_name = nsm->sm_name;
51 args.addr = nsm->sm_addr.sin_addr.s_addr;
51 args.prog = NLM_PROGRAM; 52 args.prog = NLM_PROGRAM;
52 args.vers = host->h_version; 53 args.vers = 3;
53 args.proc = NLMPROC_NSM_NOTIFY; 54 args.proc = NLMPROC_NSM_NOTIFY;
54 memset(res, 0, sizeof(*res)); 55 memset(res, 0, sizeof(*res));
55 56
@@ -70,17 +71,22 @@ nsm_mon_unmon(struct nlm_host *host, u32 proc, struct nsm_res *res)
70int 71int
71nsm_monitor(struct nlm_host *host) 72nsm_monitor(struct nlm_host *host)
72{ 73{
74 struct nsm_handle *nsm = host->h_nsmhandle;
73 struct nsm_res res; 75 struct nsm_res res;
74 int status; 76 int status;
75 77
76 dprintk("lockd: nsm_monitor(%s)\n", host->h_name); 78 dprintk("lockd: nsm_monitor(%s)\n", host->h_name);
79 BUG_ON(nsm == NULL);
77 80
78 status = nsm_mon_unmon(host, SM_MON, &res); 81 if (nsm->sm_monitored)
82 return 0;
83
84 status = nsm_mon_unmon(nsm, SM_MON, &res);
79 85
80 if (status < 0 || res.status != 0) 86 if (status < 0 || res.status != 0)
81 printk(KERN_NOTICE "lockd: cannot monitor %s\n", host->h_name); 87 printk(KERN_NOTICE "lockd: cannot monitor %s\n", host->h_name);
82 else 88 else
83 host->h_monitored = 1; 89 nsm->sm_monitored = 1;
84 return status; 90 return status;
85} 91}
86 92
@@ -90,16 +96,26 @@ nsm_monitor(struct nlm_host *host)
90int 96int
91nsm_unmonitor(struct nlm_host *host) 97nsm_unmonitor(struct nlm_host *host)
92{ 98{
99 struct nsm_handle *nsm = host->h_nsmhandle;
93 struct nsm_res res; 100 struct nsm_res res;
94 int status; 101 int status = 0;
95 102
96 dprintk("lockd: nsm_unmonitor(%s)\n", host->h_name); 103 if (nsm == NULL)
97 104 return 0;
98 status = nsm_mon_unmon(host, SM_UNMON, &res); 105 host->h_nsmhandle = NULL;
99 if (status < 0) 106
100 printk(KERN_NOTICE "lockd: cannot unmonitor %s\n", host->h_name); 107 if (atomic_read(&nsm->sm_count) == 1
101 else 108 && nsm->sm_monitored && !nsm->sm_sticky) {
102 host->h_monitored = 0; 109 dprintk("lockd: nsm_unmonitor(%s)\n", host->h_name);
110
111 status = nsm_mon_unmon(nsm, SM_UNMON, &res);
112 if (status < 0)
113 printk(KERN_NOTICE "lockd: cannot unmonitor %s\n",
114 host->h_name);
115 else
116 nsm->sm_monitored = 0;
117 }
118 nsm_release(nsm);
103 return status; 119 return status;
104} 120}
105 121
@@ -135,7 +151,7 @@ nsm_create(void)
135static u32 * 151static u32 *
136xdr_encode_common(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp) 152xdr_encode_common(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
137{ 153{
138 char buffer[20]; 154 char buffer[20], *name;
139 155
140 /* 156 /*
141 * Use the dotted-quad IP address of the remote host as 157 * Use the dotted-quad IP address of the remote host as
@@ -143,8 +159,13 @@ xdr_encode_common(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
143 * hostname first for whatever remote hostname it receives, 159 * hostname first for whatever remote hostname it receives,
144 * so this works alright. 160 * so this works alright.
145 */ 161 */
146 sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(argp->addr)); 162 if (nsm_use_hostnames) {
147 if (!(p = xdr_encode_string(p, buffer)) 163 name = argp->mon_name;
164 } else {
165 sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(argp->addr));
166 name = buffer;
167 }
168 if (!(p = xdr_encode_string(p, name))
148 || !(p = xdr_encode_string(p, utsname()->nodename))) 169 || !(p = xdr_encode_string(p, utsname()->nodename)))
149 return ERR_PTR(-EIO); 170 return ERR_PTR(-EIO);
150 *p++ = htonl(argp->prog); 171 *p++ = htonl(argp->prog);
@@ -160,9 +181,11 @@ xdr_encode_mon(struct rpc_rqst *rqstp, u32 *p, struct nsm_args *argp)
160 p = xdr_encode_common(rqstp, p, argp); 181 p = xdr_encode_common(rqstp, p, argp);
161 if (IS_ERR(p)) 182 if (IS_ERR(p))
162 return PTR_ERR(p); 183 return PTR_ERR(p);
184
185 /* Surprise - there may even be room for an IPv6 address now */
163 *p++ = argp->addr; 186 *p++ = argp->addr;
164 *p++ = argp->vers; 187 *p++ = 0;
165 *p++ = argp->proto; 188 *p++ = 0;
166 *p++ = 0; 189 *p++ = 0;
167 rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p); 190 rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
168 return 0; 191 return 0;
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 3cc369e5693f..634139232aaf 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -33,6 +33,7 @@
33#include <linux/sunrpc/svcsock.h> 33#include <linux/sunrpc/svcsock.h>
34#include <net/ip.h> 34#include <net/ip.h>
35#include <linux/lockd/lockd.h> 35#include <linux/lockd/lockd.h>
36#include <linux/lockd/sm_inter.h>
36#include <linux/nfs.h> 37#include <linux/nfs.h>
37 38
38#define NLMDBG_FACILITY NLMDBG_SVC 39#define NLMDBG_FACILITY NLMDBG_SVC
@@ -61,6 +62,7 @@ static DECLARE_WAIT_QUEUE_HEAD(lockd_exit);
61static unsigned long nlm_grace_period; 62static unsigned long nlm_grace_period;
62static unsigned long nlm_timeout = LOCKD_DFLT_TIMEO; 63static unsigned long nlm_timeout = LOCKD_DFLT_TIMEO;
63static int nlm_udpport, nlm_tcpport; 64static int nlm_udpport, nlm_tcpport;
65int nsm_use_hostnames = 0;
64 66
65/* 67/*
66 * Constants needed for the sysctl interface. 68 * Constants needed for the sysctl interface.
@@ -395,6 +397,22 @@ static ctl_table nlm_sysctls[] = {
395 .extra1 = (int *) &nlm_port_min, 397 .extra1 = (int *) &nlm_port_min,
396 .extra2 = (int *) &nlm_port_max, 398 .extra2 = (int *) &nlm_port_max,
397 }, 399 },
400 {
401 .ctl_name = CTL_UNNUMBERED,
402 .procname = "nsm_use_hostnames",
403 .data = &nsm_use_hostnames,
404 .maxlen = sizeof(int),
405 .mode = 0644,
406 .proc_handler = &proc_dointvec,
407 },
408 {
409 .ctl_name = CTL_UNNUMBERED,
410 .procname = "nsm_local_state",
411 .data = &nsm_local_state,
412 .maxlen = sizeof(int),
413 .mode = 0644,
414 .proc_handler = &proc_dointvec,
415 },
398 { .ctl_name = 0 } 416 { .ctl_name = 0 }
399}; 417};
400 418
@@ -483,6 +501,7 @@ module_param_call(nlm_udpport, param_set_port, param_get_int,
483 &nlm_udpport, 0644); 501 &nlm_udpport, 0644);
484module_param_call(nlm_tcpport, param_set_port, param_get_int, 502module_param_call(nlm_tcpport, param_set_port, param_get_int,
485 &nlm_tcpport, 0644); 503 &nlm_tcpport, 0644);
504module_param(nsm_use_hostnames, bool, 0644);
486 505
487/* 506/*
488 * Initialising and terminating the module. 507 * Initialising and terminating the module.
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index a2dd9ccb9b32..fa370f6eb07b 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -38,8 +38,8 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
38 return nlm_lck_denied_nolocks; 38 return nlm_lck_denied_nolocks;
39 39
40 /* Obtain host handle */ 40 /* Obtain host handle */
41 if (!(host = nlmsvc_lookup_host(rqstp)) 41 if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len))
42 || (argp->monitor && !host->h_monitored && nsm_monitor(host) < 0)) 42 || (argp->monitor && nsm_monitor(host) < 0))
43 goto no_locks; 43 goto no_locks;
44 *hostp = host; 44 *hostp = host;
45 45
@@ -260,7 +260,9 @@ static int nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *a
260 struct nlm_rqst *call; 260 struct nlm_rqst *call;
261 int stat; 261 int stat;
262 262
263 host = nlmsvc_lookup_host(rqstp); 263 host = nlmsvc_lookup_host(rqstp,
264 argp->lock.caller,
265 argp->lock.len);
264 if (host == NULL) 266 if (host == NULL)
265 return rpc_system_err; 267 return rpc_system_err;
266 268
@@ -420,10 +422,6 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
420 void *resp) 422 void *resp)
421{ 423{
422 struct sockaddr_in saddr = rqstp->rq_addr; 424 struct sockaddr_in saddr = rqstp->rq_addr;
423 int vers = argp->vers;
424 int prot = argp->proto >> 1;
425
426 struct nlm_host *host;
427 425
428 dprintk("lockd: SM_NOTIFY called\n"); 426 dprintk("lockd: SM_NOTIFY called\n");
429 if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK) 427 if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK)
@@ -438,21 +436,10 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
438 /* Obtain the host pointer for this NFS server and try to 436 /* Obtain the host pointer for this NFS server and try to
439 * reclaim all locks we hold on this server. 437 * reclaim all locks we hold on this server.
440 */ 438 */
439 memset(&saddr, 0, sizeof(saddr));
441 saddr.sin_addr.s_addr = argp->addr; 440 saddr.sin_addr.s_addr = argp->addr;
441 nlm_host_rebooted(&saddr, argp->mon, argp->len, argp->state);
442 442
443 if ((argp->proto & 1)==0) {
444 if ((host = nlmclnt_lookup_host(&saddr, prot, vers)) != NULL) {
445 nlmclnt_recovery(host, argp->state);
446 nlm_release_host(host);
447 }
448 } else {
449 /* If we run on an NFS server, delete all locks held by the client */
450
451 if ((host = nlm_lookup_host(1, &saddr, prot, vers)) != NULL) {
452 nlmsvc_free_host_resources(host);
453 nlm_release_host(host);
454 }
455 }
456 return rpc_success; 443 return rpc_success;
457} 444}
458 445
@@ -468,7 +455,7 @@ nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
468 455
469 dprintk("lockd: GRANTED_RES called\n"); 456 dprintk("lockd: GRANTED_RES called\n");
470 457
471 nlmsvc_grant_reply(rqstp, &argp->cookie, argp->status); 458 nlmsvc_grant_reply(&argp->cookie, argp->status);
472 return rpc_success; 459 return rpc_success;
473} 460}
474 461
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 93c00ee7189d..814c6064c9e0 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -40,7 +40,7 @@
40 40
41static void nlmsvc_release_block(struct nlm_block *block); 41static void nlmsvc_release_block(struct nlm_block *block);
42static void nlmsvc_insert_block(struct nlm_block *block, unsigned long); 42static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
43static int nlmsvc_remove_block(struct nlm_block *block); 43static void nlmsvc_remove_block(struct nlm_block *block);
44 44
45static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock); 45static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
46static void nlmsvc_freegrantargs(struct nlm_rqst *call); 46static void nlmsvc_freegrantargs(struct nlm_rqst *call);
@@ -49,7 +49,7 @@ static const struct rpc_call_ops nlmsvc_grant_ops;
49/* 49/*
50 * The list of blocked locks to retry 50 * The list of blocked locks to retry
51 */ 51 */
52static struct nlm_block * nlm_blocked; 52static LIST_HEAD(nlm_blocked);
53 53
54/* 54/*
55 * Insert a blocked lock into the global list 55 * Insert a blocked lock into the global list
@@ -57,48 +57,44 @@ static struct nlm_block * nlm_blocked;
57static void 57static void
58nlmsvc_insert_block(struct nlm_block *block, unsigned long when) 58nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
59{ 59{
60 struct nlm_block **bp, *b; 60 struct nlm_block *b;
61 struct list_head *pos;
61 62
62 dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when); 63 dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when);
63 kref_get(&block->b_count); 64 if (list_empty(&block->b_list)) {
64 if (block->b_queued) 65 kref_get(&block->b_count);
65 nlmsvc_remove_block(block); 66 } else {
66 bp = &nlm_blocked; 67 list_del_init(&block->b_list);
68 }
69
70 pos = &nlm_blocked;
67 if (when != NLM_NEVER) { 71 if (when != NLM_NEVER) {
68 if ((when += jiffies) == NLM_NEVER) 72 if ((when += jiffies) == NLM_NEVER)
69 when ++; 73 when ++;
70 while ((b = *bp) && time_before_eq(b->b_when,when) && b->b_when != NLM_NEVER) 74 list_for_each(pos, &nlm_blocked) {
71 bp = &b->b_next; 75 b = list_entry(pos, struct nlm_block, b_list);
72 } else 76 if (time_after(b->b_when,when) || b->b_when == NLM_NEVER)
73 while ((b = *bp) != 0) 77 break;
74 bp = &b->b_next; 78 }
79 /* On normal exit from the loop, pos == &nlm_blocked,
80 * so we will be adding to the end of the list - good
81 */
82 }
75 83
76 block->b_queued = 1; 84 list_add_tail(&block->b_list, pos);
77 block->b_when = when; 85 block->b_when = when;
78 block->b_next = b;
79 *bp = block;
80} 86}
81 87
82/* 88/*
83 * Remove a block from the global list 89 * Remove a block from the global list
84 */ 90 */
85static int 91static inline void
86nlmsvc_remove_block(struct nlm_block *block) 92nlmsvc_remove_block(struct nlm_block *block)
87{ 93{
88 struct nlm_block **bp, *b; 94 if (!list_empty(&block->b_list)) {
89 95 list_del_init(&block->b_list);
90 if (!block->b_queued) 96 nlmsvc_release_block(block);
91 return 1;
92 for (bp = &nlm_blocked; (b = *bp) != 0; bp = &b->b_next) {
93 if (b == block) {
94 *bp = block->b_next;
95 block->b_queued = 0;
96 nlmsvc_release_block(block);
97 return 1;
98 }
99 } 97 }
100
101 return 0;
102} 98}
103 99
104/* 100/*
@@ -107,14 +103,14 @@ nlmsvc_remove_block(struct nlm_block *block)
107static struct nlm_block * 103static struct nlm_block *
108nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) 104nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
109{ 105{
110 struct nlm_block **head, *block; 106 struct nlm_block *block;
111 struct file_lock *fl; 107 struct file_lock *fl;
112 108
113 dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n", 109 dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n",
114 file, lock->fl.fl_pid, 110 file, lock->fl.fl_pid,
115 (long long)lock->fl.fl_start, 111 (long long)lock->fl.fl_start,
116 (long long)lock->fl.fl_end, lock->fl.fl_type); 112 (long long)lock->fl.fl_end, lock->fl.fl_type);
117 for (head = &nlm_blocked; (block = *head) != 0; head = &block->b_next) { 113 list_for_each_entry(block, &nlm_blocked, b_list) {
118 fl = &block->b_call->a_args.lock.fl; 114 fl = &block->b_call->a_args.lock.fl;
119 dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n", 115 dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
120 block->b_file, fl->fl_pid, 116 block->b_file, fl->fl_pid,
@@ -143,20 +139,20 @@ static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b)
143 * Find a block with a given NLM cookie. 139 * Find a block with a given NLM cookie.
144 */ 140 */
145static inline struct nlm_block * 141static inline struct nlm_block *
146nlmsvc_find_block(struct nlm_cookie *cookie, struct sockaddr_in *sin) 142nlmsvc_find_block(struct nlm_cookie *cookie)
147{ 143{
148 struct nlm_block *block; 144 struct nlm_block *block;
149 145
150 for (block = nlm_blocked; block; block = block->b_next) { 146 list_for_each_entry(block, &nlm_blocked, b_list) {
151 dprintk("cookie: head of blocked queue %p, block %p\n", 147 if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
152 nlm_blocked, block); 148 goto found;
153 if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie)
154 && nlm_cmp_addr(sin, &block->b_host->h_addr))
155 break;
156 } 149 }
157 150
158 if (block != NULL) 151 return NULL;
159 kref_get(&block->b_count); 152
153found:
154 dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
155 kref_get(&block->b_count);
160 return block; 156 return block;
161} 157}
162 158
@@ -169,6 +165,11 @@ nlmsvc_find_block(struct nlm_cookie *cookie, struct sockaddr_in *sin)
169 * request, but (as I found out later) that's because some implementations 165 * request, but (as I found out later) that's because some implementations
170 * do just this. Never mind the standards comittees, they support our 166 * do just this. Never mind the standards comittees, they support our
171 * logging industries. 167 * logging industries.
168 *
169 * 10 years later: I hope we can safely ignore these old and broken
170 * clients by now. Let's fix this so we can uniquely identify an incoming
171 * GRANTED_RES message by cookie, without having to rely on the client's IP
172 * address. --okir
172 */ 173 */
173static inline struct nlm_block * 174static inline struct nlm_block *
174nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file, 175nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
@@ -179,7 +180,7 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
179 struct nlm_rqst *call = NULL; 180 struct nlm_rqst *call = NULL;
180 181
181 /* Create host handle for callback */ 182 /* Create host handle for callback */
182 host = nlmsvc_lookup_host(rqstp); 183 host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len);
183 if (host == NULL) 184 if (host == NULL)
184 return NULL; 185 return NULL;
185 186
@@ -192,6 +193,8 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
192 if (block == NULL) 193 if (block == NULL)
193 goto failed; 194 goto failed;
194 kref_init(&block->b_count); 195 kref_init(&block->b_count);
196 INIT_LIST_HEAD(&block->b_list);
197 INIT_LIST_HEAD(&block->b_flist);
195 198
196 if (!nlmsvc_setgrantargs(call, lock)) 199 if (!nlmsvc_setgrantargs(call, lock))
197 goto failed_free; 200 goto failed_free;
@@ -199,7 +202,7 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
199 /* Set notifier function for VFS, and init args */ 202 /* Set notifier function for VFS, and init args */
200 call->a_args.lock.fl.fl_flags |= FL_SLEEP; 203 call->a_args.lock.fl.fl_flags |= FL_SLEEP;
201 call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; 204 call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
202 call->a_args.cookie = *cookie; /* see above */ 205 nlmclnt_next_cookie(&call->a_args.cookie);
203 206
204 dprintk("lockd: created block %p...\n", block); 207 dprintk("lockd: created block %p...\n", block);
205 208
@@ -210,8 +213,7 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
210 file->f_count++; 213 file->f_count++;
211 214
212 /* Add to file's list of blocks */ 215 /* Add to file's list of blocks */
213 block->b_fnext = file->f_blocks; 216 list_add(&block->b_flist, &file->f_blocks);
214 file->f_blocks = block;
215 217
216 /* Set up RPC arguments for callback */ 218 /* Set up RPC arguments for callback */
217 block->b_call = call; 219 block->b_call = call;
@@ -248,19 +250,13 @@ static void nlmsvc_free_block(struct kref *kref)
248{ 250{
249 struct nlm_block *block = container_of(kref, struct nlm_block, b_count); 251 struct nlm_block *block = container_of(kref, struct nlm_block, b_count);
250 struct nlm_file *file = block->b_file; 252 struct nlm_file *file = block->b_file;
251 struct nlm_block **bp;
252 253
253 dprintk("lockd: freeing block %p...\n", block); 254 dprintk("lockd: freeing block %p...\n", block);
254 255
255 down(&file->f_sema);
256 /* Remove block from file's list of blocks */ 256 /* Remove block from file's list of blocks */
257 for (bp = &file->f_blocks; *bp; bp = &(*bp)->b_fnext) { 257 mutex_lock(&file->f_mutex);
258 if (*bp == block) { 258 list_del_init(&block->b_flist);
259 *bp = block->b_fnext; 259 mutex_unlock(&file->f_mutex);
260 break;
261 }
262 }
263 up(&file->f_sema);
264 260
265 nlmsvc_freegrantargs(block->b_call); 261 nlmsvc_freegrantargs(block->b_call);
266 nlm_release_call(block->b_call); 262 nlm_release_call(block->b_call);
@@ -274,47 +270,32 @@ static void nlmsvc_release_block(struct nlm_block *block)
274 kref_put(&block->b_count, nlmsvc_free_block); 270 kref_put(&block->b_count, nlmsvc_free_block);
275} 271}
276 272
277static void nlmsvc_act_mark(struct nlm_host *host, struct nlm_file *file) 273/*
278{ 274 * Loop over all blocks and delete blocks held by
279 struct nlm_block *block; 275 * a matching host.
280 276 */
281 down(&file->f_sema); 277void nlmsvc_traverse_blocks(struct nlm_host *host,
282 for (block = file->f_blocks; block != NULL; block = block->b_fnext) 278 struct nlm_file *file,
283 block->b_host->h_inuse = 1; 279 nlm_host_match_fn_t match)
284 up(&file->f_sema);
285}
286
287static void nlmsvc_act_unlock(struct nlm_host *host, struct nlm_file *file)
288{ 280{
289 struct nlm_block *block; 281 struct nlm_block *block, *next;
290 282
291restart: 283restart:
292 down(&file->f_sema); 284 mutex_lock(&file->f_mutex);
293 for (block = file->f_blocks; block != NULL; block = block->b_fnext) { 285 list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
294 if (host != NULL && host != block->b_host) 286 if (!match(block->b_host, host))
295 continue; 287 continue;
296 if (!block->b_queued) 288 /* Do not destroy blocks that are not on
289 * the global retry list - why? */
290 if (list_empty(&block->b_list))
297 continue; 291 continue;
298 kref_get(&block->b_count); 292 kref_get(&block->b_count);
299 up(&file->f_sema); 293 mutex_unlock(&file->f_mutex);
300 nlmsvc_unlink_block(block); 294 nlmsvc_unlink_block(block);
301 nlmsvc_release_block(block); 295 nlmsvc_release_block(block);
302 goto restart; 296 goto restart;
303 } 297 }
304 up(&file->f_sema); 298 mutex_unlock(&file->f_mutex);
305}
306
307/*
308 * Loop over all blocks and perform the action specified.
309 * (NLM_ACT_CHECK handled by nlmsvc_inspect_file).
310 */
311void
312nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, int action)
313{
314 if (action == NLM_ACT_MARK)
315 nlmsvc_act_mark(host, file);
316 else
317 nlmsvc_act_unlock(host, file);
318} 299}
319 300
320/* 301/*
@@ -373,7 +354,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
373 lock->fl.fl_flags &= ~FL_SLEEP; 354 lock->fl.fl_flags &= ~FL_SLEEP;
374again: 355again:
375 /* Lock file against concurrent access */ 356 /* Lock file against concurrent access */
376 down(&file->f_sema); 357 mutex_lock(&file->f_mutex);
377 /* Get existing block (in case client is busy-waiting) */ 358 /* Get existing block (in case client is busy-waiting) */
378 block = nlmsvc_lookup_block(file, lock); 359 block = nlmsvc_lookup_block(file, lock);
379 if (block == NULL) { 360 if (block == NULL) {
@@ -411,10 +392,10 @@ again:
411 392
412 /* If we don't have a block, create and initialize it. Then 393 /* If we don't have a block, create and initialize it. Then
413 * retry because we may have slept in kmalloc. */ 394 * retry because we may have slept in kmalloc. */
414 /* We have to release f_sema as nlmsvc_create_block may try to 395 /* We have to release f_mutex as nlmsvc_create_block may try to
415 * to claim it while doing host garbage collection */ 396 * to claim it while doing host garbage collection */
416 if (newblock == NULL) { 397 if (newblock == NULL) {
417 up(&file->f_sema); 398 mutex_unlock(&file->f_mutex);
418 dprintk("lockd: blocking on this lock (allocating).\n"); 399 dprintk("lockd: blocking on this lock (allocating).\n");
419 if (!(newblock = nlmsvc_create_block(rqstp, file, lock, cookie))) 400 if (!(newblock = nlmsvc_create_block(rqstp, file, lock, cookie)))
420 return nlm_lck_denied_nolocks; 401 return nlm_lck_denied_nolocks;
@@ -424,7 +405,7 @@ again:
424 /* Append to list of blocked */ 405 /* Append to list of blocked */
425 nlmsvc_insert_block(newblock, NLM_NEVER); 406 nlmsvc_insert_block(newblock, NLM_NEVER);
426out: 407out:
427 up(&file->f_sema); 408 mutex_unlock(&file->f_mutex);
428 nlmsvc_release_block(newblock); 409 nlmsvc_release_block(newblock);
429 nlmsvc_release_block(block); 410 nlmsvc_release_block(block);
430 dprintk("lockd: nlmsvc_lock returned %u\n", ret); 411 dprintk("lockd: nlmsvc_lock returned %u\n", ret);
@@ -451,6 +432,7 @@ nlmsvc_testlock(struct nlm_file *file, struct nlm_lock *lock,
451 (long long)conflock->fl.fl_start, 432 (long long)conflock->fl.fl_start,
452 (long long)conflock->fl.fl_end); 433 (long long)conflock->fl.fl_end);
453 conflock->caller = "somehost"; /* FIXME */ 434 conflock->caller = "somehost"; /* FIXME */
435 conflock->len = strlen(conflock->caller);
454 conflock->oh.len = 0; /* don't return OH info */ 436 conflock->oh.len = 0; /* don't return OH info */
455 conflock->svid = conflock->fl.fl_pid; 437 conflock->svid = conflock->fl.fl_pid;
456 return nlm_lck_denied; 438 return nlm_lck_denied;
@@ -507,9 +489,9 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
507 (long long)lock->fl.fl_start, 489 (long long)lock->fl.fl_start,
508 (long long)lock->fl.fl_end); 490 (long long)lock->fl.fl_end);
509 491
510 down(&file->f_sema); 492 mutex_lock(&file->f_mutex);
511 block = nlmsvc_lookup_block(file, lock); 493 block = nlmsvc_lookup_block(file, lock);
512 up(&file->f_sema); 494 mutex_unlock(&file->f_mutex);
513 if (block != NULL) { 495 if (block != NULL) {
514 status = nlmsvc_unlink_block(block); 496 status = nlmsvc_unlink_block(block);
515 nlmsvc_release_block(block); 497 nlmsvc_release_block(block);
@@ -527,10 +509,10 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
527static void 509static void
528nlmsvc_notify_blocked(struct file_lock *fl) 510nlmsvc_notify_blocked(struct file_lock *fl)
529{ 511{
530 struct nlm_block **bp, *block; 512 struct nlm_block *block;
531 513
532 dprintk("lockd: VFS unblock notification for block %p\n", fl); 514 dprintk("lockd: VFS unblock notification for block %p\n", fl);
533 for (bp = &nlm_blocked; (block = *bp) != 0; bp = &block->b_next) { 515 list_for_each_entry(block, &nlm_blocked, b_list) {
534 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { 516 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
535 nlmsvc_insert_block(block, 0); 517 nlmsvc_insert_block(block, 0);
536 svc_wake_up(block->b_daemon); 518 svc_wake_up(block->b_daemon);
@@ -663,17 +645,14 @@ static const struct rpc_call_ops nlmsvc_grant_ops = {
663 * block. 645 * block.
664 */ 646 */
665void 647void
666nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status) 648nlmsvc_grant_reply(struct nlm_cookie *cookie, u32 status)
667{ 649{
668 struct nlm_block *block; 650 struct nlm_block *block;
669 struct nlm_file *file;
670 651
671 dprintk("grant_reply: looking for cookie %x, host (%08x), s=%d \n", 652 dprintk("grant_reply: looking for cookie %x, s=%d \n",
672 *(unsigned int *)(cookie->data), 653 *(unsigned int *)(cookie->data), status);
673 ntohl(rqstp->rq_addr.sin_addr.s_addr), status); 654 if (!(block = nlmsvc_find_block(cookie)))
674 if (!(block = nlmsvc_find_block(cookie, &rqstp->rq_addr)))
675 return; 655 return;
676 file = block->b_file;
677 656
678 if (block) { 657 if (block) {
679 if (status == NLM_LCK_DENIED_GRACE_PERIOD) { 658 if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
@@ -696,16 +675,19 @@ nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status
696unsigned long 675unsigned long
697nlmsvc_retry_blocked(void) 676nlmsvc_retry_blocked(void)
698{ 677{
699 struct nlm_block *block; 678 unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
679 struct nlm_block *block;
680
681 while (!list_empty(&nlm_blocked)) {
682 block = list_entry(nlm_blocked.next, struct nlm_block, b_list);
700 683
701 dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
702 nlm_blocked,
703 nlm_blocked? nlm_blocked->b_when : 0);
704 while ((block = nlm_blocked) != 0) {
705 if (block->b_when == NLM_NEVER) 684 if (block->b_when == NLM_NEVER)
706 break; 685 break;
707 if (time_after(block->b_when,jiffies)) 686 if (time_after(block->b_when,jiffies)) {
687 timeout = block->b_when - jiffies;
708 break; 688 break;
689 }
690
709 dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n", 691 dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
710 block, block->b_when); 692 block, block->b_when);
711 kref_get(&block->b_count); 693 kref_get(&block->b_count);
@@ -713,8 +695,5 @@ nlmsvc_retry_blocked(void)
713 nlmsvc_release_block(block); 695 nlmsvc_release_block(block);
714 } 696 }
715 697
716 if ((block = nlm_blocked) && block->b_when != NLM_NEVER) 698 return timeout;
717 return (block->b_when - jiffies);
718
719 return MAX_SCHEDULE_TIMEOUT;
720} 699}
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index dbb66a3b5cd9..75b2c81bcb93 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -66,8 +66,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
66 return nlm_lck_denied_nolocks; 66 return nlm_lck_denied_nolocks;
67 67
68 /* Obtain host handle */ 68 /* Obtain host handle */
69 if (!(host = nlmsvc_lookup_host(rqstp)) 69 if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len))
70 || (argp->monitor && !host->h_monitored && nsm_monitor(host) < 0)) 70 || (argp->monitor && nsm_monitor(host) < 0))
71 goto no_locks; 71 goto no_locks;
72 *hostp = host; 72 *hostp = host;
73 73
@@ -287,7 +287,9 @@ static int nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *ar
287 struct nlm_rqst *call; 287 struct nlm_rqst *call;
288 int stat; 288 int stat;
289 289
290 host = nlmsvc_lookup_host(rqstp); 290 host = nlmsvc_lookup_host(rqstp,
291 argp->lock.caller,
292 argp->lock.len);
291 if (host == NULL) 293 if (host == NULL)
292 return rpc_system_err; 294 return rpc_system_err;
293 295
@@ -449,9 +451,6 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
449 void *resp) 451 void *resp)
450{ 452{
451 struct sockaddr_in saddr = rqstp->rq_addr; 453 struct sockaddr_in saddr = rqstp->rq_addr;
452 int vers = argp->vers;
453 int prot = argp->proto >> 1;
454 struct nlm_host *host;
455 454
456 dprintk("lockd: SM_NOTIFY called\n"); 455 dprintk("lockd: SM_NOTIFY called\n");
457 if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK) 456 if (saddr.sin_addr.s_addr != htonl(INADDR_LOOPBACK)
@@ -466,19 +465,9 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
466 /* Obtain the host pointer for this NFS server and try to 465 /* Obtain the host pointer for this NFS server and try to
467 * reclaim all locks we hold on this server. 466 * reclaim all locks we hold on this server.
468 */ 467 */
468 memset(&saddr, 0, sizeof(saddr));
469 saddr.sin_addr.s_addr = argp->addr; 469 saddr.sin_addr.s_addr = argp->addr;
470 if ((argp->proto & 1)==0) { 470 nlm_host_rebooted(&saddr, argp->mon, argp->len, argp->state);
471 if ((host = nlmclnt_lookup_host(&saddr, prot, vers)) != NULL) {
472 nlmclnt_recovery(host, argp->state);
473 nlm_release_host(host);
474 }
475 } else {
476 /* If we run on an NFS server, delete all locks held by the client */
477 if ((host = nlm_lookup_host(1, &saddr, prot, vers)) != NULL) {
478 nlmsvc_free_host_resources(host);
479 nlm_release_host(host);
480 }
481 }
482 471
483 return rpc_success; 472 return rpc_success;
484} 473}
@@ -495,7 +484,7 @@ nlmsvc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
495 484
496 dprintk("lockd: GRANTED_RES called\n"); 485 dprintk("lockd: GRANTED_RES called\n");
497 486
498 nlmsvc_grant_reply(rqstp, &argp->cookie, argp->status); 487 nlmsvc_grant_reply(&argp->cookie, argp->status);
499 return rpc_success; 488 return rpc_success;
500} 489}
501 490
diff --git a/fs/lockd/svcshare.c b/fs/lockd/svcshare.c
index 27288c83da96..b9926ce8782e 100644
--- a/fs/lockd/svcshare.c
+++ b/fs/lockd/svcshare.c
@@ -85,24 +85,20 @@ nlmsvc_unshare_file(struct nlm_host *host, struct nlm_file *file,
85} 85}
86 86
87/* 87/*
88 * Traverse all shares for a given file (and host). 88 * Traverse all shares for a given file, and delete
89 * NLM_ACT_CHECK is handled by nlmsvc_inspect_file. 89 * those owned by the given (type of) host
90 */ 90 */
91void 91void nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file,
92nlmsvc_traverse_shares(struct nlm_host *host, struct nlm_file *file, int action) 92 nlm_host_match_fn_t match)
93{ 93{
94 struct nlm_share *share, **shpp; 94 struct nlm_share *share, **shpp;
95 95
96 shpp = &file->f_shares; 96 shpp = &file->f_shares;
97 while ((share = *shpp) != NULL) { 97 while ((share = *shpp) != NULL) {
98 if (action == NLM_ACT_MARK) 98 if (match(share->s_host, host)) {
99 share->s_host->h_inuse = 1; 99 *shpp = share->s_next;
100 else if (action == NLM_ACT_UNLOCK) { 100 kfree(share);
101 if (host == NULL || host == share->s_host) { 101 continue;
102 *shpp = share->s_next;
103 kfree(share);
104 continue;
105 }
106 } 102 }
107 shpp = &share->s_next; 103 shpp = &share->s_next;
108 } 104 }
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index a92dd98f8401..514f5f20701e 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -25,9 +25,9 @@
25/* 25/*
26 * Global file hash table 26 * Global file hash table
27 */ 27 */
28#define FILE_HASH_BITS 5 28#define FILE_HASH_BITS 7
29#define FILE_NRHASH (1<<FILE_HASH_BITS) 29#define FILE_NRHASH (1<<FILE_HASH_BITS)
30static struct nlm_file * nlm_files[FILE_NRHASH]; 30static struct hlist_head nlm_files[FILE_NRHASH];
31static DEFINE_MUTEX(nlm_file_mutex); 31static DEFINE_MUTEX(nlm_file_mutex);
32 32
33#ifdef NFSD_DEBUG 33#ifdef NFSD_DEBUG
@@ -82,6 +82,7 @@ u32
82nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result, 82nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
83 struct nfs_fh *f) 83 struct nfs_fh *f)
84{ 84{
85 struct hlist_node *pos;
85 struct nlm_file *file; 86 struct nlm_file *file;
86 unsigned int hash; 87 unsigned int hash;
87 u32 nfserr; 88 u32 nfserr;
@@ -93,7 +94,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
93 /* Lock file table */ 94 /* Lock file table */
94 mutex_lock(&nlm_file_mutex); 95 mutex_lock(&nlm_file_mutex);
95 96
96 for (file = nlm_files[hash]; file; file = file->f_next) 97 hlist_for_each_entry(file, pos, &nlm_files[hash], f_list)
97 if (!nfs_compare_fh(&file->f_handle, f)) 98 if (!nfs_compare_fh(&file->f_handle, f))
98 goto found; 99 goto found;
99 100
@@ -105,8 +106,9 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
105 goto out_unlock; 106 goto out_unlock;
106 107
107 memcpy(&file->f_handle, f, sizeof(struct nfs_fh)); 108 memcpy(&file->f_handle, f, sizeof(struct nfs_fh));
108 file->f_hash = hash; 109 mutex_init(&file->f_mutex);
109 init_MUTEX(&file->f_sema); 110 INIT_HLIST_NODE(&file->f_list);
111 INIT_LIST_HEAD(&file->f_blocks);
110 112
111 /* Open the file. Note that this must not sleep for too long, else 113 /* Open the file. Note that this must not sleep for too long, else
112 * we would lock up lockd:-) So no NFS re-exports, folks. 114 * we would lock up lockd:-) So no NFS re-exports, folks.
@@ -115,12 +117,11 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
115 * the file. 117 * the file.
116 */ 118 */
117 if ((nfserr = nlmsvc_ops->fopen(rqstp, f, &file->f_file)) != 0) { 119 if ((nfserr = nlmsvc_ops->fopen(rqstp, f, &file->f_file)) != 0) {
118 dprintk("lockd: open failed (nfserr %d)\n", ntohl(nfserr)); 120 dprintk("lockd: open failed (error %d)\n", nfserr);
119 goto out_free; 121 goto out_free;
120 } 122 }
121 123
122 file->f_next = nlm_files[hash]; 124 hlist_add_head(&file->f_list, &nlm_files[hash]);
123 nlm_files[hash] = file;
124 125
125found: 126found:
126 dprintk("lockd: found file %p (count %d)\n", file, file->f_count); 127 dprintk("lockd: found file %p (count %d)\n", file, file->f_count);
@@ -149,22 +150,14 @@ out_free:
149static inline void 150static inline void
150nlm_delete_file(struct nlm_file *file) 151nlm_delete_file(struct nlm_file *file)
151{ 152{
152 struct nlm_file **fp, *f;
153
154 nlm_debug_print_file("closing file", file); 153 nlm_debug_print_file("closing file", file);
155 154 if (!hlist_unhashed(&file->f_list)) {
156 fp = nlm_files + file->f_hash; 155 hlist_del(&file->f_list);
157 while ((f = *fp) != NULL) { 156 nlmsvc_ops->fclose(file->f_file);
158 if (f == file) { 157 kfree(file);
159 *fp = file->f_next; 158 } else {
160 nlmsvc_ops->fclose(file->f_file); 159 printk(KERN_WARNING "lockd: attempt to release unknown file!\n");
161 kfree(file);
162 return;
163 }
164 fp = &f->f_next;
165 } 160 }
166
167 printk(KERN_WARNING "lockd: attempt to release unknown file!\n");
168} 161}
169 162
170/* 163/*
@@ -172,7 +165,8 @@ nlm_delete_file(struct nlm_file *file)
172 * action. 165 * action.
173 */ 166 */
174static int 167static int
175nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file, int action) 168nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
169 nlm_host_match_fn_t match)
176{ 170{
177 struct inode *inode = nlmsvc_file_inode(file); 171 struct inode *inode = nlmsvc_file_inode(file);
178 struct file_lock *fl; 172 struct file_lock *fl;
@@ -186,17 +180,11 @@ again:
186 180
187 /* update current lock count */ 181 /* update current lock count */
188 file->f_locks++; 182 file->f_locks++;
183
189 lockhost = (struct nlm_host *) fl->fl_owner; 184 lockhost = (struct nlm_host *) fl->fl_owner;
190 if (action == NLM_ACT_MARK) 185 if (match(lockhost, host)) {
191 lockhost->h_inuse = 1;
192 else if (action == NLM_ACT_CHECK)
193 return 1;
194 else if (action == NLM_ACT_UNLOCK) {
195 struct file_lock lock = *fl; 186 struct file_lock lock = *fl;
196 187
197 if (host && lockhost != host)
198 continue;
199
200 lock.fl_type = F_UNLCK; 188 lock.fl_type = F_UNLCK;
201 lock.fl_start = 0; 189 lock.fl_start = 0;
202 lock.fl_end = OFFSET_MAX; 190 lock.fl_end = OFFSET_MAX;
@@ -213,53 +201,66 @@ again:
213} 201}
214 202
215/* 203/*
216 * Operate on a single file 204 * Inspect a single file
217 */ 205 */
218static inline int 206static inline int
219nlm_inspect_file(struct nlm_host *host, struct nlm_file *file, int action) 207nlm_inspect_file(struct nlm_host *host, struct nlm_file *file, nlm_host_match_fn_t match)
220{ 208{
221 if (action == NLM_ACT_CHECK) { 209 nlmsvc_traverse_blocks(host, file, match);
222 /* Fast path for mark and sweep garbage collection */ 210 nlmsvc_traverse_shares(host, file, match);
223 if (file->f_count || file->f_blocks || file->f_shares) 211 return nlm_traverse_locks(host, file, match);
212}
213
214/*
215 * Quick check whether there are still any locks, blocks or
216 * shares on a given file.
217 */
218static inline int
219nlm_file_inuse(struct nlm_file *file)
220{
221 struct inode *inode = nlmsvc_file_inode(file);
222 struct file_lock *fl;
223
224 if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
225 return 1;
226
227 for (fl = inode->i_flock; fl; fl = fl->fl_next) {
228 if (fl->fl_lmops == &nlmsvc_lock_operations)
224 return 1; 229 return 1;
225 } else {
226 nlmsvc_traverse_blocks(host, file, action);
227 nlmsvc_traverse_shares(host, file, action);
228 } 230 }
229 return nlm_traverse_locks(host, file, action); 231 file->f_locks = 0;
232 return 0;
230} 233}
231 234
232/* 235/*
233 * Loop over all files in the file table. 236 * Loop over all files in the file table.
234 */ 237 */
235static int 238static int
236nlm_traverse_files(struct nlm_host *host, int action) 239nlm_traverse_files(struct nlm_host *host, nlm_host_match_fn_t match)
237{ 240{
238 struct nlm_file *file, **fp; 241 struct hlist_node *pos, *next;
242 struct nlm_file *file;
239 int i, ret = 0; 243 int i, ret = 0;
240 244
241 mutex_lock(&nlm_file_mutex); 245 mutex_lock(&nlm_file_mutex);
242 for (i = 0; i < FILE_NRHASH; i++) { 246 for (i = 0; i < FILE_NRHASH; i++) {
243 fp = nlm_files + i; 247 hlist_for_each_entry_safe(file, pos, next, &nlm_files[i], f_list) {
244 while ((file = *fp) != NULL) {
245 file->f_count++; 248 file->f_count++;
246 mutex_unlock(&nlm_file_mutex); 249 mutex_unlock(&nlm_file_mutex);
247 250
248 /* Traverse locks, blocks and shares of this file 251 /* Traverse locks, blocks and shares of this file
249 * and update file->f_locks count */ 252 * and update file->f_locks count */
250 if (nlm_inspect_file(host, file, action)) 253 if (nlm_inspect_file(host, file, match))
251 ret = 1; 254 ret = 1;
252 255
253 mutex_lock(&nlm_file_mutex); 256 mutex_lock(&nlm_file_mutex);
254 file->f_count--; 257 file->f_count--;
255 /* No more references to this file. Let go of it. */ 258 /* No more references to this file. Let go of it. */
256 if (!file->f_blocks && !file->f_locks 259 if (list_empty(&file->f_blocks) && !file->f_locks
257 && !file->f_shares && !file->f_count) { 260 && !file->f_shares && !file->f_count) {
258 *fp = file->f_next; 261 hlist_del(&file->f_list);
259 nlmsvc_ops->fclose(file->f_file); 262 nlmsvc_ops->fclose(file->f_file);
260 kfree(file); 263 kfree(file);
261 } else {
262 fp = &file->f_next;
263 } 264 }
264 } 265 }
265 } 266 }
@@ -286,23 +287,54 @@ nlm_release_file(struct nlm_file *file)
286 mutex_lock(&nlm_file_mutex); 287 mutex_lock(&nlm_file_mutex);
287 288
288 /* If there are no more locks etc, delete the file */ 289 /* If there are no more locks etc, delete the file */
289 if(--file->f_count == 0) { 290 if (--file->f_count == 0 && !nlm_file_inuse(file))
290 if(!nlm_inspect_file(NULL, file, NLM_ACT_CHECK)) 291 nlm_delete_file(file);
291 nlm_delete_file(file);
292 }
293 292
294 mutex_unlock(&nlm_file_mutex); 293 mutex_unlock(&nlm_file_mutex);
295} 294}
296 295
297/* 296/*
297 * Helpers function for resource traversal
298 *
299 * nlmsvc_mark_host:
300 * used by the garbage collector; simply sets h_inuse.
301 * Always returns 0.
302 *
303 * nlmsvc_same_host:
304 * returns 1 iff the two hosts match. Used to release
305 * all resources bound to a specific host.
306 *
307 * nlmsvc_is_client:
308 * returns 1 iff the host is a client.
309 * Used by nlmsvc_invalidate_all
310 */
311static int
312nlmsvc_mark_host(struct nlm_host *host, struct nlm_host *dummy)
313{
314 host->h_inuse = 1;
315 return 0;
316}
317
318static int
319nlmsvc_same_host(struct nlm_host *host, struct nlm_host *other)
320{
321 return host == other;
322}
323
324static int
325nlmsvc_is_client(struct nlm_host *host, struct nlm_host *dummy)
326{
327 return host->h_server;
328}
329
330/*
298 * Mark all hosts that still hold resources 331 * Mark all hosts that still hold resources
299 */ 332 */
300void 333void
301nlmsvc_mark_resources(void) 334nlmsvc_mark_resources(void)
302{ 335{
303 dprintk("lockd: nlmsvc_mark_resources\n"); 336 dprintk("lockd: nlmsvc_mark_resources\n");
304 337 nlm_traverse_files(NULL, nlmsvc_mark_host);
305 nlm_traverse_files(NULL, NLM_ACT_MARK);
306} 338}
307 339
308/* 340/*
@@ -313,23 +345,25 @@ nlmsvc_free_host_resources(struct nlm_host *host)
313{ 345{
314 dprintk("lockd: nlmsvc_free_host_resources\n"); 346 dprintk("lockd: nlmsvc_free_host_resources\n");
315 347
316 if (nlm_traverse_files(host, NLM_ACT_UNLOCK)) 348 if (nlm_traverse_files(host, nlmsvc_same_host)) {
317 printk(KERN_WARNING 349 printk(KERN_WARNING
318 "lockd: couldn't remove all locks held by %s", 350 "lockd: couldn't remove all locks held by %s\n",
319 host->h_name); 351 host->h_name);
352 BUG();
353 }
320} 354}
321 355
322/* 356/*
323 * delete all hosts structs for clients 357 * Remove all locks held for clients
324 */ 358 */
325void 359void
326nlmsvc_invalidate_all(void) 360nlmsvc_invalidate_all(void)
327{ 361{
328 struct nlm_host *host; 362 /* Release all locks held by NFS clients.
329 while ((host = nlm_find_client()) != NULL) { 363 * Previously, the code would call
330 nlmsvc_free_host_resources(host); 364 * nlmsvc_free_host_resources for each client in
331 host->h_expires = 0; 365 * turn, which is about as inefficient as it gets.
332 host->h_killed = 1; 366 * Now we just do it once in nlm_traverse_files.
333 nlm_release_host(host); 367 */
334 } 368 nlm_traverse_files(NULL, nlmsvc_is_client);
335} 369}
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index cfe141e5d759..e13fa23bd108 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -319,12 +319,25 @@ svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
319 319
320static struct cache_head *export_table[EXPORT_HASHMAX]; 320static struct cache_head *export_table[EXPORT_HASHMAX];
321 321
322static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
323{
324 int i;
325
326 for (i = 0; i < fsloc->locations_count; i++) {
327 kfree(fsloc->locations[i].path);
328 kfree(fsloc->locations[i].hosts);
329 }
330 kfree(fsloc->locations);
331}
332
322static void svc_export_put(struct kref *ref) 333static void svc_export_put(struct kref *ref)
323{ 334{
324 struct svc_export *exp = container_of(ref, struct svc_export, h.ref); 335 struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
325 dput(exp->ex_dentry); 336 dput(exp->ex_dentry);
326 mntput(exp->ex_mnt); 337 mntput(exp->ex_mnt);
327 auth_domain_put(exp->ex_client); 338 auth_domain_put(exp->ex_client);
339 kfree(exp->ex_path);
340 nfsd4_fslocs_free(&exp->ex_fslocs);
328 kfree(exp); 341 kfree(exp);
329} 342}
330 343
@@ -386,6 +399,69 @@ static int check_export(struct inode *inode, int flags)
386 399
387} 400}
388 401
402#ifdef CONFIG_NFSD_V4
403
404static int
405fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc)
406{
407 int len;
408 int migrated, i, err;
409
410 len = qword_get(mesg, buf, PAGE_SIZE);
411 if (len != 5 || memcmp(buf, "fsloc", 5))
412 return 0;
413
414 /* listsize */
415 err = get_int(mesg, &fsloc->locations_count);
416 if (err)
417 return err;
418 if (fsloc->locations_count > MAX_FS_LOCATIONS)
419 return -EINVAL;
420 if (fsloc->locations_count == 0)
421 return 0;
422
423 fsloc->locations = kzalloc(fsloc->locations_count
424 * sizeof(struct nfsd4_fs_location), GFP_KERNEL);
425 if (!fsloc->locations)
426 return -ENOMEM;
427 for (i=0; i < fsloc->locations_count; i++) {
428 /* colon separated host list */
429 err = -EINVAL;
430 len = qword_get(mesg, buf, PAGE_SIZE);
431 if (len <= 0)
432 goto out_free_all;
433 err = -ENOMEM;
434 fsloc->locations[i].hosts = kstrdup(buf, GFP_KERNEL);
435 if (!fsloc->locations[i].hosts)
436 goto out_free_all;
437 err = -EINVAL;
438 /* slash separated path component list */
439 len = qword_get(mesg, buf, PAGE_SIZE);
440 if (len <= 0)
441 goto out_free_all;
442 err = -ENOMEM;
443 fsloc->locations[i].path = kstrdup(buf, GFP_KERNEL);
444 if (!fsloc->locations[i].path)
445 goto out_free_all;
446 }
447 /* migrated */
448 err = get_int(mesg, &migrated);
449 if (err)
450 goto out_free_all;
451 err = -EINVAL;
452 if (migrated < 0 || migrated > 1)
453 goto out_free_all;
454 fsloc->migrated = migrated;
455 return 0;
456out_free_all:
457 nfsd4_fslocs_free(fsloc);
458 return err;
459}
460
461#else /* CONFIG_NFSD_V4 */
462static inline int fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc) { return 0; }
463#endif
464
389static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) 465static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
390{ 466{
391 /* client path expiry [flags anonuid anongid fsid] */ 467 /* client path expiry [flags anonuid anongid fsid] */
@@ -398,6 +474,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
398 int an_int; 474 int an_int;
399 475
400 nd.dentry = NULL; 476 nd.dentry = NULL;
477 exp.ex_path = NULL;
401 478
402 if (mesg[mlen-1] != '\n') 479 if (mesg[mlen-1] != '\n')
403 return -EINVAL; 480 return -EINVAL;
@@ -428,6 +505,10 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
428 exp.ex_client = dom; 505 exp.ex_client = dom;
429 exp.ex_mnt = nd.mnt; 506 exp.ex_mnt = nd.mnt;
430 exp.ex_dentry = nd.dentry; 507 exp.ex_dentry = nd.dentry;
508 exp.ex_path = kstrdup(buf, GFP_KERNEL);
509 err = -ENOMEM;
510 if (!exp.ex_path)
511 goto out;
431 512
432 /* expiry */ 513 /* expiry */
433 err = -EINVAL; 514 err = -EINVAL;
@@ -435,6 +516,11 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
435 if (exp.h.expiry_time == 0) 516 if (exp.h.expiry_time == 0)
436 goto out; 517 goto out;
437 518
519 /* fs locations */
520 exp.ex_fslocs.locations = NULL;
521 exp.ex_fslocs.locations_count = 0;
522 exp.ex_fslocs.migrated = 0;
523
438 /* flags */ 524 /* flags */
439 err = get_int(&mesg, &an_int); 525 err = get_int(&mesg, &an_int);
440 if (err == -ENOENT) 526 if (err == -ENOENT)
@@ -460,6 +546,10 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
460 546
461 err = check_export(nd.dentry->d_inode, exp.ex_flags); 547 err = check_export(nd.dentry->d_inode, exp.ex_flags);
462 if (err) goto out; 548 if (err) goto out;
549
550 err = fsloc_parse(&mesg, buf, &exp.ex_fslocs);
551 if (err)
552 goto out;
463 } 553 }
464 554
465 expp = svc_export_lookup(&exp); 555 expp = svc_export_lookup(&exp);
@@ -473,6 +563,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
473 else 563 else
474 exp_put(expp); 564 exp_put(expp);
475 out: 565 out:
566 kfree(exp.ex_path);
476 if (nd.dentry) 567 if (nd.dentry)
477 path_release(&nd); 568 path_release(&nd);
478 out_no_path: 569 out_no_path:
@@ -482,7 +573,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
482 return err; 573 return err;
483} 574}
484 575
485static void exp_flags(struct seq_file *m, int flag, int fsid, uid_t anonu, uid_t anong); 576static void exp_flags(struct seq_file *m, int flag, int fsid,
577 uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fslocs);
486 578
487static int svc_export_show(struct seq_file *m, 579static int svc_export_show(struct seq_file *m,
488 struct cache_detail *cd, 580 struct cache_detail *cd,
@@ -501,8 +593,8 @@ static int svc_export_show(struct seq_file *m,
501 seq_putc(m, '('); 593 seq_putc(m, '(');
502 if (test_bit(CACHE_VALID, &h->flags) && 594 if (test_bit(CACHE_VALID, &h->flags) &&
503 !test_bit(CACHE_NEGATIVE, &h->flags)) 595 !test_bit(CACHE_NEGATIVE, &h->flags))
504 exp_flags(m, exp->ex_flags, exp->ex_fsid, 596 exp_flags(m, exp->ex_flags, exp->ex_fsid,
505 exp->ex_anon_uid, exp->ex_anon_gid); 597 exp->ex_anon_uid, exp->ex_anon_gid, &exp->ex_fslocs);
506 seq_puts(m, ")\n"); 598 seq_puts(m, ")\n");
507 return 0; 599 return 0;
508} 600}
@@ -524,6 +616,10 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
524 new->ex_client = item->ex_client; 616 new->ex_client = item->ex_client;
525 new->ex_dentry = dget(item->ex_dentry); 617 new->ex_dentry = dget(item->ex_dentry);
526 new->ex_mnt = mntget(item->ex_mnt); 618 new->ex_mnt = mntget(item->ex_mnt);
619 new->ex_path = NULL;
620 new->ex_fslocs.locations = NULL;
621 new->ex_fslocs.locations_count = 0;
622 new->ex_fslocs.migrated = 0;
527} 623}
528 624
529static void export_update(struct cache_head *cnew, struct cache_head *citem) 625static void export_update(struct cache_head *cnew, struct cache_head *citem)
@@ -535,6 +631,14 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
535 new->ex_anon_uid = item->ex_anon_uid; 631 new->ex_anon_uid = item->ex_anon_uid;
536 new->ex_anon_gid = item->ex_anon_gid; 632 new->ex_anon_gid = item->ex_anon_gid;
537 new->ex_fsid = item->ex_fsid; 633 new->ex_fsid = item->ex_fsid;
634 new->ex_path = item->ex_path;
635 item->ex_path = NULL;
636 new->ex_fslocs.locations = item->ex_fslocs.locations;
637 item->ex_fslocs.locations = NULL;
638 new->ex_fslocs.locations_count = item->ex_fslocs.locations_count;
639 item->ex_fslocs.locations_count = 0;
640 new->ex_fslocs.migrated = item->ex_fslocs.migrated;
641 item->ex_fslocs.migrated = 0;
538} 642}
539 643
540static struct cache_head *svc_export_alloc(void) 644static struct cache_head *svc_export_alloc(void)
@@ -1048,30 +1152,21 @@ int
1048exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp, 1152exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp,
1049 struct cache_req *creq) 1153 struct cache_req *creq)
1050{ 1154{
1051 struct svc_expkey *fsid_key;
1052 struct svc_export *exp; 1155 struct svc_export *exp;
1053 int rv; 1156 int rv;
1054 u32 fsidv[2]; 1157 u32 fsidv[2];
1055 1158
1056 mk_fsid_v1(fsidv, 0); 1159 mk_fsid_v1(fsidv, 0);
1057 1160
1058 fsid_key = exp_find_key(clp, 1, fsidv, creq); 1161 exp = exp_find(clp, 1, fsidv, creq);
1059 if (IS_ERR(fsid_key) && PTR_ERR(fsid_key) == -EAGAIN) 1162 if (IS_ERR(exp) && PTR_ERR(exp) == -EAGAIN)
1060 return nfserr_dropit; 1163 return nfserr_dropit;
1061 if (!fsid_key || IS_ERR(fsid_key))
1062 return nfserr_perm;
1063
1064 exp = exp_get_by_name(clp, fsid_key->ek_mnt, fsid_key->ek_dentry, creq);
1065 if (exp == NULL) 1164 if (exp == NULL)
1066 rv = nfserr_perm; 1165 return nfserr_perm;
1067 else if (IS_ERR(exp)) 1166 else if (IS_ERR(exp))
1068 rv = nfserrno(PTR_ERR(exp)); 1167 return nfserrno(PTR_ERR(exp));
1069 else { 1168 rv = fh_compose(fhp, exp, exp->ex_dentry, NULL);
1070 rv = fh_compose(fhp, exp, 1169 exp_put(exp);
1071 fsid_key->ek_dentry, NULL);
1072 exp_put(exp);
1073 }
1074 cache_put(&fsid_key->h, &svc_expkey_cache);
1075 return rv; 1170 return rv;
1076} 1171}
1077 1172
@@ -1158,7 +1253,8 @@ static struct flags {
1158 { 0, {"", ""}} 1253 { 0, {"", ""}}
1159}; 1254};
1160 1255
1161static void exp_flags(struct seq_file *m, int flag, int fsid, uid_t anonu, uid_t anong) 1256static void exp_flags(struct seq_file *m, int flag, int fsid,
1257 uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fsloc)
1162{ 1258{
1163 int first = 0; 1259 int first = 0;
1164 struct flags *flg; 1260 struct flags *flg;
@@ -1174,6 +1270,21 @@ static void exp_flags(struct seq_file *m, int flag, int fsid, uid_t anonu, uid_t
1174 seq_printf(m, "%sanonuid=%d", first++?",":"", anonu); 1270 seq_printf(m, "%sanonuid=%d", first++?",":"", anonu);
1175 if (anong != (gid_t)-2 && anong != (0x10000-2)) 1271 if (anong != (gid_t)-2 && anong != (0x10000-2))
1176 seq_printf(m, "%sanongid=%d", first++?",":"", anong); 1272 seq_printf(m, "%sanongid=%d", first++?",":"", anong);
1273 if (fsloc && fsloc->locations_count > 0) {
1274 char *loctype = (fsloc->migrated) ? "refer" : "replicas";
1275 int i;
1276
1277 seq_printf(m, "%s%s=", first++?",":"", loctype);
1278 seq_escape(m, fsloc->locations[0].path, ",;@ \t\n\\");
1279 seq_putc(m, '@');
1280 seq_escape(m, fsloc->locations[0].hosts, ",;@ \t\n\\");
1281 for (i = 1; i < fsloc->locations_count; i++) {
1282 seq_putc(m, ';');
1283 seq_escape(m, fsloc->locations[i].path, ",;@ \t\n\\");
1284 seq_putc(m, '@');
1285 seq_escape(m, fsloc->locations[i].hosts, ",;@ \t\n\\");
1286 }
1287 }
1177} 1288}
1178 1289
1179static int e_show(struct seq_file *m, void *p) 1290static int e_show(struct seq_file *m, void *p)
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
index fe56b38364cc..9187755661df 100644
--- a/fs/nfsd/nfs2acl.c
+++ b/fs/nfsd/nfs2acl.c
@@ -241,7 +241,7 @@ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, u32 *p,
241 241
242 rqstp->rq_res.page_len = w; 242 rqstp->rq_res.page_len = w;
243 while (w > 0) { 243 while (w > 0) {
244 if (!svc_take_res_page(rqstp)) 244 if (!rqstp->rq_respages[rqstp->rq_resused++])
245 return 0; 245 return 0;
246 w -= PAGE_SIZE; 246 w -= PAGE_SIZE;
247 } 247 }
@@ -333,4 +333,5 @@ struct svc_version nfsd_acl_version2 = {
333 .vs_proc = nfsd_acl_procedures2, 333 .vs_proc = nfsd_acl_procedures2,
334 .vs_dispatch = nfsd_dispatch, 334 .vs_dispatch = nfsd_dispatch,
335 .vs_xdrsize = NFS3_SVC_XDRSIZE, 335 .vs_xdrsize = NFS3_SVC_XDRSIZE,
336 .vs_hidden = 1,
336}; 337};
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
index 16e10c170aed..d4bdc00c1169 100644
--- a/fs/nfsd/nfs3acl.c
+++ b/fs/nfsd/nfs3acl.c
@@ -185,7 +185,7 @@ static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, u32 *p,
185 185
186 rqstp->rq_res.page_len = w; 186 rqstp->rq_res.page_len = w;
187 while (w > 0) { 187 while (w > 0) {
188 if (!svc_take_res_page(rqstp)) 188 if (!rqstp->rq_respages[rqstp->rq_resused++])
189 return 0; 189 return 0;
190 w -= PAGE_SIZE; 190 w -= PAGE_SIZE;
191 } 191 }
@@ -263,5 +263,6 @@ struct svc_version nfsd_acl_version3 = {
263 .vs_proc = nfsd_acl_procedures3, 263 .vs_proc = nfsd_acl_procedures3,
264 .vs_dispatch = nfsd_dispatch, 264 .vs_dispatch = nfsd_dispatch,
265 .vs_xdrsize = NFS3_SVC_XDRSIZE, 265 .vs_xdrsize = NFS3_SVC_XDRSIZE,
266 .vs_hidden = 1,
266}; 267};
267 268
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index f61142afea44..a5ebc7dbb384 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -160,6 +160,7 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp,
160 struct nfsd3_readres *resp) 160 struct nfsd3_readres *resp)
161{ 161{
162 int nfserr; 162 int nfserr;
163 u32 max_blocksize = svc_max_payload(rqstp);
163 164
164 dprintk("nfsd: READ(3) %s %lu bytes at %lu\n", 165 dprintk("nfsd: READ(3) %s %lu bytes at %lu\n",
165 SVCFH_fmt(&argp->fh), 166 SVCFH_fmt(&argp->fh),
@@ -172,15 +173,15 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp,
172 */ 173 */
173 174
174 resp->count = argp->count; 175 resp->count = argp->count;
175 if (NFSSVC_MAXBLKSIZE < resp->count) 176 if (max_blocksize < resp->count)
176 resp->count = NFSSVC_MAXBLKSIZE; 177 resp->count = max_blocksize;
177 178
178 svc_reserve(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4); 179 svc_reserve(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4);
179 180
180 fh_copy(&resp->fh, &argp->fh); 181 fh_copy(&resp->fh, &argp->fh);
181 nfserr = nfsd_read(rqstp, &resp->fh, NULL, 182 nfserr = nfsd_read(rqstp, &resp->fh, NULL,
182 argp->offset, 183 argp->offset,
183 argp->vec, argp->vlen, 184 rqstp->rq_vec, argp->vlen,
184 &resp->count); 185 &resp->count);
185 if (nfserr == 0) { 186 if (nfserr == 0) {
186 struct inode *inode = resp->fh.fh_dentry->d_inode; 187 struct inode *inode = resp->fh.fh_dentry->d_inode;
@@ -210,7 +211,7 @@ nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp,
210 resp->committed = argp->stable; 211 resp->committed = argp->stable;
211 nfserr = nfsd_write(rqstp, &resp->fh, NULL, 212 nfserr = nfsd_write(rqstp, &resp->fh, NULL,
212 argp->offset, 213 argp->offset,
213 argp->vec, argp->vlen, 214 rqstp->rq_vec, argp->vlen,
214 argp->len, 215 argp->len,
215 &resp->committed); 216 &resp->committed);
216 resp->count = argp->count; 217 resp->count = argp->count;
@@ -538,15 +539,16 @@ nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
538 struct nfsd3_fsinfores *resp) 539 struct nfsd3_fsinfores *resp)
539{ 540{
540 int nfserr; 541 int nfserr;
542 u32 max_blocksize = svc_max_payload(rqstp);
541 543
542 dprintk("nfsd: FSINFO(3) %s\n", 544 dprintk("nfsd: FSINFO(3) %s\n",
543 SVCFH_fmt(&argp->fh)); 545 SVCFH_fmt(&argp->fh));
544 546
545 resp->f_rtmax = NFSSVC_MAXBLKSIZE; 547 resp->f_rtmax = max_blocksize;
546 resp->f_rtpref = NFSSVC_MAXBLKSIZE; 548 resp->f_rtpref = max_blocksize;
547 resp->f_rtmult = PAGE_SIZE; 549 resp->f_rtmult = PAGE_SIZE;
548 resp->f_wtmax = NFSSVC_MAXBLKSIZE; 550 resp->f_wtmax = max_blocksize;
549 resp->f_wtpref = NFSSVC_MAXBLKSIZE; 551 resp->f_wtpref = max_blocksize;
550 resp->f_wtmult = PAGE_SIZE; 552 resp->f_wtmult = PAGE_SIZE;
551 resp->f_dtpref = PAGE_SIZE; 553 resp->f_dtpref = PAGE_SIZE;
552 resp->f_maxfilesize = ~(u32) 0; 554 resp->f_maxfilesize = ~(u32) 0;
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 243d94b9653a..247d518248bf 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -330,6 +330,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, u32 *p,
330{ 330{
331 unsigned int len; 331 unsigned int len;
332 int v,pn; 332 int v,pn;
333 u32 max_blocksize = svc_max_payload(rqstp);
333 334
334 if (!(p = decode_fh(p, &args->fh)) 335 if (!(p = decode_fh(p, &args->fh))
335 || !(p = xdr_decode_hyper(p, &args->offset))) 336 || !(p = xdr_decode_hyper(p, &args->offset)))
@@ -337,17 +338,16 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, u32 *p,
337 338
338 len = args->count = ntohl(*p++); 339 len = args->count = ntohl(*p++);
339 340
340 if (len > NFSSVC_MAXBLKSIZE) 341 if (len > max_blocksize)
341 len = NFSSVC_MAXBLKSIZE; 342 len = max_blocksize;
342 343
343 /* set up the kvec */ 344 /* set up the kvec */
344 v=0; 345 v=0;
345 while (len > 0) { 346 while (len > 0) {
346 pn = rqstp->rq_resused; 347 pn = rqstp->rq_resused++;
347 svc_take_page(rqstp); 348 rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_respages[pn]);
348 args->vec[v].iov_base = page_address(rqstp->rq_respages[pn]); 349 rqstp->rq_vec[v].iov_len = len < PAGE_SIZE? len : PAGE_SIZE;
349 args->vec[v].iov_len = len < PAGE_SIZE? len : PAGE_SIZE; 350 len -= rqstp->rq_vec[v].iov_len;
350 len -= args->vec[v].iov_len;
351 v++; 351 v++;
352 } 352 }
353 args->vlen = v; 353 args->vlen = v;
@@ -359,6 +359,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
359 struct nfsd3_writeargs *args) 359 struct nfsd3_writeargs *args)
360{ 360{
361 unsigned int len, v, hdr; 361 unsigned int len, v, hdr;
362 u32 max_blocksize = svc_max_payload(rqstp);
362 363
363 if (!(p = decode_fh(p, &args->fh)) 364 if (!(p = decode_fh(p, &args->fh))
364 || !(p = xdr_decode_hyper(p, &args->offset))) 365 || !(p = xdr_decode_hyper(p, &args->offset)))
@@ -373,22 +374,22 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
373 rqstp->rq_arg.len - hdr < len) 374 rqstp->rq_arg.len - hdr < len)
374 return 0; 375 return 0;
375 376
376 args->vec[0].iov_base = (void*)p; 377 rqstp->rq_vec[0].iov_base = (void*)p;
377 args->vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr; 378 rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
378 379
379 if (len > NFSSVC_MAXBLKSIZE) 380 if (len > max_blocksize)
380 len = NFSSVC_MAXBLKSIZE; 381 len = max_blocksize;
381 v= 0; 382 v= 0;
382 while (len > args->vec[v].iov_len) { 383 while (len > rqstp->rq_vec[v].iov_len) {
383 len -= args->vec[v].iov_len; 384 len -= rqstp->rq_vec[v].iov_len;
384 v++; 385 v++;
385 args->vec[v].iov_base = page_address(rqstp->rq_argpages[v]); 386 rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]);
386 args->vec[v].iov_len = PAGE_SIZE; 387 rqstp->rq_vec[v].iov_len = PAGE_SIZE;
387 } 388 }
388 args->vec[v].iov_len = len; 389 rqstp->rq_vec[v].iov_len = len;
389 args->vlen = v+1; 390 args->vlen = v+1;
390 391
391 return args->count == args->len && args->vec[0].iov_len > 0; 392 return args->count == args->len && rqstp->rq_vec[0].iov_len > 0;
392} 393}
393 394
394int 395int
@@ -446,11 +447,11 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, u32 *p,
446 * This page appears in the rq_res.pages list, but as pages_len is always 447 * This page appears in the rq_res.pages list, but as pages_len is always
447 * 0, it won't get in the way 448 * 0, it won't get in the way
448 */ 449 */
449 svc_take_page(rqstp);
450 len = ntohl(*p++); 450 len = ntohl(*p++);
451 if (len == 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE) 451 if (len == 0 || len > NFS3_MAXPATHLEN || len >= PAGE_SIZE)
452 return 0; 452 return 0;
453 args->tname = new = page_address(rqstp->rq_respages[rqstp->rq_resused-1]); 453 args->tname = new =
454 page_address(rqstp->rq_respages[rqstp->rq_resused++]);
454 args->tlen = len; 455 args->tlen = len;
455 /* first copy and check from the first page */ 456 /* first copy and check from the first page */
456 old = (char*)p; 457 old = (char*)p;
@@ -522,8 +523,8 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, u32 *p,
522{ 523{
523 if (!(p = decode_fh(p, &args->fh))) 524 if (!(p = decode_fh(p, &args->fh)))
524 return 0; 525 return 0;
525 svc_take_page(rqstp); 526 args->buffer =
526 args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused-1]); 527 page_address(rqstp->rq_respages[rqstp->rq_resused++]);
527 528
528 return xdr_argsize_check(rqstp, p); 529 return xdr_argsize_check(rqstp, p);
529} 530}
@@ -554,8 +555,8 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, u32 *p,
554 if (args->count > PAGE_SIZE) 555 if (args->count > PAGE_SIZE)
555 args->count = PAGE_SIZE; 556 args->count = PAGE_SIZE;
556 557
557 svc_take_page(rqstp); 558 args->buffer =
558 args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused-1]); 559 page_address(rqstp->rq_respages[rqstp->rq_resused++]);
559 560
560 return xdr_argsize_check(rqstp, p); 561 return xdr_argsize_check(rqstp, p);
561} 562}
@@ -565,6 +566,7 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, u32 *p,
565 struct nfsd3_readdirargs *args) 566 struct nfsd3_readdirargs *args)
566{ 567{
567 int len, pn; 568 int len, pn;
569 u32 max_blocksize = svc_max_payload(rqstp);
568 570
569 if (!(p = decode_fh(p, &args->fh))) 571 if (!(p = decode_fh(p, &args->fh)))
570 return 0; 572 return 0;
@@ -573,13 +575,12 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, u32 *p,
573 args->dircount = ntohl(*p++); 575 args->dircount = ntohl(*p++);
574 args->count = ntohl(*p++); 576 args->count = ntohl(*p++);
575 577
576 len = (args->count > NFSSVC_MAXBLKSIZE) ? NFSSVC_MAXBLKSIZE : 578 len = (args->count > max_blocksize) ? max_blocksize :
577 args->count; 579 args->count;
578 args->count = len; 580 args->count = len;
579 581
580 while (len > 0) { 582 while (len > 0) {
581 pn = rqstp->rq_resused; 583 pn = rqstp->rq_resused++;
582 svc_take_page(rqstp);
583 if (!args->buffer) 584 if (!args->buffer)
584 args->buffer = page_address(rqstp->rq_respages[pn]); 585 args->buffer = page_address(rqstp->rq_respages[pn]);
585 len -= PAGE_SIZE; 586 len -= PAGE_SIZE;
@@ -668,7 +669,6 @@ nfs3svc_encode_readlinkres(struct svc_rqst *rqstp, u32 *p,
668 rqstp->rq_res.page_len = resp->len; 669 rqstp->rq_res.page_len = resp->len;
669 if (resp->len & 3) { 670 if (resp->len & 3) {
670 /* need to pad the tail */ 671 /* need to pad the tail */
671 rqstp->rq_restailpage = 0;
672 rqstp->rq_res.tail[0].iov_base = p; 672 rqstp->rq_res.tail[0].iov_base = p;
673 *p = 0; 673 *p = 0;
674 rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); 674 rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3);
@@ -693,7 +693,6 @@ nfs3svc_encode_readres(struct svc_rqst *rqstp, u32 *p,
693 rqstp->rq_res.page_len = resp->count; 693 rqstp->rq_res.page_len = resp->count;
694 if (resp->count & 3) { 694 if (resp->count & 3) {
695 /* need to pad the tail */ 695 /* need to pad the tail */
696 rqstp->rq_restailpage = 0;
697 rqstp->rq_res.tail[0].iov_base = p; 696 rqstp->rq_res.tail[0].iov_base = p;
698 *p = 0; 697 *p = 0;
699 rqstp->rq_res.tail[0].iov_len = 4 - (resp->count & 3); 698 rqstp->rq_res.tail[0].iov_len = 4 - (resp->count & 3);
@@ -768,7 +767,6 @@ nfs3svc_encode_readdirres(struct svc_rqst *rqstp, u32 *p,
768 rqstp->rq_res.page_len = (resp->count) << 2; 767 rqstp->rq_res.page_len = (resp->count) << 2;
769 768
770 /* add the 'tail' to the end of the 'head' page - page 0. */ 769 /* add the 'tail' to the end of the 'head' page - page 0. */
771 rqstp->rq_restailpage = 0;
772 rqstp->rq_res.tail[0].iov_base = p; 770 rqstp->rq_res.tail[0].iov_base = p;
773 *p++ = 0; /* no more entries */ 771 *p++ = 0; /* no more entries */
774 *p++ = htonl(resp->common.err == nfserr_eof); 772 *p++ = htonl(resp->common.err == nfserr_eof);
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index edb107e61b91..5d94555cdc83 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -63,6 +63,8 @@
63#define NFS4_INHERITANCE_FLAGS (NFS4_ACE_FILE_INHERIT_ACE \ 63#define NFS4_INHERITANCE_FLAGS (NFS4_ACE_FILE_INHERIT_ACE \
64 | NFS4_ACE_DIRECTORY_INHERIT_ACE | NFS4_ACE_INHERIT_ONLY_ACE) 64 | NFS4_ACE_DIRECTORY_INHERIT_ACE | NFS4_ACE_INHERIT_ONLY_ACE)
65 65
66#define NFS4_SUPPORTED_FLAGS (NFS4_INHERITANCE_FLAGS | NFS4_ACE_IDENTIFIER_GROUP)
67
66#define MASK_EQUAL(mask1, mask2) \ 68#define MASK_EQUAL(mask1, mask2) \
67 ( ((mask1) & NFS4_ACE_MASK_ALL) == ((mask2) & NFS4_ACE_MASK_ALL) ) 69 ( ((mask1) & NFS4_ACE_MASK_ALL) == ((mask2) & NFS4_ACE_MASK_ALL) )
68 70
@@ -96,24 +98,26 @@ deny_mask(u32 allow_mask, unsigned int flags)
96/* XXX: modify functions to return NFS errors; they're only ever 98/* XXX: modify functions to return NFS errors; they're only ever
97 * used by nfs code, after all.... */ 99 * used by nfs code, after all.... */
98 100
99static int 101/* We only map from NFSv4 to POSIX ACLs when setting ACLs, when we err on the
100mode_from_nfs4(u32 perm, unsigned short *mode, unsigned int flags) 102 * side of being more restrictive, so the mode bit mapping below is
103 * pessimistic. An optimistic version would be needed to handle DENY's,
104 * but we espect to coalesce all ALLOWs and DENYs before mapping to mode
105 * bits. */
106
107static void
108low_mode_from_nfs4(u32 perm, unsigned short *mode, unsigned int flags)
101{ 109{
102 u32 ignore = 0; 110 u32 write_mode = NFS4_WRITE_MODE;
103 111
104 if (!(flags & NFS4_ACL_DIR)) 112 if (flags & NFS4_ACL_DIR)
105 ignore |= NFS4_ACE_DELETE_CHILD; /* ignore it */ 113 write_mode |= NFS4_ACE_DELETE_CHILD;
106 perm |= ignore;
107 *mode = 0; 114 *mode = 0;
108 if ((perm & NFS4_READ_MODE) == NFS4_READ_MODE) 115 if ((perm & NFS4_READ_MODE) == NFS4_READ_MODE)
109 *mode |= ACL_READ; 116 *mode |= ACL_READ;
110 if ((perm & NFS4_WRITE_MODE) == NFS4_WRITE_MODE) 117 if ((perm & write_mode) == write_mode)
111 *mode |= ACL_WRITE; 118 *mode |= ACL_WRITE;
112 if ((perm & NFS4_EXECUTE_MODE) == NFS4_EXECUTE_MODE) 119 if ((perm & NFS4_EXECUTE_MODE) == NFS4_EXECUTE_MODE)
113 *mode |= ACL_EXECUTE; 120 *mode |= ACL_EXECUTE;
114 if (!MASK_EQUAL(perm, ignore|mask_from_posix(*mode, flags)))
115 return -EINVAL;
116 return 0;
117} 121}
118 122
119struct ace_container { 123struct ace_container {
@@ -338,38 +342,6 @@ sort_pacl(struct posix_acl *pacl)
338 return; 342 return;
339} 343}
340 344
341static int
342write_pace(struct nfs4_ace *ace, struct posix_acl *pacl,
343 struct posix_acl_entry **pace, short tag, unsigned int flags)
344{
345 struct posix_acl_entry *this = *pace;
346
347 if (*pace == pacl->a_entries + pacl->a_count)
348 return -EINVAL; /* fell off the end */
349 (*pace)++;
350 this->e_tag = tag;
351 if (tag == ACL_USER_OBJ)
352 flags |= NFS4_ACL_OWNER;
353 if (mode_from_nfs4(ace->access_mask, &this->e_perm, flags))
354 return -EINVAL;
355 this->e_id = (tag == ACL_USER || tag == ACL_GROUP ?
356 ace->who : ACL_UNDEFINED_ID);
357 return 0;
358}
359
360static struct nfs4_ace *
361get_next_v4_ace(struct list_head **p, struct list_head *head)
362{
363 struct nfs4_ace *ace;
364
365 *p = (*p)->next;
366 if (*p == head)
367 return NULL;
368 ace = list_entry(*p, struct nfs4_ace, l_ace);
369
370 return ace;
371}
372
373int 345int
374nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl, 346nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl,
375 struct posix_acl **dpacl, unsigned int flags) 347 struct posix_acl **dpacl, unsigned int flags)
@@ -385,42 +357,23 @@ nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl,
385 goto out; 357 goto out;
386 358
387 error = nfs4_acl_split(acl, dacl); 359 error = nfs4_acl_split(acl, dacl);
388 if (error < 0) 360 if (error)
389 goto out_acl; 361 goto out_acl;
390 362
391 if (pacl != NULL) { 363 *pacl = _nfsv4_to_posix_one(acl, flags);
392 if (acl->naces == 0) { 364 if (IS_ERR(*pacl)) {
393 error = -ENODATA; 365 error = PTR_ERR(*pacl);
394 goto try_dpacl; 366 *pacl = NULL;
395 } 367 goto out_acl;
396
397 *pacl = _nfsv4_to_posix_one(acl, flags);
398 if (IS_ERR(*pacl)) {
399 error = PTR_ERR(*pacl);
400 *pacl = NULL;
401 goto out_acl;
402 }
403 } 368 }
404 369
405try_dpacl: 370 *dpacl = _nfsv4_to_posix_one(dacl, flags);
406 if (dpacl != NULL) { 371 if (IS_ERR(*dpacl)) {
407 if (dacl->naces == 0) { 372 error = PTR_ERR(*dpacl);
408 if (pacl == NULL || *pacl == NULL) 373 *dpacl = NULL;
409 error = -ENODATA;
410 goto out_acl;
411 }
412
413 error = 0;
414 *dpacl = _nfsv4_to_posix_one(dacl, flags);
415 if (IS_ERR(*dpacl)) {
416 error = PTR_ERR(*dpacl);
417 *dpacl = NULL;
418 goto out_acl;
419 }
420 } 374 }
421
422out_acl: 375out_acl:
423 if (error && pacl) { 376 if (error) {
424 posix_acl_release(*pacl); 377 posix_acl_release(*pacl);
425 *pacl = NULL; 378 *pacl = NULL;
426 } 379 }
@@ -429,349 +382,311 @@ out:
429 return error; 382 return error;
430} 383}
431 384
385/*
386 * While processing the NFSv4 ACE, this maintains bitmasks representing
387 * which permission bits have been allowed and which denied to a given
388 * entity: */
389struct posix_ace_state {
390 u32 allow;
391 u32 deny;
392};
393
394struct posix_user_ace_state {
395 uid_t uid;
396 struct posix_ace_state perms;
397};
398
399struct posix_ace_state_array {
400 int n;
401 struct posix_user_ace_state aces[];
402};
403
404/*
405 * While processing the NFSv4 ACE, this maintains the partial permissions
406 * calculated so far: */
407
408struct posix_acl_state {
409 struct posix_ace_state owner;
410 struct posix_ace_state group;
411 struct posix_ace_state other;
412 struct posix_ace_state everyone;
413 struct posix_ace_state mask; /* Deny unused in this case */
414 struct posix_ace_state_array *users;
415 struct posix_ace_state_array *groups;
416};
417
432static int 418static int
433same_who(struct nfs4_ace *a, struct nfs4_ace *b) 419init_state(struct posix_acl_state *state, int cnt)
434{ 420{
435 return a->whotype == b->whotype && 421 int alloc;
436 (a->whotype != NFS4_ACL_WHO_NAMED || a->who == b->who); 422
423 memset(state, 0, sizeof(struct posix_acl_state));
424 /*
425 * In the worst case, each individual acl could be for a distinct
426 * named user or group, but we don't no which, so we allocate
427 * enough space for either:
428 */
429 alloc = sizeof(struct posix_ace_state_array)
430 + cnt*sizeof(struct posix_ace_state);
431 state->users = kzalloc(alloc, GFP_KERNEL);
432 if (!state->users)
433 return -ENOMEM;
434 state->groups = kzalloc(alloc, GFP_KERNEL);
435 if (!state->groups) {
436 kfree(state->users);
437 return -ENOMEM;
438 }
439 return 0;
437} 440}
438 441
439static int 442static void
440complementary_ace_pair(struct nfs4_ace *allow, struct nfs4_ace *deny, 443free_state(struct posix_acl_state *state) {
441 unsigned int flags) 444 kfree(state->users);
442{ 445 kfree(state->groups);
443 int ignore = 0;
444 if (!(flags & NFS4_ACL_DIR))
445 ignore |= NFS4_ACE_DELETE_CHILD;
446 return MASK_EQUAL(ignore|deny_mask(allow->access_mask, flags),
447 ignore|deny->access_mask) &&
448 allow->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE &&
449 deny->type == NFS4_ACE_ACCESS_DENIED_ACE_TYPE &&
450 allow->flag == deny->flag &&
451 same_who(allow, deny);
452} 446}
453 447
454static inline int 448static inline void add_to_mask(struct posix_acl_state *state, struct posix_ace_state *astate)
455user_obj_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
456 struct posix_acl *pacl, struct posix_acl_entry **pace,
457 unsigned int flags)
458{ 449{
459 int error = -EINVAL; 450 state->mask.allow |= astate->allow;
460 struct nfs4_ace *ace, *ace2;
461
462 ace = get_next_v4_ace(p, &n4acl->ace_head);
463 if (ace == NULL)
464 goto out;
465 if (ace2type(ace) != ACL_USER_OBJ)
466 goto out;
467 error = write_pace(ace, pacl, pace, ACL_USER_OBJ, flags);
468 if (error < 0)
469 goto out;
470 error = -EINVAL;
471 ace2 = get_next_v4_ace(p, &n4acl->ace_head);
472 if (ace2 == NULL)
473 goto out;
474 if (!complementary_ace_pair(ace, ace2, flags))
475 goto out;
476 error = 0;
477out:
478 return error;
479} 451}
480 452
481static inline int 453/*
482users_from_v4(struct nfs4_acl *n4acl, struct list_head **p, 454 * Certain bits (SYNCHRONIZE, DELETE, WRITE_OWNER, READ/WRITE_NAMED_ATTRS,
483 struct nfs4_ace **mask_ace, 455 * READ_ATTRIBUTES, READ_ACL) are currently unenforceable and don't translate
484 struct posix_acl *pacl, struct posix_acl_entry **pace, 456 * to traditional read/write/execute permissions.
485 unsigned int flags) 457 *
486{ 458 * It's problematic to reject acls that use certain mode bits, because it
487 int error = -EINVAL; 459 * places the burden on users to learn the rules about which bits one
488 struct nfs4_ace *ace, *ace2; 460 * particular server sets, without giving the user a lot of help--we return an
461 * error that could mean any number of different things. To make matters
462 * worse, the problematic bits might be introduced by some application that's
463 * automatically mapping from some other acl model.
464 *
465 * So wherever possible we accept anything, possibly erring on the side of
466 * denying more permissions than necessary.
467 *
468 * However we do reject *explicit* DENY's of a few bits representing
469 * permissions we could never deny:
470 */
489 471
490 ace = get_next_v4_ace(p, &n4acl->ace_head); 472static inline int check_deny(u32 mask, int isowner)
491 if (ace == NULL) 473{
492 goto out; 474 if (mask & (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL))
493 while (ace2type(ace) == ACL_USER) { 475 return -EINVAL;
494 if (ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE) 476 if (!isowner)
495 goto out; 477 return 0;
496 if (*mask_ace && 478 if (mask & (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL))
497 !MASK_EQUAL(ace->access_mask, (*mask_ace)->access_mask)) 479 return -EINVAL;
498 goto out; 480 return 0;
499 *mask_ace = ace;
500 ace = get_next_v4_ace(p, &n4acl->ace_head);
501 if (ace == NULL)
502 goto out;
503 if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE)
504 goto out;
505 error = write_pace(ace, pacl, pace, ACL_USER, flags);
506 if (error < 0)
507 goto out;
508 error = -EINVAL;
509 ace2 = get_next_v4_ace(p, &n4acl->ace_head);
510 if (ace2 == NULL)
511 goto out;
512 if (!complementary_ace_pair(ace, ace2, flags))
513 goto out;
514 if ((*mask_ace)->flag != ace2->flag ||
515 !same_who(*mask_ace, ace2))
516 goto out;
517 ace = get_next_v4_ace(p, &n4acl->ace_head);
518 if (ace == NULL)
519 goto out;
520 }
521 error = 0;
522out:
523 return error;
524} 481}
525 482
526static inline int 483static struct posix_acl *
527group_obj_and_groups_from_v4(struct nfs4_acl *n4acl, struct list_head **p, 484posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
528 struct nfs4_ace **mask_ace,
529 struct posix_acl *pacl, struct posix_acl_entry **pace,
530 unsigned int flags)
531{ 485{
532 int error = -EINVAL; 486 struct posix_acl_entry *pace;
533 struct nfs4_ace *ace, *ace2; 487 struct posix_acl *pacl;
534 struct ace_container *ac; 488 int nace;
535 struct list_head group_l; 489 int i, error = 0;
536
537 INIT_LIST_HEAD(&group_l);
538 ace = list_entry(*p, struct nfs4_ace, l_ace);
539
540 /* group owner (mask and allow aces) */
541 490
542 if (pacl->a_count != 3) { 491 nace = 4 + state->users->n + state->groups->n;
543 /* then the group owner should be preceded by mask */ 492 pacl = posix_acl_alloc(nace, GFP_KERNEL);
544 if (ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE) 493 if (!pacl)
545 goto out; 494 return ERR_PTR(-ENOMEM);
546 if (*mask_ace &&
547 !MASK_EQUAL(ace->access_mask, (*mask_ace)->access_mask))
548 goto out;
549 *mask_ace = ace;
550 ace = get_next_v4_ace(p, &n4acl->ace_head);
551 if (ace == NULL)
552 goto out;
553 495
554 if ((*mask_ace)->flag != ace->flag || !same_who(*mask_ace, ace)) 496 pace = pacl->a_entries;
555 goto out; 497 pace->e_tag = ACL_USER_OBJ;
498 error = check_deny(state->owner.deny, 1);
499 if (error)
500 goto out_err;
501 low_mode_from_nfs4(state->owner.allow, &pace->e_perm, flags);
502 pace->e_id = ACL_UNDEFINED_ID;
503
504 for (i=0; i < state->users->n; i++) {
505 pace++;
506 pace->e_tag = ACL_USER;
507 error = check_deny(state->users->aces[i].perms.deny, 0);
508 if (error)
509 goto out_err;
510 low_mode_from_nfs4(state->users->aces[i].perms.allow,
511 &pace->e_perm, flags);
512 pace->e_id = state->users->aces[i].uid;
513 add_to_mask(state, &state->users->aces[i].perms);
556 } 514 }
557 515
558 if (ace2type(ace) != ACL_GROUP_OBJ) 516 pace++;
559 goto out; 517 pace->e_tag = ACL_GROUP_OBJ;
560 518 error = check_deny(state->group.deny, 0);
561 ac = kmalloc(sizeof(*ac), GFP_KERNEL); 519 if (error)
562 error = -ENOMEM; 520 goto out_err;
563 if (ac == NULL) 521 low_mode_from_nfs4(state->group.allow, &pace->e_perm, flags);
564 goto out; 522 pace->e_id = ACL_UNDEFINED_ID;
565 ac->ace = ace; 523 add_to_mask(state, &state->group);
566 list_add_tail(&ac->ace_l, &group_l); 524
567 525 for (i=0; i < state->groups->n; i++) {
568 error = -EINVAL; 526 pace++;
569 if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) 527 pace->e_tag = ACL_GROUP;
570 goto out; 528 error = check_deny(state->groups->aces[i].perms.deny, 0);
571 529 if (error)
572 error = write_pace(ace, pacl, pace, ACL_GROUP_OBJ, flags); 530 goto out_err;
573 if (error < 0) 531 low_mode_from_nfs4(state->groups->aces[i].perms.allow,
574 goto out; 532 &pace->e_perm, flags);
575 533 pace->e_id = state->groups->aces[i].uid;
576 error = -EINVAL; 534 add_to_mask(state, &state->groups->aces[i].perms);
577 ace = get_next_v4_ace(p, &n4acl->ace_head); 535 }
578 if (ace == NULL)
579 goto out;
580
581 /* groups (mask and allow aces) */
582
583 while (ace2type(ace) == ACL_GROUP) {
584 if (*mask_ace == NULL)
585 goto out;
586
587 if (ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE ||
588 !MASK_EQUAL(ace->access_mask, (*mask_ace)->access_mask))
589 goto out;
590 *mask_ace = ace;
591 536
592 ace = get_next_v4_ace(p, &n4acl->ace_head); 537 pace++;
593 if (ace == NULL) 538 pace->e_tag = ACL_MASK;
594 goto out; 539 low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags);
595 ac = kmalloc(sizeof(*ac), GFP_KERNEL); 540 pace->e_id = ACL_UNDEFINED_ID;
596 error = -ENOMEM;
597 if (ac == NULL)
598 goto out;
599 error = -EINVAL;
600 if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE ||
601 !same_who(ace, *mask_ace))
602 goto out;
603 541
604 ac->ace = ace; 542 pace++;
605 list_add_tail(&ac->ace_l, &group_l); 543 pace->e_tag = ACL_OTHER;
544 error = check_deny(state->other.deny, 0);
545 if (error)
546 goto out_err;
547 low_mode_from_nfs4(state->other.allow, &pace->e_perm, flags);
548 pace->e_id = ACL_UNDEFINED_ID;
606 549
607 error = write_pace(ace, pacl, pace, ACL_GROUP, flags); 550 return pacl;
608 if (error < 0) 551out_err:
609 goto out; 552 posix_acl_release(pacl);
610 error = -EINVAL; 553 return ERR_PTR(error);
611 ace = get_next_v4_ace(p, &n4acl->ace_head); 554}
612 if (ace == NULL)
613 goto out;
614 }
615 555
616 /* group owner (deny ace) */ 556static inline void allow_bits(struct posix_ace_state *astate, u32 mask)
557{
558 /* Allow all bits in the mask not already denied: */
559 astate->allow |= mask & ~astate->deny;
560}
617 561
618 if (ace2type(ace) != ACL_GROUP_OBJ) 562static inline void deny_bits(struct posix_ace_state *astate, u32 mask)
619 goto out; 563{
620 ac = list_entry(group_l.next, struct ace_container, ace_l); 564 /* Deny all bits in the mask not already allowed: */
621 ace2 = ac->ace; 565 astate->deny |= mask & ~astate->allow;
622 if (!complementary_ace_pair(ace2, ace, flags)) 566}
623 goto out;
624 list_del(group_l.next);
625 kfree(ac);
626 567
627 /* groups (deny aces) */ 568static int find_uid(struct posix_acl_state *state, struct posix_ace_state_array *a, uid_t uid)
569{
570 int i;
628 571
629 while (!list_empty(&group_l)) { 572 for (i = 0; i < a->n; i++)
630 ace = get_next_v4_ace(p, &n4acl->ace_head); 573 if (a->aces[i].uid == uid)
631 if (ace == NULL) 574 return i;
632 goto out; 575 /* Not found: */
633 if (ace2type(ace) != ACL_GROUP) 576 a->n++;
634 goto out; 577 a->aces[i].uid = uid;
635 ac = list_entry(group_l.next, struct ace_container, ace_l); 578 a->aces[i].perms.allow = state->everyone.allow;
636 ace2 = ac->ace; 579 a->aces[i].perms.deny = state->everyone.deny;
637 if (!complementary_ace_pair(ace2, ace, flags))
638 goto out;
639 list_del(group_l.next);
640 kfree(ac);
641 }
642 580
643 ace = get_next_v4_ace(p, &n4acl->ace_head); 581 return i;
644 if (ace == NULL)
645 goto out;
646 if (ace2type(ace) != ACL_OTHER)
647 goto out;
648 error = 0;
649out:
650 while (!list_empty(&group_l)) {
651 ac = list_entry(group_l.next, struct ace_container, ace_l);
652 list_del(group_l.next);
653 kfree(ac);
654 }
655 return error;
656} 582}
657 583
658static inline int 584static void deny_bits_array(struct posix_ace_state_array *a, u32 mask)
659mask_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
660 struct nfs4_ace **mask_ace,
661 struct posix_acl *pacl, struct posix_acl_entry **pace,
662 unsigned int flags)
663{ 585{
664 int error = -EINVAL; 586 int i;
665 struct nfs4_ace *ace;
666 587
667 ace = list_entry(*p, struct nfs4_ace, l_ace); 588 for (i=0; i < a->n; i++)
668 if (pacl->a_count != 3) { 589 deny_bits(&a->aces[i].perms, mask);
669 if (*mask_ace == NULL)
670 goto out;
671 (*mask_ace)->access_mask = deny_mask((*mask_ace)->access_mask, flags);
672 write_pace(*mask_ace, pacl, pace, ACL_MASK, flags);
673 }
674 error = 0;
675out:
676 return error;
677} 590}
678 591
679static inline int 592static void allow_bits_array(struct posix_ace_state_array *a, u32 mask)
680other_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
681 struct posix_acl *pacl, struct posix_acl_entry **pace,
682 unsigned int flags)
683{ 593{
684 int error = -EINVAL; 594 int i;
685 struct nfs4_ace *ace, *ace2;
686 595
687 ace = list_entry(*p, struct nfs4_ace, l_ace); 596 for (i=0; i < a->n; i++)
688 if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) 597 allow_bits(&a->aces[i].perms, mask);
689 goto out;
690 error = write_pace(ace, pacl, pace, ACL_OTHER, flags);
691 if (error < 0)
692 goto out;
693 error = -EINVAL;
694 ace2 = get_next_v4_ace(p, &n4acl->ace_head);
695 if (ace2 == NULL)
696 goto out;
697 if (!complementary_ace_pair(ace, ace2, flags))
698 goto out;
699 error = 0;
700out:
701 return error;
702} 598}
703 599
704static int 600static void process_one_v4_ace(struct posix_acl_state *state,
705calculate_posix_ace_count(struct nfs4_acl *n4acl) 601 struct nfs4_ace *ace)
706{ 602{
707 if (n4acl->naces == 6) /* owner, owner group, and other only */ 603 u32 mask = ace->access_mask;
708 return 3; 604 int i;
709 else { /* Otherwise there must be a mask entry. */ 605
710 /* Also, the remaining entries are for named users and 606 switch (ace2type(ace)) {
711 * groups, and come in threes (mask, allow, deny): */ 607 case ACL_USER_OBJ:
712 if (n4acl->naces < 7) 608 if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
713 return -EINVAL; 609 allow_bits(&state->owner, mask);
714 if ((n4acl->naces - 7) % 3) 610 } else {
715 return -EINVAL; 611 deny_bits(&state->owner, mask);
716 return 4 + (n4acl->naces - 7)/3; 612 }
613 break;
614 case ACL_USER:
615 i = find_uid(state, state->users, ace->who);
616 if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
617 allow_bits(&state->users->aces[i].perms, mask);
618 } else {
619 deny_bits(&state->users->aces[i].perms, mask);
620 mask = state->users->aces[i].perms.deny;
621 deny_bits(&state->owner, mask);
622 }
623 break;
624 case ACL_GROUP_OBJ:
625 if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
626 allow_bits(&state->group, mask);
627 } else {
628 deny_bits(&state->group, mask);
629 mask = state->group.deny;
630 deny_bits(&state->owner, mask);
631 deny_bits(&state->everyone, mask);
632 deny_bits_array(state->users, mask);
633 deny_bits_array(state->groups, mask);
634 }
635 break;
636 case ACL_GROUP:
637 i = find_uid(state, state->groups, ace->who);
638 if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
639 allow_bits(&state->groups->aces[i].perms, mask);
640 } else {
641 deny_bits(&state->groups->aces[i].perms, mask);
642 mask = state->groups->aces[i].perms.deny;
643 deny_bits(&state->owner, mask);
644 deny_bits(&state->group, mask);
645 deny_bits(&state->everyone, mask);
646 deny_bits_array(state->users, mask);
647 deny_bits_array(state->groups, mask);
648 }
649 break;
650 case ACL_OTHER:
651 if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
652 allow_bits(&state->owner, mask);
653 allow_bits(&state->group, mask);
654 allow_bits(&state->other, mask);
655 allow_bits(&state->everyone, mask);
656 allow_bits_array(state->users, mask);
657 allow_bits_array(state->groups, mask);
658 } else {
659 deny_bits(&state->owner, mask);
660 deny_bits(&state->group, mask);
661 deny_bits(&state->other, mask);
662 deny_bits(&state->everyone, mask);
663 deny_bits_array(state->users, mask);
664 deny_bits_array(state->groups, mask);
665 }
717 } 666 }
718} 667}
719 668
720
721static struct posix_acl * 669static struct posix_acl *
722_nfsv4_to_posix_one(struct nfs4_acl *n4acl, unsigned int flags) 670_nfsv4_to_posix_one(struct nfs4_acl *n4acl, unsigned int flags)
723{ 671{
672 struct posix_acl_state state;
724 struct posix_acl *pacl; 673 struct posix_acl *pacl;
725 int error = -EINVAL, nace = 0; 674 struct nfs4_ace *ace;
726 struct list_head *p; 675 int ret;
727 struct nfs4_ace *mask_ace = NULL;
728 struct posix_acl_entry *pace;
729
730 nace = calculate_posix_ace_count(n4acl);
731 if (nace < 0)
732 goto out_err;
733
734 pacl = posix_acl_alloc(nace, GFP_KERNEL);
735 error = -ENOMEM;
736 if (pacl == NULL)
737 goto out_err;
738
739 pace = &pacl->a_entries[0];
740 p = &n4acl->ace_head;
741
742 error = user_obj_from_v4(n4acl, &p, pacl, &pace, flags);
743 if (error)
744 goto out_acl;
745
746 error = users_from_v4(n4acl, &p, &mask_ace, pacl, &pace, flags);
747 if (error)
748 goto out_acl;
749 676
750 error = group_obj_and_groups_from_v4(n4acl, &p, &mask_ace, pacl, &pace, 677 ret = init_state(&state, n4acl->naces);
751 flags); 678 if (ret)
752 if (error) 679 return ERR_PTR(ret);
753 goto out_acl;
754 680
755 error = mask_from_v4(n4acl, &p, &mask_ace, pacl, &pace, flags); 681 list_for_each_entry(ace, &n4acl->ace_head, l_ace)
756 if (error) 682 process_one_v4_ace(&state, ace);
757 goto out_acl;
758 error = other_from_v4(n4acl, &p, pacl, &pace, flags);
759 if (error)
760 goto out_acl;
761 683
762 error = -EINVAL; 684 pacl = posix_state_to_acl(&state, flags);
763 if (p->next != &n4acl->ace_head)
764 goto out_acl;
765 if (pace != pacl->a_entries + pacl->a_count)
766 goto out_acl;
767 685
768 sort_pacl(pacl); 686 free_state(&state);
769 687
770 return pacl; 688 if (!IS_ERR(pacl))
771out_acl: 689 sort_pacl(pacl);
772 posix_acl_release(pacl);
773out_err:
774 pacl = ERR_PTR(error);
775 return pacl; 690 return pacl;
776} 691}
777 692
@@ -785,22 +700,41 @@ nfs4_acl_split(struct nfs4_acl *acl, struct nfs4_acl *dacl)
785 list_for_each_safe(h, n, &acl->ace_head) { 700 list_for_each_safe(h, n, &acl->ace_head) {
786 ace = list_entry(h, struct nfs4_ace, l_ace); 701 ace = list_entry(h, struct nfs4_ace, l_ace);
787 702
788 if ((ace->flag & NFS4_INHERITANCE_FLAGS) 703 if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE &&
789 != NFS4_INHERITANCE_FLAGS) 704 ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE)
790 continue; 705 return -EINVAL;
791 706
792 error = nfs4_acl_add_ace(dacl, ace->type, ace->flag, 707 if (ace->flag & ~NFS4_SUPPORTED_FLAGS)
793 ace->access_mask, ace->whotype, ace->who); 708 return -EINVAL;
794 if (error < 0)
795 goto out;
796 709
797 list_del(h); 710 switch (ace->flag & NFS4_INHERITANCE_FLAGS) {
798 kfree(ace); 711 case 0:
799 acl->naces--; 712 /* Leave this ace in the effective acl: */
713 continue;
714 case NFS4_INHERITANCE_FLAGS:
715 /* Add this ace to the default acl and remove it
716 * from the effective acl: */
717 error = nfs4_acl_add_ace(dacl, ace->type, ace->flag,
718 ace->access_mask, ace->whotype, ace->who);
719 if (error)
720 return error;
721 list_del(h);
722 kfree(ace);
723 acl->naces--;
724 break;
725 case NFS4_INHERITANCE_FLAGS & ~NFS4_ACE_INHERIT_ONLY_ACE:
726 /* Add this ace to the default, but leave it in
727 * the effective acl as well: */
728 error = nfs4_acl_add_ace(dacl, ace->type, ace->flag,
729 ace->access_mask, ace->whotype, ace->who);
730 if (error)
731 return error;
732 break;
733 default:
734 return -EINVAL;
735 }
800 } 736 }
801 737 return 0;
802out:
803 return error;
804} 738}
805 739
806static short 740static short
@@ -930,23 +864,6 @@ nfs4_acl_write_who(int who, char *p)
930 return -1; 864 return -1;
931} 865}
932 866
933static inline int
934match_who(struct nfs4_ace *ace, uid_t owner, gid_t group, uid_t who)
935{
936 switch (ace->whotype) {
937 case NFS4_ACL_WHO_NAMED:
938 return who == ace->who;
939 case NFS4_ACL_WHO_OWNER:
940 return who == owner;
941 case NFS4_ACL_WHO_GROUP:
942 return who == group;
943 case NFS4_ACL_WHO_EVERYONE:
944 return 1;
945 default:
946 return 0;
947 }
948}
949
950EXPORT_SYMBOL(nfs4_acl_new); 867EXPORT_SYMBOL(nfs4_acl_new);
951EXPORT_SYMBOL(nfs4_acl_free); 868EXPORT_SYMBOL(nfs4_acl_free);
952EXPORT_SYMBOL(nfs4_acl_add_ace); 869EXPORT_SYMBOL(nfs4_acl_add_ace);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 15ded7a30a72..8333db12caca 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -646,7 +646,7 @@ nfsd4_write(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_writ
646 *p++ = nfssvc_boot.tv_usec; 646 *p++ = nfssvc_boot.tv_usec;
647 647
648 status = nfsd_write(rqstp, current_fh, filp, write->wr_offset, 648 status = nfsd_write(rqstp, current_fh, filp, write->wr_offset,
649 write->wr_vec, write->wr_vlen, write->wr_buflen, 649 rqstp->rq_vec, write->wr_vlen, write->wr_buflen,
650 &write->wr_how_written); 650 &write->wr_how_written);
651 if (filp) 651 if (filp)
652 fput(filp); 652 fput(filp);
@@ -802,13 +802,29 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
802 * SETCLIENTID_CONFIRM, PUTFH and PUTROOTFH 802 * SETCLIENTID_CONFIRM, PUTFH and PUTROOTFH
803 * require a valid current filehandle 803 * require a valid current filehandle
804 */ 804 */
805 if ((!current_fh->fh_dentry) && 805 if (!current_fh->fh_dentry) {
806 !((op->opnum == OP_PUTFH) || (op->opnum == OP_PUTROOTFH) || 806 if (!((op->opnum == OP_PUTFH) ||
807 (op->opnum == OP_SETCLIENTID) || 807 (op->opnum == OP_PUTROOTFH) ||
808 (op->opnum == OP_SETCLIENTID_CONFIRM) || 808 (op->opnum == OP_SETCLIENTID) ||
809 (op->opnum == OP_RENEW) || (op->opnum == OP_RESTOREFH) || 809 (op->opnum == OP_SETCLIENTID_CONFIRM) ||
810 (op->opnum == OP_RELEASE_LOCKOWNER))) { 810 (op->opnum == OP_RENEW) ||
811 op->status = nfserr_nofilehandle; 811 (op->opnum == OP_RESTOREFH) ||
812 (op->opnum == OP_RELEASE_LOCKOWNER))) {
813 op->status = nfserr_nofilehandle;
814 goto encode_op;
815 }
816 }
817 /* Check must be done at start of each operation, except
818 * for GETATTR and ops not listed as returning NFS4ERR_MOVED
819 */
820 else if (current_fh->fh_export->ex_fslocs.migrated &&
821 !((op->opnum == OP_GETATTR) ||
822 (op->opnum == OP_PUTROOTFH) ||
823 (op->opnum == OP_PUTPUBFH) ||
824 (op->opnum == OP_RENEW) ||
825 (op->opnum == OP_SETCLIENTID) ||
826 (op->opnum == OP_RELEASE_LOCKOWNER))) {
827 op->status = nfserr_moved;
812 goto encode_op; 828 goto encode_op;
813 } 829 }
814 switch (op->opnum) { 830 switch (op->opnum) {
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 5be00436b5b8..41fc241b729a 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -60,6 +60,14 @@
60 60
61#define NFSDDBG_FACILITY NFSDDBG_XDR 61#define NFSDDBG_FACILITY NFSDDBG_XDR
62 62
63/*
64 * As per referral draft, the fsid for a referral MUST be different from the fsid of the containing
65 * directory in order to indicate to the client that a filesystem boundary is present
66 * We use a fixed fsid for a referral
67 */
68#define NFS4_REFERRAL_FSID_MAJOR 0x8000000ULL
69#define NFS4_REFERRAL_FSID_MINOR 0x8000000ULL
70
63static int 71static int
64check_filename(char *str, int len, int err) 72check_filename(char *str, int len, int err)
65{ 73{
@@ -926,26 +934,26 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
926 printk(KERN_NOTICE "xdr error! (%s:%d)\n", __FILE__, __LINE__); 934 printk(KERN_NOTICE "xdr error! (%s:%d)\n", __FILE__, __LINE__);
927 goto xdr_error; 935 goto xdr_error;
928 } 936 }
929 write->wr_vec[0].iov_base = p; 937 argp->rqstp->rq_vec[0].iov_base = p;
930 write->wr_vec[0].iov_len = avail; 938 argp->rqstp->rq_vec[0].iov_len = avail;
931 v = 0; 939 v = 0;
932 len = write->wr_buflen; 940 len = write->wr_buflen;
933 while (len > write->wr_vec[v].iov_len) { 941 while (len > argp->rqstp->rq_vec[v].iov_len) {
934 len -= write->wr_vec[v].iov_len; 942 len -= argp->rqstp->rq_vec[v].iov_len;
935 v++; 943 v++;
936 write->wr_vec[v].iov_base = page_address(argp->pagelist[0]); 944 argp->rqstp->rq_vec[v].iov_base = page_address(argp->pagelist[0]);
937 argp->pagelist++; 945 argp->pagelist++;
938 if (argp->pagelen >= PAGE_SIZE) { 946 if (argp->pagelen >= PAGE_SIZE) {
939 write->wr_vec[v].iov_len = PAGE_SIZE; 947 argp->rqstp->rq_vec[v].iov_len = PAGE_SIZE;
940 argp->pagelen -= PAGE_SIZE; 948 argp->pagelen -= PAGE_SIZE;
941 } else { 949 } else {
942 write->wr_vec[v].iov_len = argp->pagelen; 950 argp->rqstp->rq_vec[v].iov_len = argp->pagelen;
943 argp->pagelen -= len; 951 argp->pagelen -= len;
944 } 952 }
945 } 953 }
946 argp->end = (u32*) (write->wr_vec[v].iov_base + write->wr_vec[v].iov_len); 954 argp->end = (u32*) (argp->rqstp->rq_vec[v].iov_base + argp->rqstp->rq_vec[v].iov_len);
947 argp->p = (u32*) (write->wr_vec[v].iov_base + (XDR_QUADLEN(len) << 2)); 955 argp->p = (u32*) (argp->rqstp->rq_vec[v].iov_base + (XDR_QUADLEN(len) << 2));
948 write->wr_vec[v].iov_len = len; 956 argp->rqstp->rq_vec[v].iov_len = len;
949 write->wr_vlen = v+1; 957 write->wr_vlen = v+1;
950 958
951 DECODE_TAIL; 959 DECODE_TAIL;
@@ -1223,6 +1231,119 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
1223 stateowner->so_replay.rp_buflen); \ 1231 stateowner->so_replay.rp_buflen); \
1224 } } while (0); 1232 } } while (0);
1225 1233
1234/* Encode as an array of strings the string given with components
1235 * seperated @sep.
1236 */
1237static int nfsd4_encode_components(char sep, char *components,
1238 u32 **pp, int *buflen)
1239{
1240 u32 *p = *pp;
1241 u32 *countp = p;
1242 int strlen, count=0;
1243 char *str, *end;
1244
1245 dprintk("nfsd4_encode_components(%s)\n", components);
1246 if ((*buflen -= 4) < 0)
1247 return nfserr_resource;
1248 WRITE32(0); /* We will fill this in with @count later */
1249 end = str = components;
1250 while (*end) {
1251 for (; *end && (*end != sep); end++)
1252 ; /* Point to end of component */
1253 strlen = end - str;
1254 if (strlen) {
1255 if ((*buflen -= ((XDR_QUADLEN(strlen) << 2) + 4)) < 0)
1256 return nfserr_resource;
1257 WRITE32(strlen);
1258 WRITEMEM(str, strlen);
1259 count++;
1260 }
1261 else
1262 end++;
1263 str = end;
1264 }
1265 *pp = p;
1266 p = countp;
1267 WRITE32(count);
1268 return 0;
1269}
1270
1271/*
1272 * encode a location element of a fs_locations structure
1273 */
1274static int nfsd4_encode_fs_location4(struct nfsd4_fs_location *location,
1275 u32 **pp, int *buflen)
1276{
1277 int status;
1278 u32 *p = *pp;
1279
1280 status = nfsd4_encode_components(':', location->hosts, &p, buflen);
1281 if (status)
1282 return status;
1283 status = nfsd4_encode_components('/', location->path, &p, buflen);
1284 if (status)
1285 return status;
1286 *pp = p;
1287 return 0;
1288}
1289
1290/*
1291 * Return the path to an export point in the pseudo filesystem namespace
1292 * Returned string is safe to use as long as the caller holds a reference
1293 * to @exp.
1294 */
1295static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp)
1296{
1297 struct svc_fh tmp_fh;
1298 char *path, *rootpath;
1299 int stat;
1300
1301 fh_init(&tmp_fh, NFS4_FHSIZE);
1302 stat = exp_pseudoroot(rqstp->rq_client, &tmp_fh, &rqstp->rq_chandle);
1303 if (stat)
1304 return ERR_PTR(stat);
1305 rootpath = tmp_fh.fh_export->ex_path;
1306
1307 path = exp->ex_path;
1308
1309 if (strncmp(path, rootpath, strlen(rootpath))) {
1310 printk("nfsd: fs_locations failed;"
1311 "%s is not contained in %s\n", path, rootpath);
1312 return ERR_PTR(-EOPNOTSUPP);
1313 }
1314
1315 return path + strlen(rootpath);
1316}
1317
1318/*
1319 * encode a fs_locations structure
1320 */
1321static int nfsd4_encode_fs_locations(struct svc_rqst *rqstp,
1322 struct svc_export *exp,
1323 u32 **pp, int *buflen)
1324{
1325 int status, i;
1326 u32 *p = *pp;
1327 struct nfsd4_fs_locations *fslocs = &exp->ex_fslocs;
1328 char *root = nfsd4_path(rqstp, exp);
1329
1330 if (IS_ERR(root))
1331 return PTR_ERR(root);
1332 status = nfsd4_encode_components('/', root, &p, buflen);
1333 if (status)
1334 return status;
1335 if ((*buflen -= 4) < 0)
1336 return nfserr_resource;
1337 WRITE32(fslocs->locations_count);
1338 for (i=0; i<fslocs->locations_count; i++) {
1339 status = nfsd4_encode_fs_location4(&fslocs->locations[i],
1340 &p, buflen);
1341 if (status)
1342 return status;
1343 }
1344 *pp = p;
1345 return 0;
1346}
1226 1347
1227static u32 nfs4_ftypes[16] = { 1348static u32 nfs4_ftypes[16] = {
1228 NF4BAD, NF4FIFO, NF4CHR, NF4BAD, 1349 NF4BAD, NF4FIFO, NF4CHR, NF4BAD,
@@ -1272,6 +1393,25 @@ nfsd4_encode_aclname(struct svc_rqst *rqstp, int whotype, uid_t id, int group,
1272 return nfsd4_encode_name(rqstp, whotype, id, group, p, buflen); 1393 return nfsd4_encode_name(rqstp, whotype, id, group, p, buflen);
1273} 1394}
1274 1395
1396#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
1397 FATTR4_WORD0_RDATTR_ERROR)
1398#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
1399
1400static int fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
1401{
1402 /* As per referral draft: */
1403 if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
1404 *bmval1 & ~WORD1_ABSENT_FS_ATTRS) {
1405 if (*bmval0 & FATTR4_WORD0_RDATTR_ERROR ||
1406 *bmval0 & FATTR4_WORD0_FS_LOCATIONS)
1407 *rdattr_err = NFSERR_MOVED;
1408 else
1409 return nfserr_moved;
1410 }
1411 *bmval0 &= WORD0_ABSENT_FS_ATTRS;
1412 *bmval1 &= WORD1_ABSENT_FS_ATTRS;
1413 return 0;
1414}
1275 1415
1276/* 1416/*
1277 * Note: @fhp can be NULL; in this case, we might have to compose the filehandle 1417 * Note: @fhp can be NULL; in this case, we might have to compose the filehandle
@@ -1294,6 +1434,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
1294 u32 *attrlenp; 1434 u32 *attrlenp;
1295 u32 dummy; 1435 u32 dummy;
1296 u64 dummy64; 1436 u64 dummy64;
1437 u32 rdattr_err = 0;
1297 u32 *p = buffer; 1438 u32 *p = buffer;
1298 int status; 1439 int status;
1299 int aclsupport = 0; 1440 int aclsupport = 0;
@@ -1303,6 +1444,12 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
1303 BUG_ON(bmval0 & ~NFSD_SUPPORTED_ATTRS_WORD0); 1444 BUG_ON(bmval0 & ~NFSD_SUPPORTED_ATTRS_WORD0);
1304 BUG_ON(bmval1 & ~NFSD_SUPPORTED_ATTRS_WORD1); 1445 BUG_ON(bmval1 & ~NFSD_SUPPORTED_ATTRS_WORD1);
1305 1446
1447 if (exp->ex_fslocs.migrated) {
1448 status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
1449 if (status)
1450 goto out;
1451 }
1452
1306 status = vfs_getattr(exp->ex_mnt, dentry, &stat); 1453 status = vfs_getattr(exp->ex_mnt, dentry, &stat);
1307 if (status) 1454 if (status)
1308 goto out_nfserr; 1455 goto out_nfserr;
@@ -1334,6 +1481,11 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
1334 goto out_nfserr; 1481 goto out_nfserr;
1335 } 1482 }
1336 } 1483 }
1484 if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
1485 if (exp->ex_fslocs.locations == NULL) {
1486 bmval0 &= ~FATTR4_WORD0_FS_LOCATIONS;
1487 }
1488 }
1337 if ((buflen -= 16) < 0) 1489 if ((buflen -= 16) < 0)
1338 goto out_resource; 1490 goto out_resource;
1339 1491
@@ -1343,12 +1495,15 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
1343 attrlenp = p++; /* to be backfilled later */ 1495 attrlenp = p++; /* to be backfilled later */
1344 1496
1345 if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) { 1497 if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
1498 u32 word0 = NFSD_SUPPORTED_ATTRS_WORD0;
1346 if ((buflen -= 12) < 0) 1499 if ((buflen -= 12) < 0)
1347 goto out_resource; 1500 goto out_resource;
1501 if (!aclsupport)
1502 word0 &= ~FATTR4_WORD0_ACL;
1503 if (!exp->ex_fslocs.locations)
1504 word0 &= ~FATTR4_WORD0_FS_LOCATIONS;
1348 WRITE32(2); 1505 WRITE32(2);
1349 WRITE32(aclsupport ? 1506 WRITE32(word0);
1350 NFSD_SUPPORTED_ATTRS_WORD0 :
1351 NFSD_SUPPORTED_ATTRS_WORD0 & ~FATTR4_WORD0_ACL);
1352 WRITE32(NFSD_SUPPORTED_ATTRS_WORD1); 1507 WRITE32(NFSD_SUPPORTED_ATTRS_WORD1);
1353 } 1508 }
1354 if (bmval0 & FATTR4_WORD0_TYPE) { 1509 if (bmval0 & FATTR4_WORD0_TYPE) {
@@ -1402,7 +1557,10 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
1402 if (bmval0 & FATTR4_WORD0_FSID) { 1557 if (bmval0 & FATTR4_WORD0_FSID) {
1403 if ((buflen -= 16) < 0) 1558 if ((buflen -= 16) < 0)
1404 goto out_resource; 1559 goto out_resource;
1405 if (is_fsid(fhp, rqstp->rq_reffh)) { 1560 if (exp->ex_fslocs.migrated) {
1561 WRITE64(NFS4_REFERRAL_FSID_MAJOR);
1562 WRITE64(NFS4_REFERRAL_FSID_MINOR);
1563 } else if (is_fsid(fhp, rqstp->rq_reffh)) {
1406 WRITE64((u64)exp->ex_fsid); 1564 WRITE64((u64)exp->ex_fsid);
1407 WRITE64((u64)0); 1565 WRITE64((u64)0);
1408 } else { 1566 } else {
@@ -1425,7 +1583,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
1425 if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) { 1583 if (bmval0 & FATTR4_WORD0_RDATTR_ERROR) {
1426 if ((buflen -= 4) < 0) 1584 if ((buflen -= 4) < 0)
1427 goto out_resource; 1585 goto out_resource;
1428 WRITE32(0); 1586 WRITE32(rdattr_err);
1429 } 1587 }
1430 if (bmval0 & FATTR4_WORD0_ACL) { 1588 if (bmval0 & FATTR4_WORD0_ACL) {
1431 struct nfs4_ace *ace; 1589 struct nfs4_ace *ace;
@@ -1513,6 +1671,13 @@ out_acl:
1513 goto out_resource; 1671 goto out_resource;
1514 WRITE64((u64) statfs.f_files); 1672 WRITE64((u64) statfs.f_files);
1515 } 1673 }
1674 if (bmval0 & FATTR4_WORD0_FS_LOCATIONS) {
1675 status = nfsd4_encode_fs_locations(rqstp, exp, &p, &buflen);
1676 if (status == nfserr_resource)
1677 goto out_resource;
1678 if (status)
1679 goto out;
1680 }
1516 if (bmval0 & FATTR4_WORD0_HOMOGENEOUS) { 1681 if (bmval0 & FATTR4_WORD0_HOMOGENEOUS) {
1517 if ((buflen -= 4) < 0) 1682 if ((buflen -= 4) < 0)
1518 goto out_resource; 1683 goto out_resource;
@@ -1536,12 +1701,12 @@ out_acl:
1536 if (bmval0 & FATTR4_WORD0_MAXREAD) { 1701 if (bmval0 & FATTR4_WORD0_MAXREAD) {
1537 if ((buflen -= 8) < 0) 1702 if ((buflen -= 8) < 0)
1538 goto out_resource; 1703 goto out_resource;
1539 WRITE64((u64) NFSSVC_MAXBLKSIZE); 1704 WRITE64((u64) svc_max_payload(rqstp));
1540 } 1705 }
1541 if (bmval0 & FATTR4_WORD0_MAXWRITE) { 1706 if (bmval0 & FATTR4_WORD0_MAXWRITE) {
1542 if ((buflen -= 8) < 0) 1707 if ((buflen -= 8) < 0)
1543 goto out_resource; 1708 goto out_resource;
1544 WRITE64((u64) NFSSVC_MAXBLKSIZE); 1709 WRITE64((u64) svc_max_payload(rqstp));
1545 } 1710 }
1546 if (bmval1 & FATTR4_WORD1_MODE) { 1711 if (bmval1 & FATTR4_WORD1_MODE) {
1547 if ((buflen -= 4) < 0) 1712 if ((buflen -= 4) < 0)
@@ -1845,7 +2010,6 @@ nfsd4_encode_getattr(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_ge
1845 nfserr = nfsd4_encode_fattr(fhp, fhp->fh_export, fhp->fh_dentry, 2010 nfserr = nfsd4_encode_fattr(fhp, fhp->fh_export, fhp->fh_dentry,
1846 resp->p, &buflen, getattr->ga_bmval, 2011 resp->p, &buflen, getattr->ga_bmval,
1847 resp->rqstp); 2012 resp->rqstp);
1848
1849 if (!nfserr) 2013 if (!nfserr)
1850 resp->p += buflen; 2014 resp->p += buflen;
1851 return nfserr; 2015 return nfserr;
@@ -2039,7 +2203,8 @@ nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, int nfserr, struct n
2039} 2203}
2040 2204
2041static int 2205static int
2042nfsd4_encode_read(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_read *read) 2206nfsd4_encode_read(struct nfsd4_compoundres *resp, int nfserr,
2207 struct nfsd4_read *read)
2043{ 2208{
2044 u32 eof; 2209 u32 eof;
2045 int v, pn; 2210 int v, pn;
@@ -2054,31 +2219,33 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_read
2054 2219
2055 RESERVE_SPACE(8); /* eof flag and byte count */ 2220 RESERVE_SPACE(8); /* eof flag and byte count */
2056 2221
2057 maxcount = NFSSVC_MAXBLKSIZE; 2222 maxcount = svc_max_payload(resp->rqstp);
2058 if (maxcount > read->rd_length) 2223 if (maxcount > read->rd_length)
2059 maxcount = read->rd_length; 2224 maxcount = read->rd_length;
2060 2225
2061 len = maxcount; 2226 len = maxcount;
2062 v = 0; 2227 v = 0;
2063 while (len > 0) { 2228 while (len > 0) {
2064 pn = resp->rqstp->rq_resused; 2229 pn = resp->rqstp->rq_resused++;
2065 svc_take_page(resp->rqstp); 2230 resp->rqstp->rq_vec[v].iov_base =
2066 read->rd_iov[v].iov_base = page_address(resp->rqstp->rq_respages[pn]); 2231 page_address(resp->rqstp->rq_respages[pn]);
2067 read->rd_iov[v].iov_len = len < PAGE_SIZE ? len : PAGE_SIZE; 2232 resp->rqstp->rq_vec[v].iov_len =
2233 len < PAGE_SIZE ? len : PAGE_SIZE;
2068 v++; 2234 v++;
2069 len -= PAGE_SIZE; 2235 len -= PAGE_SIZE;
2070 } 2236 }
2071 read->rd_vlen = v; 2237 read->rd_vlen = v;
2072 2238
2073 nfserr = nfsd_read(read->rd_rqstp, read->rd_fhp, read->rd_filp, 2239 nfserr = nfsd_read(read->rd_rqstp, read->rd_fhp, read->rd_filp,
2074 read->rd_offset, read->rd_iov, read->rd_vlen, 2240 read->rd_offset, resp->rqstp->rq_vec, read->rd_vlen,
2075 &maxcount); 2241 &maxcount);
2076 2242
2077 if (nfserr == nfserr_symlink) 2243 if (nfserr == nfserr_symlink)
2078 nfserr = nfserr_inval; 2244 nfserr = nfserr_inval;
2079 if (nfserr) 2245 if (nfserr)
2080 return nfserr; 2246 return nfserr;
2081 eof = (read->rd_offset + maxcount >= read->rd_fhp->fh_dentry->d_inode->i_size); 2247 eof = (read->rd_offset + maxcount >=
2248 read->rd_fhp->fh_dentry->d_inode->i_size);
2082 2249
2083 WRITE32(eof); 2250 WRITE32(eof);
2084 WRITE32(maxcount); 2251 WRITE32(maxcount);
@@ -2088,7 +2255,6 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_read
2088 resp->xbuf->page_len = maxcount; 2255 resp->xbuf->page_len = maxcount;
2089 2256
2090 /* Use rest of head for padding and remaining ops: */ 2257 /* Use rest of head for padding and remaining ops: */
2091 resp->rqstp->rq_restailpage = 0;
2092 resp->xbuf->tail[0].iov_base = p; 2258 resp->xbuf->tail[0].iov_base = p;
2093 resp->xbuf->tail[0].iov_len = 0; 2259 resp->xbuf->tail[0].iov_len = 0;
2094 if (maxcount&3) { 2260 if (maxcount&3) {
@@ -2113,8 +2279,7 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_r
2113 if (resp->xbuf->page_len) 2279 if (resp->xbuf->page_len)
2114 return nfserr_resource; 2280 return nfserr_resource;
2115 2281
2116 svc_take_page(resp->rqstp); 2282 page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]);
2117 page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]);
2118 2283
2119 maxcount = PAGE_SIZE; 2284 maxcount = PAGE_SIZE;
2120 RESERVE_SPACE(4); 2285 RESERVE_SPACE(4);
@@ -2138,7 +2303,6 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_r
2138 resp->xbuf->page_len = maxcount; 2303 resp->xbuf->page_len = maxcount;
2139 2304
2140 /* Use rest of head for padding and remaining ops: */ 2305 /* Use rest of head for padding and remaining ops: */
2141 resp->rqstp->rq_restailpage = 0;
2142 resp->xbuf->tail[0].iov_base = p; 2306 resp->xbuf->tail[0].iov_base = p;
2143 resp->xbuf->tail[0].iov_len = 0; 2307 resp->xbuf->tail[0].iov_len = 0;
2144 if (maxcount&3) { 2308 if (maxcount&3) {
@@ -2189,8 +2353,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_re
2189 goto err_no_verf; 2353 goto err_no_verf;
2190 } 2354 }
2191 2355
2192 svc_take_page(resp->rqstp); 2356 page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused++]);
2193 page = page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]);
2194 readdir->common.err = 0; 2357 readdir->common.err = 0;
2195 readdir->buflen = maxcount; 2358 readdir->buflen = maxcount;
2196 readdir->buffer = page; 2359 readdir->buffer = page;
@@ -2215,10 +2378,10 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, int nfserr, struct nfsd4_re
2215 p = readdir->buffer; 2378 p = readdir->buffer;
2216 *p++ = 0; /* no more entries */ 2379 *p++ = 0; /* no more entries */
2217 *p++ = htonl(readdir->common.err == nfserr_eof); 2380 *p++ = htonl(readdir->common.err == nfserr_eof);
2218 resp->xbuf->page_len = ((char*)p) - (char*)page_address(resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]); 2381 resp->xbuf->page_len = ((char*)p) - (char*)page_address(
2382 resp->rqstp->rq_respages[resp->rqstp->rq_resused-1]);
2219 2383
2220 /* Use rest of head for padding and remaining ops: */ 2384 /* Use rest of head for padding and remaining ops: */
2221 resp->rqstp->rq_restailpage = 0;
2222 resp->xbuf->tail[0].iov_base = tailbase; 2385 resp->xbuf->tail[0].iov_base = tailbase;
2223 resp->xbuf->tail[0].iov_len = 0; 2386 resp->xbuf->tail[0].iov_len = 0;
2224 resp->p = resp->xbuf->tail[0].iov_base; 2387 resp->p = resp->xbuf->tail[0].iov_base;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 5c6a477c20ec..39aed901514b 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -57,6 +57,7 @@ enum {
57 NFSD_Pool_Threads, 57 NFSD_Pool_Threads,
58 NFSD_Versions, 58 NFSD_Versions,
59 NFSD_Ports, 59 NFSD_Ports,
60 NFSD_MaxBlkSize,
60 /* 61 /*
61 * The below MUST come last. Otherwise we leave a hole in nfsd_files[] 62 * The below MUST come last. Otherwise we leave a hole in nfsd_files[]
62 * with !CONFIG_NFSD_V4 and simple_fill_super() goes oops 63 * with !CONFIG_NFSD_V4 and simple_fill_super() goes oops
@@ -82,6 +83,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size);
82static ssize_t write_pool_threads(struct file *file, char *buf, size_t size); 83static ssize_t write_pool_threads(struct file *file, char *buf, size_t size);
83static ssize_t write_versions(struct file *file, char *buf, size_t size); 84static ssize_t write_versions(struct file *file, char *buf, size_t size);
84static ssize_t write_ports(struct file *file, char *buf, size_t size); 85static ssize_t write_ports(struct file *file, char *buf, size_t size);
86static ssize_t write_maxblksize(struct file *file, char *buf, size_t size);
85#ifdef CONFIG_NFSD_V4 87#ifdef CONFIG_NFSD_V4
86static ssize_t write_leasetime(struct file *file, char *buf, size_t size); 88static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
87static ssize_t write_recoverydir(struct file *file, char *buf, size_t size); 89static ssize_t write_recoverydir(struct file *file, char *buf, size_t size);
@@ -100,6 +102,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
100 [NFSD_Pool_Threads] = write_pool_threads, 102 [NFSD_Pool_Threads] = write_pool_threads,
101 [NFSD_Versions] = write_versions, 103 [NFSD_Versions] = write_versions,
102 [NFSD_Ports] = write_ports, 104 [NFSD_Ports] = write_ports,
105 [NFSD_MaxBlkSize] = write_maxblksize,
103#ifdef CONFIG_NFSD_V4 106#ifdef CONFIG_NFSD_V4
104 [NFSD_Leasetime] = write_leasetime, 107 [NFSD_Leasetime] = write_leasetime,
105 [NFSD_RecoveryDir] = write_recoverydir, 108 [NFSD_RecoveryDir] = write_recoverydir,
@@ -523,18 +526,20 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size)
523 err = nfsd_create_serv(); 526 err = nfsd_create_serv();
524 if (!err) { 527 if (!err) {
525 int proto = 0; 528 int proto = 0;
526 err = lockd_up(proto); 529 err = svc_addsock(nfsd_serv, fd, buf, &proto);
527 if (!err) { 530 if (err >= 0) {
528 err = svc_addsock(nfsd_serv, fd, buf, &proto); 531 err = lockd_up(proto);
529 if (err) 532 if (err < 0)
530 lockd_down(); 533 svc_sock_names(buf+strlen(buf)+1, nfsd_serv, buf);
531 } 534 }
532 /* Decrease the count, but don't shutdown the 535 /* Decrease the count, but don't shutdown the
533 * the service 536 * the service
534 */ 537 */
538 lock_kernel();
535 nfsd_serv->sv_nrthreads--; 539 nfsd_serv->sv_nrthreads--;
540 unlock_kernel();
536 } 541 }
537 return err; 542 return err < 0 ? err : 0;
538 } 543 }
539 if (buf[0] == '-') { 544 if (buf[0] == '-') {
540 char *toclose = kstrdup(buf+1, GFP_KERNEL); 545 char *toclose = kstrdup(buf+1, GFP_KERNEL);
@@ -545,12 +550,43 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size)
545 if (nfsd_serv) 550 if (nfsd_serv)
546 len = svc_sock_names(buf, nfsd_serv, toclose); 551 len = svc_sock_names(buf, nfsd_serv, toclose);
547 unlock_kernel(); 552 unlock_kernel();
553 if (len >= 0)
554 lockd_down();
548 kfree(toclose); 555 kfree(toclose);
549 return len; 556 return len;
550 } 557 }
551 return -EINVAL; 558 return -EINVAL;
552} 559}
553 560
561int nfsd_max_blksize;
562
563static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
564{
565 char *mesg = buf;
566 if (size > 0) {
567 int bsize;
568 int rv = get_int(&mesg, &bsize);
569 if (rv)
570 return rv;
571 /* force bsize into allowed range and
572 * required alignment.
573 */
574 if (bsize < 1024)
575 bsize = 1024;
576 if (bsize > NFSSVC_MAXBLKSIZE)
577 bsize = NFSSVC_MAXBLKSIZE;
578 bsize &= ~(1024-1);
579 lock_kernel();
580 if (nfsd_serv && nfsd_serv->sv_nrthreads) {
581 unlock_kernel();
582 return -EBUSY;
583 }
584 nfsd_max_blksize = bsize;
585 unlock_kernel();
586 }
587 return sprintf(buf, "%d\n", nfsd_max_blksize);
588}
589
554#ifdef CONFIG_NFSD_V4 590#ifdef CONFIG_NFSD_V4
555extern time_t nfs4_leasetime(void); 591extern time_t nfs4_leasetime(void);
556 592
@@ -616,6 +652,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
616 [NFSD_Pool_Threads] = {"pool_threads", &transaction_ops, S_IWUSR|S_IRUSR}, 652 [NFSD_Pool_Threads] = {"pool_threads", &transaction_ops, S_IWUSR|S_IRUSR},
617 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, 653 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
618 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, 654 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
655 [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
619#ifdef CONFIG_NFSD_V4 656#ifdef CONFIG_NFSD_V4
620 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR}, 657 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
621 [NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR}, 658 [NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 06cd0db0f32b..9ee1dab5d44a 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -146,20 +146,20 @@ nfsd_proc_read(struct svc_rqst *rqstp, struct nfsd_readargs *argp,
146 * status, 17 words for fattr, and 1 word for the byte count. 146 * status, 17 words for fattr, and 1 word for the byte count.
147 */ 147 */
148 148
149 if (NFSSVC_MAXBLKSIZE < argp->count) { 149 if (NFSSVC_MAXBLKSIZE_V2 < argp->count) {
150 printk(KERN_NOTICE 150 printk(KERN_NOTICE
151 "oversized read request from %u.%u.%u.%u:%d (%d bytes)\n", 151 "oversized read request from %u.%u.%u.%u:%d (%d bytes)\n",
152 NIPQUAD(rqstp->rq_addr.sin_addr.s_addr), 152 NIPQUAD(rqstp->rq_addr.sin_addr.s_addr),
153 ntohs(rqstp->rq_addr.sin_port), 153 ntohs(rqstp->rq_addr.sin_port),
154 argp->count); 154 argp->count);
155 argp->count = NFSSVC_MAXBLKSIZE; 155 argp->count = NFSSVC_MAXBLKSIZE_V2;
156 } 156 }
157 svc_reserve(rqstp, (19<<2) + argp->count + 4); 157 svc_reserve(rqstp, (19<<2) + argp->count + 4);
158 158
159 resp->count = argp->count; 159 resp->count = argp->count;
160 nfserr = nfsd_read(rqstp, fh_copy(&resp->fh, &argp->fh), NULL, 160 nfserr = nfsd_read(rqstp, fh_copy(&resp->fh, &argp->fh), NULL,
161 argp->offset, 161 argp->offset,
162 argp->vec, argp->vlen, 162 rqstp->rq_vec, argp->vlen,
163 &resp->count); 163 &resp->count);
164 164
165 if (nfserr) return nfserr; 165 if (nfserr) return nfserr;
@@ -185,7 +185,7 @@ nfsd_proc_write(struct svc_rqst *rqstp, struct nfsd_writeargs *argp,
185 185
186 nfserr = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh), NULL, 186 nfserr = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh), NULL,
187 argp->offset, 187 argp->offset,
188 argp->vec, argp->vlen, 188 rqstp->rq_vec, argp->vlen,
189 argp->len, 189 argp->len,
190 &stable); 190 &stable);
191 return nfsd_return_attrs(nfserr, resp); 191 return nfsd_return_attrs(nfserr, resp);
@@ -225,7 +225,7 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
225 nfserr = nfserr_exist; 225 nfserr = nfserr_exist;
226 if (isdotent(argp->name, argp->len)) 226 if (isdotent(argp->name, argp->len))
227 goto done; 227 goto done;
228 fh_lock(dirfhp); 228 fh_lock_nested(dirfhp, I_MUTEX_PARENT);
229 dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len); 229 dchild = lookup_one_len(argp->name, dirfhp->fh_dentry, argp->len);
230 if (IS_ERR(dchild)) { 230 if (IS_ERR(dchild)) {
231 nfserr = nfserrno(PTR_ERR(dchild)); 231 nfserr = nfserrno(PTR_ERR(dchild));
@@ -553,7 +553,7 @@ static struct svc_procedure nfsd_procedures2[18] = {
553 PROC(none, void, void, none, RC_NOCACHE, ST), 553 PROC(none, void, void, none, RC_NOCACHE, ST),
554 PROC(lookup, diropargs, diropres, fhandle, RC_NOCACHE, ST+FH+AT), 554 PROC(lookup, diropargs, diropres, fhandle, RC_NOCACHE, ST+FH+AT),
555 PROC(readlink, readlinkargs, readlinkres, none, RC_NOCACHE, ST+1+NFS_MAXPATHLEN/4), 555 PROC(readlink, readlinkargs, readlinkres, none, RC_NOCACHE, ST+1+NFS_MAXPATHLEN/4),
556 PROC(read, readargs, readres, fhandle, RC_NOCACHE, ST+AT+1+NFSSVC_MAXBLKSIZE/4), 556 PROC(read, readargs, readres, fhandle, RC_NOCACHE, ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4),
557 PROC(none, void, void, none, RC_NOCACHE, ST), 557 PROC(none, void, void, none, RC_NOCACHE, ST),
558 PROC(write, writeargs, attrstat, fhandle, RC_REPLBUFF, ST+AT), 558 PROC(write, writeargs, attrstat, fhandle, RC_REPLBUFF, ST+AT),
559 PROC(create, createargs, diropres, fhandle, RC_REPLBUFF, ST+FH+AT), 559 PROC(create, createargs, diropres, fhandle, RC_REPLBUFF, ST+FH+AT),
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 19443056ec30..6fa6340a5fb8 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -198,9 +198,26 @@ int nfsd_create_serv(void)
198 unlock_kernel(); 198 unlock_kernel();
199 return 0; 199 return 0;
200 } 200 }
201 if (nfsd_max_blksize == 0) {
202 /* choose a suitable default */
203 struct sysinfo i;
204 si_meminfo(&i);
205 /* Aim for 1/4096 of memory per thread
206 * This gives 1MB on 4Gig machines
207 * But only uses 32K on 128M machines.
208 * Bottom out at 8K on 32M and smaller.
209 * Of course, this is only a default.
210 */
211 nfsd_max_blksize = NFSSVC_MAXBLKSIZE;
212 i.totalram <<= PAGE_SHIFT - 12;
213 while (nfsd_max_blksize > i.totalram &&
214 nfsd_max_blksize >= 8*1024*2)
215 nfsd_max_blksize /= 2;
216 }
201 217
202 atomic_set(&nfsd_busy, 0); 218 atomic_set(&nfsd_busy, 0);
203 nfsd_serv = svc_create_pooled(&nfsd_program, NFSD_BUFSIZE, 219 nfsd_serv = svc_create_pooled(&nfsd_program,
220 NFSD_BUFSIZE - NFSSVC_MAXBLKSIZE + nfsd_max_blksize,
204 nfsd_last_thread, 221 nfsd_last_thread,
205 nfsd, SIG_NOCLEAN, THIS_MODULE); 222 nfsd, SIG_NOCLEAN, THIS_MODULE);
206 if (nfsd_serv == NULL) 223 if (nfsd_serv == NULL)
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 3f14a17eaa6e..1135c0d14557 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -254,19 +254,18 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, u32 *p,
254 len = args->count = ntohl(*p++); 254 len = args->count = ntohl(*p++);
255 p++; /* totalcount - unused */ 255 p++; /* totalcount - unused */
256 256
257 if (len > NFSSVC_MAXBLKSIZE) 257 if (len > NFSSVC_MAXBLKSIZE_V2)
258 len = NFSSVC_MAXBLKSIZE; 258 len = NFSSVC_MAXBLKSIZE_V2;
259 259
260 /* set up somewhere to store response. 260 /* set up somewhere to store response.
261 * We take pages, put them on reslist and include in iovec 261 * We take pages, put them on reslist and include in iovec
262 */ 262 */
263 v=0; 263 v=0;
264 while (len > 0) { 264 while (len > 0) {
265 pn=rqstp->rq_resused; 265 pn = rqstp->rq_resused++;
266 svc_take_page(rqstp); 266 rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_respages[pn]);
267 args->vec[v].iov_base = page_address(rqstp->rq_respages[pn]); 267 rqstp->rq_vec[v].iov_len = len < PAGE_SIZE?len:PAGE_SIZE;
268 args->vec[v].iov_len = len < PAGE_SIZE?len:PAGE_SIZE; 268 len -= rqstp->rq_vec[v].iov_len;
269 len -= args->vec[v].iov_len;
270 v++; 269 v++;
271 } 270 }
272 args->vlen = v; 271 args->vlen = v;
@@ -286,21 +285,21 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, u32 *p,
286 args->offset = ntohl(*p++); /* offset */ 285 args->offset = ntohl(*p++); /* offset */
287 p++; /* totalcount */ 286 p++; /* totalcount */
288 len = args->len = ntohl(*p++); 287 len = args->len = ntohl(*p++);
289 args->vec[0].iov_base = (void*)p; 288 rqstp->rq_vec[0].iov_base = (void*)p;
290 args->vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - 289 rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len -
291 (((void*)p) - rqstp->rq_arg.head[0].iov_base); 290 (((void*)p) - rqstp->rq_arg.head[0].iov_base);
292 if (len > NFSSVC_MAXBLKSIZE) 291 if (len > NFSSVC_MAXBLKSIZE_V2)
293 len = NFSSVC_MAXBLKSIZE; 292 len = NFSSVC_MAXBLKSIZE_V2;
294 v = 0; 293 v = 0;
295 while (len > args->vec[v].iov_len) { 294 while (len > rqstp->rq_vec[v].iov_len) {
296 len -= args->vec[v].iov_len; 295 len -= rqstp->rq_vec[v].iov_len;
297 v++; 296 v++;
298 args->vec[v].iov_base = page_address(rqstp->rq_argpages[v]); 297 rqstp->rq_vec[v].iov_base = page_address(rqstp->rq_pages[v]);
299 args->vec[v].iov_len = PAGE_SIZE; 298 rqstp->rq_vec[v].iov_len = PAGE_SIZE;
300 } 299 }
301 args->vec[v].iov_len = len; 300 rqstp->rq_vec[v].iov_len = len;
302 args->vlen = v+1; 301 args->vlen = v+1;
303 return args->vec[0].iov_len > 0; 302 return rqstp->rq_vec[0].iov_len > 0;
304} 303}
305 304
306int 305int
@@ -333,8 +332,7 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, u32 *p, struct nfsd_readlinka
333{ 332{
334 if (!(p = decode_fh(p, &args->fh))) 333 if (!(p = decode_fh(p, &args->fh)))
335 return 0; 334 return 0;
336 svc_take_page(rqstp); 335 args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused++]);
337 args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused-1]);
338 336
339 return xdr_argsize_check(rqstp, p); 337 return xdr_argsize_check(rqstp, p);
340} 338}
@@ -375,8 +373,7 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, u32 *p,
375 if (args->count > PAGE_SIZE) 373 if (args->count > PAGE_SIZE)
376 args->count = PAGE_SIZE; 374 args->count = PAGE_SIZE;
377 375
378 svc_take_page(rqstp); 376 args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused++]);
379 args->buffer = page_address(rqstp->rq_respages[rqstp->rq_resused-1]);
380 377
381 return xdr_argsize_check(rqstp, p); 378 return xdr_argsize_check(rqstp, p);
382} 379}
@@ -416,7 +413,6 @@ nfssvc_encode_readlinkres(struct svc_rqst *rqstp, u32 *p,
416 rqstp->rq_res.page_len = resp->len; 413 rqstp->rq_res.page_len = resp->len;
417 if (resp->len & 3) { 414 if (resp->len & 3) {
418 /* need to pad the tail */ 415 /* need to pad the tail */
419 rqstp->rq_restailpage = 0;
420 rqstp->rq_res.tail[0].iov_base = p; 416 rqstp->rq_res.tail[0].iov_base = p;
421 *p = 0; 417 *p = 0;
422 rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); 418 rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3);
@@ -436,7 +432,6 @@ nfssvc_encode_readres(struct svc_rqst *rqstp, u32 *p,
436 rqstp->rq_res.page_len = resp->count; 432 rqstp->rq_res.page_len = resp->count;
437 if (resp->count & 3) { 433 if (resp->count & 3) {
438 /* need to pad the tail */ 434 /* need to pad the tail */
439 rqstp->rq_restailpage = 0;
440 rqstp->rq_res.tail[0].iov_base = p; 435 rqstp->rq_res.tail[0].iov_base = p;
441 *p = 0; 436 *p = 0;
442 rqstp->rq_res.tail[0].iov_len = 4 - (resp->count&3); 437 rqstp->rq_res.tail[0].iov_len = 4 - (resp->count&3);
@@ -463,7 +458,7 @@ nfssvc_encode_statfsres(struct svc_rqst *rqstp, u32 *p,
463{ 458{
464 struct kstatfs *stat = &resp->stats; 459 struct kstatfs *stat = &resp->stats;
465 460
466 *p++ = htonl(NFSSVC_MAXBLKSIZE); /* max transfer size */ 461 *p++ = htonl(NFSSVC_MAXBLKSIZE_V2); /* max transfer size */
467 *p++ = htonl(stat->f_bsize); 462 *p++ = htonl(stat->f_bsize);
468 *p++ = htonl(stat->f_blocks); 463 *p++ = htonl(stat->f_blocks);
469 *p++ = htonl(stat->f_bfree); 464 *p++ = htonl(stat->f_bfree);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 443ebc52e382..1141bd29e4e3 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -54,6 +54,7 @@
54#include <linux/nfsd_idmap.h> 54#include <linux/nfsd_idmap.h>
55#include <linux/security.h> 55#include <linux/security.h>
56#endif /* CONFIG_NFSD_V4 */ 56#endif /* CONFIG_NFSD_V4 */
57#include <linux/jhash.h>
57 58
58#include <asm/uaccess.h> 59#include <asm/uaccess.h>
59 60
@@ -81,10 +82,19 @@ struct raparms {
81 dev_t p_dev; 82 dev_t p_dev;
82 int p_set; 83 int p_set;
83 struct file_ra_state p_ra; 84 struct file_ra_state p_ra;
85 unsigned int p_hindex;
84}; 86};
85 87
88struct raparm_hbucket {
89 struct raparms *pb_head;
90 spinlock_t pb_lock;
91} ____cacheline_aligned_in_smp;
92
86static struct raparms * raparml; 93static struct raparms * raparml;
87static struct raparms * raparm_cache; 94#define RAPARM_HASH_BITS 4
95#define RAPARM_HASH_SIZE (1<<RAPARM_HASH_BITS)
96#define RAPARM_HASH_MASK (RAPARM_HASH_SIZE-1)
97static struct raparm_hbucket raparm_hash[RAPARM_HASH_SIZE];
88 98
89/* 99/*
90 * Called from nfsd_lookup and encode_dirent. Check if we have crossed 100 * Called from nfsd_lookup and encode_dirent. Check if we have crossed
@@ -437,13 +447,11 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
437 } else if (error < 0) 447 } else if (error < 0)
438 goto out_nfserr; 448 goto out_nfserr;
439 449
440 if (pacl) { 450 error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS);
441 error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS); 451 if (error < 0)
442 if (error < 0) 452 goto out_nfserr;
443 goto out_nfserr;
444 }
445 453
446 if (dpacl) { 454 if (S_ISDIR(inode->i_mode)) {
447 error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT); 455 error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT);
448 if (error < 0) 456 if (error < 0)
449 goto out_nfserr; 457 goto out_nfserr;
@@ -743,16 +751,20 @@ nfsd_sync_dir(struct dentry *dp)
743 * Obtain the readahead parameters for the file 751 * Obtain the readahead parameters for the file
744 * specified by (dev, ino). 752 * specified by (dev, ino).
745 */ 753 */
746static DEFINE_SPINLOCK(ra_lock);
747 754
748static inline struct raparms * 755static inline struct raparms *
749nfsd_get_raparms(dev_t dev, ino_t ino) 756nfsd_get_raparms(dev_t dev, ino_t ino)
750{ 757{
751 struct raparms *ra, **rap, **frap = NULL; 758 struct raparms *ra, **rap, **frap = NULL;
752 int depth = 0; 759 int depth = 0;
760 unsigned int hash;
761 struct raparm_hbucket *rab;
753 762
754 spin_lock(&ra_lock); 763 hash = jhash_2words(dev, ino, 0xfeedbeef) & RAPARM_HASH_MASK;
755 for (rap = &raparm_cache; (ra = *rap); rap = &ra->p_next) { 764 rab = &raparm_hash[hash];
765
766 spin_lock(&rab->pb_lock);
767 for (rap = &rab->pb_head; (ra = *rap); rap = &ra->p_next) {
756 if (ra->p_ino == ino && ra->p_dev == dev) 768 if (ra->p_ino == ino && ra->p_dev == dev)
757 goto found; 769 goto found;
758 depth++; 770 depth++;
@@ -761,7 +773,7 @@ nfsd_get_raparms(dev_t dev, ino_t ino)
761 } 773 }
762 depth = nfsdstats.ra_size*11/10; 774 depth = nfsdstats.ra_size*11/10;
763 if (!frap) { 775 if (!frap) {
764 spin_unlock(&ra_lock); 776 spin_unlock(&rab->pb_lock);
765 return NULL; 777 return NULL;
766 } 778 }
767 rap = frap; 779 rap = frap;
@@ -769,15 +781,16 @@ nfsd_get_raparms(dev_t dev, ino_t ino)
769 ra->p_dev = dev; 781 ra->p_dev = dev;
770 ra->p_ino = ino; 782 ra->p_ino = ino;
771 ra->p_set = 0; 783 ra->p_set = 0;
784 ra->p_hindex = hash;
772found: 785found:
773 if (rap != &raparm_cache) { 786 if (rap != &rab->pb_head) {
774 *rap = ra->p_next; 787 *rap = ra->p_next;
775 ra->p_next = raparm_cache; 788 ra->p_next = rab->pb_head;
776 raparm_cache = ra; 789 rab->pb_head = ra;
777 } 790 }
778 ra->p_count++; 791 ra->p_count++;
779 nfsdstats.ra_depth[depth*10/nfsdstats.ra_size]++; 792 nfsdstats.ra_depth[depth*10/nfsdstats.ra_size]++;
780 spin_unlock(&ra_lock); 793 spin_unlock(&rab->pb_lock);
781 return ra; 794 return ra;
782} 795}
783 796
@@ -791,22 +804,26 @@ nfsd_read_actor(read_descriptor_t *desc, struct page *page, unsigned long offset
791{ 804{
792 unsigned long count = desc->count; 805 unsigned long count = desc->count;
793 struct svc_rqst *rqstp = desc->arg.data; 806 struct svc_rqst *rqstp = desc->arg.data;
807 struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
794 808
795 if (size > count) 809 if (size > count)
796 size = count; 810 size = count;
797 811
798 if (rqstp->rq_res.page_len == 0) { 812 if (rqstp->rq_res.page_len == 0) {
799 get_page(page); 813 get_page(page);
800 rqstp->rq_respages[rqstp->rq_resused++] = page; 814 put_page(*pp);
815 *pp = page;
816 rqstp->rq_resused++;
801 rqstp->rq_res.page_base = offset; 817 rqstp->rq_res.page_base = offset;
802 rqstp->rq_res.page_len = size; 818 rqstp->rq_res.page_len = size;
803 } else if (page != rqstp->rq_respages[rqstp->rq_resused-1]) { 819 } else if (page != pp[-1]) {
804 get_page(page); 820 get_page(page);
805 rqstp->rq_respages[rqstp->rq_resused++] = page; 821 put_page(*pp);
822 *pp = page;
823 rqstp->rq_resused++;
806 rqstp->rq_res.page_len += size; 824 rqstp->rq_res.page_len += size;
807 } else { 825 } else
808 rqstp->rq_res.page_len += size; 826 rqstp->rq_res.page_len += size;
809 }
810 827
811 desc->count = count - size; 828 desc->count = count - size;
812 desc->written += size; 829 desc->written += size;
@@ -837,7 +854,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
837 file->f_ra = ra->p_ra; 854 file->f_ra = ra->p_ra;
838 855
839 if (file->f_op->sendfile && rqstp->rq_sendfile_ok) { 856 if (file->f_op->sendfile && rqstp->rq_sendfile_ok) {
840 svc_pushback_unused_pages(rqstp); 857 rqstp->rq_resused = 1;
841 err = file->f_op->sendfile(file, &offset, *count, 858 err = file->f_op->sendfile(file, &offset, *count,
842 nfsd_read_actor, rqstp); 859 nfsd_read_actor, rqstp);
843 } else { 860 } else {
@@ -849,11 +866,12 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
849 866
850 /* Write back readahead params */ 867 /* Write back readahead params */
851 if (ra) { 868 if (ra) {
852 spin_lock(&ra_lock); 869 struct raparm_hbucket *rab = &raparm_hash[ra->p_hindex];
870 spin_lock(&rab->pb_lock);
853 ra->p_ra = file->f_ra; 871 ra->p_ra = file->f_ra;
854 ra->p_set = 1; 872 ra->p_set = 1;
855 ra->p_count--; 873 ra->p_count--;
856 spin_unlock(&ra_lock); 874 spin_unlock(&rab->pb_lock);
857 } 875 }
858 876
859 if (err >= 0) { 877 if (err >= 0) {
@@ -1829,11 +1847,11 @@ nfsd_permission(struct svc_export *exp, struct dentry *dentry, int acc)
1829void 1847void
1830nfsd_racache_shutdown(void) 1848nfsd_racache_shutdown(void)
1831{ 1849{
1832 if (!raparm_cache) 1850 if (!raparml)
1833 return; 1851 return;
1834 dprintk("nfsd: freeing readahead buffers.\n"); 1852 dprintk("nfsd: freeing readahead buffers.\n");
1835 kfree(raparml); 1853 kfree(raparml);
1836 raparm_cache = raparml = NULL; 1854 raparml = NULL;
1837} 1855}
1838/* 1856/*
1839 * Initialize readahead param cache 1857 * Initialize readahead param cache
@@ -1842,19 +1860,31 @@ int
1842nfsd_racache_init(int cache_size) 1860nfsd_racache_init(int cache_size)
1843{ 1861{
1844 int i; 1862 int i;
1863 int j = 0;
1864 int nperbucket;
1865
1845 1866
1846 if (raparm_cache) 1867 if (raparml)
1847 return 0; 1868 return 0;
1869 if (cache_size < 2*RAPARM_HASH_SIZE)
1870 cache_size = 2*RAPARM_HASH_SIZE;
1848 raparml = kmalloc(sizeof(struct raparms) * cache_size, GFP_KERNEL); 1871 raparml = kmalloc(sizeof(struct raparms) * cache_size, GFP_KERNEL);
1849 1872
1850 if (raparml != NULL) { 1873 if (raparml != NULL) {
1851 dprintk("nfsd: allocating %d readahead buffers.\n", 1874 dprintk("nfsd: allocating %d readahead buffers.\n",
1852 cache_size); 1875 cache_size);
1876 for (i = 0 ; i < RAPARM_HASH_SIZE ; i++) {
1877 raparm_hash[i].pb_head = NULL;
1878 spin_lock_init(&raparm_hash[i].pb_lock);
1879 }
1880 nperbucket = cache_size >> RAPARM_HASH_BITS;
1853 memset(raparml, 0, sizeof(struct raparms) * cache_size); 1881 memset(raparml, 0, sizeof(struct raparms) * cache_size);
1854 for (i = 0; i < cache_size - 1; i++) { 1882 for (i = 0; i < cache_size - 1; i++) {
1855 raparml[i].p_next = raparml + i + 1; 1883 if (i % nperbucket == 0)
1884 raparm_hash[j++].pb_head = raparml + i;
1885 if (i % nperbucket < nperbucket-1)
1886 raparml[i].p_next = raparml + i + 1;
1856 } 1887 }
1857 raparm_cache = raparml;
1858 } else { 1888 } else {
1859 printk(KERN_WARNING 1889 printk(KERN_WARNING
1860 "nfsd: Could not allocate memory read-ahead cache.\n"); 1890 "nfsd: Could not allocate memory read-ahead cache.\n");
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 7e5a2f5ebeb0..9c69bcacad22 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1780,7 +1780,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
1780 err = -EDQUOT; 1780 err = -EDQUOT;
1781 goto out_end_trans; 1781 goto out_end_trans;
1782 } 1782 }
1783 if (!dir || !dir->i_nlink) { 1783 if (!dir->i_nlink) {
1784 err = -EPERM; 1784 err = -EPERM;
1785 goto out_bad_inode; 1785 goto out_bad_inode;
1786 } 1786 }
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index 87e5a351d881..88f02a073561 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -17,8 +17,6 @@
17#include <asm/irq.h> 17#include <asm/irq.h>
18#include <asm/sections.h> 18#include <asm/sections.h>
19 19
20struct hw_interrupt_type;
21
22#define NMI_VECTOR 0x02 20#define NMI_VECTOR 0x02
23 21
24/* 22/*
@@ -30,7 +28,6 @@ struct hw_interrupt_type;
30 28
31extern u8 irq_vector[NR_IRQ_VECTORS]; 29extern u8 irq_vector[NR_IRQ_VECTORS];
32#define IO_APIC_VECTOR(irq) (irq_vector[irq]) 30#define IO_APIC_VECTOR(irq) (irq_vector[irq])
33#define AUTO_ASSIGN -1
34 31
35extern void (*interrupt[NR_IRQS])(void); 32extern void (*interrupt[NR_IRQS])(void);
36 33
diff --git a/include/asm-i386/hypertransport.h b/include/asm-i386/hypertransport.h
new file mode 100644
index 000000000000..c16c6ff4bdd7
--- /dev/null
+++ b/include/asm-i386/hypertransport.h
@@ -0,0 +1,42 @@
1#ifndef ASM_HYPERTRANSPORT_H
2#define ASM_HYPERTRANSPORT_H
3
4/*
5 * Constants for x86 Hypertransport Interrupts.
6 */
7
8#define HT_IRQ_LOW_BASE 0xf8000000
9
10#define HT_IRQ_LOW_VECTOR_SHIFT 16
11#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
12#define HT_IRQ_LOW_VECTOR(v) (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
13
14#define HT_IRQ_LOW_DEST_ID_SHIFT 8
15#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
16#define HT_IRQ_LOW_DEST_ID(v) (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
17
18#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000
19#define HT_IRQ_LOW_DM_LOGICAL 0x0000040
20
21#define HT_IRQ_LOW_RQEOI_EDGE 0x0000000
22#define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020
23
24
25#define HT_IRQ_LOW_MT_FIXED 0x0000000
26#define HT_IRQ_LOW_MT_ARBITRATED 0x0000004
27#define HT_IRQ_LOW_MT_SMI 0x0000008
28#define HT_IRQ_LOW_MT_NMI 0x000000c
29#define HT_IRQ_LOW_MT_INIT 0x0000010
30#define HT_IRQ_LOW_MT_STARTUP 0x0000014
31#define HT_IRQ_LOW_MT_EXTINT 0x0000018
32#define HT_IRQ_LOW_MT_LINT1 0x000008c
33#define HT_IRQ_LOW_MT_LINT0 0x0000098
34
35#define HT_IRQ_LOW_IRQ_MASKED 0x0000001
36
37
38#define HT_IRQ_HIGH_DEST_ID_SHIFT 0
39#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
40#define HT_IRQ_HIGH_DEST_ID(v) ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
41
42#endif /* ASM_HYPERTRANSPORT_H */
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 5d309275a1dc..276ea7e8144a 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -12,46 +12,6 @@
12 12
13#ifdef CONFIG_X86_IO_APIC 13#ifdef CONFIG_X86_IO_APIC
14 14
15#ifdef CONFIG_PCI_MSI
16static inline int use_pci_vector(void) {return 1;}
17static inline void disable_edge_ioapic_vector(unsigned int vector) { }
18static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
19static inline void end_edge_ioapic_vector (unsigned int vector) { }
20#define startup_level_ioapic startup_level_ioapic_vector
21#define shutdown_level_ioapic mask_IO_APIC_vector
22#define enable_level_ioapic unmask_IO_APIC_vector
23#define disable_level_ioapic mask_IO_APIC_vector
24#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector
25#define end_level_ioapic end_level_ioapic_vector
26#define set_ioapic_affinity set_ioapic_affinity_vector
27
28#define startup_edge_ioapic startup_edge_ioapic_vector
29#define shutdown_edge_ioapic disable_edge_ioapic_vector
30#define enable_edge_ioapic unmask_IO_APIC_vector
31#define disable_edge_ioapic disable_edge_ioapic_vector
32#define ack_edge_ioapic ack_edge_ioapic_vector
33#define end_edge_ioapic end_edge_ioapic_vector
34#else
35static inline int use_pci_vector(void) {return 0;}
36static inline void disable_edge_ioapic_irq(unsigned int irq) { }
37static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { }
38static inline void end_edge_ioapic_irq (unsigned int irq) { }
39#define startup_level_ioapic startup_level_ioapic_irq
40#define shutdown_level_ioapic mask_IO_APIC_irq
41#define enable_level_ioapic unmask_IO_APIC_irq
42#define disable_level_ioapic mask_IO_APIC_irq
43#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_irq
44#define end_level_ioapic end_level_ioapic_irq
45#define set_ioapic_affinity set_ioapic_affinity_irq
46
47#define startup_edge_ioapic startup_edge_ioapic_irq
48#define shutdown_edge_ioapic disable_edge_ioapic_irq
49#define enable_edge_ioapic unmask_IO_APIC_irq
50#define disable_edge_ioapic disable_edge_ioapic_irq
51#define ack_edge_ioapic ack_edge_ioapic_irq
52#define end_edge_ioapic end_edge_ioapic_irq
53#endif
54
55#define IO_APIC_BASE(idx) \ 15#define IO_APIC_BASE(idx) \
56 ((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \ 16 ((volatile int *)(__fix_to_virt(FIX_IO_APIC_BASE_0 + idx) \
57 + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK))) 17 + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK)))
@@ -219,6 +179,4 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
219static inline void disable_ioapic_setup(void) { } 179static inline void disable_ioapic_setup(void) { }
220#endif 180#endif
221 181
222extern int assign_irq_vector(int irq);
223
224#endif 182#endif
diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h
index b330026e6f7f..7f161e760be6 100644
--- a/include/asm-i386/mach-default/irq_vectors_limits.h
+++ b/include/asm-i386/mach-default/irq_vectors_limits.h
@@ -1,10 +1,6 @@
1#ifndef _ASM_IRQ_VECTORS_LIMITS_H 1#ifndef _ASM_IRQ_VECTORS_LIMITS_H
2#define _ASM_IRQ_VECTORS_LIMITS_H 2#define _ASM_IRQ_VECTORS_LIMITS_H
3 3
4#ifdef CONFIG_PCI_MSI
5#define NR_IRQS FIRST_SYSTEM_VECTOR
6#define NR_IRQ_VECTORS NR_IRQS
7#else
8#ifdef CONFIG_X86_IO_APIC 4#ifdef CONFIG_X86_IO_APIC
9#define NR_IRQS 224 5#define NR_IRQS 224
10# if (224 >= 32 * NR_CPUS) 6# if (224 >= 32 * NR_CPUS)
@@ -16,6 +12,5 @@
16#define NR_IRQS 16 12#define NR_IRQS 16
17#define NR_IRQ_VECTORS NR_IRQS 13#define NR_IRQ_VECTORS NR_IRQS
18#endif 14#endif
19#endif
20 15
21#endif /* _ASM_IRQ_VECTORS_LIMITS_H */ 16#endif /* _ASM_IRQ_VECTORS_LIMITS_H */
diff --git a/include/asm-i386/msi.h b/include/asm-i386/msi.h
deleted file mode 100644
index b11c4b7dfaef..000000000000
--- a/include/asm-i386/msi.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * Copyright (C) 2003-2004 Intel
3 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
4 */
5
6#ifndef ASM_MSI_H
7#define ASM_MSI_H
8
9#include <asm/desc.h>
10#include <mach_apic.h>
11
12#define LAST_DEVICE_VECTOR (FIRST_SYSTEM_VECTOR - 1)
13#define MSI_TARGET_CPU_SHIFT 12
14
15extern struct msi_ops msi_apic_ops;
16
17static inline int msi_arch_init(void)
18{
19 msi_register(&msi_apic_ops);
20 return 0;
21}
22
23#endif /* ASM_MSI_H */
diff --git a/include/asm-i386/msidef.h b/include/asm-i386/msidef.h
new file mode 100644
index 000000000000..5b8acddb70fb
--- /dev/null
+++ b/include/asm-i386/msidef.h
@@ -0,0 +1,47 @@
1#ifndef ASM_MSIDEF_H
2#define ASM_MSIDEF_H
3
4/*
5 * Constants for Intel APIC based MSI messages.
6 */
7
8/*
9 * Shifts for MSI data
10 */
11
12#define MSI_DATA_VECTOR_SHIFT 0
13#define MSI_DATA_VECTOR_MASK 0x000000ff
14#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK)
15
16#define MSI_DATA_DELIVERY_MODE_SHIFT 8
17#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
18#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
19
20#define MSI_DATA_LEVEL_SHIFT 14
21#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
22#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
23
24#define MSI_DATA_TRIGGER_SHIFT 15
25#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
26#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
27
28/*
29 * Shift/mask fields for msi address
30 */
31
32#define MSI_ADDR_BASE_HI 0
33#define MSI_ADDR_BASE_LO 0xfee00000
34
35#define MSI_ADDR_DEST_MODE_SHIFT 2
36#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
37#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)
38
39#define MSI_ADDR_REDIRECTION_SHIFT 3
40#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */
41#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */
42
43#define MSI_ADDR_DEST_ID_SHIFT 12
44#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
45#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK)
46
47#endif /* ASM_MSIDEF_H */
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h
index 15b545a897a4..90cba967df35 100644
--- a/include/asm-ia64/machvec.h
+++ b/include/asm-ia64/machvec.h
@@ -20,6 +20,7 @@ struct page;
20struct mm_struct; 20struct mm_struct;
21struct pci_bus; 21struct pci_bus;
22struct task_struct; 22struct task_struct;
23struct pci_dev;
23 24
24typedef void ia64_mv_setup_t (char **); 25typedef void ia64_mv_setup_t (char **);
25typedef void ia64_mv_cpu_init_t (void); 26typedef void ia64_mv_cpu_init_t (void);
@@ -75,7 +76,9 @@ typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
75typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *); 76typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
76typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *); 77typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
77typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *); 78typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
78typedef int ia64_mv_msi_init_t (void); 79
80typedef int ia64_mv_setup_msi_irq_t (unsigned int irq, struct pci_dev *pdev);
81typedef void ia64_mv_teardown_msi_irq_t (unsigned int irq);
79 82
80static inline void 83static inline void
81machvec_noop (void) 84machvec_noop (void)
@@ -154,7 +157,8 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
154# define platform_readl_relaxed ia64_mv.readl_relaxed 157# define platform_readl_relaxed ia64_mv.readl_relaxed
155# define platform_readq_relaxed ia64_mv.readq_relaxed 158# define platform_readq_relaxed ia64_mv.readq_relaxed
156# define platform_migrate ia64_mv.migrate 159# define platform_migrate ia64_mv.migrate
157# define platform_msi_init ia64_mv.msi_init 160# define platform_setup_msi_irq ia64_mv.setup_msi_irq
161# define platform_teardown_msi_irq ia64_mv.teardown_msi_irq
158# endif 162# endif
159 163
160/* __attribute__((__aligned__(16))) is required to make size of the 164/* __attribute__((__aligned__(16))) is required to make size of the
@@ -204,7 +208,8 @@ struct ia64_machine_vector {
204 ia64_mv_readl_relaxed_t *readl_relaxed; 208 ia64_mv_readl_relaxed_t *readl_relaxed;
205 ia64_mv_readq_relaxed_t *readq_relaxed; 209 ia64_mv_readq_relaxed_t *readq_relaxed;
206 ia64_mv_migrate_t *migrate; 210 ia64_mv_migrate_t *migrate;
207 ia64_mv_msi_init_t *msi_init; 211 ia64_mv_setup_msi_irq_t *setup_msi_irq;
212 ia64_mv_teardown_msi_irq_t *teardown_msi_irq;
208} __attribute__((__aligned__(16))); /* align attrib? see above comment */ 213} __attribute__((__aligned__(16))); /* align attrib? see above comment */
209 214
210#define MACHVEC_INIT(name) \ 215#define MACHVEC_INIT(name) \
@@ -250,7 +255,8 @@ struct ia64_machine_vector {
250 platform_readl_relaxed, \ 255 platform_readl_relaxed, \
251 platform_readq_relaxed, \ 256 platform_readq_relaxed, \
252 platform_migrate, \ 257 platform_migrate, \
253 platform_msi_init, \ 258 platform_setup_msi_irq, \
259 platform_teardown_msi_irq, \
254} 260}
255 261
256extern struct ia64_machine_vector ia64_mv; 262extern struct ia64_machine_vector ia64_mv;
@@ -404,8 +410,11 @@ extern int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size
404#ifndef platform_migrate 410#ifndef platform_migrate
405# define platform_migrate machvec_noop_task 411# define platform_migrate machvec_noop_task
406#endif 412#endif
407#ifndef platform_msi_init 413#ifndef platform_setup_msi_irq
408# define platform_msi_init ((ia64_mv_msi_init_t*)NULL) 414# define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL)
415#endif
416#ifndef platform_teardown_msi_irq
417# define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
409#endif 418#endif
410 419
411#endif /* _ASM_IA64_MACHVEC_H */ 420#endif /* _ASM_IA64_MACHVEC_H */
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h
index cf724dc79d8c..c54b165b1c17 100644
--- a/include/asm-ia64/machvec_sn2.h
+++ b/include/asm-ia64/machvec_sn2.h
@@ -67,7 +67,8 @@ extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device;
67extern ia64_mv_dma_mapping_error sn_dma_mapping_error; 67extern ia64_mv_dma_mapping_error sn_dma_mapping_error;
68extern ia64_mv_dma_supported sn_dma_supported; 68extern ia64_mv_dma_supported sn_dma_supported;
69extern ia64_mv_migrate_t sn_migrate; 69extern ia64_mv_migrate_t sn_migrate;
70extern ia64_mv_msi_init_t sn_msi_init; 70extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq;
71extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq;
71 72
72 73
73/* 74/*
@@ -120,9 +121,11 @@ extern ia64_mv_msi_init_t sn_msi_init;
120#define platform_dma_supported sn_dma_supported 121#define platform_dma_supported sn_dma_supported
121#define platform_migrate sn_migrate 122#define platform_migrate sn_migrate
122#ifdef CONFIG_PCI_MSI 123#ifdef CONFIG_PCI_MSI
123#define platform_msi_init sn_msi_init 124#define platform_setup_msi_irq sn_setup_msi_irq
125#define platform_teardown_msi_irq sn_teardown_msi_irq
124#else 126#else
125#define platform_msi_init ((ia64_mv_msi_init_t*)NULL) 127#define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL)
128#define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL)
126#endif 129#endif
127 130
128#include <asm/sn/io.h> 131#include <asm/sn/io.h>
diff --git a/include/asm-ia64/msi.h b/include/asm-ia64/msi.h
deleted file mode 100644
index bb92b0dbde2f..000000000000
--- a/include/asm-ia64/msi.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Copyright (C) 2003-2004 Intel
3 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
4 */
5
6#ifndef ASM_MSI_H
7#define ASM_MSI_H
8
9#define NR_VECTORS NR_IRQS
10#define FIRST_DEVICE_VECTOR IA64_FIRST_DEVICE_VECTOR
11#define LAST_DEVICE_VECTOR IA64_LAST_DEVICE_VECTOR
12static inline void set_intr_gate (int nr, void *func) {}
13#define IO_APIC_VECTOR(irq) (irq)
14#define ack_APIC_irq ia64_eoi
15#define MSI_TARGET_CPU_SHIFT 4
16
17extern struct msi_ops msi_apic_ops;
18
19static inline int msi_arch_init(void)
20{
21 if (platform_msi_init)
22 return platform_msi_init();
23
24 /* default ops for most ia64 platforms */
25 msi_register(&msi_apic_ops);
26 return 0;
27}
28
29#endif /* ASM_MSI_H */
diff --git a/include/asm-parisc/agp.h b/include/asm-parisc/agp.h
new file mode 100644
index 000000000000..9f61d4eb6c01
--- /dev/null
+++ b/include/asm-parisc/agp.h
@@ -0,0 +1,25 @@
1#ifndef _ASM_PARISC_AGP_H
2#define _ASM_PARISC_AGP_H
3
4/*
5 * PARISC specific AGP definitions.
6 * Copyright (c) 2006 Kyle McMartin <kyle@parisc-linux.org>
7 *
8 */
9
10#define map_page_into_agp(page) /* nothing */
11#define unmap_page_from_agp(page) /* nothing */
12#define flush_agp_mappings() /* nothing */
13#define flush_agp_cache() mb()
14
15/* Convert a physical address to an address suitable for the GART. */
16#define phys_to_gart(x) (x)
17#define gart_to_phys(x) (x)
18
19/* GATT allocation. Returns/accepts GATT kernel virtual address. */
20#define alloc_gatt_pages(order) \
21 ((char *)__get_free_pages(GFP_KERNEL, (order)))
22#define free_gatt_pages(table, order) \
23 free_pages((unsigned long)(table), (order))
24
25#endif /* _ASM_PARISC_AGP_H */
diff --git a/include/asm-parisc/assembly.h b/include/asm-parisc/assembly.h
index 1a7bfe699e0c..5a1e0e8b1c32 100644
--- a/include/asm-parisc/assembly.h
+++ b/include/asm-parisc/assembly.h
@@ -29,7 +29,8 @@
29#define LDREGX ldd,s 29#define LDREGX ldd,s
30#define LDREGM ldd,mb 30#define LDREGM ldd,mb
31#define STREGM std,ma 31#define STREGM std,ma
32#define SHRREG shrd 32#define SHRREG shrd
33#define SHLREG shld
33#define RP_OFFSET 16 34#define RP_OFFSET 16
34#define FRAME_SIZE 128 35#define FRAME_SIZE 128
35#define CALLEE_REG_FRAME_SIZE 144 36#define CALLEE_REG_FRAME_SIZE 144
@@ -39,7 +40,8 @@
39#define LDREGX ldwx,s 40#define LDREGX ldwx,s
40#define LDREGM ldwm 41#define LDREGM ldwm
41#define STREGM stwm 42#define STREGM stwm
42#define SHRREG shr 43#define SHRREG shr
44#define SHLREG shlw
43#define RP_OFFSET 20 45#define RP_OFFSET 20
44#define FRAME_SIZE 64 46#define FRAME_SIZE 64
45#define CALLEE_REG_FRAME_SIZE 128 47#define CALLEE_REG_FRAME_SIZE 128
diff --git a/include/asm-parisc/cacheflush.h b/include/asm-parisc/cacheflush.h
index 0b459cdfbd6f..2bc41f2e0271 100644
--- a/include/asm-parisc/cacheflush.h
+++ b/include/asm-parisc/cacheflush.h
@@ -191,16 +191,38 @@ flush_anon_page(struct page *page, unsigned long vmaddr)
191} 191}
192#define ARCH_HAS_FLUSH_ANON_PAGE 192#define ARCH_HAS_FLUSH_ANON_PAGE
193 193
194static inline void 194#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
195flush_kernel_dcache_page(struct page *page) 195void flush_kernel_dcache_page_addr(void *addr);
196static inline void flush_kernel_dcache_page(struct page *page)
196{ 197{
197 flush_kernel_dcache_page_asm(page_address(page)); 198 flush_kernel_dcache_page_addr(page_address(page));
198} 199}
199#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
200 200
201#ifdef CONFIG_DEBUG_RODATA 201#ifdef CONFIG_DEBUG_RODATA
202void mark_rodata_ro(void); 202void mark_rodata_ro(void);
203#endif 203#endif
204 204
205#ifdef CONFIG_PA8X00
206/* Only pa8800, pa8900 needs this */
207#define ARCH_HAS_KMAP
208
209void kunmap_parisc(void *addr);
210
211static inline void *kmap(struct page *page)
212{
213 might_sleep();
214 return page_address(page);
215}
216
217#define kunmap(page) kunmap_parisc(page_address(page))
218
219#define kmap_atomic(page, idx) page_address(page)
220
221#define kunmap_atomic(addr, idx) kunmap_parisc(addr)
222
223#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn))
224#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
225#endif
226
205#endif /* _PARISC_CACHEFLUSH_H */ 227#endif /* _PARISC_CACHEFLUSH_H */
206 228
diff --git a/include/asm-parisc/compat.h b/include/asm-parisc/compat.h
index 71b4eeea205a..fe8579023531 100644
--- a/include/asm-parisc/compat.h
+++ b/include/asm-parisc/compat.h
@@ -5,7 +5,7 @@
5 */ 5 */
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/personality.h> 8#include <linux/thread_info.h>
9 9
10#define COMPAT_USER_HZ 100 10#define COMPAT_USER_HZ 100
11 11
@@ -152,7 +152,7 @@ static __inline__ void __user *compat_alloc_user_space(long len)
152 152
153static inline int __is_compat_task(struct task_struct *t) 153static inline int __is_compat_task(struct task_struct *t)
154{ 154{
155 return personality(t->personality) == PER_LINUX32; 155 return test_ti_thread_flag(t->thread_info, TIF_32BIT);
156} 156}
157 157
158static inline int is_compat_task(void) 158static inline int is_compat_task(void)
diff --git a/include/asm-parisc/dma.h b/include/asm-parisc/dma.h
index 9979c3cb3745..da2cf373e31c 100644
--- a/include/asm-parisc/dma.h
+++ b/include/asm-parisc/dma.h
@@ -72,18 +72,13 @@
72#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ 72#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
73#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG) 73#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG)
74 74
75extern spinlock_t dma_spin_lock;
76
77static __inline__ unsigned long claim_dma_lock(void) 75static __inline__ unsigned long claim_dma_lock(void)
78{ 76{
79 unsigned long flags; 77 return 0;
80 spin_lock_irqsave(&dma_spin_lock, flags);
81 return flags;
82} 78}
83 79
84static __inline__ void release_dma_lock(unsigned long flags) 80static __inline__ void release_dma_lock(unsigned long flags)
85{ 81{
86 spin_unlock_irqrestore(&dma_spin_lock, flags);
87} 82}
88 83
89 84
diff --git a/include/asm-parisc/futex.h b/include/asm-parisc/futex.h
index 6a332a9f099c..d84bbb283fd1 100644
--- a/include/asm-parisc/futex.h
+++ b/include/asm-parisc/futex.h
@@ -1,6 +1,71 @@
1#ifndef _ASM_FUTEX_H 1#ifndef _ASM_PARISC_FUTEX_H
2#define _ASM_FUTEX_H 2#define _ASM_PARISC_FUTEX_H
3 3
4#include <asm-generic/futex.h> 4#ifdef __KERNEL__
5 5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52/* Non-atomic version */
53static inline int
54futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
55{
56 int err = 0;
57 int uval;
58
59 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
60 return -EFAULT;
61
62 err = get_user(uval, uaddr);
63 if (err) return -EFAULT;
64 if (uval == oldval)
65 err = put_user(newval, uaddr);
66 if (err) return -EFAULT;
67 return uval;
68}
69
70#endif
6#endif 71#endif
diff --git a/include/asm-parisc/io.h b/include/asm-parisc/io.h
index b9eb245b8874..c1963ce19dd2 100644
--- a/include/asm-parisc/io.h
+++ b/include/asm-parisc/io.h
@@ -134,7 +134,7 @@ extern inline void __iomem * ioremap(unsigned long offset, unsigned long size)
134} 134}
135#define ioremap_nocache(off, sz) ioremap((off), (sz)) 135#define ioremap_nocache(off, sz) ioremap((off), (sz))
136 136
137extern void iounmap(void __iomem *addr); 137extern void iounmap(const volatile void __iomem *addr);
138 138
139static inline unsigned char __raw_readb(const volatile void __iomem *addr) 139static inline unsigned char __raw_readb(const volatile void __iomem *addr)
140{ 140{
diff --git a/include/asm-parisc/iosapic.h b/include/asm-parisc/iosapic.h
deleted file mode 100644
index 613390e6805c..000000000000
--- a/include/asm-parisc/iosapic.h
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2** This file is private to iosapic driver.
3** If stuff needs to be used by another driver, move it to a common file.
4**
5** WARNING: fields most data structures here are ordered to make sure
6** they pack nicely for 64-bit compilation. (ie sizeof(long) == 8)
7*/
8
9
10/*
11** I/O SAPIC init function
12** Caller knows where an I/O SAPIC is. LBA has an integrated I/O SAPIC.
13** Call setup as part of per instance initialization.
14** (ie *not* init_module() function unless only one is present.)
15** fixup_irq is to initialize PCI IRQ line support and
16** virtualize pcidev->irq value. To be called by pci_fixup_bus().
17*/
18extern void *iosapic_register(unsigned long hpa);
19extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev);
20
21
22#ifdef __IA64__
23/*
24** PA: PIB (Processor Interrupt Block) is handled by Runway bus adapter.
25** and is hardcoded to 0xfeeNNNN0 where NNNN is id_eid field.
26**
27** IA64: PIB is handled by "Local SAPIC" (integrated in the processor).
28*/
29struct local_sapic_info {
30 struct local_sapic_info *lsi_next; /* point to next CPU info */
31 int *lsi_cpu_id; /* point to logical CPU id */
32 unsigned long *lsi_id_eid; /* point to IA-64 CPU id */
33 int *lsi_status; /* point to CPU status */
34 void *lsi_private; /* point to special info */
35};
36
37/*
38** "root" data structure which ties everything together.
39** Should always be able to start with sapic_root and locate
40** the desired information.
41*/
42struct sapic_info {
43 struct sapic_info *si_next; /* info is per cell */
44 int si_cellid; /* cell id */
45 unsigned int si_status; /* status */
46 char *si_pib_base; /* intr blk base address */
47 local_sapic_info_t *si_local_info;
48 io_sapic_info_t *si_io_info;
49 extint_info_t *si_extint_info;/* External Intr info */
50};
51
52#endif /* IA64 */
53
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index 5cae260615a2..399c81981ed5 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -31,7 +31,7 @@ static __inline__ int irq_canonicalize(int irq)
31 return (irq == 2) ? 9 : irq; 31 return (irq == 2) ? 9 : irq;
32} 32}
33 33
34struct hw_interrupt_type; 34struct irq_chip;
35 35
36/* 36/*
37 * Some useful "we don't have to do anything here" handlers. Should 37 * Some useful "we don't have to do anything here" handlers. Should
@@ -39,6 +39,8 @@ struct hw_interrupt_type;
39 */ 39 */
40void no_ack_irq(unsigned int irq); 40void no_ack_irq(unsigned int irq);
41void no_end_irq(unsigned int irq); 41void no_end_irq(unsigned int irq);
42void cpu_ack_irq(unsigned int irq);
43void cpu_end_irq(unsigned int irq);
42 44
43extern int txn_alloc_irq(unsigned int nbits); 45extern int txn_alloc_irq(unsigned int nbits);
44extern int txn_claim_irq(int); 46extern int txn_claim_irq(int);
@@ -46,7 +48,7 @@ extern unsigned int txn_alloc_data(unsigned int);
46extern unsigned long txn_alloc_addr(unsigned int); 48extern unsigned long txn_alloc_addr(unsigned int);
47extern unsigned long txn_affinity_addr(unsigned int irq, int cpu); 49extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
48 50
49extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *); 51extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
50extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest); 52extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);
51 53
52/* soft power switch support (power.c) */ 54/* soft power switch support (power.c) */
diff --git a/include/asm-parisc/mckinley.h b/include/asm-parisc/mckinley.h
new file mode 100644
index 000000000000..d1ea6f12915e
--- /dev/null
+++ b/include/asm-parisc/mckinley.h
@@ -0,0 +1,9 @@
1#ifndef ASM_PARISC_MCKINLEY_H
2#define ASM_PARISC_MCKINLEY_H
3#ifdef __KERNEL__
4
5/* declared in arch/parisc/kernel/setup.c */
6extern struct proc_dir_entry * proc_mckinley_root;
7
8#endif /*__KERNEL__*/
9#endif /*ASM_PARISC_MCKINLEY_H*/
diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h
index 57d6d82756dd..3567208191e3 100644
--- a/include/asm-parisc/page.h
+++ b/include/asm-parisc/page.h
@@ -26,24 +26,10 @@
26 26
27struct page; 27struct page;
28 28
29extern void purge_kernel_dcache_page(unsigned long); 29void copy_user_page_asm(void *to, void *from);
30extern void copy_user_page_asm(void *to, void *from); 30void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
31extern void clear_user_page_asm(void *page, unsigned long vaddr); 31 struct page *pg);
32 32void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
33static inline void
34copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg)
35{
36 copy_user_page_asm(vto, vfrom);
37 flush_kernel_dcache_page_asm(vto);
38 /* XXX: ppc flushes icache too, should we? */
39}
40
41static inline void
42clear_user_page(void *page, unsigned long vaddr, struct page *pg)
43{
44 purge_kernel_dcache_page((unsigned long)page);
45 clear_user_page_asm(page, vaddr);
46}
47 33
48/* 34/*
49 * These are used to make use of C type-checking.. 35 * These are used to make use of C type-checking..
diff --git a/include/asm-parisc/param.h b/include/asm-parisc/param.h
index 07cb9b93cfe2..32e03d877858 100644
--- a/include/asm-parisc/param.h
+++ b/include/asm-parisc/param.h
@@ -2,13 +2,9 @@
2#define _ASMPARISC_PARAM_H 2#define _ASMPARISC_PARAM_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5# ifdef CONFIG_PA20 5#define HZ CONFIG_HZ
6# define HZ 1000 /* Faster machines */ 6#define USER_HZ 100 /* some user API use "ticks" */
7# else 7#define CLOCKS_PER_SEC (USER_HZ) /* like times() */
8# define HZ 100 /* Internal kernel timer frequency */
9# endif
10# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
11# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
12#endif 8#endif
13 9
14#ifndef HZ 10#ifndef HZ
diff --git a/include/asm-parisc/parisc-device.h b/include/asm-parisc/parisc-device.h
index 1d247e32a608..e12624d8941d 100644
--- a/include/asm-parisc/parisc-device.h
+++ b/include/asm-parisc/parisc-device.h
@@ -1,3 +1,6 @@
1#ifndef _ASM_PARISC_PARISC_DEVICE_H_
2#define _ASM_PARISC_PARISC_DEVICE_H_
3
1#include <linux/device.h> 4#include <linux/device.h>
2 5
3struct parisc_device { 6struct parisc_device {
@@ -57,3 +60,5 @@ parisc_get_drvdata(struct parisc_device *d)
57} 60}
58 61
59extern struct bus_type parisc_bus_type; 62extern struct bus_type parisc_bus_type;
63
64#endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
diff --git a/include/asm-parisc/pci.h b/include/asm-parisc/pci.h
index 8b631f47eb25..7b8ad118d2fe 100644
--- a/include/asm-parisc/pci.h
+++ b/include/asm-parisc/pci.h
@@ -293,4 +293,9 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
293 /* We don't need to penalize isa irq's */ 293 /* We don't need to penalize isa irq's */
294} 294}
295 295
296static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
297{
298 return channel ? 15 : 14;
299}
300
296#endif /* __ASM_PARISC_PCI_H */ 301#endif /* __ASM_PARISC_PCI_H */
diff --git a/include/asm-parisc/prefetch.h b/include/asm-parisc/prefetch.h
new file mode 100644
index 000000000000..5d021726fa33
--- /dev/null
+++ b/include/asm-parisc/prefetch.h
@@ -0,0 +1,39 @@
1/*
2 * include/asm-parisc/prefetch.h
3 *
4 * PA 2.0 defines data prefetch instructions on page 6-11 of the Kane book.
5 * In addition, many implementations do hardware prefetching of both
6 * instructions and data.
7 *
8 * PA7300LC (page 14-4 of the ERS) also implements prefetching by a load
9 * to gr0 but not in a way that Linux can use. If the load would cause an
10 * interruption (eg due to prefetching 0), it is suppressed on PA2.0
11 * processors, but not on 7300LC.
12 *
13 */
14
15#ifndef __ASM_PARISC_PREFETCH_H
16#define __ASM_PARISC_PREFETCH_H
17
18#ifndef __ASSEMBLY__
19#ifdef CONFIG_PREFETCH
20
21#define ARCH_HAS_PREFETCH
22extern inline void prefetch(const void *addr)
23{
24 __asm__("ldw 0(%0), %%r0" : : "r" (addr));
25}
26
27/* LDD is a PA2.0 addition. */
28#ifdef CONFIG_PA20
29#define ARCH_HAS_PREFETCHW
30extern inline void prefetchw(const void *addr)
31{
32 __asm__("ldd 0(%0), %%r0" : : "r" (addr));
33}
34#endif /* CONFIG_PA20 */
35
36#endif /* CONFIG_PREFETCH */
37#endif /* __ASSEMBLY__ */
38
39#endif /* __ASM_PARISC_PROCESSOR_H */
diff --git a/include/asm-parisc/processor.h b/include/asm-parisc/processor.h
index b73626f040da..fd7866dc8c83 100644
--- a/include/asm-parisc/processor.h
+++ b/include/asm-parisc/processor.h
@@ -9,6 +9,8 @@
9#define __ASM_PARISC_PROCESSOR_H 9#define __ASM_PARISC_PROCESSOR_H
10 10
11#ifndef __ASSEMBLY__ 11#ifndef __ASSEMBLY__
12#include <asm/prefetch.h> /* lockdep.h needs <linux/prefetch.h> */
13
12#include <linux/threads.h> 14#include <linux/threads.h>
13#include <linux/spinlock_types.h> 15#include <linux/spinlock_types.h>
14 16
@@ -276,7 +278,7 @@ on downward growing arches, it looks like this:
276 */ 278 */
277 279
278#ifdef __LP64__ 280#ifdef __LP64__
279#define USER_WIDE_MODE (personality(current->personality) == PER_LINUX) 281#define USER_WIDE_MODE (!test_thread_flag(TIF_32BIT))
280#else 282#else
281#define USER_WIDE_MODE 0 283#define USER_WIDE_MODE 0
282#endif 284#endif
@@ -328,33 +330,20 @@ extern unsigned long get_wchan(struct task_struct *p);
328#define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0]) 330#define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0])
329#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30]) 331#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
330 332
333#define cpu_relax() barrier()
331 334
332/* 335/* Used as a macro to identify the combined VIPT/PIPT cached
333 * PA 2.0 defines data prefetch instructions on page 6-11 of the Kane book. 336 * CPUs which require a guarantee of coherency (no inequivalent
334 * In addition, many implementations do hardware prefetching of both 337 * aliases with different data, whether clean or not) to operate */
335 * instructions and data. 338static inline int parisc_requires_coherency(void)
336 *
337 * PA7300LC (page 14-4 of the ERS) also implements prefetching by a load
338 * to gr0 but not in a way that Linux can use. If the load would cause an
339 * interruption (eg due to prefetching 0), it is suppressed on PA2.0
340 * processors, but not on 7300LC.
341 */
342#ifdef CONFIG_PREFETCH
343#define ARCH_HAS_PREFETCH
344#define ARCH_HAS_PREFETCHW
345
346extern inline void prefetch(const void *addr)
347{
348 __asm__("ldw 0(%0), %%r0" : : "r" (addr));
349}
350
351extern inline void prefetchw(const void *addr)
352{ 339{
353 __asm__("ldd 0(%0), %%r0" : : "r" (addr)); 340#ifdef CONFIG_PA8X00
354} 341 /* FIXME: also pa8900 - when we see one */
342 return boot_cpu_data.cpu_type == mako;
343#else
344 return 0;
355#endif 345#endif
356 346}
357#define cpu_relax() barrier()
358 347
359#endif /* __ASSEMBLY__ */ 348#endif /* __ASSEMBLY__ */
360 349
diff --git a/include/asm-parisc/ropes.h b/include/asm-parisc/ropes.h
new file mode 100644
index 000000000000..5542dd00472b
--- /dev/null
+++ b/include/asm-parisc/ropes.h
@@ -0,0 +1,322 @@
1#ifndef _ASM_PARISC_ROPES_H_
2#define _ASM_PARISC_ROPES_H_
3
4#include <asm-parisc/parisc-device.h>
5
6#ifdef CONFIG_64BIT
7/* "low end" PA8800 machines use ZX1 chipset: PAT PDC and only run 64-bit */
8#define ZX1_SUPPORT
9#endif
10
11#ifdef CONFIG_PROC_FS
12/* depends on proc fs support. But costs CPU performance */
13#undef SBA_COLLECT_STATS
14#endif
15
16/*
17** The number of pdir entries to "free" before issueing
18** a read to PCOM register to flush out PCOM writes.
19** Interacts with allocation granularity (ie 4 or 8 entries
20** allocated and free'd/purged at a time might make this
21** less interesting).
22*/
23#define DELAYED_RESOURCE_CNT 16
24
25#define MAX_IOC 2 /* per Ike. Pluto/Astro only have 1. */
26#define ROPES_PER_IOC 8 /* per Ike half or Pluto/Astro */
27
28struct ioc {
29 void __iomem *ioc_hpa; /* I/O MMU base address */
30 char *res_map; /* resource map, bit == pdir entry */
31 u64 *pdir_base; /* physical base address */
32 unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
33 unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
34#ifdef ZX1_SUPPORT
35 unsigned long iovp_mask; /* help convert IOVA to IOVP */
36#endif
37 unsigned long *res_hint; /* next avail IOVP - circular search */
38 spinlock_t res_lock;
39 unsigned int res_bitshift; /* from the LEFT! */
40 unsigned int res_size; /* size of resource map in bytes */
41#ifdef SBA_HINT_SUPPORT
42/* FIXME : DMA HINTs not used */
43 unsigned long hint_mask_pdir; /* bits used for DMA hints */
44 unsigned int hint_shift_pdir;
45#endif
46#if DELAYED_RESOURCE_CNT > 0
47 int saved_cnt;
48 struct sba_dma_pair {
49 dma_addr_t iova;
50 size_t size;
51 } saved[DELAYED_RESOURCE_CNT];
52#endif
53
54#ifdef SBA_COLLECT_STATS
55#define SBA_SEARCH_SAMPLE 0x100
56 unsigned long avg_search[SBA_SEARCH_SAMPLE];
57 unsigned long avg_idx; /* current index into avg_search */
58 unsigned long used_pages;
59 unsigned long msingle_calls;
60 unsigned long msingle_pages;
61 unsigned long msg_calls;
62 unsigned long msg_pages;
63 unsigned long usingle_calls;
64 unsigned long usingle_pages;
65 unsigned long usg_calls;
66 unsigned long usg_pages;
67#endif
68 /* STUFF We don't need in performance path */
69 unsigned int pdir_size; /* in bytes, determined by IOV Space size */
70};
71
72struct sba_device {
73 struct sba_device *next; /* list of SBA's in system */
74 struct parisc_device *dev; /* dev found in bus walk */
75 const char *name;
76 void __iomem *sba_hpa; /* base address */
77 spinlock_t sba_lock;
78 unsigned int flags; /* state/functionality enabled */
79 unsigned int hw_rev; /* HW revision of chip */
80
81 struct resource chip_resv; /* MMIO reserved for chip */
82 struct resource iommu_resv; /* MMIO reserved for iommu */
83
84 unsigned int num_ioc; /* number of on-board IOC's */
85 struct ioc ioc[MAX_IOC];
86};
87
88#define ASTRO_RUNWAY_PORT 0x582
89#define IKE_MERCED_PORT 0x803
90#define REO_MERCED_PORT 0x804
91#define REOG_MERCED_PORT 0x805
92#define PLUTO_MCKINLEY_PORT 0x880
93
94static inline int IS_ASTRO(struct parisc_device *d) {
95 return d->id.hversion == ASTRO_RUNWAY_PORT;
96}
97
98static inline int IS_IKE(struct parisc_device *d) {
99 return d->id.hversion == IKE_MERCED_PORT;
100}
101
102static inline int IS_PLUTO(struct parisc_device *d) {
103 return d->id.hversion == PLUTO_MCKINLEY_PORT;
104}
105
106#define PLUTO_IOVA_BASE (1UL*1024*1024*1024) /* 1GB */
107#define PLUTO_IOVA_SIZE (1UL*1024*1024*1024) /* 1GB */
108#define PLUTO_GART_SIZE (PLUTO_IOVA_SIZE / 2)
109
110#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
111
112#define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL
113
114#define SBA_FUNC_ID 0x0000 /* function id */
115#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */
116
117#define SBA_FUNC_SIZE 4096 /* SBA configuration function reg set */
118
119#define ASTRO_IOC_OFFSET (32 * SBA_FUNC_SIZE)
120#define PLUTO_IOC_OFFSET (1 * SBA_FUNC_SIZE)
121/* Ike's IOC's occupy functions 2 and 3 */
122#define IKE_IOC_OFFSET(p) ((p+2) * SBA_FUNC_SIZE)
123
124#define IOC_CTRL 0x8 /* IOC_CTRL offset */
125#define IOC_CTRL_TC (1 << 0) /* TOC Enable */
126#define IOC_CTRL_CE (1 << 1) /* Coalesce Enable */
127#define IOC_CTRL_DE (1 << 2) /* Dillon Enable */
128#define IOC_CTRL_RM (1 << 8) /* Real Mode */
129#define IOC_CTRL_NC (1 << 9) /* Non Coherent Mode */
130#define IOC_CTRL_D4 (1 << 11) /* Disable 4-byte coalescing */
131#define IOC_CTRL_DD (1 << 13) /* Disable distr. LMMIO range coalescing */
132
133/*
134** Offsets into MBIB (Function 0 on Ike and hopefully Astro)
135** Firmware programs this stuff. Don't touch it.
136*/
137#define LMMIO_DIRECT0_BASE 0x300
138#define LMMIO_DIRECT0_MASK 0x308
139#define LMMIO_DIRECT0_ROUTE 0x310
140
141#define LMMIO_DIST_BASE 0x360
142#define LMMIO_DIST_MASK 0x368
143#define LMMIO_DIST_ROUTE 0x370
144
145#define IOS_DIST_BASE 0x390
146#define IOS_DIST_MASK 0x398
147#define IOS_DIST_ROUTE 0x3A0
148
149#define IOS_DIRECT_BASE 0x3C0
150#define IOS_DIRECT_MASK 0x3C8
151#define IOS_DIRECT_ROUTE 0x3D0
152
153/*
154** Offsets into I/O TLB (Function 2 and 3 on Ike)
155*/
156#define ROPE0_CTL 0x200 /* "regbus pci0" */
157#define ROPE1_CTL 0x208
158#define ROPE2_CTL 0x210
159#define ROPE3_CTL 0x218
160#define ROPE4_CTL 0x220
161#define ROPE5_CTL 0x228
162#define ROPE6_CTL 0x230
163#define ROPE7_CTL 0x238
164
165#define IOC_ROPE0_CFG 0x500 /* pluto only */
166#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
167
168#define HF_ENABLE 0x40
169
170#define IOC_IBASE 0x300 /* IO TLB */
171#define IOC_IMASK 0x308
172#define IOC_PCOM 0x310
173#define IOC_TCNFG 0x318
174#define IOC_PDIR_BASE 0x320
175
176/*
177** IOC supports 4/8/16/64KB page sizes (see TCNFG register)
178** It's safer (avoid memory corruption) to keep DMA page mappings
179** equivalently sized to VM PAGE_SIZE.
180**
181** We really can't avoid generating a new mapping for each
182** page since the Virtual Coherence Index has to be generated
183** and updated for each page.
184**
185** PAGE_SIZE could be greater than IOVP_SIZE. But not the inverse.
186*/
187#define IOVP_SIZE PAGE_SIZE
188#define IOVP_SHIFT PAGE_SHIFT
189#define IOVP_MASK PAGE_MASK
190
191#define SBA_PERF_CFG 0x708 /* Performance Counter stuff */
192#define SBA_PERF_MASK1 0x718
193#define SBA_PERF_MASK2 0x730
194
195/*
196** Offsets into PCI Performance Counters (functions 12 and 13)
197** Controlled by PERF registers in function 2 & 3 respectively.
198*/
199#define SBA_PERF_CNT1 0x200
200#define SBA_PERF_CNT2 0x208
201#define SBA_PERF_CNT3 0x210
202
203/*
204** lba_device: Per instance Elroy data structure
205*/
206struct lba_device {
207 struct pci_hba_data hba;
208
209 spinlock_t lba_lock;
210 void *iosapic_obj;
211
212#ifdef CONFIG_64BIT
213 void __iomem *iop_base; /* PA_VIEW - for IO port accessor funcs */
214#endif
215
216 int flags; /* state/functionality enabled */
217 int hw_rev; /* HW revision of chip */
218};
219
220#define ELROY_HVERS 0x782
221#define MERCURY_HVERS 0x783
222#define QUICKSILVER_HVERS 0x784
223
224static inline int IS_ELROY(struct parisc_device *d) {
225 return (d->id.hversion == ELROY_HVERS);
226}
227
228static inline int IS_MERCURY(struct parisc_device *d) {
229 return (d->id.hversion == MERCURY_HVERS);
230}
231
232static inline int IS_QUICKSILVER(struct parisc_device *d) {
233 return (d->id.hversion == QUICKSILVER_HVERS);
234}
235
236static inline int agp_mode_mercury(void __iomem *hpa) {
237 u64 bus_mode;
238
239 bus_mode = readl(hpa + 0x0620);
240 if (bus_mode & 1)
241 return 1;
242
243 return 0;
244}
245
246/*
247** I/O SAPIC init function
248** Caller knows where an I/O SAPIC is. LBA has an integrated I/O SAPIC.
249** Call setup as part of per instance initialization.
250** (ie *not* init_module() function unless only one is present.)
251** fixup_irq is to initialize PCI IRQ line support and
252** virtualize pcidev->irq value. To be called by pci_fixup_bus().
253*/
254extern void *iosapic_register(unsigned long hpa);
255extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev);
256
257#define LBA_FUNC_ID 0x0000 /* function id */
258#define LBA_FCLASS 0x0008 /* function class, bist, header, rev... */
259#define LBA_CAPABLE 0x0030 /* capabilities register */
260
261#define LBA_PCI_CFG_ADDR 0x0040 /* poke CFG address here */
262#define LBA_PCI_CFG_DATA 0x0048 /* read or write data here */
263
264#define LBA_PMC_MTLT 0x0050 /* Firmware sets this - read only. */
265#define LBA_FW_SCRATCH 0x0058 /* Firmware writes the PCI bus number here. */
266#define LBA_ERROR_ADDR 0x0070 /* On error, address gets logged here */
267
268#define LBA_ARB_MASK 0x0080 /* bit 0 enable arbitration. PAT/PDC enables */
269#define LBA_ARB_PRI 0x0088 /* firmware sets this. */
270#define LBA_ARB_MODE 0x0090 /* firmware sets this. */
271#define LBA_ARB_MTLT 0x0098 /* firmware sets this. */
272
273#define LBA_MOD_ID 0x0100 /* Module ID. PDC_PAT_CELL reports 4 */
274
275#define LBA_STAT_CTL 0x0108 /* Status & Control */
276#define LBA_BUS_RESET 0x01 /* Deassert PCI Bus Reset Signal */
277#define CLEAR_ERRLOG 0x10 /* "Clear Error Log" cmd */
278#define CLEAR_ERRLOG_ENABLE 0x20 /* "Clear Error Log" Enable */
279#define HF_ENABLE 0x40 /* enable HF mode (default is -1 mode) */
280
281#define LBA_LMMIO_BASE 0x0200 /* < 4GB I/O address range */
282#define LBA_LMMIO_MASK 0x0208
283
284#define LBA_GMMIO_BASE 0x0210 /* > 4GB I/O address range */
285#define LBA_GMMIO_MASK 0x0218
286
287#define LBA_WLMMIO_BASE 0x0220 /* All < 4GB ranges under the same *SBA* */
288#define LBA_WLMMIO_MASK 0x0228
289
290#define LBA_WGMMIO_BASE 0x0230 /* All > 4GB ranges under the same *SBA* */
291#define LBA_WGMMIO_MASK 0x0238
292
293#define LBA_IOS_BASE 0x0240 /* I/O port space for this LBA */
294#define LBA_IOS_MASK 0x0248
295
296#define LBA_ELMMIO_BASE 0x0250 /* Extra LMMIO range */
297#define LBA_ELMMIO_MASK 0x0258
298
299#define LBA_EIOS_BASE 0x0260 /* Extra I/O port space */
300#define LBA_EIOS_MASK 0x0268
301
302#define LBA_GLOBAL_MASK 0x0270 /* Mercury only: Global Address Mask */
303#define LBA_DMA_CTL 0x0278 /* firmware sets this */
304
305#define LBA_IBASE 0x0300 /* SBA DMA support */
306#define LBA_IMASK 0x0308
307
308/* FIXME: ignore DMA Hint stuff until we can measure performance */
309#define LBA_HINT_CFG 0x0310
310#define LBA_HINT_BASE 0x0380 /* 14 registers at every 8 bytes. */
311
312#define LBA_BUS_MODE 0x0620
313
314/* ERROR regs are needed for config cycle kluges */
315#define LBA_ERROR_CONFIG 0x0680
316#define LBA_SMART_MODE 0x20
317#define LBA_ERROR_STATUS 0x0688
318#define LBA_ROPE_CTL 0x06A0
319
320#define LBA_IOSAPIC_BASE 0x800 /* Offset of IRQ logic */
321
322#endif /*_ASM_PARISC_ROPES_H_*/
diff --git a/include/asm-parisc/serial.h b/include/asm-parisc/serial.h
index 82fd820d684f..d7e3cc60dbc3 100644
--- a/include/asm-parisc/serial.h
+++ b/include/asm-parisc/serial.h
@@ -3,20 +3,8 @@
3 */ 3 */
4 4
5/* 5/*
6 * This assumes you have a 7.272727 MHz clock for your UART. 6 * This is used for 16550-compatible UARTs
7 * The documentation implies a 40Mhz clock, and elsewhere a 7Mhz clock
8 * Clarified: 7.2727MHz on LASI. Not yet clarified for DINO
9 */ 7 */
8#define BASE_BAUD ( 1843200 / 16 )
10 9
11#define LASI_BASE_BAUD ( 7272727 / 16 )
12#define BASE_BAUD LASI_BASE_BAUD
13
14/*
15 * We don't use the ISA probing code, so these entries are just to reserve
16 * space. Some example (maximal) configurations:
17 * - 712 w/ additional Lasi & RJ16 ports: 4
18 * - J5k w/ PCI serial cards: 2 + 4 * card ~= 34
19 * A500 w/ PCI serial cards: 5 + 4 * card ~= 17
20 */
21
22#define SERIAL_PORT_DFNS 10#define SERIAL_PORT_DFNS
diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index e1825530365d..f3d2090a18dc 100644
--- a/include/asm-parisc/spinlock.h
+++ b/include/asm-parisc/spinlock.h
@@ -56,50 +56,79 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
56} 56}
57 57
58/* 58/*
59 * Read-write spinlocks, allowing multiple readers 59 * Read-write spinlocks, allowing multiple readers but only one writer.
60 * but only one writer. 60 * Linux rwlocks are unfair to writers; they can be starved for an indefinite
61 * time by readers. With care, they can also be taken in interrupt context.
62 *
63 * In the PA-RISC implementation, we have a spinlock and a counter.
64 * Readers use the lock to serialise their access to the counter (which
65 * records how many readers currently hold the lock).
66 * Writers hold the spinlock, preventing any readers or other writers from
67 * grabbing the rwlock.
61 */ 68 */
62 69
63#define __raw_read_trylock(lock) generic__raw_read_trylock(lock) 70/* Note that we have to ensure interrupts are disabled in case we're
64 71 * interrupted by some other code that wants to grab the same read lock */
65/* read_lock, read_unlock are pretty straightforward. Of course it somehow
66 * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
67
68static __inline__ void __raw_read_lock(raw_rwlock_t *rw) 72static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
69{ 73{
70 __raw_spin_lock(&rw->lock); 74 unsigned long flags;
71 75 local_irq_save(flags);
76 __raw_spin_lock_flags(&rw->lock, flags);
72 rw->counter++; 77 rw->counter++;
73
74 __raw_spin_unlock(&rw->lock); 78 __raw_spin_unlock(&rw->lock);
79 local_irq_restore(flags);
75} 80}
76 81
82/* Note that we have to ensure interrupts are disabled in case we're
83 * interrupted by some other code that wants to grab the same read lock */
77static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) 84static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
78{ 85{
79 __raw_spin_lock(&rw->lock); 86 unsigned long flags;
80 87 local_irq_save(flags);
88 __raw_spin_lock_flags(&rw->lock, flags);
81 rw->counter--; 89 rw->counter--;
82
83 __raw_spin_unlock(&rw->lock); 90 __raw_spin_unlock(&rw->lock);
91 local_irq_restore(flags);
84} 92}
85 93
86/* write_lock is less trivial. We optimistically grab the lock and check 94/* Note that we have to ensure interrupts are disabled in case we're
87 * if we surprised any readers. If so we release the lock and wait till 95 * interrupted by some other code that wants to grab the same read lock */
88 * they're all gone before trying again 96static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
89 * 97{
90 * Also note that we don't use the _irqsave / _irqrestore suffixes here. 98 unsigned long flags;
91 * If we're called with interrupts enabled and we've got readers (or other 99 retry:
92 * writers) in interrupt handlers someone fucked up and we'd dead-lock 100 local_irq_save(flags);
93 * sooner or later anyway. prumpf */ 101 if (__raw_spin_trylock(&rw->lock)) {
102 rw->counter++;
103 __raw_spin_unlock(&rw->lock);
104 local_irq_restore(flags);
105 return 1;
106 }
94 107
95static __inline__ void __raw_write_lock(raw_rwlock_t *rw) 108 local_irq_restore(flags);
109 /* If write-locked, we fail to acquire the lock */
110 if (rw->counter < 0)
111 return 0;
112
113 /* Wait until we have a realistic chance at the lock */
114 while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
115 cpu_relax();
116
117 goto retry;
118}
119
120/* Note that we have to ensure interrupts are disabled in case we're
121 * interrupted by some other code that wants to read_trylock() this lock */
122static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
96{ 123{
124 unsigned long flags;
97retry: 125retry:
98 __raw_spin_lock(&rw->lock); 126 local_irq_save(flags);
127 __raw_spin_lock_flags(&rw->lock, flags);
99 128
100 if(rw->counter != 0) { 129 if (rw->counter != 0) {
101 /* this basically never happens */
102 __raw_spin_unlock(&rw->lock); 130 __raw_spin_unlock(&rw->lock);
131 local_irq_restore(flags);
103 132
104 while (rw->counter != 0) 133 while (rw->counter != 0)
105 cpu_relax(); 134 cpu_relax();
@@ -107,31 +136,37 @@ retry:
107 goto retry; 136 goto retry;
108 } 137 }
109 138
110 /* got it. now leave without unlocking */ 139 rw->counter = -1; /* mark as write-locked */
111 rw->counter = -1; /* remember we are locked */ 140 mb();
141 local_irq_restore(flags);
112} 142}
113 143
114/* write_unlock is absolutely trivial - we don't have to wait for anything */ 144static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
115
116static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
117{ 145{
118 rw->counter = 0; 146 rw->counter = 0;
119 __raw_spin_unlock(&rw->lock); 147 __raw_spin_unlock(&rw->lock);
120} 148}
121 149
122static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) 150/* Note that we have to ensure interrupts are disabled in case we're
151 * interrupted by some other code that wants to read_trylock() this lock */
152static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
123{ 153{
124 __raw_spin_lock(&rw->lock); 154 unsigned long flags;
125 if (rw->counter != 0) { 155 int result = 0;
126 /* this basically never happens */ 156
127 __raw_spin_unlock(&rw->lock); 157 local_irq_save(flags);
128 158 if (__raw_spin_trylock(&rw->lock)) {
129 return 0; 159 if (rw->counter == 0) {
160 rw->counter = -1;
161 result = 1;
162 } else {
163 /* Read-locked. Oh well. */
164 __raw_spin_unlock(&rw->lock);
165 }
130 } 166 }
167 local_irq_restore(flags);
131 168
132 /* got it. now leave without unlocking */ 169 return result;
133 rw->counter = -1; /* remember we are locked */
134 return 1;
135} 170}
136 171
137/* 172/*
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index 77069df92bf8..1022737f4f34 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -14,34 +14,36 @@
14 14
15#ifdef __KERNEL__ 15#ifdef __KERNEL__
16 16
17#ifndef __ASSEMBLY__ 17#include <asm/asm-compat.h>
18 18
19/* firmware feature bitmask values */ 19/* firmware feature bitmask values */
20#define FIRMWARE_MAX_FEATURES 63 20#define FIRMWARE_MAX_FEATURES 63
21 21
22#define FW_FEATURE_PFT (1UL<<0) 22#define FW_FEATURE_PFT ASM_CONST(0x0000000000000001)
23#define FW_FEATURE_TCE (1UL<<1) 23#define FW_FEATURE_TCE ASM_CONST(0x0000000000000002)
24#define FW_FEATURE_SPRG0 (1UL<<2) 24#define FW_FEATURE_SPRG0 ASM_CONST(0x0000000000000004)
25#define FW_FEATURE_DABR (1UL<<3) 25#define FW_FEATURE_DABR ASM_CONST(0x0000000000000008)
26#define FW_FEATURE_COPY (1UL<<4) 26#define FW_FEATURE_COPY ASM_CONST(0x0000000000000010)
27#define FW_FEATURE_ASR (1UL<<5) 27#define FW_FEATURE_ASR ASM_CONST(0x0000000000000020)
28#define FW_FEATURE_DEBUG (1UL<<6) 28#define FW_FEATURE_DEBUG ASM_CONST(0x0000000000000040)
29#define FW_FEATURE_TERM (1UL<<7) 29#define FW_FEATURE_TERM ASM_CONST(0x0000000000000080)
30#define FW_FEATURE_PERF (1UL<<8) 30#define FW_FEATURE_PERF ASM_CONST(0x0000000000000100)
31#define FW_FEATURE_DUMP (1UL<<9) 31#define FW_FEATURE_DUMP ASM_CONST(0x0000000000000200)
32#define FW_FEATURE_INTERRUPT (1UL<<10) 32#define FW_FEATURE_INTERRUPT ASM_CONST(0x0000000000000400)
33#define FW_FEATURE_MIGRATE (1UL<<11) 33#define FW_FEATURE_MIGRATE ASM_CONST(0x0000000000000800)
34#define FW_FEATURE_PERFMON (1UL<<12) 34#define FW_FEATURE_PERFMON ASM_CONST(0x0000000000001000)
35#define FW_FEATURE_CRQ (1UL<<13) 35#define FW_FEATURE_CRQ ASM_CONST(0x0000000000002000)
36#define FW_FEATURE_VIO (1UL<<14) 36#define FW_FEATURE_VIO ASM_CONST(0x0000000000004000)
37#define FW_FEATURE_RDMA (1UL<<15) 37#define FW_FEATURE_RDMA ASM_CONST(0x0000000000008000)
38#define FW_FEATURE_LLAN (1UL<<16) 38#define FW_FEATURE_LLAN ASM_CONST(0x0000000000010000)
39#define FW_FEATURE_BULK (1UL<<17) 39#define FW_FEATURE_BULK ASM_CONST(0x0000000000020000)
40#define FW_FEATURE_XDABR (1UL<<18) 40#define FW_FEATURE_XDABR ASM_CONST(0x0000000000040000)
41#define FW_FEATURE_MULTITCE (1UL<<19) 41#define FW_FEATURE_MULTITCE ASM_CONST(0x0000000000080000)
42#define FW_FEATURE_SPLPAR (1UL<<20) 42#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000)
43#define FW_FEATURE_ISERIES (1UL<<21) 43#define FW_FEATURE_ISERIES ASM_CONST(0x0000000000200000)
44#define FW_FEATURE_LPAR (1UL<<22) 44#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000)
45
46#ifndef __ASSEMBLY__
45 47
46enum { 48enum {
47#ifdef CONFIG_PPC64 49#ifdef CONFIG_PPC64
@@ -94,6 +96,23 @@ extern void machine_check_fwnmi(void);
94/* This is true if we are using the firmware NMI handler (typically LPAR) */ 96/* This is true if we are using the firmware NMI handler (typically LPAR) */
95extern int fwnmi_active; 97extern int fwnmi_active;
96 98
99#else /* __ASSEMBLY__ */
100
101#define BEGIN_FW_FTR_SECTION 96:
102
103#define END_FW_FTR_SECTION(msk, val) \
10497: \
105 .section __fw_ftr_fixup,"a"; \
106 .align 3; \
107 .llong msk; \
108 .llong val; \
109 .llong 96b; \
110 .llong 97b; \
111 .previous
112
113#define END_FW_FTR_SECTION_IFSET(msk) END_FW_FTR_SECTION((msk), (msk))
114#define END_FW_FTR_SECTION_IFCLR(msk) END_FW_FTR_SECTION((msk), 0)
115
97#endif /* __ASSEMBLY__ */ 116#endif /* __ASSEMBLY__ */
98#endif /* __KERNEL__ */ 117#endif /* __KERNEL__ */
99#endif /* __ASM_POWERPC_FIRMWARE_H */ 118#endif /* __ASM_POWERPC_FIRMWARE_H */
diff --git a/include/asm-powerpc/immap_qe.h b/include/asm-powerpc/immap_qe.h
new file mode 100644
index 000000000000..ce12f85fff9b
--- /dev/null
+++ b/include/asm-powerpc/immap_qe.h
@@ -0,0 +1,477 @@
1/*
2 * include/asm-powerpc/immap_qe.h
3 *
4 * QUICC Engine (QE) Internal Memory Map.
5 * The Internal Memory Map for devices with QE on them. This
6 * is the superset of all QE devices (8360, etc.).
7
8 * Copyright (C) 2006. Freescale Semicondutor, Inc. All rights reserved.
9 *
10 * Authors: Shlomi Gridish <gridish@freescale.com>
11 * Li Yang <leoli@freescale.com>
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18#ifndef _ASM_POWERPC_IMMAP_QE_H
19#define _ASM_POWERPC_IMMAP_QE_H
20#ifdef __KERNEL__
21
22#include <linux/kernel.h>
23
24#define QE_IMMAP_SIZE (1024 * 1024) /* 1MB from 1MB+IMMR */
25
26/* QE I-RAM */
27struct qe_iram {
28 __be32 iadd; /* I-RAM Address Register */
29 __be32 idata; /* I-RAM Data Register */
30 u8 res0[0x78];
31} __attribute__ ((packed));
32
33/* QE Interrupt Controller */
34struct qe_ic_regs {
35 __be32 qicr;
36 __be32 qivec;
37 __be32 qripnr;
38 __be32 qipnr;
39 __be32 qipxcc;
40 __be32 qipycc;
41 __be32 qipwcc;
42 __be32 qipzcc;
43 __be32 qimr;
44 __be32 qrimr;
45 __be32 qicnr;
46 u8 res0[0x4];
47 __be32 qiprta;
48 __be32 qiprtb;
49 u8 res1[0x4];
50 __be32 qricr;
51 u8 res2[0x20];
52 __be32 qhivec;
53 u8 res3[0x1C];
54} __attribute__ ((packed));
55
56/* Communications Processor */
57struct cp_qe {
58 __be32 cecr; /* QE command register */
59 __be32 ceccr; /* QE controller configuration register */
60 __be32 cecdr; /* QE command data register */
61 u8 res0[0xA];
62 __be16 ceter; /* QE timer event register */
63 u8 res1[0x2];
64 __be16 cetmr; /* QE timers mask register */
65 __be32 cetscr; /* QE time-stamp timer control register */
66 __be32 cetsr1; /* QE time-stamp register 1 */
67 __be32 cetsr2; /* QE time-stamp register 2 */
68 u8 res2[0x8];
69 __be32 cevter; /* QE virtual tasks event register */
70 __be32 cevtmr; /* QE virtual tasks mask register */
71 __be16 cercr; /* QE RAM control register */
72 u8 res3[0x2];
73 u8 res4[0x24];
74 __be16 ceexe1; /* QE external request 1 event register */
75 u8 res5[0x2];
76 __be16 ceexm1; /* QE external request 1 mask register */
77 u8 res6[0x2];
78 __be16 ceexe2; /* QE external request 2 event register */
79 u8 res7[0x2];
80 __be16 ceexm2; /* QE external request 2 mask register */
81 u8 res8[0x2];
82 __be16 ceexe3; /* QE external request 3 event register */
83 u8 res9[0x2];
84 __be16 ceexm3; /* QE external request 3 mask register */
85 u8 res10[0x2];
86 __be16 ceexe4; /* QE external request 4 event register */
87 u8 res11[0x2];
88 __be16 ceexm4; /* QE external request 4 mask register */
89 u8 res12[0x2];
90 u8 res13[0x280];
91} __attribute__ ((packed));
92
93/* QE Multiplexer */
94struct qe_mux {
95 __be32 cmxgcr; /* CMX general clock route register */
96 __be32 cmxsi1cr_l; /* CMX SI1 clock route low register */
97 __be32 cmxsi1cr_h; /* CMX SI1 clock route high register */
98 __be32 cmxsi1syr; /* CMX SI1 SYNC route register */
99 __be32 cmxucr1; /* CMX UCC1, UCC3 clock route register */
100 __be32 cmxucr2; /* CMX UCC5, UCC7 clock route register */
101 __be32 cmxucr3; /* CMX UCC2, UCC4 clock route register */
102 __be32 cmxucr4; /* CMX UCC6, UCC8 clock route register */
103 __be32 cmxupcr; /* CMX UPC clock route register */
104 u8 res0[0x1C];
105} __attribute__ ((packed));
106
107/* QE Timers */
108struct qe_timers {
109 u8 gtcfr1; /* Timer 1 and Timer 2 global config register*/
110 u8 res0[0x3];
111 u8 gtcfr2; /* Timer 3 and timer 4 global config register*/
112 u8 res1[0xB];
113 __be16 gtmdr1; /* Timer 1 mode register */
114 __be16 gtmdr2; /* Timer 2 mode register */
115 __be16 gtrfr1; /* Timer 1 reference register */
116 __be16 gtrfr2; /* Timer 2 reference register */
117 __be16 gtcpr1; /* Timer 1 capture register */
118 __be16 gtcpr2; /* Timer 2 capture register */
119 __be16 gtcnr1; /* Timer 1 counter */
120 __be16 gtcnr2; /* Timer 2 counter */
121 __be16 gtmdr3; /* Timer 3 mode register */
122 __be16 gtmdr4; /* Timer 4 mode register */
123 __be16 gtrfr3; /* Timer 3 reference register */
124 __be16 gtrfr4; /* Timer 4 reference register */
125 __be16 gtcpr3; /* Timer 3 capture register */
126 __be16 gtcpr4; /* Timer 4 capture register */
127 __be16 gtcnr3; /* Timer 3 counter */
128 __be16 gtcnr4; /* Timer 4 counter */
129 __be16 gtevr1; /* Timer 1 event register */
130 __be16 gtevr2; /* Timer 2 event register */
131 __be16 gtevr3; /* Timer 3 event register */
132 __be16 gtevr4; /* Timer 4 event register */
133 __be16 gtps; /* Timer 1 prescale register */
134 u8 res2[0x46];
135} __attribute__ ((packed));
136
137/* BRG */
138struct qe_brg {
139 __be32 brgc1; /* BRG1 configuration register */
140 __be32 brgc2; /* BRG2 configuration register */
141 __be32 brgc3; /* BRG3 configuration register */
142 __be32 brgc4; /* BRG4 configuration register */
143 __be32 brgc5; /* BRG5 configuration register */
144 __be32 brgc6; /* BRG6 configuration register */
145 __be32 brgc7; /* BRG7 configuration register */
146 __be32 brgc8; /* BRG8 configuration register */
147 __be32 brgc9; /* BRG9 configuration register */
148 __be32 brgc10; /* BRG10 configuration register */
149 __be32 brgc11; /* BRG11 configuration register */
150 __be32 brgc12; /* BRG12 configuration register */
151 __be32 brgc13; /* BRG13 configuration register */
152 __be32 brgc14; /* BRG14 configuration register */
153 __be32 brgc15; /* BRG15 configuration register */
154 __be32 brgc16; /* BRG16 configuration register */
155 u8 res0[0x40];
156} __attribute__ ((packed));
157
158/* SPI */
159struct spi {
160 u8 res0[0x20];
161 __be32 spmode; /* SPI mode register */
162 u8 res1[0x2];
163 u8 spie; /* SPI event register */
164 u8 res2[0x1];
165 u8 res3[0x2];
166 u8 spim; /* SPI mask register */
167 u8 res4[0x1];
168 u8 res5[0x1];
169 u8 spcom; /* SPI command register */
170 u8 res6[0x2];
171 __be32 spitd; /* SPI transmit data register (cpu mode) */
172 __be32 spird; /* SPI receive data register (cpu mode) */
173 u8 res7[0x8];
174} __attribute__ ((packed));
175
176/* SI */
177struct si1 {
178 __be16 siamr1; /* SI1 TDMA mode register */
179 __be16 sibmr1; /* SI1 TDMB mode register */
180 __be16 sicmr1; /* SI1 TDMC mode register */
181 __be16 sidmr1; /* SI1 TDMD mode register */
182 u8 siglmr1_h; /* SI1 global mode register high */
183 u8 res0[0x1];
184 u8 sicmdr1_h; /* SI1 command register high */
185 u8 res2[0x1];
186 u8 sistr1_h; /* SI1 status register high */
187 u8 res3[0x1];
188 __be16 sirsr1_h; /* SI1 RAM shadow address register high */
189 u8 sitarc1; /* SI1 RAM counter Tx TDMA */
190 u8 sitbrc1; /* SI1 RAM counter Tx TDMB */
191 u8 sitcrc1; /* SI1 RAM counter Tx TDMC */
192 u8 sitdrc1; /* SI1 RAM counter Tx TDMD */
193 u8 sirarc1; /* SI1 RAM counter Rx TDMA */
194 u8 sirbrc1; /* SI1 RAM counter Rx TDMB */
195 u8 sircrc1; /* SI1 RAM counter Rx TDMC */
196 u8 sirdrc1; /* SI1 RAM counter Rx TDMD */
197 u8 res4[0x8];
198 __be16 siemr1; /* SI1 TDME mode register 16 bits */
199 __be16 sifmr1; /* SI1 TDMF mode register 16 bits */
200 __be16 sigmr1; /* SI1 TDMG mode register 16 bits */
201 __be16 sihmr1; /* SI1 TDMH mode register 16 bits */
202 u8 siglmg1_l; /* SI1 global mode register low 8 bits */
203 u8 res5[0x1];
204 u8 sicmdr1_l; /* SI1 command register low 8 bits */
205 u8 res6[0x1];
206 u8 sistr1_l; /* SI1 status register low 8 bits */
207 u8 res7[0x1];
208 __be16 sirsr1_l; /* SI1 RAM shadow address register low 16 bits*/
209 u8 siterc1; /* SI1 RAM counter Tx TDME 8 bits */
210 u8 sitfrc1; /* SI1 RAM counter Tx TDMF 8 bits */
211 u8 sitgrc1; /* SI1 RAM counter Tx TDMG 8 bits */
212 u8 sithrc1; /* SI1 RAM counter Tx TDMH 8 bits */
213 u8 sirerc1; /* SI1 RAM counter Rx TDME 8 bits */
214 u8 sirfrc1; /* SI1 RAM counter Rx TDMF 8 bits */
215 u8 sirgrc1; /* SI1 RAM counter Rx TDMG 8 bits */
216 u8 sirhrc1; /* SI1 RAM counter Rx TDMH 8 bits */
217 u8 res8[0x8];
218 __be32 siml1; /* SI1 multiframe limit register */
219 u8 siedm1; /* SI1 extended diagnostic mode register */
220 u8 res9[0xBB];
221} __attribute__ ((packed));
222
223/* SI Routing Tables */
224struct sir {
225 u8 tx[0x400];
226 u8 rx[0x400];
227 u8 res0[0x800];
228} __attribute__ ((packed));
229
230/* USB Controller */
231struct usb_ctlr {
232 u8 usb_usmod;
233 u8 usb_usadr;
234 u8 usb_uscom;
235 u8 res1[1];
236 __be16 usb_usep1;
237 __be16 usb_usep2;
238 __be16 usb_usep3;
239 __be16 usb_usep4;
240 u8 res2[4];
241 __be16 usb_usber;
242 u8 res3[2];
243 __be16 usb_usbmr;
244 u8 res4[1];
245 u8 usb_usbs;
246 __be16 usb_ussft;
247 u8 res5[2];
248 __be16 usb_usfrn;
249 u8 res6[0x22];
250} __attribute__ ((packed));
251
252/* MCC */
253struct mcc {
254 __be32 mcce; /* MCC event register */
255 __be32 mccm; /* MCC mask register */
256 __be32 mccf; /* MCC configuration register */
257 __be32 merl; /* MCC emergency request level register */
258 u8 res0[0xF0];
259} __attribute__ ((packed));
260
261/* QE UCC Slow */
262struct ucc_slow {
263 __be32 gumr_l; /* UCCx general mode register (low) */
264 __be32 gumr_h; /* UCCx general mode register (high) */
265 __be16 upsmr; /* UCCx protocol-specific mode register */
266 u8 res0[0x2];
267 __be16 utodr; /* UCCx transmit on demand register */
268 __be16 udsr; /* UCCx data synchronization register */
269 __be16 ucce; /* UCCx event register */
270 u8 res1[0x2];
271 __be16 uccm; /* UCCx mask register */
272 u8 res2[0x1];
273 u8 uccs; /* UCCx status register */
274 u8 res3[0x24];
275 __be16 utpt;
276 u8 guemr; /* UCC general extended mode register */
277 u8 res4[0x200 - 0x091];
278} __attribute__ ((packed));
279
280/* QE UCC Fast */
281struct ucc_fast {
282 __be32 gumr; /* UCCx general mode register */
283 __be32 upsmr; /* UCCx protocol-specific mode register */
284 __be16 utodr; /* UCCx transmit on demand register */
285 u8 res0[0x2];
286 __be16 udsr; /* UCCx data synchronization register */
287 u8 res1[0x2];
288 __be32 ucce; /* UCCx event register */
289 __be32 uccm; /* UCCx mask register */
290 u8 uccs; /* UCCx status register */
291 u8 res2[0x7];
292 __be32 urfb; /* UCC receive FIFO base */
293 __be16 urfs; /* UCC receive FIFO size */
294 u8 res3[0x2];
295 __be16 urfet; /* UCC receive FIFO emergency threshold */
296 __be16 urfset; /* UCC receive FIFO special emergency
297 threshold */
298 __be32 utfb; /* UCC transmit FIFO base */
299 __be16 utfs; /* UCC transmit FIFO size */
300 u8 res4[0x2];
301 __be16 utfet; /* UCC transmit FIFO emergency threshold */
302 u8 res5[0x2];
303 __be16 utftt; /* UCC transmit FIFO transmit threshold */
304 u8 res6[0x2];
305 __be16 utpt; /* UCC transmit polling timer */
306 u8 res7[0x2];
307 __be32 urtry; /* UCC retry counter register */
308 u8 res8[0x4C];
309 u8 guemr; /* UCC general extended mode register */
310 u8 res9[0x100 - 0x091];
311} __attribute__ ((packed));
312
313/* QE UCC */
314struct ucc_common {
315 u8 res1[0x90];
316 u8 guemr;
317 u8 res2[0x200 - 0x091];
318} __attribute__ ((packed));
319
320struct ucc {
321 union {
322 struct ucc_slow slow;
323 struct ucc_fast fast;
324 struct ucc_common common;
325 };
326} __attribute__ ((packed));
327
328/* MultiPHY UTOPIA POS Controllers (UPC) */
329struct upc {
330 __be32 upgcr; /* UTOPIA/POS general configuration register */
331 __be32 uplpa; /* UTOPIA/POS last PHY address */
332 __be32 uphec; /* ATM HEC register */
333 __be32 upuc; /* UTOPIA/POS UCC configuration */
334 __be32 updc1; /* UTOPIA/POS device 1 configuration */
335 __be32 updc2; /* UTOPIA/POS device 2 configuration */
336 __be32 updc3; /* UTOPIA/POS device 3 configuration */
337 __be32 updc4; /* UTOPIA/POS device 4 configuration */
338 __be32 upstpa; /* UTOPIA/POS STPA threshold */
339 u8 res0[0xC];
340 __be32 updrs1_h; /* UTOPIA/POS device 1 rate select */
341 __be32 updrs1_l; /* UTOPIA/POS device 1 rate select */
342 __be32 updrs2_h; /* UTOPIA/POS device 2 rate select */
343 __be32 updrs2_l; /* UTOPIA/POS device 2 rate select */
344 __be32 updrs3_h; /* UTOPIA/POS device 3 rate select */
345 __be32 updrs3_l; /* UTOPIA/POS device 3 rate select */
346 __be32 updrs4_h; /* UTOPIA/POS device 4 rate select */
347 __be32 updrs4_l; /* UTOPIA/POS device 4 rate select */
348 __be32 updrp1; /* UTOPIA/POS device 1 receive priority low */
349 __be32 updrp2; /* UTOPIA/POS device 2 receive priority low */
350 __be32 updrp3; /* UTOPIA/POS device 3 receive priority low */
351 __be32 updrp4; /* UTOPIA/POS device 4 receive priority low */
352 __be32 upde1; /* UTOPIA/POS device 1 event */
353 __be32 upde2; /* UTOPIA/POS device 2 event */
354 __be32 upde3; /* UTOPIA/POS device 3 event */
355 __be32 upde4; /* UTOPIA/POS device 4 event */
356 __be16 uprp1;
357 __be16 uprp2;
358 __be16 uprp3;
359 __be16 uprp4;
360 u8 res1[0x8];
361 __be16 uptirr1_0; /* Device 1 transmit internal rate 0 */
362 __be16 uptirr1_1; /* Device 1 transmit internal rate 1 */
363 __be16 uptirr1_2; /* Device 1 transmit internal rate 2 */
364 __be16 uptirr1_3; /* Device 1 transmit internal rate 3 */
365 __be16 uptirr2_0; /* Device 2 transmit internal rate 0 */
366 __be16 uptirr2_1; /* Device 2 transmit internal rate 1 */
367 __be16 uptirr2_2; /* Device 2 transmit internal rate 2 */
368 __be16 uptirr2_3; /* Device 2 transmit internal rate 3 */
369 __be16 uptirr3_0; /* Device 3 transmit internal rate 0 */
370 __be16 uptirr3_1; /* Device 3 transmit internal rate 1 */
371 __be16 uptirr3_2; /* Device 3 transmit internal rate 2 */
372 __be16 uptirr3_3; /* Device 3 transmit internal rate 3 */
373 __be16 uptirr4_0; /* Device 4 transmit internal rate 0 */
374 __be16 uptirr4_1; /* Device 4 transmit internal rate 1 */
375 __be16 uptirr4_2; /* Device 4 transmit internal rate 2 */
376 __be16 uptirr4_3; /* Device 4 transmit internal rate 3 */
377 __be32 uper1; /* Device 1 port enable register */
378 __be32 uper2; /* Device 2 port enable register */
379 __be32 uper3; /* Device 3 port enable register */
380 __be32 uper4; /* Device 4 port enable register */
381 u8 res2[0x150];
382} __attribute__ ((packed));
383
384/* SDMA */
385struct sdma {
386 __be32 sdsr; /* Serial DMA status register */
387 __be32 sdmr; /* Serial DMA mode register */
388 __be32 sdtr1; /* SDMA system bus threshold register */
389 __be32 sdtr2; /* SDMA secondary bus threshold register */
390 __be32 sdhy1; /* SDMA system bus hysteresis register */
391 __be32 sdhy2; /* SDMA secondary bus hysteresis register */
392 __be32 sdta1; /* SDMA system bus address register */
393 __be32 sdta2; /* SDMA secondary bus address register */
394 __be32 sdtm1; /* SDMA system bus MSNUM register */
395 __be32 sdtm2; /* SDMA secondary bus MSNUM register */
396 u8 res0[0x10];
397 __be32 sdaqr; /* SDMA address bus qualify register */
398 __be32 sdaqmr; /* SDMA address bus qualify mask register */
399 u8 res1[0x4];
400 __be32 sdebcr; /* SDMA CAM entries base register */
401 u8 res2[0x38];
402} __attribute__ ((packed));
403
404/* Debug Space */
405struct dbg {
406 __be32 bpdcr; /* Breakpoint debug command register */
407 __be32 bpdsr; /* Breakpoint debug status register */
408 __be32 bpdmr; /* Breakpoint debug mask register */
409 __be32 bprmrr0; /* Breakpoint request mode risc register 0 */
410 __be32 bprmrr1; /* Breakpoint request mode risc register 1 */
411 u8 res0[0x8];
412 __be32 bprmtr0; /* Breakpoint request mode trb register 0 */
413 __be32 bprmtr1; /* Breakpoint request mode trb register 1 */
414 u8 res1[0x8];
415 __be32 bprmir; /* Breakpoint request mode immediate register */
416 __be32 bprmsr; /* Breakpoint request mode serial register */
417 __be32 bpemr; /* Breakpoint exit mode register */
418 u8 res2[0x48];
419} __attribute__ ((packed));
420
421/* RISC Special Registers (Trap and Breakpoint) */
422struct rsp {
423 u8 fixme[0x100];
424} __attribute__ ((packed));
425
426struct qe_immap {
427 struct qe_iram iram; /* I-RAM */
428 struct qe_ic_regs ic; /* Interrupt Controller */
429 struct cp_qe cp; /* Communications Processor */
430 struct qe_mux qmx; /* QE Multiplexer */
431 struct qe_timers qet; /* QE Timers */
432 struct spi spi[0x2]; /* spi */
433 struct mcc mcc; /* mcc */
434 struct qe_brg brg; /* brg */
435 struct usb_ctlr usb; /* USB */
436 struct si1 si1; /* SI */
437 u8 res11[0x800];
438 struct sir sir; /* SI Routing Tables */
439 struct ucc ucc1; /* ucc1 */
440 struct ucc ucc3; /* ucc3 */
441 struct ucc ucc5; /* ucc5 */
442 struct ucc ucc7; /* ucc7 */
443 u8 res12[0x600];
444 struct upc upc1; /* MultiPHY UTOPIA POS Ctrlr 1*/
445 struct ucc ucc2; /* ucc2 */
446 struct ucc ucc4; /* ucc4 */
447 struct ucc ucc6; /* ucc6 */
448 struct ucc ucc8; /* ucc8 */
449 u8 res13[0x600];
450 struct upc upc2; /* MultiPHY UTOPIA POS Ctrlr 2*/
451 struct sdma sdma; /* SDMA */
452 struct dbg dbg; /* Debug Space */
453 struct rsp rsp[0x2]; /* RISC Special Registers
454 (Trap and Breakpoint) */
455 u8 res14[0x300];
456 u8 res15[0x3A00];
457 u8 res16[0x8000]; /* 0x108000 - 0x110000 */
458 u8 muram[0xC000]; /* 0x110000 - 0x11C000
459 Multi-user RAM */
460 u8 res17[0x24000]; /* 0x11C000 - 0x140000 */
461 u8 res18[0xC0000]; /* 0x140000 - 0x200000 */
462} __attribute__ ((packed));
463
464extern struct qe_immap *qe_immr;
465extern phys_addr_t get_qe_base(void);
466
467static inline unsigned long immrbar_virt_to_phys(volatile void * address)
468{
469 if ( ((u32)address >= (u32)qe_immr) &&
470 ((u32)address < ((u32)qe_immr + QE_IMMAP_SIZE)) )
471 return (unsigned long)(address - (u32)qe_immr +
472 (u32)get_qe_base());
473 return (unsigned long)virt_to_phys(address);
474}
475
476#endif /* __KERNEL__ */
477#endif /* _ASM_POWERPC_IMMAP_QE_H */
diff --git a/include/asm-powerpc/qe.h b/include/asm-powerpc/qe.h
new file mode 100644
index 000000000000..a62168ec535f
--- /dev/null
+++ b/include/asm-powerpc/qe.h
@@ -0,0 +1,457 @@
1/*
2 * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
3 *
4 * Authors: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
6 *
7 * Description:
8 * QUICC Engine (QE) external definitions and structure.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15#ifndef _ASM_POWERPC_QE_H
16#define _ASM_POWERPC_QE_H
17#ifdef __KERNEL__
18
19#include <asm/immap_qe.h>
20
21#define QE_NUM_OF_SNUM 28
22#define QE_NUM_OF_BRGS 16
23#define QE_NUM_OF_PORTS 1024
24
25/* Memory partitions
26*/
27#define MEM_PART_SYSTEM 0
28#define MEM_PART_SECONDARY 1
29#define MEM_PART_MURAM 2
30
31/* Export QE common operations */
32extern void qe_reset(void);
33extern int par_io_init(struct device_node *np);
34extern int par_io_of_config(struct device_node *np);
35
36/* QE internal API */
37int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
38void qe_setbrg(u32 brg, u32 rate);
39int qe_get_snum(void);
40void qe_put_snum(u8 snum);
41u32 qe_muram_alloc(u32 size, u32 align);
42int qe_muram_free(u32 offset);
43u32 qe_muram_alloc_fixed(u32 offset, u32 size);
44void qe_muram_dump(void);
45void *qe_muram_addr(u32 offset);
46
/* Buffer descriptors
 *
 * Hardware buffer descriptor (BD) shared with the QE; layout is fixed
 * by hardware, hence packed.
 */
struct qe_bd {
	u16 status;	/* status/control flag bits */
	u16 length;	/* buffer length in bytes */
	u32 buf;	/* buffer address */
} __attribute__ ((packed));
53
54#define BD_STATUS_MASK 0xffff0000
55#define BD_LENGTH_MASK 0x0000ffff
56
57/* Alignment */
58#define QE_INTR_TABLE_ALIGN 16 /* ??? */
59#define QE_ALIGNMENT_OF_BD 8
60#define QE_ALIGNMENT_OF_PRAM 64
61
/* RISC allocation: which QE RISC engine(s) a resource may run on. */
enum qe_risc_allocation {
	QE_RISC_ALLOCATION_RISC1 = 1,		/* RISC 1 */
	QE_RISC_ALLOCATION_RISC2 = 2,		/* RISC 2 */
	QE_RISC_ALLOCATION_RISC1_AND_RISC2 = 3	/* Dynamically choose
						   RISC 1 or RISC 2 */
};
69
/* QE extended filtering Table Lookup Key Size.
 * Raw register encodings (0x3f / 0x5f), not byte counts. */
enum qe_fltr_tbl_lookup_key_size {
	QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES
		= 0x3f,		/* LookupKey parsed by the Generate LookupKey
				   CMD is truncated to 8 bytes */
	QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES
		= 0x5f,		/* LookupKey parsed by the Generate LookupKey
				   CMD is truncated to 16 bytes */
};
79
/* QE FLTR extended filtering Largest External Table Lookup Key Size.
 * Reuses the qe_fltr_tbl_lookup_key_size encodings, plus a "none" value. */
enum qe_fltr_largest_external_tbl_lookup_key_size {
	QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE
		= 0x0,	/* not used */
	QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES
		= QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES,	/* 8 bytes */
	QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES
		= QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES,	/* 16 bytes */
};
89
/* structure representing QE parameter RAM: the QE timers' table block.
 * Layout is fixed by hardware, hence packed. */
struct qe_timer_tables {
	u16 tm_base;	/* QE timer table base adr */
	u16 tm_ptr;	/* QE timer table pointer */
	u16 r_tmr;	/* QE timer mode register */
	u16 r_tmv;	/* QE timer valid register */
	u32 tm_cmd;	/* QE timer cmd register */
	u32 tm_cnt;	/* QE timer internal cnt */
} __attribute__ ((packed));
99
/* Size in bytes of one serialized Termination Action Descriptor. */
#define QE_FLTR_TAD_SIZE 8

/* QE extended filtering Termination Action Descriptor (TAD),
 * kept in raw serialized form. */
struct qe_fltr_tad {
	u8 serialized[QE_FLTR_TAD_SIZE];
} __attribute__ ((packed));
106
/* Communication Direction.
 * Values are a bitmask: COMM_DIR_RX | COMM_DIR_TX == COMM_DIR_RX_AND_TX. */
enum comm_dir {
	COMM_DIR_NONE = 0,
	COMM_DIR_RX = 1,
	COMM_DIR_TX = 2,
	COMM_DIR_RX_AND_TX = 3
};
114
/* Clocks and BRGs.
 * Clock-source identifiers used when routing clocks through the QE MUX
 * (see ucc_set_qe_mux_rxtx()); values are sequential starting at 0. */
enum qe_clock {
	QE_CLK_NONE = 0,
	QE_BRG1,		/* Baud Rate Generator 1 */
	QE_BRG2,		/* Baud Rate Generator 2 */
	QE_BRG3,		/* Baud Rate Generator 3 */
	QE_BRG4,		/* Baud Rate Generator 4 */
	QE_BRG5,		/* Baud Rate Generator 5 */
	QE_BRG6,		/* Baud Rate Generator 6 */
	QE_BRG7,		/* Baud Rate Generator 7 */
	QE_BRG8,		/* Baud Rate Generator 8 */
	QE_BRG9,		/* Baud Rate Generator 9 */
	QE_BRG10,		/* Baud Rate Generator 10 */
	QE_BRG11,		/* Baud Rate Generator 11 */
	QE_BRG12,		/* Baud Rate Generator 12 */
	QE_BRG13,		/* Baud Rate Generator 13 */
	QE_BRG14,		/* Baud Rate Generator 14 */
	QE_BRG15,		/* Baud Rate Generator 15 */
	QE_BRG16,		/* Baud Rate Generator 16 */
	QE_CLK1,		/* Clock 1 */
	QE_CLK2,		/* Clock 2 */
	QE_CLK3,		/* Clock 3 */
	QE_CLK4,		/* Clock 4 */
	QE_CLK5,		/* Clock 5 */
	QE_CLK6,		/* Clock 6 */
	QE_CLK7,		/* Clock 7 */
	QE_CLK8,		/* Clock 8 */
	QE_CLK9,		/* Clock 9 */
	QE_CLK10,		/* Clock 10 */
	QE_CLK11,		/* Clock 11 */
	QE_CLK12,		/* Clock 12 */
	QE_CLK13,		/* Clock 13 */
	QE_CLK14,		/* Clock 14 */
	QE_CLK15,		/* Clock 15 */
	QE_CLK16,		/* Clock 16 */
	QE_CLK17,		/* Clock 17 */
	QE_CLK18,		/* Clock 18 */
	QE_CLK19,		/* Clock 19 */
	QE_CLK20,		/* Clock 20 */
	QE_CLK21,		/* Clock 21 */
	QE_CLK22,		/* Clock 22 */
	QE_CLK23,		/* Clock 23 */
	QE_CLK24,		/* Clock 24 */
	QE_CLK_DUMMY,		/* sentinel / number of sources */
};
160
161/* QE CMXUCR Registers.
162 * There are two UCCs represented in each of the four CMXUCR registers.
163 * These values are for the UCC in the LSBs
164 */
165#define QE_CMXUCR_MII_ENET_MNG 0x00007000
166#define QE_CMXUCR_MII_ENET_MNG_SHIFT 12
167#define QE_CMXUCR_GRANT 0x00008000
168#define QE_CMXUCR_TSA 0x00004000
169#define QE_CMXUCR_BKPT 0x00000100
170#define QE_CMXUCR_TX_CLK_SRC_MASK 0x0000000F
171
172/* QE CMXGCR Registers.
173*/
174#define QE_CMXGCR_MII_ENET_MNG 0x00007000
175#define QE_CMXGCR_MII_ENET_MNG_SHIFT 12
176#define QE_CMXGCR_USBCS 0x0000000f
177
178/* QE CECR Commands.
179*/
180#define QE_CR_FLG 0x00010000
181#define QE_RESET 0x80000000
182#define QE_INIT_TX_RX 0x00000000
183#define QE_INIT_RX 0x00000001
184#define QE_INIT_TX 0x00000002
185#define QE_ENTER_HUNT_MODE 0x00000003
186#define QE_STOP_TX 0x00000004
187#define QE_GRACEFUL_STOP_TX 0x00000005
188#define QE_RESTART_TX 0x00000006
189#define QE_CLOSE_RX_BD 0x00000007
190#define QE_SWITCH_COMMAND 0x00000007
191#define QE_SET_GROUP_ADDRESS 0x00000008
192#define QE_START_IDMA 0x00000009
193#define QE_MCC_STOP_RX 0x00000009
194#define QE_ATM_TRANSMIT 0x0000000a
195#define QE_HPAC_CLEAR_ALL 0x0000000b
196#define QE_GRACEFUL_STOP_RX 0x0000001a
197#define QE_RESTART_RX 0x0000001b
198#define QE_HPAC_SET_PRIORITY 0x0000010b
199#define QE_HPAC_STOP_TX 0x0000020b
200#define QE_HPAC_STOP_RX 0x0000030b
201#define QE_HPAC_GRACEFUL_STOP_TX 0x0000040b
202#define QE_HPAC_GRACEFUL_STOP_RX 0x0000050b
203#define QE_HPAC_START_TX 0x0000060b
204#define QE_HPAC_START_RX 0x0000070b
205#define QE_USB_STOP_TX 0x0000000a
206#define QE_USB_RESTART_TX 0x0000000b
207#define QE_QMC_STOP_TX 0x0000000c
208#define QE_QMC_STOP_RX 0x0000000d
209#define QE_SS7_SU_FIL_RESET 0x0000000e
210/* jonathbr added from here down for 83xx */
211#define QE_RESET_BCS 0x0000000a
212#define QE_MCC_INIT_TX_RX_16 0x00000003
213#define QE_MCC_STOP_TX 0x00000004
214#define QE_MCC_INIT_TX_1 0x00000005
215#define QE_MCC_INIT_RX_1 0x00000006
216#define QE_MCC_RESET 0x00000007
217#define QE_SET_TIMER 0x00000008
218#define QE_RANDOM_NUMBER 0x0000000c
219#define QE_ATM_MULTI_THREAD_INIT 0x00000011
220#define QE_ASSIGN_PAGE 0x00000012
221#define QE_ADD_REMOVE_HASH_ENTRY 0x00000013
222#define QE_START_FLOW_CONTROL 0x00000014
223#define QE_STOP_FLOW_CONTROL 0x00000015
224#define QE_ASSIGN_PAGE_TO_DEVICE 0x00000016
225
226#define QE_ASSIGN_RISC 0x00000010
227#define QE_CR_MCN_NORMAL_SHIFT 6
228#define QE_CR_MCN_USB_SHIFT 4
229#define QE_CR_MCN_RISC_ASSIGN_SHIFT 8
230#define QE_CR_SNUM_SHIFT 17
231
232/* QE CECR Sub Block - sub block of QE command.
233*/
234#define QE_CR_SUBBLOCK_INVALID 0x00000000
235#define QE_CR_SUBBLOCK_USB 0x03200000
236#define QE_CR_SUBBLOCK_UCCFAST1 0x02000000
237#define QE_CR_SUBBLOCK_UCCFAST2 0x02200000
238#define QE_CR_SUBBLOCK_UCCFAST3 0x02400000
239#define QE_CR_SUBBLOCK_UCCFAST4 0x02600000
240#define QE_CR_SUBBLOCK_UCCFAST5 0x02800000
241#define QE_CR_SUBBLOCK_UCCFAST6 0x02a00000
242#define QE_CR_SUBBLOCK_UCCFAST7 0x02c00000
243#define QE_CR_SUBBLOCK_UCCFAST8 0x02e00000
244#define QE_CR_SUBBLOCK_UCCSLOW1 0x00000000
245#define QE_CR_SUBBLOCK_UCCSLOW2 0x00200000
246#define QE_CR_SUBBLOCK_UCCSLOW3 0x00400000
247#define QE_CR_SUBBLOCK_UCCSLOW4 0x00600000
248#define QE_CR_SUBBLOCK_UCCSLOW5 0x00800000
249#define QE_CR_SUBBLOCK_UCCSLOW6 0x00a00000
250#define QE_CR_SUBBLOCK_UCCSLOW7 0x00c00000
251#define QE_CR_SUBBLOCK_UCCSLOW8 0x00e00000
252#define QE_CR_SUBBLOCK_MCC1 0x03800000
253#define QE_CR_SUBBLOCK_MCC2 0x03a00000
254#define QE_CR_SUBBLOCK_MCC3 0x03000000
255#define QE_CR_SUBBLOCK_IDMA1 0x02800000
256#define QE_CR_SUBBLOCK_IDMA2 0x02a00000
257#define QE_CR_SUBBLOCK_IDMA3 0x02c00000
258#define QE_CR_SUBBLOCK_IDMA4 0x02e00000
259#define QE_CR_SUBBLOCK_HPAC 0x01e00000
260#define QE_CR_SUBBLOCK_SPI1 0x01400000
261#define QE_CR_SUBBLOCK_SPI2 0x01600000
262#define QE_CR_SUBBLOCK_RAND 0x01c00000
263#define QE_CR_SUBBLOCK_TIMER 0x01e00000
264#define QE_CR_SUBBLOCK_GENERAL 0x03c00000
265
266/* QE CECR Protocol - For non-MCC, specifies mode for QE CECR command */
267#define QE_CR_PROTOCOL_UNSPECIFIED 0x00 /* For all other protocols */
268#define QE_CR_PROTOCOL_HDLC_TRANSPARENT 0x00
269#define QE_CR_PROTOCOL_ATM_POS 0x0A
270#define QE_CR_PROTOCOL_ETHERNET 0x0C
271#define QE_CR_PROTOCOL_L2_SWITCH 0x0D
272
273/* BMR byte order */
274#define QE_BMR_BYTE_ORDER_BO_PPC 0x08 /* powerpc little endian */
275#define QE_BMR_BYTE_ORDER_BO_MOT 0x10 /* motorola big endian */
276#define QE_BMR_BYTE_ORDER_BO_MAX 0x18
277
278/* BRG configuration register */
279#define QE_BRGC_ENABLE 0x00010000
280#define QE_BRGC_DIVISOR_SHIFT 1
281#define QE_BRGC_DIVISOR_MAX 0xFFF
282#define QE_BRGC_DIV16 1
283
284/* QE Timers registers */
285#define QE_GTCFR1_PCAS 0x80
286#define QE_GTCFR1_STP2 0x20
287#define QE_GTCFR1_RST2 0x10
288#define QE_GTCFR1_GM2 0x08
289#define QE_GTCFR1_GM1 0x04
290#define QE_GTCFR1_STP1 0x02
291#define QE_GTCFR1_RST1 0x01
292
/* SDMA registers */

/* SDSR - SDMA status register: bus-error event bits */
#define QE_SDSR_BER1 0x02000000
#define QE_SDSR_BER2 0x01000000

/* SDMR - SDMA mode register: global/error masks and arbitration fields */
#define QE_SDMR_GLB_1_MSK 0x80000000
#define QE_SDMR_ADR_SEL 0x20000000
#define QE_SDMR_BER1_MSK 0x02000000
#define QE_SDMR_BER2_MSK 0x01000000
#define QE_SDMR_EB1_MSK 0x00800000
#define QE_SDMR_ER1_MSK 0x00080000
#define QE_SDMR_ER2_MSK 0x00040000
#define QE_SDMR_CEN_MASK 0x0000E000
#define QE_SDMR_SBER_1 0x00000200
/* NOTE(review): SBER_2 has the same value as SBER_1.  Every other
 * 1/2 pair in this register uses adjacent bits (e.g. BER1_MSK/BER2_MSK
 * = 0x02000000/0x01000000), so 0x00000100 may have been intended —
 * confirm against the QE reference manual before relying on it. */
#define QE_SDMR_SBER_2 0x00000200
#define QE_SDMR_EB1_PR_MASK 0x000000C0
#define QE_SDMR_ER1_PR 0x00000008

/* Shifts for the multi-bit SDMR fields above */
#define QE_SDMR_CEN_SHIFT 13
#define QE_SDMR_EB1_PR_SHIFT 6

/* SDTM - SDMA thread manager: serial-number field shift */
#define QE_SDTM_MSNUM_SHIFT 24

/* SDEBCR - SDMA external bus control: base-address field mask */
#define QE_SDEBCR_BA_MASK 0x01FFFFFF
316
317/* UPC */
318#define UPGCR_PROTOCOL 0x80000000 /* protocol ul2 or pl2 */
319#define UPGCR_TMS 0x40000000 /* Transmit master/slave mode */
320#define UPGCR_RMS 0x20000000 /* Receive master/slave mode */
321#define UPGCR_ADDR 0x10000000 /* Master MPHY Addr multiplexing */
322#define UPGCR_DIAG 0x01000000 /* Diagnostic mode */
323
324/* UCC */
325#define UCC_GUEMR_MODE_MASK_RX 0x02
326#define UCC_GUEMR_MODE_MASK_TX 0x01
327#define UCC_GUEMR_MODE_FAST_RX 0x02
328#define UCC_GUEMR_MODE_FAST_TX 0x01
329#define UCC_GUEMR_MODE_SLOW_RX 0x00
330#define UCC_GUEMR_MODE_SLOW_TX 0x00
331#define UCC_GUEMR_SET_RESERVED3 0x10 /* Bit 3 in the guemr is reserved but
332 must be set 1 */
333
/* structure representing UCC SLOW parameter RAM.
 * Field layout mirrors the hardware parameter RAM, hence packed;
 * do not reorder members. */
struct ucc_slow_pram {
	u16 rbase;	/* RX BD base address */
	u16 tbase;	/* TX BD base address */
	u8 rfcr;	/* Rx function code */
	u8 tfcr;	/* Tx function code */
	u16 mrblr;	/* Rx buffer length */
	u32 rstate;	/* Rx internal state */
	u32 rptr;	/* Rx internal data pointer */
	u16 rbptr;	/* rb BD Pointer */
	u16 rcount;	/* Rx internal byte count */
	u32 rtemp;	/* Rx temp */
	u32 tstate;	/* Tx internal state */
	u32 tptr;	/* Tx internal data pointer */
	u16 tbptr;	/* Tx BD pointer */
	u16 tcount;	/* Tx byte count */
	u32 ttemp;	/* Tx temp */
	u32 rcrc;	/* temp receive CRC */
	u32 tcrc;	/* temp transmit CRC */
} __attribute__ ((packed));
354
355/* General UCC SLOW Mode Register (GUMRH & GUMRL) */
356#define UCC_SLOW_GUMR_H_CRC16 0x00004000
357#define UCC_SLOW_GUMR_H_CRC16CCITT 0x00000000
358#define UCC_SLOW_GUMR_H_CRC32CCITT 0x00008000
359#define UCC_SLOW_GUMR_H_REVD 0x00002000
360#define UCC_SLOW_GUMR_H_TRX 0x00001000
361#define UCC_SLOW_GUMR_H_TTX 0x00000800
362#define UCC_SLOW_GUMR_H_CDP 0x00000400
363#define UCC_SLOW_GUMR_H_CTSP 0x00000200
364#define UCC_SLOW_GUMR_H_CDS 0x00000100
365#define UCC_SLOW_GUMR_H_CTSS 0x00000080
366#define UCC_SLOW_GUMR_H_TFL 0x00000040
367#define UCC_SLOW_GUMR_H_RFW 0x00000020
368#define UCC_SLOW_GUMR_H_TXSY 0x00000010
369#define UCC_SLOW_GUMR_H_4SYNC 0x00000004
370#define UCC_SLOW_GUMR_H_8SYNC 0x00000008
371#define UCC_SLOW_GUMR_H_16SYNC 0x0000000c
372#define UCC_SLOW_GUMR_H_RTSM 0x00000002
373#define UCC_SLOW_GUMR_H_RSYN 0x00000001
374
375#define UCC_SLOW_GUMR_L_TCI 0x10000000
376#define UCC_SLOW_GUMR_L_RINV 0x02000000
377#define UCC_SLOW_GUMR_L_TINV 0x01000000
378#define UCC_SLOW_GUMR_L_TEND 0x00020000
379#define UCC_SLOW_GUMR_L_ENR 0x00000020
380#define UCC_SLOW_GUMR_L_ENT 0x00000010
381
382/* General UCC FAST Mode Register */
383#define UCC_FAST_GUMR_TCI 0x20000000
384#define UCC_FAST_GUMR_TRX 0x10000000
385#define UCC_FAST_GUMR_TTX 0x08000000
386#define UCC_FAST_GUMR_CDP 0x04000000
387#define UCC_FAST_GUMR_CTSP 0x02000000
388#define UCC_FAST_GUMR_CDS 0x01000000
389#define UCC_FAST_GUMR_CTSS 0x00800000
390#define UCC_FAST_GUMR_TXSY 0x00020000
391#define UCC_FAST_GUMR_RSYN 0x00010000
392#define UCC_FAST_GUMR_RTSM 0x00002000
393#define UCC_FAST_GUMR_REVD 0x00000400
394#define UCC_FAST_GUMR_ENR 0x00000020
395#define UCC_FAST_GUMR_ENT 0x00000010
396
397/* Slow UCC Event Register (UCCE) */
398#define UCC_SLOW_UCCE_GLR 0x1000
399#define UCC_SLOW_UCCE_GLT 0x0800
400#define UCC_SLOW_UCCE_DCC 0x0400
401#define UCC_SLOW_UCCE_FLG 0x0200
402#define UCC_SLOW_UCCE_AB 0x0200
403#define UCC_SLOW_UCCE_IDLE 0x0100
404#define UCC_SLOW_UCCE_GRA 0x0080
405#define UCC_SLOW_UCCE_TXE 0x0010
406#define UCC_SLOW_UCCE_RXF 0x0008
407#define UCC_SLOW_UCCE_CCR 0x0008
408#define UCC_SLOW_UCCE_RCH 0x0008
409#define UCC_SLOW_UCCE_BSY 0x0004
410#define UCC_SLOW_UCCE_TXB 0x0002
411#define UCC_SLOW_UCCE_TX 0x0002
412#define UCC_SLOW_UCCE_RX 0x0001
413#define UCC_SLOW_UCCE_GOV 0x0001
414#define UCC_SLOW_UCCE_GUN 0x0002
415#define UCC_SLOW_UCCE_GINT 0x0004
416#define UCC_SLOW_UCCE_IQOV 0x0008
417
418#define UCC_SLOW_UCCE_HDLC_SET (UCC_SLOW_UCCE_TXE | UCC_SLOW_UCCE_BSY | \
419 UCC_SLOW_UCCE_GRA | UCC_SLOW_UCCE_TXB | UCC_SLOW_UCCE_RXF | \
420 UCC_SLOW_UCCE_DCC | UCC_SLOW_UCCE_GLT | UCC_SLOW_UCCE_GLR)
421#define UCC_SLOW_UCCE_ENET_SET (UCC_SLOW_UCCE_TXE | UCC_SLOW_UCCE_BSY | \
422 UCC_SLOW_UCCE_GRA | UCC_SLOW_UCCE_TXB | UCC_SLOW_UCCE_RXF)
423#define UCC_SLOW_UCCE_TRANS_SET (UCC_SLOW_UCCE_TXE | UCC_SLOW_UCCE_BSY | \
424 UCC_SLOW_UCCE_GRA | UCC_SLOW_UCCE_TX | UCC_SLOW_UCCE_RX | \
425 UCC_SLOW_UCCE_DCC | UCC_SLOW_UCCE_GLT | UCC_SLOW_UCCE_GLR)
426#define UCC_SLOW_UCCE_UART_SET (UCC_SLOW_UCCE_BSY | UCC_SLOW_UCCE_GRA | \
427 UCC_SLOW_UCCE_TXB | UCC_SLOW_UCCE_TX | UCC_SLOW_UCCE_RX | \
428 UCC_SLOW_UCCE_GLT | UCC_SLOW_UCCE_GLR)
429#define UCC_SLOW_UCCE_QMC_SET (UCC_SLOW_UCCE_IQOV | UCC_SLOW_UCCE_GINT | \
430 UCC_SLOW_UCCE_GUN | UCC_SLOW_UCCE_GOV)
431
432#define UCC_SLOW_UCCE_OTHER (UCC_SLOW_UCCE_TXE | UCC_SLOW_UCCE_BSY | \
433 UCC_SLOW_UCCE_GRA | UCC_SLOW_UCCE_DCC | UCC_SLOW_UCCE_GLT | \
434 UCC_SLOW_UCCE_GLR)
435
436#define UCC_SLOW_INTR_TX UCC_SLOW_UCCE_TXB
437#define UCC_SLOW_INTR_RX (UCC_SLOW_UCCE_RXF | UCC_SLOW_UCCE_RX)
438#define UCC_SLOW_INTR (UCC_SLOW_INTR_TX | UCC_SLOW_INTR_RX)
439
440/* UCC Transmit On Demand Register (UTODR) */
441#define UCC_SLOW_TOD 0x8000
442#define UCC_FAST_TOD 0x8000
443
444/* Function code masks */
445#define FC_GBL 0x20
446#define FC_DTB_LCL 0x02
447#define UCC_FAST_FUNCTION_CODE_GBL 0x20
448#define UCC_FAST_FUNCTION_CODE_DTB_LCL 0x02
449#define UCC_FAST_FUNCTION_CODE_BDB_LCL 0x01
450
/*
 * Tell whether a value returned by qe_muram_alloc()/qe_muram_alloc_fixed()
 * is an error indicator rather than a real MURAM offset.  Errors are
 * encoded as small negative numbers cast to u32, i.e. values above
 * (u32)-1000; returns non-zero for an error, 0 for a valid offset.
 */
static inline long IS_MURAM_ERR(const u32 offset)
{
	const u32 lowest_err = (u32) - 1000L;

	return (offset > lowest_err) ? 1L : 0L;
}
455
456#endif /* __KERNEL__ */
457#endif /* _ASM_POWERPC_QE_H */
diff --git a/include/asm-powerpc/qe_ic.h b/include/asm-powerpc/qe_ic.h
new file mode 100644
index 000000000000..e386fb7e44b0
--- /dev/null
+++ b/include/asm-powerpc/qe_ic.h
@@ -0,0 +1,64 @@
1/*
2 * include/asm-powerpc/qe_ic.h
3 *
4 * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
5 *
6 * Authors: Shlomi Gridish <gridish@freescale.com>
7 * Li Yang <leoli@freescale.com>
8 *
9 * Description:
10 * QE IC external definitions and structure.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17#ifndef _ASM_POWERPC_QE_IC_H
18#define _ASM_POWERPC_QE_IC_H
19
20#include <linux/irq.h>
21
22#define NUM_OF_QE_IC_GROUPS 6
23
24/* Flags when we init the QE IC */
25#define QE_IC_SPREADMODE_GRP_W 0x00000001
26#define QE_IC_SPREADMODE_GRP_X 0x00000002
27#define QE_IC_SPREADMODE_GRP_Y 0x00000004
28#define QE_IC_SPREADMODE_GRP_Z 0x00000008
29#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
30#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
31
32#define QE_IC_LOW_SIGNAL 0x00000100
33#define QE_IC_HIGH_SIGNAL 0x00000200
34
35#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
36#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
37#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
38#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
39#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
40#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
41#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
42#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
43#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
44#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
45#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
46#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
47#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
48
/* QE interrupt sources groups.
 * Sequential ids for the NUM_OF_QE_IC_GROUPS interrupt groups. */
enum qe_ic_grp_id {
	QE_IC_GRP_W = 0,	/* QE interrupt controller group W */
	QE_IC_GRP_X,		/* QE interrupt controller group X */
	QE_IC_GRP_Y,		/* QE interrupt controller group Y */
	QE_IC_GRP_Z,		/* QE interrupt controller group Z */
	QE_IC_GRP_RISCA,	/* QE interrupt controller RISC group A */
	QE_IC_GRP_RISCB		/* QE interrupt controller RISC group B */
};
58
59void qe_ic_init(struct device_node *node, unsigned int flags);
60void qe_ic_set_highest_priority(unsigned int virq, int high);
61int qe_ic_set_priority(unsigned int virq, unsigned int priority);
62int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
63
64#endif /* _ASM_POWERPC_QE_IC_H */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 4b41deaa8d8d..43627596003b 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -91,10 +91,6 @@ DEBUGGER_BOILERPLATE(debugger_iabr_match)
91DEBUGGER_BOILERPLATE(debugger_dabr_match) 91DEBUGGER_BOILERPLATE(debugger_dabr_match)
92DEBUGGER_BOILERPLATE(debugger_fault_handler) 92DEBUGGER_BOILERPLATE(debugger_fault_handler)
93 93
94#ifdef CONFIG_XMON
95extern void xmon_init(int enable);
96#endif
97
98#else 94#else
99static inline int debugger(struct pt_regs *regs) { return 0; } 95static inline int debugger(struct pt_regs *regs) { return 0; }
100static inline int debugger_ipi(struct pt_regs *regs) { return 0; } 96static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
diff --git a/include/asm-powerpc/ucc.h b/include/asm-powerpc/ucc.h
new file mode 100644
index 000000000000..afe3076bdc03
--- /dev/null
+++ b/include/asm-powerpc/ucc.h
@@ -0,0 +1,84 @@
1/*
2 * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
3 *
4 * Authors: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
6 *
7 * Description:
8 * Internal header file for UCC unit routines.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15#ifndef __UCC_H__
16#define __UCC_H__
17
18#include <asm/immap_qe.h>
19#include <asm/qe.h>
20
21#define STATISTICS
22
23#define UCC_MAX_NUM 8
24
/* Slow or fast type for UCCs.
 * Selects which register layout/driver (ucc_fast vs ucc_slow) applies;
 * consumed by ucc_set_type(). */
enum ucc_speed_type {
	UCC_SPEED_TYPE_FAST, UCC_SPEED_TYPE_SLOW
};
30
/* Initial UCCs Parameter RAM address relative to: MEM_MAP_BASE (IMMR).
 * Note the offsets are not in UCC order (UCC5-8 precede UCC4). */
enum ucc_pram_initial_offset {
	UCC_PRAM_OFFSET_UCC1 = 0x8400,
	UCC_PRAM_OFFSET_UCC2 = 0x8500,
	UCC_PRAM_OFFSET_UCC3 = 0x8600,
	UCC_PRAM_OFFSET_UCC4 = 0x9000,
	UCC_PRAM_OFFSET_UCC5 = 0x8000,
	UCC_PRAM_OFFSET_UCC6 = 0x8100,
	UCC_PRAM_OFFSET_UCC7 = 0x8200,
	UCC_PRAM_OFFSET_UCC8 = 0x8300
};
43
44/* ucc_set_type
45 * Sets UCC to slow or fast mode.
46 *
47 * ucc_num - (In) number of UCC (0-7).
48 * regs - (In) pointer to registers base for the UCC.
49 * speed - (In) slow or fast mode for UCC.
50 */
51int ucc_set_type(int ucc_num, struct ucc_common *regs,
52 enum ucc_speed_type speed);
53
54/* ucc_init_guemr
55 * Init the Guemr register.
56 *
57 * regs - (In) pointer to registers base for the UCC.
58 */
59int ucc_init_guemr(struct ucc_common *regs);
60
61int ucc_set_qe_mux_mii_mng(int ucc_num);
62
63int ucc_set_qe_mux_rxtx(int ucc_num, enum qe_clock clock, enum comm_dir mode);
64
65int ucc_mux_set_grant_tsa_bkpt(int ucc_num, int set, u32 mask);
66
67/* QE MUX clock routing for UCC
68*/
69static inline int ucc_set_qe_mux_grant(int ucc_num, int set)
70{
71 return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_GRANT);
72}
73
74static inline int ucc_set_qe_mux_tsa(int ucc_num, int set)
75{
76 return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_TSA);
77}
78
79static inline int ucc_set_qe_mux_bkpt(int ucc_num, int set)
80{
81 return ucc_mux_set_grant_tsa_bkpt(ucc_num, set, QE_CMXUCR_BKPT);
82}
83
84#endif /* __UCC_H__ */
diff --git a/include/asm-powerpc/ucc_fast.h b/include/asm-powerpc/ucc_fast.h
new file mode 100644
index 000000000000..39d1c90fd2ca
--- /dev/null
+++ b/include/asm-powerpc/ucc_fast.h
@@ -0,0 +1,243 @@
1/*
2 * include/asm-powerpc/ucc_fast.h
3 *
4 * Internal header file for UCC FAST unit routines.
5 *
6 * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
7 *
8 * Authors: Shlomi Gridish <gridish@freescale.com>
9 * Li Yang <leoli@freescale.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16#ifndef __UCC_FAST_H__
17#define __UCC_FAST_H__
18
19#include <linux/kernel.h>
20
21#include <asm/immap_qe.h>
22#include <asm/qe.h>
23
24#include "ucc.h"
25
26/* Receive BD's status */
27#define R_E 0x80000000 /* buffer empty */
28#define R_W 0x20000000 /* wrap bit */
29#define R_I 0x10000000 /* interrupt on reception */
30#define R_L 0x08000000 /* last */
31#define R_F 0x04000000 /* first */
32
33/* transmit BD's status */
34#define T_R 0x80000000 /* ready bit */
35#define T_W 0x20000000 /* wrap bit */
36#define T_I 0x10000000 /* interrupt on completion */
37#define T_L 0x08000000 /* last */
38
39/* Rx Data buffer must be 4 bytes aligned in most cases */
40#define UCC_FAST_RX_ALIGN 4
41#define UCC_FAST_MRBLR_ALIGNMENT 4
42#define UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT 8
43
44/* Sizes */
45#define UCC_FAST_URFS_MIN_VAL 0x88
46#define UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR 8
47
/* ucc_fast_channel_protocol_mode - UCC FAST mode.
 * Raw GUMR protocol-mode field encodings; RESERVED* entries document
 * encodings this driver does not support. */
enum ucc_fast_channel_protocol_mode {
	UCC_FAST_PROTOCOL_MODE_HDLC = 0x00000000,
	UCC_FAST_PROTOCOL_MODE_RESERVED01 = 0x00000001,
	UCC_FAST_PROTOCOL_MODE_RESERVED_QMC = 0x00000002,
	UCC_FAST_PROTOCOL_MODE_RESERVED02 = 0x00000003,
	UCC_FAST_PROTOCOL_MODE_RESERVED_UART = 0x00000004,
	UCC_FAST_PROTOCOL_MODE_RESERVED03 = 0x00000005,
	UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_1 = 0x00000006,
	UCC_FAST_PROTOCOL_MODE_RESERVED_EX_MAC_2 = 0x00000007,
	UCC_FAST_PROTOCOL_MODE_RESERVED_BISYNC = 0x00000008,
	UCC_FAST_PROTOCOL_MODE_RESERVED04 = 0x00000009,
	UCC_FAST_PROTOCOL_MODE_ATM = 0x0000000A,
	UCC_FAST_PROTOCOL_MODE_RESERVED05 = 0x0000000B,
	UCC_FAST_PROTOCOL_MODE_ETHERNET = 0x0000000C,
	UCC_FAST_PROTOCOL_MODE_RESERVED06 = 0x0000000D,
	UCC_FAST_PROTOCOL_MODE_POS = 0x0000000E,
	UCC_FAST_PROTOCOL_MODE_RESERVED07 = 0x0000000F
};
67
/* ucc_fast_transparent_txrx - UCC Fast Transparent TX & RX.
 * GUMR TTX|TRX bits: both clear for normal mode, both set for
 * transparent mode. */
enum ucc_fast_transparent_txrx {
	UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL = 0x00000000,
	UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_TRANSPARENT = 0x18000000
};
73
/* UCC fast diagnostic mode (GUMR diagnostic field encodings). */
enum ucc_fast_diag_mode {
	UCC_FAST_DIAGNOSTIC_NORMAL = 0x0,
	UCC_FAST_DIAGNOSTIC_LOCAL_LOOP_BACK = 0x40000000,
	UCC_FAST_DIAGNOSTIC_AUTO_ECHO = 0x80000000,
	UCC_FAST_DIAGNOSTIC_LOOP_BACK_AND_ECHO = 0xC0000000
};
81
/* UCC fast Sync length (transparent mode only). */
enum ucc_fast_sync_len {
	UCC_FAST_SYNC_LEN_NOT_USED = 0x0,
	UCC_FAST_SYNC_LEN_AUTOMATIC = 0x00004000,
	UCC_FAST_SYNC_LEN_8_BIT = 0x00008000,
	UCC_FAST_SYNC_LEN_16_BIT = 0x0000C000
};
89
/* UCC fast RTS mode: what to transmit between frames. */
enum ucc_fast_ready_to_send {
	UCC_FAST_SEND_IDLES_BETWEEN_FRAMES = 0x00000000,
	UCC_FAST_SEND_FLAGS_BETWEEN_FRAMES = 0x00002000
};
95
/* UCC fast receiver decoding mode (GUMR RENC field encodings). */
enum ucc_fast_rx_decoding_method {
	UCC_FAST_RX_ENCODING_NRZ = 0x00000000,
	UCC_FAST_RX_ENCODING_NRZI = 0x00000800,
	UCC_FAST_RX_ENCODING_RESERVED0 = 0x00001000,
	UCC_FAST_RX_ENCODING_RESERVED1 = 0x00001800
};
103
/* UCC fast transmitter encoding mode (GUMR TENC field encodings). */
enum ucc_fast_tx_encoding_method {
	UCC_FAST_TX_ENCODING_NRZ = 0x00000000,
	UCC_FAST_TX_ENCODING_NRZI = 0x00000100,
	UCC_FAST_TX_ENCODING_RESERVED0 = 0x00000200,
	UCC_FAST_TX_ENCODING_RESERVED1 = 0x00000300
};
111
/* UCC fast CRC length (GUMR TCRC field encodings). */
enum ucc_fast_transparent_tcrc {
	UCC_FAST_16_BIT_CRC = 0x00000000,
	UCC_FAST_CRC_RESERVED0 = 0x00000040,
	UCC_FAST_32_BIT_CRC = 0x00000080,
	UCC_FAST_CRC_RESERVED1 = 0x000000C0
};
119
/* Fast UCC initialization structure.
 * Filled in by the caller and handed to ucc_fast_init(); flag fields
 * mirror the similarly named GUMR bits (tsa, cdp, cds, ctsp, ...). */
struct ucc_fast_info {
	int ucc_num;			/* UCC number (0-7 per UCC_MAX_NUM) */
	enum qe_clock rx_clock;		/* receive clock/BRG source */
	enum qe_clock tx_clock;		/* transmit clock/BRG source */
	u32 regs;			/* register base for this UCC */
	int irq;			/* interrupt line */
	u32 uccm_mask;			/* initial interrupt mask (UCCM) */
	int bd_mem_part;		/* MEM_PART_* for BD rings */
	int brkpt_support;
	int grant_support;
	int tsa;
	int cdp;
	int cds;
	int ctsp;
	int ctss;
	int tci;
	int txsy;
	int rtsm;
	int revd;
	int rsyn;
	u16 max_rx_buf_length;		/* maximum Rx buffer length (MRBLR) */
	u16 urfs;			/* Rx virtual FIFO size (>= URFS_MIN_VAL) */
	u16 urfet;
	u16 urfset;
	u16 utfs;			/* Tx virtual FIFO size */
	u16 utfet;
	u16 utftt;
	u16 ufpt;
	enum ucc_fast_channel_protocol_mode mode;	/* protocol mode */
	enum ucc_fast_transparent_txrx ttx_trx;		/* transparent Tx/Rx */
	enum ucc_fast_tx_encoding_method tenc;		/* Tx line encoding */
	enum ucc_fast_rx_decoding_method renc;		/* Rx line decoding */
	enum ucc_fast_transparent_tcrc tcrc;		/* CRC length */
	enum ucc_fast_sync_len synl;			/* sync length */
};
156
/* Run-time state for one fast UCC, created by ucc_fast_init() and
 * released by ucc_fast_free(). */
struct ucc_fast_private {
	struct ucc_fast_info *uf_info;	/* configuration this channel was
					   initialized from */
	struct ucc_fast *uf_regs;	/* a pointer to memory map of UCC regs. */
	u32 *p_ucce;		/* a pointer to the event register in memory. */
	u32 *p_uccm;		/* a pointer to the mask register in memory. */
	int enabled_tx;		/* Whether channel is enabled for Tx (ENT) */
	int enabled_rx;		/* Whether channel is enabled for Rx (ENR) */
	int stopped_tx;		/* Whether channel has been stopped for Tx
				   (STOP_TX, etc.) */
	int stopped_rx;		/* Whether channel has been stopped for Rx */
	u32 ucc_fast_tx_virtual_fifo_base_offset;	/* pointer to base of Tx
							   virtual fifo */
	u32 ucc_fast_rx_virtual_fifo_base_offset;	/* pointer to base of Rx
							   virtual fifo */
#ifdef STATISTICS
	/* Soft counters, compiled in only when STATISTICS is defined. */
	u32 tx_frames;		/* Transmitted frames counter. */
	u32 rx_frames;		/* Received frames counter (only frames
				   passed to application). */
	u32 tx_discarded;	/* Discarded tx frames counter (frames that
				   were discarded by the driver due to errors).
				 */
	u32 rx_discarded;	/* Discarded rx frames counter (frames that
				   were discarded by the driver due to errors).
				 */
#endif				/* STATISTICS */
	u16 mrblr;		/* maximum receive buffer length */
};
184
185/* ucc_fast_init
186 * Initializes Fast UCC according to user provided parameters.
187 *
188 * uf_info - (In) pointer to the fast UCC info structure.
189 * uccf_ret - (Out) pointer to the fast UCC structure.
190 */
191int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret);
192
193/* ucc_fast_free
194 * Frees all resources for fast UCC.
195 *
196 * uccf - (In) pointer to the fast UCC structure.
197 */
198void ucc_fast_free(struct ucc_fast_private * uccf);
199
200/* ucc_fast_enable
201 * Enables a fast UCC port.
202 * This routine enables Tx and/or Rx through the General UCC Mode Register.
203 *
204 * uccf - (In) pointer to the fast UCC structure.
205 * mode - (In) TX, RX, or both.
206 */
207void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode);
208
209/* ucc_fast_disable
210 * Disables a fast UCC port.
211 * This routine disables Tx and/or Rx through the General UCC Mode Register.
212 *
213 * uccf - (In) pointer to the fast UCC structure.
214 * mode - (In) TX, RX, or both.
215 */
216void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode);
217
218/* ucc_fast_irq
219 * Handles interrupts on fast UCC.
220 * Called from the general interrupt routine to handle interrupts on fast UCC.
221 *
222 * uccf - (In) pointer to the fast UCC structure.
223 */
224void ucc_fast_irq(struct ucc_fast_private * uccf);
225
226/* ucc_fast_transmit_on_demand
227 * Immediately forces a poll of the transmitter for data to be sent.
228 * Typically, the hardware performs a periodic poll for data that the
229 * transmit routine has set up to be transmitted. In cases where
230 * this polling cycle is not soon enough, this optional routine can
231 * be invoked to force a poll right away, instead. Proper use for
232 * each transmission for which this functionality is desired is to
233 * call the transmit routine and then this routine right after.
234 *
235 * uccf - (In) pointer to the fast UCC structure.
236 */
237void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf);
238
239u32 ucc_fast_get_qe_cr_subblock(int uccf_num);
240
241void ucc_fast_dump_regs(struct ucc_fast_private * uccf);
242
243#endif /* __UCC_FAST_H__ */
diff --git a/include/asm-powerpc/ucc_slow.h b/include/asm-powerpc/ucc_slow.h
new file mode 100644
index 000000000000..ca93bc99237e
--- /dev/null
+++ b/include/asm-powerpc/ucc_slow.h
@@ -0,0 +1,289 @@
1/*
2 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Authors: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
6 *
7 * Description:
8 * Internal header file for UCC SLOW unit routines.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15#ifndef __UCC_SLOW_H__
16#define __UCC_SLOW_H__
17
18#include <linux/kernel.h>
19
20#include <asm/immap_qe.h>
21#include <asm/qe.h>
22
23#include "ucc.h"
24
25/* transmit BD's status */
26#define T_R 0x80000000 /* ready bit */
27#define T_PAD 0x40000000 /* add pads to short frames */
28#define T_W 0x20000000 /* wrap bit */
29#define T_I 0x10000000 /* interrupt on completion */
30#define T_L 0x08000000 /* last */
31
32#define T_A 0x04000000 /* Address - the data transmitted as address
33 chars */
34#define T_TC 0x04000000 /* transmit CRC */
35#define T_CM 0x02000000 /* continuous mode */
36#define T_DEF 0x02000000 /* collision on previous attempt to transmit */
37#define T_P 0x01000000 /* Preamble - send Preamble sequence before
38 data */
39#define T_HB 0x01000000 /* heartbeat */
40#define T_NS 0x00800000 /* No Stop */
41#define T_LC 0x00800000 /* late collision */
42#define T_RL 0x00400000 /* retransmission limit */
43#define T_UN 0x00020000 /* underrun */
44#define T_CT 0x00010000 /* CTS lost */
45#define T_CSL 0x00010000 /* carrier sense lost */
46#define T_RC 0x003c0000 /* retry count */
47
48/* Receive BD's status */
49#define R_E 0x80000000 /* buffer empty */
50#define R_W 0x20000000 /* wrap bit */
51#define R_I 0x10000000 /* interrupt on reception */
52#define R_L 0x08000000 /* last */
53#define R_C 0x08000000 /* the last byte in this buffer is a cntl
54 char */
55#define R_F 0x04000000 /* first */
56#define R_A 0x04000000 /* the first byte in this buffer is address
57 byte */
58#define R_CM 0x02000000 /* continuous mode */
59#define R_ID 0x01000000 /* buffer close on reception of idles */
60#define R_M 0x01000000 /* Frame received because of promiscuous
61 mode */
62#define R_AM 0x00800000 /* Address match */
63#define R_DE 0x00800000 /* Address match */
64#define R_LG 0x00200000 /* Frame length violation */
65#define R_BR 0x00200000 /* Break received */
66#define R_NO 0x00100000 /* Rx Non Octet Aligned Packet */
67#define R_FR 0x00100000 /* Framing Error (no stop bit) character
68 received */
69#define R_PR 0x00080000 /* Parity Error character received */
70#define R_AB 0x00080000 /* Frame Aborted */
71#define R_SH 0x00080000 /* frame is too short */
72#define R_CR 0x00040000 /* CRC Error */
73#define R_OV 0x00020000 /* Overrun */
74#define R_CD 0x00010000 /* CD lost */
75#define R_CL 0x00010000 /* this frame is closed because of a
76 collision */
77
78/* Rx Data buffer must be 4 bytes aligned in most cases.*/
79#define UCC_SLOW_RX_ALIGN 4
80#define UCC_SLOW_MRBLR_ALIGNMENT 4
81#define UCC_SLOW_PRAM_SIZE 0x100
82#define ALIGNMENT_OF_UCC_SLOW_PRAM 64
83
84/* UCC Slow Channel Protocol Mode */
85enum ucc_slow_channel_protocol_mode {
86 UCC_SLOW_CHANNEL_PROTOCOL_MODE_QMC = 0x00000002,
87 UCC_SLOW_CHANNEL_PROTOCOL_MODE_UART = 0x00000004,
88 UCC_SLOW_CHANNEL_PROTOCOL_MODE_BISYNC = 0x00000008,
89};
90
91/* UCC Slow Transparent Transmit CRC (TCRC) */
92enum ucc_slow_transparent_tcrc {
93 /* 16-bit CCITT CRC (HDLC). (X16 + X12 + X5 + 1) */
94 UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC16 = 0x00000000,
95 /* CRC16 (BISYNC). (X16 + X15 + X2 + 1) */
96 UCC_SLOW_TRANSPARENT_TCRC_CRC16 = 0x00004000,
97 /* 32-bit CCITT CRC (Ethernet and HDLC) */
98 UCC_SLOW_TRANSPARENT_TCRC_CCITT_CRC32 = 0x00008000,
99};
100
101/* UCC Slow oversampling rate for transmitter (TDCR) */
102enum ucc_slow_tx_oversampling_rate {
103 /* 1x clock mode */
104 UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_1 = 0x00000000,
105 /* 8x clock mode */
106 UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_8 = 0x00010000,
107 /* 16x clock mode */
108 UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_16 = 0x00020000,
109 /* 32x clock mode */
110 UCC_SLOW_OVERSAMPLING_RATE_TX_TDCR_32 = 0x00030000,
111};
112
113/* UCC Slow Oversampling rate for receiver (RDCR)
114*/
115enum ucc_slow_rx_oversampling_rate {
116 /* 1x clock mode */
117 UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_1 = 0x00000000,
118 /* 8x clock mode */
119 UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_8 = 0x00004000,
120 /* 16x clock mode */
121 UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_16 = 0x00008000,
122 /* 32x clock mode */
123 UCC_SLOW_OVERSAMPLING_RATE_RX_RDCR_32 = 0x0000c000,
124};
125
126/* UCC Slow Transmitter encoding method (TENC)
127*/
128enum ucc_slow_tx_encoding_method {
129 UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZ = 0x00000000,
130 UCC_SLOW_TRANSMITTER_ENCODING_METHOD_TENC_NRZI = 0x00000100
131};
132
133/* UCC Slow Receiver decoding method (RENC)
134*/
135enum ucc_slow_rx_decoding_method {
136 UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZ = 0x00000000,
137 UCC_SLOW_RECEIVER_DECODING_METHOD_RENC_NRZI = 0x00000800
138};
139
140/* UCC Slow Diagnostic mode (DIAG)
141*/
142enum ucc_slow_diag_mode {
143 UCC_SLOW_DIAG_MODE_NORMAL = 0x00000000,
144 UCC_SLOW_DIAG_MODE_LOOPBACK = 0x00000040,
145 UCC_SLOW_DIAG_MODE_ECHO = 0x00000080,
146 UCC_SLOW_DIAG_MODE_LOOPBACK_ECHO = 0x000000c0
147};
148
149struct ucc_slow_info {
150 int ucc_num;
151 enum qe_clock rx_clock;
152 enum qe_clock tx_clock;
153 struct ucc_slow *us_regs;
154 int irq;
155 u16 uccm_mask;
156 int data_mem_part;
157 int init_tx;
158 int init_rx;
159 u32 tx_bd_ring_len;
160 u32 rx_bd_ring_len;
161 int rx_interrupts;
162 int brkpt_support;
163 int grant_support;
164 int tsa;
165 int cdp;
166 int cds;
167 int ctsp;
168 int ctss;
169 int rinv;
170 int tinv;
171 int rtsm;
172 int rfw;
173 int tci;
174 int tend;
175 int tfl;
176 int txsy;
177 u16 max_rx_buf_length;
178 enum ucc_slow_transparent_tcrc tcrc;
179 enum ucc_slow_channel_protocol_mode mode;
180 enum ucc_slow_diag_mode diag;
181 enum ucc_slow_tx_oversampling_rate tdcr;
182 enum ucc_slow_rx_oversampling_rate rdcr;
183 enum ucc_slow_tx_encoding_method tenc;
184 enum ucc_slow_rx_decoding_method renc;
185};
186
187struct ucc_slow_private {
188 struct ucc_slow_info *us_info;
189 struct ucc_slow *us_regs; /* a pointer to memory map of UCC regs */
190 struct ucc_slow_pram *us_pram; /* a pointer to the parameter RAM */
191 u32 us_pram_offset;
192 int enabled_tx; /* Whether channel is enabled for Tx (ENT) */
193 int enabled_rx; /* Whether channel is enabled for Rx (ENR) */
194 int stopped_tx; /* Whether channel has been stopped for Tx
195 (STOP_TX, etc.) */
196 int stopped_rx; /* Whether channel has been stopped for Rx */
197 struct list_head confQ; /* frames passed to chip waiting for tx */
198 u32 first_tx_bd_mask; /* mask is used in Tx routine to save status
199 and length for first BD in a frame */
200 u32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */
201 u32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */
202 u8 *confBd; /* next BD for confirm after Tx */
203 u8 *tx_bd; /* next BD for new Tx request */
204 u8 *rx_bd; /* next BD to collect after Rx */
205 void *p_rx_frame; /* accumulating receive frame */
206 u16 *p_ucce; /* a pointer to the event register in memory.
207 */
208 u16 *p_uccm; /* a pointer to the mask register in memory */
209 u16 saved_uccm; /* a saved mask for the RX Interrupt bits */
210#ifdef STATISTICS
211 u32 tx_frames; /* Transmitted frames counters */
212 u32 rx_frames; /* Received frames counters (only frames
213 passed to application) */
214 u32 rx_discarded; /* Discarded frames counters (frames that
215 were discarded by the driver due to
216 errors) */
217#endif /* STATISTICS */
218};
219
220/* ucc_slow_init
221 * Initializes Slow UCC according to provided parameters.
222 *
223 * us_info - (In) pointer to the slow UCC info structure.
224 * uccs_ret - (Out) pointer to the slow UCC structure.
225 */
226int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret);
227
228/* ucc_slow_free
229 * Frees all resources for slow UCC.
230 *
231 * uccs - (In) pointer to the slow UCC structure.
232 */
233void ucc_slow_free(struct ucc_slow_private * uccs);
234
235/* ucc_slow_enable
236 * Enables a slow UCC port.
237 * This routine enables Tx and/or Rx through the General UCC Mode Register.
238 *
239 * uccs - (In) pointer to the slow UCC structure.
240 * mode - (In) TX, RX, or both.
241 */
242void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode);
243
244/* ucc_slow_disable
245 * Disables a slow UCC port.
246 * This routine disables Tx and/or Rx through the General UCC Mode Register.
247 *
248 * uccs - (In) pointer to the slow UCC structure.
249 * mode - (In) TX, RX, or both.
250 */
251void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode);
252
253/* ucc_slow_poll_transmitter_now
254 * Immediately forces a poll of the transmitter for data to be sent.
255 * Typically, the hardware performs a periodic poll for data that the
256 * transmit routine has set up to be transmitted. In cases where
257 * this polling cycle is not soon enough, this optional routine can
258 * be invoked to force a poll right away, instead. Proper use for
259 * each transmission for which this functionality is desired is to
260 * call the transmit routine and then this routine right after.
261 *
262 * uccs - (In) pointer to the slow UCC structure.
263 */
264void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs);
265
266/* ucc_slow_graceful_stop_tx
267 * Smoothly stops transmission on a specified slow UCC.
268 *
269 * uccs - (In) pointer to the slow UCC structure.
270 */
271void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs);
272
273/* ucc_slow_stop_tx
274 * Stops transmission on a specified slow UCC.
275 *
276 * uccs - (In) pointer to the slow UCC structure.
277 */
278void ucc_slow_stop_tx(struct ucc_slow_private * uccs);
279
280/* ucc_slow_restart_x
281 * Restarts transmitting on a specified slow UCC.
282 *
283 * uccs - (In) pointer to the slow UCC structure.
284 */
285void ucc_slow_restart_x(struct ucc_slow_private * uccs);
286
287u32 ucc_slow_get_qe_cr_subblock(int uccs_num);
288
289#endif /* __UCC_SLOW_H__ */
diff --git a/include/asm-powerpc/xmon.h b/include/asm-powerpc/xmon.h
index 43f7129984c7..f1d337ed68d5 100644
--- a/include/asm-powerpc/xmon.h
+++ b/include/asm-powerpc/xmon.h
@@ -1,12 +1,22 @@
1#ifndef __PPC_XMON_H 1#ifndef __ASM_POWERPC_XMON_H
2#define __PPC_XMON_H 2#define __ASM_POWERPC_XMON_H
3#ifdef __KERNEL__
4 3
5struct pt_regs; 4/*
5 * Copyright (C) 2006 IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
6 12
7extern int xmon(struct pt_regs *excp); 13#ifdef __KERNEL__
8extern void xmon_printf(const char *fmt, ...);
9extern void xmon_init(int);
10 14
15#ifdef CONFIG_XMON
16extern void xmon_setup(void);
17#else
18static inline void xmon_setup(void) { };
11#endif 19#endif
12#endif 20
21#endif /* __KERNEL__ */
22#endif /* __ASM_POWERPC_XMON_H */
diff --git a/include/asm-x86_64/hardirq.h b/include/asm-x86_64/hardirq.h
index 64a65ce2f41f..95d5e090ed89 100644
--- a/include/asm-x86_64/hardirq.h
+++ b/include/asm-x86_64/hardirq.h
@@ -6,6 +6,9 @@
6#include <asm/pda.h> 6#include <asm/pda.h>
7#include <asm/apic.h> 7#include <asm/apic.h>
8 8
9/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
10#define MAX_HARDIRQS_PER_CPU NR_VECTORS
11
9#define __ARCH_IRQ_STAT 1 12#define __ARCH_IRQ_STAT 1
10 13
11#define local_softirq_pending() read_pda(__softirq_pending) 14#define local_softirq_pending() read_pda(__softirq_pending)
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 48a4a5364e85..53d0d9fd10d6 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -19,8 +19,7 @@
19#include <asm/irq.h> 19#include <asm/irq.h>
20#include <linux/profile.h> 20#include <linux/profile.h>
21#include <linux/smp.h> 21#include <linux/smp.h>
22 22#include <linux/percpu.h>
23struct hw_interrupt_type;
24#endif 23#endif
25 24
26#define NMI_VECTOR 0x02 25#define NMI_VECTOR 0x02
@@ -75,9 +74,10 @@ struct hw_interrupt_type;
75 74
76 75
77#ifndef __ASSEMBLY__ 76#ifndef __ASSEMBLY__
78extern u8 irq_vector[NR_IRQ_VECTORS]; 77extern unsigned int irq_vector[NR_IRQ_VECTORS];
78typedef int vector_irq_t[NR_VECTORS];
79DECLARE_PER_CPU(vector_irq_t, vector_irq);
79#define IO_APIC_VECTOR(irq) (irq_vector[irq]) 80#define IO_APIC_VECTOR(irq) (irq_vector[irq])
80#define AUTO_ASSIGN -1
81 81
82/* 82/*
83 * Various low-level irq details needed by irq.c, process.c, 83 * Various low-level irq details needed by irq.c, process.c,
diff --git a/include/asm-x86_64/hypertransport.h b/include/asm-x86_64/hypertransport.h
new file mode 100644
index 000000000000..c16c6ff4bdd7
--- /dev/null
+++ b/include/asm-x86_64/hypertransport.h
@@ -0,0 +1,42 @@
1#ifndef ASM_HYPERTRANSPORT_H
2#define ASM_HYPERTRANSPORT_H
3
4/*
5 * Constants for x86 Hypertransport Interrupts.
6 */
7
8#define HT_IRQ_LOW_BASE 0xf8000000
9
10#define HT_IRQ_LOW_VECTOR_SHIFT 16
11#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
12#define HT_IRQ_LOW_VECTOR(v) (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
13
14#define HT_IRQ_LOW_DEST_ID_SHIFT 8
15#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
16#define HT_IRQ_LOW_DEST_ID(v) (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
17
18#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000
19#define HT_IRQ_LOW_DM_LOGICAL 0x0000040
20
21#define HT_IRQ_LOW_RQEOI_EDGE 0x0000000
22#define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020
23
24
25#define HT_IRQ_LOW_MT_FIXED 0x0000000
26#define HT_IRQ_LOW_MT_ARBITRATED 0x0000004
27#define HT_IRQ_LOW_MT_SMI 0x0000008
28#define HT_IRQ_LOW_MT_NMI 0x000000c
29#define HT_IRQ_LOW_MT_INIT 0x0000010
30#define HT_IRQ_LOW_MT_STARTUP 0x0000014
31#define HT_IRQ_LOW_MT_EXTINT 0x0000018
32#define HT_IRQ_LOW_MT_LINT1 0x000008c
33#define HT_IRQ_LOW_MT_LINT0 0x0000098
34
35#define HT_IRQ_LOW_IRQ_MASKED 0x0000001
36
37
38#define HT_IRQ_HIGH_DEST_ID_SHIFT 0
39#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
40#define HT_IRQ_HIGH_DEST_ID(v) ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
41
42#endif /* ASM_HYPERTRANSPORT_H */
diff --git a/include/asm-x86_64/io_apic.h b/include/asm-x86_64/io_apic.h
index 5d1b5c68e36e..171ec2dc8c04 100644
--- a/include/asm-x86_64/io_apic.h
+++ b/include/asm-x86_64/io_apic.h
@@ -10,46 +10,6 @@
10 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar 10 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
11 */ 11 */
12 12
13#ifdef CONFIG_PCI_MSI
14static inline int use_pci_vector(void) {return 1;}
15static inline void disable_edge_ioapic_vector(unsigned int vector) { }
16static inline void mask_and_ack_level_ioapic_vector(unsigned int vector) { }
17static inline void end_edge_ioapic_vector (unsigned int vector) { }
18#define startup_level_ioapic startup_level_ioapic_vector
19#define shutdown_level_ioapic mask_IO_APIC_vector
20#define enable_level_ioapic unmask_IO_APIC_vector
21#define disable_level_ioapic mask_IO_APIC_vector
22#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_vector
23#define end_level_ioapic end_level_ioapic_vector
24#define set_ioapic_affinity set_ioapic_affinity_vector
25
26#define startup_edge_ioapic startup_edge_ioapic_vector
27#define shutdown_edge_ioapic disable_edge_ioapic_vector
28#define enable_edge_ioapic unmask_IO_APIC_vector
29#define disable_edge_ioapic disable_edge_ioapic_vector
30#define ack_edge_ioapic ack_edge_ioapic_vector
31#define end_edge_ioapic end_edge_ioapic_vector
32#else
33static inline int use_pci_vector(void) {return 0;}
34static inline void disable_edge_ioapic_irq(unsigned int irq) { }
35static inline void mask_and_ack_level_ioapic_irq(unsigned int irq) { }
36static inline void end_edge_ioapic_irq (unsigned int irq) { }
37#define startup_level_ioapic startup_level_ioapic_irq
38#define shutdown_level_ioapic mask_IO_APIC_irq
39#define enable_level_ioapic unmask_IO_APIC_irq
40#define disable_level_ioapic mask_IO_APIC_irq
41#define mask_and_ack_level_ioapic mask_and_ack_level_ioapic_irq
42#define end_level_ioapic end_level_ioapic_irq
43#define set_ioapic_affinity set_ioapic_affinity_irq
44
45#define startup_edge_ioapic startup_edge_ioapic_irq
46#define shutdown_edge_ioapic disable_edge_ioapic_irq
47#define enable_edge_ioapic unmask_IO_APIC_irq
48#define disable_edge_ioapic disable_edge_ioapic_irq
49#define ack_edge_ioapic ack_edge_ioapic_irq
50#define end_edge_ioapic end_edge_ioapic_irq
51#endif
52
53#define APIC_MISMATCH_DEBUG 13#define APIC_MISMATCH_DEBUG
54 14
55#define IO_APIC_BASE(idx) \ 15#define IO_APIC_BASE(idx) \
@@ -202,13 +162,10 @@ extern int skip_ioapic_setup;
202extern int io_apic_get_version (int ioapic); 162extern int io_apic_get_version (int ioapic);
203extern int io_apic_get_redir_entries (int ioapic); 163extern int io_apic_get_redir_entries (int ioapic);
204extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int); 164extern int io_apic_set_pci_routing (int ioapic, int pin, int irq, int, int);
205extern int timer_uses_ioapic_pin_0;
206#endif 165#endif
207 166
208extern int sis_apic_bug; /* dummy */ 167extern int sis_apic_bug; /* dummy */
209 168
210extern int assign_irq_vector(int irq);
211
212void enable_NMI_through_LVT0 (void * dummy); 169void enable_NMI_through_LVT0 (void * dummy);
213 170
214extern spinlock_t i8259A_lock; 171extern spinlock_t i8259A_lock;
diff --git a/include/asm-x86_64/irq.h b/include/asm-x86_64/irq.h
index 43469d8ab71a..5006c6e75656 100644
--- a/include/asm-x86_64/irq.h
+++ b/include/asm-x86_64/irq.h
@@ -31,13 +31,8 @@
31 31
32#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */ 32#define FIRST_SYSTEM_VECTOR 0xef /* duplicated in hw_irq.h */
33 33
34#ifdef CONFIG_PCI_MSI 34#define NR_IRQS (NR_VECTORS + (32 *NR_CPUS))
35#define NR_IRQS FIRST_SYSTEM_VECTOR
36#define NR_IRQ_VECTORS NR_IRQS 35#define NR_IRQ_VECTORS NR_IRQS
37#else
38#define NR_IRQS 224
39#define NR_IRQ_VECTORS (32 * NR_CPUS)
40#endif
41 36
42static __inline__ int irq_canonicalize(int irq) 37static __inline__ int irq_canonicalize(int irq)
43{ 38{
diff --git a/include/asm-x86_64/msi.h b/include/asm-x86_64/msi.h
deleted file mode 100644
index 3ad2346624b2..000000000000
--- a/include/asm-x86_64/msi.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * Copyright (C) 2003-2004 Intel
3 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
4 */
5
6#ifndef ASM_MSI_H
7#define ASM_MSI_H
8
9#include <asm/desc.h>
10#include <asm/mach_apic.h>
11#include <asm/smp.h>
12
13#define LAST_DEVICE_VECTOR (FIRST_SYSTEM_VECTOR - 1)
14#define MSI_TARGET_CPU_SHIFT 12
15
16extern struct msi_ops msi_apic_ops;
17
18static inline int msi_arch_init(void)
19{
20 msi_register(&msi_apic_ops);
21 return 0;
22}
23
24#endif /* ASM_MSI_H */
diff --git a/include/asm-x86_64/msidef.h b/include/asm-x86_64/msidef.h
new file mode 100644
index 000000000000..5b8acddb70fb
--- /dev/null
+++ b/include/asm-x86_64/msidef.h
@@ -0,0 +1,47 @@
1#ifndef ASM_MSIDEF_H
2#define ASM_MSIDEF_H
3
4/*
5 * Constants for Intel APIC based MSI messages.
6 */
7
8/*
9 * Shifts for MSI data
10 */
11
12#define MSI_DATA_VECTOR_SHIFT 0
13#define MSI_DATA_VECTOR_MASK 0x000000ff
14#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK)
15
16#define MSI_DATA_DELIVERY_MODE_SHIFT 8
17#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
18#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
19
20#define MSI_DATA_LEVEL_SHIFT 14
21#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
22#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
23
24#define MSI_DATA_TRIGGER_SHIFT 15
25#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
26#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
27
28/*
29 * Shift/mask fields for msi address
30 */
31
32#define MSI_ADDR_BASE_HI 0
33#define MSI_ADDR_BASE_LO 0xfee00000
34
35#define MSI_ADDR_DEST_MODE_SHIFT 2
36#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
37#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)
38
39#define MSI_ADDR_REDIRECTION_SHIFT 3
40#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) /* dedicated cpu */
41#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) /* lowest priority */
42
43#define MSI_ADDR_DEST_ID_SHIFT 12
44#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
45#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK)
46
47#endif /* ASM_MSIDEF_H */
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index f7a52e19b4be..ea005c0a79fd 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -46,6 +46,7 @@ header-y += coff.h
46header-y += comstats.h 46header-y += comstats.h
47header-y += consolemap.h 47header-y += consolemap.h
48header-y += cycx_cfm.h 48header-y += cycx_cfm.h
49header-y += dlm_device.h
49header-y += dm-ioctl.h 50header-y += dm-ioctl.h
50header-y += dn.h 51header-y += dn.h
51header-y += dqblk_v1.h 52header-y += dqblk_v1.h
@@ -104,6 +105,7 @@ header-y += ixjuser.h
104header-y += jffs2.h 105header-y += jffs2.h
105header-y += keyctl.h 106header-y += keyctl.h
106header-y += limits.h 107header-y += limits.h
108header-y += lock_dlm_plock.h
107header-y += magic.h 109header-y += magic.h
108header-y += major.h 110header-y += major.h
109header-y += matroxfb.h 111header-y += matroxfb.h
@@ -156,12 +158,10 @@ header-y += toshiba.h
156header-y += ultrasound.h 158header-y += ultrasound.h
157header-y += un.h 159header-y += un.h
158header-y += utime.h 160header-y += utime.h
159header-y += utsname.h
160header-y += video_decoder.h 161header-y += video_decoder.h
161header-y += video_encoder.h 162header-y += video_encoder.h
162header-y += videotext.h 163header-y += videotext.h
163header-y += vt.h 164header-y += vt.h
164header-y += wavefront.h
165header-y += wireless.h 165header-y += wireless.h
166header-y += xattr.h 166header-y += xattr.h
167header-y += x25.h 167header-y += x25.h
@@ -194,6 +194,7 @@ unifdef-y += cyclades.h
194unifdef-y += dccp.h 194unifdef-y += dccp.h
195unifdef-y += dirent.h 195unifdef-y += dirent.h
196unifdef-y += divert.h 196unifdef-y += divert.h
197unifdef-y += dlm.h
197unifdef-y += elfcore.h 198unifdef-y += elfcore.h
198unifdef-y += errno.h 199unifdef-y += errno.h
199unifdef-y += errqueue.h 200unifdef-y += errqueue.h
@@ -210,6 +211,7 @@ unifdef-y += ftape.h
210unifdef-y += gameport.h 211unifdef-y += gameport.h
211unifdef-y += generic_serial.h 212unifdef-y += generic_serial.h
212unifdef-y += genhd.h 213unifdef-y += genhd.h
214unifdef-y += gfs2_ondisk.h
213unifdef-y += hayesesp.h 215unifdef-y += hayesesp.h
214unifdef-y += hdlcdrv.h 216unifdef-y += hdlcdrv.h
215unifdef-y += hdlc.h 217unifdef-y += hdlc.h
@@ -333,6 +335,7 @@ unifdef-y += unistd.h
333unifdef-y += usb_ch9.h 335unifdef-y += usb_ch9.h
334unifdef-y += usbdevice_fs.h 336unifdef-y += usbdevice_fs.h
335unifdef-y += user.h 337unifdef-y += user.h
338unifdef-y += utsname.h
336unifdef-y += videodev2.h 339unifdef-y += videodev2.h
337unifdef-y += videodev.h 340unifdef-y += videodev.h
338unifdef-y += wait.h 341unifdef-y += wait.h
diff --git a/include/linux/ac97_codec.h b/include/linux/ac97_codec.h
index 2ed2fd855133..22eb9367235a 100644
--- a/include/linux/ac97_codec.h
+++ b/include/linux/ac97_codec.h
@@ -331,8 +331,6 @@ extern int ac97_read_proc (char *page_out, char **start, off_t off,
331extern int ac97_probe_codec(struct ac97_codec *); 331extern int ac97_probe_codec(struct ac97_codec *);
332extern unsigned int ac97_set_adc_rate(struct ac97_codec *codec, unsigned int rate); 332extern unsigned int ac97_set_adc_rate(struct ac97_codec *codec, unsigned int rate);
333extern unsigned int ac97_set_dac_rate(struct ac97_codec *codec, unsigned int rate); 333extern unsigned int ac97_set_dac_rate(struct ac97_codec *codec, unsigned int rate);
334extern int ac97_save_state(struct ac97_codec *codec);
335extern int ac97_restore_state(struct ac97_codec *codec);
336 334
337extern struct ac97_codec *ac97_alloc_codec(void); 335extern struct ac97_codec *ac97_alloc_codec(void);
338extern void ac97_release_codec(struct ac97_codec *codec); 336extern void ac97_release_codec(struct ac97_codec *codec);
@@ -346,9 +344,6 @@ struct ac97_driver {
346 void (*remove) (struct ac97_codec *codec, struct ac97_driver *driver); 344 void (*remove) (struct ac97_codec *codec, struct ac97_driver *driver);
347}; 345};
348 346
349extern int ac97_register_driver(struct ac97_driver *driver);
350extern void ac97_unregister_driver(struct ac97_driver *driver);
351
352/* quirk types */ 347/* quirk types */
353enum { 348enum {
354 AC97_TUNE_DEFAULT = -1, /* use default from quirk list (not valid in list) */ 349 AC97_TUNE_DEFAULT = -1, /* use default from quirk list (not valid in list) */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index c3aa09751814..b2ca666d9997 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -75,7 +75,7 @@
75#define AUDIT_DAEMON_CONFIG 1203 /* Daemon config change */ 75#define AUDIT_DAEMON_CONFIG 1203 /* Daemon config change */
76 76
77#define AUDIT_SYSCALL 1300 /* Syscall event */ 77#define AUDIT_SYSCALL 1300 /* Syscall event */
78#define AUDIT_FS_WATCH 1301 /* Filesystem watch event */ 78/* #define AUDIT_FS_WATCH 1301 * Deprecated */
79#define AUDIT_PATH 1302 /* Filename path information */ 79#define AUDIT_PATH 1302 /* Filename path information */
80#define AUDIT_IPC 1303 /* IPC record */ 80#define AUDIT_IPC 1303 /* IPC record */
81#define AUDIT_SOCKETCALL 1304 /* sys_socketcall arguments */ 81#define AUDIT_SOCKETCALL 1304 /* sys_socketcall arguments */
@@ -88,6 +88,7 @@
88#define AUDIT_MQ_SENDRECV 1313 /* POSIX MQ send/receive record type */ 88#define AUDIT_MQ_SENDRECV 1313 /* POSIX MQ send/receive record type */
89#define AUDIT_MQ_NOTIFY 1314 /* POSIX MQ notify record type */ 89#define AUDIT_MQ_NOTIFY 1314 /* POSIX MQ notify record type */
90#define AUDIT_MQ_GETSETATTR 1315 /* POSIX MQ get/set attribute record type */ 90#define AUDIT_MQ_GETSETATTR 1315 /* POSIX MQ get/set attribute record type */
91#define AUDIT_KERNEL_OTHER 1316 /* For use by 3rd party modules */
91 92
92#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ 93#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
93#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ 94#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 88dafa246d87..952bee79a8f3 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -43,6 +43,8 @@ extern int debug_locks_off(void);
43# define locking_selftest() do { } while (0) 43# define locking_selftest() do { } while (0)
44#endif 44#endif
45 45
46struct task_struct;
47
46#ifdef CONFIG_LOCKDEP 48#ifdef CONFIG_LOCKDEP
47extern void debug_show_all_locks(void); 49extern void debug_show_all_locks(void);
48extern void debug_show_held_locks(struct task_struct *task); 50extern void debug_show_held_locks(struct task_struct *task);
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
new file mode 100644
index 000000000000..1b1dcb9a40bb
--- /dev/null
+++ b/include/linux/dlm.h
@@ -0,0 +1,302 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __DLM_DOT_H__
15#define __DLM_DOT_H__
16
17/*
18 * Interface to Distributed Lock Manager (DLM)
19 * routines and structures to use DLM lockspaces
20 */
21
22/*
23 * Lock Modes
24 */
25
26#define DLM_LOCK_IV -1 /* invalid */
27#define DLM_LOCK_NL 0 /* null */
28#define DLM_LOCK_CR 1 /* concurrent read */
29#define DLM_LOCK_CW 2 /* concurrent write */
30#define DLM_LOCK_PR 3 /* protected read */
31#define DLM_LOCK_PW 4 /* protected write */
32#define DLM_LOCK_EX 5 /* exclusive */
33
34/*
35 * Maximum size in bytes of a dlm_lock name
36 */
37
38#define DLM_RESNAME_MAXLEN 64
39
40/*
41 * Flags to dlm_lock
42 *
43 * DLM_LKF_NOQUEUE
44 *
45 * Do not queue the lock request on the wait queue if it cannot be granted
46 * immediately. If the lock cannot be granted because of this flag, DLM will
47 * either return -EAGAIN from the dlm_lock call or will return 0 from
48 * dlm_lock and -EAGAIN in the lock status block when the AST is executed.
49 *
50 * DLM_LKF_CANCEL
51 *
52 * Used to cancel a pending lock request or conversion. A converting lock is
53 * returned to its previously granted mode.
54 *
55 * DLM_LKF_CONVERT
56 *
57 * Indicates a lock conversion request. For conversions the name and namelen
58 * are ignored and the lock ID in the LKSB is used to identify the lock.
59 *
60 * DLM_LKF_VALBLK
61 *
62 * Requests DLM to return the current contents of the lock value block in the
63 * lock status block. When this flag is set in a lock conversion from PW or EX
64 * modes, DLM assigns the value specified in the lock status block to the lock
65 * value block of the lock resource. The LVB is a DLM_LVB_LEN size array
66 * containing application-specific information.
67 *
68 * DLM_LKF_QUECVT
69 *
70 * Force a conversion request to be queued, even if it is compatible with
71 * the granted modes of other locks on the same resource.
72 *
73 * DLM_LKF_IVVALBLK
74 *
75 * Invalidate the lock value block.
76 *
77 * DLM_LKF_CONVDEADLK
78 *
79 * Allows the dlm to resolve conversion deadlocks internally by demoting the
80 * granted mode of a converting lock to NL. The DLM_SBF_DEMOTED flag is
81 * returned for a conversion that's been effected by this.
82 *
83 * DLM_LKF_PERSISTENT
84 *
85 * Only relevant to locks originating in userspace. A persistent lock will not
86 * be removed if the process holding the lock exits.
87 *
88 * DLM_LKF_NODLKWT
89 * DLM_LKF_NODLCKBLK
90 *
 91 * not yet implemented
92 *
93 * DLM_LKF_EXPEDITE
94 *
95 * Used only with new requests for NL mode locks. Tells the lock manager
96 * to grant the lock, ignoring other locks in convert and wait queues.
97 *
98 * DLM_LKF_NOQUEUEBAST
99 *
100 * Send blocking AST's before returning -EAGAIN to the caller. It is only
101 * used along with the NOQUEUE flag. Blocking AST's are not sent for failed
102 * NOQUEUE requests otherwise.
103 *
104 * DLM_LKF_HEADQUE
105 *
106 * Add a lock to the head of the convert or wait queue rather than the tail.
107 *
108 * DLM_LKF_NOORDER
109 *
110 * Disregard the standard grant order rules and grant a lock as soon as it
111 * is compatible with other granted locks.
112 *
113 * DLM_LKF_ORPHAN
114 *
115 * not yet implemented
116 *
117 * DLM_LKF_ALTPR
118 *
119 * If the requested mode cannot be granted immediately, try to grant the lock
120 * in PR mode instead. If this alternate mode is granted instead of the
121 * requested mode, DLM_SBF_ALTMODE is returned in the lksb.
122 *
123 * DLM_LKF_ALTCW
124 *
125 * The same as ALTPR, but the alternate mode is CW.
126 *
127 * DLM_LKF_FORCEUNLOCK
128 *
129 * Unlock the lock even if it is converting or waiting or has sublocks.
130 * Only really for use by the userland device.c code.
131 *
132 */
133
134#define DLM_LKF_NOQUEUE 0x00000001
135#define DLM_LKF_CANCEL 0x00000002
136#define DLM_LKF_CONVERT 0x00000004
137#define DLM_LKF_VALBLK 0x00000008
138#define DLM_LKF_QUECVT 0x00000010
139#define DLM_LKF_IVVALBLK 0x00000020
140#define DLM_LKF_CONVDEADLK 0x00000040
141#define DLM_LKF_PERSISTENT 0x00000080
142#define DLM_LKF_NODLCKWT 0x00000100
143#define DLM_LKF_NODLCKBLK 0x00000200
144#define DLM_LKF_EXPEDITE 0x00000400
145#define DLM_LKF_NOQUEUEBAST 0x00000800
146#define DLM_LKF_HEADQUE 0x00001000
147#define DLM_LKF_NOORDER 0x00002000
148#define DLM_LKF_ORPHAN 0x00004000
149#define DLM_LKF_ALTPR 0x00008000
150#define DLM_LKF_ALTCW 0x00010000
151#define DLM_LKF_FORCEUNLOCK 0x00020000
152
153/*
154 * Some return codes that are not in errno.h
155 */
156
157#define DLM_ECANCEL 0x10001
158#define DLM_EUNLOCK 0x10002
159
160typedef void dlm_lockspace_t;
161
162/*
163 * Lock status block
164 *
165 * Use this structure to specify the contents of the lock value block. For a
166 * conversion request, this structure is used to specify the lock ID of the
167 * lock. DLM writes the status of the lock request and the lock ID assigned
168 * to the request in the lock status block.
169 *
170 * sb_lkid: the returned lock ID. It is set on new (non-conversion) requests.
171 * It is available when dlm_lock returns.
172 *
173 * sb_lvbptr: saves or returns the contents of the lock's LVB according to rules
174 * shown for the DLM_LKF_VALBLK flag.
175 *
176 * sb_flags: DLM_SBF_DEMOTED is returned if in the process of promoting a lock,
177 * it was first demoted to NL to avoid conversion deadlock.
178 * DLM_SBF_VALNOTVALID is returned if the resource's LVB is marked invalid.
179 *
180 * sb_status: the returned status of the lock request set prior to AST
181 * execution. Possible return values:
182 *
183 * 0 if lock request was successful
184 * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE
185 * -ENOMEM if there is no memory to process request
186 * -EINVAL if there are invalid parameters
187 * -DLM_EUNLOCK if unlock request was successful
188 * -DLM_ECANCEL if a cancel completed successfully
189 */
190
191#define DLM_SBF_DEMOTED 0x01
192#define DLM_SBF_VALNOTVALID 0x02
193#define DLM_SBF_ALTMODE 0x04
194
195struct dlm_lksb {
196 int sb_status;
197 uint32_t sb_lkid;
198 char sb_flags;
199 char * sb_lvbptr;
200};
201
202
203#ifdef __KERNEL__
204
205#define DLM_LSFL_NODIR 0x00000001
206
207/*
208 * dlm_new_lockspace
209 *
210 * Starts a lockspace with the given name. If the named lockspace exists in
211 * the cluster, the calling node joins it.
212 */
213
214int dlm_new_lockspace(char *name, int namelen, dlm_lockspace_t **lockspace,
215 uint32_t flags, int lvblen);
216
217/*
218 * dlm_release_lockspace
219 *
220 * Stop a lockspace.
221 */
222
223int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
224
225/*
226 * dlm_lock
227 *
228 * Make an asynchronous request to acquire or convert a lock on a named
229 * resource.
230 *
231 * lockspace: context for the request
232 * mode: the requested mode of the lock (DLM_LOCK_)
233 * lksb: lock status block for input and async return values
234 * flags: input flags (DLM_LKF_)
235 * name: name of the resource to lock, can be binary
236 * namelen: the length in bytes of the resource name (DLM_RESNAME_MAXLEN)
237 * parent: the lock ID of a parent lock or 0 if none
238 * lockast: function DLM executes when it completes processing the request
239 * astarg: argument passed to lockast and bast functions
240 * bast: function DLM executes when this lock later blocks another request
241 *
242 * Returns:
243 * 0 if request is successfully queued for processing
244 * -EINVAL if any input parameters are invalid
245 * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE
246 * -ENOMEM if there is no memory to process request
247 * -ENOTCONN if there is a communication error
248 *
249 * If the call to dlm_lock returns an error then the operation has failed and
250 * the AST routine will not be called. If dlm_lock returns 0 it is still
251 * possible that the lock operation will fail. The AST routine will be called
252 * when the locking is complete and the status is returned in the lksb.
253 *
254 * If the AST routines or parameter are passed to a conversion operation then
255 * they will overwrite those values that were passed to a previous dlm_lock
256 * call.
257 *
258 * AST routines should not block (at least not for long), but may make
259 * any locking calls they please.
260 */
261
262int dlm_lock(dlm_lockspace_t *lockspace,
263 int mode,
264 struct dlm_lksb *lksb,
265 uint32_t flags,
266 void *name,
267 unsigned int namelen,
268 uint32_t parent_lkid,
269 void (*lockast) (void *astarg),
270 void *astarg,
271 void (*bast) (void *astarg, int mode));
272
273/*
274 * dlm_unlock
275 *
276 * Asynchronously release a lock on a resource. The AST routine is called
277 * when the resource is successfully unlocked.
278 *
279 * lockspace: context for the request
280 * lkid: the lock ID as returned in the lksb
281 * flags: input flags (DLM_LKF_)
282 * lksb: if NULL the lksb parameter passed to last lock request is used
283 * astarg: the arg used with the completion ast for the unlock
284 *
285 * Returns:
286 * 0 if request is successfully queued for processing
287 * -EINVAL if any input parameters are invalid
288 * -ENOTEMPTY if the lock still has sublocks
289 * -EBUSY if the lock is waiting for a remote lock operation
290 * -ENOTCONN if there is a communication error
291 */
292
293int dlm_unlock(dlm_lockspace_t *lockspace,
294 uint32_t lkid,
295 uint32_t flags,
296 struct dlm_lksb *lksb,
297 void *astarg);
298
299#endif /* __KERNEL__ */
300
301#endif /* __DLM_DOT_H__ */
302
diff --git a/include/linux/dlm_device.h b/include/linux/dlm_device.h
new file mode 100644
index 000000000000..2a2dd189b9fd
--- /dev/null
+++ b/include/linux/dlm_device.h
@@ -0,0 +1,86 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14/* This is the device interface for dlm, most users will use a library
15 * interface.
16 */
17
18#define DLM_USER_LVB_LEN 32
19
20/* Version of the device interface */
21#define DLM_DEVICE_VERSION_MAJOR 5
22#define DLM_DEVICE_VERSION_MINOR 0
23#define DLM_DEVICE_VERSION_PATCH 0
24
25/* struct passed to the lock write */
26struct dlm_lock_params {
27 __u8 mode;
28 __u8 namelen;
29 __u16 flags;
30 __u32 lkid;
31 __u32 parent;
32 void __user *castparam;
33 void __user *castaddr;
34 void __user *bastparam;
35 void __user *bastaddr;
36 struct dlm_lksb __user *lksb;
37 char lvb[DLM_USER_LVB_LEN];
38 char name[0];
39};
40
41struct dlm_lspace_params {
42 __u32 flags;
43 __u32 minor;
44 char name[0];
45};
46
47struct dlm_write_request {
48 __u32 version[3];
49 __u8 cmd;
50 __u8 is64bit;
51 __u8 unused[2];
52
53 union {
54 struct dlm_lock_params lock;
55 struct dlm_lspace_params lspace;
56 } i;
57};
58
59/* struct read from the "device" fd,
60 consists mainly of userspace pointers for the library to use */
61struct dlm_lock_result {
62 __u32 length;
63 void __user * user_astaddr;
64 void __user * user_astparam;
65 struct dlm_lksb __user * user_lksb;
66 struct dlm_lksb lksb;
67 __u8 bast_mode;
68 __u8 unused[3];
69 /* Offsets may be zero if no data is present */
70 __u32 lvb_offset;
71};
72
73/* Commands passed to the device */
74#define DLM_USER_LOCK 1
75#define DLM_USER_UNLOCK 2
76#define DLM_USER_QUERY 3
77#define DLM_USER_CREATE_LOCKSPACE 4
78#define DLM_USER_REMOVE_LOCKSPACE 5
79
80/* Arbitrary length restriction */
81#define MAX_LS_NAME_LEN 64
82
83/* Lockspace flags */
84#define DLM_USER_LSFLG_AUTOFREE 1
85#define DLM_USER_LSFLG_FORCEFREE 2
86
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f53bf4ff1955..34406ed467c3 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -250,6 +250,8 @@ extern int dir_notify_enable;
250#define FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */ 250#define FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */
251#define FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 251#define FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
252#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ 252#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
253#define FS_EXTENT_FL 0x00080000 /* Extents */
254#define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */
253#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ 255#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
254 256
255#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ 257#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 16fbe59edeb1..3da29e2d524a 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -46,18 +46,17 @@
46 46
47struct gianfar_platform_data { 47struct gianfar_platform_data {
48 /* device specific information */ 48 /* device specific information */
49 u32 device_flags; 49 u32 device_flags;
50
51 /* board specific information */ 50 /* board specific information */
52 u32 board_flags; 51 u32 board_flags;
53 u32 bus_id; 52 u32 bus_id;
54 u32 phy_id; 53 u32 phy_id;
55 u8 mac_addr[6]; 54 u8 mac_addr[6];
56}; 55};
57 56
58struct gianfar_mdio_data { 57struct gianfar_mdio_data {
59 /* board specific information */ 58 /* board specific information */
60 int irq[32]; 59 int irq[32];
61}; 60};
62 61
63/* Flags related to gianfar device features */ 62/* Flags related to gianfar device features */
@@ -76,14 +75,13 @@ struct gianfar_mdio_data {
76 75
77struct fsl_i2c_platform_data { 76struct fsl_i2c_platform_data {
78 /* device specific information */ 77 /* device specific information */
79 u32 device_flags; 78 u32 device_flags;
80}; 79};
81 80
82/* Flags related to I2C device features */ 81/* Flags related to I2C device features */
83#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001 82#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001
84#define FSL_I2C_DEV_CLOCK_5200 0x00000002 83#define FSL_I2C_DEV_CLOCK_5200 0x00000002
85 84
86
87enum fsl_usb2_operating_modes { 85enum fsl_usb2_operating_modes {
88 FSL_USB2_MPH_HOST, 86 FSL_USB2_MPH_HOST,
89 FSL_USB2_DR_HOST, 87 FSL_USB2_DR_HOST,
@@ -101,9 +99,9 @@ enum fsl_usb2_phy_modes {
101 99
102struct fsl_usb2_platform_data { 100struct fsl_usb2_platform_data {
103 /* board specific information */ 101 /* board specific information */
104 enum fsl_usb2_operating_modes operating_mode; 102 enum fsl_usb2_operating_modes operating_mode;
105 enum fsl_usb2_phy_modes phy_mode; 103 enum fsl_usb2_phy_modes phy_mode;
106 unsigned int port_enables; 104 unsigned int port_enables;
107}; 105};
108 106
109/* Flags in fsl_usb2_mph_platform_data */ 107/* Flags in fsl_usb2_mph_platform_data */
@@ -121,5 +119,44 @@ struct fsl_spi_platform_data {
121 u32 sysclk; 119 u32 sysclk;
122}; 120};
123 121
124#endif /* _FSL_DEVICE_H_ */ 122/* Ethernet interface (phy management and speed)
125#endif /* __KERNEL__ */ 123*/
124enum enet_interface {
125 ENET_10_MII, /* 10 Base T, MII interface */
126 ENET_10_RMII, /* 10 Base T, RMII interface */
127 ENET_10_RGMII, /* 10 Base T, RGMII interface */
128 ENET_100_MII, /* 100 Base T, MII interface */
129 ENET_100_RMII, /* 100 Base T, RMII interface */
130 ENET_100_RGMII, /* 100 Base T, RGMII interface */
131 ENET_1000_GMII, /* 1000 Base T, GMII interface */
132 ENET_1000_RGMII, /* 1000 Base T, RGMII interface */
133 ENET_1000_TBI, /* 1000 Base T, TBI interface */
134 ENET_1000_RTBI /* 1000 Base T, RTBI interface */
135};
136
137struct ucc_geth_platform_data {
138 /* device specific information */
139 u32 device_flags;
140 u32 phy_reg_addr;
141
142 /* board specific information */
143 u32 board_flags;
144 u8 rx_clock;
145 u8 tx_clock;
146 u32 phy_id;
147 enum enet_interface phy_interface;
148 u32 phy_interrupt;
149 u8 mac_addr[6];
150};
151
152/* Flags related to UCC Gigabit Ethernet device features */
153#define FSL_UGETH_DEV_HAS_GIGABIT 0x00000001
154#define FSL_UGETH_DEV_HAS_COALESCE 0x00000002
155#define FSL_UGETH_DEV_HAS_RMON 0x00000004
156
157/* Flags in ucc_geth_platform_data */
158#define FSL_UGETH_BRD_HAS_PHY_INTR 0x00000001
159 /* if not set use a timer */
160
161#endif /* _FSL_DEVICE_H_ */
162#endif /* __KERNEL__ */
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
new file mode 100644
index 000000000000..a7ae7c177cac
--- /dev/null
+++ b/include/linux/gfs2_ondisk.h
@@ -0,0 +1,443 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License v.2.
8 */
9
10#ifndef __GFS2_ONDISK_DOT_H__
11#define __GFS2_ONDISK_DOT_H__
12
13#define GFS2_MAGIC 0x01161970
14#define GFS2_BASIC_BLOCK 512
15#define GFS2_BASIC_BLOCK_SHIFT 9
16
17/* Lock numbers of the LM_TYPE_NONDISK type */
18
19#define GFS2_MOUNT_LOCK 0
20#define GFS2_LIVE_LOCK 1
21#define GFS2_TRANS_LOCK 2
22#define GFS2_RENAME_LOCK 3
23
24/* Format numbers for various metadata types */
25
26#define GFS2_FORMAT_NONE 0
27#define GFS2_FORMAT_SB 100
28#define GFS2_FORMAT_RG 200
29#define GFS2_FORMAT_RB 300
30#define GFS2_FORMAT_DI 400
31#define GFS2_FORMAT_IN 500
32#define GFS2_FORMAT_LF 600
33#define GFS2_FORMAT_JD 700
34#define GFS2_FORMAT_LH 800
35#define GFS2_FORMAT_LD 900
36#define GFS2_FORMAT_LB 1000
37#define GFS2_FORMAT_EA 1600
38#define GFS2_FORMAT_ED 1700
39#define GFS2_FORMAT_QC 1400
40/* These are format numbers for entities contained in files */
41#define GFS2_FORMAT_RI 1100
42#define GFS2_FORMAT_DE 1200
43#define GFS2_FORMAT_QU 1500
44/* These are part of the superblock */
45#define GFS2_FORMAT_FS 1801
46#define GFS2_FORMAT_MULTI 1900
47
48/*
49 * An on-disk inode number
50 */
51
52struct gfs2_inum {
53 __be64 no_formal_ino;
54 __be64 no_addr;
55};
56
57static inline int gfs2_inum_equal(const struct gfs2_inum *ino1,
58 const struct gfs2_inum *ino2)
59{
60 return ino1->no_formal_ino == ino2->no_formal_ino &&
61 ino1->no_addr == ino2->no_addr;
62}
63
64/*
65 * Generic metadata head structure
66 * Every inplace buffer logged in the journal must start with this.
67 */
68
69#define GFS2_METATYPE_NONE 0
70#define GFS2_METATYPE_SB 1
71#define GFS2_METATYPE_RG 2
72#define GFS2_METATYPE_RB 3
73#define GFS2_METATYPE_DI 4
74#define GFS2_METATYPE_IN 5
75#define GFS2_METATYPE_LF 6
76#define GFS2_METATYPE_JD 7
77#define GFS2_METATYPE_LH 8
78#define GFS2_METATYPE_LD 9
79#define GFS2_METATYPE_LB 12
80#define GFS2_METATYPE_EA 10
81#define GFS2_METATYPE_ED 11
82#define GFS2_METATYPE_QC 14
83
84struct gfs2_meta_header {
85 __be32 mh_magic;
86 __be32 mh_type;
87 __be64 __pad0; /* Was generation number in gfs1 */
88 __be32 mh_format;
89 __be32 __pad1; /* Was incarnation number in gfs1 */
90};
91
92/*
93 * super-block structure
94 *
95 * It's probably good if SIZEOF_SB <= GFS2_BASIC_BLOCK (512 bytes)
96 *
97 * Order is important, need to be able to read old superblocks to do on-disk
98 * version upgrades.
99 */
100
101/* Address of superblock in GFS2 basic blocks */
102#define GFS2_SB_ADDR 128
103
104/* The lock number for the superblock (must be zero) */
105#define GFS2_SB_LOCK 0
106
107/* Requirement: GFS2_LOCKNAME_LEN % 8 == 0
108 Includes: the fencing zero at the end */
109#define GFS2_LOCKNAME_LEN 64
110
111struct gfs2_sb {
112 struct gfs2_meta_header sb_header;
113
114 __be32 sb_fs_format;
115 __be32 sb_multihost_format;
116 __u32 __pad0; /* Was superblock flags in gfs1 */
117
118 __be32 sb_bsize;
119 __be32 sb_bsize_shift;
120 __u32 __pad1; /* Was journal segment size in gfs1 */
121
122 struct gfs2_inum sb_master_dir; /* Was jindex dinode in gfs1 */
123 struct gfs2_inum __pad2; /* Was rindex dinode in gfs1 */
124 struct gfs2_inum sb_root_dir;
125
126 char sb_lockproto[GFS2_LOCKNAME_LEN];
127 char sb_locktable[GFS2_LOCKNAME_LEN];
128 /* In gfs1, quota and license dinodes followed */
129};
130
131/*
132 * resource index structure
133 */
134
135struct gfs2_rindex {
136 __be64 ri_addr; /* grp block disk address */
137 __be32 ri_length; /* length of rgrp header in fs blocks */
138 __u32 __pad;
139
140 __be64 ri_data0; /* first data location */
141 __be32 ri_data; /* num of data blocks in rgrp */
142
143 __be32 ri_bitbytes; /* number of bytes in data bitmaps */
144
145 __u8 ri_reserved[64];
146};
147
148/*
149 * resource group header structure
150 */
151
152/* Number of blocks per byte in rgrp */
153#define GFS2_NBBY 4
154#define GFS2_BIT_SIZE 2
155#define GFS2_BIT_MASK 0x00000003
156
157#define GFS2_BLKST_FREE 0
158#define GFS2_BLKST_USED 1
159#define GFS2_BLKST_UNLINKED 2
160#define GFS2_BLKST_DINODE 3
161
162#define GFS2_RGF_JOURNAL 0x00000001
163#define GFS2_RGF_METAONLY 0x00000002
164#define GFS2_RGF_DATAONLY 0x00000004
165#define GFS2_RGF_NOALLOC 0x00000008
166
167struct gfs2_rgrp {
168 struct gfs2_meta_header rg_header;
169
170 __be32 rg_flags;
171 __be32 rg_free;
172 __be32 rg_dinodes;
173 __be32 __pad;
174 __be64 rg_igeneration;
175
176 __u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */
177};
178
179/*
180 * quota structure
181 */
182
183struct gfs2_quota {
184 __be64 qu_limit;
185 __be64 qu_warn;
186 __be64 qu_value;
187 __u8 qu_reserved[64];
188};
189
190/*
191 * dinode structure
192 */
193
194#define GFS2_MAX_META_HEIGHT 10
195#define GFS2_DIR_MAX_DEPTH 17
196
197#define DT2IF(dt) (((dt) << 12) & S_IFMT)
198#define IF2DT(sif) (((sif) & S_IFMT) >> 12)
199
200enum {
201 gfs2fl_Jdata = 0,
202 gfs2fl_ExHash = 1,
203 gfs2fl_Unused = 2,
204 gfs2fl_EaIndirect = 3,
205 gfs2fl_Directio = 4,
206 gfs2fl_Immutable = 5,
207 gfs2fl_AppendOnly = 6,
208 gfs2fl_NoAtime = 7,
209 gfs2fl_Sync = 8,
210 gfs2fl_System = 9,
211 gfs2fl_TruncInProg = 29,
212 gfs2fl_InheritDirectio = 30,
213 gfs2fl_InheritJdata = 31,
214};
215
216/* Dinode flags */
217#define GFS2_DIF_JDATA 0x00000001
218#define GFS2_DIF_EXHASH 0x00000002
219#define GFS2_DIF_UNUSED 0x00000004 /* only in gfs1 */
220#define GFS2_DIF_EA_INDIRECT 0x00000008
221#define GFS2_DIF_DIRECTIO 0x00000010
222#define GFS2_DIF_IMMUTABLE 0x00000020
223#define GFS2_DIF_APPENDONLY 0x00000040
224#define GFS2_DIF_NOATIME 0x00000080
225#define GFS2_DIF_SYNC 0x00000100
226#define GFS2_DIF_SYSTEM 0x00000200 /* New in gfs2 */
227#define GFS2_DIF_TRUNC_IN_PROG 0x20000000 /* New in gfs2 */
228#define GFS2_DIF_INHERIT_DIRECTIO 0x40000000
229#define GFS2_DIF_INHERIT_JDATA 0x80000000
230
231struct gfs2_dinode {
232 struct gfs2_meta_header di_header;
233
234 struct gfs2_inum di_num;
235
236 __be32 di_mode; /* mode of file */
237 __be32 di_uid; /* owner's user id */
238 __be32 di_gid; /* owner's group id */
239 __be32 di_nlink; /* number of links to this file */
240 __be64 di_size; /* number of bytes in file */
241 __be64 di_blocks; /* number of blocks in file */
242 __be64 di_atime; /* time last accessed */
243 __be64 di_mtime; /* time last modified */
244 __be64 di_ctime; /* time last changed */
245 __be32 di_major; /* device major number */
246 __be32 di_minor; /* device minor number */
247
248 /* This section varies from gfs1. Padding added to align with
249 * remainder of dinode
250 */
251 __be64 di_goal_meta; /* rgrp to alloc from next */
252 __be64 di_goal_data; /* data block goal */
253 __be64 di_generation; /* generation number for NFS */
254
255 __be32 di_flags; /* GFS2_DIF_... */
256 __be32 di_payload_format; /* GFS2_FORMAT_... */
257 __u16 __pad1; /* Was ditype in gfs1 */
258 __be16 di_height; /* height of metadata */
259 __u32 __pad2; /* Unused incarnation number from gfs1 */
260
261 /* These only apply to directories */
262 __u16 __pad3; /* Padding */
263 __be16 di_depth; /* Number of bits in the table */
264 __be32 di_entries; /* The number of entries in the directory */
265
266 struct gfs2_inum __pad4; /* Unused even in current gfs1 */
267
268 __be64 di_eattr; /* extended attribute block number */
269
270 __u8 di_reserved[56];
271};
272
273/*
274 * directory structure - many of these per directory file
275 */
276
277#define GFS2_FNAMESIZE 255
278#define GFS2_DIRENT_SIZE(name_len) ((sizeof(struct gfs2_dirent) + (name_len) + 7) & ~7)
279
280struct gfs2_dirent {
281 struct gfs2_inum de_inum;
282 __be32 de_hash;
283 __be16 de_rec_len;
284 __be16 de_name_len;
285 __be16 de_type;
286 __u8 __pad[14];
287};
288
289/*
290 * Header of leaf directory nodes
291 */
292
293struct gfs2_leaf {
294 struct gfs2_meta_header lf_header;
295
296 __be16 lf_depth; /* Depth of leaf */
297 __be16 lf_entries; /* Number of dirents in leaf */
298 __be32 lf_dirent_format; /* Format of the dirents */
299 __be64 lf_next; /* Next leaf, if overflow */
300
301 __u8 lf_reserved[64];
302};
303
304/*
305 * Extended attribute header format
306 */
307
308#define GFS2_EA_MAX_NAME_LEN 255
309#define GFS2_EA_MAX_DATA_LEN 65536
310
311#define GFS2_EATYPE_UNUSED 0
312#define GFS2_EATYPE_USR 1
313#define GFS2_EATYPE_SYS 2
314#define GFS2_EATYPE_SECURITY 3
315
316#define GFS2_EATYPE_LAST 3
317#define GFS2_EATYPE_VALID(x) ((x) <= GFS2_EATYPE_LAST)
318
319#define GFS2_EAFLAG_LAST 0x01 /* last ea in block */
320
321struct gfs2_ea_header {
322 __be32 ea_rec_len;
323 __be32 ea_data_len;
324 __u8 ea_name_len; /* no NULL pointer after the string */
325 __u8 ea_type; /* GFS2_EATYPE_... */
326 __u8 ea_flags; /* GFS2_EAFLAG_... */
327 __u8 ea_num_ptrs;
328 __u32 __pad;
329};
330
331/*
332 * Log header structure
333 */
334
335#define GFS2_LOG_HEAD_UNMOUNT 0x00000001 /* log is clean */
336
337struct gfs2_log_header {
338 struct gfs2_meta_header lh_header;
339
340 __be64 lh_sequence; /* Sequence number of this transaction */
341 __be32 lh_flags; /* GFS2_LOG_HEAD_... */
342 __be32 lh_tail; /* Block number of log tail */
343 __be32 lh_blkno;
344 __be32 lh_hash;
345};
346
347/*
348 * Log type descriptor
349 */
350
351#define GFS2_LOG_DESC_METADATA 300
352/* ld_data1 is the number of metadata blocks in the descriptor.
353 ld_data2 is unused. */
354
355#define GFS2_LOG_DESC_REVOKE 301
356/* ld_data1 is the number of revoke blocks in the descriptor.
357 ld_data2 is unused. */
358
359#define GFS2_LOG_DESC_JDATA 302
360/* ld_data1 is the number of data blocks in the descriptor.
361 ld_data2 is unused. */
362
363struct gfs2_log_descriptor {
364 struct gfs2_meta_header ld_header;
365
366 __be32 ld_type; /* GFS2_LOG_DESC_... */
367 __be32 ld_length; /* Number of buffers in this chunk */
368 __be32 ld_data1; /* descriptor-specific field */
369 __be32 ld_data2; /* descriptor-specific field */
370
371 __u8 ld_reserved[32];
372};
373
374/*
375 * Inum Range
376 * Describe a range of formal inode numbers allocated to
377 * one machine to assign to inodes.
378 */
379
380#define GFS2_INUM_QUANTUM 1048576
381
382struct gfs2_inum_range {
383 __be64 ir_start;
384 __be64 ir_length;
385};
386
387/*
388 * Statfs change
389 * Describes a change to the pool of free and allocated
390 * blocks.
391 */
392
393struct gfs2_statfs_change {
394 __be64 sc_total;
395 __be64 sc_free;
396 __be64 sc_dinodes;
397};
398
399/*
400 * Quota change
401 * Describes an allocation change for a particular
402 * user or group.
403 */
404
405#define GFS2_QCF_USER 0x00000001
406
407struct gfs2_quota_change {
408 __be64 qc_change;
409 __be32 qc_flags; /* GFS2_QCF_... */
410 __be32 qc_id;
411};
412
413#ifdef __KERNEL__
414/* Translation functions */
415
416extern void gfs2_inum_in(struct gfs2_inum *no, const void *buf);
417extern void gfs2_inum_out(const struct gfs2_inum *no, void *buf);
418extern void gfs2_sb_in(struct gfs2_sb *sb, const void *buf);
419extern void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf);
420extern void gfs2_rindex_out(const struct gfs2_rindex *ri, void *buf);
421extern void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf);
422extern void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf);
423extern void gfs2_quota_in(struct gfs2_quota *qu, const void *buf);
424extern void gfs2_quota_out(const struct gfs2_quota *qu, void *buf);
425extern void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf);
426extern void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf);
427extern void gfs2_ea_header_in(struct gfs2_ea_header *ea, const void *buf);
428extern void gfs2_ea_header_out(const struct gfs2_ea_header *ea, void *buf);
429extern void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf);
430extern void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf);
431extern void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf);
432extern void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf);
433extern void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf);
434extern void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf);
435
436/* Printing functions */
437
438extern void gfs2_rindex_print(const struct gfs2_rindex *ri);
439extern void gfs2_dinode_print(const struct gfs2_dinode *di);
440
441#endif /* __KERNEL__ */
442
443#endif /* __GFS2_ONDISK_DOT_H__ */
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 50d8b5744cf6..612472aaa79c 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -28,11 +28,16 @@
28 28
29#ifndef HARDIRQ_BITS 29#ifndef HARDIRQ_BITS
30#define HARDIRQ_BITS 12 30#define HARDIRQ_BITS 12
31
32#ifndef MAX_HARDIRQS_PER_CPU
33#define MAX_HARDIRQS_PER_CPU NR_IRQS
34#endif
35
31/* 36/*
32 * The hardirq mask has to be large enough to have space for potentially 37 * The hardirq mask has to be large enough to have space for potentially
33 * all IRQ sources in the system nesting on a single CPU. 38 * all IRQ sources in the system nesting on a single CPU.
34 */ 39 */
35#if (1 << HARDIRQ_BITS) < NR_IRQS 40#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
36# error HARDIRQ_BITS is too low! 41# error HARDIRQ_BITS is too low!
37#endif 42#endif
38#endif 43#endif
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
new file mode 100644
index 000000000000..1f15ce279a23
--- /dev/null
+++ b/include/linux/htirq.h
@@ -0,0 +1,15 @@
1#ifndef LINUX_HTIRQ_H
2#define LINUX_HTIRQ_H
3
4/* Helper functions.. */
5void write_ht_irq_low(unsigned int irq, u32 data);
6void write_ht_irq_high(unsigned int irq, u32 data);
7u32 read_ht_irq_low(unsigned int irq);
8u32 read_ht_irq_high(unsigned int irq);
9void mask_ht_irq(unsigned int irq);
10void unmask_ht_irq(unsigned int irq);
11
12/* The arch hook for getting things started */
13int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
14
15#endif /* LINUX_HTIRQ_H */
diff --git a/include/linux/in.h b/include/linux/in.h
index d79fc75fa7c2..2619859f6e1b 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -40,6 +40,7 @@ enum {
40 40
41 IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */ 41 IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */
42 IPPROTO_AH = 51, /* Authentication Header protocol */ 42 IPPROTO_AH = 51, /* Authentication Header protocol */
43 IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */
43 IPPROTO_PIM = 103, /* Protocol Independent Multicast */ 44 IPPROTO_PIM = 103, /* Protocol Independent Multicast */
44 45
45 IPPROTO_COMP = 108, /* Compression Header protocol */ 46 IPPROTO_COMP = 108, /* Compression Header protocol */
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 6b25d36fc54c..ecee9bb27d0e 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -80,6 +80,8 @@
80#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */ 80#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */
81#define IPOPT_TS_PRESPEC 3 /* specified modules only */ 81#define IPOPT_TS_PRESPEC 3 /* specified modules only */
82 82
83#define IPV4_BEET_PHMAXLEN 8
84
83struct iphdr { 85struct iphdr {
84#if defined(__LITTLE_ENDIAN_BITFIELD) 86#if defined(__LITTLE_ENDIAN_BITFIELD)
85 __u8 ihl:4, 87 __u8 ihl:4,
@@ -123,4 +125,11 @@ struct ip_comp_hdr {
123 __be16 cpi; 125 __be16 cpi;
124}; 126};
125 127
128struct ip_beet_phdr {
129 __u8 nexthdr;
130 __u8 hdrlen;
131 __u8 padlen;
132 __u8 reserved;
133};
134
126#endif /* _LINUX_IP_H */ 135#endif /* _LINUX_IP_H */
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index d9e2b3f36c35..636094c29b16 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -2,7 +2,6 @@
2#define _LINUX_IPC_H 2#define _LINUX_IPC_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/kref.h>
6 5
7#define IPC_PRIVATE ((__kernel_key_t) 0) 6#define IPC_PRIVATE ((__kernel_key_t) 0)
8 7
@@ -52,6 +51,8 @@ struct ipc_perm
52 51
53#ifdef __KERNEL__ 52#ifdef __KERNEL__
54 53
54#include <linux/kref.h>
55
55#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */ 56#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
56 57
57/* used by in-kernel data structures */ 58/* used by in-kernel data structures */
diff --git a/include/linux/ipsec.h b/include/linux/ipsec.h
index d3c527616b5e..d17a6302a0e9 100644
--- a/include/linux/ipsec.h
+++ b/include/linux/ipsec.h
@@ -12,7 +12,8 @@
12enum { 12enum {
13 IPSEC_MODE_ANY = 0, /* We do not support this for SA */ 13 IPSEC_MODE_ANY = 0, /* We do not support this for SA */
14 IPSEC_MODE_TRANSPORT = 1, 14 IPSEC_MODE_TRANSPORT = 1,
15 IPSEC_MODE_TUNNEL = 2 15 IPSEC_MODE_TUNNEL = 2,
16 IPSEC_MODE_BEET = 3
16}; 17};
17 18
18enum { 19enum {
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 48d3cb3b6a47..6f463606c318 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -59,6 +59,7 @@
59#define IRQ_NOAUTOEN 0x08000000 /* IRQ will not be enabled on request irq */ 59#define IRQ_NOAUTOEN 0x08000000 /* IRQ will not be enabled on request irq */
60#define IRQ_DELAYED_DISABLE 0x10000000 /* IRQ disable (masking) happens delayed. */ 60#define IRQ_DELAYED_DISABLE 0x10000000 /* IRQ disable (masking) happens delayed. */
61#define IRQ_WAKEUP 0x20000000 /* IRQ triggers system wakeup */ 61#define IRQ_WAKEUP 0x20000000 /* IRQ triggers system wakeup */
62#define IRQ_MOVE_PENDING 0x40000000 /* need to re-target IRQ destination */
62 63
63struct proc_dir_entry; 64struct proc_dir_entry;
64 65
@@ -132,7 +133,6 @@ struct irq_chip {
132 * @affinity: IRQ affinity on SMP 133 * @affinity: IRQ affinity on SMP
133 * @cpu: cpu index useful for balancing 134 * @cpu: cpu index useful for balancing
134 * @pending_mask: pending rebalanced interrupts 135 * @pending_mask: pending rebalanced interrupts
135 * @move_irq: need to re-target IRQ destination
136 * @dir: /proc/irq/ procfs entry 136 * @dir: /proc/irq/ procfs entry
137 * @affinity_entry: /proc/irq/smp_affinity procfs entry on SMP 137 * @affinity_entry: /proc/irq/smp_affinity procfs entry on SMP
138 * 138 *
@@ -159,7 +159,6 @@ struct irq_desc {
159#endif 159#endif
160#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE) 160#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
161 cpumask_t pending_mask; 161 cpumask_t pending_mask;
162 unsigned int move_irq; /* need to re-target IRQ dest */
163#endif 162#endif
164#ifdef CONFIG_PROC_FS 163#ifdef CONFIG_PROC_FS
165 struct proc_dir_entry *dir; 164 struct proc_dir_entry *dir;
@@ -206,36 +205,7 @@ static inline void set_native_irq_info(int irq, cpumask_t mask)
206 205
207void set_pending_irq(unsigned int irq, cpumask_t mask); 206void set_pending_irq(unsigned int irq, cpumask_t mask);
208void move_native_irq(int irq); 207void move_native_irq(int irq);
209 208void move_masked_irq(int irq);
210#ifdef CONFIG_PCI_MSI
211/*
212 * Wonder why these are dummies?
213 * For e.g the set_ioapic_affinity_vector() calls the set_ioapic_affinity_irq()
214 * counter part after translating the vector to irq info. We need to perform
215 * this operation on the real irq, when we dont use vector, i.e when
216 * pci_use_vector() is false.
217 */
218static inline void move_irq(int irq)
219{
220}
221
222static inline void set_irq_info(int irq, cpumask_t mask)
223{
224}
225
226#else /* CONFIG_PCI_MSI */
227
228static inline void move_irq(int irq)
229{
230 move_native_irq(irq);
231}
232
233static inline void set_irq_info(int irq, cpumask_t mask)
234{
235 set_native_irq_info(irq, mask);
236}
237
238#endif /* CONFIG_PCI_MSI */
239 209
240#else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */ 210#else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */
241 211
@@ -247,21 +217,20 @@ static inline void move_native_irq(int irq)
247{ 217{
248} 218}
249 219
250static inline void set_pending_irq(unsigned int irq, cpumask_t mask) 220static inline void move_masked_irq(int irq)
251{ 221{
252} 222}
253 223
254static inline void set_irq_info(int irq, cpumask_t mask) 224static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
255{ 225{
256 set_native_irq_info(irq, mask);
257} 226}
258 227
259#endif /* CONFIG_GENERIC_PENDING_IRQ */ 228#endif /* CONFIG_GENERIC_PENDING_IRQ */
260 229
261#else /* CONFIG_SMP */ 230#else /* CONFIG_SMP */
262 231
263#define move_irq(x)
264#define move_native_irq(x) 232#define move_native_irq(x)
233#define move_masked_irq(x)
265 234
266#endif /* CONFIG_SMP */ 235#endif /* CONFIG_SMP */
267 236
@@ -399,8 +368,22 @@ set_irq_chained_handler(unsigned int irq,
399 __set_irq_handler(irq, handle, 1); 368 __set_irq_handler(irq, handle, 1);
400} 369}
401 370
402/* Set/get chip/data for an IRQ: */ 371/* Handle dynamic irq creation and destruction */
372extern int create_irq(void);
373extern void destroy_irq(unsigned int irq);
403 374
375/* Test to see if a driver has successfully requested an irq */
376static inline int irq_has_action(unsigned int irq)
377{
378 struct irq_desc *desc = irq_desc + irq;
379 return desc->action != NULL;
380}
381
382/* Dynamic irq helper functions */
383extern void dynamic_irq_init(unsigned int irq);
384extern void dynamic_irq_cleanup(unsigned int irq);
385
386/* Set/get chip/data for an IRQ: */
404extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); 387extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
405extern int set_irq_data(unsigned int irq, void *data); 388extern int set_irq_data(unsigned int irq, void *data);
406extern int set_irq_chip_data(unsigned int irq, void *data); 389extern int set_irq_chip_data(unsigned int irq, void *data);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index d6a3d4b345fc..d1af1dbeaeb4 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -109,6 +109,10 @@ static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
109#define ATA_TAG_POISON 0xfafbfcfdU 109#define ATA_TAG_POISON 0xfafbfcfdU
110 110
111/* move to PCI layer? */ 111/* move to PCI layer? */
112#define PCI_VDEVICE(vendor, device) \
113 PCI_VENDOR_ID_##vendor, (device), \
114 PCI_ANY_ID, PCI_ANY_ID, 0, 0
115
112static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) 116static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
113{ 117{
114 return &pdev->dev; 118 return &pdev->dev;
@@ -138,8 +142,9 @@ enum {
138 ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */ 142 ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */
139 ATA_DFLAG_CFG_MASK = (1 << 8) - 1, 143 ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
140 144
141 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */ 145 ATA_DFLAG_PIO = (1 << 8), /* device limited to PIO mode */
142 ATA_DFLAG_SUSPENDED = (1 << 9), /* device suspended */ 146 ATA_DFLAG_NCQ_OFF = (1 << 9), /* devied limited to non-NCQ mode */
147 ATA_DFLAG_SUSPENDED = (1 << 10), /* device suspended */
143 ATA_DFLAG_INIT_MASK = (1 << 16) - 1, 148 ATA_DFLAG_INIT_MASK = (1 << 16) - 1,
144 149
145 ATA_DFLAG_DETACH = (1 << 16), 150 ATA_DFLAG_DETACH = (1 << 16),
diff --git a/include/linux/lm_interface.h b/include/linux/lm_interface.h
new file mode 100644
index 000000000000..1418fdc9ac02
--- /dev/null
+++ b/include/linux/lm_interface.h
@@ -0,0 +1,273 @@
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#ifndef __LM_INTERFACE_DOT_H__
11#define __LM_INTERFACE_DOT_H__
12
13
14typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data);
15
16/*
17 * lm_mount() flags
18 *
19 * LM_MFLAG_SPECTATOR
20 * GFS is asking to join the filesystem's lockspace, but it doesn't want to
21 * modify the filesystem. The lock module shouldn't assign a journal to the FS
22 * mount. It shouldn't send recovery callbacks to the FS mount. If the node
23 * dies or withdraws, all locks can be wiped immediately.
24 */
25
26#define LM_MFLAG_SPECTATOR 0x00000001
27
28/*
29 * lm_lockstruct flags
30 *
31 * LM_LSFLAG_LOCAL
32 * The lock_nolock module returns LM_LSFLAG_LOCAL to GFS, indicating that GFS
33 * can make single-node optimizations.
34 */
35
36#define LM_LSFLAG_LOCAL 0x00000001
37
38/*
39 * lm_lockname types
40 */
41
42#define LM_TYPE_RESERVED 0x00
43#define LM_TYPE_NONDISK 0x01
44#define LM_TYPE_INODE 0x02
45#define LM_TYPE_RGRP 0x03
46#define LM_TYPE_META 0x04
47#define LM_TYPE_IOPEN 0x05
48#define LM_TYPE_FLOCK 0x06
49#define LM_TYPE_PLOCK 0x07
50#define LM_TYPE_QUOTA 0x08
51#define LM_TYPE_JOURNAL 0x09
52
53/*
54 * lm_lock() states
55 *
56 * SHARED is compatible with SHARED, not with DEFERRED or EX.
57 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
58 */
59
60#define LM_ST_UNLOCKED 0
61#define LM_ST_EXCLUSIVE 1
62#define LM_ST_DEFERRED 2
63#define LM_ST_SHARED 3
64
65/*
66 * lm_lock() flags
67 *
68 * LM_FLAG_TRY
69 * Don't wait to acquire the lock if it can't be granted immediately.
70 *
71 * LM_FLAG_TRY_1CB
72 * Send one blocking callback if TRY is set and the lock is not granted.
73 *
74 * LM_FLAG_NOEXP
75 * GFS sets this flag on lock requests it makes while doing journal recovery.
76 * These special requests should not be blocked due to the recovery like
77 * ordinary locks would be.
78 *
79 * LM_FLAG_ANY
80 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
81 * also be granted in SHARED. The preferred state is whichever is compatible
82 * with other granted locks, or the specified state if no other locks exist.
83 *
84 * LM_FLAG_PRIORITY
85 * Override fairness considerations. Suppose a lock is held in a shared state
86 * and there is a pending request for the deferred state. A shared lock
87 * request with the priority flag would be allowed to bypass the deferred
88 * request and directly join the other shared lock. A shared lock request
89 * without the priority flag might be forced to wait until the deferred
90 * requested had acquired and released the lock.
91 */
92
93#define LM_FLAG_TRY 0x00000001
94#define LM_FLAG_TRY_1CB 0x00000002
95#define LM_FLAG_NOEXP 0x00000004
96#define LM_FLAG_ANY 0x00000008
97#define LM_FLAG_PRIORITY 0x00000010
98
99/*
100 * lm_lock() and lm_async_cb return flags
101 *
102 * LM_OUT_ST_MASK
103 * Masks the lower two bits of lock state in the returned value.
104 *
105 * LM_OUT_CACHEABLE
106 * The lock hasn't been released so GFS can continue to cache data for it.
107 *
108 * LM_OUT_CANCELED
109 * The lock request was canceled.
110 *
111 * LM_OUT_ASYNC
112 * The result of the request will be returned in an LM_CB_ASYNC callback.
113 */
114
115#define LM_OUT_ST_MASK 0x00000003
116#define LM_OUT_CACHEABLE 0x00000004
117#define LM_OUT_CANCELED 0x00000008
118#define LM_OUT_ASYNC 0x00000080
119#define LM_OUT_ERROR 0x00000100
120
121/*
122 * lm_callback_t types
123 *
124 * LM_CB_NEED_E LM_CB_NEED_D LM_CB_NEED_S
125 * Blocking callback, a remote node is requesting the given lock in
126 * EXCLUSIVE, DEFERRED, or SHARED.
127 *
128 * LM_CB_NEED_RECOVERY
129 * The given journal needs to be recovered.
130 *
131 * LM_CB_DROPLOCKS
132 * Reduce the number of cached locks.
133 *
134 * LM_CB_ASYNC
135 * The given lock has been granted.
136 */
137
138#define LM_CB_NEED_E 257
139#define LM_CB_NEED_D 258
140#define LM_CB_NEED_S 259
141#define LM_CB_NEED_RECOVERY 260
142#define LM_CB_DROPLOCKS 261
143#define LM_CB_ASYNC 262
144
145/*
146 * lm_recovery_done() messages
147 */
148
149#define LM_RD_GAVEUP 308
150#define LM_RD_SUCCESS 309
151
152
153struct lm_lockname {
154 u64 ln_number;
155 unsigned int ln_type;
156};
157
158#define lm_name_equal(name1, name2) \
159 (((name1)->ln_number == (name2)->ln_number) && \
160 ((name1)->ln_type == (name2)->ln_type)) \
161
162struct lm_async_cb {
163 struct lm_lockname lc_name;
164 int lc_ret;
165};
166
167struct lm_lockstruct;
168
169struct lm_lockops {
170 const char *lm_proto_name;
171
172 /*
173 * Mount/Unmount
174 */
175
176 int (*lm_mount) (char *table_name, char *host_data,
177 lm_callback_t cb, void *cb_data,
178 unsigned int min_lvb_size, int flags,
179 struct lm_lockstruct *lockstruct,
180 struct kobject *fskobj);
181
182 void (*lm_others_may_mount) (void *lockspace);
183
184 void (*lm_unmount) (void *lockspace);
185
186 void (*lm_withdraw) (void *lockspace);
187
188 /*
189 * Lock oriented operations
190 */
191
192 int (*lm_get_lock) (void *lockspace, struct lm_lockname *name, void **lockp);
193
194 void (*lm_put_lock) (void *lock);
195
196 unsigned int (*lm_lock) (void *lock, unsigned int cur_state,
197 unsigned int req_state, unsigned int flags);
198
199 unsigned int (*lm_unlock) (void *lock, unsigned int cur_state);
200
201 void (*lm_cancel) (void *lock);
202
203 int (*lm_hold_lvb) (void *lock, char **lvbp);
204 void (*lm_unhold_lvb) (void *lock, char *lvb);
205
206 /*
207 * Posix Lock oriented operations
208 */
209
210 int (*lm_plock_get) (void *lockspace, struct lm_lockname *name,
211 struct file *file, struct file_lock *fl);
212
213 int (*lm_plock) (void *lockspace, struct lm_lockname *name,
214 struct file *file, int cmd, struct file_lock *fl);
215
216 int (*lm_punlock) (void *lockspace, struct lm_lockname *name,
217 struct file *file, struct file_lock *fl);
218
219 /*
220 * Client oriented operations
221 */
222
223 void (*lm_recovery_done) (void *lockspace, unsigned int jid,
224 unsigned int message);
225
226 struct module *lm_owner;
227};
228
229/*
230 * lm_mount() return values
231 *
232 * ls_jid - the journal ID this node should use
233 * ls_first - this node is the first to mount the file system
234 * ls_lvb_size - size in bytes of lock value blocks
235 * ls_lockspace - lock module's context for this file system
236 * ls_ops - lock module's functions
237 * ls_flags - lock module features
238 */
239
240struct lm_lockstruct {
241 unsigned int ls_jid;
242 unsigned int ls_first;
243 unsigned int ls_lvb_size;
244 void *ls_lockspace;
245 const struct lm_lockops *ls_ops;
246 int ls_flags;
247};
248
249/*
250 * Lock module bottom interface. A lock module makes itself available to GFS
251 * with these functions.
252 */
253
254int gfs2_register_lockproto(const struct lm_lockops *proto);
255void gfs2_unregister_lockproto(const struct lm_lockops *proto);
256
257/*
258 * Lock module top interface. GFS calls these functions when mounting or
259 * unmounting a file system.
260 */
261
262int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data,
263 lm_callback_t cb, void *cb_data,
264 unsigned int min_lvb_size, int flags,
265 struct lm_lockstruct *lockstruct,
266 struct kobject *fskobj);
267
268void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct);
269
270void gfs2_withdraw_lockproto(struct lm_lockstruct *lockstruct);
271
272#endif /* __LM_INTERFACE_DOT_H__ */
273
diff --git a/include/linux/lock_dlm_plock.h b/include/linux/lock_dlm_plock.h
new file mode 100644
index 000000000000..fc3415113973
--- /dev/null
+++ b/include/linux/lock_dlm_plock.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (C) 2005 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License v.2.
7 */
8
9#ifndef __LOCK_DLM_PLOCK_DOT_H__
10#define __LOCK_DLM_PLOCK_DOT_H__
11
12#define GDLM_PLOCK_MISC_NAME "lock_dlm_plock"
13
14#define GDLM_PLOCK_VERSION_MAJOR 1
15#define GDLM_PLOCK_VERSION_MINOR 1
16#define GDLM_PLOCK_VERSION_PATCH 0
17
18enum {
19 GDLM_PLOCK_OP_LOCK = 1,
20 GDLM_PLOCK_OP_UNLOCK,
21 GDLM_PLOCK_OP_GET,
22};
23
24struct gdlm_plock_info {
25 __u32 version[3];
26 __u8 optype;
27 __u8 ex;
28 __u8 wait;
29 __u8 pad;
30 __u32 pid;
31 __s32 nodeid;
32 __s32 rv;
33 __u32 fsid;
34 __u64 number;
35 __u64 start;
36 __u64 end;
37 __u64 owner;
38};
39
40#endif
41
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 47b7dbd647a6..2909619c0295 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -37,17 +37,15 @@
37 * Lockd host handle (used both by the client and server personality). 37 * Lockd host handle (used both by the client and server personality).
38 */ 38 */
39struct nlm_host { 39struct nlm_host {
40 struct nlm_host * h_next; /* linked list (hash table) */ 40 struct hlist_node h_hash; /* doubly linked list */
41 struct sockaddr_in h_addr; /* peer address */ 41 struct sockaddr_in h_addr; /* peer address */
42 struct rpc_clnt * h_rpcclnt; /* RPC client to talk to peer */ 42 struct rpc_clnt * h_rpcclnt; /* RPC client to talk to peer */
43 char h_name[20]; /* remote hostname */ 43 char * h_name; /* remote hostname */
44 u32 h_version; /* interface version */ 44 u32 h_version; /* interface version */
45 unsigned short h_proto; /* transport proto */ 45 unsigned short h_proto; /* transport proto */
46 unsigned short h_reclaiming : 1, 46 unsigned short h_reclaiming : 1,
47 h_server : 1, /* server side, not client side */ 47 h_server : 1, /* server side, not client side */
48 h_inuse : 1, 48 h_inuse : 1;
49 h_killed : 1,
50 h_monitored : 1;
51 wait_queue_head_t h_gracewait; /* wait while reclaiming */ 49 wait_queue_head_t h_gracewait; /* wait while reclaiming */
52 struct rw_semaphore h_rwsem; /* Reboot recovery lock */ 50 struct rw_semaphore h_rwsem; /* Reboot recovery lock */
53 u32 h_state; /* pseudo-state counter */ 51 u32 h_state; /* pseudo-state counter */
@@ -61,6 +59,16 @@ struct nlm_host {
61 spinlock_t h_lock; 59 spinlock_t h_lock;
62 struct list_head h_granted; /* Locks in GRANTED state */ 60 struct list_head h_granted; /* Locks in GRANTED state */
63 struct list_head h_reclaim; /* Locks in RECLAIM state */ 61 struct list_head h_reclaim; /* Locks in RECLAIM state */
62 struct nsm_handle * h_nsmhandle; /* NSM status handle */
63};
64
65struct nsm_handle {
66 struct list_head sm_link;
67 atomic_t sm_count;
68 char * sm_name;
69 struct sockaddr_in sm_addr;
70 unsigned int sm_monitored : 1,
71 sm_sticky : 1; /* don't unmonitor */
64}; 72};
65 73
66/* 74/*
@@ -96,15 +104,14 @@ struct nlm_rqst {
96 * an NFS client. 104 * an NFS client.
97 */ 105 */
98struct nlm_file { 106struct nlm_file {
99 struct nlm_file * f_next; /* linked list */ 107 struct hlist_node f_list; /* linked list */
100 struct nfs_fh f_handle; /* NFS file handle */ 108 struct nfs_fh f_handle; /* NFS file handle */
101 struct file * f_file; /* VFS file pointer */ 109 struct file * f_file; /* VFS file pointer */
102 struct nlm_share * f_shares; /* DOS shares */ 110 struct nlm_share * f_shares; /* DOS shares */
103 struct nlm_block * f_blocks; /* blocked locks */ 111 struct list_head f_blocks; /* blocked locks */
104 unsigned int f_locks; /* guesstimate # of locks */ 112 unsigned int f_locks; /* guesstimate # of locks */
105 unsigned int f_count; /* reference count */ 113 unsigned int f_count; /* reference count */
106 struct semaphore f_sema; /* avoid concurrent access */ 114 struct mutex f_mutex; /* avoid concurrent access */
107 int f_hash; /* hash of f_handle */
108}; 115};
109 116
110/* 117/*
@@ -114,26 +121,18 @@ struct nlm_file {
114#define NLM_NEVER (~(unsigned long) 0) 121#define NLM_NEVER (~(unsigned long) 0)
115struct nlm_block { 122struct nlm_block {
116 struct kref b_count; /* Reference count */ 123 struct kref b_count; /* Reference count */
117 struct nlm_block * b_next; /* linked list (all blocks) */ 124 struct list_head b_list; /* linked list of all blocks */
118 struct nlm_block * b_fnext; /* linked list (per file) */ 125 struct list_head b_flist; /* linked list (per file) */
119 struct nlm_rqst * b_call; /* RPC args & callback info */ 126 struct nlm_rqst * b_call; /* RPC args & callback info */
120 struct svc_serv * b_daemon; /* NLM service */ 127 struct svc_serv * b_daemon; /* NLM service */
121 struct nlm_host * b_host; /* host handle for RPC clnt */ 128 struct nlm_host * b_host; /* host handle for RPC clnt */
122 unsigned long b_when; /* next re-xmit */ 129 unsigned long b_when; /* next re-xmit */
123 unsigned int b_id; /* block id */ 130 unsigned int b_id; /* block id */
124 unsigned char b_queued; /* re-queued */
125 unsigned char b_granted; /* VFS granted lock */ 131 unsigned char b_granted; /* VFS granted lock */
126 struct nlm_file * b_file; /* file in question */ 132 struct nlm_file * b_file; /* file in question */
127}; 133};
128 134
129/* 135/*
130 * Valid actions for nlmsvc_traverse_files
131 */
132#define NLM_ACT_CHECK 0 /* check for locks */
133#define NLM_ACT_MARK 1 /* mark & sweep */
134#define NLM_ACT_UNLOCK 2 /* release all locks */
135
136/*
137 * Global variables 136 * Global variables
138 */ 137 */
139extern struct rpc_program nlm_program; 138extern struct rpc_program nlm_program;
@@ -143,6 +142,7 @@ extern struct svc_procedure nlmsvc_procedures4[];
143#endif 142#endif
144extern int nlmsvc_grace_period; 143extern int nlmsvc_grace_period;
145extern unsigned long nlmsvc_timeout; 144extern unsigned long nlmsvc_timeout;
145extern int nsm_use_hostnames;
146 146
147/* 147/*
148 * Lockd client functions 148 * Lockd client functions
@@ -155,22 +155,31 @@ struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock
155void nlmclnt_finish_block(struct nlm_wait *block); 155void nlmclnt_finish_block(struct nlm_wait *block);
156int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout); 156int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
157u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *); 157u32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *);
158void nlmclnt_recovery(struct nlm_host *, u32); 158void nlmclnt_recovery(struct nlm_host *);
159int nlmclnt_reclaim(struct nlm_host *, struct file_lock *); 159int nlmclnt_reclaim(struct nlm_host *, struct file_lock *);
160void nlmclnt_next_cookie(struct nlm_cookie *);
160 161
161/* 162/*
162 * Host cache 163 * Host cache
163 */ 164 */
164struct nlm_host * nlmclnt_lookup_host(struct sockaddr_in *, int, int); 165struct nlm_host * nlmclnt_lookup_host(const struct sockaddr_in *, int, int, const char *, int);
165struct nlm_host * nlmsvc_lookup_host(struct svc_rqst *); 166struct nlm_host * nlmsvc_lookup_host(struct svc_rqst *, const char *, int);
166struct nlm_host * nlm_lookup_host(int server, struct sockaddr_in *, int, int); 167struct nlm_host * nlm_lookup_host(int server, const struct sockaddr_in *, int, int, const char *, int);
167struct rpc_clnt * nlm_bind_host(struct nlm_host *); 168struct rpc_clnt * nlm_bind_host(struct nlm_host *);
168void nlm_rebind_host(struct nlm_host *); 169void nlm_rebind_host(struct nlm_host *);
169struct nlm_host * nlm_get_host(struct nlm_host *); 170struct nlm_host * nlm_get_host(struct nlm_host *);
170void nlm_release_host(struct nlm_host *); 171void nlm_release_host(struct nlm_host *);
171void nlm_shutdown_hosts(void); 172void nlm_shutdown_hosts(void);
172extern struct nlm_host *nlm_find_client(void); 173extern void nlm_host_rebooted(const struct sockaddr_in *, const char *, int, u32);
174struct nsm_handle *nsm_find(const struct sockaddr_in *, const char *, int);
175void nsm_release(struct nsm_handle *);
176
173 177
178/*
179 * This is used in garbage collection and resource reclaim
180 * A return value != 0 means destroy the lock/block/share
181 */
182typedef int (*nlm_host_match_fn_t)(struct nlm_host *cur, struct nlm_host *ref);
174 183
175/* 184/*
176 * Server-side lock handling 185 * Server-side lock handling
@@ -183,8 +192,8 @@ u32 nlmsvc_testlock(struct nlm_file *, struct nlm_lock *,
183u32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *); 192u32 nlmsvc_cancel_blocked(struct nlm_file *, struct nlm_lock *);
184unsigned long nlmsvc_retry_blocked(void); 193unsigned long nlmsvc_retry_blocked(void);
185void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, 194void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
186 int action); 195 nlm_host_match_fn_t match);
187void nlmsvc_grant_reply(struct svc_rqst *, struct nlm_cookie *, u32); 196void nlmsvc_grant_reply(struct nlm_cookie *, u32);
188 197
189/* 198/*
190 * File handling for the server personality 199 * File handling for the server personality
diff --git a/include/linux/lockd/share.h b/include/linux/lockd/share.h
index c75a424ebe4c..cd7816e74c05 100644
--- a/include/linux/lockd/share.h
+++ b/include/linux/lockd/share.h
@@ -25,6 +25,7 @@ u32 nlmsvc_share_file(struct nlm_host *, struct nlm_file *,
25 struct nlm_args *); 25 struct nlm_args *);
26u32 nlmsvc_unshare_file(struct nlm_host *, struct nlm_file *, 26u32 nlmsvc_unshare_file(struct nlm_host *, struct nlm_file *,
27 struct nlm_args *); 27 struct nlm_args *);
28void nlmsvc_traverse_shares(struct nlm_host *, struct nlm_file *, int); 28void nlmsvc_traverse_shares(struct nlm_host *, struct nlm_file *,
29 nlm_host_match_fn_t);
29 30
30#endif /* LINUX_LOCKD_SHARE_H */ 31#endif /* LINUX_LOCKD_SHARE_H */
diff --git a/include/linux/lockd/sm_inter.h b/include/linux/lockd/sm_inter.h
index 1080bb6ae315..fc61d40964da 100644
--- a/include/linux/lockd/sm_inter.h
+++ b/include/linux/lockd/sm_inter.h
@@ -28,7 +28,8 @@ struct nsm_args {
28 u32 prog; /* RPC callback info */ 28 u32 prog; /* RPC callback info */
29 u32 vers; 29 u32 vers;
30 u32 proc; 30 u32 proc;
31 u32 proto; /* protocol (udp/tcp) plus server/client flag */ 31
32 char * mon_name;
32}; 33};
33 34
34/* 35/*
@@ -41,6 +42,6 @@ struct nsm_res {
41 42
42int nsm_monitor(struct nlm_host *); 43int nsm_monitor(struct nlm_host *);
43int nsm_unmonitor(struct nlm_host *); 44int nsm_unmonitor(struct nlm_host *);
44extern u32 nsm_local_state; 45extern int nsm_local_state;
45 46
46#endif /* LINUX_LOCKD_SM_INTER_H */ 47#endif /* LINUX_LOCKD_SM_INTER_H */
diff --git a/include/linux/msi.h b/include/linux/msi.h
new file mode 100644
index 000000000000..c7ef94343673
--- /dev/null
+++ b/include/linux/msi.h
@@ -0,0 +1,49 @@
1#ifndef LINUX_MSI_H
2#define LINUX_MSI_H
3
4struct msi_msg {
5 u32 address_lo; /* low 32 bits of msi message address */
6 u32 address_hi; /* high 32 bits of msi message address */
7 u32 data; /* 16 bits of msi message data */
8};
9
10/* Heper functions */
11extern void mask_msi_irq(unsigned int irq);
12extern void unmask_msi_irq(unsigned int irq);
13extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
14
15extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
16
17struct msi_desc {
18 struct {
19 __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */
20 __u8 maskbit : 1; /* mask-pending bit supported ? */
21 __u8 unused : 1;
22 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
23 __u8 pos; /* Location of the msi capability */
24 __u16 entry_nr; /* specific enabled entry */
25 unsigned default_irq; /* default pre-assigned irq */
26 }msi_attrib;
27
28 struct {
29 __u16 head;
30 __u16 tail;
31 }link;
32
33 void __iomem *mask_base;
34 struct pci_dev *dev;
35
36#ifdef CONFIG_PM
37 /* PM save area for MSIX address/data */
38 struct msi_msg msg_save;
39#endif
40};
41
42/*
43 * The arch hook for setup up msi irqs
44 */
45int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev);
46void arch_teardown_msi_irq(unsigned int irq);
47
48
49#endif /* LINUX_MSI_H */
diff --git a/include/linux/netfilter_bridge/ebt_mark_t.h b/include/linux/netfilter_bridge/ebt_mark_t.h
index 110fec6a40a2..6270f6f33693 100644
--- a/include/linux/netfilter_bridge/ebt_mark_t.h
+++ b/include/linux/netfilter_bridge/ebt_mark_t.h
@@ -1,6 +1,18 @@
1#ifndef __LINUX_BRIDGE_EBT_MARK_T_H 1#ifndef __LINUX_BRIDGE_EBT_MARK_T_H
2#define __LINUX_BRIDGE_EBT_MARK_T_H 2#define __LINUX_BRIDGE_EBT_MARK_T_H
3 3
4/* The target member is reused for adding new actions, the
5 * value of the real target is -1 to -NUM_STANDARD_TARGETS.
6 * For backward compatibility, the 4 lsb (2 would be enough,
7 * but let's play it safe) are kept to designate this target.
8 * The remaining bits designate the action. By making the set
9 * action 0xfffffff0, the result will look ok for older
10 * versions. [September 2006] */
11#define MARK_SET_VALUE (0xfffffff0)
12#define MARK_OR_VALUE (0xffffffe0)
13#define MARK_AND_VALUE (0xffffffd0)
14#define MARK_XOR_VALUE (0xffffffc0)
15
4struct ebt_mark_t_info 16struct ebt_mark_t_info
5{ 17{
6 unsigned long mark; 18 unsigned long mark;
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index ce02c984f3ba..5b63a231a76b 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -77,7 +77,7 @@ enum nf_ip_hook_priorities {
77#define SO_ORIGINAL_DST 80 77#define SO_ORIGINAL_DST 80
78 78
79#ifdef __KERNEL__ 79#ifdef __KERNEL__
80extern int ip_route_me_harder(struct sk_buff **pskb); 80extern int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type);
81extern int ip_xfrm_me_harder(struct sk_buff **pskb); 81extern int ip_xfrm_me_harder(struct sk_buff **pskb);
82extern unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook, 82extern unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
83 unsigned int dataoff, u_int8_t protocol); 83 unsigned int dataoff, u_int8_t protocol);
diff --git a/include/linux/nfsd/const.h b/include/linux/nfsd/const.h
index b75bb1b38d09..f0cc77790527 100644
--- a/include/linux/nfsd/const.h
+++ b/include/linux/nfsd/const.h
@@ -20,17 +20,31 @@
20#define NFSSVC_MAXVERS 3 20#define NFSSVC_MAXVERS 3
21 21
22/* 22/*
23 * Maximum blocksize supported by daemon currently at 32K 23 * Maximum blocksizes supported by daemon under various circumstances.
24 */ 24 */
25#define NFSSVC_MAXBLKSIZE (32*1024) 25#define NFSSVC_MAXBLKSIZE RPCSVC_MAXPAYLOAD
26/* NFSv2 is limited by the protocol specification, see RFC 1094 */
27#define NFSSVC_MAXBLKSIZE_V2 (8*1024)
26 28
27#ifdef __KERNEL__ 29#ifdef __KERNEL__
28 30
31#include <linux/sunrpc/msg_prot.h>
32
29#ifndef NFS_SUPER_MAGIC 33#ifndef NFS_SUPER_MAGIC
30# define NFS_SUPER_MAGIC 0x6969 34# define NFS_SUPER_MAGIC 0x6969
31#endif 35#endif
32 36
33#define NFSD_BUFSIZE (1024 + NFSSVC_MAXBLKSIZE) 37/*
38 * Largest number of bytes we need to allocate for an NFS
39 * call or reply. Used to control buffer sizes. We use
40 * the length of v3 WRITE, READDIR and READDIR replies
41 * which are an RPC header, up to 26 XDR units of reply
42 * data, and some page data.
43 *
44 * Note that accuracy here doesn't matter too much as the
45 * size is rounded up to a page size when allocating space.
46 */
47#define NFSD_BUFSIZE ((RPC_MAX_HEADER_WITH_AUTH+26)*XDR_UNIT + NFSSVC_MAXBLKSIZE)
34 48
35#ifdef CONFIG_NFSD_V4 49#ifdef CONFIG_NFSD_V4
36# define NFSSVC_XDRSIZE NFS4_SVC_XDRSIZE 50# define NFSSVC_XDRSIZE NFS4_SVC_XDRSIZE
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index d2a8abb5011a..6e78ea969f49 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -45,15 +45,36 @@
45 45
46#ifdef __KERNEL__ 46#ifdef __KERNEL__
47 47
48/*
49 * FS Locations
50 */
51
52#define MAX_FS_LOCATIONS 128
53
54struct nfsd4_fs_location {
55 char *hosts; /* colon separated list of hosts */
56 char *path; /* slash separated list of path components */
57};
58
59struct nfsd4_fs_locations {
60 uint32_t locations_count;
61 struct nfsd4_fs_location *locations;
62/* If we're not actually serving this data ourselves (only providing a
63 * list of replicas that do serve it) then we set "migrated": */
64 int migrated;
65};
66
48struct svc_export { 67struct svc_export {
49 struct cache_head h; 68 struct cache_head h;
50 struct auth_domain * ex_client; 69 struct auth_domain * ex_client;
51 int ex_flags; 70 int ex_flags;
52 struct vfsmount * ex_mnt; 71 struct vfsmount * ex_mnt;
53 struct dentry * ex_dentry; 72 struct dentry * ex_dentry;
73 char * ex_path;
54 uid_t ex_anon_uid; 74 uid_t ex_anon_uid;
55 gid_t ex_anon_gid; 75 gid_t ex_anon_gid;
56 int ex_fsid; 76 int ex_fsid;
77 struct nfsd4_fs_locations ex_fslocs;
57}; 78};
58 79
59/* an "export key" (expkey) maps a filehandlefragement to an 80/* an "export key" (expkey) maps a filehandlefragement to an
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index e1dbc86c270b..d0d4aae7085f 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -145,6 +145,7 @@ int nfsd_vers(int vers, enum vers_op change);
145void nfsd_reset_versions(void); 145void nfsd_reset_versions(void);
146int nfsd_create_serv(void); 146int nfsd_create_serv(void);
147 147
148extern int nfsd_max_blksize;
148 149
149/* 150/*
150 * NFSv4 State 151 * NFSv4 State
@@ -215,6 +216,7 @@ void nfsd_lockd_shutdown(void);
215#define nfserr_clid_inuse __constant_htonl(NFSERR_CLID_INUSE) 216#define nfserr_clid_inuse __constant_htonl(NFSERR_CLID_INUSE)
216#define nfserr_stale_clientid __constant_htonl(NFSERR_STALE_CLIENTID) 217#define nfserr_stale_clientid __constant_htonl(NFSERR_STALE_CLIENTID)
217#define nfserr_resource __constant_htonl(NFSERR_RESOURCE) 218#define nfserr_resource __constant_htonl(NFSERR_RESOURCE)
219#define nfserr_moved __constant_htonl(NFSERR_MOVED)
218#define nfserr_nofilehandle __constant_htonl(NFSERR_NOFILEHANDLE) 220#define nfserr_nofilehandle __constant_htonl(NFSERR_NOFILEHANDLE)
219#define nfserr_minor_vers_mismatch __constant_htonl(NFSERR_MINOR_VERS_MISMATCH) 221#define nfserr_minor_vers_mismatch __constant_htonl(NFSERR_MINOR_VERS_MISMATCH)
220#define nfserr_share_denied __constant_htonl(NFSERR_SHARE_DENIED) 222#define nfserr_share_denied __constant_htonl(NFSERR_SHARE_DENIED)
@@ -291,7 +293,6 @@ static inline int is_fsid(struct svc_fh *fh, struct knfsd_fh *reffh)
291/* 293/*
292 * The following attributes are currently not supported by the NFSv4 server: 294 * The following attributes are currently not supported by the NFSv4 server:
293 * ARCHIVE (deprecated anyway) 295 * ARCHIVE (deprecated anyway)
294 * FS_LOCATIONS (will be supported eventually)
295 * HIDDEN (unlikely to be supported any time soon) 296 * HIDDEN (unlikely to be supported any time soon)
296 * MIMETYPE (unlikely to be supported any time soon) 297 * MIMETYPE (unlikely to be supported any time soon)
297 * QUOTA_* (will be supported in a forthcoming patch) 298 * QUOTA_* (will be supported in a forthcoming patch)
@@ -307,7 +308,7 @@ static inline int is_fsid(struct svc_fh *fh, struct knfsd_fh *reffh)
307 | FATTR4_WORD0_ACLSUPPORT | FATTR4_WORD0_CANSETTIME | FATTR4_WORD0_CASE_INSENSITIVE \ 308 | FATTR4_WORD0_ACLSUPPORT | FATTR4_WORD0_CANSETTIME | FATTR4_WORD0_CASE_INSENSITIVE \
308 | FATTR4_WORD0_CASE_PRESERVING | FATTR4_WORD0_CHOWN_RESTRICTED \ 309 | FATTR4_WORD0_CASE_PRESERVING | FATTR4_WORD0_CHOWN_RESTRICTED \
309 | FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FILEID | FATTR4_WORD0_FILES_AVAIL \ 310 | FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FILEID | FATTR4_WORD0_FILES_AVAIL \
310 | FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_HOMOGENEOUS \ 311 | FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_HOMOGENEOUS \
311 | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \ 312 | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \
312 | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL) 313 | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL)
313 314
diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h
index a38f9d776de9..0e53de87d886 100644
--- a/include/linux/nfsd/xdr.h
+++ b/include/linux/nfsd/xdr.h
@@ -30,7 +30,6 @@ struct nfsd_readargs {
30 struct svc_fh fh; 30 struct svc_fh fh;
31 __u32 offset; 31 __u32 offset;
32 __u32 count; 32 __u32 count;
33 struct kvec vec[RPCSVC_MAXPAGES];
34 int vlen; 33 int vlen;
35}; 34};
36 35
@@ -38,7 +37,6 @@ struct nfsd_writeargs {
38 svc_fh fh; 37 svc_fh fh;
39 __u32 offset; 38 __u32 offset;
40 int len; 39 int len;
41 struct kvec vec[RPCSVC_MAXPAGES];
42 int vlen; 40 int vlen;
43}; 41};
44 42
diff --git a/include/linux/nfsd/xdr3.h b/include/linux/nfsd/xdr3.h
index a4322741f8b9..474d882dc2f3 100644
--- a/include/linux/nfsd/xdr3.h
+++ b/include/linux/nfsd/xdr3.h
@@ -33,7 +33,6 @@ struct nfsd3_readargs {
33 struct svc_fh fh; 33 struct svc_fh fh;
34 __u64 offset; 34 __u64 offset;
35 __u32 count; 35 __u32 count;
36 struct kvec vec[RPCSVC_MAXPAGES];
37 int vlen; 36 int vlen;
38}; 37};
39 38
@@ -43,7 +42,6 @@ struct nfsd3_writeargs {
43 __u32 count; 42 __u32 count;
44 int stable; 43 int stable;
45 __u32 len; 44 __u32 len;
46 struct kvec vec[RPCSVC_MAXPAGES];
47 int vlen; 45 int vlen;
48}; 46};
49 47
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 77adba7d2281..66e642762a07 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -241,7 +241,6 @@ struct nfsd4_read {
241 stateid_t rd_stateid; /* request */ 241 stateid_t rd_stateid; /* request */
242 u64 rd_offset; /* request */ 242 u64 rd_offset; /* request */
243 u32 rd_length; /* request */ 243 u32 rd_length; /* request */
244 struct kvec rd_iov[RPCSVC_MAXPAGES];
245 int rd_vlen; 244 int rd_vlen;
246 struct file *rd_filp; 245 struct file *rd_filp;
247 246
@@ -326,7 +325,6 @@ struct nfsd4_write {
326 u64 wr_offset; /* request */ 325 u64 wr_offset; /* request */
327 u32 wr_stable_how; /* request */ 326 u32 wr_stable_how; /* request */
328 u32 wr_buflen; /* request */ 327 u32 wr_buflen; /* request */
329 struct kvec wr_vec[RPCSVC_MAXPAGES]; /* request */
330 int wr_vlen; 328 int wr_vlen;
331 329
332 u32 wr_bytes_written; /* response */ 330 u32 wr_bytes_written; /* response */
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 7ff386a6ae87..10a43ed0527e 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -12,9 +12,10 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/mutex.h> 13#include <linux/mutex.h>
14#include <linux/rwsem.h> 14#include <linux/rwsem.h>
15#include <linux/srcu.h>
15 16
16/* 17/*
17 * Notifier chains are of three types: 18 * Notifier chains are of four types:
18 * 19 *
19 * Atomic notifier chains: Chain callbacks run in interrupt/atomic 20 * Atomic notifier chains: Chain callbacks run in interrupt/atomic
20 * context. Callouts are not allowed to block. 21 * context. Callouts are not allowed to block.
@@ -23,13 +24,27 @@
23 * Raw notifier chains: There are no restrictions on callbacks, 24 * Raw notifier chains: There are no restrictions on callbacks,
24 * registration, or unregistration. All locking and protection 25 * registration, or unregistration. All locking and protection
25 * must be provided by the caller. 26 * must be provided by the caller.
27 * SRCU notifier chains: A variant of blocking notifier chains, with
28 * the same restrictions.
26 * 29 *
27 * atomic_notifier_chain_register() may be called from an atomic context, 30 * atomic_notifier_chain_register() may be called from an atomic context,
28 * but blocking_notifier_chain_register() must be called from a process 31 * but blocking_notifier_chain_register() and srcu_notifier_chain_register()
29 * context. Ditto for the corresponding _unregister() routines. 32 * must be called from a process context. Ditto for the corresponding
33 * _unregister() routines.
30 * 34 *
31 * atomic_notifier_chain_unregister() and blocking_notifier_chain_unregister() 35 * atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(),
32 * _must not_ be called from within the call chain. 36 * and srcu_notifier_chain_unregister() _must not_ be called from within
37 * the call chain.
38 *
39 * SRCU notifier chains are an alternative form of blocking notifier chains.
40 * They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for
41 * protection of the chain links. This means there is _very_ low overhead
42 * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
43 * As compensation, srcu_notifier_chain_unregister() is rather expensive.
44 * SRCU notifier chains should be used when the chain will be called very
45 * often but notifier_blocks will seldom be removed. Also, SRCU notifier
46 * chains are slightly more difficult to use because they require special
47 * runtime initialization.
33 */ 48 */
34 49
35struct notifier_block { 50struct notifier_block {
@@ -52,6 +67,12 @@ struct raw_notifier_head {
52 struct notifier_block *head; 67 struct notifier_block *head;
53}; 68};
54 69
70struct srcu_notifier_head {
71 struct mutex mutex;
72 struct srcu_struct srcu;
73 struct notifier_block *head;
74};
75
55#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \ 76#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \
56 spin_lock_init(&(name)->lock); \ 77 spin_lock_init(&(name)->lock); \
57 (name)->head = NULL; \ 78 (name)->head = NULL; \
@@ -64,6 +85,11 @@ struct raw_notifier_head {
64 (name)->head = NULL; \ 85 (name)->head = NULL; \
65 } while (0) 86 } while (0)
66 87
88/* srcu_notifier_heads must be initialized and cleaned up dynamically */
89extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
90#define srcu_cleanup_notifier_head(name) \
91 cleanup_srcu_struct(&(name)->srcu);
92
67#define ATOMIC_NOTIFIER_INIT(name) { \ 93#define ATOMIC_NOTIFIER_INIT(name) { \
68 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ 94 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
69 .head = NULL } 95 .head = NULL }
@@ -72,6 +98,7 @@ struct raw_notifier_head {
72 .head = NULL } 98 .head = NULL }
73#define RAW_NOTIFIER_INIT(name) { \ 99#define RAW_NOTIFIER_INIT(name) { \
74 .head = NULL } 100 .head = NULL }
101/* srcu_notifier_heads cannot be initialized statically */
75 102
76#define ATOMIC_NOTIFIER_HEAD(name) \ 103#define ATOMIC_NOTIFIER_HEAD(name) \
77 struct atomic_notifier_head name = \ 104 struct atomic_notifier_head name = \
@@ -91,6 +118,8 @@ extern int blocking_notifier_chain_register(struct blocking_notifier_head *,
91 struct notifier_block *); 118 struct notifier_block *);
92extern int raw_notifier_chain_register(struct raw_notifier_head *, 119extern int raw_notifier_chain_register(struct raw_notifier_head *,
93 struct notifier_block *); 120 struct notifier_block *);
121extern int srcu_notifier_chain_register(struct srcu_notifier_head *,
122 struct notifier_block *);
94 123
95extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *, 124extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *,
96 struct notifier_block *); 125 struct notifier_block *);
@@ -98,6 +127,8 @@ extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *,
98 struct notifier_block *); 127 struct notifier_block *);
99extern int raw_notifier_chain_unregister(struct raw_notifier_head *, 128extern int raw_notifier_chain_unregister(struct raw_notifier_head *,
100 struct notifier_block *); 129 struct notifier_block *);
130extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *,
131 struct notifier_block *);
101 132
102extern int atomic_notifier_call_chain(struct atomic_notifier_head *, 133extern int atomic_notifier_call_chain(struct atomic_notifier_head *,
103 unsigned long val, void *v); 134 unsigned long val, void *v);
@@ -105,6 +136,8 @@ extern int blocking_notifier_call_chain(struct blocking_notifier_head *,
105 unsigned long val, void *v); 136 unsigned long val, void *v);
106extern int raw_notifier_call_chain(struct raw_notifier_head *, 137extern int raw_notifier_call_chain(struct raw_notifier_head *,
107 unsigned long val, void *v); 138 unsigned long val, void *v);
139extern int srcu_notifier_call_chain(struct srcu_notifier_head *,
140 unsigned long val, void *v);
108 141
109#define NOTIFY_DONE 0x0000 /* Don't care */ 142#define NOTIFY_DONE 0x0000 /* Don't care */
110#define NOTIFY_OK 0x0001 /* Suits me */ 143#define NOTIFY_OK 0x0001 /* Suits me */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4431ce4e1e6f..5c604f5fad67 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -595,6 +595,7 @@ struct msix_entry {
595 u16 entry; /* driver uses to specify entry, OS writes */ 595 u16 entry; /* driver uses to specify entry, OS writes */
596}; 596};
597 597
598
598#ifndef CONFIG_PCI_MSI 599#ifndef CONFIG_PCI_MSI
599static inline void pci_scan_msi_device(struct pci_dev *dev) {} 600static inline void pci_scan_msi_device(struct pci_dev *dev) {}
600static inline int pci_enable_msi(struct pci_dev *dev) {return -1;} 601static inline int pci_enable_msi(struct pci_dev *dev) {return -1;}
@@ -613,6 +614,12 @@ extern void pci_disable_msix(struct pci_dev *dev);
613extern void msi_remove_pci_irq_vectors(struct pci_dev *dev); 614extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
614#endif 615#endif
615 616
617#ifdef CONFIG_HT_IRQ
618/* The functions a driver should call */
619int ht_create_irq(struct pci_dev *dev, int idx);
620void ht_destroy_irq(unsigned int irq);
621#endif /* CONFIG_HT_IRQ */
622
616extern void pci_block_user_cfg_access(struct pci_dev *dev); 623extern void pci_block_user_cfg_access(struct pci_dev *dev);
617extern void pci_unblock_user_cfg_access(struct pci_dev *dev); 624extern void pci_unblock_user_cfg_access(struct pci_dev *dev);
618 625
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 7d0e26cba420..c312a12ad2d6 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -12,6 +12,11 @@
12 * PCI Local Bus Specification 12 * PCI Local Bus Specification
13 * PCI to PCI Bridge Specification 13 * PCI to PCI Bridge Specification
14 * PCI System Design Guide 14 * PCI System Design Guide
15 *
16 * For hypertransport information, please consult the following manuals
17 * from http://www.hypertransport.org
18 *
19 * The Hypertransport I/O Link Specification
15 */ 20 */
16 21
17#ifndef LINUX_PCI_REGS_H 22#ifndef LINUX_PCI_REGS_H
@@ -463,4 +468,20 @@
463#define PCI_PWR_CAP 12 /* Capability */ 468#define PCI_PWR_CAP 12 /* Capability */
464#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */ 469#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
465 470
471/* Hypertransport sub capability types */
472#define HT_CAPTYPE_SLAVE 0x00 /* Slave/Primary link configuration */
473#define HT_CAPTYPE_HOST 0x20 /* Host/Secondary link configuration */
474#define HT_CAPTYPE_IRQ 0x80 /* IRQ Configuration */
475#define HT_CAPTYPE_REMAPPING_40 0xA0 /* 40 bit address remapping */
476#define HT_CAPTYPE_REMAPPING_64 0xA2 /* 64 bit address remapping */
477#define HT_CAPTYPE_UNITID_CLUMP 0x90 /* Unit ID clumping */
478#define HT_CAPTYPE_EXTCONF 0x98 /* Extended Configuration Space Access */
479#define HT_CAPTYPE_MSI_MAPPING 0xA8 /* MSI Mapping Capability */
480#define HT_CAPTYPE_DIRECT_ROUTE 0xB0 /* Direct routing configuration */
481#define HT_CAPTYPE_VCSET 0xB8 /* Virtual Channel configuration */
482#define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */
483#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */
484#define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */
485
486
466#endif /* LINUX_PCI_REGS_H */ 487#endif /* LINUX_PCI_REGS_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index b4ca73d65891..c6b7485eac7c 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -19,7 +19,7 @@
19 * 19 *
20 * Author: Dipankar Sarma <dipankar@in.ibm.com> 20 * Author: Dipankar Sarma <dipankar@in.ibm.com>
21 * 21 *
22 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com> 22 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 23 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24 * Papers: 24 * Papers:
25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf 25 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
@@ -66,6 +66,8 @@ struct rcu_ctrlblk {
66 long completed; /* Number of the last completed batch */ 66 long completed; /* Number of the last completed batch */
67 int next_pending; /* Is the next batch already waiting? */ 67 int next_pending; /* Is the next batch already waiting? */
68 68
69 int signaled;
70
69 spinlock_t lock ____cacheline_internodealigned_in_smp; 71 spinlock_t lock ____cacheline_internodealigned_in_smp;
70 cpumask_t cpumask; /* CPUs that need to switch in order */ 72 cpumask_t cpumask; /* CPUs that need to switch in order */
71 /* for current batch to proceed. */ 73 /* for current batch to proceed. */
@@ -106,9 +108,6 @@ struct rcu_data {
106 long blimit; /* Upper limit on a processed batch */ 108 long blimit; /* Upper limit on a processed batch */
107 int cpu; 109 int cpu;
108 struct rcu_head barrier; 110 struct rcu_head barrier;
109#ifdef CONFIG_SMP
110 long last_rs_qlen; /* qlen during the last resched */
111#endif
112}; 111};
113 112
114DECLARE_PER_CPU(struct rcu_data, rcu_data); 113DECLARE_PER_CPU(struct rcu_data, rcu_data);
diff --git a/include/linux/scx200.h b/include/linux/scx200.h
index 693c0557e70b..de466e11e271 100644
--- a/include/linux/scx200.h
+++ b/include/linux/scx200.h
@@ -32,7 +32,7 @@ extern unsigned scx200_cb_base;
32 32
33/* High Resolution Timer */ 33/* High Resolution Timer */
34#define SCx200_TIMER_OFFSET 0x08 34#define SCx200_TIMER_OFFSET 0x08
35#define SCx200_TIMER_SIZE 0x05 35#define SCx200_TIMER_SIZE 0x06
36 36
37/* Clock Generators */ 37/* Clock Generators */
38#define SCx200_CLOCKGEN_OFFSET 0x10 38#define SCx200_CLOCKGEN_OFFSET 0x10
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 70be57d8ae0d..c4947b8a2c03 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -77,13 +77,6 @@ struct cache_sizes {
77extern struct cache_sizes malloc_sizes[]; 77extern struct cache_sizes malloc_sizes[];
78 78
79extern void *__kmalloc(size_t, gfp_t); 79extern void *__kmalloc(size_t, gfp_t);
80#ifndef CONFIG_DEBUG_SLAB
81#define ____kmalloc(size, flags) __kmalloc(size, flags)
82#else
83extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
84#define ____kmalloc(size, flags) \
85 __kmalloc_track_caller(size, flags, __builtin_return_address(0))
86#endif
87 80
88/** 81/**
89 * kmalloc - allocate memory 82 * kmalloc - allocate memory
@@ -153,6 +146,23 @@ found:
153 return __kmalloc(size, flags); 146 return __kmalloc(size, flags);
154} 147}
155 148
149/*
150 * kmalloc_track_caller is a special version of kmalloc that records the
151 * calling function of the routine calling it for slab leak tracking instead
152 * of just the calling function (confusing, eh?).
153 * It's useful when the call to kmalloc comes from a widely-used standard
154 * allocator where we care about the real place the memory allocation
155 * request comes from.
156 */
157#ifndef CONFIG_DEBUG_SLAB
158#define kmalloc_track_caller(size, flags) \
159 __kmalloc(size, flags)
160#else
161extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
162#define kmalloc_track_caller(size, flags) \
163 __kmalloc_track_caller(size, flags, __builtin_return_address(0))
164#endif
165
156extern void *__kzalloc(size_t, gfp_t); 166extern void *__kzalloc(size_t, gfp_t);
157 167
158/** 168/**
@@ -271,7 +281,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
271#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f) 281#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
272#define kmalloc_node(s, f, n) kmalloc(s, f) 282#define kmalloc_node(s, f, n) kmalloc(s, f)
273#define kzalloc(s, f) __kzalloc(s, f) 283#define kzalloc(s, f) __kzalloc(s, f)
274#define ____kmalloc kmalloc 284#define kmalloc_track_caller kmalloc
275 285
276#endif /* CONFIG_SLOB */ 286#endif /* CONFIG_SLOB */
277 287
diff --git a/include/linux/sound.h b/include/linux/sound.h
index f63d8342ffa3..9e2a94feed6b 100644
--- a/include/linux/sound.h
+++ b/include/linux/sound.h
@@ -35,10 +35,8 @@ extern int register_sound_special_device(const struct file_operations *fops, int
35extern int register_sound_mixer(const struct file_operations *fops, int dev); 35extern int register_sound_mixer(const struct file_operations *fops, int dev);
36extern int register_sound_midi(const struct file_operations *fops, int dev); 36extern int register_sound_midi(const struct file_operations *fops, int dev);
37extern int register_sound_dsp(const struct file_operations *fops, int dev); 37extern int register_sound_dsp(const struct file_operations *fops, int dev);
38extern int register_sound_synth(const struct file_operations *fops, int dev);
39 38
40extern void unregister_sound_special(int unit); 39extern void unregister_sound_special(int unit);
41extern void unregister_sound_mixer(int unit); 40extern void unregister_sound_mixer(int unit);
42extern void unregister_sound_midi(int unit); 41extern void unregister_sound_midi(int unit);
43extern void unregister_sound_dsp(int unit); 42extern void unregister_sound_dsp(int unit);
44extern void unregister_sound_synth(int unit);
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
new file mode 100644
index 000000000000..aca0eee53930
--- /dev/null
+++ b/include/linux/srcu.h
@@ -0,0 +1,53 @@
1/*
2 * Sleepable Read-Copy Update mechanism for mutual exclusion
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2006
19 *
20 * Author: Paul McKenney <paulmck@us.ibm.com>
21 *
22 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU/ *.txt
24 *
25 */
26
27#ifndef _LINUX_SRCU_H
28#define _LINUX_SRCU_H
29
30struct srcu_struct_array {
31 int c[2];
32};
33
34struct srcu_struct {
35 int completed;
36 struct srcu_struct_array *per_cpu_ref;
37 struct mutex mutex;
38};
39
40#ifndef CONFIG_PREEMPT
41#define srcu_barrier() barrier()
42#else /* #ifndef CONFIG_PREEMPT */
43#define srcu_barrier()
44#endif /* #else #ifndef CONFIG_PREEMPT */
45
46int init_srcu_struct(struct srcu_struct *sp);
47void cleanup_srcu_struct(struct srcu_struct *sp);
48int srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
49void srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
50void synchronize_srcu(struct srcu_struct *sp);
51long srcu_batches_completed(struct srcu_struct *sp);
52
53#endif
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 862c0d8c8381..534cdc7be58d 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -20,9 +20,6 @@
20/* size of the nodename buffer */ 20/* size of the nodename buffer */
21#define UNX_MAXNODENAME 32 21#define UNX_MAXNODENAME 32
22 22
23/* Maximum size (in bytes) of an rpc credential or verifier */
24#define RPC_MAX_AUTH_SIZE (400)
25
26/* Work around the lack of a VFS credential */ 23/* Work around the lack of a VFS credential */
27struct auth_cred { 24struct auth_cred {
28 uid_t uid; 25 uid_t uid;
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index b5612c958cce..3699dff7db8f 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -163,6 +163,17 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
163 kref_put(&h->ref, cd->cache_put); 163 kref_put(&h->ref, cd->cache_put);
164} 164}
165 165
166static inline int cache_valid(struct cache_head *h)
167{
168 /* If an item has been unhashed pending removal when
169 * the refcount drops to 0, the expiry_time will be
170 * set to 0. We don't want to consider such items
171 * valid in this context even though CACHE_VALID is
172 * set.
173 */
174 return (h->expiry_time != 0 && test_bit(CACHE_VALID, &h->flags));
175}
176
166extern int cache_check(struct cache_detail *detail, 177extern int cache_check(struct cache_detail *detail,
167 struct cache_head *h, struct cache_req *rqstp); 178 struct cache_head *h, struct cache_req *rqstp);
168extern void cache_flush(void); 179extern void cache_flush(void);
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 8d10d148834e..1e65f2dd80e5 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -11,6 +11,9 @@
11 11
12#define RPC_VERSION 2 12#define RPC_VERSION 2
13 13
14/* size of an XDR encoding unit in bytes, i.e. 32bit */
15#define XDR_UNIT (4)
16
14/* spec defines authentication flavor as an unsigned 32 bit integer */ 17/* spec defines authentication flavor as an unsigned 32 bit integer */
15typedef u32 rpc_authflavor_t; 18typedef u32 rpc_authflavor_t;
16 19
@@ -34,6 +37,9 @@ enum rpc_auth_flavors {
34 RPC_AUTH_GSS_SPKMP = 390011, 37 RPC_AUTH_GSS_SPKMP = 390011,
35}; 38};
36 39
40/* Maximum size (in bytes) of an rpc credential or verifier */
41#define RPC_MAX_AUTH_SIZE (400)
42
37enum rpc_msg_type { 43enum rpc_msg_type {
38 RPC_CALL = 0, 44 RPC_CALL = 0,
39 RPC_REPLY = 1 45 RPC_REPLY = 1
@@ -101,5 +107,39 @@ typedef __be32 rpc_fraghdr;
101#define RPC_FRAGMENT_SIZE_MASK (~RPC_LAST_STREAM_FRAGMENT) 107#define RPC_FRAGMENT_SIZE_MASK (~RPC_LAST_STREAM_FRAGMENT)
102#define RPC_MAX_FRAGMENT_SIZE ((1U << 31) - 1) 108#define RPC_MAX_FRAGMENT_SIZE ((1U << 31) - 1)
103 109
110/*
111 * RPC call and reply header size as number of 32bit words (verifier
112 * size computed separately, see below)
113 */
114#define RPC_CALLHDRSIZE (6)
115#define RPC_REPHDRSIZE (4)
116
117
118/*
119 * Maximum RPC header size, including authentication,
120 * as number of 32bit words (see RFCs 1831, 1832).
121 *
122 * xid 1 xdr unit = 4 bytes
123 * mtype 1
124 * rpc_version 1
125 * program 1
126 * prog_version 1
127 * procedure 1
128 * cred {
129 * flavor 1
130 * length 1
131 * body<RPC_MAX_AUTH_SIZE> 100 xdr units = 400 bytes
132 * }
133 * verf {
134 * flavor 1
135 * length 1
136 * body<RPC_MAX_AUTH_SIZE> 100 xdr units = 400 bytes
137 * }
138 * TOTAL 210 xdr units = 840 bytes
139 */
140#define RPC_MAX_HEADER_WITH_AUTH \
141 (RPC_CALLHDRSIZE + 2*(2+RPC_MAX_AUTH_SIZE/4))
142
143
104#endif /* __KERNEL__ */ 144#endif /* __KERNEL__ */
105#endif /* _LINUX_SUNRPC_MSGPROT_H_ */ 145#endif /* _LINUX_SUNRPC_MSGPROT_H_ */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 4ebcdf91f3b3..d6288e89fd9d 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -13,6 +13,7 @@
13#include <linux/in.h> 13#include <linux/in.h>
14#include <linux/sunrpc/types.h> 14#include <linux/sunrpc/types.h>
15#include <linux/sunrpc/xdr.h> 15#include <linux/sunrpc/xdr.h>
16#include <linux/sunrpc/auth.h>
16#include <linux/sunrpc/svcauth.h> 17#include <linux/sunrpc/svcauth.h>
17#include <linux/wait.h> 18#include <linux/wait.h>
18#include <linux/mm.h> 19#include <linux/mm.h>
@@ -95,8 +96,28 @@ static inline void svc_get(struct svc_serv *serv)
95 * Maximum payload size supported by a kernel RPC server. 96 * Maximum payload size supported by a kernel RPC server.
96 * This is use to determine the max number of pages nfsd is 97 * This is use to determine the max number of pages nfsd is
97 * willing to return in a single READ operation. 98 * willing to return in a single READ operation.
99 *
100 * These happen to all be powers of 2, which is not strictly
101 * necessary but helps enforce the real limitation, which is
102 * that they should be multiples of PAGE_CACHE_SIZE.
103 *
104 * For UDP transports, a block plus NFS,RPC, and UDP headers
105 * has to fit into the IP datagram limit of 64K. The largest
106 * feasible number for all known page sizes is probably 48K,
107 * but we choose 32K here. This is the same as the historical
108 * Linux limit; someone who cares more about NFS/UDP performance
109 * can test a larger number.
110 *
111 * For TCP transports we have more freedom. A size of 1MB is
112 * chosen to match the client limit. Other OSes are known to
113 * have larger limits, but those numbers are probably beyond
114 * the point of diminishing returns.
98 */ 115 */
99#define RPCSVC_MAXPAYLOAD (64*1024u) 116#define RPCSVC_MAXPAYLOAD (1*1024*1024u)
117#define RPCSVC_MAXPAYLOAD_TCP RPCSVC_MAXPAYLOAD
118#define RPCSVC_MAXPAYLOAD_UDP (32*1024u)
119
120extern u32 svc_max_payload(const struct svc_rqst *rqstp);
100 121
101/* 122/*
102 * RPC Requsts and replies are stored in one or more pages. 123 * RPC Requsts and replies are stored in one or more pages.
@@ -170,7 +191,6 @@ static inline void svc_putu32(struct kvec *iov, __be32 val)
170/* 191/*
171 * The context of a single thread, including the request currently being 192 * The context of a single thread, including the request currently being
172 * processed. 193 * processed.
173 * NOTE: First two items must be prev/next.
174 */ 194 */
175struct svc_rqst { 195struct svc_rqst {
176 struct list_head rq_list; /* idle list */ 196 struct list_head rq_list; /* idle list */
@@ -189,12 +209,11 @@ struct svc_rqst {
189 209
190 struct xdr_buf rq_arg; 210 struct xdr_buf rq_arg;
191 struct xdr_buf rq_res; 211 struct xdr_buf rq_res;
192 struct page * rq_argpages[RPCSVC_MAXPAGES]; 212 struct page * rq_pages[RPCSVC_MAXPAGES];
193 struct page * rq_respages[RPCSVC_MAXPAGES]; 213 struct page * *rq_respages; /* points into rq_pages */
194 int rq_restailpage; 214 int rq_resused; /* number of pages used for result */
195 short rq_argused; /* pages used for argument */ 215
196 short rq_arghi; /* pages available in argument page list */ 216 struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
197 short rq_resused; /* pages used for result */
198 217
199 __be32 rq_xid; /* transmission id */ 218 __be32 rq_xid; /* transmission id */
200 u32 rq_prog; /* program number */ 219 u32 rq_prog; /* program number */
@@ -255,63 +274,18 @@ xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
255 return vec->iov_len <= PAGE_SIZE; 274 return vec->iov_len <= PAGE_SIZE;
256} 275}
257 276
258static inline struct page * 277static inline void svc_free_res_pages(struct svc_rqst *rqstp)
259svc_take_res_page(struct svc_rqst *rqstp)
260{
261 if (rqstp->rq_arghi <= rqstp->rq_argused)
262 return NULL;
263 rqstp->rq_arghi--;
264 rqstp->rq_respages[rqstp->rq_resused] =
265 rqstp->rq_argpages[rqstp->rq_arghi];
266 return rqstp->rq_respages[rqstp->rq_resused++];
267}
268
269static inline void svc_take_page(struct svc_rqst *rqstp)
270{
271 if (rqstp->rq_arghi <= rqstp->rq_argused) {
272 WARN_ON(1);
273 return;
274 }
275 rqstp->rq_arghi--;
276 rqstp->rq_respages[rqstp->rq_resused] =
277 rqstp->rq_argpages[rqstp->rq_arghi];
278 rqstp->rq_resused++;
279}
280
281static inline void svc_pushback_allpages(struct svc_rqst *rqstp)
282{
283 while (rqstp->rq_resused) {
284 if (rqstp->rq_respages[--rqstp->rq_resused] == NULL)
285 continue;
286 rqstp->rq_argpages[rqstp->rq_arghi++] =
287 rqstp->rq_respages[rqstp->rq_resused];
288 rqstp->rq_respages[rqstp->rq_resused] = NULL;
289 }
290}
291
292static inline void svc_pushback_unused_pages(struct svc_rqst *rqstp)
293{ 278{
294 while (rqstp->rq_resused && 279 while (rqstp->rq_resused) {
295 rqstp->rq_res.pages != &rqstp->rq_respages[rqstp->rq_resused]) { 280 struct page **pp = (rqstp->rq_respages +
296 281 --rqstp->rq_resused);
297 if (rqstp->rq_respages[--rqstp->rq_resused] != NULL) { 282 if (*pp) {
298 rqstp->rq_argpages[rqstp->rq_arghi++] = 283 put_page(*pp);
299 rqstp->rq_respages[rqstp->rq_resused]; 284 *pp = NULL;
300 rqstp->rq_respages[rqstp->rq_resused] = NULL;
301 } 285 }
302 } 286 }
303} 287}
304 288
305static inline void svc_free_allpages(struct svc_rqst *rqstp)
306{
307 while (rqstp->rq_resused) {
308 if (rqstp->rq_respages[--rqstp->rq_resused] == NULL)
309 continue;
310 put_page(rqstp->rq_respages[rqstp->rq_resused]);
311 rqstp->rq_respages[rqstp->rq_resused] = NULL;
312 }
313}
314
315struct svc_deferred_req { 289struct svc_deferred_req {
316 u32 prot; /* protocol (UDP or TCP) */ 290 u32 prot; /* protocol (UDP or TCP) */
317 struct sockaddr_in addr; 291 struct sockaddr_in addr;
@@ -347,6 +321,9 @@ struct svc_version {
347 struct svc_procedure * vs_proc; /* per-procedure info */ 321 struct svc_procedure * vs_proc; /* per-procedure info */
348 u32 vs_xdrsize; /* xdrsize needed for this version */ 322 u32 vs_xdrsize; /* xdrsize needed for this version */
349 323
324 unsigned int vs_hidden : 1; /* Don't register with portmapper.
325 * Only used for nfsacl so far. */
326
350 /* Override dispatch function (e.g. when caching replies). 327 /* Override dispatch function (e.g. when caching replies).
351 * A return value of 0 means drop the request. 328 * A return value of 0 means drop the request.
352 * vs_dispatch == NULL means use default dispatcher. 329 * vs_dispatch == NULL means use default dispatcher.
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index a6601650deeb..de92619b0826 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -126,6 +126,7 @@ extern struct auth_domain *auth_domain_find(char *name);
126extern struct auth_domain *auth_unix_lookup(struct in_addr addr); 126extern struct auth_domain *auth_unix_lookup(struct in_addr addr);
127extern int auth_unix_forget_old(struct auth_domain *dom); 127extern int auth_unix_forget_old(struct auth_domain *dom);
128extern void svcauth_unix_purge(void); 128extern void svcauth_unix_purge(void);
129extern void svcauth_unix_info_release(void *);
129 130
130static inline unsigned long hash_str(char *name, int bits) 131static inline unsigned long hash_str(char *name, int bits)
131{ 132{
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 4c296152cbfa..98b21ad370fd 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -54,6 +54,9 @@ struct svc_sock {
54 int sk_reclen; /* length of record */ 54 int sk_reclen; /* length of record */
55 int sk_tcplen; /* current read length */ 55 int sk_tcplen; /* current read length */
56 time_t sk_lastrecv; /* time of last received request */ 56 time_t sk_lastrecv; /* time of last received request */
57
58 /* cache of various info for TCP sockets */
59 void *sk_info_authunix;
57}; 60};
58 61
59/* 62/*
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 6cf626580752..60394fbc4c70 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -15,6 +15,7 @@
15#include <linux/kref.h> 15#include <linux/kref.h>
16#include <linux/sunrpc/sched.h> 16#include <linux/sunrpc/sched.h>
17#include <linux/sunrpc/xdr.h> 17#include <linux/sunrpc/xdr.h>
18#include <linux/sunrpc/msg_prot.h>
18 19
19extern unsigned int xprt_udp_slot_table_entries; 20extern unsigned int xprt_udp_slot_table_entries;
20extern unsigned int xprt_tcp_slot_table_entries; 21extern unsigned int xprt_tcp_slot_table_entries;
@@ -24,13 +25,6 @@ extern unsigned int xprt_tcp_slot_table_entries;
24#define RPC_MAX_SLOT_TABLE (128U) 25#define RPC_MAX_SLOT_TABLE (128U)
25 26
26/* 27/*
27 * RPC call and reply header size as number of 32bit words (verifier
28 * size computed separately)
29 */
30#define RPC_CALLHDRSIZE 6
31#define RPC_REPHDRSIZE 4
32
33/*
34 * Parameters for choosing a free port 28 * Parameters for choosing a free port
35 */ 29 */
36extern unsigned int xprt_min_resvport; 30extern unsigned int xprt_min_resvport;
diff --git a/include/linux/tifm.h b/include/linux/tifm.h
new file mode 100644
index 000000000000..203dd5e11ecb
--- /dev/null
+++ b/include/linux/tifm.h
@@ -0,0 +1,158 @@
1/*
2 * tifm.h - TI FlashMedia driver
3 *
4 * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#ifndef _TIFM_H
13#define _TIFM_H
14
15#include <linux/spinlock.h>
16#include <linux/interrupt.h>
17#include <linux/wait.h>
18#include <linux/delay.h>
19#include <linux/pci.h>
20
21/* Host registers (relative to pci base address): */
22enum {
23 FM_SET_INTERRUPT_ENABLE = 0x008,
24 FM_CLEAR_INTERRUPT_ENABLE = 0x00c,
25 FM_INTERRUPT_STATUS = 0x014 };
26
27/* Socket registers (relative to socket base address): */
28enum {
29 SOCK_CONTROL = 0x004,
30 SOCK_PRESENT_STATE = 0x008,
31 SOCK_DMA_ADDRESS = 0x00c,
32 SOCK_DMA_CONTROL = 0x010,
33 SOCK_DMA_FIFO_INT_ENABLE_SET = 0x014,
34 SOCK_DMA_FIFO_INT_ENABLE_CLEAR = 0x018,
35 SOCK_DMA_FIFO_STATUS = 0x020,
36 SOCK_FIFO_CONTROL = 0x024,
37 SOCK_FIFO_PAGE_SIZE = 0x028,
38 SOCK_MMCSD_COMMAND = 0x104,
39 SOCK_MMCSD_ARG_LOW = 0x108,
40 SOCK_MMCSD_ARG_HIGH = 0x10c,
41 SOCK_MMCSD_CONFIG = 0x110,
42 SOCK_MMCSD_STATUS = 0x114,
43 SOCK_MMCSD_INT_ENABLE = 0x118,
44 SOCK_MMCSD_COMMAND_TO = 0x11c,
45 SOCK_MMCSD_DATA_TO = 0x120,
46 SOCK_MMCSD_DATA = 0x124,
47 SOCK_MMCSD_BLOCK_LEN = 0x128,
48 SOCK_MMCSD_NUM_BLOCKS = 0x12c,
49 SOCK_MMCSD_BUFFER_CONFIG = 0x130,
50 SOCK_MMCSD_SPI_CONFIG = 0x134,
51 SOCK_MMCSD_SDIO_MODE_CONFIG = 0x138,
52 SOCK_MMCSD_RESPONSE = 0x144,
53 SOCK_MMCSD_SDIO_SR = 0x164,
54 SOCK_MMCSD_SYSTEM_CONTROL = 0x168,
55 SOCK_MMCSD_SYSTEM_STATUS = 0x16c,
56 SOCK_MS_COMMAND = 0x184,
57 SOCK_MS_DATA = 0x188,
58 SOCK_MS_STATUS = 0x18c,
59 SOCK_MS_SYSTEM = 0x190,
60 SOCK_FIFO_ACCESS = 0x200 };
61
62
63#define TIFM_IRQ_ENABLE 0x80000000
64#define TIFM_IRQ_SOCKMASK 0x00000001
65#define TIFM_IRQ_CARDMASK 0x00000100
66#define TIFM_IRQ_FIFOMASK 0x00010000
67#define TIFM_IRQ_SETALL 0xffffffff
68#define TIFM_IRQ_SETALLSOCK 0x0000000f
69
70#define TIFM_CTRL_LED 0x00000040
71#define TIFM_CTRL_FAST_CLK 0x00000100
72
73#define TIFM_SOCK_STATE_OCCUPIED 0x00000008
74#define TIFM_SOCK_STATE_POWERED 0x00000080
75
76#define TIFM_FIFO_ENABLE 0x00000001 /* Meaning of this constant is unverified */
77#define TIFM_FIFO_INT_SETALL 0x0000ffff
78#define TIFM_FIFO_INTMASK 0x00000005 /* Meaning of this constant is unverified */
79
80#define TIFM_DMA_RESET 0x00000002 /* Meaning of this constant is unverified */
81#define TIFM_DMA_TX 0x00008000 /* Meaning of this constant is unverified */
82#define TIFM_DMA_EN 0x00000001 /* Meaning of this constant is unverified */
83
84typedef enum {FM_NULL = 0, FM_XD = 0x01, FM_MS = 0x02, FM_SD = 0x03} tifm_media_id;
85
86struct tifm_driver;
87struct tifm_dev {
88 char __iomem *addr;
89 spinlock_t lock;
90 tifm_media_id media_id;
91 char wq_name[KOBJ_NAME_LEN];
92 struct workqueue_struct *wq;
93
94 unsigned int (*signal_irq)(struct tifm_dev *sock,
95 unsigned int sock_irq_status);
96
97 struct tifm_driver *drv;
98 struct device dev;
99};
100
101struct tifm_driver {
102 tifm_media_id *id_table;
103 int (*probe)(struct tifm_dev *dev);
104 void (*remove)(struct tifm_dev *dev);
105
106 struct device_driver driver;
107};
108
109struct tifm_adapter {
110 char __iomem *addr;
111 unsigned int irq_status;
112 unsigned int insert_mask;
113 unsigned int remove_mask;
114 spinlock_t lock;
115 unsigned int id;
116 unsigned int max_sockets;
117 char wq_name[KOBJ_NAME_LEN];
118 unsigned int inhibit_new_cards;
119 struct workqueue_struct *wq;
120 struct work_struct media_inserter;
121 struct work_struct media_remover;
122 struct tifm_dev **sockets;
123 struct class_device cdev;
124 struct device *dev;
125
126 void (*eject)(struct tifm_adapter *fm, struct tifm_dev *sock);
127};
128
129struct tifm_adapter *tifm_alloc_adapter(void);
130void tifm_free_device(struct device *dev);
131void tifm_free_adapter(struct tifm_adapter *fm);
132int tifm_add_adapter(struct tifm_adapter *fm);
133void tifm_remove_adapter(struct tifm_adapter *fm);
134struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id);
135int tifm_register_driver(struct tifm_driver *drv);
136void tifm_unregister_driver(struct tifm_driver *drv);
137void tifm_eject(struct tifm_dev *sock);
138int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
139 int direction);
140void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
141 int direction);
142
143
144static inline void *tifm_get_drvdata(struct tifm_dev *dev)
145{
146 return dev_get_drvdata(&dev->dev);
147}
148
149static inline void tifm_set_drvdata(struct tifm_dev *dev, void *data)
150{
151 dev_set_drvdata(&dev->dev, data);
152}
153
154struct tifm_device_id {
155 tifm_media_id media_id;
156};
157
158#endif
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 02e4b6972064..a4555fe3754c 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -1,11 +1,6 @@
1#ifndef _LINUX_UTSNAME_H 1#ifndef _LINUX_UTSNAME_H
2#define _LINUX_UTSNAME_H 2#define _LINUX_UTSNAME_H
3 3
4#include <linux/sched.h>
5#include <linux/kref.h>
6#include <linux/nsproxy.h>
7#include <asm/atomic.h>
8
9#define __OLD_UTS_LEN 8 4#define __OLD_UTS_LEN 8
10 5
11struct oldold_utsname { 6struct oldold_utsname {
@@ -35,6 +30,13 @@ struct new_utsname {
35 char domainname[65]; 30 char domainname[65];
36}; 31};
37 32
33#ifdef __KERNEL__
34
35#include <linux/sched.h>
36#include <linux/kref.h>
37#include <linux/nsproxy.h>
38#include <asm/atomic.h>
39
38struct uts_namespace { 40struct uts_namespace {
39 struct kref kref; 41 struct kref kref;
40 struct new_utsname name; 42 struct new_utsname name;
@@ -86,4 +88,7 @@ static inline struct new_utsname *init_utsname(void)
86} 88}
87 89
88extern struct rw_semaphore uts_sem; 90extern struct rw_semaphore uts_sem;
89#endif 91
92#endif /* __KERNEL__ */
93
94#endif /* _LINUX_UTSNAME_H */
diff --git a/include/linux/wavefront.h b/include/linux/wavefront.h
deleted file mode 100644
index 51ab3c933acd..000000000000
--- a/include/linux/wavefront.h
+++ /dev/null
@@ -1,675 +0,0 @@
1#ifndef __wavefront_h__
2#define __wavefront_h__
3
4/* WaveFront header file.
5 *
6 * Copyright (C) by Paul Barton-Davis 1998
7 *
8 * This program is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
9 * Version 2 (June 1991). See the "COPYING" file distributed with this software
10 * for more info.
11 */
12
13#if (!defined(__GNUC__) && !defined(__GNUG__))
14
15 You will not be able to compile this file correctly without gcc, because
16 it is necessary to pack the "wavefront_alias" structure to a size
17 of 22 bytes, corresponding to 16-bit alignment (as would have been
18 the case on the original platform, MS-DOS). If this is not done,
19 then WavePatch-format files cannot be read/written correctly.
20 The method used to do this here ("__attribute__((packed)") is
21 completely compiler dependent.
22
23 All other wavefront_* types end up aligned to 32 bit values and
24 still have the same (correct) size.
25
26#else
27
28 /* However, note that as of G++ 2.7.3.2, g++ was unable to
29 correctly parse *type* __attribute__ tags. It will do the
30 right thing if we use the "packed" attribute on each struct
31 member, which has the same semantics anyway.
32 */
33
34#endif /* __GNUC__ */
35
36/***************************** WARNING ********************************
37 PLEASE DO NOT MODIFY THIS FILE IN ANY WAY THAT AFFECTS ITS ABILITY TO
38 BE USED WITH EITHER C *OR* C++.
39 **********************************************************************/
40
41#ifndef NUM_MIDIKEYS
42#define NUM_MIDIKEYS 128
43#endif /* NUM_MIDIKEYS */
44
45#ifndef NUM_MIDICHANNELS
46#define NUM_MIDICHANNELS 16
47#endif /* NUM_MIDICHANNELS */
48
49/* These are very useful/important. the original wavefront interface
50 was developed on a 16 bit system, where sizeof(int) = 2
51 bytes. Defining things like this makes the code much more portable, and
52 easier to understand without having to toggle back and forth
53 between a 16-bit view of the world and a 32-bit one.
54 */
55
56typedef short INT16;
57typedef unsigned short UINT16;
58typedef int INT32;
59typedef unsigned int UINT32;
60typedef char CHAR8;
61typedef unsigned char UCHAR8;
62
63/* Pseudo-commands not part of the WaveFront command set.
64 These are used for various driver controls and direct
65 hardware control.
66 */
67
68#define WFC_DEBUG_DRIVER 0
69#define WFC_FX_IOCTL 1
70#define WFC_PATCH_STATUS 2
71#define WFC_PROGRAM_STATUS 3
72#define WFC_SAMPLE_STATUS 4
73#define WFC_DISABLE_INTERRUPTS 5
74#define WFC_ENABLE_INTERRUPTS 6
75#define WFC_INTERRUPT_STATUS 7
76#define WFC_ROMSAMPLES_RDONLY 8
77#define WFC_IDENTIFY_SLOT_TYPE 9
78
79/* Wavefront synth commands
80 */
81
82#define WFC_DOWNLOAD_SAMPLE 0x80
83#define WFC_DOWNLOAD_BLOCK 0x81
84#define WFC_DOWNLOAD_MULTISAMPLE 0x82
85#define WFC_DOWNLOAD_SAMPLE_ALIAS 0x83
86#define WFC_DELETE_SAMPLE 0x84
87#define WFC_REPORT_FREE_MEMORY 0x85
88#define WFC_DOWNLOAD_PATCH 0x86
89#define WFC_DOWNLOAD_PROGRAM 0x87
90#define WFC_SET_SYNTHVOL 0x89
91#define WFC_SET_NVOICES 0x8B
92#define WFC_DOWNLOAD_DRUM 0x90
93#define WFC_GET_SYNTHVOL 0x92
94#define WFC_GET_NVOICES 0x94
95#define WFC_DISABLE_CHANNEL 0x9A
96#define WFC_ENABLE_CHANNEL 0x9B
97#define WFC_MISYNTH_OFF 0x9D
98#define WFC_MISYNTH_ON 0x9E
99#define WFC_FIRMWARE_VERSION 0x9F
100#define WFC_GET_NSAMPLES 0xA0
101#define WFC_DISABLE_DRUM_PROGRAM 0xA2
102#define WFC_UPLOAD_PATCH 0xA3
103#define WFC_UPLOAD_PROGRAM 0xA4
104#define WFC_SET_TUNING 0xA6
105#define WFC_GET_TUNING 0xA7
106#define WFC_VMIDI_ON 0xA8
107#define WFC_VMIDI_OFF 0xA9
108#define WFC_MIDI_STATUS 0xAA
109#define WFC_GET_CHANNEL_STATUS 0xAB
110#define WFC_DOWNLOAD_SAMPLE_HEADER 0xAC
111#define WFC_UPLOAD_SAMPLE_HEADER 0xAD
112#define WFC_UPLOAD_MULTISAMPLE 0xAE
113#define WFC_UPLOAD_SAMPLE_ALIAS 0xAF
114#define WFC_IDENTIFY_SAMPLE_TYPE 0xB0
115#define WFC_DOWNLOAD_EDRUM_PROGRAM 0xB1
116#define WFC_UPLOAD_EDRUM_PROGRAM 0xB2
117#define WFC_SET_EDRUM_CHANNEL 0xB3
118#define WFC_INSTOUT_LEVELS 0xB4
119#define WFC_PEAKOUT_LEVELS 0xB5
120#define WFC_REPORT_CHANNEL_PROGRAMS 0xB6
121#define WFC_HARDWARE_VERSION 0xCF
122#define WFC_UPLOAD_SAMPLE_PARAMS 0xD7
123#define WFC_DOWNLOAD_OS 0xF1
124#define WFC_NOOP 0xFF
125
126#define WF_MAX_SAMPLE 512
127#define WF_MAX_PATCH 256
128#define WF_MAX_PROGRAM 128
129
130#define WF_SECTION_MAX 44 /* longest OS section length */
131
132/* # of bytes we send to the board when sending it various kinds of
133 substantive data, such as samples, patches and programs.
134*/
135
136#define WF_PROGRAM_BYTES 32
137#define WF_PATCH_BYTES 132
138#define WF_SAMPLE_BYTES 27
139#define WF_SAMPLE_HDR_BYTES 25
140#define WF_ALIAS_BYTES 25
141#define WF_DRUM_BYTES 9
142#define WF_MSAMPLE_BYTES 259 /* (MIDI_KEYS * 2) + 3 */
143
144#define WF_ACK 0x80
145#define WF_DMA_ACK 0x81
146
147/* OR-values for MIDI status bits */
148
149#define WF_MIDI_VIRTUAL_ENABLED 0x1
150#define WF_MIDI_VIRTUAL_IS_EXTERNAL 0x2
151#define WF_MIDI_IN_TO_SYNTH_DISABLED 0x4
152
153/* slot indexes for struct address_info: makes code a little more mnemonic */
154
155#define WF_SYNTH_SLOT 0
156#define WF_INTERNAL_MIDI_SLOT 1
157#define WF_EXTERNAL_MIDI_SLOT 2
158
159/* Magic MIDI bytes used to switch I/O streams on the ICS2115 MPU401
160 emulation. Note these NEVER show up in output from the device and
161 should NEVER be used in input unless Virtual MIDI mode has been
162 disabled. If they do show up as input, the results are unpredictable.
163*/
164
165#define WF_EXTERNAL_SWITCH 0xFD
166#define WF_INTERNAL_SWITCH 0xF9
167
168/* Debugging flags */
169
170#define WF_DEBUG_CMD 0x1
171#define WF_DEBUG_DATA 0x2
172#define WF_DEBUG_LOAD_PATCH 0x4
173#define WF_DEBUG_IO 0x8
174
175/* WavePatch file format stuff */
176
177#define WF_WAVEPATCH_VERSION 120; /* Current version number (1.2) */
178#define WF_MAX_COMMENT 64 /* Comment length */
179#define WF_NUM_LAYERS 4
180#define WF_NAME_LENGTH 32
181#define WF_SOURCE_LENGTH 260
182
183#define BankFileID "Bank"
184#define DrumkitFileID "DrumKit"
185#define ProgramFileID "Program"
186
187struct wf_envelope
188{
189 UCHAR8 attack_time:7;
190 UCHAR8 Unused1:1;
191
192 UCHAR8 decay1_time:7;
193 UCHAR8 Unused2:1;
194
195 UCHAR8 decay2_time:7;
196 UCHAR8 Unused3:1;
197
198 UCHAR8 sustain_time:7;
199 UCHAR8 Unused4:1;
200
201 UCHAR8 release_time:7;
202 UCHAR8 Unused5:1;
203
204 UCHAR8 release2_time:7;
205 UCHAR8 Unused6:1;
206
207 CHAR8 attack_level;
208 CHAR8 decay1_level;
209 CHAR8 decay2_level;
210 CHAR8 sustain_level;
211 CHAR8 release_level;
212
213 UCHAR8 attack_velocity:7;
214 UCHAR8 Unused7:1;
215
216 UCHAR8 volume_velocity:7;
217 UCHAR8 Unused8:1;
218
219 UCHAR8 keyboard_scaling:7;
220 UCHAR8 Unused9:1;
221};
222typedef struct wf_envelope wavefront_envelope;
223
224struct wf_lfo
225{
226 UCHAR8 sample_number;
227
228 UCHAR8 frequency:7;
229 UCHAR8 Unused1:1;
230
231 UCHAR8 am_src:4;
232 UCHAR8 fm_src:4;
233
234 CHAR8 fm_amount;
235 CHAR8 am_amount;
236 CHAR8 start_level;
237 CHAR8 end_level;
238
239 UCHAR8 ramp_delay:7;
240 UCHAR8 wave_restart:1; /* for LFO2 only */
241
242 UCHAR8 ramp_time:7;
243 UCHAR8 Unused2:1;
244};
245typedef struct wf_lfo wavefront_lfo;
246
247struct wf_patch
248{
249 INT16 frequency_bias; /* ** THIS IS IN MOTOROLA FORMAT!! ** */
250
251 UCHAR8 amplitude_bias:7;
252 UCHAR8 Unused1:1;
253
254 UCHAR8 portamento:7;
255 UCHAR8 Unused2:1;
256
257 UCHAR8 sample_number;
258
259 UCHAR8 pitch_bend:4;
260 UCHAR8 sample_msb:1;
261 UCHAR8 Unused3:3;
262
263 UCHAR8 mono:1;
264 UCHAR8 retrigger:1;
265 UCHAR8 nohold:1;
266 UCHAR8 restart:1;
267 UCHAR8 filterconfig:2; /* SDK says "not used" */
268 UCHAR8 reuse:1;
269 UCHAR8 reset_lfo:1;
270
271 UCHAR8 fm_src2:4;
272 UCHAR8 fm_src1:4;
273
274 CHAR8 fm_amount1;
275 CHAR8 fm_amount2;
276
277 UCHAR8 am_src:4;
278 UCHAR8 Unused4:4;
279
280 CHAR8 am_amount;
281
282 UCHAR8 fc1_mode:4;
283 UCHAR8 fc2_mode:4;
284
285 CHAR8 fc1_mod_amount;
286 CHAR8 fc1_keyboard_scaling;
287 CHAR8 fc1_bias;
288 CHAR8 fc2_mod_amount;
289 CHAR8 fc2_keyboard_scaling;
290 CHAR8 fc2_bias;
291
292 UCHAR8 randomizer:7;
293 UCHAR8 Unused5:1;
294
295 struct wf_envelope envelope1;
296 struct wf_envelope envelope2;
297 struct wf_lfo lfo1;
298 struct wf_lfo lfo2;
299};
300typedef struct wf_patch wavefront_patch;
301
302struct wf_layer
303{
304 UCHAR8 patch_number;
305
306 UCHAR8 mix_level:7;
307 UCHAR8 mute:1;
308
309 UCHAR8 split_point:7;
310 UCHAR8 play_below:1;
311
312 UCHAR8 pan_mod_src:2;
313 UCHAR8 pan_or_mod:1;
314 UCHAR8 pan:4;
315 UCHAR8 split_type:1;
316};
317typedef struct wf_layer wavefront_layer;
318
319struct wf_program
320{
321 struct wf_layer layer[WF_NUM_LAYERS];
322};
323typedef struct wf_program wavefront_program;
324
325struct wf_sample_offset
326{
327 INT32 Fraction:4;
328 INT32 Integer:20;
329 INT32 Unused:8;
330};
331typedef struct wf_sample_offset wavefront_sample_offset;
332
333/* Sample slot types */
334
335#define WF_ST_SAMPLE 0
336#define WF_ST_MULTISAMPLE 1
337#define WF_ST_ALIAS 2
338#define WF_ST_EMPTY 3
339
340/* pseudo's */
341
342#define WF_ST_DRUM 4
343#define WF_ST_PROGRAM 5
344#define WF_ST_PATCH 6
345#define WF_ST_SAMPLEHDR 7
346
347#define WF_ST_MASK 0xf
348
349/* Flags for slot status. These occupy the upper bits of the same byte
350 as a sample type.
351*/
352
353#define WF_SLOT_USED 0x80 /* XXX don't rely on this being accurate */
354#define WF_SLOT_FILLED 0x40
355#define WF_SLOT_ROM 0x20
356
357#define WF_SLOT_MASK 0xf0
358
359/* channel constants */
360
361#define WF_CH_MONO 0
362#define WF_CH_LEFT 1
363#define WF_CH_RIGHT 2
364
365/* Sample formats */
366
367#define LINEAR_16BIT 0
368#define WHITE_NOISE 1
369#define LINEAR_8BIT 2
370#define MULAW_8BIT 3
371
372#define WF_SAMPLE_IS_8BIT(smpl) ((smpl)->SampleResolution&2)
373
374
375/*
376
377 Because most/all of the sample data we pass in via pointers has
378 never been copied (just mmap-ed into user space straight from the
379 disk), it would be nice to allow handling of multi-channel sample
380 data without forcing user-level extraction of the relevant bytes.
381
382 So, we need a way of specifying which channel to use (the WaveFront
383 only handles mono samples in a given slot), and the only way to do
384 this without using some struct other than wavefront_sample as the
385 interface is the awful hack of using the unused bits in a
386 wavefront_sample:
387
388 Val Meaning
389 --- -------
390 0 no channel selection (use channel 1, sample is MONO)
391 1 use first channel, and skip one
392 2 use second channel, and skip one
393 3 use third channel, and skip two
394 4 use fourth channel, skip three
395 5 use fifth channel, skip four
396 6 use six channel, skip five
397
398
399 This can handle up to 4 channels, and anyone downloading >4 channels
400 of sample data just to select one of them needs to find some tools
401 like sox ...
402
403 NOTE: values 0, 1 and 2 correspond to WF_CH_* above. This is
404 important.
405
406*/
407
408#define WF_SET_CHANNEL(samp,chn) \
409 (samp)->Unused1 = chn & 0x1; \
410 (samp)->Unused2 = chn & 0x2; \
411 (samp)->Unused3 = chn & 0x4
412
413#define WF_GET_CHANNEL(samp) \
414 (((samp)->Unused3 << 2)|((samp)->Unused2<<1)|(samp)->Unused1)
415
416typedef struct wf_sample {
417 struct wf_sample_offset sampleStartOffset;
418 struct wf_sample_offset loopStartOffset;
419 struct wf_sample_offset loopEndOffset;
420 struct wf_sample_offset sampleEndOffset;
421 INT16 FrequencyBias;
422 UCHAR8 SampleResolution:2; /* sample_format */
423 UCHAR8 Unused1:1;
424 UCHAR8 Loop:1;
425 UCHAR8 Bidirectional:1;
426 UCHAR8 Unused2:1;
427 UCHAR8 Reverse:1;
428 UCHAR8 Unused3:1;
429} wavefront_sample;
430
431typedef struct wf_multisample {
432 INT16 NumberOfSamples; /* log2 of the number of samples */
433 INT16 SampleNumber[NUM_MIDIKEYS];
434} wavefront_multisample;
435
436typedef struct wf_alias {
437 INT16 OriginalSample;
438
439 struct wf_sample_offset sampleStartOffset;
440 struct wf_sample_offset loopStartOffset;
441 struct wf_sample_offset sampleEndOffset;
442 struct wf_sample_offset loopEndOffset;
443
444 INT16 FrequencyBias;
445
446 UCHAR8 SampleResolution:2;
447 UCHAR8 Unused1:1;
448 UCHAR8 Loop:1;
449 UCHAR8 Bidirectional:1;
450 UCHAR8 Unused2:1;
451 UCHAR8 Reverse:1;
452 UCHAR8 Unused3:1;
453
454 /* This structure is meant to be padded only to 16 bits on their
455 original. Of course, whoever wrote their documentation didn't
456 realize that sizeof(struct) can be >=
457 sum(sizeof(struct-fields)) and so thought that giving a C level
458 description of the structs used in WavePatch files was
459 sufficient. I suppose it was, as long as you remember the
460 standard 16->32 bit issues.
461 */
462
463 UCHAR8 sixteen_bit_padding;
464} __attribute__((packed)) wavefront_alias;
465
466typedef struct wf_drum {
467 UCHAR8 PatchNumber;
468 UCHAR8 MixLevel:7;
469 UCHAR8 Unmute:1;
470 UCHAR8 Group:4;
471 UCHAR8 Unused1:4;
472 UCHAR8 PanModSource:2;
473 UCHAR8 PanModulated:1;
474 UCHAR8 PanAmount:4;
475 UCHAR8 Unused2:1;
476} wavefront_drum;
477
478typedef struct wf_drumkit {
479 struct wf_drum drum[NUM_MIDIKEYS];
480} wavefront_drumkit;
481
482typedef struct wf_channel_programs {
483 UCHAR8 Program[NUM_MIDICHANNELS];
484} wavefront_channel_programs;
485
486/* How to get MIDI channel status from the data returned by
487 a WFC_GET_CHANNEL_STATUS command (a struct wf_channel_programs)
488*/
489
490#define WF_CHANNEL_STATUS(ch,wcp) (wcp)[(ch/7)] & (1<<((ch)%7))
491
492typedef union wf_any {
493 wavefront_sample s;
494 wavefront_multisample ms;
495 wavefront_alias a;
496 wavefront_program pr;
497 wavefront_patch p;
498 wavefront_drum d;
499} wavefront_any;
500
501/* Hannu Solvainen hoped that his "patch_info" struct in soundcard.h
502 might work for other wave-table based patch loading situations.
503 Alas, his fears were correct. The WaveFront doesn't even come with
504 just "patches", but several different kind of structures that
505 control the sound generation process.
506 */
507
508typedef struct wf_patch_info {
509
510 /* the first two fields are used by the OSS "patch loading" interface
511 only, and are unused by the current user-level library.
512 */
513
514 INT16 key; /* Use WAVEFRONT_PATCH here */
515 UINT16 devno; /* fill in when sending */
516 UCHAR8 subkey; /* WF_ST_{SAMPLE,ALIAS,etc.} */
517
518#define WAVEFRONT_FIND_FREE_SAMPLE_SLOT 999
519
520 UINT16 number; /* patch/sample/prog number */
521
522 UINT32 size; /* size of any data included in
523 one of the fields in `hdrptr', or
524 as `dataptr'.
525
526 NOTE: for actual samples, this is
527 the size of the *SELECTED CHANNEL*
528 even if more data is actually available.
529
530 So, a stereo sample (2 channels) of
531 6000 bytes total has `size' = 3000.
532
533 See the macros and comments for
534 WF_{GET,SET}_CHANNEL above.
535
536 */
537 wavefront_any __user *hdrptr; /* user-space ptr to hdr bytes */
538 UINT16 __user *dataptr; /* actual sample data */
539
540 wavefront_any hdr; /* kernel-space copy of hdr bytes */
541} wavefront_patch_info;
542
543/* The maximum number of bytes we will ever move to or from user space
544 in response to a WFC_* command. This obviously doesn't cover
545 actual sample data.
546*/
547
548#define WF_MAX_READ sizeof(wavefront_multisample)
549#define WF_MAX_WRITE sizeof(wavefront_multisample)
550
551/*
552 This allows us to execute any WF command except the download/upload
553 ones, which are handled differently due to copyin/copyout issues as
554 well as data-nybbling to/from the card.
555 */
556
557typedef struct wavefront_control {
558 int cmd; /* WFC_* */
559 char status; /* return status to user-space */
560 unsigned char rbuf[WF_MAX_READ]; /* bytes read from card */
561 unsigned char wbuf[WF_MAX_WRITE]; /* bytes written to card */
562} wavefront_control;
563
564#define WFCTL_WFCMD 0x1
565#define WFCTL_LOAD_SPP 0x2
566
567/* Modulator table */
568
569#define WF_MOD_LFO1 0
570#define WF_MOD_LFO2 1
571#define WF_MOD_ENV1 2
572#define WF_MOD_ENV2 3
573#define WF_MOD_KEYBOARD 4
574#define WF_MOD_LOGKEY 5
575#define WF_MOD_VELOCITY 6
576#define WF_MOD_LOGVEL 7
577#define WF_MOD_RANDOM 8
578#define WF_MOD_PRESSURE 9
579#define WF_MOD_MOD_WHEEL 10
580#define WF_MOD_1 WF_MOD_MOD_WHEEL
581#define WF_MOD_BREATH 11
582#define WF_MOD_2 WF_MOD_BREATH
583#define WF_MOD_FOOT 12
584#define WF_MOD_4 WF_MOD_FOOT
585#define WF_MOD_VOLUME 13
586#define WF_MOD_7 WF_MOD_VOLUME
587#define WF_MOD_PAN 14
588#define WF_MOD_10 WF_MOD_PAN
589#define WF_MOD_EXPR 15
590#define WF_MOD_11 WF_MOD_EXPR
591
592/* FX-related material */
593
594typedef struct wf_fx_info {
595 int request; /* see list below */
596 int data[4]; /* we don't need much */
597} wavefront_fx_info;
598
599/* support for each of these will be forthcoming once I or someone
600 else has figured out which of the addresses on page 6 and page 7 of
601 the YSS225 control each parameter. Incidentally, these come from
602 the Windows driver interface, but again, Turtle Beach didn't
603 document the API to use them.
604*/
605
606#define WFFX_SETOUTGAIN 0
607#define WFFX_SETSTEREOOUTGAIN 1
608#define WFFX_SETREVERBIN1GAIN 2
609#define WFFX_SETREVERBIN2GAIN 3
610#define WFFX_SETREVERBIN3GAIN 4
611#define WFFX_SETCHORUSINPORT 5
612#define WFFX_SETREVERBIN1PORT 6
613#define WFFX_SETREVERBIN2PORT 7
614#define WFFX_SETREVERBIN3PORT 8
615#define WFFX_SETEFFECTPORT 9
616#define WFFX_SETAUXPORT 10
617#define WFFX_SETREVERBTYPE 11
618#define WFFX_SETREVERBDELAY 12
619#define WFFX_SETCHORUSLFO 13
620#define WFFX_SETCHORUSPMD 14
621#define WFFX_SETCHORUSAMD 15
622#define WFFX_SETEFFECT 16
623#define WFFX_SETBASEALL 17
624#define WFFX_SETREVERBALL 18
625#define WFFX_SETCHORUSALL 20
626#define WFFX_SETREVERBDEF 22
627#define WFFX_SETCHORUSDEF 23
628#define WFFX_DELAYSETINGAIN 24
629#define WFFX_DELAYSETFBGAIN 25
630#define WFFX_DELAYSETFBLPF 26
631#define WFFX_DELAYSETGAIN 27
632#define WFFX_DELAYSETTIME 28
633#define WFFX_DELAYSETFBTIME 29
634#define WFFX_DELAYSETALL 30
635#define WFFX_DELAYSETDEF 32
636#define WFFX_SDELAYSETINGAIN 33
637#define WFFX_SDELAYSETFBGAIN 34
638#define WFFX_SDELAYSETFBLPF 35
639#define WFFX_SDELAYSETGAIN 36
640#define WFFX_SDELAYSETTIME 37
641#define WFFX_SDELAYSETFBTIME 38
642#define WFFX_SDELAYSETALL 39
643#define WFFX_SDELAYSETDEF 41
644#define WFFX_DEQSETINGAIN 42
645#define WFFX_DEQSETFILTER 43
646#define WFFX_DEQSETALL 44
647#define WFFX_DEQSETDEF 46
648#define WFFX_MUTE 47
649#define WFFX_FLANGESETBALANCE 48
650#define WFFX_FLANGESETDELAY 49
651#define WFFX_FLANGESETDWFFX_TH 50
652#define WFFX_FLANGESETFBGAIN 51
653#define WFFX_FLANGESETINGAIN 52
654#define WFFX_FLANGESETLFO 53
655#define WFFX_FLANGESETALL 54
656#define WFFX_FLANGESETDEF 56
657#define WFFX_PITCHSETSHIFT 57
658#define WFFX_PITCHSETBALANCE 58
659#define WFFX_PITCHSETALL 59
660#define WFFX_PITCHSETDEF 61
661#define WFFX_SRSSETINGAIN 62
662#define WFFX_SRSSETSPACE 63
663#define WFFX_SRSSETCENTER 64
664#define WFFX_SRSSETGAIN 65
665#define WFFX_SRSSETMODE 66
666#define WFFX_SRSSETDEF 68
667
668/* Allow direct user-space control over FX memory/coefficient data.
669 In theory this could be used to download the FX microprogram,
670 but it would be a little slower, and involve some weird code.
671 */
672
673#define WFFX_MEMSET 69
674
675#endif /* __wavefront_h__ */
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 430afd058269..8ae7f744917b 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -129,7 +129,8 @@ enum
129#define XFRM_MODE_TUNNEL 1 129#define XFRM_MODE_TUNNEL 1
130#define XFRM_MODE_ROUTEOPTIMIZATION 2 130#define XFRM_MODE_ROUTEOPTIMIZATION 2
131#define XFRM_MODE_IN_TRIGGER 3 131#define XFRM_MODE_IN_TRIGGER 3
132#define XFRM_MODE_MAX 4 132#define XFRM_MODE_BEET 4
133#define XFRM_MODE_MAX 5
133 134
134/* Netlink configuration messages. */ 135/* Netlink configuration messages. */
135enum { 136enum {
diff --git a/kernel/Makefile b/kernel/Makefile
index d948ca12acf0..5e3f3b75563a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -8,7 +8,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
8 signal.o sys.o kmod.o workqueue.o pid.o \ 8 signal.o sys.o kmod.o workqueue.o pid.o \
9 rcupdate.o extable.o params.o posix-timers.o \ 9 rcupdate.o extable.o params.o posix-timers.o \
10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ 10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
11 hrtimer.o rwsem.o latency.o nsproxy.o 11 hrtimer.o rwsem.o latency.o nsproxy.o srcu.o
12 12
13obj-$(CONFIG_STACKTRACE) += stacktrace.o 13obj-$(CONFIG_STACKTRACE) += stacktrace.o
14obj-y += time/ 14obj-y += time/
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 1a58a81fb09d..4f40d923af8e 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -411,7 +411,6 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
411 case AUDIT_FSGID: 411 case AUDIT_FSGID:
412 case AUDIT_LOGINUID: 412 case AUDIT_LOGINUID:
413 case AUDIT_PERS: 413 case AUDIT_PERS:
414 case AUDIT_ARCH:
415 case AUDIT_MSGTYPE: 414 case AUDIT_MSGTYPE:
416 case AUDIT_PPID: 415 case AUDIT_PPID:
417 case AUDIT_DEVMAJOR: 416 case AUDIT_DEVMAJOR:
@@ -423,6 +422,14 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
423 case AUDIT_ARG2: 422 case AUDIT_ARG2:
424 case AUDIT_ARG3: 423 case AUDIT_ARG3:
425 break; 424 break;
425 /* arch is only allowed to be = or != */
426 case AUDIT_ARCH:
427 if ((f->op != AUDIT_NOT_EQUAL) && (f->op != AUDIT_EQUAL)
428 && (f->op != AUDIT_NEGATE) && (f->op)) {
429 err = -EINVAL;
430 goto exit_free;
431 }
432 break;
426 case AUDIT_PERM: 433 case AUDIT_PERM:
427 if (f->val & ~15) 434 if (f->val & ~15)
428 goto exit_free; 435 goto exit_free;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 105147631753..42f2f1179711 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -278,8 +278,11 @@ static int audit_filter_rules(struct task_struct *tsk,
278 result = audit_comparator(tsk->pid, f->op, f->val); 278 result = audit_comparator(tsk->pid, f->op, f->val);
279 break; 279 break;
280 case AUDIT_PPID: 280 case AUDIT_PPID:
281 if (ctx) 281 if (ctx) {
282 if (!ctx->ppid)
283 ctx->ppid = sys_getppid();
282 result = audit_comparator(ctx->ppid, f->op, f->val); 284 result = audit_comparator(ctx->ppid, f->op, f->val);
285 }
283 break; 286 break;
284 case AUDIT_UID: 287 case AUDIT_UID:
285 result = audit_comparator(tsk->uid, f->op, f->val); 288 result = audit_comparator(tsk->uid, f->op, f->val);
@@ -795,7 +798,8 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
795 798
796 /* tsk == current */ 799 /* tsk == current */
797 context->pid = tsk->pid; 800 context->pid = tsk->pid;
798 context->ppid = sys_getppid(); /* sic. tsk == current in all cases */ 801 if (!context->ppid)
802 context->ppid = sys_getppid();
799 context->uid = tsk->uid; 803 context->uid = tsk->uid;
800 context->gid = tsk->gid; 804 context->gid = tsk->gid;
801 context->euid = tsk->euid; 805 context->euid = tsk->euid;
@@ -1137,6 +1141,7 @@ void audit_syscall_entry(int arch, int major,
1137 context->ctime = CURRENT_TIME; 1141 context->ctime = CURRENT_TIME;
1138 context->in_syscall = 1; 1142 context->in_syscall = 1;
1139 context->auditable = !!(state == AUDIT_RECORD_CONTEXT); 1143 context->auditable = !!(state == AUDIT_RECORD_CONTEXT);
1144 context->ppid = 0;
1140} 1145}
1141 1146
1142/** 1147/**
@@ -1352,7 +1357,13 @@ void __audit_inode_child(const char *dname, const struct inode *inode,
1352 } 1357 }
1353 1358
1354update_context: 1359update_context:
1355 idx = context->name_count++; 1360 idx = context->name_count;
1361 if (context->name_count == AUDIT_NAMES) {
1362 printk(KERN_DEBUG "name_count maxed and losing %s\n",
1363 found_name ?: "(null)");
1364 return;
1365 }
1366 context->name_count++;
1356#if AUDIT_DEBUG 1367#if AUDIT_DEBUG
1357 context->ino_count++; 1368 context->ino_count++;
1358#endif 1369#endif
@@ -1370,7 +1381,16 @@ update_context:
1370 /* A parent was not found in audit_names, so copy the inode data for the 1381 /* A parent was not found in audit_names, so copy the inode data for the
1371 * provided parent. */ 1382 * provided parent. */
1372 if (!found_name) { 1383 if (!found_name) {
1373 idx = context->name_count++; 1384 idx = context->name_count;
1385 if (context->name_count == AUDIT_NAMES) {
1386 printk(KERN_DEBUG
1387 "name_count maxed and losing parent inode data: dev=%02x:%02x, inode=%lu",
1388 MAJOR(parent->i_sb->s_dev),
1389 MINOR(parent->i_sb->s_dev),
1390 parent->i_ino);
1391 return;
1392 }
1393 context->name_count++;
1374#if AUDIT_DEBUG 1394#if AUDIT_DEBUG
1375 context->ino_count++; 1395 context->ino_count++;
1376#endif 1396#endif
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 736cb0bd498f..4cf65f5c6a74 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -18,6 +18,69 @@
18#include "internals.h" 18#include "internals.h"
19 19
20/** 20/**
21 * dynamic_irq_init - initialize a dynamically allocated irq
22 * @irq: irq number to initialize
23 */
24void dynamic_irq_init(unsigned int irq)
25{
26 struct irq_desc *desc;
27 unsigned long flags;
28
29 if (irq >= NR_IRQS) {
30 printk(KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
31 WARN_ON(1);
32 return;
33 }
34
35 /* Ensure we don't have left over values from a previous use of this irq */
36 desc = irq_desc + irq;
37 spin_lock_irqsave(&desc->lock, flags);
38 desc->status = IRQ_DISABLED;
39 desc->chip = &no_irq_chip;
40 desc->handle_irq = handle_bad_irq;
41 desc->depth = 1;
42 desc->handler_data = NULL;
43 desc->chip_data = NULL;
44 desc->action = NULL;
45 desc->irq_count = 0;
46 desc->irqs_unhandled = 0;
47#ifdef CONFIG_SMP
48 desc->affinity = CPU_MASK_ALL;
49#endif
50 spin_unlock_irqrestore(&desc->lock, flags);
51}
52
53/**
54 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
55 * @irq: irq number to initialize
56 */
57void dynamic_irq_cleanup(unsigned int irq)
58{
59 struct irq_desc *desc;
60 unsigned long flags;
61
62 if (irq >= NR_IRQS) {
63 printk(KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
64 WARN_ON(1);
65 return;
66 }
67
68 desc = irq_desc + irq;
69 spin_lock_irqsave(&desc->lock, flags);
70 if (desc->action) {
71 spin_unlock_irqrestore(&desc->lock, flags);
72 printk(KERN_ERR "Destroying IRQ%d without calling free_irq\n",
73 irq);
74 WARN_ON(1);
75 return;
76 }
77 desc->handle_irq = handle_bad_irq;
78 desc->chip = &no_irq_chip;
79 spin_unlock_irqrestore(&desc->lock, flags);
80}
81
82
83/**
21 * set_irq_chip - set the irq chip for an irq 84 * set_irq_chip - set the irq chip for an irq
22 * @irq: irq number 85 * @irq: irq number
23 * @chip: pointer to irq chip description structure 86 * @chip: pointer to irq chip description structure
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index a57ebe9fa6f6..4baa3bbcd25a 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -7,17 +7,17 @@ void set_pending_irq(unsigned int irq, cpumask_t mask)
7 unsigned long flags; 7 unsigned long flags;
8 8
9 spin_lock_irqsave(&desc->lock, flags); 9 spin_lock_irqsave(&desc->lock, flags);
10 desc->move_irq = 1; 10 desc->status |= IRQ_MOVE_PENDING;
11 irq_desc[irq].pending_mask = mask; 11 irq_desc[irq].pending_mask = mask;
12 spin_unlock_irqrestore(&desc->lock, flags); 12 spin_unlock_irqrestore(&desc->lock, flags);
13} 13}
14 14
15void move_native_irq(int irq) 15void move_masked_irq(int irq)
16{ 16{
17 struct irq_desc *desc = irq_desc + irq; 17 struct irq_desc *desc = irq_desc + irq;
18 cpumask_t tmp; 18 cpumask_t tmp;
19 19
20 if (likely(!desc->move_irq)) 20 if (likely(!(desc->status & IRQ_MOVE_PENDING)))
21 return; 21 return;
22 22
23 /* 23 /*
@@ -28,7 +28,7 @@ void move_native_irq(int irq)
28 return; 28 return;
29 } 29 }
30 30
31 desc->move_irq = 0; 31 desc->status &= ~IRQ_MOVE_PENDING;
32 32
33 if (unlikely(cpus_empty(irq_desc[irq].pending_mask))) 33 if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
34 return; 34 return;
@@ -48,15 +48,29 @@ void move_native_irq(int irq)
48 * when an active trigger is comming in. This could 48 * when an active trigger is comming in. This could
49 * cause some ioapics to mal-function. 49 * cause some ioapics to mal-function.
50 * Being paranoid i guess! 50 * Being paranoid i guess!
51 *
52 * For correct operation this depends on the caller
53 * masking the irqs.
51 */ 54 */
52 if (likely(!cpus_empty(tmp))) { 55 if (likely(!cpus_empty(tmp))) {
53 if (likely(!(desc->status & IRQ_DISABLED)))
54 desc->chip->disable(irq);
55
56 desc->chip->set_affinity(irq,tmp); 56 desc->chip->set_affinity(irq,tmp);
57
58 if (likely(!(desc->status & IRQ_DISABLED)))
59 desc->chip->enable(irq);
60 } 57 }
61 cpus_clear(irq_desc[irq].pending_mask); 58 cpus_clear(irq_desc[irq].pending_mask);
62} 59}
60
61void move_native_irq(int irq)
62{
63 struct irq_desc *desc = irq_desc + irq;
64
65 if (likely(!(desc->status & IRQ_MOVE_PENDING)))
66 return;
67
68 if (likely(!(desc->status & IRQ_DISABLED)))
69 desc->chip->disable(irq);
70
71 move_masked_irq(irq);
72
73 if (likely(!(desc->status & IRQ_DISABLED)))
74 desc->chip->enable(irq);
75}
76
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 523e46483b99..26bb5ffe1ef1 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -71,9 +71,6 @@ static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
71static int blimit = 10; 71static int blimit = 10;
72static int qhimark = 10000; 72static int qhimark = 10000;
73static int qlowmark = 100; 73static int qlowmark = 100;
74#ifdef CONFIG_SMP
75static int rsinterval = 1000;
76#endif
77 74
78static atomic_t rcu_barrier_cpu_count; 75static atomic_t rcu_barrier_cpu_count;
79static DEFINE_MUTEX(rcu_barrier_mutex); 76static DEFINE_MUTEX(rcu_barrier_mutex);
@@ -86,8 +83,8 @@ static void force_quiescent_state(struct rcu_data *rdp,
86 int cpu; 83 int cpu;
87 cpumask_t cpumask; 84 cpumask_t cpumask;
88 set_need_resched(); 85 set_need_resched();
89 if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) { 86 if (unlikely(!rcp->signaled)) {
90 rdp->last_rs_qlen = rdp->qlen; 87 rcp->signaled = 1;
91 /* 88 /*
92 * Don't send IPI to itself. With irqs disabled, 89 * Don't send IPI to itself. With irqs disabled,
93 * rdp->cpu is the current cpu. 90 * rdp->cpu is the current cpu.
@@ -301,6 +298,7 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
301 smp_mb(); 298 smp_mb();
302 cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask); 299 cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
303 300
301 rcp->signaled = 0;
304 } 302 }
305} 303}
306 304
@@ -628,9 +626,6 @@ void synchronize_rcu(void)
628module_param(blimit, int, 0); 626module_param(blimit, int, 0);
629module_param(qhimark, int, 0); 627module_param(qhimark, int, 0);
630module_param(qlowmark, int, 0); 628module_param(qlowmark, int, 0);
631#ifdef CONFIG_SMP
632module_param(rsinterval, int, 0);
633#endif
634EXPORT_SYMBOL_GPL(rcu_batches_completed); 629EXPORT_SYMBOL_GPL(rcu_batches_completed);
635EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); 630EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
636EXPORT_SYMBOL_GPL(call_rcu); 631EXPORT_SYMBOL_GPL(call_rcu);
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 23446e91cded..e2bda18f6f42 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -15,9 +15,10 @@
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 * 17 *
18 * Copyright (C) IBM Corporation, 2005 18 * Copyright (C) IBM Corporation, 2005, 2006
19 * 19 *
20 * Authors: Paul E. McKenney <paulmck@us.ibm.com> 20 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
21 * Josh Triplett <josh@freedesktop.org>
21 * 22 *
22 * See also: Documentation/RCU/torture.txt 23 * See also: Documentation/RCU/torture.txt
23 */ 24 */
@@ -44,19 +45,25 @@
44#include <linux/delay.h> 45#include <linux/delay.h>
45#include <linux/byteorder/swabb.h> 46#include <linux/byteorder/swabb.h>
46#include <linux/stat.h> 47#include <linux/stat.h>
48#include <linux/srcu.h>
47 49
48MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
51MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
52 "Josh Triplett <josh@freedesktop.org>");
49 53
50static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */ 54static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
55static int nfakewriters = 4; /* # fake writer threads */
51static int stat_interval; /* Interval between stats, in seconds. */ 56static int stat_interval; /* Interval between stats, in seconds. */
52 /* Defaults to "only at end of test". */ 57 /* Defaults to "only at end of test". */
53static int verbose; /* Print more debug info. */ 58static int verbose; /* Print more debug info. */
54static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */ 59static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
55static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/ 60static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
56static char *torture_type = "rcu"; /* What to torture. */ 61static char *torture_type = "rcu"; /* What RCU implementation to torture. */
57 62
58module_param(nreaders, int, 0); 63module_param(nreaders, int, 0);
59MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); 64MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
65module_param(nfakewriters, int, 0);
66MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
60module_param(stat_interval, int, 0); 67module_param(stat_interval, int, 0);
61MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s"); 68MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
62module_param(verbose, bool, 0); 69module_param(verbose, bool, 0);
@@ -66,7 +73,7 @@ MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
66module_param(shuffle_interval, int, 0); 73module_param(shuffle_interval, int, 0);
67MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); 74MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
68module_param(torture_type, charp, 0); 75module_param(torture_type, charp, 0);
69MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh)"); 76MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
70 77
71#define TORTURE_FLAG "-torture:" 78#define TORTURE_FLAG "-torture:"
72#define PRINTK_STRING(s) \ 79#define PRINTK_STRING(s) \
@@ -80,6 +87,7 @@ static char printk_buf[4096];
80 87
81static int nrealreaders; 88static int nrealreaders;
82static struct task_struct *writer_task; 89static struct task_struct *writer_task;
90static struct task_struct **fakewriter_tasks;
83static struct task_struct **reader_tasks; 91static struct task_struct **reader_tasks;
84static struct task_struct *stats_task; 92static struct task_struct *stats_task;
85static struct task_struct *shuffler_task; 93static struct task_struct *shuffler_task;
@@ -104,11 +112,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
104static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = 112static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
105 { 0 }; 113 { 0 };
106static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; 114static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
107atomic_t n_rcu_torture_alloc; 115static atomic_t n_rcu_torture_alloc;
108atomic_t n_rcu_torture_alloc_fail; 116static atomic_t n_rcu_torture_alloc_fail;
109atomic_t n_rcu_torture_free; 117static atomic_t n_rcu_torture_free;
110atomic_t n_rcu_torture_mberror; 118static atomic_t n_rcu_torture_mberror;
111atomic_t n_rcu_torture_error; 119static atomic_t n_rcu_torture_error;
120static struct list_head rcu_torture_removed;
112 121
113/* 122/*
114 * Allocate an element from the rcu_tortures pool. 123 * Allocate an element from the rcu_tortures pool.
@@ -145,7 +154,7 @@ rcu_torture_free(struct rcu_torture *p)
145 154
146struct rcu_random_state { 155struct rcu_random_state {
147 unsigned long rrs_state; 156 unsigned long rrs_state;
148 unsigned long rrs_count; 157 long rrs_count;
149}; 158};
150 159
151#define RCU_RANDOM_MULT 39916801 /* prime */ 160#define RCU_RANDOM_MULT 39916801 /* prime */
@@ -158,7 +167,7 @@ struct rcu_random_state {
158 * Crude but fast random-number generator. Uses a linear congruential 167 * Crude but fast random-number generator. Uses a linear congruential
159 * generator, with occasional help from get_random_bytes(). 168 * generator, with occasional help from get_random_bytes().
160 */ 169 */
161static long 170static unsigned long
162rcu_random(struct rcu_random_state *rrsp) 171rcu_random(struct rcu_random_state *rrsp)
163{ 172{
164 long refresh; 173 long refresh;
@@ -180,9 +189,11 @@ struct rcu_torture_ops {
180 void (*init)(void); 189 void (*init)(void);
181 void (*cleanup)(void); 190 void (*cleanup)(void);
182 int (*readlock)(void); 191 int (*readlock)(void);
192 void (*readdelay)(struct rcu_random_state *rrsp);
183 void (*readunlock)(int idx); 193 void (*readunlock)(int idx);
184 int (*completed)(void); 194 int (*completed)(void);
185 void (*deferredfree)(struct rcu_torture *p); 195 void (*deferredfree)(struct rcu_torture *p);
196 void (*sync)(void);
186 int (*stats)(char *page); 197 int (*stats)(char *page);
187 char *name; 198 char *name;
188}; 199};
@@ -198,6 +209,18 @@ static int rcu_torture_read_lock(void) __acquires(RCU)
198 return 0; 209 return 0;
199} 210}
200 211
212static void rcu_read_delay(struct rcu_random_state *rrsp)
213{
214 long delay;
215 const long longdelay = 200;
216
217 /* We want there to be long-running readers, but not all the time. */
218
219 delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
220 if (!delay)
221 udelay(longdelay);
222}
223
201static void rcu_torture_read_unlock(int idx) __releases(RCU) 224static void rcu_torture_read_unlock(int idx) __releases(RCU)
202{ 225{
203 rcu_read_unlock(); 226 rcu_read_unlock();
@@ -239,13 +262,54 @@ static struct rcu_torture_ops rcu_ops = {
239 .init = NULL, 262 .init = NULL,
240 .cleanup = NULL, 263 .cleanup = NULL,
241 .readlock = rcu_torture_read_lock, 264 .readlock = rcu_torture_read_lock,
265 .readdelay = rcu_read_delay,
242 .readunlock = rcu_torture_read_unlock, 266 .readunlock = rcu_torture_read_unlock,
243 .completed = rcu_torture_completed, 267 .completed = rcu_torture_completed,
244 .deferredfree = rcu_torture_deferred_free, 268 .deferredfree = rcu_torture_deferred_free,
269 .sync = synchronize_rcu,
245 .stats = NULL, 270 .stats = NULL,
246 .name = "rcu" 271 .name = "rcu"
247}; 272};
248 273
274static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
275{
276 int i;
277 struct rcu_torture *rp;
278 struct rcu_torture *rp1;
279
280 cur_ops->sync();
281 list_add(&p->rtort_free, &rcu_torture_removed);
282 list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
283 i = rp->rtort_pipe_count;
284 if (i > RCU_TORTURE_PIPE_LEN)
285 i = RCU_TORTURE_PIPE_LEN;
286 atomic_inc(&rcu_torture_wcount[i]);
287 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
288 rp->rtort_mbtest = 0;
289 list_del(&rp->rtort_free);
290 rcu_torture_free(rp);
291 }
292 }
293}
294
295static void rcu_sync_torture_init(void)
296{
297 INIT_LIST_HEAD(&rcu_torture_removed);
298}
299
300static struct rcu_torture_ops rcu_sync_ops = {
301 .init = rcu_sync_torture_init,
302 .cleanup = NULL,
303 .readlock = rcu_torture_read_lock,
304 .readdelay = rcu_read_delay,
305 .readunlock = rcu_torture_read_unlock,
306 .completed = rcu_torture_completed,
307 .deferredfree = rcu_sync_torture_deferred_free,
308 .sync = synchronize_rcu,
309 .stats = NULL,
310 .name = "rcu_sync"
311};
312
249/* 313/*
250 * Definitions for rcu_bh torture testing. 314 * Definitions for rcu_bh torture testing.
251 */ 315 */
@@ -271,19 +335,176 @@ static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
271 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); 335 call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
272} 336}
273 337
338struct rcu_bh_torture_synchronize {
339 struct rcu_head head;
340 struct completion completion;
341};
342
343static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
344{
345 struct rcu_bh_torture_synchronize *rcu;
346
347 rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
348 complete(&rcu->completion);
349}
350
351static void rcu_bh_torture_synchronize(void)
352{
353 struct rcu_bh_torture_synchronize rcu;
354
355 init_completion(&rcu.completion);
356 call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
357 wait_for_completion(&rcu.completion);
358}
359
274static struct rcu_torture_ops rcu_bh_ops = { 360static struct rcu_torture_ops rcu_bh_ops = {
275 .init = NULL, 361 .init = NULL,
276 .cleanup = NULL, 362 .cleanup = NULL,
277 .readlock = rcu_bh_torture_read_lock, 363 .readlock = rcu_bh_torture_read_lock,
364 .readdelay = rcu_read_delay, /* just reuse rcu's version. */
278 .readunlock = rcu_bh_torture_read_unlock, 365 .readunlock = rcu_bh_torture_read_unlock,
279 .completed = rcu_bh_torture_completed, 366 .completed = rcu_bh_torture_completed,
280 .deferredfree = rcu_bh_torture_deferred_free, 367 .deferredfree = rcu_bh_torture_deferred_free,
368 .sync = rcu_bh_torture_synchronize,
281 .stats = NULL, 369 .stats = NULL,
282 .name = "rcu_bh" 370 .name = "rcu_bh"
283}; 371};
284 372
373static struct rcu_torture_ops rcu_bh_sync_ops = {
374 .init = rcu_sync_torture_init,
375 .cleanup = NULL,
376 .readlock = rcu_bh_torture_read_lock,
377 .readdelay = rcu_read_delay, /* just reuse rcu's version. */
378 .readunlock = rcu_bh_torture_read_unlock,
379 .completed = rcu_bh_torture_completed,
380 .deferredfree = rcu_sync_torture_deferred_free,
381 .sync = rcu_bh_torture_synchronize,
382 .stats = NULL,
383 .name = "rcu_bh_sync"
384};
385
386/*
387 * Definitions for srcu torture testing.
388 */
389
390static struct srcu_struct srcu_ctl;
391
392static void srcu_torture_init(void)
393{
394 init_srcu_struct(&srcu_ctl);
395 rcu_sync_torture_init();
396}
397
398static void srcu_torture_cleanup(void)
399{
400 synchronize_srcu(&srcu_ctl);
401 cleanup_srcu_struct(&srcu_ctl);
402}
403
404static int srcu_torture_read_lock(void)
405{
406 return srcu_read_lock(&srcu_ctl);
407}
408
409static void srcu_read_delay(struct rcu_random_state *rrsp)
410{
411 long delay;
412 const long uspertick = 1000000 / HZ;
413 const long longdelay = 10;
414
415 /* We want there to be long-running readers, but not all the time. */
416
417 delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
418 if (!delay)
419 schedule_timeout_interruptible(longdelay);
420}
421
422static void srcu_torture_read_unlock(int idx)
423{
424 srcu_read_unlock(&srcu_ctl, idx);
425}
426
427static int srcu_torture_completed(void)
428{
429 return srcu_batches_completed(&srcu_ctl);
430}
431
432static void srcu_torture_synchronize(void)
433{
434 synchronize_srcu(&srcu_ctl);
435}
436
437static int srcu_torture_stats(char *page)
438{
439 int cnt = 0;
440 int cpu;
441 int idx = srcu_ctl.completed & 0x1;
442
443 cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
444 torture_type, TORTURE_FLAG, idx);
445 for_each_possible_cpu(cpu) {
446 cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
447 per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
448 per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
449 }
450 cnt += sprintf(&page[cnt], "\n");
451 return cnt;
452}
453
454static struct rcu_torture_ops srcu_ops = {
455 .init = srcu_torture_init,
456 .cleanup = srcu_torture_cleanup,
457 .readlock = srcu_torture_read_lock,
458 .readdelay = srcu_read_delay,
459 .readunlock = srcu_torture_read_unlock,
460 .completed = srcu_torture_completed,
461 .deferredfree = rcu_sync_torture_deferred_free,
462 .sync = srcu_torture_synchronize,
463 .stats = srcu_torture_stats,
464 .name = "srcu"
465};
466
467/*
468 * Definitions for sched torture testing.
469 */
470
471static int sched_torture_read_lock(void)
472{
473 preempt_disable();
474 return 0;
475}
476
477static void sched_torture_read_unlock(int idx)
478{
479 preempt_enable();
480}
481
482static int sched_torture_completed(void)
483{
484 return 0;
485}
486
487static void sched_torture_synchronize(void)
488{
489 synchronize_sched();
490}
491
492static struct rcu_torture_ops sched_ops = {
493 .init = rcu_sync_torture_init,
494 .cleanup = NULL,
495 .readlock = sched_torture_read_lock,
496 .readdelay = rcu_read_delay, /* just reuse rcu's version. */
497 .readunlock = sched_torture_read_unlock,
498 .completed = sched_torture_completed,
499 .deferredfree = rcu_sync_torture_deferred_free,
500 .sync = sched_torture_synchronize,
501 .stats = NULL,
502 .name = "sched"
503};
504
285static struct rcu_torture_ops *torture_ops[] = 505static struct rcu_torture_ops *torture_ops[] =
286 { &rcu_ops, &rcu_bh_ops, NULL }; 506 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, &srcu_ops,
507 &sched_ops, NULL };
287 508
288/* 509/*
289 * RCU torture writer kthread. Repeatedly substitutes a new structure 510 * RCU torture writer kthread. Repeatedly substitutes a new structure
@@ -330,6 +551,30 @@ rcu_torture_writer(void *arg)
330} 551}
331 552
332/* 553/*
554 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
555 * delay between calls.
556 */
557static int
558rcu_torture_fakewriter(void *arg)
559{
560 DEFINE_RCU_RANDOM(rand);
561
562 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
563 set_user_nice(current, 19);
564
565 do {
566 schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
567 udelay(rcu_random(&rand) & 0x3ff);
568 cur_ops->sync();
569 } while (!kthread_should_stop() && !fullstop);
570
571 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
572 while (!kthread_should_stop())
573 schedule_timeout_uninterruptible(1);
574 return 0;
575}
576
577/*
333 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current, 578 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
334 * incrementing the corresponding element of the pipeline array. The 579 * incrementing the corresponding element of the pipeline array. The
335 * counter in the element should never be greater than 1, otherwise, the 580 * counter in the element should never be greater than 1, otherwise, the
@@ -359,7 +604,7 @@ rcu_torture_reader(void *arg)
359 } 604 }
360 if (p->rtort_mbtest == 0) 605 if (p->rtort_mbtest == 0)
361 atomic_inc(&n_rcu_torture_mberror); 606 atomic_inc(&n_rcu_torture_mberror);
362 udelay(rcu_random(&rand) & 0x7f); 607 cur_ops->readdelay(&rand);
363 preempt_disable(); 608 preempt_disable();
364 pipe_count = p->rtort_pipe_count; 609 pipe_count = p->rtort_pipe_count;
365 if (pipe_count > RCU_TORTURE_PIPE_LEN) { 610 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
@@ -483,7 +728,7 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
483/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case 728/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
484 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs. 729 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
485 */ 730 */
486void rcu_torture_shuffle_tasks(void) 731static void rcu_torture_shuffle_tasks(void)
487{ 732{
488 cpumask_t tmp_mask = CPU_MASK_ALL; 733 cpumask_t tmp_mask = CPU_MASK_ALL;
489 int i; 734 int i;
@@ -507,6 +752,12 @@ void rcu_torture_shuffle_tasks(void)
507 set_cpus_allowed(reader_tasks[i], tmp_mask); 752 set_cpus_allowed(reader_tasks[i], tmp_mask);
508 } 753 }
509 754
755 if (fakewriter_tasks != NULL) {
756 for (i = 0; i < nfakewriters; i++)
757 if (fakewriter_tasks[i])
758 set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
759 }
760
510 if (writer_task) 761 if (writer_task)
511 set_cpus_allowed(writer_task, tmp_mask); 762 set_cpus_allowed(writer_task, tmp_mask);
512 763
@@ -540,11 +791,12 @@ rcu_torture_shuffle(void *arg)
540static inline void 791static inline void
541rcu_torture_print_module_parms(char *tag) 792rcu_torture_print_module_parms(char *tag)
542{ 793{
543 printk(KERN_ALERT "%s" TORTURE_FLAG "--- %s: nreaders=%d " 794 printk(KERN_ALERT "%s" TORTURE_FLAG
795 "--- %s: nreaders=%d nfakewriters=%d "
544 "stat_interval=%d verbose=%d test_no_idle_hz=%d " 796 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
545 "shuffle_interval = %d\n", 797 "shuffle_interval = %d\n",
546 torture_type, tag, nrealreaders, stat_interval, verbose, 798 torture_type, tag, nrealreaders, nfakewriters,
547 test_no_idle_hz, shuffle_interval); 799 stat_interval, verbose, test_no_idle_hz, shuffle_interval);
548} 800}
549 801
550static void 802static void
@@ -579,6 +831,19 @@ rcu_torture_cleanup(void)
579 } 831 }
580 rcu_torture_current = NULL; 832 rcu_torture_current = NULL;
581 833
834 if (fakewriter_tasks != NULL) {
835 for (i = 0; i < nfakewriters; i++) {
836 if (fakewriter_tasks[i] != NULL) {
837 VERBOSE_PRINTK_STRING(
838 "Stopping rcu_torture_fakewriter task");
839 kthread_stop(fakewriter_tasks[i]);
840 }
841 fakewriter_tasks[i] = NULL;
842 }
843 kfree(fakewriter_tasks);
844 fakewriter_tasks = NULL;
845 }
846
582 if (stats_task != NULL) { 847 if (stats_task != NULL) {
583 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task"); 848 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
584 kthread_stop(stats_task); 849 kthread_stop(stats_task);
@@ -666,7 +931,25 @@ rcu_torture_init(void)
666 writer_task = NULL; 931 writer_task = NULL;
667 goto unwind; 932 goto unwind;
668 } 933 }
669 reader_tasks = kmalloc(nrealreaders * sizeof(reader_tasks[0]), 934 fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
935 GFP_KERNEL);
936 if (fakewriter_tasks == NULL) {
937 VERBOSE_PRINTK_ERRSTRING("out of memory");
938 firsterr = -ENOMEM;
939 goto unwind;
940 }
941 for (i = 0; i < nfakewriters; i++) {
942 VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
943 fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
944 "rcu_torture_fakewriter");
945 if (IS_ERR(fakewriter_tasks[i])) {
946 firsterr = PTR_ERR(fakewriter_tasks[i]);
947 VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
948 fakewriter_tasks[i] = NULL;
949 goto unwind;
950 }
951 }
952 reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
670 GFP_KERNEL); 953 GFP_KERNEL);
671 if (reader_tasks == NULL) { 954 if (reader_tasks == NULL) {
672 VERBOSE_PRINTK_ERRSTRING("out of memory"); 955 VERBOSE_PRINTK_ERRSTRING("out of memory");
diff --git a/kernel/srcu.c b/kernel/srcu.c
new file mode 100644
index 000000000000..3507cabe963b
--- /dev/null
+++ b/kernel/srcu.c
@@ -0,0 +1,258 @@
1/*
2 * Sleepable Read-Copy Update mechanism for mutual exclusion.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2006
19 *
20 * Author: Paul McKenney <paulmck@us.ibm.com>
21 *
22 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU/ *.txt
24 *
25 */
26
27#include <linux/module.h>
28#include <linux/mutex.h>
29#include <linux/percpu.h>
30#include <linux/preempt.h>
31#include <linux/rcupdate.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/smp.h>
35#include <linux/srcu.h>
36
37/**
38 * init_srcu_struct - initialize a sleep-RCU structure
39 * @sp: structure to initialize.
40 *
41 * Must invoke this on a given srcu_struct before passing that srcu_struct
42 * to any other function. Each srcu_struct represents a separate domain
43 * of SRCU protection.
44 */
45int init_srcu_struct(struct srcu_struct *sp)
46{
47 sp->completed = 0;
48 mutex_init(&sp->mutex);
49 sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
50 return (sp->per_cpu_ref ? 0 : -ENOMEM);
51}
52
53/*
54 * srcu_readers_active_idx -- returns approximate number of readers
55 * active on the specified rank of per-CPU counters.
56 */
57
58static int srcu_readers_active_idx(struct srcu_struct *sp, int idx)
59{
60 int cpu;
61 int sum;
62
63 sum = 0;
64 for_each_possible_cpu(cpu)
65 sum += per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx];
66 return sum;
67}
68
/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Sums both ranks of per-CPU counters.  Note that this is not an atomic
 * primitive, and can therefore suffer severe errors when invoked on an
 * active srcu_struct.  That said, it can be useful as an error check at
 * cleanup time.
 */
int srcu_readers_active(struct srcu_struct *sp)
{
	int nr_idx0 = srcu_readers_active_idx(sp, 0);
	int nr_idx1 = srcu_readers_active_idx(sp, 1);

	return nr_idx0 + nr_idx1;
}
81
82/**
83 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
84 * @sp: structure to clean up.
85 *
86 * Must invoke this after you are finished using a given srcu_struct that
87 * was initialized via init_srcu_struct(), else you leak memory.
88 */
89void cleanup_srcu_struct(struct srcu_struct *sp)
90{
91 int sum;
92
93 sum = srcu_readers_active(sp);
94 WARN_ON(sum); /* Leakage unless caller handles error. */
95 if (sum != 0)
96 return;
97 free_percpu(sp->per_cpu_ref);
98 sp->per_cpu_ref = NULL;
99}
100
/**
 * srcu_read_lock - register a new reader for an SRCU-protected structure.
 * @sp: srcu_struct in which to register the new reader.
 *
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	/*
	 * Disabling preemption pins us to one CPU, so the
	 * smp_processor_id()-based per-CPU counter access below stays
	 * consistent for the duration of the increment.
	 */
	preempt_disable();
	/* Sample the current rank (0 or 1) exactly once. */
	idx = sp->completed & 0x1;
	barrier();  /* ensure compiler looks -once- at sp->completed. */
	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]++;
	srcu_barrier();  /* ensure compiler won't misorder critical section. */
	preempt_enable();
	return idx;
}
121
/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
 * @sp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	/* Pin to one CPU so the smp_processor_id() access is stable. */
	preempt_disable();
	srcu_barrier();  /* ensure compiler won't misorder critical section. */
	per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
	preempt_enable();
}
139
/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Flip the completed counter, and wait for the old count to drain to zero.
 * As with classic RCU, the updater must use some separate means of
 * synchronizing concurrent updates.  Can block; must be called from
 * process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	int idx;

	/* Snapshot the generation counter -before- taking the mutex. */
	idx = sp->completed;
	mutex_lock(&sp->mutex);

	/*
	 * Check to see if someone else did the work for us while we were
	 * waiting to acquire the lock.  We need -two- advances of
	 * the counter, not just one.  If there was but one, we might have
	 * shown up -after- our helper's first synchronize_sched(), thus
	 * having failed to prevent CPU-reordering races with concurrent
	 * srcu_read_unlock()s on other CPUs (see comment below).  So we
	 * either (1) wait for two or (2) supply the second ourselves.
	 */

	if ((sp->completed - idx) >= 2) {
		mutex_unlock(&sp->mutex);
		return;
	}

	synchronize_sched();  /* Force memory barrier on all CPUs. */

	/*
	 * The preceding synchronize_sched() ensures that any CPU that
	 * sees the new value of sp->completed will also see any preceding
	 * changes to data structures made by this CPU.  This prevents
	 * some other CPU from reordering the accesses in its SRCU
	 * read-side critical section to precede the corresponding
	 * srcu_read_lock() -- ensuring that such references will in
	 * fact be protected.
	 *
	 * So it is now safe to do the flip.
	 */

	/* Remember the now-old rank, then advance readers to the new one. */
	idx = sp->completed & 0x1;
	sp->completed++;

	synchronize_sched();  /* Force memory barrier on all CPUs. */

	/*
	 * At this point, because of the preceding synchronize_sched(),
	 * all srcu_read_lock() calls using the old counters have completed.
	 * Their corresponding critical sections might well be still
	 * executing, but the srcu_read_lock() primitives themselves
	 * will have finished executing.
	 */

	/* Drain: wait for all readers on the old rank to unlock. */
	while (srcu_readers_active_idx(sp, idx))
		schedule_timeout_interruptible(1);

	synchronize_sched();  /* Force memory barrier on all CPUs. */

	/*
	 * The preceding synchronize_sched() forces all srcu_read_unlock()
	 * primitives that were executing concurrently with the preceding
	 * for_each_possible_cpu() loop to have completed by this point.
	 * More importantly, it also forces the corresponding SRCU read-side
	 * critical sections to have also completed, and the corresponding
	 * references to SRCU-protected data items to be dropped.
	 *
	 * Note:
	 *
	 *	Despite what you might think at first glance, the
	 *	preceding synchronize_sched() -must- be within the
	 *	critical section ended by the following mutex_unlock().
	 *	Otherwise, a task taking the early exit can race
	 *	with a srcu_read_unlock(), which might have executed
	 *	just before the preceding srcu_readers_active() check,
	 *	and whose CPU might have reordered the srcu_read_unlock()
	 *	with the preceding critical section.  In this case, there
	 *	is nothing preventing the synchronize_sched() task that is
	 *	taking the early exit from freeing a data structure that
	 *	is still being referenced (out of order) by the task
	 *	doing the srcu_read_unlock().
	 *
	 *	Alternatively, the comparison with "2" on the early exit
	 *	could be changed to "3", but this increases synchronize_srcu()
	 *	latency for bulk loads.  So the current code is preferred.
	 */

	mutex_unlock(&sp->mutex);
}
238
239/**
240 * srcu_batches_completed - return batches completed.
241 * @sp: srcu_struct on which to report batch completion.
242 *
243 * Report the number of batches, correlated with, but not necessarily
244 * precisely the same as, the number of grace periods that have elapsed.
245 */
246
247long srcu_batches_completed(struct srcu_struct *sp)
248{
249 return sp->completed;
250}
251
252EXPORT_SYMBOL_GPL(init_srcu_struct);
253EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
254EXPORT_SYMBOL_GPL(srcu_read_lock);
255EXPORT_SYMBOL_GPL(srcu_read_unlock);
256EXPORT_SYMBOL_GPL(synchronize_srcu);
257EXPORT_SYMBOL_GPL(srcu_batches_completed);
258EXPORT_SYMBOL_GPL(srcu_readers_active);
diff --git a/kernel/sys.c b/kernel/sys.c
index 2314867ae34f..98489d82801b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -153,7 +153,7 @@ static int __kprobes notifier_call_chain(struct notifier_block **nl,
153 153
154/* 154/*
155 * Atomic notifier chain routines. Registration and unregistration 155 * Atomic notifier chain routines. Registration and unregistration
156 * use a mutex, and call_chain is synchronized by RCU (no locks). 156 * use a spinlock, and call_chain is synchronized by RCU (no locks).
157 */ 157 */
158 158
159/** 159/**
@@ -401,6 +401,129 @@ int raw_notifier_call_chain(struct raw_notifier_head *nh,
401 401
402EXPORT_SYMBOL_GPL(raw_notifier_call_chain); 402EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
403 403
404/*
405 * SRCU notifier chain routines. Registration and unregistration
406 * use a mutex, and call_chain is synchronized by SRCU (no locks).
407 */
408
409/**
410 * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
411 * @nh: Pointer to head of the SRCU notifier chain
412 * @n: New entry in notifier chain
413 *
414 * Adds a notifier to an SRCU notifier chain.
415 * Must be called in process context.
416 *
417 * Currently always returns zero.
418 */
419
420int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
421 struct notifier_block *n)
422{
423 int ret;
424
425 /*
426 * This code gets used during boot-up, when task switching is
427 * not yet working and interrupts must remain disabled. At
428 * such times we must not call mutex_lock().
429 */
430 if (unlikely(system_state == SYSTEM_BOOTING))
431 return notifier_chain_register(&nh->head, n);
432
433 mutex_lock(&nh->mutex);
434 ret = notifier_chain_register(&nh->head, n);
435 mutex_unlock(&nh->mutex);
436 return ret;
437}
438
439EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);
440
441/**
442 * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
443 * @nh: Pointer to head of the SRCU notifier chain
444 * @n: Entry to remove from notifier chain
445 *
446 * Removes a notifier from an SRCU notifier chain.
447 * Must be called from process context.
448 *
449 * Returns zero on success or %-ENOENT on failure.
450 */
451int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
452 struct notifier_block *n)
453{
454 int ret;
455
456 /*
457 * This code gets used during boot-up, when task switching is
458 * not yet working and interrupts must remain disabled. At
459 * such times we must not call mutex_lock().
460 */
461 if (unlikely(system_state == SYSTEM_BOOTING))
462 return notifier_chain_unregister(&nh->head, n);
463
464 mutex_lock(&nh->mutex);
465 ret = notifier_chain_unregister(&nh->head, n);
466 mutex_unlock(&nh->mutex);
467 synchronize_srcu(&nh->srcu);
468 return ret;
469}
470
471EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
472
473/**
474 * srcu_notifier_call_chain - Call functions in an SRCU notifier chain
475 * @nh: Pointer to head of the SRCU notifier chain
476 * @val: Value passed unmodified to notifier function
477 * @v: Pointer passed unmodified to notifier function
478 *
479 * Calls each function in a notifier chain in turn. The functions
480 * run in a process context, so they are allowed to block.
481 *
482 * If the return value of the notifier can be and'ed
483 * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain
484 * will return immediately, with the return value of
485 * the notifier function which halted execution.
486 * Otherwise the return value is the return value
487 * of the last notifier function called.
488 */
489
490int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
491 unsigned long val, void *v)
492{
493 int ret;
494 int idx;
495
496 idx = srcu_read_lock(&nh->srcu);
497 ret = notifier_call_chain(&nh->head, val, v);
498 srcu_read_unlock(&nh->srcu, idx);
499 return ret;
500}
501
502EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
503
504/**
505 * srcu_init_notifier_head - Initialize an SRCU notifier head
506 * @nh: Pointer to head of the srcu notifier chain
507 *
508 * Unlike other sorts of notifier heads, SRCU notifier heads require
509 * dynamic initialization. Be sure to call this routine before
510 * calling any of the other SRCU notifier routines for this head.
511 *
512 * If an SRCU notifier head is deallocated, it must first be cleaned
513 * up by calling srcu_cleanup_notifier_head(). Otherwise the head's
514 * per-cpu data (used by the SRCU mechanism) will leak.
515 */
516
517void srcu_init_notifier_head(struct srcu_notifier_head *nh)
518{
519 mutex_init(&nh->mutex);
520 if (init_srcu_struct(&nh->srcu) < 0)
521 BUG();
522 nh->head = NULL;
523}
524
525EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
526
404/** 527/**
405 * register_reboot_notifier - Register function to be called at reboot time 528 * register_reboot_notifier - Register function to be called at reboot time
406 * @nb: Info about notifier function to be called 529 * @nb: Info about notifier function to be called
diff --git a/mm/filemap.c b/mm/filemap.c
index ec469235985d..3464b681f844 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1139,11 +1139,11 @@ success:
1139} 1139}
1140 1140
1141/** 1141/**
1142 * __generic_file_aio_read - generic filesystem read routine 1142 * generic_file_aio_read - generic filesystem read routine
1143 * @iocb: kernel I/O control block 1143 * @iocb: kernel I/O control block
1144 * @iov: io vector request 1144 * @iov: io vector request
1145 * @nr_segs: number of segments in the iovec 1145 * @nr_segs: number of segments in the iovec
1146 * @ppos: current file position 1146 * @pos: current file position
1147 * 1147 *
1148 * This is the "read()" routine for all filesystems 1148 * This is the "read()" routine for all filesystems
1149 * that can use the page cache directly. 1149 * that can use the page cache directly.
@@ -1198,8 +1198,10 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
1198 if (retval > 0) 1198 if (retval > 0)
1199 *ppos = pos + retval; 1199 *ppos = pos + retval;
1200 } 1200 }
1201 file_accessed(filp); 1201 if (likely(retval != 0)) {
1202 goto out; 1202 file_accessed(filp);
1203 goto out;
1204 }
1203 } 1205 }
1204 1206
1205 retval = 0; 1207 retval = 0;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c7d03dbf73d..1d709ff528e1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -364,6 +364,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
364 pte_t *ptep; 364 pte_t *ptep;
365 pte_t pte; 365 pte_t pte;
366 struct page *page; 366 struct page *page;
367 struct page *tmp;
368 LIST_HEAD(page_list);
367 369
368 WARN_ON(!is_vm_hugetlb_page(vma)); 370 WARN_ON(!is_vm_hugetlb_page(vma));
369 BUG_ON(start & ~HPAGE_MASK); 371 BUG_ON(start & ~HPAGE_MASK);
@@ -384,12 +386,16 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
384 continue; 386 continue;
385 387
386 page = pte_page(pte); 388 page = pte_page(pte);
387 put_page(page); 389 list_add(&page->lru, &page_list);
388 add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE)); 390 add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
389 } 391 }
390 392
391 spin_unlock(&mm->page_table_lock); 393 spin_unlock(&mm->page_table_lock);
392 flush_tlb_range(vma, start, end); 394 flush_tlb_range(vma, start, end);
395 list_for_each_entry_safe(page, tmp, &page_list, lru) {
396 list_del(&page->lru);
397 put_page(page);
398 }
393} 399}
394 400
395static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 401static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4f59d90b81e6..a8c003e7b3d5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -900,7 +900,8 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
900 int classzone_idx, int alloc_flags) 900 int classzone_idx, int alloc_flags)
901{ 901{
 902 /* free_pages may go negative - that's OK */ 902 /* free_pages may go negative - that's OK */
903 long min = mark, free_pages = z->free_pages - (1 << order) + 1; 903 unsigned long min = mark;
904 long free_pages = z->free_pages - (1 << order) + 1;
904 int o; 905 int o;
905 906
906 if (alloc_flags & ALLOC_HIGH) 907 if (alloc_flags & ALLOC_HIGH)
@@ -2050,8 +2051,8 @@ int __init early_pfn_to_nid(unsigned long pfn)
2050 2051
2051/** 2052/**
2052 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range 2053 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
2053 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed 2054 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
2054 * @max_low_pfn: The highest PFN that till be passed to free_bootmem_node 2055 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
2055 * 2056 *
2056 * If an architecture guarantees that all ranges registered with 2057 * If an architecture guarantees that all ranges registered with
2057 * add_active_ranges() contain no holes and may be freed, this 2058 * add_active_ranges() contain no holes and may be freed, this
@@ -2081,11 +2082,11 @@ void __init free_bootmem_with_active_regions(int nid,
2081 2082
2082/** 2083/**
2083 * sparse_memory_present_with_active_regions - Call memory_present for each active range 2084 * sparse_memory_present_with_active_regions - Call memory_present for each active range
2084 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used 2085 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
2085 * 2086 *
2086 * If an architecture guarantees that all ranges registered with 2087 * If an architecture guarantees that all ranges registered with
2087 * add_active_ranges() contain no holes and may be freed, this 2088 * add_active_ranges() contain no holes and may be freed, this
2088 * this function may be used instead of calling memory_present() manually. 2089 * function may be used instead of calling memory_present() manually.
2089 */ 2090 */
2090void __init sparse_memory_present_with_active_regions(int nid) 2091void __init sparse_memory_present_with_active_regions(int nid)
2091{ 2092{
@@ -2155,14 +2156,14 @@ static void __init account_node_boundary(unsigned int nid,
2155 2156
2156/** 2157/**
2157 * get_pfn_range_for_nid - Return the start and end page frames for a node 2158 * get_pfn_range_for_nid - Return the start and end page frames for a node
2158 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned 2159 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
2159 * @start_pfn: Passed by reference. On return, it will have the node start_pfn 2160 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
2160 * @end_pfn: Passed by reference. On return, it will have the node end_pfn 2161 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
2161 * 2162 *
2162 * It returns the start and end page frame of a node based on information 2163 * It returns the start and end page frame of a node based on information
2163 * provided by an arch calling add_active_range(). If called for a node 2164 * provided by an arch calling add_active_range(). If called for a node
2164 * with no available memory, a warning is printed and the start and end 2165 * with no available memory, a warning is printed and the start and end
2165 * PFNs will be 0 2166 * PFNs will be 0.
2166 */ 2167 */
2167void __init get_pfn_range_for_nid(unsigned int nid, 2168void __init get_pfn_range_for_nid(unsigned int nid,
2168 unsigned long *start_pfn, unsigned long *end_pfn) 2169 unsigned long *start_pfn, unsigned long *end_pfn)
@@ -2215,7 +2216,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
2215 2216
2216/* 2217/*
2217 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 2218 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
2218 * then all holes in the requested range will be accounted for 2219 * then all holes in the requested range will be accounted for.
2219 */ 2220 */
2220unsigned long __init __absent_pages_in_range(int nid, 2221unsigned long __init __absent_pages_in_range(int nid,
2221 unsigned long range_start_pfn, 2222 unsigned long range_start_pfn,
@@ -2268,7 +2269,7 @@ unsigned long __init __absent_pages_in_range(int nid,
2268 * @start_pfn: The start PFN to start searching for holes 2269 * @start_pfn: The start PFN to start searching for holes
2269 * @end_pfn: The end PFN to stop searching for holes 2270 * @end_pfn: The end PFN to stop searching for holes
2270 * 2271 *
2271 * It returns the number of pages frames in memory holes within a range 2272 * It returns the number of pages frames in memory holes within a range.
2272 */ 2273 */
2273unsigned long __init absent_pages_in_range(unsigned long start_pfn, 2274unsigned long __init absent_pages_in_range(unsigned long start_pfn,
2274 unsigned long end_pfn) 2275 unsigned long end_pfn)
@@ -2582,11 +2583,12 @@ void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
2582 2583
2583/** 2584/**
2584 * remove_all_active_ranges - Remove all currently registered regions 2585 * remove_all_active_ranges - Remove all currently registered regions
2586 *
2585 * During discovery, it may be found that a table like SRAT is invalid 2587 * During discovery, it may be found that a table like SRAT is invalid
2586 * and an alternative discovery method must be used. This function removes 2588 * and an alternative discovery method must be used. This function removes
2587 * all currently registered regions. 2589 * all currently registered regions.
2588 */ 2590 */
2589void __init remove_all_active_ranges() 2591void __init remove_all_active_ranges(void)
2590{ 2592{
2591 memset(early_node_map, 0, sizeof(early_node_map)); 2593 memset(early_node_map, 0, sizeof(early_node_map));
2592 nr_nodemap_entries = 0; 2594 nr_nodemap_entries = 0;
@@ -2636,7 +2638,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
2636 * find_min_pfn_with_active_regions - Find the minimum PFN registered 2638 * find_min_pfn_with_active_regions - Find the minimum PFN registered
2637 * 2639 *
2638 * It returns the minimum PFN based on information provided via 2640 * It returns the minimum PFN based on information provided via
2639 * add_active_range() 2641 * add_active_range().
2640 */ 2642 */
2641unsigned long __init find_min_pfn_with_active_regions(void) 2643unsigned long __init find_min_pfn_with_active_regions(void)
2642{ 2644{
@@ -2647,7 +2649,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
2647 * find_max_pfn_with_active_regions - Find the maximum PFN registered 2649 * find_max_pfn_with_active_regions - Find the maximum PFN registered
2648 * 2650 *
2649 * It returns the maximum PFN based on information provided via 2651 * It returns the maximum PFN based on information provided via
2650 * add_active_range() 2652 * add_active_range().
2651 */ 2653 */
2652unsigned long __init find_max_pfn_with_active_regions(void) 2654unsigned long __init find_max_pfn_with_active_regions(void)
2653{ 2655{
@@ -2662,10 +2664,7 @@ unsigned long __init find_max_pfn_with_active_regions(void)
2662 2664
2663/** 2665/**
2664 * free_area_init_nodes - Initialise all pg_data_t and zone data 2666 * free_area_init_nodes - Initialise all pg_data_t and zone data
2665 * @arch_max_dma_pfn: The maximum PFN usable for ZONE_DMA 2667 * @max_zone_pfn: an array of max PFNs for each zone
2666 * @arch_max_dma32_pfn: The maximum PFN usable for ZONE_DMA32
2667 * @arch_max_low_pfn: The maximum PFN usable for ZONE_NORMAL
2668 * @arch_max_high_pfn: The maximum PFN usable for ZONE_HIGHMEM
2669 * 2668 *
2670 * This will call free_area_init_node() for each active node in the system. 2669 * This will call free_area_init_node() for each active node in the system.
2671 * Using the page ranges provided by add_active_range(), the size of each 2670 * Using the page ranges provided by add_active_range(), the size of each
@@ -2723,14 +2722,15 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
2723#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 2722#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
2724 2723
2725/** 2724/**
2726 * set_dma_reserve - Account the specified number of pages reserved in ZONE_DMA 2725 * set_dma_reserve - set the specified number of pages reserved in the first zone
2727 * @new_dma_reserve - The number of pages to mark reserved 2726 * @new_dma_reserve: The number of pages to mark reserved
2728 * 2727 *
2729 * The per-cpu batchsize and zone watermarks are determined by present_pages. 2728 * The per-cpu batchsize and zone watermarks are determined by present_pages.
2730 * In the DMA zone, a significant percentage may be consumed by kernel image 2729 * In the DMA zone, a significant percentage may be consumed by kernel image
2731 * and other unfreeable allocations which can skew the watermarks badly. This 2730 * and other unfreeable allocations which can skew the watermarks badly. This
2732 * function may optionally be used to account for unfreeable pages in 2731 * function may optionally be used to account for unfreeable pages in the
2733 * ZONE_DMA. The effect will be lower watermarks and smaller per-cpu batchsize 2732 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
2733 * smaller per-cpu batchsize.
2734 */ 2734 */
2735void __init set_dma_reserve(unsigned long new_dma_reserve) 2735void __init set_dma_reserve(unsigned long new_dma_reserve)
2736{ 2736{
@@ -2843,10 +2843,11 @@ static void setup_per_zone_lowmem_reserve(void)
2843 calculate_totalreserve_pages(); 2843 calculate_totalreserve_pages();
2844} 2844}
2845 2845
2846/* 2846/**
2847 * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures 2847 * setup_per_zone_pages_min - called when min_free_kbytes changes.
2848 * that the pages_{min,low,high} values for each zone are set correctly 2848 *
2849 * with respect to min_free_kbytes. 2849 * Ensures that the pages_{min,low,high} values for each zone are set correctly
2850 * with respect to min_free_kbytes.
2850 */ 2851 */
2851void setup_per_zone_pages_min(void) 2852void setup_per_zone_pages_min(void)
2852{ 2853{
diff --git a/mm/readahead.c b/mm/readahead.c
index aa7ec424656a..1ba736ac0367 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -38,6 +38,7 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
38 ra->ra_pages = mapping->backing_dev_info->ra_pages; 38 ra->ra_pages = mapping->backing_dev_info->ra_pages;
39 ra->prev_page = -1; 39 ra->prev_page = -1;
40} 40}
41EXPORT_SYMBOL_GPL(file_ra_state_init);
41 42
42/* 43/*
43 * Return max readahead size for this inode in number-of-pages. 44 * Return max readahead size for this inode in number-of-pages.
diff --git a/mm/slab.c b/mm/slab.c
index f3514351aed8..e9a63b5a7fb9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3487,22 +3487,25 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3487} 3487}
3488 3488
3489 3489
3490#ifdef CONFIG_DEBUG_SLAB
3490void *__kmalloc(size_t size, gfp_t flags) 3491void *__kmalloc(size_t size, gfp_t flags)
3491{ 3492{
3492#ifndef CONFIG_DEBUG_SLAB
3493 return __do_kmalloc(size, flags, NULL);
3494#else
3495 return __do_kmalloc(size, flags, __builtin_return_address(0)); 3493 return __do_kmalloc(size, flags, __builtin_return_address(0));
3496#endif
3497} 3494}
3498EXPORT_SYMBOL(__kmalloc); 3495EXPORT_SYMBOL(__kmalloc);
3499 3496
3500#ifdef CONFIG_DEBUG_SLAB
3501void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller) 3497void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
3502{ 3498{
3503 return __do_kmalloc(size, flags, caller); 3499 return __do_kmalloc(size, flags, caller);
3504} 3500}
3505EXPORT_SYMBOL(__kmalloc_track_caller); 3501EXPORT_SYMBOL(__kmalloc_track_caller);
3502
3503#else
3504void *__kmalloc(size_t size, gfp_t flags)
3505{
3506 return __do_kmalloc(size, flags, NULL);
3507}
3508EXPORT_SYMBOL(__kmalloc);
3506#endif 3509#endif
3507 3510
3508/** 3511/**
diff --git a/mm/util.c b/mm/util.c
index e14fa84ef39a..ace2aea69f1a 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -11,7 +11,7 @@
11 */ 11 */
12void *__kzalloc(size_t size, gfp_t flags) 12void *__kzalloc(size_t size, gfp_t flags)
13{ 13{
14 void *ret = ____kmalloc(size, flags); 14 void *ret = kmalloc_track_caller(size, flags);
15 if (ret) 15 if (ret)
16 memset(ret, 0, size); 16 memset(ret, 0, size);
17 return ret; 17 return ret;
@@ -33,7 +33,7 @@ char *kstrdup(const char *s, gfp_t gfp)
33 return NULL; 33 return NULL;
34 34
35 len = strlen(s) + 1; 35 len = strlen(s) + 1;
36 buf = ____kmalloc(len, gfp); 36 buf = kmalloc_track_caller(len, gfp);
37 if (buf) 37 if (buf)
38 memcpy(buf, s, len); 38 memcpy(buf, s, len);
39 return buf; 39 return buf;
@@ -51,7 +51,7 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
51{ 51{
52 void *p; 52 void *p;
53 53
54 p = ____kmalloc(len, gfp); 54 p = kmalloc_track_caller(len, gfp);
55 if (p) 55 if (p)
56 memcpy(p, src, len); 56 memcpy(p, src, len);
57 return p; 57 return p;
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 770c0df972a3..b54306a934e5 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -22,24 +22,37 @@ static int ebt_target_mark(struct sk_buff **pskb, unsigned int hooknr,
22 const void *data, unsigned int datalen) 22 const void *data, unsigned int datalen)
23{ 23{
24 struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data; 24 struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data;
25 int action = info->target & -16;
25 26
26 if ((*pskb)->nfmark != info->mark) 27 if (action == MARK_SET_VALUE)
27 (*pskb)->nfmark = info->mark; 28 (*pskb)->nfmark = info->mark;
29 else if (action == MARK_OR_VALUE)
30 (*pskb)->nfmark |= info->mark;
31 else if (action == MARK_AND_VALUE)
32 (*pskb)->nfmark &= info->mark;
33 else
34 (*pskb)->nfmark ^= info->mark;
28 35
29 return info->target; 36 return info->target | -16;
30} 37}
31 38
32static int ebt_target_mark_check(const char *tablename, unsigned int hookmask, 39static int ebt_target_mark_check(const char *tablename, unsigned int hookmask,
33 const struct ebt_entry *e, void *data, unsigned int datalen) 40 const struct ebt_entry *e, void *data, unsigned int datalen)
34{ 41{
35 struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data; 42 struct ebt_mark_t_info *info = (struct ebt_mark_t_info *)data;
43 int tmp;
36 44
37 if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_t_info))) 45 if (datalen != EBT_ALIGN(sizeof(struct ebt_mark_t_info)))
38 return -EINVAL; 46 return -EINVAL;
39 if (BASE_CHAIN && info->target == EBT_RETURN) 47 tmp = info->target | -16;
48 if (BASE_CHAIN && tmp == EBT_RETURN)
40 return -EINVAL; 49 return -EINVAL;
41 CLEAR_BASE_CHAIN_BIT; 50 CLEAR_BASE_CHAIN_BIT;
42 if (INVALID_TARGET) 51 if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
52 return -EINVAL;
53 tmp = info->target & -16;
54 if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE &&
55 tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE)
43 return -EINVAL; 56 return -EINVAL;
44 return 0; 57 return 0;
45} 58}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8ce8c471d868..b4b478353b27 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -344,12 +344,12 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
344{ 344{
345 struct neighbour *n; 345 struct neighbour *n;
346 int key_len = tbl->key_len; 346 int key_len = tbl->key_len;
347 u32 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask; 347 u32 hash_val = tbl->hash(pkey, dev);
348 348
349 NEIGH_CACHE_STAT_INC(tbl, lookups); 349 NEIGH_CACHE_STAT_INC(tbl, lookups);
350 350
351 read_lock_bh(&tbl->lock); 351 read_lock_bh(&tbl->lock);
352 for (n = tbl->hash_buckets[hash_val]; n; n = n->next) { 352 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
353 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) { 353 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
354 neigh_hold(n); 354 neigh_hold(n);
355 NEIGH_CACHE_STAT_INC(tbl, hits); 355 NEIGH_CACHE_STAT_INC(tbl, hits);
@@ -364,12 +364,12 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
364{ 364{
365 struct neighbour *n; 365 struct neighbour *n;
366 int key_len = tbl->key_len; 366 int key_len = tbl->key_len;
367 u32 hash_val = tbl->hash(pkey, NULL) & tbl->hash_mask; 367 u32 hash_val = tbl->hash(pkey, NULL);
368 368
369 NEIGH_CACHE_STAT_INC(tbl, lookups); 369 NEIGH_CACHE_STAT_INC(tbl, lookups);
370 370
371 read_lock_bh(&tbl->lock); 371 read_lock_bh(&tbl->lock);
372 for (n = tbl->hash_buckets[hash_val]; n; n = n->next) { 372 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
373 if (!memcmp(n->primary_key, pkey, key_len)) { 373 if (!memcmp(n->primary_key, pkey, key_len)) {
374 neigh_hold(n); 374 neigh_hold(n);
375 NEIGH_CACHE_STAT_INC(tbl, hits); 375 NEIGH_CACHE_STAT_INC(tbl, hits);
@@ -1998,12 +1998,12 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
1998 int rc, h, s_h = cb->args[1]; 1998 int rc, h, s_h = cb->args[1];
1999 int idx, s_idx = idx = cb->args[2]; 1999 int idx, s_idx = idx = cb->args[2];
2000 2000
2001 read_lock_bh(&tbl->lock);
2001 for (h = 0; h <= tbl->hash_mask; h++) { 2002 for (h = 0; h <= tbl->hash_mask; h++) {
2002 if (h < s_h) 2003 if (h < s_h)
2003 continue; 2004 continue;
2004 if (h > s_h) 2005 if (h > s_h)
2005 s_idx = 0; 2006 s_idx = 0;
2006 read_lock_bh(&tbl->lock);
2007 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) { 2007 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
2008 if (idx < s_idx) 2008 if (idx < s_idx)
2009 continue; 2009 continue;
@@ -2016,8 +2016,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2016 goto out; 2016 goto out;
2017 } 2017 }
2018 } 2018 }
2019 read_unlock_bh(&tbl->lock);
2020 } 2019 }
2020 read_unlock_bh(&tbl->lock);
2021 rc = skb->len; 2021 rc = skb->len;
2022out: 2022out:
2023 cb->args[1] = h; 2023 cb->args[1] = h;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c448c7f6fde2..3c23760c5827 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -156,7 +156,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
156 156
157 /* Get the DATA. Size must match skb_add_mtu(). */ 157 /* Get the DATA. Size must match skb_add_mtu(). */
158 size = SKB_DATA_ALIGN(size); 158 size = SKB_DATA_ALIGN(size);
159 data = ____kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); 159 data = kmalloc_track_caller(size + sizeof(struct skb_shared_info),
160 gfp_mask);
160 if (!data) 161 if (!data)
161 goto nodata; 162 goto nodata;
162 163
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index d172a9804448..5572071af735 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -434,6 +434,15 @@ config INET_XFRM_MODE_TUNNEL
434 434
435 If unsure, say Y. 435 If unsure, say Y.
436 436
437config INET_XFRM_MODE_BEET
438 tristate "IP: IPsec BEET mode"
439 default y
440 select XFRM
441 ---help---
442 Support for IPsec BEET mode.
443
444 If unsure, say Y.
445
437config INET_DIAG 446config INET_DIAG
438 tristate "INET: socket monitoring interface" 447 tristate "INET: socket monitoring interface"
439 default y 448 default y
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index f66049e28aeb..15645c51520c 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_INET_AH) += ah4.o
23obj-$(CONFIG_INET_ESP) += esp4.o 23obj-$(CONFIG_INET_ESP) += esp4.o
24obj-$(CONFIG_INET_IPCOMP) += ipcomp.o 24obj-$(CONFIG_INET_IPCOMP) += ipcomp.o
25obj-$(CONFIG_INET_XFRM_TUNNEL) += xfrm4_tunnel.o 25obj-$(CONFIG_INET_XFRM_TUNNEL) += xfrm4_tunnel.o
26obj-$(CONFIG_INET_XFRM_MODE_BEET) += xfrm4_mode_beet.o
26obj-$(CONFIG_INET_TUNNEL) += tunnel4.o 27obj-$(CONFIG_INET_TUNNEL) += tunnel4.o
27obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o 28obj-$(CONFIG_INET_XFRM_MODE_TRANSPORT) += xfrm4_mode_transport.o
28obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o 29obj-$(CONFIG_INET_XFRM_MODE_TUNNEL) += xfrm4_mode_tunnel.o
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 13b29360d102..b5c205b57669 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -253,7 +253,8 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
253 * as per draft-ietf-ipsec-udp-encaps-06, 253 * as per draft-ietf-ipsec-udp-encaps-06,
254 * section 3.1.2 254 * section 3.1.2
255 */ 255 */
256 if (x->props.mode == XFRM_MODE_TRANSPORT) 256 if (x->props.mode == XFRM_MODE_TRANSPORT ||
257 x->props.mode == XFRM_MODE_BEET)
257 skb->ip_summed = CHECKSUM_UNNECESSARY; 258 skb->ip_summed = CHECKSUM_UNNECESSARY;
258 } 259 }
259 260
@@ -271,17 +272,28 @@ static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
271{ 272{
272 struct esp_data *esp = x->data; 273 struct esp_data *esp = x->data;
273 u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4); 274 u32 blksize = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
274 275 int enclen = 0;
275 if (x->props.mode == XFRM_MODE_TUNNEL) { 276
276 mtu = ALIGN(mtu + 2, blksize); 277 switch (x->props.mode) {
277 } else { 278 case XFRM_MODE_TUNNEL:
278 /* The worst case. */ 279 mtu = ALIGN(mtu +2, blksize);
280 break;
281 default:
282 case XFRM_MODE_TRANSPORT:
283 /* The worst case */
279 mtu = ALIGN(mtu + 2, 4) + blksize - 4; 284 mtu = ALIGN(mtu + 2, 4) + blksize - 4;
285 break;
286 case XFRM_MODE_BEET:
287 /* The worst case. */
288 enclen = IPV4_BEET_PHMAXLEN;
289 mtu = ALIGN(mtu + enclen + 2, blksize);
290 break;
280 } 291 }
292
281 if (esp->conf.padlen) 293 if (esp->conf.padlen)
282 mtu = ALIGN(mtu, esp->conf.padlen); 294 mtu = ALIGN(mtu, esp->conf.padlen);
283 295
284 return mtu + x->props.header_len + esp->auth.icv_trunc_len; 296 return mtu + x->props.header_len + esp->auth.icv_trunc_len - enclen;
285} 297}
286 298
287static void esp4_err(struct sk_buff *skb, u32 info) 299static void esp4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 2017d36024d4..3839b706142e 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -206,6 +206,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
206static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) 206static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
207{ 207{
208 struct xfrm_state *t; 208 struct xfrm_state *t;
209 u8 mode = XFRM_MODE_TUNNEL;
209 210
210 t = xfrm_state_alloc(); 211 t = xfrm_state_alloc();
211 if (t == NULL) 212 if (t == NULL)
@@ -216,7 +217,9 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
216 t->id.daddr.a4 = x->id.daddr.a4; 217 t->id.daddr.a4 = x->id.daddr.a4;
217 memcpy(&t->sel, &x->sel, sizeof(t->sel)); 218 memcpy(&t->sel, &x->sel, sizeof(t->sel));
218 t->props.family = AF_INET; 219 t->props.family = AF_INET;
219 t->props.mode = XFRM_MODE_TUNNEL; 220 if (x->props.mode == XFRM_MODE_BEET)
221 mode = x->props.mode;
222 t->props.mode = mode;
220 t->props.saddr.a4 = x->props.saddr.a4; 223 t->props.saddr.a4 = x->props.saddr.a4;
221 t->props.flags = x->props.flags; 224 t->props.flags = x->props.flags;
222 225
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 6dee03935f78..1445bb47fea4 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -813,6 +813,16 @@ ip_vs_out(unsigned int hooknum, struct sk_buff **pskb,
813 skb->nh.iph->saddr = cp->vaddr; 813 skb->nh.iph->saddr = cp->vaddr;
814 ip_send_check(skb->nh.iph); 814 ip_send_check(skb->nh.iph);
815 815
816 /* For policy routing, packets originating from this
817 * machine itself may be routed differently to packets
818 * passing through. We want this packet to be routed as
819 * if it came from this machine itself. So re-compute
820 * the routing information.
821 */
822 if (ip_route_me_harder(pskb, RTN_LOCAL) != 0)
823 goto drop;
824 skb = *pskb;
825
816 IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT"); 826 IP_VS_DBG_PKT(10, pp, skb, 0, "After SNAT");
817 827
818 ip_vs_out_stats(cp, skb); 828 ip_vs_out_stats(cp, skb);
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 5ac15379a0cf..e2005c6810a4 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -8,7 +8,7 @@
8#include <net/ip.h> 8#include <net/ip.h>
9 9
10/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ 10/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
11int ip_route_me_harder(struct sk_buff **pskb) 11int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type)
12{ 12{
13 struct iphdr *iph = (*pskb)->nh.iph; 13 struct iphdr *iph = (*pskb)->nh.iph;
14 struct rtable *rt; 14 struct rtable *rt;
@@ -16,10 +16,13 @@ int ip_route_me_harder(struct sk_buff **pskb)
16 struct dst_entry *odst; 16 struct dst_entry *odst;
17 unsigned int hh_len; 17 unsigned int hh_len;
18 18
19 if (addr_type == RTN_UNSPEC)
20 addr_type = inet_addr_type(iph->saddr);
21
19 /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause 22 /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
20 * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook. 23 * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
21 */ 24 */
22 if (inet_addr_type(iph->saddr) == RTN_LOCAL) { 25 if (addr_type == RTN_LOCAL) {
23 fl.nl_u.ip4_u.daddr = iph->daddr; 26 fl.nl_u.ip4_u.daddr = iph->daddr;
24 fl.nl_u.ip4_u.saddr = iph->saddr; 27 fl.nl_u.ip4_u.saddr = iph->saddr;
25 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos); 28 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
@@ -156,7 +159,7 @@ static int nf_ip_reroute(struct sk_buff **pskb, const struct nf_info *info)
156 if (!(iph->tos == rt_info->tos 159 if (!(iph->tos == rt_info->tos
157 && iph->daddr == rt_info->daddr 160 && iph->daddr == rt_info->daddr
158 && iph->saddr == rt_info->saddr)) 161 && iph->saddr == rt_info->saddr))
159 return ip_route_me_harder(pskb); 162 return ip_route_me_harder(pskb, RTN_UNSPEC);
160 } 163 }
161 return 0; 164 return 0;
162} 165}
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index 021395b67463..d85d2de50449 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -265,7 +265,8 @@ ip_nat_local_fn(unsigned int hooknum,
265 ct->tuplehash[!dir].tuple.src.u.all 265 ct->tuplehash[!dir].tuple.src.u.all
266#endif 266#endif
267 ) 267 )
268 return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; 268 if (ip_route_me_harder(pskb, RTN_UNSPEC))
269 ret = NF_DROP;
269 } 270 }
270 return ret; 271 return ret;
271} 272}
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index fd0c05efed8a..ad0312d0e4fd 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -38,76 +38,16 @@ MODULE_DESCRIPTION("iptables REJECT target module");
38#define DEBUGP(format, args...) 38#define DEBUGP(format, args...)
39#endif 39#endif
40 40
41static inline struct rtable *route_reverse(struct sk_buff *skb,
42 struct tcphdr *tcph, int hook)
43{
44 struct iphdr *iph = skb->nh.iph;
45 struct dst_entry *odst;
46 struct flowi fl = {};
47 struct rtable *rt;
48
49 /* We don't require ip forwarding to be enabled to be able to
50 * send a RST reply for bridged traffic. */
51 if (hook != NF_IP_FORWARD
52#ifdef CONFIG_BRIDGE_NETFILTER
53 || (skb->nf_bridge && skb->nf_bridge->mask & BRNF_BRIDGED)
54#endif
55 ) {
56 fl.nl_u.ip4_u.daddr = iph->saddr;
57 if (hook == NF_IP_LOCAL_IN)
58 fl.nl_u.ip4_u.saddr = iph->daddr;
59 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
60
61 if (ip_route_output_key(&rt, &fl) != 0)
62 return NULL;
63 } else {
64 /* non-local src, find valid iif to satisfy
65 * rp-filter when calling ip_route_input. */
66 fl.nl_u.ip4_u.daddr = iph->daddr;
67 if (ip_route_output_key(&rt, &fl) != 0)
68 return NULL;
69
70 odst = skb->dst;
71 if (ip_route_input(skb, iph->saddr, iph->daddr,
72 RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
73 dst_release(&rt->u.dst);
74 return NULL;
75 }
76 dst_release(&rt->u.dst);
77 rt = (struct rtable *)skb->dst;
78 skb->dst = odst;
79
80 fl.nl_u.ip4_u.daddr = iph->saddr;
81 fl.nl_u.ip4_u.saddr = iph->daddr;
82 fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
83 }
84
85 if (rt->u.dst.error) {
86 dst_release(&rt->u.dst);
87 return NULL;
88 }
89
90 fl.proto = IPPROTO_TCP;
91 fl.fl_ip_sport = tcph->dest;
92 fl.fl_ip_dport = tcph->source;
93 security_skb_classify_flow(skb, &fl);
94
95 xfrm_lookup((struct dst_entry **)&rt, &fl, NULL, 0);
96
97 return rt;
98}
99
100/* Send RST reply */ 41/* Send RST reply */
101static void send_reset(struct sk_buff *oldskb, int hook) 42static void send_reset(struct sk_buff *oldskb, int hook)
102{ 43{
103 struct sk_buff *nskb; 44 struct sk_buff *nskb;
104 struct iphdr *iph = oldskb->nh.iph; 45 struct iphdr *iph = oldskb->nh.iph;
105 struct tcphdr _otcph, *oth, *tcph; 46 struct tcphdr _otcph, *oth, *tcph;
106 struct rtable *rt;
107 __be16 tmp_port; 47 __be16 tmp_port;
108 __be32 tmp_addr; 48 __be32 tmp_addr;
109 int needs_ack; 49 int needs_ack;
110 int hh_len; 50 unsigned int addr_type;
111 51
112 /* IP header checks: fragment. */ 52 /* IP header checks: fragment. */
113 if (oldskb->nh.iph->frag_off & htons(IP_OFFSET)) 53 if (oldskb->nh.iph->frag_off & htons(IP_OFFSET))
@@ -126,23 +66,13 @@ static void send_reset(struct sk_buff *oldskb, int hook)
126 if (nf_ip_checksum(oldskb, hook, iph->ihl * 4, IPPROTO_TCP)) 66 if (nf_ip_checksum(oldskb, hook, iph->ihl * 4, IPPROTO_TCP))
127 return; 67 return;
128 68
129 if ((rt = route_reverse(oldskb, oth, hook)) == NULL)
130 return;
131
132 hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
133
134 /* We need a linear, writeable skb. We also need to expand 69 /* We need a linear, writeable skb. We also need to expand
135 headroom in case hh_len of incoming interface < hh_len of 70 headroom in case hh_len of incoming interface < hh_len of
136 outgoing interface */ 71 outgoing interface */
137 nskb = skb_copy_expand(oldskb, hh_len, skb_tailroom(oldskb), 72 nskb = skb_copy_expand(oldskb, LL_MAX_HEADER, skb_tailroom(oldskb),
138 GFP_ATOMIC); 73 GFP_ATOMIC);
139 if (!nskb) { 74 if (!nskb)
140 dst_release(&rt->u.dst);
141 return; 75 return;
142 }
143
144 dst_release(nskb->dst);
145 nskb->dst = &rt->u.dst;
146 76
147 /* This packet will not be the same as the other: clear nf fields */ 77 /* This packet will not be the same as the other: clear nf fields */
148 nf_reset(nskb); 78 nf_reset(nskb);
@@ -184,6 +114,21 @@ static void send_reset(struct sk_buff *oldskb, int hook)
184 tcph->window = 0; 114 tcph->window = 0;
185 tcph->urg_ptr = 0; 115 tcph->urg_ptr = 0;
186 116
117 /* Set DF, id = 0 */
118 nskb->nh.iph->frag_off = htons(IP_DF);
119 nskb->nh.iph->id = 0;
120
121 addr_type = RTN_UNSPEC;
122 if (hook != NF_IP_FORWARD
123#ifdef CONFIG_BRIDGE_NETFILTER
124 || (nskb->nf_bridge && nskb->nf_bridge->mask & BRNF_BRIDGED)
125#endif
126 )
127 addr_type = RTN_LOCAL;
128
129 if (ip_route_me_harder(&nskb, addr_type))
130 goto free_nskb;
131
187 /* Adjust TCP checksum */ 132 /* Adjust TCP checksum */
188 nskb->ip_summed = CHECKSUM_NONE; 133 nskb->ip_summed = CHECKSUM_NONE;
189 tcph->check = 0; 134 tcph->check = 0;
@@ -192,12 +137,8 @@ static void send_reset(struct sk_buff *oldskb, int hook)
192 nskb->nh.iph->daddr, 137 nskb->nh.iph->daddr,
193 csum_partial((char *)tcph, 138 csum_partial((char *)tcph,
194 sizeof(struct tcphdr), 0)); 139 sizeof(struct tcphdr), 0));
195 140 /* Adjust IP TTL */
196 /* Adjust IP TTL, DF */
197 nskb->nh.iph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT); 141 nskb->nh.iph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT);
198 /* Set DF, id = 0 */
199 nskb->nh.iph->frag_off = htons(IP_DF);
200 nskb->nh.iph->id = 0;
201 142
202 /* Adjust IP checksum */ 143 /* Adjust IP checksum */
203 nskb->nh.iph->check = 0; 144 nskb->nh.iph->check = 0;
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index e62ea2bb9c0a..b91f3582359b 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -157,7 +157,8 @@ ipt_local_hook(unsigned int hook,
157 || (*pskb)->nfmark != nfmark 157 || (*pskb)->nfmark != nfmark
158#endif 158#endif
159 || (*pskb)->nh.iph->tos != tos)) 159 || (*pskb)->nh.iph->tos != tos))
160 return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; 160 if (ip_route_me_harder(pskb, RTN_UNSPEC))
161 ret = NF_DROP;
161 162
162 return ret; 163 return ret;
163} 164}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3f884cea14ff..cf06accbe687 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2259,7 +2259,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
2259 u32 pkts_acked = 0; 2259 u32 pkts_acked = 0;
2260 void (*rtt_sample)(struct sock *sk, u32 usrtt) 2260 void (*rtt_sample)(struct sock *sk, u32 usrtt)
2261 = icsk->icsk_ca_ops->rtt_sample; 2261 = icsk->icsk_ca_ops->rtt_sample;
2262 struct timeval tv; 2262 struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
2263 2263
2264 while ((skb = skb_peek(&sk->sk_write_queue)) && 2264 while ((skb = skb_peek(&sk->sk_write_queue)) &&
2265 skb != sk->sk_send_head) { 2265 skb != sk->sk_send_head) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 6d6142f9c478..865d75214a9a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -675,6 +675,8 @@ do_append_data:
675 udp_flush_pending_frames(sk); 675 udp_flush_pending_frames(sk);
676 else if (!corkreq) 676 else if (!corkreq)
677 err = udp_push_pending_frames(sk, up); 677 err = udp_push_pending_frames(sk, up);
678 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
679 up->pending = 0;
678 release_sock(sk); 680 release_sock(sk);
679 681
680out: 682out:
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
new file mode 100644
index 000000000000..89cf59ea7bbe
--- /dev/null
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -0,0 +1,139 @@
1/*
2 * xfrm4_mode_beet.c - BEET mode encapsulation for IPv4.
3 *
4 * Copyright (c) 2006 Diego Beltrami <diego.beltrami@gmail.com>
5 * Miika Komu <miika@iki.fi>
6 * Herbert Xu <herbert@gondor.apana.org.au>
7 * Abhinav Pathak <abhinav.pathak@hiit.fi>
8 * Jeff Ahrenholz <ahrenholz@gmail.com>
9 */
10
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/skbuff.h>
15#include <linux/stringify.h>
16#include <net/dst.h>
17#include <net/ip.h>
18#include <net/xfrm.h>
19
20/* Add encapsulation header.
21 *
22 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
23 * The following fields in it shall be filled in by x->type->output:
24 * tot_len
25 * check
26 *
27 * On exit, skb->h will be set to the start of the payload to be processed
28 * by x->type->output and skb->nh will be set to the top IP header.
29 */
30static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb)
31{
32 struct iphdr *iph, *top_iph = NULL;
33 int hdrlen, optlen;
34
35 iph = skb->nh.iph;
36 skb->h.ipiph = iph;
37
38 hdrlen = 0;
39 optlen = iph->ihl * 4 - sizeof(*iph);
40 if (unlikely(optlen))
41 hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);
42
43 skb->nh.raw = skb_push(skb, x->props.header_len + hdrlen);
44 top_iph = skb->nh.iph;
45 hdrlen = iph->ihl * 4 - optlen;
46 skb->h.raw += hdrlen;
47
48 memmove(top_iph, iph, hdrlen);
49 if (unlikely(optlen)) {
50 struct ip_beet_phdr *ph;
51
52 BUG_ON(optlen < 0);
53
54 ph = (struct ip_beet_phdr *)skb->h.raw;
55 ph->padlen = 4 - (optlen & 4);
56 ph->hdrlen = (optlen + ph->padlen + sizeof(*ph)) / 8;
57 ph->nexthdr = top_iph->protocol;
58
59 top_iph->protocol = IPPROTO_BEETPH;
60 top_iph->ihl = sizeof(struct iphdr) / 4;
61 }
62
63 top_iph->saddr = x->props.saddr.a4;
64 top_iph->daddr = x->id.daddr.a4;
65
66 return 0;
67}
68
69static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
70{
71 struct iphdr *iph = skb->nh.iph;
72 int phlen = 0;
73 int optlen = 0;
74 __u8 ph_nexthdr = 0, protocol = 0;
75 int err = -EINVAL;
76
77 protocol = iph->protocol;
78
79 if (unlikely(iph->protocol == IPPROTO_BEETPH)) {
80 struct ip_beet_phdr *ph = (struct ip_beet_phdr*)(iph + 1);
81
82 if (!pskb_may_pull(skb, sizeof(*ph)))
83 goto out;
84
85 phlen = ph->hdrlen * 8;
86 optlen = phlen - ph->padlen - sizeof(*ph);
87 if (optlen < 0 || optlen & 3 || optlen > 250)
88 goto out;
89
90 if (!pskb_may_pull(skb, phlen))
91 goto out;
92
93 ph_nexthdr = ph->nexthdr;
94 }
95
96 skb_push(skb, sizeof(*iph) - phlen + optlen);
97 memmove(skb->data, skb->nh.raw, sizeof(*iph));
98 skb->nh.raw = skb->data;
99
100 iph = skb->nh.iph;
101 iph->ihl = (sizeof(*iph) + optlen) / 4;
102 iph->tot_len = htons(skb->len);
103 iph->daddr = x->sel.daddr.a4;
104 iph->saddr = x->sel.saddr.a4;
105 if (ph_nexthdr)
106 iph->protocol = ph_nexthdr;
107 else
108 iph->protocol = protocol;
109 iph->check = 0;
110 iph->check = ip_fast_csum(skb->nh.raw, iph->ihl);
111 err = 0;
112out:
113 return err;
114}
115
116static struct xfrm_mode xfrm4_beet_mode = {
117 .input = xfrm4_beet_input,
118 .output = xfrm4_beet_output,
119 .owner = THIS_MODULE,
120 .encap = XFRM_MODE_BEET,
121};
122
123static int __init xfrm4_beet_init(void)
124{
125 return xfrm_register_mode(&xfrm4_beet_mode, AF_INET);
126}
127
128static void __exit xfrm4_beet_exit(void)
129{
130 int err;
131
132 err = xfrm_unregister_mode(&xfrm4_beet_mode, AF_INET);
133 BUG_ON(err);
134}
135
136module_init(xfrm4_beet_init);
137module_exit(xfrm4_beet_exit);
138MODULE_LICENSE("GPL");
139MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_BEET);
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index a2d211da2aba..a460e8132b4d 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -136,6 +136,16 @@ config INET6_XFRM_MODE_TUNNEL
136 136
137 If unsure, say Y. 137 If unsure, say Y.
138 138
139config INET6_XFRM_MODE_BEET
140 tristate "IPv6: IPsec BEET mode"
141 depends on IPV6
142 default IPV6
143 select XFRM
144 ---help---
145 Support for IPsec BEET mode.
146
147 If unsure, say Y.
148
139config INET6_XFRM_MODE_ROUTEOPTIMIZATION 149config INET6_XFRM_MODE_ROUTEOPTIMIZATION
140 tristate "IPv6: MIPv6 route optimization mode (EXPERIMENTAL)" 150 tristate "IPv6: MIPv6 route optimization mode (EXPERIMENTAL)"
141 depends on IPV6 && EXPERIMENTAL 151 depends on IPV6 && EXPERIMENTAL
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 0213c6612b58..87274e47fe32 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_INET6_TUNNEL) += tunnel6.o
26obj-$(CONFIG_INET6_XFRM_MODE_TRANSPORT) += xfrm6_mode_transport.o 26obj-$(CONFIG_INET6_XFRM_MODE_TRANSPORT) += xfrm6_mode_transport.o
27obj-$(CONFIG_INET6_XFRM_MODE_TUNNEL) += xfrm6_mode_tunnel.o 27obj-$(CONFIG_INET6_XFRM_MODE_TUNNEL) += xfrm6_mode_tunnel.o
28obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o 28obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o
29obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
29obj-$(CONFIG_NETFILTER) += netfilter/ 30obj-$(CONFIG_NETFILTER) += netfilter/
30 31
31obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o 32obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index a2860e35efd7..71f59f18ede8 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -199,6 +199,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
199static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x) 199static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
200{ 200{
201 struct xfrm_state *t = NULL; 201 struct xfrm_state *t = NULL;
202 u8 mode = XFRM_MODE_TUNNEL;
202 203
203 t = xfrm_state_alloc(); 204 t = xfrm_state_alloc();
204 if (!t) 205 if (!t)
@@ -212,7 +213,9 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
212 memcpy(t->id.daddr.a6, x->id.daddr.a6, sizeof(struct in6_addr)); 213 memcpy(t->id.daddr.a6, x->id.daddr.a6, sizeof(struct in6_addr));
213 memcpy(&t->sel, &x->sel, sizeof(t->sel)); 214 memcpy(&t->sel, &x->sel, sizeof(t->sel));
214 t->props.family = AF_INET6; 215 t->props.family = AF_INET6;
215 t->props.mode = XFRM_MODE_TUNNEL; 216 if (x->props.mode == XFRM_MODE_BEET)
217 mode = x->props.mode;
218 t->props.mode = mode;
216 memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr)); 219 memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr));
217 220
218 if (xfrm_init_state(t)) 221 if (xfrm_init_state(t))
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9662561701d1..e0c3934a7e4b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -546,7 +546,7 @@ static int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
546 struct in6_addr *daddr, *final_p = NULL, final; 546 struct in6_addr *daddr, *final_p = NULL, final;
547 struct ipv6_txoptions *opt = NULL; 547 struct ipv6_txoptions *opt = NULL;
548 struct ip6_flowlabel *flowlabel = NULL; 548 struct ip6_flowlabel *flowlabel = NULL;
549 struct flowi *fl = &inet->cork.fl; 549 struct flowi fl;
550 struct dst_entry *dst; 550 struct dst_entry *dst;
551 int addr_len = msg->msg_namelen; 551 int addr_len = msg->msg_namelen;
552 int ulen = len; 552 int ulen = len;
@@ -626,19 +626,19 @@ do_udp_sendmsg:
626 } 626 }
627 ulen += sizeof(struct udphdr); 627 ulen += sizeof(struct udphdr);
628 628
629 memset(fl, 0, sizeof(*fl)); 629 memset(&fl, 0, sizeof(fl));
630 630
631 if (sin6) { 631 if (sin6) {
632 if (sin6->sin6_port == 0) 632 if (sin6->sin6_port == 0)
633 return -EINVAL; 633 return -EINVAL;
634 634
635 fl->fl_ip_dport = sin6->sin6_port; 635 fl.fl_ip_dport = sin6->sin6_port;
636 daddr = &sin6->sin6_addr; 636 daddr = &sin6->sin6_addr;
637 637
638 if (np->sndflow) { 638 if (np->sndflow) {
639 fl->fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; 639 fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
640 if (fl->fl6_flowlabel&IPV6_FLOWLABEL_MASK) { 640 if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
641 flowlabel = fl6_sock_lookup(sk, fl->fl6_flowlabel); 641 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
642 if (flowlabel == NULL) 642 if (flowlabel == NULL)
643 return -EINVAL; 643 return -EINVAL;
644 daddr = &flowlabel->dst; 644 daddr = &flowlabel->dst;
@@ -656,32 +656,32 @@ do_udp_sendmsg:
656 if (addr_len >= sizeof(struct sockaddr_in6) && 656 if (addr_len >= sizeof(struct sockaddr_in6) &&
657 sin6->sin6_scope_id && 657 sin6->sin6_scope_id &&
658 ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL) 658 ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
659 fl->oif = sin6->sin6_scope_id; 659 fl.oif = sin6->sin6_scope_id;
660 } else { 660 } else {
661 if (sk->sk_state != TCP_ESTABLISHED) 661 if (sk->sk_state != TCP_ESTABLISHED)
662 return -EDESTADDRREQ; 662 return -EDESTADDRREQ;
663 663
664 fl->fl_ip_dport = inet->dport; 664 fl.fl_ip_dport = inet->dport;
665 daddr = &np->daddr; 665 daddr = &np->daddr;
666 fl->fl6_flowlabel = np->flow_label; 666 fl.fl6_flowlabel = np->flow_label;
667 connected = 1; 667 connected = 1;
668 } 668 }
669 669
670 if (!fl->oif) 670 if (!fl.oif)
671 fl->oif = sk->sk_bound_dev_if; 671 fl.oif = sk->sk_bound_dev_if;
672 672
673 if (msg->msg_controllen) { 673 if (msg->msg_controllen) {
674 opt = &opt_space; 674 opt = &opt_space;
675 memset(opt, 0, sizeof(struct ipv6_txoptions)); 675 memset(opt, 0, sizeof(struct ipv6_txoptions));
676 opt->tot_len = sizeof(*opt); 676 opt->tot_len = sizeof(*opt);
677 677
678 err = datagram_send_ctl(msg, fl, opt, &hlimit, &tclass); 678 err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass);
679 if (err < 0) { 679 if (err < 0) {
680 fl6_sock_release(flowlabel); 680 fl6_sock_release(flowlabel);
681 return err; 681 return err;
682 } 682 }
683 if ((fl->fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { 683 if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
684 flowlabel = fl6_sock_lookup(sk, fl->fl6_flowlabel); 684 flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
685 if (flowlabel == NULL) 685 if (flowlabel == NULL)
686 return -EINVAL; 686 return -EINVAL;
687 } 687 }
@@ -695,39 +695,39 @@ do_udp_sendmsg:
695 opt = fl6_merge_options(&opt_space, flowlabel, opt); 695 opt = fl6_merge_options(&opt_space, flowlabel, opt);
696 opt = ipv6_fixup_options(&opt_space, opt); 696 opt = ipv6_fixup_options(&opt_space, opt);
697 697
698 fl->proto = IPPROTO_UDP; 698 fl.proto = IPPROTO_UDP;
699 ipv6_addr_copy(&fl->fl6_dst, daddr); 699 ipv6_addr_copy(&fl.fl6_dst, daddr);
700 if (ipv6_addr_any(&fl->fl6_src) && !ipv6_addr_any(&np->saddr)) 700 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
701 ipv6_addr_copy(&fl->fl6_src, &np->saddr); 701 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
702 fl->fl_ip_sport = inet->sport; 702 fl.fl_ip_sport = inet->sport;
703 703
704 /* merge ip6_build_xmit from ip6_output */ 704 /* merge ip6_build_xmit from ip6_output */
705 if (opt && opt->srcrt) { 705 if (opt && opt->srcrt) {
706 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt; 706 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
707 ipv6_addr_copy(&final, &fl->fl6_dst); 707 ipv6_addr_copy(&final, &fl.fl6_dst);
708 ipv6_addr_copy(&fl->fl6_dst, rt0->addr); 708 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
709 final_p = &final; 709 final_p = &final;
710 connected = 0; 710 connected = 0;
711 } 711 }
712 712
713 if (!fl->oif && ipv6_addr_is_multicast(&fl->fl6_dst)) { 713 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
714 fl->oif = np->mcast_oif; 714 fl.oif = np->mcast_oif;
715 connected = 0; 715 connected = 0;
716 } 716 }
717 717
718 security_sk_classify_flow(sk, fl); 718 security_sk_classify_flow(sk, &fl);
719 719
720 err = ip6_sk_dst_lookup(sk, &dst, fl); 720 err = ip6_sk_dst_lookup(sk, &dst, &fl);
721 if (err) 721 if (err)
722 goto out; 722 goto out;
723 if (final_p) 723 if (final_p)
724 ipv6_addr_copy(&fl->fl6_dst, final_p); 724 ipv6_addr_copy(&fl.fl6_dst, final_p);
725 725
726 if ((err = xfrm_lookup(&dst, fl, sk, 0)) < 0) 726 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
727 goto out; 727 goto out;
728 728
729 if (hlimit < 0) { 729 if (hlimit < 0) {
730 if (ipv6_addr_is_multicast(&fl->fl6_dst)) 730 if (ipv6_addr_is_multicast(&fl.fl6_dst))
731 hlimit = np->mcast_hops; 731 hlimit = np->mcast_hops;
732 else 732 else
733 hlimit = np->hop_limit; 733 hlimit = np->hop_limit;
@@ -763,21 +763,23 @@ back_from_confirm:
763do_append_data: 763do_append_data:
764 up->len += ulen; 764 up->len += ulen;
765 err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen, 765 err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
766 sizeof(struct udphdr), hlimit, tclass, opt, fl, 766 sizeof(struct udphdr), hlimit, tclass, opt, &fl,
767 (struct rt6_info*)dst, 767 (struct rt6_info*)dst,
768 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 768 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
769 if (err) 769 if (err)
770 udp_v6_flush_pending_frames(sk); 770 udp_v6_flush_pending_frames(sk);
771 else if (!corkreq) 771 else if (!corkreq)
772 err = udp_v6_push_pending_frames(sk, up); 772 err = udp_v6_push_pending_frames(sk, up);
773 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
774 up->pending = 0;
773 775
774 if (dst) { 776 if (dst) {
775 if (connected) { 777 if (connected) {
776 ip6_dst_store(sk, dst, 778 ip6_dst_store(sk, dst,
777 ipv6_addr_equal(&fl->fl6_dst, &np->daddr) ? 779 ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
778 &np->daddr : NULL, 780 &np->daddr : NULL,
779#ifdef CONFIG_IPV6_SUBTREES 781#ifdef CONFIG_IPV6_SUBTREES
780 ipv6_addr_equal(&fl->fl6_src, &np->saddr) ? 782 ipv6_addr_equal(&fl.fl6_src, &np->saddr) ?
781 &np->saddr : 783 &np->saddr :
782#endif 784#endif
783 NULL); 785 NULL);
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
new file mode 100644
index 000000000000..edcfffa9e87b
--- /dev/null
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -0,0 +1,107 @@
1/*
2 * xfrm6_mode_beet.c - BEET mode encapsulation for IPv6.
3 *
4 * Copyright (c) 2006 Diego Beltrami <diego.beltrami@gmail.com>
5 * Miika Komu <miika@iki.fi>
6 * Herbert Xu <herbert@gondor.apana.org.au>
7 * Abhinav Pathak <abhinav.pathak@hiit.fi>
8 * Jeff Ahrenholz <ahrenholz@gmail.com>
9 */
10
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/skbuff.h>
15#include <linux/stringify.h>
16#include <net/dsfield.h>
17#include <net/dst.h>
18#include <net/inet_ecn.h>
19#include <net/ipv6.h>
20#include <net/xfrm.h>
21
22/* Add encapsulation header.
23 *
24 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
25 * The following fields in it shall be filled in by x->type->output:
26 * payload_len
27 *
28 * On exit, skb->h will be set to the start of the encapsulation header to be
29 * filled in by x->type->output and skb->nh will be set to the nextheader field
30 * of the extension header directly preceding the encapsulation header, or in
31 * its absence, that of the top IP header. The value of skb->data will always
32 * point to the top IP header.
33 */
34static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
35{
36 struct ipv6hdr *iph, *top_iph;
37 u8 *prevhdr;
38 int hdr_len;
39
40 skb_push(skb, x->props.header_len);
41 iph = skb->nh.ipv6h;
42
43 hdr_len = ip6_find_1stfragopt(skb, &prevhdr);
44 skb->nh.raw = prevhdr - x->props.header_len;
45 skb->h.raw = skb->data + hdr_len;
46 memmove(skb->data, iph, hdr_len);
47
48 skb->nh.raw = skb->data;
49 top_iph = skb->nh.ipv6h;
50 skb->nh.raw = &top_iph->nexthdr;
51 skb->h.ipv6h = top_iph + 1;
52
53 ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
54 ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
55
56 return 0;
57}
58
59static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb)
60{
61 struct ipv6hdr *ip6h;
62 int size = sizeof(struct ipv6hdr);
63 int err = -EINVAL;
64
65 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
66 goto out;
67
68 skb_push(skb, size);
69 memmove(skb->data, skb->nh.raw, size);
70 skb->nh.raw = skb->data;
71
72 skb->mac.raw = memmove(skb->data - skb->mac_len,
73 skb->mac.raw, skb->mac_len);
74
75 ip6h = skb->nh.ipv6h;
76 ip6h->payload_len = htons(skb->len - size);
77 ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6);
78 ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6);
79 err = 0;
80out:
81 return err;
82}
83
84static struct xfrm_mode xfrm6_beet_mode = {
85 .input = xfrm6_beet_input,
86 .output = xfrm6_beet_output,
87 .owner = THIS_MODULE,
88 .encap = XFRM_MODE_BEET,
89};
90
91static int __init xfrm6_beet_init(void)
92{
93 return xfrm_register_mode(&xfrm6_beet_mode, AF_INET6);
94}
95
96static void __exit xfrm6_beet_exit(void)
97{
98 int err;
99
100 err = xfrm_unregister_mode(&xfrm6_beet_mode, AF_INET6);
101 BUG_ON(err);
102}
103
104module_init(xfrm6_beet_init);
105module_exit(xfrm6_beet_exit);
106MODULE_LICENSE("GPL");
107MODULE_ALIAS_XFRM_MODE(AF_INET6, XFRM_MODE_BEET);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 0a28d2c5c44f..ce94732b8e23 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -365,7 +365,7 @@ config NETFILTER_XT_MATCH_MULTIPORT
365 365
366config NETFILTER_XT_MATCH_PHYSDEV 366config NETFILTER_XT_MATCH_PHYSDEV
367 tristate '"physdev" match support' 367 tristate '"physdev" match support'
368 depends on NETFILTER_XTABLES && BRIDGE_NETFILTER 368 depends on NETFILTER_XTABLES && BRIDGE && BRIDGE_NETFILTER
369 help 369 help
370 Physdev packet matching matches against the physical bridge ports 370 Physdev packet matching matches against the physical bridge ports
371 the IP packet arrived on or will leave by. 371 the IP packet arrived on or will leave by.
diff --git a/net/sched/estimator.c b/net/sched/estimator.c
deleted file mode 100644
index 0ebc98e9be2d..000000000000
--- a/net/sched/estimator.c
+++ /dev/null
@@ -1,196 +0,0 @@
1/*
2 * net/sched/estimator.c Simple rate estimator.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 */
11
12#include <asm/uaccess.h>
13#include <asm/system.h>
14#include <linux/bitops.h>
15#include <linux/module.h>
16#include <linux/types.h>
17#include <linux/kernel.h>
18#include <linux/jiffies.h>
19#include <linux/string.h>
20#include <linux/mm.h>
21#include <linux/socket.h>
22#include <linux/sockios.h>
23#include <linux/in.h>
24#include <linux/errno.h>
25#include <linux/interrupt.h>
26#include <linux/netdevice.h>
27#include <linux/skbuff.h>
28#include <linux/rtnetlink.h>
29#include <linux/init.h>
30#include <net/sock.h>
31#include <net/pkt_sched.h>
32
33/*
34 This code is NOT intended to be used for statistics collection,
35 its purpose is to provide a base for statistical multiplexing
36 for controlled load service.
37 If you need only statistics, run a user level daemon which
38 periodically reads byte counters.
39
40 Unfortunately, rate estimation is not a very easy task.
41 F.e. I did not find a simple way to estimate the current peak rate
42 and even failed to formulate the problem 8)8)
43
44 So I preferred not to built an estimator into the scheduler,
45 but run this task separately.
46 Ideally, it should be kernel thread(s), but for now it runs
47 from timers, which puts apparent top bounds on the number of rated
48 flows, has minimal overhead on small, but is enough
49 to handle controlled load service, sets of aggregates.
50
51 We measure rate over A=(1<<interval) seconds and evaluate EWMA:
52
53 avrate = avrate*(1-W) + rate*W
54
55 where W is chosen as negative power of 2: W = 2^(-ewma_log)
56
57 The resulting time constant is:
58
59 T = A/(-ln(1-W))
60
61
62 NOTES.
63
64 * The stored value for avbps is scaled by 2^5, so that maximal
65 rate is ~1Gbit, avpps is scaled by 2^10.
66
67 * Minimal interval is HZ/4=250msec (it is the greatest common divisor
68 for HZ=100 and HZ=1024 8)), maximal interval
69 is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
70 are too expensive, longer ones can be implemented
71 at user level painlessly.
72 */
73
74#define EST_MAX_INTERVAL 5
75
76struct qdisc_estimator
77{
78 struct qdisc_estimator *next;
79 struct tc_stats *stats;
80 spinlock_t *stats_lock;
81 unsigned interval;
82 int ewma_log;
83 u64 last_bytes;
84 u32 last_packets;
85 u32 avpps;
86 u32 avbps;
87};
88
89struct qdisc_estimator_head
90{
91 struct timer_list timer;
92 struct qdisc_estimator *list;
93};
94
95static struct qdisc_estimator_head elist[EST_MAX_INTERVAL+1];
96
97/* Estimator array lock */
98static DEFINE_RWLOCK(est_lock);
99
100static void est_timer(unsigned long arg)
101{
102 int idx = (int)arg;
103 struct qdisc_estimator *e;
104
105 read_lock(&est_lock);
106 for (e = elist[idx].list; e; e = e->next) {
107 struct tc_stats *st = e->stats;
108 u64 nbytes;
109 u32 npackets;
110 u32 rate;
111
112 spin_lock(e->stats_lock);
113 nbytes = st->bytes;
114 npackets = st->packets;
115 rate = (nbytes - e->last_bytes)<<(7 - idx);
116 e->last_bytes = nbytes;
117 e->avbps += ((long)rate - (long)e->avbps) >> e->ewma_log;
118 st->bps = (e->avbps+0xF)>>5;
119
120 rate = (npackets - e->last_packets)<<(12 - idx);
121 e->last_packets = npackets;
122 e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
123 e->stats->pps = (e->avpps+0x1FF)>>10;
124 spin_unlock(e->stats_lock);
125 }
126
127 mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
128 read_unlock(&est_lock);
129}
130
131int qdisc_new_estimator(struct tc_stats *stats, spinlock_t *stats_lock, struct rtattr *opt)
132{
133 struct qdisc_estimator *est;
134 struct tc_estimator *parm = RTA_DATA(opt);
135
136 if (RTA_PAYLOAD(opt) < sizeof(*parm))
137 return -EINVAL;
138
139 if (parm->interval < -2 || parm->interval > 3)
140 return -EINVAL;
141
142 est = kzalloc(sizeof(*est), GFP_KERNEL);
143 if (est == NULL)
144 return -ENOBUFS;
145
146 est->interval = parm->interval + 2;
147 est->stats = stats;
148 est->stats_lock = stats_lock;
149 est->ewma_log = parm->ewma_log;
150 est->last_bytes = stats->bytes;
151 est->avbps = stats->bps<<5;
152 est->last_packets = stats->packets;
153 est->avpps = stats->pps<<10;
154
155 est->next = elist[est->interval].list;
156 if (est->next == NULL) {
157 init_timer(&elist[est->interval].timer);
158 elist[est->interval].timer.data = est->interval;
159 elist[est->interval].timer.expires = jiffies + ((HZ<<est->interval)/4);
160 elist[est->interval].timer.function = est_timer;
161 add_timer(&elist[est->interval].timer);
162 }
163 write_lock_bh(&est_lock);
164 elist[est->interval].list = est;
165 write_unlock_bh(&est_lock);
166 return 0;
167}
168
169void qdisc_kill_estimator(struct tc_stats *stats)
170{
171 int idx;
172 struct qdisc_estimator *est, **pest;
173
174 for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
175 int killed = 0;
176 pest = &elist[idx].list;
177 while ((est=*pest) != NULL) {
178 if (est->stats != stats) {
179 pest = &est->next;
180 continue;
181 }
182
183 write_lock_bh(&est_lock);
184 *pest = est->next;
185 write_unlock_bh(&est_lock);
186
187 kfree(est);
188 killed++;
189 }
190 if (killed && elist[idx].list == NULL)
191 del_timer(&elist[idx].timer);
192 }
193}
194
195EXPORT_SYMBOL(qdisc_kill_estimator);
196EXPORT_SYMBOL(qdisc_new_estimator);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 6c058e3660c0..bb3ddd4784b1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -391,7 +391,7 @@ static inline void htb_add_class_to_row(struct htb_sched *q,
391/* If this triggers, it is a bug in this code, but it need not be fatal */ 391/* If this triggers, it is a bug in this code, but it need not be fatal */
392static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root) 392static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
393{ 393{
394 if (!RB_EMPTY_NODE(rb)) { 394 if (RB_EMPTY_NODE(rb)) {
395 WARN_ON(1); 395 WARN_ON(1);
396 } else { 396 } else {
397 rb_erase(rb, root); 397 rb_erase(rb, root);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 638c0b576203..447d9aef4605 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -903,9 +903,9 @@ out_seq:
903struct gss_svc_data { 903struct gss_svc_data {
904 /* decoded gss client cred: */ 904 /* decoded gss client cred: */
905 struct rpc_gss_wire_cred clcred; 905 struct rpc_gss_wire_cred clcred;
906 /* pointer to the beginning of the procedure-specific results, 906 /* save a pointer to the beginning of the encoded verifier,
907 * which may be encrypted/checksummed in svcauth_gss_release: */ 907 * for use in encryption/checksumming in svcauth_gss_release: */
908 __be32 *body_start; 908 __be32 *verf_start;
909 struct rsc *rsci; 909 struct rsc *rsci;
910}; 910};
911 911
@@ -968,7 +968,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
968 if (!svcdata) 968 if (!svcdata)
969 goto auth_err; 969 goto auth_err;
970 rqstp->rq_auth_data = svcdata; 970 rqstp->rq_auth_data = svcdata;
971 svcdata->body_start = NULL; 971 svcdata->verf_start = NULL;
972 svcdata->rsci = NULL; 972 svcdata->rsci = NULL;
973 gc = &svcdata->clcred; 973 gc = &svcdata->clcred;
974 974
@@ -1097,6 +1097,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1097 goto complete; 1097 goto complete;
1098 case RPC_GSS_PROC_DATA: 1098 case RPC_GSS_PROC_DATA:
1099 *authp = rpcsec_gsserr_ctxproblem; 1099 *authp = rpcsec_gsserr_ctxproblem;
1100 svcdata->verf_start = resv->iov_base + resv->iov_len;
1100 if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq)) 1101 if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
1101 goto auth_err; 1102 goto auth_err;
1102 rqstp->rq_cred = rsci->cred; 1103 rqstp->rq_cred = rsci->cred;
@@ -1110,7 +1111,6 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1110 gc->gc_seq, rsci->mechctx)) 1111 gc->gc_seq, rsci->mechctx))
1111 goto auth_err; 1112 goto auth_err;
1112 /* placeholders for length and seq. number: */ 1113 /* placeholders for length and seq. number: */
1113 svcdata->body_start = resv->iov_base + resv->iov_len;
1114 svc_putnl(resv, 0); 1114 svc_putnl(resv, 0);
1115 svc_putnl(resv, 0); 1115 svc_putnl(resv, 0);
1116 break; 1116 break;
@@ -1119,7 +1119,6 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1119 gc->gc_seq, rsci->mechctx)) 1119 gc->gc_seq, rsci->mechctx))
1120 goto auth_err; 1120 goto auth_err;
1121 /* placeholders for length and seq. number: */ 1121 /* placeholders for length and seq. number: */
1122 svcdata->body_start = resv->iov_base + resv->iov_len;
1123 svc_putnl(resv, 0); 1122 svc_putnl(resv, 0);
1124 svc_putnl(resv, 0); 1123 svc_putnl(resv, 0);
1125 break; 1124 break;
@@ -1147,6 +1146,32 @@ out:
1147 return ret; 1146 return ret;
1148} 1147}
1149 1148
1149u32 *
1150svcauth_gss_prepare_to_wrap(struct xdr_buf *resbuf, struct gss_svc_data *gsd)
1151{
1152 u32 *p, verf_len;
1153
1154 p = gsd->verf_start;
1155 gsd->verf_start = NULL;
1156
1157 /* If the reply stat is nonzero, don't wrap: */
1158 if (*(p-1) != rpc_success)
1159 return NULL;
1160 /* Skip the verifier: */
1161 p += 1;
1162 verf_len = ntohl(*p++);
1163 p += XDR_QUADLEN(verf_len);
1164 /* move accept_stat to right place: */
1165 memcpy(p, p + 2, 4);
1166 /* Also don't wrap if the accept stat is nonzero: */
1167 if (*p != rpc_success) {
1168 resbuf->head[0].iov_len -= 2 * 4;
1169 return NULL;
1170 }
1171 p++;
1172 return p;
1173}
1174
1150static inline int 1175static inline int
1151svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp) 1176svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
1152{ 1177{
@@ -1160,17 +1185,9 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
1160 int integ_offset, integ_len; 1185 int integ_offset, integ_len;
1161 int stat = -EINVAL; 1186 int stat = -EINVAL;
1162 1187
1163 p = gsd->body_start; 1188 p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
1164 gsd->body_start = NULL; 1189 if (p == NULL)
1165 /* move accept_stat to right place: */
1166 memcpy(p, p + 2, 4);
1167 /* Don't wrap in failure case: */
1168 /* Counting on not getting here if call was not even accepted! */
1169 if (*p != rpc_success) {
1170 resbuf->head[0].iov_len -= 2 * 4;
1171 goto out; 1190 goto out;
1172 }
1173 p++;
1174 integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base; 1191 integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
1175 integ_len = resbuf->len - integ_offset; 1192 integ_len = resbuf->len - integ_offset;
1176 BUG_ON(integ_len % 4); 1193 BUG_ON(integ_len % 4);
@@ -1191,7 +1208,6 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
1191 resbuf->tail[0].iov_base = resbuf->head[0].iov_base 1208 resbuf->tail[0].iov_base = resbuf->head[0].iov_base
1192 + resbuf->head[0].iov_len; 1209 + resbuf->head[0].iov_len;
1193 resbuf->tail[0].iov_len = 0; 1210 resbuf->tail[0].iov_len = 0;
1194 rqstp->rq_restailpage = 0;
1195 resv = &resbuf->tail[0]; 1211 resv = &resbuf->tail[0];
1196 } else { 1212 } else {
1197 resv = &resbuf->tail[0]; 1213 resv = &resbuf->tail[0];
@@ -1223,24 +1239,16 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
1223 int offset; 1239 int offset;
1224 int pad; 1240 int pad;
1225 1241
1226 p = gsd->body_start; 1242 p = svcauth_gss_prepare_to_wrap(resbuf, gsd);
1227 gsd->body_start = NULL; 1243 if (p == NULL)
1228 /* move accept_stat to right place: */
1229 memcpy(p, p + 2, 4);
1230 /* Don't wrap in failure case: */
1231 /* Counting on not getting here if call was not even accepted! */
1232 if (*p != rpc_success) {
1233 resbuf->head[0].iov_len -= 2 * 4;
1234 return 0; 1244 return 0;
1235 }
1236 p++;
1237 len = p++; 1245 len = p++;
1238 offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base; 1246 offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
1239 *p++ = htonl(gc->gc_seq); 1247 *p++ = htonl(gc->gc_seq);
1240 inpages = resbuf->pages; 1248 inpages = resbuf->pages;
1241 /* XXX: Would be better to write some xdr helper functions for 1249 /* XXX: Would be better to write some xdr helper functions for
1242 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */ 1250 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
1243 if (resbuf->tail[0].iov_base && rqstp->rq_restailpage == 0) { 1251 if (resbuf->tail[0].iov_base) {
1244 BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base 1252 BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
1245 + PAGE_SIZE); 1253 + PAGE_SIZE);
1246 BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base); 1254 BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
@@ -1258,7 +1266,6 @@ svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
1258 resbuf->tail[0].iov_base = resbuf->head[0].iov_base 1266 resbuf->tail[0].iov_base = resbuf->head[0].iov_base
1259 + resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE; 1267 + resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
1260 resbuf->tail[0].iov_len = 0; 1268 resbuf->tail[0].iov_len = 0;
1261 rqstp->rq_restailpage = 0;
1262 } 1269 }
1263 if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages)) 1270 if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
1264 return -ENOMEM; 1271 return -ENOMEM;
@@ -1282,7 +1289,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
1282 if (gc->gc_proc != RPC_GSS_PROC_DATA) 1289 if (gc->gc_proc != RPC_GSS_PROC_DATA)
1283 goto out; 1290 goto out;
1284 /* Release can be called twice, but we only wrap once. */ 1291 /* Release can be called twice, but we only wrap once. */
1285 if (gsd->body_start == NULL) 1292 if (gsd->verf_start == NULL)
1286 goto out; 1293 goto out;
1287 /* normally not set till svc_send, but we need it here: */ 1294 /* normally not set till svc_send, but we need it here: */
1288 /* XXX: what for? Do we mess it up the moment we call svc_putu32 1295 /* XXX: what for? Do we mess it up the moment we call svc_putu32
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index a99e67b164c1..c2c8bb20d07f 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -417,18 +417,15 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
417 if (size > RPCSVC_MAXPAYLOAD) 417 if (size > RPCSVC_MAXPAYLOAD)
418 size = RPCSVC_MAXPAYLOAD; 418 size = RPCSVC_MAXPAYLOAD;
419 pages = 2 + (size+ PAGE_SIZE -1) / PAGE_SIZE; 419 pages = 2 + (size+ PAGE_SIZE -1) / PAGE_SIZE;
420 rqstp->rq_argused = 0;
421 rqstp->rq_resused = 0;
422 arghi = 0; 420 arghi = 0;
423 BUG_ON(pages > RPCSVC_MAXPAGES); 421 BUG_ON(pages > RPCSVC_MAXPAGES);
424 while (pages) { 422 while (pages) {
425 struct page *p = alloc_page(GFP_KERNEL); 423 struct page *p = alloc_page(GFP_KERNEL);
426 if (!p) 424 if (!p)
427 break; 425 break;
428 rqstp->rq_argpages[arghi++] = p; 426 rqstp->rq_pages[arghi++] = p;
429 pages--; 427 pages--;
430 } 428 }
431 rqstp->rq_arghi = arghi;
432 return ! pages; 429 return ! pages;
433} 430}
434 431
@@ -438,14 +435,10 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
438static void 435static void
439svc_release_buffer(struct svc_rqst *rqstp) 436svc_release_buffer(struct svc_rqst *rqstp)
440{ 437{
441 while (rqstp->rq_arghi) 438 int i;
442 put_page(rqstp->rq_argpages[--rqstp->rq_arghi]); 439 for (i=0; i<ARRAY_SIZE(rqstp->rq_pages); i++)
443 while (rqstp->rq_resused) { 440 if (rqstp->rq_pages[i])
444 if (rqstp->rq_respages[--rqstp->rq_resused] == NULL) 441 put_page(rqstp->rq_pages[i]);
445 continue;
446 put_page(rqstp->rq_respages[rqstp->rq_resused]);
447 }
448 rqstp->rq_argused = 0;
449} 442}
450 443
451/* 444/*
@@ -651,23 +644,32 @@ svc_register(struct svc_serv *serv, int proto, unsigned short port)
651 unsigned long flags; 644 unsigned long flags;
652 int i, error = 0, dummy; 645 int i, error = 0, dummy;
653 646
654 progp = serv->sv_program;
655
656 dprintk("RPC: svc_register(%s, %s, %d)\n",
657 progp->pg_name, proto == IPPROTO_UDP? "udp" : "tcp", port);
658
659 if (!port) 647 if (!port)
660 clear_thread_flag(TIF_SIGPENDING); 648 clear_thread_flag(TIF_SIGPENDING);
661 649
662 for (i = 0; i < progp->pg_nvers; i++) { 650 for (progp = serv->sv_program; progp; progp = progp->pg_next) {
663 if (progp->pg_vers[i] == NULL) 651 for (i = 0; i < progp->pg_nvers; i++) {
664 continue; 652 if (progp->pg_vers[i] == NULL)
665 error = rpc_register(progp->pg_prog, i, proto, port, &dummy); 653 continue;
666 if (error < 0) 654
667 break; 655 dprintk("RPC: svc_register(%s, %s, %d, %d)%s\n",
668 if (port && !dummy) { 656 progp->pg_name,
669 error = -EACCES; 657 proto == IPPROTO_UDP? "udp" : "tcp",
670 break; 658 port,
659 i,
660 progp->pg_vers[i]->vs_hidden?
661 " (but not telling portmap)" : "");
662
663 if (progp->pg_vers[i]->vs_hidden)
664 continue;
665
666 error = rpc_register(progp->pg_prog, i, proto, port, &dummy);
667 if (error < 0)
668 break;
669 if (port && !dummy) {
670 error = -EACCES;
671 break;
672 }
671 } 673 }
672 } 674 }
673 675
@@ -697,7 +699,7 @@ svc_process(struct svc_rqst *rqstp)
697 u32 dir, prog, vers, proc; 699 u32 dir, prog, vers, proc;
698 __be32 auth_stat, rpc_stat; 700 __be32 auth_stat, rpc_stat;
699 int auth_res; 701 int auth_res;
700 __be32 *accept_statp; 702 __be32 *reply_statp;
701 703
702 rpc_stat = rpc_success; 704 rpc_stat = rpc_success;
703 705
@@ -707,10 +709,10 @@ svc_process(struct svc_rqst *rqstp)
707 /* setup response xdr_buf. 709 /* setup response xdr_buf.
708 * Initially it has just one page 710 * Initially it has just one page
709 */ 711 */
710 svc_take_page(rqstp); /* must succeed */ 712 rqstp->rq_resused = 1;
711 resv->iov_base = page_address(rqstp->rq_respages[0]); 713 resv->iov_base = page_address(rqstp->rq_respages[0]);
712 resv->iov_len = 0; 714 resv->iov_len = 0;
713 rqstp->rq_res.pages = rqstp->rq_respages+1; 715 rqstp->rq_res.pages = rqstp->rq_respages + 1;
714 rqstp->rq_res.len = 0; 716 rqstp->rq_res.len = 0;
715 rqstp->rq_res.page_base = 0; 717 rqstp->rq_res.page_base = 0;
716 rqstp->rq_res.page_len = 0; 718 rqstp->rq_res.page_len = 0;
@@ -738,7 +740,7 @@ svc_process(struct svc_rqst *rqstp)
738 goto err_bad_rpc; 740 goto err_bad_rpc;
739 741
740 /* Save position in case we later decide to reject: */ 742 /* Save position in case we later decide to reject: */
741 accept_statp = resv->iov_base + resv->iov_len; 743 reply_statp = resv->iov_base + resv->iov_len;
742 744
743 svc_putnl(resv, 0); /* ACCEPT */ 745 svc_putnl(resv, 0); /* ACCEPT */
744 746
@@ -886,7 +888,7 @@ err_bad_auth:
886 dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat)); 888 dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
887 serv->sv_stats->rpcbadauth++; 889 serv->sv_stats->rpcbadauth++;
888 /* Restore write pointer to location of accept status: */ 890 /* Restore write pointer to location of accept status: */
889 xdr_ressize_check(rqstp, accept_statp); 891 xdr_ressize_check(rqstp, reply_statp);
890 svc_putnl(resv, 1); /* REJECT */ 892 svc_putnl(resv, 1); /* REJECT */
891 svc_putnl(resv, 1); /* AUTH_ERROR */ 893 svc_putnl(resv, 1); /* AUTH_ERROR */
892 svc_putnl(resv, ntohl(auth_stat)); /* status */ 894 svc_putnl(resv, ntohl(auth_stat)); /* status */
@@ -926,3 +928,18 @@ err_bad:
926 svc_putnl(resv, ntohl(rpc_stat)); 928 svc_putnl(resv, ntohl(rpc_stat));
927 goto sendit; 929 goto sendit;
928} 930}
931
932/*
933 * Return (transport-specific) limit on the rpc payload.
934 */
935u32 svc_max_payload(const struct svc_rqst *rqstp)
936{
937 int max = RPCSVC_MAXPAYLOAD_TCP;
938
939 if (rqstp->rq_sock->sk_sock->type == SOCK_DGRAM)
940 max = RPCSVC_MAXPAYLOAD_UDP;
941 if (rqstp->rq_server->sv_bufsz < max)
942 max = rqstp->rq_server->sv_bufsz;
943 return max;
944}
945EXPORT_SYMBOL_GPL(svc_max_payload);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 40d41a2831d7..e1bd933629fe 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -9,6 +9,7 @@
9#include <linux/seq_file.h> 9#include <linux/seq_file.h>
10#include <linux/hash.h> 10#include <linux/hash.h>
11#include <linux/string.h> 11#include <linux/string.h>
12#include <net/sock.h>
12 13
13#define RPCDBG_FACILITY RPCDBG_AUTH 14#define RPCDBG_FACILITY RPCDBG_AUTH
14 15
@@ -375,6 +376,44 @@ void svcauth_unix_purge(void)
375 cache_purge(&ip_map_cache); 376 cache_purge(&ip_map_cache);
376} 377}
377 378
379static inline struct ip_map *
380ip_map_cached_get(struct svc_rqst *rqstp)
381{
382 struct ip_map *ipm = rqstp->rq_sock->sk_info_authunix;
383 if (ipm != NULL) {
384 if (!cache_valid(&ipm->h)) {
385 /*
386 * The entry has been invalidated since it was
387 * remembered, e.g. by a second mount from the
388 * same IP address.
389 */
390 rqstp->rq_sock->sk_info_authunix = NULL;
391 cache_put(&ipm->h, &ip_map_cache);
392 return NULL;
393 }
394 cache_get(&ipm->h);
395 }
396 return ipm;
397}
398
399static inline void
400ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
401{
402 struct svc_sock *svsk = rqstp->rq_sock;
403
404 if (svsk->sk_sock->type == SOCK_STREAM && svsk->sk_info_authunix == NULL)
405 svsk->sk_info_authunix = ipm; /* newly cached, keep the reference */
406 else
407 cache_put(&ipm->h, &ip_map_cache);
408}
409
410void
411svcauth_unix_info_release(void *info)
412{
413 struct ip_map *ipm = info;
414 cache_put(&ipm->h, &ip_map_cache);
415}
416
378static int 417static int
379svcauth_unix_set_client(struct svc_rqst *rqstp) 418svcauth_unix_set_client(struct svc_rqst *rqstp)
380{ 419{
@@ -384,8 +423,10 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
384 if (rqstp->rq_proc == 0) 423 if (rqstp->rq_proc == 0)
385 return SVC_OK; 424 return SVC_OK;
386 425
387 ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class, 426 ipm = ip_map_cached_get(rqstp);
388 rqstp->rq_addr.sin_addr); 427 if (ipm == NULL)
428 ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
429 rqstp->rq_addr.sin_addr);
389 430
390 if (ipm == NULL) 431 if (ipm == NULL)
391 return SVC_DENIED; 432 return SVC_DENIED;
@@ -400,7 +441,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
400 case 0: 441 case 0:
401 rqstp->rq_client = &ipm->m_client->h; 442 rqstp->rq_client = &ipm->m_client->h;
402 kref_get(&rqstp->rq_client->ref); 443 kref_get(&rqstp->rq_client->ref);
403 cache_put(&ipm->h, &ip_map_cache); 444 ip_map_cached_put(rqstp, ipm);
404 break; 445 break;
405 } 446 }
406 return SVC_OK; 447 return SVC_OK;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index cba85d195222..b39e7e2b648f 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -313,7 +313,7 @@ svc_sock_release(struct svc_rqst *rqstp)
313 313
314 svc_release_skb(rqstp); 314 svc_release_skb(rqstp);
315 315
316 svc_free_allpages(rqstp); 316 svc_free_res_pages(rqstp);
317 rqstp->rq_res.page_len = 0; 317 rqstp->rq_res.page_len = 0;
318 rqstp->rq_res.page_base = 0; 318 rqstp->rq_res.page_base = 0;
319 319
@@ -412,7 +412,8 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
412 /* send head */ 412 /* send head */
413 if (slen == xdr->head[0].iov_len) 413 if (slen == xdr->head[0].iov_len)
414 flags = 0; 414 flags = 0;
415 len = kernel_sendpage(sock, rqstp->rq_respages[0], 0, xdr->head[0].iov_len, flags); 415 len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
416 xdr->head[0].iov_len, flags);
416 if (len != xdr->head[0].iov_len) 417 if (len != xdr->head[0].iov_len)
417 goto out; 418 goto out;
418 slen -= xdr->head[0].iov_len; 419 slen -= xdr->head[0].iov_len;
@@ -437,8 +438,9 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
437 } 438 }
438 /* send tail */ 439 /* send tail */
439 if (xdr->tail[0].iov_len) { 440 if (xdr->tail[0].iov_len) {
440 result = kernel_sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage], 441 result = kernel_sendpage(sock, rqstp->rq_respages[0],
441 ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1), 442 ((unsigned long)xdr->tail[0].iov_base)
443 & (PAGE_SIZE-1),
442 xdr->tail[0].iov_len, 0); 444 xdr->tail[0].iov_len, 0);
443 445
444 if (result > 0) 446 if (result > 0)
@@ -492,7 +494,12 @@ svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
492 } 494 }
493 spin_unlock(&serv->sv_lock); 495 spin_unlock(&serv->sv_lock);
494 if (closesk) 496 if (closesk)
497 /* Should unregister with portmap, but you cannot
498 * unregister just one protocol...
499 */
495 svc_delete_socket(closesk); 500 svc_delete_socket(closesk);
501 else if (toclose)
502 return -ENOENT;
496 return len; 503 return len;
497} 504}
498EXPORT_SYMBOL(svc_sock_names); 505EXPORT_SYMBOL(svc_sock_names);
@@ -703,9 +710,11 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
703 if (len <= rqstp->rq_arg.head[0].iov_len) { 710 if (len <= rqstp->rq_arg.head[0].iov_len) {
704 rqstp->rq_arg.head[0].iov_len = len; 711 rqstp->rq_arg.head[0].iov_len = len;
705 rqstp->rq_arg.page_len = 0; 712 rqstp->rq_arg.page_len = 0;
713 rqstp->rq_respages = rqstp->rq_pages+1;
706 } else { 714 } else {
707 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; 715 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
708 rqstp->rq_argused += (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE; 716 rqstp->rq_respages = rqstp->rq_pages + 1 +
717 (rqstp->rq_arg.page_len + PAGE_SIZE - 1)/ PAGE_SIZE;
709 } 718 }
710 719
711 if (serv->sv_stats) 720 if (serv->sv_stats)
@@ -946,7 +955,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
946 struct svc_sock *svsk = rqstp->rq_sock; 955 struct svc_sock *svsk = rqstp->rq_sock;
947 struct svc_serv *serv = svsk->sk_server; 956 struct svc_serv *serv = svsk->sk_server;
948 int len; 957 int len;
949 struct kvec vec[RPCSVC_MAXPAGES]; 958 struct kvec *vec;
950 int pnum, vlen; 959 int pnum, vlen;
951 960
952 dprintk("svc: tcp_recv %p data %d conn %d close %d\n", 961 dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
@@ -1044,15 +1053,17 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
1044 len = svsk->sk_reclen; 1053 len = svsk->sk_reclen;
1045 set_bit(SK_DATA, &svsk->sk_flags); 1054 set_bit(SK_DATA, &svsk->sk_flags);
1046 1055
1056 vec = rqstp->rq_vec;
1047 vec[0] = rqstp->rq_arg.head[0]; 1057 vec[0] = rqstp->rq_arg.head[0];
1048 vlen = PAGE_SIZE; 1058 vlen = PAGE_SIZE;
1049 pnum = 1; 1059 pnum = 1;
1050 while (vlen < len) { 1060 while (vlen < len) {
1051 vec[pnum].iov_base = page_address(rqstp->rq_argpages[rqstp->rq_argused++]); 1061 vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
1052 vec[pnum].iov_len = PAGE_SIZE; 1062 vec[pnum].iov_len = PAGE_SIZE;
1053 pnum++; 1063 pnum++;
1054 vlen += PAGE_SIZE; 1064 vlen += PAGE_SIZE;
1055 } 1065 }
1066 rqstp->rq_respages = &rqstp->rq_pages[pnum];
1056 1067
1057 /* Now receive data */ 1068 /* Now receive data */
1058 len = svc_recvfrom(rqstp, vec, pnum, len); 1069 len = svc_recvfrom(rqstp, vec, pnum, len);
@@ -1204,7 +1215,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1204 struct svc_sock *svsk =NULL; 1215 struct svc_sock *svsk =NULL;
1205 struct svc_serv *serv = rqstp->rq_server; 1216 struct svc_serv *serv = rqstp->rq_server;
1206 struct svc_pool *pool = rqstp->rq_pool; 1217 struct svc_pool *pool = rqstp->rq_pool;
1207 int len; 1218 int len, i;
1208 int pages; 1219 int pages;
1209 struct xdr_buf *arg; 1220 struct xdr_buf *arg;
1210 DECLARE_WAITQUEUE(wait, current); 1221 DECLARE_WAITQUEUE(wait, current);
@@ -1221,27 +1232,22 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1221 "svc_recv: service %p, wait queue active!\n", 1232 "svc_recv: service %p, wait queue active!\n",
1222 rqstp); 1233 rqstp);
1223 1234
1224 /* Initialize the buffers */
1225 /* first reclaim pages that were moved to response list */
1226 svc_pushback_allpages(rqstp);
1227 1235
1228 /* now allocate needed pages. If we get a failure, sleep briefly */ 1236 /* now allocate needed pages. If we get a failure, sleep briefly */
1229 pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE; 1237 pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE;
1230 while (rqstp->rq_arghi < pages) { 1238 for (i=0; i < pages ; i++)
1231 struct page *p = alloc_page(GFP_KERNEL); 1239 while (rqstp->rq_pages[i] == NULL) {
1232 if (!p) { 1240 struct page *p = alloc_page(GFP_KERNEL);
1233 schedule_timeout_uninterruptible(msecs_to_jiffies(500)); 1241 if (!p)
1234 continue; 1242 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1243 rqstp->rq_pages[i] = p;
1235 } 1244 }
1236 rqstp->rq_argpages[rqstp->rq_arghi++] = p;
1237 }
1238 1245
1239 /* Make arg->head point to first page and arg->pages point to rest */ 1246 /* Make arg->head point to first page and arg->pages point to rest */
1240 arg = &rqstp->rq_arg; 1247 arg = &rqstp->rq_arg;
1241 arg->head[0].iov_base = page_address(rqstp->rq_argpages[0]); 1248 arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
1242 arg->head[0].iov_len = PAGE_SIZE; 1249 arg->head[0].iov_len = PAGE_SIZE;
1243 rqstp->rq_argused = 1; 1250 arg->pages = rqstp->rq_pages + 1;
1244 arg->pages = rqstp->rq_argpages + 1;
1245 arg->page_base = 0; 1251 arg->page_base = 0;
1246 /* save at least one page for response */ 1252 /* save at least one page for response */
1247 arg->page_len = (pages-2)*PAGE_SIZE; 1253 arg->page_len = (pages-2)*PAGE_SIZE;
@@ -1604,6 +1610,8 @@ svc_delete_socket(struct svc_sock *svsk)
1604 sockfd_put(svsk->sk_sock); 1610 sockfd_put(svsk->sk_sock);
1605 else 1611 else
1606 sock_release(svsk->sk_sock); 1612 sock_release(svsk->sk_sock);
1613 if (svsk->sk_info_authunix != NULL)
1614 svcauth_unix_info_release(svsk->sk_info_authunix);
1607 kfree(svsk); 1615 kfree(svsk);
1608 } else { 1616 } else {
1609 spin_unlock_bh(&serv->sv_lock); 1617 spin_unlock_bh(&serv->sv_lock);
@@ -1699,6 +1707,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
1699 rqstp->rq_prot = dr->prot; 1707 rqstp->rq_prot = dr->prot;
1700 rqstp->rq_addr = dr->addr; 1708 rqstp->rq_addr = dr->addr;
1701 rqstp->rq_daddr = dr->daddr; 1709 rqstp->rq_daddr = dr->daddr;
1710 rqstp->rq_respages = rqstp->rq_pages;
1702 return dr->argslen<<2; 1711 return dr->argslen<<2;
1703} 1712}
1704 1713
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 693f02eca6d6..53bc8cb5adbc 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1666,8 +1666,9 @@ static void link_retransmit_failure(struct link *l_ptr, struct sk_buff *buf)
1666 char addr_string[16]; 1666 char addr_string[16];
1667 1667
1668 tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg)); 1668 tipc_printf(TIPC_OUTPUT, "Msg seq number: %u, ", msg_seqno(msg));
1669 tipc_printf(TIPC_OUTPUT, "Outstanding acks: %u\n", (u32)TIPC_SKB_CB(buf)->handle); 1669 tipc_printf(TIPC_OUTPUT, "Outstanding acks: %lu\n",
1670 1670 (unsigned long) TIPC_SKB_CB(buf)->handle);
1671
1671 n_ptr = l_ptr->owner->next; 1672 n_ptr = l_ptr->owner->next;
1672 tipc_node_lock(n_ptr); 1673 tipc_node_lock(n_ptr);
1673 1674
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 6ac4e4f033ac..d401dc8f05ed 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -41,17 +41,18 @@ static inline unsigned int __xfrm_dst_hash(xfrm_address_t *daddr, xfrm_address_t
41 return (h ^ (h >> 16)) & hmask; 41 return (h ^ (h >> 16)) & hmask;
42} 42}
43 43
44static inline unsigned __xfrm_src_hash(xfrm_address_t *saddr, 44static inline unsigned __xfrm_src_hash(xfrm_address_t *daddr,
45 xfrm_address_t *saddr,
45 unsigned short family, 46 unsigned short family,
46 unsigned int hmask) 47 unsigned int hmask)
47{ 48{
48 unsigned int h = family; 49 unsigned int h = family;
49 switch (family) { 50 switch (family) {
50 case AF_INET: 51 case AF_INET:
51 h ^= __xfrm4_addr_hash(saddr); 52 h ^= __xfrm4_daddr_saddr_hash(daddr, saddr);
52 break; 53 break;
53 case AF_INET6: 54 case AF_INET6:
54 h ^= __xfrm6_addr_hash(saddr); 55 h ^= __xfrm6_daddr_saddr_hash(daddr, saddr);
55 break; 56 break;
56 }; 57 };
57 return (h ^ (h >> 16)) & hmask; 58 return (h ^ (h >> 16)) & hmask;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b6e2e79d7261..2a7861661f14 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -778,8 +778,9 @@ void xfrm_policy_flush(u8 type)
778 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 778 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
779 struct xfrm_policy *pol; 779 struct xfrm_policy *pol;
780 struct hlist_node *entry; 780 struct hlist_node *entry;
781 int i; 781 int i, killed;
782 782
783 killed = 0;
783 again1: 784 again1:
784 hlist_for_each_entry(pol, entry, 785 hlist_for_each_entry(pol, entry,
785 &xfrm_policy_inexact[dir], bydst) { 786 &xfrm_policy_inexact[dir], bydst) {
@@ -790,6 +791,7 @@ void xfrm_policy_flush(u8 type)
790 write_unlock_bh(&xfrm_policy_lock); 791 write_unlock_bh(&xfrm_policy_lock);
791 792
792 xfrm_policy_kill(pol); 793 xfrm_policy_kill(pol);
794 killed++;
793 795
794 write_lock_bh(&xfrm_policy_lock); 796 write_lock_bh(&xfrm_policy_lock);
795 goto again1; 797 goto again1;
@@ -807,13 +809,14 @@ void xfrm_policy_flush(u8 type)
807 write_unlock_bh(&xfrm_policy_lock); 809 write_unlock_bh(&xfrm_policy_lock);
808 810
809 xfrm_policy_kill(pol); 811 xfrm_policy_kill(pol);
812 killed++;
810 813
811 write_lock_bh(&xfrm_policy_lock); 814 write_lock_bh(&xfrm_policy_lock);
812 goto again2; 815 goto again2;
813 } 816 }
814 } 817 }
815 818
816 xfrm_policy_count[dir] = 0; 819 xfrm_policy_count[dir] -= killed;
817 } 820 }
818 atomic_inc(&flow_cache_genid); 821 atomic_inc(&flow_cache_genid);
819 write_unlock_bh(&xfrm_policy_lock); 822 write_unlock_bh(&xfrm_policy_lock);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index f927b7330f02..39b8bf3a9ded 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -63,10 +63,11 @@ static inline unsigned int xfrm_dst_hash(xfrm_address_t *daddr,
63 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask); 63 return __xfrm_dst_hash(daddr, saddr, reqid, family, xfrm_state_hmask);
64} 64}
65 65
66static inline unsigned int xfrm_src_hash(xfrm_address_t *addr, 66static inline unsigned int xfrm_src_hash(xfrm_address_t *daddr,
67 xfrm_address_t *saddr,
67 unsigned short family) 68 unsigned short family)
68{ 69{
69 return __xfrm_src_hash(addr, family, xfrm_state_hmask); 70 return __xfrm_src_hash(daddr, saddr, family, xfrm_state_hmask);
70} 71}
71 72
72static inline unsigned int 73static inline unsigned int
@@ -92,7 +93,8 @@ static void xfrm_hash_transfer(struct hlist_head *list,
92 nhashmask); 93 nhashmask);
93 hlist_add_head(&x->bydst, ndsttable+h); 94 hlist_add_head(&x->bydst, ndsttable+h);
94 95
95 h = __xfrm_src_hash(&x->props.saddr, x->props.family, 96 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
97 x->props.family,
96 nhashmask); 98 nhashmask);
97 hlist_add_head(&x->bysrc, nsrctable+h); 99 hlist_add_head(&x->bysrc, nsrctable+h);
98 100
@@ -458,7 +460,7 @@ static struct xfrm_state *__xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi,
458 460
459static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family) 461static struct xfrm_state *__xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
460{ 462{
461 unsigned int h = xfrm_src_hash(saddr, family); 463 unsigned int h = xfrm_src_hash(daddr, saddr, family);
462 struct xfrm_state *x; 464 struct xfrm_state *x;
463 struct hlist_node *entry; 465 struct hlist_node *entry;
464 466
@@ -587,7 +589,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
587 if (km_query(x, tmpl, pol) == 0) { 589 if (km_query(x, tmpl, pol) == 0) {
588 x->km.state = XFRM_STATE_ACQ; 590 x->km.state = XFRM_STATE_ACQ;
589 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 591 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
590 h = xfrm_src_hash(saddr, family); 592 h = xfrm_src_hash(daddr, saddr, family);
591 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); 593 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
592 if (x->id.spi) { 594 if (x->id.spi) {
593 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family); 595 h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family);
@@ -622,7 +624,7 @@ static void __xfrm_state_insert(struct xfrm_state *x)
622 x->props.reqid, x->props.family); 624 x->props.reqid, x->props.family);
623 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 625 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
624 626
625 h = xfrm_src_hash(&x->props.saddr, x->props.family); 627 h = xfrm_src_hash(&x->id.daddr, &x->props.saddr, x->props.family);
626 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); 628 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
627 629
628 if (x->id.spi) { 630 if (x->id.spi) {
@@ -748,7 +750,7 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re
748 x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ; 750 x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ;
749 add_timer(&x->timer); 751 add_timer(&x->timer);
750 hlist_add_head(&x->bydst, xfrm_state_bydst+h); 752 hlist_add_head(&x->bydst, xfrm_state_bydst+h);
751 h = xfrm_src_hash(saddr, family); 753 h = xfrm_src_hash(daddr, saddr, family);
752 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h); 754 hlist_add_head(&x->bysrc, xfrm_state_bysrc+h);
753 wake_up(&km_waitq); 755 wake_up(&km_waitq);
754 } 756 }
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c59a78d2923a..d54b3a70d5df 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -211,6 +211,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
211 case XFRM_MODE_TRANSPORT: 211 case XFRM_MODE_TRANSPORT:
212 case XFRM_MODE_TUNNEL: 212 case XFRM_MODE_TUNNEL:
213 case XFRM_MODE_ROUTEOPTIMIZATION: 213 case XFRM_MODE_ROUTEOPTIMIZATION:
214 case XFRM_MODE_BEET:
214 break; 215 break;
215 216
216 default: 217 default:
diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
index cac8f21a3392..6a026f69b563 100644
--- a/scripts/Makefile.headersinst
+++ b/scripts/Makefile.headersinst
@@ -97,7 +97,7 @@ quiet_cmd_unifdef = UNIFDEF $(patsubst $(INSTALL_HDR_PATH)/%,%,$@)
97 | $(HDRSED) > $@ || : 97 | $(HDRSED) > $@ || :
98 98
99quiet_cmd_check = CHECK $(patsubst $(INSTALL_HDR_PATH)/$(_dst)/.check.%,$(_dst)/%,$@) 99quiet_cmd_check = CHECK $(patsubst $(INSTALL_HDR_PATH)/$(_dst)/.check.%,$(_dst)/%,$@)
100 cmd_check = $(srctree)/scripts/hdrcheck.sh \ 100 cmd_check = $(CONFIG_SHELL) $(srctree)/scripts/hdrcheck.sh \
101 $(INSTALL_HDR_PATH)/include $(subst /.check.,/,$@) $@ 101 $(INSTALL_HDR_PATH)/include $(subst /.check.,/,$@) $@
102 102
103quiet_cmd_remove = REMOVE $(_dst)/$@ 103quiet_cmd_remove = REMOVE $(_dst)/$@
diff --git a/sound/oss/Makefile b/sound/oss/Makefile
index 86811792002f..2489bd6bb085 100644
--- a/sound/oss/Makefile
+++ b/sound/oss/Makefile
@@ -15,71 +15,42 @@ obj-$(CONFIG_SOUND_HAL2) += hal2.o
15obj-$(CONFIG_SOUND_AEDSP16) += aedsp16.o 15obj-$(CONFIG_SOUND_AEDSP16) += aedsp16.o
16obj-$(CONFIG_SOUND_PSS) += pss.o ad1848.o mpu401.o 16obj-$(CONFIG_SOUND_PSS) += pss.o ad1848.o mpu401.o
17obj-$(CONFIG_SOUND_TRIX) += trix.o ad1848.o sb_lib.o uart401.o 17obj-$(CONFIG_SOUND_TRIX) += trix.o ad1848.o sb_lib.o uart401.o
18obj-$(CONFIG_SOUND_OPL3SA1) += opl3sa.o ad1848.o uart401.o
19obj-$(CONFIG_SOUND_SSCAPE) += sscape.o ad1848.o mpu401.o 18obj-$(CONFIG_SOUND_SSCAPE) += sscape.o ad1848.o mpu401.o
20obj-$(CONFIG_SOUND_MAD16) += mad16.o ad1848.o sb_lib.o uart401.o
21obj-$(CONFIG_SOUND_CS4232) += cs4232.o uart401.o 19obj-$(CONFIG_SOUND_CS4232) += cs4232.o uart401.o
22obj-$(CONFIG_SOUND_MSS) += ad1848.o 20obj-$(CONFIG_SOUND_MSS) += ad1848.o
23obj-$(CONFIG_SOUND_OPL3SA2) += opl3sa2.o ad1848.o mpu401.o 21obj-$(CONFIG_SOUND_OPL3SA2) += opl3sa2.o ad1848.o mpu401.o
24obj-$(CONFIG_SOUND_PAS) += pas2.o sb.o sb_lib.o uart401.o 22obj-$(CONFIG_SOUND_PAS) += pas2.o sb.o sb_lib.o uart401.o
25obj-$(CONFIG_SOUND_SB) += sb.o sb_lib.o uart401.o 23obj-$(CONFIG_SOUND_SB) += sb.o sb_lib.o uart401.o
26obj-$(CONFIG_SOUND_KAHLUA) += kahlua.o 24obj-$(CONFIG_SOUND_KAHLUA) += kahlua.o
27obj-$(CONFIG_SOUND_WAVEFRONT) += wavefront.o
28obj-$(CONFIG_SOUND_MAUI) += maui.o mpu401.o
29obj-$(CONFIG_SOUND_MPU401) += mpu401.o 25obj-$(CONFIG_SOUND_MPU401) += mpu401.o
30obj-$(CONFIG_SOUND_UART6850) += uart6850.o 26obj-$(CONFIG_SOUND_UART6850) += uart6850.o
31obj-$(CONFIG_SOUND_GUS) += gus.o ad1848.o
32obj-$(CONFIG_SOUND_ADLIB) += adlib_card.o opl3.o 27obj-$(CONFIG_SOUND_ADLIB) += adlib_card.o opl3.o
33obj-$(CONFIG_SOUND_YM3812) += opl3.o 28obj-$(CONFIG_SOUND_YM3812) += opl3.o
34obj-$(CONFIG_SOUND_VMIDI) += v_midi.o 29obj-$(CONFIG_SOUND_VMIDI) += v_midi.o
35obj-$(CONFIG_SOUND_VIDC) += vidc_mod.o 30obj-$(CONFIG_SOUND_VIDC) += vidc_mod.o
36obj-$(CONFIG_SOUND_WAVEARTIST) += waveartist.o 31obj-$(CONFIG_SOUND_WAVEARTIST) += waveartist.o
37obj-$(CONFIG_SOUND_SGALAXY) += sgalaxy.o ad1848.o
38obj-$(CONFIG_SOUND_AD1816) += ad1816.o 32obj-$(CONFIG_SOUND_AD1816) += ad1816.o
39obj-$(CONFIG_SOUND_AD1889) += ad1889.o ac97_codec.o 33obj-$(CONFIG_SOUND_AD1889) += ad1889.o ac97_codec.o
40obj-$(CONFIG_SOUND_ACI_MIXER) += aci.o 34obj-$(CONFIG_SOUND_ACI_MIXER) += aci.o
41obj-$(CONFIG_SOUND_AWE32_SYNTH) += awe_wave.o
42 35
43obj-$(CONFIG_SOUND_VIA82CXXX) += via82cxxx_audio.o ac97_codec.o 36obj-$(CONFIG_SOUND_VIA82CXXX) += via82cxxx_audio.o ac97_codec.o
44ifeq ($(CONFIG_MIDI_VIA82CXXX),y) 37ifeq ($(CONFIG_MIDI_VIA82CXXX),y)
45 obj-$(CONFIG_SOUND_VIA82CXXX) += sound.o uart401.o 38 obj-$(CONFIG_SOUND_VIA82CXXX) += sound.o uart401.o
46endif 39endif
47obj-$(CONFIG_SOUND_YMFPCI) += ymfpci.o ac97_codec.o
48ifeq ($(CONFIG_SOUND_YMFPCI_LEGACY),y)
49 obj-$(CONFIG_SOUND_YMFPCI) += opl3.o uart401.o
50endif
51obj-$(CONFIG_SOUND_MSNDCLAS) += msnd.o msnd_classic.o 40obj-$(CONFIG_SOUND_MSNDCLAS) += msnd.o msnd_classic.o
52obj-$(CONFIG_SOUND_MSNDPIN) += msnd.o msnd_pinnacle.o 41obj-$(CONFIG_SOUND_MSNDPIN) += msnd.o msnd_pinnacle.o
53obj-$(CONFIG_SOUND_VWSND) += vwsnd.o 42obj-$(CONFIG_SOUND_VWSND) += vwsnd.o
54obj-$(CONFIG_SOUND_NM256) += nm256_audio.o ac97.o 43obj-$(CONFIG_SOUND_NM256) += nm256_audio.o ac97.o
55obj-$(CONFIG_SOUND_ICH) += i810_audio.o ac97_codec.o 44obj-$(CONFIG_SOUND_ICH) += i810_audio.o ac97_codec.o
56obj-$(CONFIG_SOUND_SONICVIBES) += sonicvibes.o
57obj-$(CONFIG_SOUND_CMPCI) += cmpci.o
58ifeq ($(CONFIG_SOUND_CMPCI_FM),y)
59 obj-$(CONFIG_SOUND_CMPCI) += sound.o opl3.o
60endif
61ifeq ($(CONFIG_SOUND_CMPCI_MIDI),y)
62 obj-$(CONFIG_SOUND_CMPCI) += sound.o mpu401.o
63endif
64obj-$(CONFIG_SOUND_ES1370) += es1370.o
65obj-$(CONFIG_SOUND_ES1371) += es1371.o ac97_codec.o 45obj-$(CONFIG_SOUND_ES1371) += es1371.o ac97_codec.o
66obj-$(CONFIG_SOUND_VRC5477) += nec_vrc5477.o ac97_codec.o 46obj-$(CONFIG_SOUND_VRC5477) += nec_vrc5477.o ac97_codec.o
67obj-$(CONFIG_SOUND_AU1000) += au1000.o ac97_codec.o
68obj-$(CONFIG_SOUND_AU1550_AC97) += au1550_ac97.o ac97_codec.o 47obj-$(CONFIG_SOUND_AU1550_AC97) += au1550_ac97.o ac97_codec.o
69obj-$(CONFIG_SOUND_ESSSOLO1) += esssolo1.o
70obj-$(CONFIG_SOUND_FUSION) += cs46xx.o ac97_codec.o 48obj-$(CONFIG_SOUND_FUSION) += cs46xx.o ac97_codec.o
71obj-$(CONFIG_SOUND_MAESTRO) += maestro.o
72obj-$(CONFIG_SOUND_MAESTRO3) += maestro3.o ac97_codec.o
73obj-$(CONFIG_SOUND_TRIDENT) += trident.o ac97_codec.o 49obj-$(CONFIG_SOUND_TRIDENT) += trident.o ac97_codec.o
74obj-$(CONFIG_SOUND_HARMONY) += harmony.o
75obj-$(CONFIG_SOUND_EMU10K1) += ac97_codec.o 50obj-$(CONFIG_SOUND_EMU10K1) += ac97_codec.o
76obj-$(CONFIG_SOUND_BCM_CS4297A) += swarm_cs4297a.o 51obj-$(CONFIG_SOUND_BCM_CS4297A) += swarm_cs4297a.o
77obj-$(CONFIG_SOUND_RME96XX) += rme96xx.o
78obj-$(CONFIG_SOUND_BT878) += btaudio.o 52obj-$(CONFIG_SOUND_BT878) += btaudio.o
79obj-$(CONFIG_SOUND_ALI5455) += ali5455.o ac97_codec.o
80obj-$(CONFIG_SOUND_FORTE) += forte.o ac97_codec.o
81 53
82obj-$(CONFIG_SOUND_AD1980) += ac97_plugin_ad1980.o ac97_codec.o
83obj-$(CONFIG_SOUND_WM97XX) += ac97_plugin_wm97xx.o 54obj-$(CONFIG_SOUND_WM97XX) += ac97_plugin_wm97xx.o
84 55
85ifeq ($(CONFIG_MIDI_EMU10K1),y) 56ifeq ($(CONFIG_MIDI_EMU10K1),y)
@@ -87,28 +58,25 @@ ifeq ($(CONFIG_MIDI_EMU10K1),y)
87endif 58endif
88 59
89obj-$(CONFIG_SOUND_EMU10K1) += emu10k1/ 60obj-$(CONFIG_SOUND_EMU10K1) += emu10k1/
90obj-$(CONFIG_SOUND_CS4281) += cs4281/
91obj-$(CONFIG_DMASOUND) += dmasound/ 61obj-$(CONFIG_DMASOUND) += dmasound/
92 62
93# Declare multi-part drivers. 63# Declare multi-part drivers.
94 64
95sound-objs := \ 65sound-objs := \
96 dev_table.o soundcard.o sound_syms.o \ 66 dev_table.o soundcard.o \
97 audio.o audio_syms.o dmabuf.o \ 67 audio.o dmabuf.o \
98 midi_syms.o midi_synth.o midibuf.o \ 68 midi_synth.o midibuf.o \
99 sequencer.o sequencer_syms.o sound_timer.o sys_timer.o 69 sequencer.o sound_timer.o sys_timer.o
100 70
101gus-objs := gus_card.o gus_midi.o gus_vol.o gus_wave.o ics2101.o
102pas2-objs := pas2_card.o pas2_midi.o pas2_mixer.o pas2_pcm.o 71pas2-objs := pas2_card.o pas2_midi.o pas2_mixer.o pas2_pcm.o
103sb-objs := sb_card.o 72sb-objs := sb_card.o
104sb_lib-objs := sb_common.o sb_audio.o sb_midi.o sb_mixer.o sb_ess.o 73sb_lib-objs := sb_common.o sb_audio.o sb_midi.o sb_mixer.o sb_ess.o
105vidc_mod-objs := vidc.o vidc_fill.o 74vidc_mod-objs := vidc.o vidc_fill.o
106wavefront-objs := wavfront.o wf_midi.o yss225.o
107 75
108hostprogs-y := bin2hex hex2hex 76hostprogs-y := bin2hex hex2hex
109 77
110# Files generated that shall be removed upon make clean 78# Files generated that shall be removed upon make clean
111clean-files := maui_boot.h msndperm.c msndinit.c pndsperm.c pndspini.c \ 79clean-files := msndperm.c msndinit.c pndsperm.c pndspini.c \
112 pss_boot.h trix_boot.h 80 pss_boot.h trix_boot.h
113 81
114# Firmware files that need translation 82# Firmware files that need translation
@@ -118,21 +86,6 @@ clean-files := maui_boot.h msndperm.c msndinit.c pndsperm.c pndspini.c \
118# will be forced to be remade. 86# will be forced to be remade.
119# 87#
120 88
121# Turtle Beach Maui / Tropez
122
123$(obj)/maui.o: $(obj)/maui_boot.h
124
125ifeq ($(CONFIG_MAUI_HAVE_BOOT),y)
126 $(obj)/maui_boot.h: $(patsubst "%", %, $(CONFIG_MAUI_BOOT_FILE)) $(obj)/bin2hex
127 $(obj)/bin2hex -i maui_os < $< > $@
128else
129 $(obj)/maui_boot.h:
130 ( \
131 echo 'static unsigned char * maui_os = NULL;'; \
132 echo 'static int maui_osLen = 0;'; \
133 ) > $@
134endif
135
136# Turtle Beach MultiSound 89# Turtle Beach MultiSound
137 90
138ifeq ($(CONFIG_MSNDCLAS_HAVE_BOOT),y) 91ifeq ($(CONFIG_MSNDCLAS_HAVE_BOOT),y)
diff --git a/sound/oss/ac97.c b/sound/oss/ac97.c
index 3ba6d91e891d..72cf4ed77937 100644
--- a/sound/oss/ac97.c
+++ b/sound/oss/ac97.c
@@ -112,25 +112,6 @@ ac97_init (struct ac97_hwint *dev)
112 return 0; 112 return 0;
113} 113}
114 114
115/* Reset the mixer to the currently saved settings. */
116int
117ac97_reset (struct ac97_hwint *dev)
118{
119 int x;
120
121 if (dev->reset_device (dev))
122 return -1;
123
124 /* Now set the registers back to their last-written values. */
125 for (x = 0; mixerRegs[x].ac97_regnum != -1; x++) {
126 int regnum = mixerRegs[x].ac97_regnum;
127 int value = dev->last_written_mixer_values [regnum / 2];
128 if (value >= 0)
129 ac97_put_register (dev, regnum, value);
130 }
131 return 0;
132}
133
134/* Return the contents of register REG; use the cache if the value in it 115/* Return the contents of register REG; use the cache if the value in it
135 is valid. Returns a negative error code on failure. */ 116 is valid. Returns a negative error code on failure. */
136static int 117static int
@@ -441,7 +422,6 @@ EXPORT_SYMBOL(ac97_init);
441EXPORT_SYMBOL(ac97_set_values); 422EXPORT_SYMBOL(ac97_set_values);
442EXPORT_SYMBOL(ac97_put_register); 423EXPORT_SYMBOL(ac97_put_register);
443EXPORT_SYMBOL(ac97_mixer_ioctl); 424EXPORT_SYMBOL(ac97_mixer_ioctl);
444EXPORT_SYMBOL(ac97_reset);
445MODULE_LICENSE("GPL"); 425MODULE_LICENSE("GPL");
446 426
447 427
diff --git a/sound/oss/ac97.h b/sound/oss/ac97.h
index 77d454ea3202..01837a9d7d6e 100644
--- a/sound/oss/ac97.h
+++ b/sound/oss/ac97.h
@@ -192,9 +192,6 @@ extern int ac97_put_register (struct ac97_hwint *dev, u8 reg, u16 value);
192extern int ac97_mixer_ioctl (struct ac97_hwint *dev, unsigned int cmd, 192extern int ac97_mixer_ioctl (struct ac97_hwint *dev, unsigned int cmd,
193 void __user * arg); 193 void __user * arg);
194 194
195/* Do a complete reset on the AC97 mixer, restoring all mixer registers to
196 the current values. Normally used after an APM resume event. */
197extern int ac97_reset (struct ac97_hwint *dev);
198#endif 195#endif
199 196
200/* 197/*
diff --git a/sound/oss/ac97_codec.c b/sound/oss/ac97_codec.c
index 972327c97644..602db497929a 100644
--- a/sound/oss/ac97_codec.c
+++ b/sound/oss/ac97_codec.c
@@ -1399,95 +1399,6 @@ unsigned int ac97_set_adc_rate(struct ac97_codec *codec, unsigned int rate)
1399 1399
1400EXPORT_SYMBOL(ac97_set_adc_rate); 1400EXPORT_SYMBOL(ac97_set_adc_rate);
1401 1401
1402int ac97_save_state(struct ac97_codec *codec)
1403{
1404 return 0;
1405}
1406
1407EXPORT_SYMBOL(ac97_save_state);
1408
1409int ac97_restore_state(struct ac97_codec *codec)
1410{
1411 int i;
1412 unsigned int left, right, val;
1413
1414 for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
1415 if (!supported_mixer(codec, i))
1416 continue;
1417
1418 val = codec->mixer_state[i];
1419 right = val >> 8;
1420 left = val & 0xff;
1421 codec->write_mixer(codec, i, left, right);
1422 }
1423 return 0;
1424}
1425
1426EXPORT_SYMBOL(ac97_restore_state);
1427
1428/**
1429 * ac97_register_driver - register a codec helper
1430 * @driver: Driver handler
1431 *
1432 * Register a handler for codecs matching the codec id. The handler
1433 * attach function is called for all present codecs and will be
1434 * called when new codecs are discovered.
1435 */
1436
1437int ac97_register_driver(struct ac97_driver *driver)
1438{
1439 struct list_head *l;
1440 struct ac97_codec *c;
1441
1442 mutex_lock(&codec_mutex);
1443 INIT_LIST_HEAD(&driver->list);
1444 list_add(&driver->list, &codec_drivers);
1445
1446 list_for_each(l, &codecs)
1447 {
1448 c = list_entry(l, struct ac97_codec, list);
1449 if(c->driver != NULL || ((c->model ^ driver->codec_id) & driver->codec_mask))
1450 continue;
1451 if(driver->probe(c, driver))
1452 continue;
1453 c->driver = driver;
1454 }
1455 mutex_unlock(&codec_mutex);
1456 return 0;
1457}
1458
1459EXPORT_SYMBOL_GPL(ac97_register_driver);
1460
1461/**
1462 * ac97_unregister_driver - unregister a codec helper
1463 * @driver: Driver handler
1464 *
1465 * Unregister a handler for codecs matching the codec id. The handler
1466 * remove function is called for all matching codecs.
1467 */
1468
1469void ac97_unregister_driver(struct ac97_driver *driver)
1470{
1471 struct list_head *l;
1472 struct ac97_codec *c;
1473
1474 mutex_lock(&codec_mutex);
1475 list_del_init(&driver->list);
1476
1477 list_for_each(l, &codecs)
1478 {
1479 c = list_entry(l, struct ac97_codec, list);
1480 if (c->driver == driver) {
1481 driver->remove(c, driver);
1482 c->driver = NULL;
1483 }
1484 }
1485
1486 mutex_unlock(&codec_mutex);
1487}
1488
1489EXPORT_SYMBOL_GPL(ac97_unregister_driver);
1490
1491static int swap_headphone(int remove_master) 1402static int swap_headphone(int remove_master)
1492{ 1403{
1493 struct list_head *l; 1404 struct list_head *l;
diff --git a/sound/oss/ac97_plugin_ad1980.c b/sound/oss/ac97_plugin_ad1980.c
deleted file mode 100644
index 0435c43d9704..000000000000
--- a/sound/oss/ac97_plugin_ad1980.c
+++ /dev/null
@@ -1,125 +0,0 @@
1/*
2 ac97_plugin_ad1980.c Copyright (C) 2003 Red Hat, Inc. All rights reserved.
3
4 The contents of this file are subject to the Open Software License version 1.1
5 that can be found at http://www.opensource.org/licenses/osl-1.1.txt and is
6 included herein by reference.
7
8 Alternatively, the contents of this file may be used under the
9 terms of the GNU General Public License version 2 (the "GPL") as
10 distributed in the kernel source COPYING file, in which
11 case the provisions of the GPL are applicable instead of the
12 above. If you wish to allow the use of your version of this file
13 only under the terms of the GPL and not to allow others to use
14 your version of this file under the OSL, indicate your decision
15 by deleting the provisions above and replace them with the notice
16 and other provisions required by the GPL. If you do not delete
17 the provisions above, a recipient may use your version of this
18 file under either the OSL or the GPL.
19
20 Authors: Alan Cox <alan@redhat.com>
21
22 This is an example codec plugin. This one switches the connections
23 around to match the setups some vendors use with audio switched to
24 non standard front connectors not the normal rear ones
25
26 This code primarily exists to demonstrate how to use the codec
27 interface
28
29*/
30
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/kernel.h>
34#include <linux/ac97_codec.h>
35
36/**
37 * ad1980_remove - codec remove callback
38 * @codec: The codec that is being removed
39 *
40 * This callback occurs when an AC97 codec is being removed. A
41 * codec remove call will not occur for a codec during that codec
42 * probe callback.
43 *
44 * Most drivers will need to lock their remove versus their
45 * use of the codec after the probe function.
46 */
47
48static void __devexit ad1980_remove(struct ac97_codec *codec, struct ac97_driver *driver)
49{
50 /* Nothing to do in the simple example */
51}
52
53
54/**
55 * ad1980_probe - codec found callback
56 * @codec: ac97 codec matching the idents
57 * @driver: ac97_driver it matched
58 *
59 * This entry point is called when a codec is found which matches
60 * the driver. At the point it is called the codec is basically
61 * operational, mixer operations have been initialised and can
62 * be overriden. Called in process context. The field driver_private
63 * is available for the driver to use to store stuff.
64 *
65 * The caller can claim the device by returning zero, or return
66 * a negative error code.
67 */
68
69static int ad1980_probe(struct ac97_codec *codec, struct ac97_driver *driver)
70{
71 u16 control;
72
73#define AC97_AD_MISC 0x76
74
75 /* Switch the inputs/outputs over (from Dell code) */
76 control = codec->codec_read(codec, AC97_AD_MISC);
77 codec->codec_write(codec, AC97_AD_MISC, control | 0x4420);
78
79 /* We could refuse the device since we dont need to hang around,
80 but we will claim it */
81 return 0;
82}
83
84
85static struct ac97_driver ad1980_driver = {
86 .codec_id = 0x41445370,
87 .codec_mask = 0xFFFFFFFF,
88 .name = "AD1980 example",
89 .probe = ad1980_probe,
90 .remove = __devexit_p(ad1980_remove),
91};
92
93/**
94 * ad1980_exit - module exit path
95 *
96 * Our module is being unloaded. At this point unregister_driver
97 * will call back our remove handler for any existing codecs. You
98 * may not unregister_driver from interrupt context or from a
99 * probe/remove callback.
100 */
101
102static void ad1980_exit(void)
103{
104 ac97_unregister_driver(&ad1980_driver);
105}
106
107/**
108 * ad1980_init - set up ad1980 handlers
109 *
110 * After we call the register function it will call our probe
111 * function for each existing matching device before returning to us.
112 * Any devices appearing afterwards whose id's match the codec_id
113 * will also cause the probe function to be called.
114 * You may not register_driver from interrupt context or from a
115 * probe/remove callback.
116 */
117
118static int ad1980_init(void)
119{
120 return ac97_register_driver(&ad1980_driver);
121}
122
123module_init(ad1980_init);
124module_exit(ad1980_exit);
125MODULE_LICENSE("GPL");
diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c
index f6b6b886c2ad..257b7536fb18 100644
--- a/sound/oss/ad1848.c
+++ b/sound/oss/ad1848.c
@@ -195,6 +195,7 @@ static void ad1848_halt(int dev);
195static void ad1848_halt_input(int dev); 195static void ad1848_halt_input(int dev);
196static void ad1848_halt_output(int dev); 196static void ad1848_halt_output(int dev);
197static void ad1848_trigger(int dev, int bits); 197static void ad1848_trigger(int dev, int bits);
198static irqreturn_t adintr(int irq, void *dev_id, struct pt_regs *dummy);
198 199
199#ifndef EXCLUDE_TIMERS 200#ifndef EXCLUDE_TIMERS
200static int ad1848_tmr_install(int dev); 201static int ad1848_tmr_install(int dev);
@@ -2195,7 +2196,7 @@ void ad1848_unload(int io_base, int irq, int dma_playback, int dma_capture, int
2195 printk(KERN_ERR "ad1848: Can't find device to be unloaded. Base=%x\n", io_base); 2196 printk(KERN_ERR "ad1848: Can't find device to be unloaded. Base=%x\n", io_base);
2196} 2197}
2197 2198
2198irqreturn_t adintr(int irq, void *dev_id, struct pt_regs *dummy) 2199static irqreturn_t adintr(int irq, void *dev_id, struct pt_regs *dummy)
2199{ 2200{
2200 unsigned char status; 2201 unsigned char status;
2201 ad1848_info *devc; 2202 ad1848_info *devc;
@@ -2802,7 +2803,6 @@ EXPORT_SYMBOL(ad1848_detect);
2802EXPORT_SYMBOL(ad1848_init); 2803EXPORT_SYMBOL(ad1848_init);
2803EXPORT_SYMBOL(ad1848_unload); 2804EXPORT_SYMBOL(ad1848_unload);
2804EXPORT_SYMBOL(ad1848_control); 2805EXPORT_SYMBOL(ad1848_control);
2805EXPORT_SYMBOL(adintr);
2806EXPORT_SYMBOL(probe_ms_sound); 2806EXPORT_SYMBOL(probe_ms_sound);
2807EXPORT_SYMBOL(attach_ms_sound); 2807EXPORT_SYMBOL(attach_ms_sound);
2808EXPORT_SYMBOL(unload_ms_sound); 2808EXPORT_SYMBOL(unload_ms_sound);
diff --git a/sound/oss/ad1848.h b/sound/oss/ad1848.h
index d0573b023973..b95ebe28d426 100644
--- a/sound/oss/ad1848.h
+++ b/sound/oss/ad1848.h
@@ -18,7 +18,6 @@ void ad1848_unload (int io_base, int irq, int dma_playback, int dma_capture, int
18int ad1848_detect (struct resource *ports, int *flags, int *osp); 18int ad1848_detect (struct resource *ports, int *flags, int *osp);
19int ad1848_control(int cmd, int arg); 19int ad1848_control(int cmd, int arg);
20 20
21irqreturn_t adintr(int irq, void *dev_id, struct pt_regs * dummy);
22void attach_ms_sound(struct address_info * hw_config, struct resource *ports, struct module * owner); 21void attach_ms_sound(struct address_info * hw_config, struct resource *ports, struct module * owner);
23 22
24int probe_ms_sound(struct address_info *hw_config, struct resource *ports); 23int probe_ms_sound(struct address_info *hw_config, struct resource *ports);
diff --git a/sound/oss/ali5455.c b/sound/oss/ali5455.c
deleted file mode 100644
index 70dcd703a66f..000000000000
--- a/sound/oss/ali5455.c
+++ /dev/null
@@ -1,3735 +0,0 @@
1/*
2 * ALI ali5455 and friends ICH driver for Linux
3 * LEI HU <Lei_Hu@ali.com.tw>
4 *
5 * Built from:
6 * drivers/sound/i810_audio
7 *
8 * The ALi 5455 is similar but not quite identical to the Intel ICH
9 * series of controllers. Its easier to keep the driver separated from
10 * the i810 driver.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * ALi 5455 theory of operation
28 *
29 * The chipset provides three DMA channels that talk to an AC97
30 * CODEC (AC97 is a digital/analog mixer standard). At its simplest
31 * you get 48Khz audio with basic volume and mixer controls. At the
32 * best you get rate adaption in the codec. We set the card up so
33 * that we never take completion interrupts but instead keep the card
34 * chasing its tail around a ring buffer. This is needed for mmap
35 * mode audio and happens to work rather well for non-mmap modes too.
36 *
37 * The board has one output channel for PCM audio (supported) and
38 * a stereo line in and mono microphone input. Again these are normally
39 * locked to 48Khz only. Right now recording is not finished.
40 *
41 * There is no midi support, no synth support. Use timidity. To get
42 * esd working you need to use esd -r 48000 as it won't probe 48KHz
43 * by default. mpg123 can't handle 48Khz only audio so use xmms.
44 *
45 * If you need to force a specific rate set the clocking= option
46 *
47 */
48
49#include <linux/module.h>
50#include <linux/string.h>
51#include <linux/ctype.h>
52#include <linux/ioport.h>
53#include <linux/sched.h>
54#include <linux/delay.h>
55#include <linux/sound.h>
56#include <linux/slab.h>
57#include <linux/soundcard.h>
58#include <linux/pci.h>
59#include <asm/io.h>
60#include <asm/dma.h>
61#include <linux/init.h>
62#include <linux/poll.h>
63#include <linux/spinlock.h>
64#include <linux/smp_lock.h>
65#include <linux/ac97_codec.h>
66#include <linux/interrupt.h>
67#include <linux/mutex.h>
68
69#include <asm/uaccess.h>
70
71#ifndef PCI_DEVICE_ID_ALI_5455
72#define PCI_DEVICE_ID_ALI_5455 0x5455
73#endif
74
75#ifndef PCI_VENDOR_ID_ALI
76#define PCI_VENDOR_ID_ALI 0x10b9
77#endif
78
79static int strict_clocking = 0;
80static unsigned int clocking = 0;
81static unsigned int codec_pcmout_share_spdif_locked = 0;
82static unsigned int codec_independent_spdif_locked = 0;
83static unsigned int controller_pcmout_share_spdif_locked = 0;
84static unsigned int controller_independent_spdif_locked = 0;
85static unsigned int globel = 0;
86
87#define ADC_RUNNING 1
88#define DAC_RUNNING 2
89#define CODEC_SPDIFOUT_RUNNING 8
90#define CONTROLLER_SPDIFOUT_RUNNING 4
91
92#define SPDIF_ENABLE_OUTPUT 4 /* bits 0,1 are PCM */
93
94#define ALI5455_FMT_16BIT 1
95#define ALI5455_FMT_STEREO 2
96#define ALI5455_FMT_MASK 3
97
98#define SPDIF_ON 0x0004
99#define SURR_ON 0x0010
100#define CENTER_LFE_ON 0x0020
101#define VOL_MUTED 0x8000
102
103
104#define ALI_SPDIF_OUT_CH_STATUS 0xbf
105/* the 810's array of pointers to data buffers */
106
107struct sg_item {
108#define BUSADDR_MASK 0xFFFFFFFE
109 u32 busaddr;
110#define CON_IOC 0x80000000 /* interrupt on completion */
111#define CON_BUFPAD 0x40000000 /* pad underrun with last sample, else 0 */
112#define CON_BUFLEN_MASK 0x0000ffff /* buffer length in samples */
113 u32 control;
114};
115
116/* an instance of the ali channel */
117#define SG_LEN 32
118struct ali_channel {
119 /* these sg guys should probably be allocated
120 separately as nocache. Must be 8 byte aligned */
121 struct sg_item sg[SG_LEN]; /* 32*8 */
122 u32 offset; /* 4 */
123 u32 port; /* 4 */
124 u32 used;
125 u32 num;
126};
127
128/*
129 * we have 3 separate dma engines. pcm in, pcm out, and mic.
130 * each dma engine has controlling registers. These goofy
131 * names are from the datasheet, but make it easy to write
132 * code while leafing through it.
133 */
134
135#define ENUM_ENGINE(PRE,DIG) \
136enum { \
137 PRE##_BDBAR = 0x##DIG##0, /* Buffer Descriptor list Base Address */ \
138 PRE##_CIV = 0x##DIG##4, /* Current Index Value */ \
139 PRE##_LVI = 0x##DIG##5, /* Last Valid Index */ \
140 PRE##_SR = 0x##DIG##6, /* Status Register */ \
141 PRE##_PICB = 0x##DIG##8, /* Position In Current Buffer */ \
142 PRE##_CR = 0x##DIG##b /* Control Register */ \
143}
144
145ENUM_ENGINE(OFF, 0); /* Offsets */
146ENUM_ENGINE(PI, 4); /* PCM In */
147ENUM_ENGINE(PO, 5); /* PCM Out */
148ENUM_ENGINE(MC, 6); /* Mic In */
149ENUM_ENGINE(CODECSPDIFOUT, 7); /* CODEC SPDIF OUT */
150ENUM_ENGINE(CONTROLLERSPDIFIN, A); /* CONTROLLER SPDIF In */
151ENUM_ENGINE(CONTROLLERSPDIFOUT, B); /* CONTROLLER SPDIF OUT */
152
153
154enum {
155 ALI_SCR = 0x00, /* System Control Register */
156 ALI_SSR = 0x04, /* System Status Register */
157 ALI_DMACR = 0x08, /* DMA Control Register */
158 ALI_FIFOCR1 = 0x0c, /* FIFO Control Register 1 */
159 ALI_INTERFACECR = 0x10, /* Interface Control Register */
160 ALI_INTERRUPTCR = 0x14, /* Interrupt control Register */
161 ALI_INTERRUPTSR = 0x18, /* Interrupt Status Register */
162 ALI_FIFOCR2 = 0x1c, /* FIFO Control Register 2 */
163 ALI_CPR = 0x20, /* Command Port Register */
164 ALI_SPR = 0x24, /* Status Port Register */
165 ALI_FIFOCR3 = 0x2c, /* FIFO Control Register 3 */
166 ALI_TTSR = 0x30, /* Transmit Tag Slot Register */
167 ALI_RTSR = 0x34, /* Receive Tag Slot Register */
168 ALI_CSPSR = 0x38, /* Command/Status Port Status Register */
169 ALI_CAS = 0x3c, /* Codec Write Semaphore Register */
170 ALI_SPDIFCSR = 0xf8, /* spdif channel status register */
171 ALI_SPDIFICS = 0xfc /* spdif interface control/status */
172};
173
174// x-status register(x:pcm in ,pcm out, mic in,)
175/* interrupts for a dma engine */
176#define DMA_INT_FIFO (1<<4) /* fifo under/over flow */
177#define DMA_INT_COMPLETE (1<<3) /* buffer read/write complete and ioc set */
178#define DMA_INT_LVI (1<<2) /* last valid done */
179#define DMA_INT_CELV (1<<1) /* last valid is current */
180#define DMA_INT_DCH (1) /* DMA Controller Halted (happens on LVI interrupts) */ //not eqult intel
181#define DMA_INT_MASK (DMA_INT_FIFO|DMA_INT_COMPLETE|DMA_INT_LVI)
182
183/* interrupts for the whole chip */// by interrupt status register finish
184
185#define INT_SPDIFOUT (1<<23) /* controller spdif out INTERRUPT */
186#define INT_SPDIFIN (1<<22)
187#define INT_CODECSPDIFOUT (1<<19)
188#define INT_MICIN (1<<18)
189#define INT_PCMOUT (1<<17)
190#define INT_PCMIN (1<<16)
191#define INT_CPRAIS (1<<7)
192#define INT_SPRAIS (1<<5)
193#define INT_GPIO (1<<1)
194#define INT_MASK (INT_SPDIFOUT|INT_CODECSPDIFOUT|INT_MICIN|INT_PCMOUT|INT_PCMIN)
195
196#define DRIVER_VERSION "0.02ac"
197
198/* magic numbers to protect our data structures */
199#define ALI5455_CARD_MAGIC 0x5072696E /* "Prin" */
200#define ALI5455_STATE_MAGIC 0x63657373 /* "cess" */
201#define ALI5455_DMA_MASK 0xffffffff /* DMA buffer mask for pci_alloc_consist */
202#define NR_HW_CH 5 //I think 5 channel
203
204/* maxinum number of AC97 codecs connected, AC97 2.0 defined 4 */
205#define NR_AC97 2
206
207/* Please note that an 8bit mono stream is not valid on this card, you must have a 16bit */
208/* stream at a minimum for this card to be happy */
209static const unsigned sample_size[] = { 1, 2, 2, 4 };
210/* Samples are 16bit values, so we are shifting to a word, not to a byte, hence shift */
211/* values are one less than might be expected */
212static const unsigned sample_shift[] = { -1, 0, 0, 1 };
213
214#define ALI5455
215static char *card_names[] = {
216 "ALI 5455"
217};
218
219static struct pci_device_id ali_pci_tbl[] = {
220 {PCI_VENDOR_ID_ALI, PCI_DEVICE_ID_ALI_5455,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, ALI5455},
222 {0,}
223};
224
225MODULE_DEVICE_TABLE(pci, ali_pci_tbl);
226
227#ifdef CONFIG_PM
228#define PM_SUSPENDED(card) (card->pm_suspended)
229#else
230#define PM_SUSPENDED(card) (0)
231#endif
232
233/* "software" or virtual channel, an instance of opened /dev/dsp */
234struct ali_state {
235 unsigned int magic;
236 struct ali_card *card; /* Card info */
237
238 /* single open lock mechanism, only used for recording */
239 struct mutex open_mutex;
240 wait_queue_head_t open_wait;
241
242 /* file mode */
243 mode_t open_mode;
244
245 /* virtual channel number */
246 int virt;
247
248#ifdef CONFIG_PM
249 unsigned int pm_saved_dac_rate, pm_saved_adc_rate;
250#endif
251 struct dmabuf {
252 /* wave sample stuff */
253 unsigned int rate;
254 unsigned char fmt, enable, trigger;
255
256 /* hardware channel */
257 struct ali_channel *read_channel;
258 struct ali_channel *write_channel;
259 struct ali_channel *codec_spdifout_channel;
260 struct ali_channel *controller_spdifout_channel;
261
262 /* OSS buffer management stuff */
263 void *rawbuf;
264 dma_addr_t dma_handle;
265 unsigned buforder;
266 unsigned numfrag;
267 unsigned fragshift;
268
269 /* our buffer acts like a circular ring */
270 unsigned hwptr; /* where dma last started, updated by update_ptr */
271 unsigned swptr; /* where driver last clear/filled, updated by read/write */
272 int count; /* bytes to be consumed or been generated by dma machine */
273 unsigned total_bytes; /* total bytes dmaed by hardware */
274
275 unsigned error; /* number of over/underruns */
276 wait_queue_head_t wait; /* put process on wait queue when no more space in buffer */
277
278 /* redundant, but makes calculations easier */
279 /* what the hardware uses */
280 unsigned dmasize;
281 unsigned fragsize;
282 unsigned fragsamples;
283
284 /* what we tell the user to expect */
285 unsigned userfrags;
286 unsigned userfragsize;
287
288 /* OSS stuff */
289 unsigned mapped:1;
290 unsigned ready:1;
291 unsigned update_flag;
292 unsigned ossfragsize;
293 unsigned ossmaxfrags;
294 unsigned subdivision;
295 } dmabuf;
296};
297
298
299struct ali_card {
300 struct ali_channel channel[5];
301 unsigned int magic;
302
303 /* We keep ali5455 cards in a linked list */
304 struct ali_card *next;
305
306 /* The ali has a certain amount of cross channel interaction
307 so we use a single per card lock */
308 spinlock_t lock;
309 spinlock_t ac97_lock;
310
311 /* PCI device stuff */
312 struct pci_dev *pci_dev;
313 u16 pci_id;
314#ifdef CONFIG_PM
315 u16 pm_suspended;
316 int pm_saved_mixer_settings[SOUND_MIXER_NRDEVICES][NR_AC97];
317#endif
318 /* soundcore stuff */
319 int dev_audio;
320
321 /* structures for abstraction of hardware facilities, codecs, banks and channels */
322 struct ac97_codec *ac97_codec[NR_AC97];
323 struct ali_state *states[NR_HW_CH];
324
325 u16 ac97_features;
326 u16 ac97_status;
327 u16 channels;
328
329 /* hardware resources */
330 unsigned long iobase;
331
332 u32 irq;
333
334 /* Function support */
335 struct ali_channel *(*alloc_pcm_channel) (struct ali_card *);
336 struct ali_channel *(*alloc_rec_pcm_channel) (struct ali_card *);
337 struct ali_channel *(*alloc_rec_mic_channel) (struct ali_card *);
338 struct ali_channel *(*alloc_codec_spdifout_channel) (struct ali_card *);
339 struct ali_channel *(*alloc_controller_spdifout_channel) (struct ali_card *);
340 void (*free_pcm_channel) (struct ali_card *, int chan);
341
342 /* We have a *very* long init time possibly, so use this to block */
343 /* attempts to open our devices before we are ready (stops oops'es) */
344 int initializing;
345};
346
347
348static struct ali_card *devs = NULL;
349
350static int ali_open_mixdev(struct inode *inode, struct file *file);
351static int ali_ioctl_mixdev(struct inode *inode, struct file *file,
352 unsigned int cmd, unsigned long arg);
353static u16 ali_ac97_get(struct ac97_codec *dev, u8 reg);
354static void ali_ac97_set(struct ac97_codec *dev, u8 reg, u16 data);
355
356static struct ali_channel *ali_alloc_pcm_channel(struct ali_card *card)
357{
358 if (card->channel[1].used == 1)
359 return NULL;
360 card->channel[1].used = 1;
361 return &card->channel[1];
362}
363
364static struct ali_channel *ali_alloc_rec_pcm_channel(struct ali_card *card)
365{
366 if (card->channel[0].used == 1)
367 return NULL;
368 card->channel[0].used = 1;
369 return &card->channel[0];
370}
371
372static struct ali_channel *ali_alloc_rec_mic_channel(struct ali_card *card)
373{
374 if (card->channel[2].used == 1)
375 return NULL;
376 card->channel[2].used = 1;
377 return &card->channel[2];
378}
379
380static struct ali_channel *ali_alloc_codec_spdifout_channel(struct ali_card *card)
381{
382 if (card->channel[3].used == 1)
383 return NULL;
384 card->channel[3].used = 1;
385 return &card->channel[3];
386}
387
388static struct ali_channel *ali_alloc_controller_spdifout_channel(struct ali_card *card)
389{
390 if (card->channel[4].used == 1)
391 return NULL;
392 card->channel[4].used = 1;
393 return &card->channel[4];
394}
395static void ali_free_pcm_channel(struct ali_card *card, int channel)
396{
397 card->channel[channel].used = 0;
398}
399
400
401//add support codec spdif out
402static int ali_valid_spdif_rate(struct ac97_codec *codec, int rate)
403{
404 unsigned long id = 0L;
405
406 id = (ali_ac97_get(codec, AC97_VENDOR_ID1) << 16);
407 id |= ali_ac97_get(codec, AC97_VENDOR_ID2) & 0xffff;
408 switch (id) {
409 case 0x41445361: /* AD1886 */
410 if (rate == 48000) {
411 return 1;
412 }
413 break;
414 case 0x414c4720: /* ALC650 */
415 if (rate == 48000) {
416 return 1;
417 }
418 break;
419 default: /* all other codecs, until we know otherwiae */
420 if (rate == 48000 || rate == 44100 || rate == 32000) {
421 return 1;
422 }
423 break;
424 }
425 return (0);
426}
427
428/* ali_set_spdif_output
429 *
430 * Configure the S/PDIF output transmitter. When we turn on
431 * S/PDIF, we turn off the analog output. This may not be
432 * the right thing to do.
433 *
434 * Assumptions:
435 * The DSP sample rate must already be set to a supported
436 * S/PDIF rate (32kHz, 44.1kHz, or 48kHz) or we abort.
437 */
438static void ali_set_spdif_output(struct ali_state *state, int slots,
439 int rate)
440{
441 int vol;
442 int aud_reg;
443 struct ac97_codec *codec = state->card->ac97_codec[0];
444
445 if (!(state->card->ac97_features & 4)) {
446 state->card->ac97_status &= ~SPDIF_ON;
447 } else {
448 if (slots == -1) { /* Turn off S/PDIF */
449 aud_reg = ali_ac97_get(codec, AC97_EXTENDED_STATUS);
450 ali_ac97_set(codec, AC97_EXTENDED_STATUS, (aud_reg & ~AC97_EA_SPDIF));
451
452 /* If the volume wasn't muted before we turned on S/PDIF, unmute it */
453 if (!(state->card->ac97_status & VOL_MUTED)) {
454 aud_reg = ali_ac97_get(codec, AC97_MASTER_VOL_STEREO);
455 ali_ac97_set(codec, AC97_MASTER_VOL_STEREO,
456 (aud_reg & ~VOL_MUTED));
457 }
458 state->card->ac97_status &= ~(VOL_MUTED | SPDIF_ON);
459 return;
460 }
461
462 vol = ali_ac97_get(codec, AC97_MASTER_VOL_STEREO);
463 state->card->ac97_status = vol & VOL_MUTED;
464
465 /* Set S/PDIF transmitter sample rate */
466 aud_reg = ali_ac97_get(codec, AC97_SPDIF_CONTROL);
467 switch (rate) {
468 case 32000:
469 aud_reg = (aud_reg & AC97_SC_SPSR_MASK) | AC97_SC_SPSR_32K;
470 break;
471 case 44100:
472 aud_reg = (aud_reg & AC97_SC_SPSR_MASK) | AC97_SC_SPSR_44K;
473 break;
474 case 48000:
475 aud_reg = (aud_reg & AC97_SC_SPSR_MASK) | AC97_SC_SPSR_48K;
476 break;
477 default:
478 /* turn off S/PDIF */
479 aud_reg = ali_ac97_get(codec, AC97_EXTENDED_STATUS);
480 ali_ac97_set(codec, AC97_EXTENDED_STATUS, (aud_reg & ~AC97_EA_SPDIF));
481 state->card->ac97_status &= ~SPDIF_ON;
482 return;
483 }
484
485 ali_ac97_set(codec, AC97_SPDIF_CONTROL, aud_reg);
486
487 aud_reg = ali_ac97_get(codec, AC97_EXTENDED_STATUS);
488 aud_reg = (aud_reg & AC97_EA_SLOT_MASK) | slots | AC97_EA_SPDIF;
489 ali_ac97_set(codec, AC97_EXTENDED_STATUS, aud_reg);
490
491 aud_reg = ali_ac97_get(codec, AC97_POWER_CONTROL);
492 aud_reg |= 0x0002;
493 ali_ac97_set(codec, AC97_POWER_CONTROL, aud_reg);
494 udelay(1);
495
496 state->card->ac97_status |= SPDIF_ON;
497
498 /* Check to make sure the configuration is valid */
499 aud_reg = ali_ac97_get(codec, AC97_EXTENDED_STATUS);
500 if (!(aud_reg & 0x0400)) {
501 /* turn off S/PDIF */
502 ali_ac97_set(codec, AC97_EXTENDED_STATUS, (aud_reg & ~AC97_EA_SPDIF));
503 state->card->ac97_status &= ~SPDIF_ON;
504 return;
505 }
506 if (codec_independent_spdif_locked > 0) {
507 aud_reg = ali_ac97_get(codec, 0x6a);
508 ali_ac97_set(codec, 0x6a, (aud_reg & 0xefff));
509 }
510 /* Mute the analog output */
511 /* Should this only mute the PCM volume??? */
512 }
513}
514
515/* ali_set_dac_channels
516 *
517 * Configure the codec's multi-channel DACs
518 *
519 * The logic is backwards. Setting the bit to 1 turns off the DAC.
520 *
521 * What about the ICH? We currently configure it using the
522 * SNDCTL_DSP_CHANNELS ioctl. If we're turnning on the DAC,
523 * does that imply that we want the ICH set to support
524 * these channels?
525 *
526 * TODO:
527 * vailidate that the codec really supports these DACs
528 * before turning them on.
529 */
530static void ali_set_dac_channels(struct ali_state *state, int channel)
531{
532 int aud_reg;
533 struct ac97_codec *codec = state->card->ac97_codec[0];
534
535 aud_reg = ali_ac97_get(codec, AC97_EXTENDED_STATUS);
536 aud_reg |= AC97_EA_PRI | AC97_EA_PRJ | AC97_EA_PRK;
537 state->card->ac97_status &= ~(SURR_ON | CENTER_LFE_ON);
538
539 switch (channel) {
540 case 2: /* always enabled */
541 break;
542 case 4:
543 aud_reg &= ~AC97_EA_PRJ;
544 state->card->ac97_status |= SURR_ON;
545 break;
546 case 6:
547 aud_reg &= ~(AC97_EA_PRJ | AC97_EA_PRI | AC97_EA_PRK);
548 state->card->ac97_status |= SURR_ON | CENTER_LFE_ON;
549 break;
550 default:
551 break;
552 }
553 ali_ac97_set(codec, AC97_EXTENDED_STATUS, aud_reg);
554
555}
556
557/* set playback sample rate */
558static unsigned int ali_set_dac_rate(struct ali_state *state,
559 unsigned int rate)
560{
561 struct dmabuf *dmabuf = &state->dmabuf;
562 u32 new_rate;
563 struct ac97_codec *codec = state->card->ac97_codec[0];
564
565 if (!(state->card->ac97_features & 0x0001)) {
566 dmabuf->rate = clocking;
567 return clocking;
568 }
569
570 if (rate > 48000)
571 rate = 48000;
572 if (rate < 8000)
573 rate = 8000;
574 dmabuf->rate = rate;
575
576 /*
577 * Adjust for misclocked crap
578 */
579
580 rate = (rate * clocking) / 48000;
581
582 if (strict_clocking && rate < 8000) {
583 rate = 8000;
584 dmabuf->rate = (rate * 48000) / clocking;
585 }
586
587 new_rate = ac97_set_dac_rate(codec, rate);
588 if (new_rate != rate) {
589 dmabuf->rate = (new_rate * 48000) / clocking;
590 }
591 rate = new_rate;
592 return dmabuf->rate;
593}
594
595/* set recording sample rate */
596static unsigned int ali_set_adc_rate(struct ali_state *state,
597 unsigned int rate)
598{
599 struct dmabuf *dmabuf = &state->dmabuf;
600 u32 new_rate;
601 struct ac97_codec *codec = state->card->ac97_codec[0];
602
603 if (!(state->card->ac97_features & 0x0001)) {
604 dmabuf->rate = clocking;
605 return clocking;
606 }
607
608 if (rate > 48000)
609 rate = 48000;
610 if (rate < 8000)
611 rate = 8000;
612 dmabuf->rate = rate;
613
614 /*
615 * Adjust for misclocked crap
616 */
617
618 rate = (rate * clocking) / 48000;
619 if (strict_clocking && rate < 8000) {
620 rate = 8000;
621 dmabuf->rate = (rate * 48000) / clocking;
622 }
623
624 new_rate = ac97_set_adc_rate(codec, rate);
625
626 if (new_rate != rate) {
627 dmabuf->rate = (new_rate * 48000) / clocking;
628 rate = new_rate;
629 }
630 return dmabuf->rate;
631}
632
633/* set codec independent spdifout sample rate */
634static unsigned int ali_set_codecspdifout_rate(struct ali_state *state,
635 unsigned int rate)
636{
637 struct dmabuf *dmabuf = &state->dmabuf;
638
639 if (!(state->card->ac97_features & 0x0001)) {
640 dmabuf->rate = clocking;
641 return clocking;
642 }
643
644 if (rate > 48000)
645 rate = 48000;
646 if (rate < 8000)
647 rate = 8000;
648 dmabuf->rate = rate;
649
650 return dmabuf->rate;
651}
652
653/* set controller independent spdif out function sample rate */
654static void ali_set_spdifout_rate(struct ali_state *state,
655 unsigned int rate)
656{
657 unsigned char ch_st_sel;
658 unsigned short status_rate;
659
660 switch (rate) {
661 case 44100:
662 status_rate = 0;
663 break;
664 case 32000:
665 status_rate = 0x300;
666 break;
667 case 48000:
668 default:
669 status_rate = 0x200;
670 break;
671 }
672
673 ch_st_sel = inb(state->card->iobase + ALI_SPDIFICS) & ALI_SPDIF_OUT_CH_STATUS; //select spdif_out
674
675 ch_st_sel |= 0x80; //select right
676 outb(ch_st_sel, (state->card->iobase + ALI_SPDIFICS));
677 outb(status_rate | 0x20, (state->card->iobase + ALI_SPDIFCSR + 2));
678
679 ch_st_sel &= (~0x80); //select left
680 outb(ch_st_sel, (state->card->iobase + ALI_SPDIFICS));
681 outw(status_rate | 0x10, (state->card->iobase + ALI_SPDIFCSR + 2));
682}
683
684/* get current playback/recording dma buffer pointer (byte offset from LBA),
685 called with spinlock held! */
686
687static inline unsigned ali_get_dma_addr(struct ali_state *state, int rec)
688{
689 struct dmabuf *dmabuf = &state->dmabuf;
690 unsigned int civ, offset, port, port_picb;
691 unsigned int data;
692
693 if (!dmabuf->enable)
694 return 0;
695
696 if (rec == 1)
697 port = state->card->iobase + dmabuf->read_channel->port;
698 else if (rec == 2)
699 port = state->card->iobase + dmabuf->codec_spdifout_channel->port;
700 else if (rec == 3)
701 port = state->card->iobase + dmabuf->controller_spdifout_channel->port;
702 else
703 port = state->card->iobase + dmabuf->write_channel->port;
704
705 port_picb = port + OFF_PICB;
706
707 do {
708 civ = inb(port + OFF_CIV) & 31;
709 offset = inw(port_picb);
710 /* Must have a delay here! */
711 if (offset == 0)
712 udelay(1);
713
714 /* Reread both registers and make sure that that total
715 * offset from the first reading to the second is 0.
716 * There is an issue with SiS hardware where it will count
717 * picb down to 0, then update civ to the next value,
718 * then set the new picb to fragsize bytes. We can catch
719 * it between the civ update and the picb update, making
720 * it look as though we are 1 fragsize ahead of where we
721 * are. The next to we get the address though, it will
722 * be back in thdelay is more than long enough
723 * that we won't have to worry about the chip still being
724 * out of sync with reality ;-)
725 */
726 } while (civ != (inb(port + OFF_CIV) & 31) || offset != inw(port_picb));
727
728 data = ((civ + 1) * dmabuf->fragsize - (2 * offset)) % dmabuf->dmasize;
729 if (inw(port_picb) == 0)
730 data -= 2048;
731
732 return data;
733}
734
735/* Stop recording (lock held) */
736static inline void __stop_adc(struct ali_state *state)
737{
738 struct dmabuf *dmabuf = &state->dmabuf;
739 struct ali_card *card = state->card;
740
741 dmabuf->enable &= ~ADC_RUNNING;
742
743 outl((1 << 18) | (1 << 16), card->iobase + ALI_DMACR);
744 udelay(1);
745
746 outb(0, card->iobase + PI_CR);
747 while (inb(card->iobase + PI_CR) != 0);
748
749 // now clear any latent interrupt bits (like the halt bit)
750 outb(inb(card->iobase + PI_SR) | 0x001e, card->iobase + PI_SR);
751 outl(inl(card->iobase + ALI_INTERRUPTSR) & INT_PCMIN, card->iobase + ALI_INTERRUPTSR);
752}
753
754static void stop_adc(struct ali_state *state)
755{
756 struct ali_card *card = state->card;
757 unsigned long flags;
758 spin_lock_irqsave(&card->lock, flags);
759 __stop_adc(state);
760 spin_unlock_irqrestore(&card->lock, flags);
761}
762
763static inline void __start_adc(struct ali_state *state)
764{
765 struct dmabuf *dmabuf = &state->dmabuf;
766
767 if (dmabuf->count < dmabuf->dmasize && dmabuf->ready
768 && !dmabuf->enable && (dmabuf->trigger & PCM_ENABLE_INPUT)) {
769 dmabuf->enable |= ADC_RUNNING;
770 outb((1 << 4) | (1 << 2), state->card->iobase + PI_CR);
771 if (state->card->channel[0].used == 1)
772 outl(1, state->card->iobase + ALI_DMACR); // DMA CONTROL REGISTRER
773 udelay(100);
774 if (state->card->channel[2].used == 1)
775 outl((1 << 2), state->card->iobase + ALI_DMACR); //DMA CONTROL REGISTER
776 udelay(100);
777 }
778}
779
780static void start_adc(struct ali_state *state)
781{
782 struct ali_card *card = state->card;
783 unsigned long flags;
784
785 spin_lock_irqsave(&card->lock, flags);
786 __start_adc(state);
787 spin_unlock_irqrestore(&card->lock, flags);
788}
789
790/* stop playback (lock held) */
791static inline void __stop_dac(struct ali_state *state)
792{
793 struct dmabuf *dmabuf = &state->dmabuf;
794 struct ali_card *card = state->card;
795
796 dmabuf->enable &= ~DAC_RUNNING;
797 outl(0x00020000, card->iobase + 0x08);
798 outb(0, card->iobase + PO_CR);
799 while (inb(card->iobase + PO_CR) != 0)
800 cpu_relax();
801
802 outb(inb(card->iobase + PO_SR) | 0x001e, card->iobase + PO_SR);
803
804 outl(inl(card->iobase + ALI_INTERRUPTSR) & INT_PCMOUT, card->iobase + ALI_INTERRUPTSR);
805}
806
807static void stop_dac(struct ali_state *state)
808{
809 struct ali_card *card = state->card;
810 unsigned long flags;
811 spin_lock_irqsave(&card->lock, flags);
812 __stop_dac(state);
813 spin_unlock_irqrestore(&card->lock, flags);
814}
815
816static inline void __start_dac(struct ali_state *state)
817{
818 struct dmabuf *dmabuf = &state->dmabuf;
819 if (dmabuf->count > 0 && dmabuf->ready && !dmabuf->enable &&
820 (dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
821 dmabuf->enable |= DAC_RUNNING;
822 outb((1 << 4) | (1 << 2), state->card->iobase + PO_CR);
823 outl((1 << 1), state->card->iobase + 0x08); //dma control register
824 }
825}
826
827static void start_dac(struct ali_state *state)
828{
829 struct ali_card *card = state->card;
830 unsigned long flags;
831 spin_lock_irqsave(&card->lock, flags);
832 __start_dac(state);
833 spin_unlock_irqrestore(&card->lock, flags);
834}
835
836/* stop codec and controller spdif out (lock held) */
837static inline void __stop_spdifout(struct ali_state *state)
838{
839 struct dmabuf *dmabuf = &state->dmabuf;
840 struct ali_card *card = state->card;
841
842 if (codec_independent_spdif_locked > 0) {
843 dmabuf->enable &= ~CODEC_SPDIFOUT_RUNNING;
844 outl((1 << 19), card->iobase + 0x08);
845 outb(0, card->iobase + CODECSPDIFOUT_CR);
846
847 while (inb(card->iobase + CODECSPDIFOUT_CR) != 0)
848 cpu_relax();
849
850 outb(inb(card->iobase + CODECSPDIFOUT_SR) | 0x001e, card->iobase + CODECSPDIFOUT_SR);
851 outl(inl(card->iobase + ALI_INTERRUPTSR) & INT_CODECSPDIFOUT, card->iobase + ALI_INTERRUPTSR);
852 } else {
853 if (controller_independent_spdif_locked > 0) {
854 dmabuf->enable &= ~CONTROLLER_SPDIFOUT_RUNNING;
855 outl((1 << 23), card->iobase + 0x08);
856 outb(0, card->iobase + CONTROLLERSPDIFOUT_CR);
857 while (inb(card->iobase + CONTROLLERSPDIFOUT_CR) != 0)
858 cpu_relax();
859 outb(inb(card->iobase + CONTROLLERSPDIFOUT_SR) | 0x001e, card->iobase + CONTROLLERSPDIFOUT_SR);
860 outl(inl(card->iobase + ALI_INTERRUPTSR) & INT_SPDIFOUT, card->iobase + ALI_INTERRUPTSR);
861 }
862 }
863}
864
865static void stop_spdifout(struct ali_state *state)
866{
867 struct ali_card *card = state->card;
868 unsigned long flags;
869 spin_lock_irqsave(&card->lock, flags);
870 __stop_spdifout(state);
871 spin_unlock_irqrestore(&card->lock, flags);
872}
873
874static inline void __start_spdifout(struct ali_state *state)
875{
876 struct dmabuf *dmabuf = &state->dmabuf;
877 if (dmabuf->count > 0 && dmabuf->ready && !dmabuf->enable &&
878 (dmabuf->trigger & SPDIF_ENABLE_OUTPUT)) {
879 if (codec_independent_spdif_locked > 0) {
880 dmabuf->enable |= CODEC_SPDIFOUT_RUNNING;
881 outb((1 << 4) | (1 << 2), state->card->iobase + CODECSPDIFOUT_CR);
882 outl((1 << 3), state->card->iobase + 0x08); //dma control register
883 } else {
884 if (controller_independent_spdif_locked > 0) {
885 dmabuf->enable |= CONTROLLER_SPDIFOUT_RUNNING;
886 outb((1 << 4) | (1 << 2), state->card->iobase + CONTROLLERSPDIFOUT_CR);
887 outl((1 << 7), state->card->iobase + 0x08); //dma control register
888 }
889 }
890 }
891}
892
893static void start_spdifout(struct ali_state *state)
894{
895 struct ali_card *card = state->card;
896 unsigned long flags;
897 spin_lock_irqsave(&card->lock, flags);
898 __start_spdifout(state);
899 spin_unlock_irqrestore(&card->lock, flags);
900}
901
902#define DMABUF_DEFAULTORDER (16-PAGE_SHIFT)
903#define DMABUF_MINORDER 1
904
905/* allocate DMA buffer, playback , recording,spdif out buffer should be allocated separately */
906static int alloc_dmabuf(struct ali_state *state)
907{
908 struct dmabuf *dmabuf = &state->dmabuf;
909 void *rawbuf = NULL;
910 int order, size;
911 struct page *page, *pend;
912
913 /* If we don't have any oss frag params, then use our default ones */
914 if (dmabuf->ossmaxfrags == 0)
915 dmabuf->ossmaxfrags = 4;
916 if (dmabuf->ossfragsize == 0)
917 dmabuf->ossfragsize = (PAGE_SIZE << DMABUF_DEFAULTORDER) / dmabuf->ossmaxfrags;
918 size = dmabuf->ossfragsize * dmabuf->ossmaxfrags;
919
920 if (dmabuf->rawbuf && (PAGE_SIZE << dmabuf->buforder) == size)
921 return 0;
922 /* alloc enough to satisfy the oss params */
923 for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--) {
924 if ((PAGE_SIZE << order) > size)
925 continue;
926 if ((rawbuf = pci_alloc_consistent(state->card->pci_dev,
927 PAGE_SIZE << order,
928 &dmabuf->dma_handle)))
929 break;
930 }
931 if (!rawbuf)
932 return -ENOMEM;
933
934 dmabuf->ready = dmabuf->mapped = 0;
935 dmabuf->rawbuf = rawbuf;
936 dmabuf->buforder = order;
937
938 /* now mark the pages as reserved; otherwise remap_pfn_range doesn't do what we want */
939 pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
940 for (page = virt_to_page(rawbuf); page <= pend; page++)
941 SetPageReserved(page);
942 return 0;
943}
944
945/* free DMA buffer */
946static void dealloc_dmabuf(struct ali_state *state)
947{
948 struct dmabuf *dmabuf = &state->dmabuf;
949 struct page *page, *pend;
950
951 if (dmabuf->rawbuf) {
952 /* undo marking the pages as reserved */
953 pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
954 for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
955 ClearPageReserved(page);
956 pci_free_consistent(state->card->pci_dev,
957 PAGE_SIZE << dmabuf->buforder,
958 dmabuf->rawbuf, dmabuf->dma_handle);
959 }
960 dmabuf->rawbuf = NULL;
961 dmabuf->mapped = dmabuf->ready = 0;
962}
963
/*
 * (Re)program the DMA scatter-gather ring for one channel.
 *
 * @rec selects the channel (per the assignments below): 0 = PCM write
 * (DAC), 1 = PCM read (ADC), 2 = codec S/PDIF out, 3 = controller
 * S/PDIF out.
 *
 * Stops every running engine, resets the software ring state, makes
 * sure a DMA buffer is allocated, derives the fragment geometry from
 * the OSS fragment settings, then loads the SG entries and resets the
 * channel's bus-master registers.  Returns 0 on success or the error
 * from alloc_dmabuf().
 */
static int prog_dmabuf(struct ali_state *state, unsigned rec)
{
	struct dmabuf *dmabuf = &state->dmabuf;
	struct ali_channel *c = NULL;
	struct sg_item *sg;
	unsigned long flags;
	int ret;
	unsigned fragint;	/* take an IOC interrupt every fragint SG entries */
	int i;

	/* Quiesce all engines before touching the ring. */
	spin_lock_irqsave(&state->card->lock, flags);
	if (dmabuf->enable & DAC_RUNNING)
		__stop_dac(state);
	if (dmabuf->enable & ADC_RUNNING)
		__stop_adc(state);
	if (dmabuf->enable & CODEC_SPDIFOUT_RUNNING)
		__stop_spdifout(state);
	if (dmabuf->enable & CONTROLLER_SPDIFOUT_RUNNING)
		__stop_spdifout(state);

	dmabuf->total_bytes = 0;
	dmabuf->count = dmabuf->error = 0;
	dmabuf->swptr = dmabuf->hwptr = 0;
	spin_unlock_irqrestore(&state->card->lock, flags);

	/* allocate DMA buffer, let alloc_dmabuf determine if we are already
	 * allocated well enough or if we should replace the current buffer
	 * (assuming one is already allocated, if it isn't, then allocate it).
	 */
	if ((ret = alloc_dmabuf(state)))
		return ret;

	/* FIXME: figure out all this OSS fragment stuff */
	/* I did, it now does what it should according to the OSS API. DL */
	/* We may not have realloced our dmabuf, but the fragment size to
	 * fragment number ratio may have changed, so go ahead and reprogram
	 * things
	 */

	/* Hardware ring geometry: SG_LEN equal-sized fragments covering the
	 * whole buffer.  fragsamples assumes 16-bit samples (bytes >> 1). */
	dmabuf->dmasize = PAGE_SIZE << dmabuf->buforder;
	dmabuf->numfrag = SG_LEN;
	dmabuf->fragsize = dmabuf->dmasize / dmabuf->numfrag;
	dmabuf->fragsamples = dmabuf->fragsize >> 1;
	dmabuf->userfragsize = dmabuf->ossfragsize;
	dmabuf->userfrags = dmabuf->dmasize / dmabuf->ossfragsize;

	memset(dmabuf->rawbuf, 0, dmabuf->dmasize);

	/* Map the user-visible fragment count onto an interrupt interval so
	 * the number of IOC interrupts matches ossmaxfrags. */
	if (dmabuf->ossmaxfrags == 4) {
		fragint = 8;
		dmabuf->fragshift = 2;
	} else if (dmabuf->ossmaxfrags == 8) {
		fragint = 4;
		dmabuf->fragshift = 3;
	} else if (dmabuf->ossmaxfrags == 16) {
		fragint = 2;
		dmabuf->fragshift = 4;
	} else {
		fragint = 1;
		dmabuf->fragshift = 5;
	}
	/*
	 * Now set up the ring
	 */

	if (rec == 1)
		c = dmabuf->read_channel;
	else if (rec == 2)
		c = dmabuf->codec_spdifout_channel;
	else if (rec == 3)
		c = dmabuf->controller_spdifout_channel;
	else if (rec == 0)
		c = dmabuf->write_channel;
	if (c != NULL) {
		sg = &c->sg[0];
		/*
		 * Load up 32 sg entries and take an interrupt at half
		 * way (we might want more interrupts later..)
		 */
		for (i = 0; i < dmabuf->numfrag; i++) {
			sg->busaddr =
			    virt_to_bus(dmabuf->rawbuf +
					dmabuf->fragsize * i);
			// the card will always be doing 16bit stereo
			sg->control = dmabuf->fragsamples;
			sg->control |= CON_BUFPAD;	//I modify
			// set us up to get IOC interrupts as often as needed to
			// satisfy numfrag requirements, no more
			if (((i + 1) % fragint) == 0) {
				sg->control |= CON_IOC;
			}
			sg++;
		}
		/* Reset the bus master and point it at the new ring. */
		spin_lock_irqsave(&state->card->lock, flags);
		outb(2, state->card->iobase + c->port + OFF_CR);	/* reset DMA machine */
		outl(virt_to_bus(&c->sg[0]), state->card->iobase + c->port + OFF_BDBAR);
		outb(0, state->card->iobase + c->port + OFF_CIV);
		outb(0, state->card->iobase + c->port + OFF_LVI);
		spin_unlock_irqrestore(&state->card->lock, flags);
	}
	/* set the ready flag for the dma buffer */
	dmabuf->ready = 1;
	return 0;
}
1068
/*
 * Refresh the hardware Last Valid Index (LVI) register for the channel
 * selected by @rec (0 = PCM out, 1 = record, 2 = codec S/PDIF out,
 * 3 = controller S/PDIF out) and, when the engine is stopped but the
 * buffer is primed, restart it.  Caller must hold the card spinlock
 * (see ali_update_lvi() for the locked wrapper).
 */
static void __ali_update_lvi(struct ali_state *state, int rec)
{
	struct dmabuf *dmabuf = &state->dmabuf;
	int x, port;
	port = state->card->iobase;
	if (rec == 1)
		port += dmabuf->read_channel->port;
	else if (rec == 2)
		port += dmabuf->codec_spdifout_channel->port;
	else if (rec == 3)
		port += dmabuf->controller_spdifout_channel->port;
	else if (rec == 0)
		port += dmabuf->write_channel->port;
	/* if we are currently stopped, then our CIV is actually set to our
	 * *last* sg segment and we are ready to wrap to the next. However,
	 * if we set our LVI to the last sg segment, then it won't wrap to
	 * the next sg segment, it won't even get a start. So, instead, when
	 * we are stopped, we set both the LVI value and also we increment
	 * the CIV value to the next sg segment to be played so that when
	 * we call start_{dac,adc}, things will operate properly
	 */
	/* NOTE(review): in this restart ladder rec is merely zero/non-zero;
	 * the non-zero cases (ADC vs. the two S/PDIF outputs) are told apart
	 * by the trigger flags (PCM_ENABLE_INPUT vs. SPDIF_ENABLE_OUTPUT)
	 * and the codec/controller lock variables. */
	if (!dmabuf->enable && dmabuf->ready) {
		if (rec && dmabuf->count < dmabuf->dmasize && (dmabuf->trigger & PCM_ENABLE_INPUT)) {
			/* Bump CIV (mod 32 SG entries), then spin until the
			 * engine reports it is running. */
			outb((inb(port + OFF_CIV) + 1) & 31, port + OFF_LVI);
			__start_adc(state);
			while (! (inb(port + OFF_CR) & ((1 << 4) | (1 << 2))))
				cpu_relax();
		} else if (!rec && dmabuf->count && (dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
			outb((inb(port + OFF_CIV) + 1) & 31, port + OFF_LVI);
			__start_dac(state);
			while (!(inb(port + OFF_CR) & ((1 << 4) | (1 << 2))))
				cpu_relax();
		} else if (rec && dmabuf->count && (dmabuf->trigger & SPDIF_ENABLE_OUTPUT)) {
			if (codec_independent_spdif_locked > 0) {
				// outb((inb(port+OFF_CIV))&31, port+OFF_LVI);
				outb((inb(port + OFF_CIV) + 1) & 31, port + OFF_LVI);
				__start_spdifout(state);
				while (!(inb(port + OFF_CR) & ((1 << 4) | (1 << 2))))
					cpu_relax();
			} else {
				if (controller_independent_spdif_locked > 0) {
					outb((inb(port + OFF_CIV) + 1) & 31, port + OFF_LVI);
					__start_spdifout(state);
					while (!(inb(port + OFF_CR) & ((1 << 4) | (1 << 2))))
						cpu_relax();
				}
			}
		}
	}

	/* swptr - 1 is the tail of our transfer */
	x = (dmabuf->dmasize + dmabuf->swptr - 1) % dmabuf->dmasize;
	x /= dmabuf->fragsize;
	outb(x, port + OFF_LVI);
}
1124
1125static void ali_update_lvi(struct ali_state *state, int rec)
1126{
1127 struct dmabuf *dmabuf = &state->dmabuf;
1128 unsigned long flags;
1129 if (!dmabuf->ready)
1130 return;
1131 spin_lock_irqsave(&state->card->lock, flags);
1132 __ali_update_lvi(state, rec);
1133 spin_unlock_irqrestore(&state->card->lock, flags);
1134}
1135
/* Update buffer management pointers, especially dmabuf->count and
 * dmabuf->hwptr, from the hardware's current DMA position.  Detects
 * over/underruns by comparing CIV against LVI and wakes sleepers once a
 * user fragment's worth of space (playback) or data (record) exists.
 * Caller must hold the card spinlock.
 */
static void ali_update_ptr(struct ali_state *state)
{
	struct dmabuf *dmabuf = &state->dmabuf;
	unsigned hwptr;
	int diff;

	/* error handling and process wake up for ADC */
	if (dmabuf->enable == ADC_RUNNING) {
		/* update hardware pointer */
		hwptr = ali_get_dma_addr(state, 1);
		/* bytes consumed by hardware since last update (mod ring size) */
		diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;
		dmabuf->hwptr = hwptr;
		dmabuf->total_bytes += diff;
		dmabuf->count += diff;
		if (dmabuf->count > dmabuf->dmasize) {
			/* buffer underrun or buffer overrun */
			/* this is normal for the end of a read */
			/* only give an error if we went past the */
			/* last valid sg entry */
			if ((inb(state->card->iobase + PI_CIV) & 31) != (inb(state->card->iobase + PI_LVI) & 31)) {
				printk(KERN_WARNING "ali_audio: DMA overrun on read\n");
				dmabuf->error++;
			}
		}
		if (dmabuf->count > dmabuf->userfragsize)
			wake_up(&dmabuf->wait);
	}
	/* error handling and process wake up for DAC */
	if (dmabuf->enable == DAC_RUNNING) {
		/* update hardware pointer */
		hwptr = ali_get_dma_addr(state, 0);
		diff =
		    (dmabuf->dmasize + hwptr -
		     dmabuf->hwptr) % dmabuf->dmasize;
#if defined(DEBUG_INTERRUPTS) || defined(DEBUG_MMAP)
		printk("DAC HWP %d,%d,%d\n", hwptr, dmabuf->hwptr, diff);
#endif
		dmabuf->hwptr = hwptr;
		dmabuf->total_bytes += diff;
		dmabuf->count -= diff;
		if (dmabuf->count < 0) {
			/* buffer underrun or buffer overrun */
			/* this is normal for the end of a write */
			/* only give an error if we went past the */
			/* last valid sg entry */
			if ((inb(state->card->iobase + PO_CIV) & 31) != (inb(state->card->iobase + PO_LVI) & 31)) {
				printk(KERN_WARNING "ali_audio: DMA overrun on write\n");
				printk(KERN_DEBUG "ali_audio: CIV %d, LVI %d, hwptr %x, count %d\n",
				       inb(state->card->iobase + PO_CIV) & 31,
				       inb(state->card->iobase + PO_LVI) & 31,
				       dmabuf->hwptr,
				       dmabuf->count);
				dmabuf->error++;
			}
		}
		if (dmabuf->count < (dmabuf->dmasize - dmabuf->userfragsize))
			wake_up(&dmabuf->wait);
	}

	/* error handling and process wake up for CODEC SPDIF OUT */
	if (dmabuf->enable == CODEC_SPDIFOUT_RUNNING) {
		/* update hardware pointer */
		hwptr = ali_get_dma_addr(state, 2);
		diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;
		dmabuf->hwptr = hwptr;
		dmabuf->total_bytes += diff;
		dmabuf->count -= diff;
		if (dmabuf->count < 0) {
			/* buffer underrun or buffer overrun */
			/* this is normal for the end of a write */
			/* only give an error if we went past the */
			/* last valid sg entry */
			if ((inb(state->card->iobase + CODECSPDIFOUT_CIV) & 31) != (inb(state->card->iobase + CODECSPDIFOUT_LVI) & 31)) {
				printk(KERN_WARNING "ali_audio: DMA overrun on write\n");
				printk(KERN_DEBUG "ali_audio: CIV %d, LVI %d, hwptr %x, count %d\n",
				       inb(state->card->iobase + CODECSPDIFOUT_CIV) & 31,
				       inb(state->card->iobase + CODECSPDIFOUT_LVI) & 31,
				       dmabuf->hwptr, dmabuf->count);
				dmabuf->error++;
			}
		}
		if (dmabuf->count < (dmabuf->dmasize - dmabuf->userfragsize))
			wake_up(&dmabuf->wait);
	}
	/* error handling and process wake up for CONTROLLER SPDIF OUT */
	if (dmabuf->enable == CONTROLLER_SPDIFOUT_RUNNING) {
		/* update hardware pointer */
		hwptr = ali_get_dma_addr(state, 3);
		diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;
		dmabuf->hwptr = hwptr;
		dmabuf->total_bytes += diff;
		dmabuf->count -= diff;
		if (dmabuf->count < 0) {
			/* buffer underrun or buffer overrun */
			/* this is normal for the end of a write */
			/* only give an error if we went past the */
			/* last valid sg entry */
			if ((inb(state->card->iobase + CONTROLLERSPDIFOUT_CIV) & 31) != (inb(state->card->iobase + CONTROLLERSPDIFOUT_LVI) & 31)) {
				printk(KERN_WARNING
				       "ali_audio: DMA overrun on write\n");
				printk("ali_audio: CIV %d, LVI %d, hwptr %x, "
				       "count %d\n",
				       inb(state->card->iobase + CONTROLLERSPDIFOUT_CIV) & 31,
				       inb(state->card->iobase + CONTROLLERSPDIFOUT_LVI) & 31,
				       dmabuf->hwptr, dmabuf->count);
				dmabuf->error++;
			}
		}
		if (dmabuf->count < (dmabuf->dmasize - dmabuf->userfragsize))
			wake_up(&dmabuf->wait);
	}
}
1249
1250static inline int ali_get_free_write_space(struct
1251 ali_state
1252 *state)
1253{
1254 struct dmabuf *dmabuf = &state->dmabuf;
1255 int free;
1256
1257 if (dmabuf->count < 0) {
1258 dmabuf->count = 0;
1259 dmabuf->swptr = dmabuf->hwptr;
1260 }
1261 free = dmabuf->dmasize - dmabuf->swptr;
1262 if ((dmabuf->count + free) > dmabuf->dmasize){
1263 free = dmabuf->dmasize - dmabuf->count;
1264 }
1265 return free;
1266}
1267
1268static inline int ali_get_available_read_data(struct
1269 ali_state
1270 *state)
1271{
1272 struct dmabuf *dmabuf = &state->dmabuf;
1273 int avail;
1274 ali_update_ptr(state);
1275 // catch overruns during record
1276 if (dmabuf->count > dmabuf->dmasize) {
1277 dmabuf->count = dmabuf->dmasize;
1278 dmabuf->swptr = dmabuf->hwptr;
1279 }
1280 avail = dmabuf->count;
1281 avail -= (dmabuf->hwptr % dmabuf->fragsize);
1282 if (avail < 0)
1283 return (0);
1284 return (avail);
1285}
1286
/*
 * Block until the DAC has played everything queued in the DMA buffer,
 * then stop it.  If @signals_allowed, a pending signal aborts the wait
 * and -ERESTARTSYS is returned (with data still queued); otherwise the
 * drain ignores signals.  mmap()ed buffers belong to userspace, so they
 * are not drained -- the DAC is simply stopped.  Returns 0 on success.
 */
static int drain_dac(struct ali_state *state, int signals_allowed)
{

	DECLARE_WAITQUEUE(wait, current);
	struct dmabuf *dmabuf = &state->dmabuf;
	unsigned long flags;
	unsigned long tmo;
	int count;
	if (!dmabuf->ready)
		return 0;
	if (dmabuf->mapped) {
		stop_dac(state);
		return 0;
	}
	add_wait_queue(&dmabuf->wait, &wait);
	for (;;) {

		spin_lock_irqsave(&state->card->lock, flags);
		ali_update_ptr(state);
		count = dmabuf->count;
		spin_unlock_irqrestore(&state->card->lock, flags);
		if (count <= 0)
			break;
		/*
		 * This will make sure that our LVI is correct, that our
		 * pointer is updated, and that the DAC is running. We
		 * have to force the setting of dmabuf->trigger to avoid
		 * any possible deadlocks.
		 */
		if (!dmabuf->enable) {
			dmabuf->trigger = PCM_ENABLE_OUTPUT;
			ali_update_lvi(state, 0);
		}
		if (signal_pending(current) && signals_allowed) {
			break;
		}

		/* It seems that we have to set the current state to
		 * TASK_INTERRUPTIBLE every time to make the process
		 * really go to sleep. This also has to be *after* the
		 * update_ptr() call because update_ptr is likely to
		 * do a wake_up() which will unset this before we ever
		 * try to sleep, resulting in a tight loop in this code
		 * instead of actually sleeping and waiting for an
		 * interrupt to wake us up!
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * set the timeout to significantly longer than it *should*
		 * take for the DAC to drain the DMA buffer
		 */
		tmo = (count * HZ) / (dmabuf->rate);
		if (!schedule_timeout(tmo >= 2 ? tmo : 2)) {
			printk(KERN_ERR "ali_audio: drain_dac, dma timeout?\n");
			count = 0;
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&dmabuf->wait, &wait);
	if (count > 0 && signal_pending(current) && signals_allowed)
		return -ERESTARTSYS;
	stop_dac(state);
	return 0;
}
1352
1353
/*
 * S/PDIF counterpart of drain_dac(): block until the queued S/PDIF data
 * has been played out, then stop the S/PDIF engine.  Which of the two
 * engines (codec- or controller-independent) is kicked depends on the
 * corresponding *_spdif_locked globals.  Signal/timeout semantics are
 * identical to drain_dac().
 */
static int drain_spdifout(struct ali_state *state, int signals_allowed)
{

	DECLARE_WAITQUEUE(wait, current);
	struct dmabuf *dmabuf = &state->dmabuf;
	unsigned long flags;
	unsigned long tmo;
	int count;
	if (!dmabuf->ready)
		return 0;
	if (dmabuf->mapped) {
		stop_spdifout(state);
		return 0;
	}
	add_wait_queue(&dmabuf->wait, &wait);
	for (;;) {

		spin_lock_irqsave(&state->card->lock, flags);
		ali_update_ptr(state);
		count = dmabuf->count;
		spin_unlock_irqrestore(&state->card->lock, flags);
		if (count <= 0)
			break;
		/*
		 * This will make sure that our LVI is correct, that our
		 * pointer is updated, and that the engine is running. We
		 * have to force the setting of dmabuf->trigger to avoid
		 * any possible deadlocks.
		 */
		if (!dmabuf->enable) {
			if (codec_independent_spdif_locked > 0) {
				dmabuf->trigger = SPDIF_ENABLE_OUTPUT;
				ali_update_lvi(state, 2);
			} else {
				if (controller_independent_spdif_locked > 0) {
					dmabuf->trigger = SPDIF_ENABLE_OUTPUT;
					ali_update_lvi(state, 3);
				}
			}
		}
		if (signal_pending(current) && signals_allowed) {
			break;
		}

		/* It seems that we have to set the current state to
		 * TASK_INTERRUPTIBLE every time to make the process
		 * really go to sleep. This also has to be *after* the
		 * update_ptr() call because update_ptr is likely to
		 * do a wake_up() which will unset this before we ever
		 * try to sleep, resulting in a tight loop in this code
		 * instead of actually sleeping and waiting for an
		 * interrupt to wake us up!
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * set the timeout to significantly longer than it *should*
		 * take for the engine to drain the DMA buffer
		 */
		tmo = (count * HZ) / (dmabuf->rate);
		if (!schedule_timeout(tmo >= 2 ? tmo : 2)) {
			printk(KERN_ERR "ali_audio: drain_spdifout, dma timeout?\n");
			count = 0;
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&dmabuf->wait, &wait);
	if (count > 0 && signal_pending(current) && signals_allowed)
		return -ERESTARTSYS;
	stop_spdifout(state);
	return 0;
}
1426
1427static void ali_channel_interrupt(struct ali_card *card)
1428{
1429 int i, count;
1430
1431 for (i = 0; i < NR_HW_CH; i++) {
1432 struct ali_state *state = card->states[i];
1433 struct ali_channel *c = NULL;
1434 struct dmabuf *dmabuf;
1435 unsigned long port = card->iobase;
1436 u16 status;
1437 if (!state)
1438 continue;
1439 if (!state->dmabuf.ready)
1440 continue;
1441 dmabuf = &state->dmabuf;
1442 if (codec_independent_spdif_locked > 0) {
1443 if (dmabuf->enable & CODEC_SPDIFOUT_RUNNING) {
1444 c = dmabuf->codec_spdifout_channel;
1445 }
1446 } else {
1447 if (controller_independent_spdif_locked > 0) {
1448 if (dmabuf->enable & CONTROLLER_SPDIFOUT_RUNNING)
1449 c = dmabuf->controller_spdifout_channel;
1450 } else {
1451 if (dmabuf->enable & DAC_RUNNING) {
1452 c = dmabuf->write_channel;
1453 } else if (dmabuf->enable & ADC_RUNNING) {
1454 c = dmabuf->read_channel;
1455 } else
1456 continue;
1457 }
1458 }
1459 port += c->port;
1460
1461 status = inw(port + OFF_SR);
1462
1463 if (status & DMA_INT_COMPLETE) {
1464 /* only wake_up() waiters if this interrupt signals
1465 * us being beyond a userfragsize of data open or
1466 * available, and ali_update_ptr() does that for
1467 * us
1468 */
1469 ali_update_ptr(state);
1470 }
1471
1472 if (status & DMA_INT_LVI) {
1473 ali_update_ptr(state);
1474 wake_up(&dmabuf->wait);
1475
1476 if (dmabuf->enable & DAC_RUNNING)
1477 count = dmabuf->count;
1478 else if (dmabuf->enable & ADC_RUNNING)
1479 count = dmabuf->dmasize - dmabuf->count;
1480 else if (dmabuf->enable & CODEC_SPDIFOUT_RUNNING)
1481 count = dmabuf->count;
1482 else if (dmabuf->enable & CONTROLLER_SPDIFOUT_RUNNING)
1483 count = dmabuf->count;
1484 else count = 0;
1485
1486 if (count > 0) {
1487 if (dmabuf->enable & DAC_RUNNING)
1488 outl((1 << 1), state->card->iobase + ALI_DMACR);
1489 else if (dmabuf->enable & CODEC_SPDIFOUT_RUNNING)
1490 outl((1 << 3), state->card->iobase + ALI_DMACR);
1491 else if (dmabuf->enable & CONTROLLER_SPDIFOUT_RUNNING)
1492 outl((1 << 7), state->card->iobase + ALI_DMACR);
1493 } else {
1494 if (dmabuf->enable & DAC_RUNNING)
1495 __stop_dac(state);
1496 if (dmabuf->enable & ADC_RUNNING)
1497 __stop_adc(state);
1498 if (dmabuf->enable & CODEC_SPDIFOUT_RUNNING)
1499 __stop_spdifout(state);
1500 if (dmabuf->enable & CONTROLLER_SPDIFOUT_RUNNING)
1501 __stop_spdifout(state);
1502 dmabuf->enable = 0;
1503 wake_up(&dmabuf->wait);
1504 }
1505
1506 }
1507 if (!(status & DMA_INT_DCH)) {
1508 ali_update_ptr(state);
1509 wake_up(&dmabuf->wait);
1510 if (dmabuf->enable & DAC_RUNNING)
1511 count = dmabuf->count;
1512 else if (dmabuf->enable & ADC_RUNNING)
1513 count = dmabuf->dmasize - dmabuf->count;
1514 else if (dmabuf->enable & CODEC_SPDIFOUT_RUNNING)
1515 count = dmabuf->count;
1516 else if (dmabuf->enable & CONTROLLER_SPDIFOUT_RUNNING)
1517 count = dmabuf->count;
1518 else
1519 count = 0;
1520
1521 if (count > 0) {
1522 if (dmabuf->enable & DAC_RUNNING)
1523 outl((1 << 1), state->card->iobase + ALI_DMACR);
1524 else if (dmabuf->enable & CODEC_SPDIFOUT_RUNNING)
1525 outl((1 << 3), state->card->iobase + ALI_DMACR);
1526 else if (dmabuf->enable & CONTROLLER_SPDIFOUT_RUNNING)
1527 outl((1 << 7), state->card->iobase + ALI_DMACR);
1528 } else {
1529 if (dmabuf->enable & DAC_RUNNING)
1530 __stop_dac(state);
1531 if (dmabuf->enable & ADC_RUNNING)
1532 __stop_adc(state);
1533 if (dmabuf->enable & CODEC_SPDIFOUT_RUNNING)
1534 __stop_spdifout(state);
1535 if (dmabuf->enable & CONTROLLER_SPDIFOUT_RUNNING)
1536 __stop_spdifout(state);
1537 dmabuf->enable = 0;
1538 wake_up(&dmabuf->wait);
1539 }
1540 }
1541 outw(status & DMA_INT_MASK, port + OFF_SR);
1542 }
1543}
1544
/*
 * Top-level interrupt handler (shared IRQ line).  Reads the global
 * interrupt status register; if none of our bits are set, returns
 * IRQ_NONE.  Otherwise dispatches to ali_channel_interrupt() and
 * acknowledges the global status bits.
 */
static irqreturn_t ali_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct ali_card *card = (struct ali_card *) dev_id;
	u32 status;
	u16 status2;

	spin_lock(&card->lock);
	status = inl(card->iobase + ALI_INTERRUPTSR);
	if (!(status & INT_MASK)) {
		spin_unlock(&card->lock);
		return IRQ_NONE;	/* not for us */
	}

	if (codec_independent_spdif_locked > 0) {
		/* NOTE(review): on the very first interrupt in codec-
		 * independent S/PDIF mode, set two bits in register 0x76
		 * instead of servicing channels.  The `globel` flag makes
		 * this a one-shot; exact hardware semantics unconfirmed. */
		if (globel == 0) {
			globel += 1;
			status2 = inw(card->iobase + 0x76);
			outw(status2 | 0x000c, card->iobase + 0x76);
		} else {
			if (status & (INT_PCMOUT | INT_PCMIN | INT_MICIN | INT_SPDIFOUT | INT_CODECSPDIFOUT))
				ali_channel_interrupt(card);
		}
	} else {
		if (status & (INT_PCMOUT | INT_PCMIN | INT_MICIN | INT_SPDIFOUT | INT_CODECSPDIFOUT))
			ali_channel_interrupt(card);
	}

	/* clear 'em */
	outl(status & INT_MASK, card->iobase + ALI_INTERRUPTSR);
	spin_unlock(&card->lock);
	return IRQ_HANDLED;
}
1577
/* in this loop, dmabuf.count signifies the amount of data that is
   waiting to be copied to the user's buffer. It is filled by the dma
   machine and drained by this loop. */

/*
 * read() file operation: copy recorded audio from the DMA ring to
 * userspace.  Allocates the record channel and programs the DMA buffer
 * on first use.  Blocks until data is available unless O_NONBLOCK is
 * set.  Returns bytes copied or a negative errno (-ENXIO if mmap()ed,
 * -ENODEV if the DAC owns the buffer, -EBUSY, -EFAULT, -EAGAIN,
 * -ERESTARTSYS).
 */
static ssize_t ali_read(struct file *file, char __user *buffer,
			size_t count, loff_t * ppos)
{
	struct ali_state *state = (struct ali_state *) file->private_data;
	/* NOTE(review): the NULL guard here is ineffective -- dmabuf and the
	 * checks below dereference state unconditionally anyway. */
	struct ali_card *card = state ? state->card : NULL;
	struct dmabuf *dmabuf = &state->dmabuf;
	ssize_t ret;
	unsigned long flags;
	unsigned int swptr;
	int cnt;
	DECLARE_WAITQUEUE(waita, current);
#ifdef DEBUG2
	printk("ali_audio: ali_read called, count = %d\n", count);
#endif
	if (dmabuf->mapped)
		return -ENXIO;
	if (dmabuf->enable & DAC_RUNNING)
		return -ENODEV;
	if (!dmabuf->read_channel) {
		dmabuf->ready = 0;
		dmabuf->read_channel = card->alloc_rec_pcm_channel(card);
		if (!dmabuf->read_channel) {
			return -EBUSY;
		}
	}
	if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
		return ret;
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	ret = 0;
	add_wait_queue(&dmabuf->wait, &waita);
	while (count > 0) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&card->lock, flags);
		/* While suspended, just sleep and retry (or bail on signal). */
		if (PM_SUSPENDED(card)) {
			spin_unlock_irqrestore(&card->lock, flags);
			schedule();
			if (signal_pending(current)) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			continue;
		}
		swptr = dmabuf->swptr;
		cnt = ali_get_available_read_data(state);
		// this is to make the copy_to_user simpler below
		if (cnt > (dmabuf->dmasize - swptr))
			cnt = dmabuf->dmasize - swptr;
		spin_unlock_irqrestore(&card->lock, flags);
		if (cnt > count)
			cnt = count;
		/* Lop off the last two bits to force the code to always
		 * write in full samples. This keeps software that sets
		 * O_NONBLOCK but doesn't check the return value of the
		 * write call from getting things out of state where they
		 * think a full 4 byte sample was written when really only
		 * a portion was, resulting in odd sound and stereo
		 * hysteresis.
		 */
		cnt &= ~0x3;
		if (cnt <= 0) {
			unsigned long tmo;
			/*
			 * Don't let us deadlock. The ADC won't start if
			 * dmabuf->trigger isn't set. A call to SETTRIGGER
			 * could have turned it off after we set it to on
			 * previously.
			 */
			dmabuf->trigger = PCM_ENABLE_INPUT;
			/*
			 * This does three things. Updates LVI to be correct,
			 * makes sure the ADC is running, and updates the
			 * hwptr.
			 */
			ali_update_lvi(state, 1);
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				goto done;
			}
			/* Set the timeout to how long it would take to fill
			 * two of our buffers. If we haven't been woke up
			 * by then, then we know something is wrong.
			 */
			tmo = (dmabuf->dmasize * HZ * 2) / (dmabuf->rate * 4);

			/* There are two situations when sleep_on_timeout returns, one is when
			   the interrupt is serviced correctly and the process is waked up by
			   ISR ON TIME. Another is when timeout is expired, which means that
			   either interrupt is NOT serviced correctly (pending interrupt) or it
			   is TOO LATE for the process to be scheduled to run (scheduler latency)
			   which results in a (potential) buffer overrun. And worse, there is
			   NOTHING we can do to prevent it. */
			if (!schedule_timeout(tmo >= 2 ? tmo : 2)) {
				printk(KERN_ERR
				       "ali_audio: recording schedule timeout, "
				       "dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
				       dmabuf->dmasize, dmabuf->fragsize,
				       dmabuf->count, dmabuf->hwptr,
				       dmabuf->swptr);
				/* a buffer overrun, we delay the recovery until next time the
				   while loop begin and we REALLY have space to record */
			}
			if (signal_pending(current)) {
				ret = ret ? ret : -ERESTARTSYS;
				goto done;
			}
			continue;
		}

		if (copy_to_user(buffer, dmabuf->rawbuf + swptr, cnt)) {
			if (!ret)
				ret = -EFAULT;
			goto done;
		}

		/* Advance the software pointer (wrapping) under the lock. */
		swptr = (swptr + cnt) % dmabuf->dmasize;
		spin_lock_irqsave(&card->lock, flags);
		if (PM_SUSPENDED(card)) {
			spin_unlock_irqrestore(&card->lock, flags);
			continue;
		}
		dmabuf->swptr = swptr;
		dmabuf->count -= cnt;
		spin_unlock_irqrestore(&card->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
	}
done:
	ali_update_lvi(state, 1);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&dmabuf->wait, &waita);
	return ret;
}
1718
/* in this loop, dmabuf.count signifies the amount of data that is waiting to be dma to
   the soundcard. it is drained by the dma machine and filled by this loop. */

/*
 * write() file operation: copy audio from userspace into the DMA ring.
 * The destination engine depends on the global S/PDIF lock state: codec
 * S/PDIF out, controller S/PDIF out, or the plain PCM DAC.  Allocates
 * the matching channel and programs the DMA buffer on first use; blocks
 * until space is free unless O_NONBLOCK.  Returns bytes queued or a
 * negative errno.
 */
static ssize_t ali_write(struct file *file,
			 const char __user *buffer, size_t count, loff_t * ppos)
{
	struct ali_state *state = (struct ali_state *) file->private_data;
	/* NOTE(review): the NULL guard is ineffective -- state is
	 * dereferenced unconditionally below. */
	struct ali_card *card = state ? state->card : NULL;
	struct dmabuf *dmabuf = &state->dmabuf;
	ssize_t ret;
	unsigned long flags;
	unsigned int swptr = 0;
	int cnt, x;
	DECLARE_WAITQUEUE(waita, current);
#ifdef DEBUG2
	printk("ali_audio: ali_write called, count = %d\n", count);
#endif
	if (dmabuf->mapped)
		return -ENXIO;
	if (dmabuf->enable & ADC_RUNNING)
		return -ENODEV;
	/* Allocate the output channel matching the current S/PDIF mode. */
	if (codec_independent_spdif_locked > 0) {
		if (!dmabuf->codec_spdifout_channel) {
			dmabuf->ready = 0;
			dmabuf->codec_spdifout_channel = card->alloc_codec_spdifout_channel(card);
			if (!dmabuf->codec_spdifout_channel)
				return -EBUSY;
		}
	} else {
		if (controller_independent_spdif_locked > 0) {
			if (!dmabuf->controller_spdifout_channel) {
				dmabuf->ready = 0;
				dmabuf->controller_spdifout_channel = card->alloc_controller_spdifout_channel(card);
				if (!dmabuf->controller_spdifout_channel)
					return -EBUSY;
			}
		} else {
			if (!dmabuf->write_channel) {
				dmabuf->ready = 0;
				dmabuf->write_channel =
				    card->alloc_pcm_channel(card);
				if (!dmabuf->write_channel)
					return -EBUSY;
			}
		}
	}

	/* Program the ring for whichever channel we are feeding. */
	if (codec_independent_spdif_locked > 0) {
		if (!dmabuf->ready && (ret = prog_dmabuf(state, 2)))
			return ret;
	} else {
		if (controller_independent_spdif_locked > 0) {
			if (!dmabuf->ready && (ret = prog_dmabuf(state, 3)))
				return ret;
		} else {

			if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
				return ret;
		}
	}
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;
	ret = 0;
	add_wait_queue(&dmabuf->wait, &waita);
	while (count > 0) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&state->card->lock, flags);
		/* While suspended, just sleep and retry (or bail on signal). */
		if (PM_SUSPENDED(card)) {
			spin_unlock_irqrestore(&card->lock, flags);
			schedule();
			if (signal_pending(current)) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			continue;
		}

		swptr = dmabuf->swptr;
		cnt = ali_get_free_write_space(state);
		/* Bound the maximum size to how much we can copy to the
		 * dma buffer before we hit the end. If we have more to
		 * copy then it will get done in a second pass of this
		 * loop starting from the beginning of the buffer.
		 */
		if (cnt > (dmabuf->dmasize - swptr))
			cnt = dmabuf->dmasize - swptr;
		spin_unlock_irqrestore(&state->card->lock, flags);
#ifdef DEBUG2
		printk(KERN_INFO
		       "ali_audio: ali_write: %d bytes available space\n",
		       cnt);
#endif
		if (cnt > count)
			cnt = count;
		/* Lop off the last two bits to force the code to always
		 * write in full samples. This keeps software that sets
		 * O_NONBLOCK but doesn't check the return value of the
		 * write call from getting things out of state where they
		 * think a full 4 byte sample was written when really only
		 * a portion was, resulting in odd sound and stereo
		 * hysteresis.
		 */
		cnt &= ~0x3;
		if (cnt <= 0) {
			unsigned long tmo;
			// There is data waiting to be played
			/*
			 * Force the trigger setting since we would
			 * deadlock with it set any other way
			 */
			if (codec_independent_spdif_locked > 0) {
				dmabuf->trigger = SPDIF_ENABLE_OUTPUT;
				ali_update_lvi(state, 2);
			} else {
				if (controller_independent_spdif_locked > 0) {
					dmabuf->trigger = SPDIF_ENABLE_OUTPUT;
					ali_update_lvi(state, 3);
				} else {

					dmabuf->trigger = PCM_ENABLE_OUTPUT;
					ali_update_lvi(state, 0);
				}
			}
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				goto ret;
			}
			/* Not strictly correct but works */
			tmo = (dmabuf->dmasize * HZ * 2) / (dmabuf->rate * 4);
			/* There are two situations when sleep_on_timeout returns, one is when
			   the interrupt is serviced correctly and the process is waked up by
			   ISR ON TIME. Another is when timeout is expired, which means that
			   either interrupt is NOT serviced correctly (pending interrupt) or it
			   is TOO LATE for the process to be scheduled to run (scheduler latency)
			   which results in a (potential) buffer underrun. And worse, there is
			   NOTHING we can do to prevent it. */

			/* FIXME - do timeout handling here !! */
			schedule_timeout(tmo >= 2 ? tmo : 2);

			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				goto ret;
			}
			continue;
		}
		if (copy_from_user(dmabuf->rawbuf + swptr, buffer, cnt)) {
			if (!ret)
				ret = -EFAULT;
			goto ret;
		}

		/* Advance the software pointer (wrapping) under the lock. */
		swptr = (swptr + cnt) % dmabuf->dmasize;
		spin_lock_irqsave(&state->card->lock, flags);
		if (PM_SUSPENDED(card)) {
			spin_unlock_irqrestore(&card->lock, flags);
			continue;
		}

		dmabuf->swptr = swptr;
		dmabuf->count += cnt;
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		spin_unlock_irqrestore(&state->card->lock, flags);
	}
	/* Zero-pad the tail of a partial fragment so stale data is not
	 * played past the end of what the caller wrote. */
	if (swptr % dmabuf->fragsize) {
		x = dmabuf->fragsize - (swptr % dmabuf->fragsize);
		memset(dmabuf->rawbuf + swptr, '\0', x);
	}
ret:
	if (codec_independent_spdif_locked > 0) {
		ali_update_lvi(state, 2);
	} else {
		if (controller_independent_spdif_locked > 0) {
			ali_update_lvi(state, 3);
		} else {
			ali_update_lvi(state, 0);
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&dmabuf->wait, &waita);
	return ret;
}
1905
1906/* No kernel lock - we have our own spinlock */
1907static unsigned int ali_poll(struct file *file, struct poll_table_struct
1908 *wait)
1909{
1910 struct ali_state *state = (struct ali_state *) file->private_data;
1911 struct dmabuf *dmabuf = &state->dmabuf;
1912 unsigned long flags;
1913 unsigned int mask = 0;
1914 if (!dmabuf->ready)
1915 return 0;
1916 poll_wait(file, &dmabuf->wait, wait);
1917 spin_lock_irqsave(&state->card->lock, flags);
1918 ali_update_ptr(state);
1919 if (file->f_mode & FMODE_READ && dmabuf->enable & ADC_RUNNING) {
1920 if (dmabuf->count >= (signed) dmabuf->fragsize)
1921 mask |= POLLIN | POLLRDNORM;
1922 }
1923 if (file->f_mode & FMODE_WRITE && (dmabuf->enable & (DAC_RUNNING|CODEC_SPDIFOUT_RUNNING|CONTROLLER_SPDIFOUT_RUNNING))) {
1924 if ((signed) dmabuf->dmasize >= dmabuf->count + (signed) dmabuf->fragsize)
1925 mask |= POLLOUT | POLLWRNORM;
1926 }
1927 spin_unlock_irqrestore(&state->card->lock, flags);
1928 return mask;
1929}
1930
/*
 * mmap() file operation: map the whole raw DMA buffer into userspace.
 * Allocates the write and/or read channel according to the VMA flags,
 * programs the (PCM) DMA buffer, and maps it with remap_pfn_range().
 * Only offset 0 and sizes up to the buffer size are accepted.  Runs
 * under the big kernel lock, as the legacy OSS drivers do.
 */
static int ali_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ali_state *state = (struct ali_state *) file->private_data;
	struct dmabuf *dmabuf = &state->dmabuf;
	int ret = -EINVAL;
	unsigned long size;
	lock_kernel();
	if (vma->vm_flags & VM_WRITE) {
		if (!dmabuf->write_channel && (dmabuf->write_channel = state->card->alloc_pcm_channel(state->card)) == NULL) {
			ret = -EBUSY;
			goto out;
		}
	}
	if (vma->vm_flags & VM_READ) {
		if (!dmabuf->read_channel && (dmabuf->read_channel = state->card->alloc_rec_pcm_channel(state->card)) == NULL) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* NOTE(review): always programs the PCM-out ring (rec == 0), even
	 * for a read-only mapping -- TODO confirm this is intended. */
	if ((ret = prog_dmabuf(state, 0)) != 0)
		goto out;
	ret = -EINVAL;
	if (vma->vm_pgoff != 0)
		goto out;
	size = vma->vm_end - vma->vm_start;
	if (size > (PAGE_SIZE << dmabuf->buforder))
		goto out;
	ret = -EAGAIN;
	if (remap_pfn_range(vma, vma->vm_start,
			    virt_to_phys(dmabuf->rawbuf) >> PAGE_SHIFT,
			    size, vma->vm_page_prot))
		goto out;
	/* Mapped mode: userspace owns the ring; disable trigger semantics. */
	dmabuf->mapped = 1;
	dmabuf->trigger = 0;
	ret = 0;
out:
	unlock_kernel();
	return ret;
}
1970
1971static int ali_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1972{
1973 struct ali_state *state = (struct ali_state *) file->private_data;
1974 struct ali_channel *c = NULL;
1975 struct dmabuf *dmabuf = &state->dmabuf;
1976 unsigned long flags;
1977 audio_buf_info abinfo;
1978 count_info cinfo;
1979 unsigned int i_scr;
1980 int val = 0, ret;
1981 struct ac97_codec *codec = state->card->ac97_codec[0];
1982 void __user *argp = (void __user *)arg;
1983 int __user *p = argp;
1984
1985#ifdef DEBUG
1986 printk("ali_audio: ali_ioctl, arg=0x%x, cmd=",
1987 arg ? *p : 0);
1988#endif
1989 switch (cmd) {
1990 case OSS_GETVERSION:
1991#ifdef DEBUG
1992 printk("OSS_GETVERSION\n");
1993#endif
1994 return put_user(SOUND_VERSION, p);
1995 case SNDCTL_DSP_RESET:
1996#ifdef DEBUG
1997 printk("SNDCTL_DSP_RESET\n");
1998#endif
1999 spin_lock_irqsave(&state->card->lock, flags);
2000 if (dmabuf->enable == DAC_RUNNING) {
2001 c = dmabuf->write_channel;
2002 __stop_dac(state);
2003 }
2004 if (dmabuf->enable == ADC_RUNNING) {
2005 c = dmabuf->read_channel;
2006 __stop_adc(state);
2007 }
2008 if (dmabuf->enable == CODEC_SPDIFOUT_RUNNING) {
2009 c = dmabuf->codec_spdifout_channel;
2010 __stop_spdifout(state);
2011 }
2012 if (dmabuf->enable == CONTROLLER_SPDIFOUT_RUNNING) {
2013 c = dmabuf->controller_spdifout_channel;
2014 __stop_spdifout(state);
2015 }
2016 if (c != NULL) {
2017 outb(2, state->card->iobase + c->port + OFF_CR); /* reset DMA machine */
2018 outl(virt_to_bus(&c->sg[0]),
2019 state->card->iobase + c->port + OFF_BDBAR);
2020 outb(0, state->card->iobase + c->port + OFF_CIV);
2021 outb(0, state->card->iobase + c->port + OFF_LVI);
2022 }
2023
2024 spin_unlock_irqrestore(&state->card->lock, flags);
2025 synchronize_irq(state->card->pci_dev->irq);
2026 dmabuf->ready = 0;
2027 dmabuf->swptr = dmabuf->hwptr = 0;
2028 dmabuf->count = dmabuf->total_bytes = 0;
2029 return 0;
2030 case SNDCTL_DSP_SYNC:
2031#ifdef DEBUG
2032 printk("SNDCTL_DSP_SYNC\n");
2033#endif
2034 if (codec_independent_spdif_locked > 0) {
2035 if (dmabuf->enable != CODEC_SPDIFOUT_RUNNING
2036 || file->f_flags & O_NONBLOCK)
2037 return 0;
2038 if ((val = drain_spdifout(state, 1)))
2039 return val;
2040 } else {
2041 if (controller_independent_spdif_locked > 0) {
2042 if (dmabuf->enable !=
2043 CONTROLLER_SPDIFOUT_RUNNING
2044 || file->f_flags & O_NONBLOCK)
2045 return 0;
2046 if ((val = drain_spdifout(state, 1)))
2047 return val;
2048 } else {
2049 if (dmabuf->enable != DAC_RUNNING
2050 || file->f_flags & O_NONBLOCK)
2051 return 0;
2052 if ((val = drain_dac(state, 1)))
2053 return val;
2054 }
2055 }
2056 dmabuf->total_bytes = 0;
2057 return 0;
2058 case SNDCTL_DSP_SPEED: /* set smaple rate */
2059#ifdef DEBUG
2060 printk("SNDCTL_DSP_SPEED\n");
2061#endif
2062 if (get_user(val, p))
2063 return -EFAULT;
2064 if (val >= 0) {
2065 if (file->f_mode & FMODE_WRITE) {
2066 if ((state->card->ac97_status & SPDIF_ON)) { /* S/PDIF Enabled */
2067 /* RELTEK ALC650 only support 48000, need to check that */
2068 if (ali_valid_spdif_rate(codec, val)) {
2069 if (codec_independent_spdif_locked > 0) {
2070 ali_set_spdif_output(state, -1, 0);
2071 stop_spdifout(state);
2072 dmabuf->ready = 0;
2073 /* I add test codec independent spdif out */
2074 spin_lock_irqsave(&state->card->lock, flags);
2075 ali_set_codecspdifout_rate(state, val); // I modified
2076 spin_unlock_irqrestore(&state->card->lock, flags);
2077 /* Set S/PDIF transmitter rate. */
2078 i_scr = inl(state->card->iobase + ALI_SCR);
2079 if ((i_scr & 0x00300000) == 0x00100000) {
2080 ali_set_spdif_output(state, AC97_EA_SPSA_7_8, codec_independent_spdif_locked);
2081 } else {
2082 if ((i_scr&0x00300000) == 0x00200000)
2083 {
2084 ali_set_spdif_output(state, AC97_EA_SPSA_6_9, codec_independent_spdif_locked);
2085 } else {
2086 if ((i_scr & 0x00300000) == 0x00300000) {
2087 ali_set_spdif_output(state, AC97_EA_SPSA_10_11, codec_independent_spdif_locked);
2088 } else {
2089 ali_set_spdif_output(state, AC97_EA_SPSA_7_8, codec_independent_spdif_locked);
2090 }
2091 }
2092 }
2093
2094 if (!(state->card->ac97_status & SPDIF_ON)) {
2095 val = dmabuf->rate;
2096 }
2097 } else {
2098 if (controller_independent_spdif_locked > 0)
2099 {
2100 stop_spdifout(state);
2101 dmabuf->ready = 0;
2102 spin_lock_irqsave(&state->card->lock, flags);
2103 ali_set_spdifout_rate(state, controller_independent_spdif_locked);
2104 spin_unlock_irqrestore(&state->card->lock, flags);
2105 } else {
2106 /* Set DAC rate */
2107 ali_set_spdif_output(state, -1, 0);
2108 stop_dac(state);
2109 dmabuf->ready = 0;
2110 spin_lock_irqsave(&state->card->lock, flags);
2111 ali_set_dac_rate(state, val);
2112 spin_unlock_irqrestore(&state->card->lock, flags);
2113 /* Set S/PDIF transmitter rate. */
2114 ali_set_spdif_output(state, AC97_EA_SPSA_3_4, val);
2115 if (!(state->card->ac97_status & SPDIF_ON))
2116 {
2117 val = dmabuf->rate;
2118 }
2119 }
2120 }
2121 } else { /* Not a valid rate for S/PDIF, ignore it */
2122 val = dmabuf->rate;
2123 }
2124 } else {
2125 stop_dac(state);
2126 dmabuf->ready = 0;
2127 spin_lock_irqsave(&state->card->lock, flags);
2128 ali_set_dac_rate(state, val);
2129 spin_unlock_irqrestore(&state->card->lock, flags);
2130 }
2131 }
2132 if (file->f_mode & FMODE_READ) {
2133 stop_adc(state);
2134 dmabuf->ready = 0;
2135 spin_lock_irqsave(&state->card->lock, flags);
2136 ali_set_adc_rate(state, val);
2137 spin_unlock_irqrestore(&state->card->lock, flags);
2138 }
2139 }
2140 return put_user(dmabuf->rate, p);
2141 case SNDCTL_DSP_STEREO: /* set stereo or mono channel */
2142#ifdef DEBUG
2143 printk("SNDCTL_DSP_STEREO\n");
2144#endif
2145 if (dmabuf->enable & DAC_RUNNING) {
2146 stop_dac(state);
2147 }
2148 if (dmabuf->enable & ADC_RUNNING) {
2149 stop_adc(state);
2150 }
2151 if (dmabuf->enable & CODEC_SPDIFOUT_RUNNING) {
2152 stop_spdifout(state);
2153 }
2154 if (dmabuf->enable & CONTROLLER_SPDIFOUT_RUNNING) {
2155 stop_spdifout(state);
2156 }
2157 return put_user(1, p);
2158 case SNDCTL_DSP_GETBLKSIZE:
2159 if (file->f_mode & FMODE_WRITE) {
2160 if (codec_independent_spdif_locked > 0) {
2161 if (!dmabuf->ready && (val = prog_dmabuf(state, 2)))
2162 return val;
2163 } else {
2164 if (controller_independent_spdif_locked > 0) {
2165 if (!dmabuf->ready && (val = prog_dmabuf(state, 3)))
2166 return val;
2167 } else {
2168 if (!dmabuf->ready && (val = prog_dmabuf(state, 0)))
2169 return val;
2170 }
2171 }
2172 }
2173
2174 if (file->f_mode & FMODE_READ) {
2175 if (!dmabuf->ready && (val = prog_dmabuf(state, 1)))
2176 return val;
2177 }
2178#ifdef DEBUG
2179 printk("SNDCTL_DSP_GETBLKSIZE %d\n", dmabuf->userfragsize);
2180#endif
2181 return put_user(dmabuf->userfragsize, p);
2182 case SNDCTL_DSP_GETFMTS: /* Returns a mask of supported sample format */
2183#ifdef DEBUG
2184 printk("SNDCTL_DSP_GETFMTS\n");
2185#endif
2186 return put_user(AFMT_S16_LE, p);
2187 case SNDCTL_DSP_SETFMT: /* Select sample format */
2188#ifdef DEBUG
2189 printk("SNDCTL_DSP_SETFMT\n");
2190#endif
2191 return put_user(AFMT_S16_LE, p);
2192 case SNDCTL_DSP_CHANNELS: // add support 4,6 channel
2193#ifdef DEBUG
2194 printk("SNDCTL_DSP_CHANNELS\n");
2195#endif
2196 if (get_user(val, p))
2197 return -EFAULT;
2198 if (val > 0) {
2199 if (dmabuf->enable & DAC_RUNNING) {
2200 stop_dac(state);
2201 }
2202 if (dmabuf->enable & CODEC_SPDIFOUT_RUNNING) {
2203 stop_spdifout(state);
2204 }
2205 if (dmabuf->enable & CONTROLLER_SPDIFOUT_RUNNING) {
2206 stop_spdifout(state);
2207 }
2208 if (dmabuf->enable & ADC_RUNNING) {
2209 stop_adc(state);
2210 }
2211 } else {
2212 return put_user(state->card->channels, p);
2213 }
2214
2215 i_scr = inl(state->card->iobase + ALI_SCR);
2216 /* Current # of channels enabled */
2217 if (i_scr & 0x00000100)
2218 ret = 4;
2219 else if (i_scr & 0x00000200)
2220 ret = 6;
2221 else
2222 ret = 2;
2223 switch (val) {
2224 case 2: /* 2 channels is always supported */
2225 if (codec_independent_spdif_locked > 0) {
2226 outl(((i_scr & 0xfffffcff) | 0x00100000), (state->card->iobase + ALI_SCR));
2227 } else
2228 outl((i_scr & 0xfffffcff), (state->card->iobase + ALI_SCR));
2229 /* Do we need to change mixer settings???? */
2230 break;
2231 case 4: /* Supported on some chipsets, better check first */
2232 if (codec_independent_spdif_locked > 0) {
2233 outl(((i_scr & 0xfffffcff) | 0x00000100 | 0x00200000), (state->card->iobase + ALI_SCR));
2234 } else
2235 outl(((i_scr & 0xfffffcff) | 0x00000100), (state->card->iobase + ALI_SCR));
2236 break;
2237 case 6: /* Supported on some chipsets, better check first */
2238 if (codec_independent_spdif_locked > 0) {
2239 outl(((i_scr & 0xfffffcff) | 0x00000200 | 0x00008000 | 0x00300000), (state->card->iobase + ALI_SCR));
2240 } else
2241 outl(((i_scr & 0xfffffcff) | 0x00000200 | 0x00008000), (state->card->iobase + ALI_SCR));
2242 break;
2243 default: /* nothing else is ever supported by the chipset */
2244 val = ret;
2245 break;
2246 }
2247 return put_user(val, p);
2248 case SNDCTL_DSP_POST: /* the user has sent all data and is notifying us */
2249 /* we update the swptr to the end of the last sg segment then return */
2250#ifdef DEBUG
2251 printk("SNDCTL_DSP_POST\n");
2252#endif
2253 if (codec_independent_spdif_locked > 0) {
2254 if (!dmabuf->ready || (dmabuf->enable != CODEC_SPDIFOUT_RUNNING))
2255 return 0;
2256 } else {
2257 if (controller_independent_spdif_locked > 0) {
2258 if (!dmabuf->ready || (dmabuf->enable != CONTROLLER_SPDIFOUT_RUNNING))
2259 return 0;
2260 } else {
2261 if (!dmabuf->ready || (dmabuf->enable != DAC_RUNNING))
2262 return 0;
2263 }
2264 }
2265 if ((dmabuf->swptr % dmabuf->fragsize) != 0) {
2266 val = dmabuf->fragsize - (dmabuf->swptr % dmabuf->fragsize);
2267 dmabuf->swptr += val;
2268 dmabuf->count += val;
2269 }
2270 return 0;
2271 case SNDCTL_DSP_SUBDIVIDE:
2272 if (dmabuf->subdivision)
2273 return -EINVAL;
2274 if (get_user(val, p))
2275 return -EFAULT;
2276 if (val != 1 && val != 2 && val != 4)
2277 return -EINVAL;
2278#ifdef DEBUG
2279 printk("SNDCTL_DSP_SUBDIVIDE %d\n", val);
2280#endif
2281 dmabuf->subdivision = val;
2282 dmabuf->ready = 0;
2283 return 0;
2284 case SNDCTL_DSP_SETFRAGMENT:
2285 if (get_user(val, p))
2286 return -EFAULT;
2287 dmabuf->ossfragsize = 1 << (val & 0xffff);
2288 dmabuf->ossmaxfrags = (val >> 16) & 0xffff;
2289 if (!dmabuf->ossfragsize || !dmabuf->ossmaxfrags)
2290 return -EINVAL;
2291 /*
2292 * Bound the frag size into our allowed range of 256 - 4096
2293 */
2294 if (dmabuf->ossfragsize < 256)
2295 dmabuf->ossfragsize = 256;
2296 else if (dmabuf->ossfragsize > 4096)
2297 dmabuf->ossfragsize = 4096;
2298 /*
2299 * The numfrags could be something reasonable, or it could
2300 * be 0xffff meaning "Give me as much as possible". So,
2301 * we check the numfrags * fragsize doesn't exceed our
2302 * 64k buffer limit, nor is it less than our 8k minimum.
2303 * If it fails either one of these checks, then adjust the
2304 * number of fragments, not the size of them. It's OK if
2305 * our number of fragments doesn't equal 32 or anything
2306 * like our hardware based number now since we are using
2307 * a different frag count for the hardware. Before we get
2308 * into this though, bound the maxfrags to avoid overflow
2309 * issues. A reasonable bound would be 64k / 256 since our
2310 * maximum buffer size is 64k and our minimum frag size is
2311 * 256. On the other end, our minimum buffer size is 8k and
2312 * our maximum frag size is 4k, so the lower bound should
2313 * be 2.
2314 */
2315 if (dmabuf->ossmaxfrags > 256)
2316 dmabuf->ossmaxfrags = 256;
2317 else if (dmabuf->ossmaxfrags < 2)
2318 dmabuf->ossmaxfrags = 2;
2319 val = dmabuf->ossfragsize * dmabuf->ossmaxfrags;
2320 while (val < 8192) {
2321 val <<= 1;
2322 dmabuf->ossmaxfrags <<= 1;
2323 }
2324 while (val > 65536) {
2325 val >>= 1;
2326 dmabuf->ossmaxfrags >>= 1;
2327 }
2328 dmabuf->ready = 0;
2329#ifdef DEBUG
2330 printk("SNDCTL_DSP_SETFRAGMENT 0x%x, %d, %d\n", val,
2331 dmabuf->ossfragsize, dmabuf->ossmaxfrags);
2332#endif
2333 return 0;
2334 case SNDCTL_DSP_GETOSPACE:
2335 if (!(file->f_mode & FMODE_WRITE))
2336 return -EINVAL;
2337 if (codec_independent_spdif_locked > 0) {
2338 if (!dmabuf->ready && (val = prog_dmabuf(state, 2)) != 0)
2339 return val;
2340 } else {
2341 if (controller_independent_spdif_locked > 0) {
2342 if (!dmabuf->ready && (val = prog_dmabuf(state, 3)) != 0)
2343 return val;
2344 } else {
2345 if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
2346 return val;
2347 }
2348 }
2349 spin_lock_irqsave(&state->card->lock, flags);
2350 ali_update_ptr(state);
2351 abinfo.fragsize = dmabuf->userfragsize;
2352 abinfo.fragstotal = dmabuf->userfrags;
2353 if (dmabuf->mapped)
2354 abinfo.bytes = dmabuf->dmasize;
2355 else
2356 abinfo.bytes = ali_get_free_write_space(state);
2357 abinfo.fragments = abinfo.bytes / dmabuf->userfragsize;
2358 spin_unlock_irqrestore(&state->card->lock, flags);
2359#if defined(DEBUG) || defined(DEBUG_MMAP)
2360 printk("SNDCTL_DSP_GETOSPACE %d, %d, %d, %d\n",
2361 abinfo.bytes, abinfo.fragsize, abinfo.fragments,
2362 abinfo.fragstotal);
2363#endif
2364 return copy_to_user(argp, &abinfo,
2365 sizeof(abinfo)) ? -EFAULT : 0;
2366 case SNDCTL_DSP_GETOPTR:
2367 if (!(file->f_mode & FMODE_WRITE))
2368 return -EINVAL;
2369 if (codec_independent_spdif_locked > 0) {
2370 if (!dmabuf->ready && (val = prog_dmabuf(state, 2)) != 0)
2371 return val;
2372 } else {
2373 if (controller_independent_spdif_locked > 0) {
2374 if (!dmabuf->ready && (val = prog_dmabuf(state, 3)) != 0)
2375 return val;
2376 } else {
2377 if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
2378 return val;
2379 }
2380 }
2381 spin_lock_irqsave(&state->card->lock, flags);
2382 val = ali_get_free_write_space(state);
2383 cinfo.bytes = dmabuf->total_bytes;
2384 cinfo.ptr = dmabuf->hwptr;
2385 cinfo.blocks = val / dmabuf->userfragsize;
2386 if (codec_independent_spdif_locked > 0) {
2387 if (dmabuf->mapped && (dmabuf->trigger & SPDIF_ENABLE_OUTPUT)) {
2388 dmabuf->count += val;
2389 dmabuf->swptr = (dmabuf->swptr + val) % dmabuf->dmasize;
2390 __ali_update_lvi(state, 2);
2391 }
2392 } else {
2393 if (controller_independent_spdif_locked > 0) {
2394 if (dmabuf->mapped && (dmabuf->trigger & SPDIF_ENABLE_OUTPUT)) {
2395 dmabuf->count += val;
2396 dmabuf->swptr = (dmabuf->swptr + val) % dmabuf->dmasize;
2397 __ali_update_lvi(state, 3);
2398 }
2399 } else {
2400 if (dmabuf->mapped && (dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
2401 dmabuf->count += val;
2402 dmabuf->swptr = (dmabuf->swptr + val) % dmabuf->dmasize;
2403 __ali_update_lvi(state, 0);
2404 }
2405 }
2406 }
2407 spin_unlock_irqrestore(&state->card->lock, flags);
2408#if defined(DEBUG) || defined(DEBUG_MMAP)
2409 printk("SNDCTL_DSP_GETOPTR %d, %d, %d, %d\n", cinfo.bytes,
2410 cinfo.blocks, cinfo.ptr, dmabuf->count);
2411#endif
2412 return copy_to_user(argp, &cinfo, sizeof(cinfo))? -EFAULT : 0;
2413 case SNDCTL_DSP_GETISPACE:
2414 if (!(file->f_mode & FMODE_READ))
2415 return -EINVAL;
2416 if (!dmabuf->ready && (val = prog_dmabuf(state, 1)) != 0)
2417 return val;
2418 spin_lock_irqsave(&state->card->lock, flags);
2419 abinfo.bytes = ali_get_available_read_data(state);
2420 abinfo.fragsize = dmabuf->userfragsize;
2421 abinfo.fragstotal = dmabuf->userfrags;
2422 abinfo.fragments = abinfo.bytes / dmabuf->userfragsize;
2423 spin_unlock_irqrestore(&state->card->lock, flags);
2424#if defined(DEBUG) || defined(DEBUG_MMAP)
2425 printk("SNDCTL_DSP_GETISPACE %d, %d, %d, %d\n",
2426 abinfo.bytes, abinfo.fragsize, abinfo.fragments,
2427 abinfo.fragstotal);
2428#endif
2429 return copy_to_user(argp, &abinfo,
2430 sizeof(abinfo)) ? -EFAULT : 0;
2431 case SNDCTL_DSP_GETIPTR:
2432 if (!(file->f_mode & FMODE_READ))
2433 return -EINVAL;
2434 if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
2435 return val;
2436 spin_lock_irqsave(&state->card->lock, flags);
2437 val = ali_get_available_read_data(state);
2438 cinfo.bytes = dmabuf->total_bytes;
2439 cinfo.blocks = val / dmabuf->userfragsize;
2440 cinfo.ptr = dmabuf->hwptr;
2441 if (dmabuf->mapped && (dmabuf->trigger & PCM_ENABLE_INPUT)) {
2442 dmabuf->count -= val;
2443 dmabuf->swptr = (dmabuf->swptr + val) % dmabuf->dmasize;
2444 __ali_update_lvi(state, 1);
2445 }
2446 spin_unlock_irqrestore(&state->card->lock, flags);
2447#if defined(DEBUG) || defined(DEBUG_MMAP)
2448 printk("SNDCTL_DSP_GETIPTR %d, %d, %d, %d\n", cinfo.bytes,
2449 cinfo.blocks, cinfo.ptr, dmabuf->count);
2450#endif
2451 return copy_to_user(argp, &cinfo, sizeof(cinfo))? -EFAULT: 0;
2452 case SNDCTL_DSP_NONBLOCK:
2453#ifdef DEBUG
2454 printk("SNDCTL_DSP_NONBLOCK\n");
2455#endif
2456 file->f_flags |= O_NONBLOCK;
2457 return 0;
2458 case SNDCTL_DSP_GETCAPS:
2459#ifdef DEBUG
2460 printk("SNDCTL_DSP_GETCAPS\n");
2461#endif
2462 return put_user(DSP_CAP_REALTIME | DSP_CAP_TRIGGER |
2463 DSP_CAP_MMAP | DSP_CAP_BIND, p);
2464 case SNDCTL_DSP_GETTRIGGER:
2465 val = 0;
2466#ifdef DEBUG
2467 printk("SNDCTL_DSP_GETTRIGGER 0x%x\n", dmabuf->trigger);
2468#endif
2469 return put_user(dmabuf->trigger, p);
2470 case SNDCTL_DSP_SETTRIGGER:
2471 if (get_user(val, p))
2472 return -EFAULT;
2473#if defined(DEBUG) || defined(DEBUG_MMAP)
2474 printk("SNDCTL_DSP_SETTRIGGER 0x%x\n", val);
2475#endif
2476 if (!(val & PCM_ENABLE_INPUT) && dmabuf->enable == ADC_RUNNING) {
2477 stop_adc(state);
2478 }
2479 if (!(val & PCM_ENABLE_OUTPUT) && dmabuf->enable == DAC_RUNNING) {
2480 stop_dac(state);
2481 }
2482 if (!(val & SPDIF_ENABLE_OUTPUT) && dmabuf->enable == CODEC_SPDIFOUT_RUNNING) {
2483 stop_spdifout(state);
2484 }
2485 if (!(val & SPDIF_ENABLE_OUTPUT) && dmabuf->enable == CONTROLLER_SPDIFOUT_RUNNING) {
2486 stop_spdifout(state);
2487 }
2488 dmabuf->trigger = val;
2489 if (val & PCM_ENABLE_OUTPUT && !(dmabuf->enable & DAC_RUNNING)) {
2490 if (!dmabuf->write_channel) {
2491 dmabuf->ready = 0;
2492 dmabuf->write_channel = state->card->alloc_pcm_channel(state->card);
2493 if (!dmabuf->write_channel)
2494 return -EBUSY;
2495 }
2496 if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
2497 return ret;
2498 if (dmabuf->mapped) {
2499 spin_lock_irqsave(&state->card->lock, flags);
2500 ali_update_ptr(state);
2501 dmabuf->count = 0;
2502 dmabuf->swptr = dmabuf->hwptr;
2503 dmabuf->count = ali_get_free_write_space(state);
2504 dmabuf->swptr = (dmabuf->swptr + dmabuf->count) % dmabuf->dmasize;
2505 __ali_update_lvi(state, 0);
2506 spin_unlock_irqrestore(&state->card->lock,
2507 flags);
2508 } else
2509 start_dac(state);
2510 }
2511 if (val & SPDIF_ENABLE_OUTPUT && !(dmabuf->enable & CODEC_SPDIFOUT_RUNNING)) {
2512 if (!dmabuf->codec_spdifout_channel) {
2513 dmabuf->ready = 0;
2514 dmabuf->codec_spdifout_channel = state->card->alloc_codec_spdifout_channel(state->card);
2515 if (!dmabuf->codec_spdifout_channel)
2516 return -EBUSY;
2517 }
2518 if (!dmabuf->ready && (ret = prog_dmabuf(state, 2)))
2519 return ret;
2520 if (dmabuf->mapped) {
2521 spin_lock_irqsave(&state->card->lock, flags);
2522 ali_update_ptr(state);
2523 dmabuf->count = 0;
2524 dmabuf->swptr = dmabuf->hwptr;
2525 dmabuf->count = ali_get_free_write_space(state);
2526 dmabuf->swptr = (dmabuf->swptr + dmabuf->count) % dmabuf->dmasize;
2527 __ali_update_lvi(state, 2);
2528 spin_unlock_irqrestore(&state->card->lock,
2529 flags);
2530 } else
2531 start_spdifout(state);
2532 }
2533 if (val & SPDIF_ENABLE_OUTPUT && !(dmabuf->enable & CONTROLLER_SPDIFOUT_RUNNING)) {
2534 if (!dmabuf->controller_spdifout_channel) {
2535 dmabuf->ready = 0;
2536 dmabuf->controller_spdifout_channel = state->card->alloc_controller_spdifout_channel(state->card);
2537 if (!dmabuf->controller_spdifout_channel)
2538 return -EBUSY;
2539 }
2540 if (!dmabuf->ready && (ret = prog_dmabuf(state, 3)))
2541 return ret;
2542 if (dmabuf->mapped) {
2543 spin_lock_irqsave(&state->card->lock, flags);
2544 ali_update_ptr(state);
2545 dmabuf->count = 0;
2546 dmabuf->swptr = dmabuf->hwptr;
2547 dmabuf->count = ali_get_free_write_space(state);
2548 dmabuf->swptr = (dmabuf->swptr + dmabuf->count) % dmabuf->dmasize;
2549 __ali_update_lvi(state, 3);
2550 spin_unlock_irqrestore(&state->card->lock, flags);
2551 } else
2552 start_spdifout(state);
2553 }
2554 if (val & PCM_ENABLE_INPUT && !(dmabuf->enable & ADC_RUNNING)) {
2555 if (!dmabuf->read_channel) {
2556 dmabuf->ready = 0;
2557 dmabuf->read_channel = state->card->alloc_rec_pcm_channel(state->card);
2558 if (!dmabuf->read_channel)
2559 return -EBUSY;
2560 }
2561 if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
2562 return ret;
2563 if (dmabuf->mapped) {
2564 spin_lock_irqsave(&state->card->lock,
2565 flags);
2566 ali_update_ptr(state);
2567 dmabuf->swptr = dmabuf->hwptr;
2568 dmabuf->count = 0;
2569 spin_unlock_irqrestore(&state->card->lock, flags);
2570 }
2571 ali_update_lvi(state, 1);
2572 start_adc(state);
2573 }
2574 return 0;
2575 case SNDCTL_DSP_SETDUPLEX:
2576#ifdef DEBUG
2577 printk("SNDCTL_DSP_SETDUPLEX\n");
2578#endif
2579 return -EINVAL;
2580 case SNDCTL_DSP_GETODELAY:
2581 if (!(file->f_mode & FMODE_WRITE))
2582 return -EINVAL;
2583 spin_lock_irqsave(&state->card->lock, flags);
2584 ali_update_ptr(state);
2585 val = dmabuf->count;
2586 spin_unlock_irqrestore(&state->card->lock, flags);
2587#ifdef DEBUG
2588 printk("SNDCTL_DSP_GETODELAY %d\n", dmabuf->count);
2589#endif
2590 return put_user(val, p);
2591 case SOUND_PCM_READ_RATE:
2592#ifdef DEBUG
2593 printk("SOUND_PCM_READ_RATE %d\n", dmabuf->rate);
2594#endif
2595 return put_user(dmabuf->rate, p);
2596 case SOUND_PCM_READ_CHANNELS:
2597#ifdef DEBUG
2598 printk("SOUND_PCM_READ_CHANNELS\n");
2599#endif
2600 return put_user(2, p);
2601 case SOUND_PCM_READ_BITS:
2602#ifdef DEBUG
2603 printk("SOUND_PCM_READ_BITS\n");
2604#endif
2605 return put_user(AFMT_S16_LE, p);
2606 case SNDCTL_DSP_SETSPDIF: /* Set S/PDIF Control register */
2607#ifdef DEBUG
2608 printk("SNDCTL_DSP_SETSPDIF\n");
2609#endif
2610 if (get_user(val, p))
2611 return -EFAULT;
2612 /* Check to make sure the codec supports S/PDIF transmitter */
2613 if ((state->card->ac97_features & 4)) {
2614 /* mask out the transmitter speed bits so the user can't set them */
2615 val &= ~0x3000;
2616 /* Add the current transmitter speed bits to the passed value */
2617 ret = ali_ac97_get(codec, AC97_SPDIF_CONTROL);
2618 val |= (ret & 0x3000);
2619 ali_ac97_set(codec, AC97_SPDIF_CONTROL, val);
2620 if (ali_ac97_get(codec, AC97_SPDIF_CONTROL) != val) {
2621 printk(KERN_ERR "ali_audio: Unable to set S/PDIF configuration to 0x%04x.\n", val);
2622 return -EFAULT;
2623 }
2624 }
2625#ifdef DEBUG
2626 else
2627 printk(KERN_WARNING "ali_audio: S/PDIF transmitter not avalible.\n");
2628#endif
2629 return put_user(val, p);
2630 case SNDCTL_DSP_GETSPDIF: /* Get S/PDIF Control register */
2631#ifdef DEBUG
2632 printk("SNDCTL_DSP_GETSPDIF\n");
2633#endif
2634 if (get_user(val, p))
2635 return -EFAULT;
2636 /* Check to make sure the codec supports S/PDIF transmitter */
2637 if (!(state->card->ac97_features & 4)) {
2638#ifdef DEBUG
2639 printk(KERN_WARNING "ali_audio: S/PDIF transmitter not avalible.\n");
2640#endif
2641 val = 0;
2642 } else {
2643 val = ali_ac97_get(codec, AC97_SPDIF_CONTROL);
2644 }
2645
2646 return put_user(val, p);
2647//end add support spdif out
2648//add support 4,6 channel
2649 case SNDCTL_DSP_GETCHANNELMASK:
2650#ifdef DEBUG
2651 printk("SNDCTL_DSP_GETCHANNELMASK\n");
2652#endif
2653 if (get_user(val, p))
2654 return -EFAULT;
2655 /* Based on AC'97 DAC support, not ICH hardware */
2656 val = DSP_BIND_FRONT;
2657 if (state->card->ac97_features & 0x0004)
2658 val |= DSP_BIND_SPDIF;
2659 if (state->card->ac97_features & 0x0080)
2660 val |= DSP_BIND_SURR;
2661 if (state->card->ac97_features & 0x0140)
2662 val |= DSP_BIND_CENTER_LFE;
2663 return put_user(val, p);
2664 case SNDCTL_DSP_BIND_CHANNEL:
2665#ifdef DEBUG
2666 printk("SNDCTL_DSP_BIND_CHANNEL\n");
2667#endif
2668 if (get_user(val, p))
2669 return -EFAULT;
2670 if (val == DSP_BIND_QUERY) {
2671 val = DSP_BIND_FRONT; /* Always report this as being enabled */
2672 if (state->card->ac97_status & SPDIF_ON)
2673 val |= DSP_BIND_SPDIF;
2674 else {
2675 if (state->card->ac97_status & SURR_ON)
2676 val |= DSP_BIND_SURR;
2677 if (state->card->
2678 ac97_status & CENTER_LFE_ON)
2679 val |= DSP_BIND_CENTER_LFE;
2680 }
2681 } else { /* Not a query, set it */
2682 if (!(file->f_mode & FMODE_WRITE))
2683 return -EINVAL;
2684 if (dmabuf->enable == DAC_RUNNING) {
2685 stop_dac(state);
2686 }
2687 if (val & DSP_BIND_SPDIF) { /* Turn on SPDIF */
2688 /* Ok, this should probably define what slots
2689 * to use. For now, we'll only set it to the
2690 * defaults:
2691 *
2692 * non multichannel codec maps to slots 3&4
2693 * 2 channel codec maps to slots 7&8
2694 * 4 channel codec maps to slots 6&9
2695 * 6 channel codec maps to slots 10&11
2696 *
2697 * there should be some way for the app to
2698 * select the slot assignment.
2699 */
2700 i_scr = inl(state->card->iobase + ALI_SCR);
2701 if (codec_independent_spdif_locked > 0) {
2702
2703 if ((i_scr & 0x00300000) == 0x00100000) {
2704 ali_set_spdif_output(state, AC97_EA_SPSA_7_8, codec_independent_spdif_locked);
2705 } else {
2706 if ((i_scr & 0x00300000) == 0x00200000) {
2707 ali_set_spdif_output(state, AC97_EA_SPSA_6_9, codec_independent_spdif_locked);
2708 } else {
2709 if ((i_scr & 0x00300000) == 0x00300000) {
2710 ali_set_spdif_output(state, AC97_EA_SPSA_10_11, codec_independent_spdif_locked);
2711 }
2712 }
2713 }
2714 } else { /* codec spdif out (pcm out share ) */
2715 ali_set_spdif_output(state, AC97_EA_SPSA_3_4, dmabuf->rate); //I do not modify
2716 }
2717
2718 if (!(state->card->ac97_status & SPDIF_ON))
2719 val &= ~DSP_BIND_SPDIF;
2720 } else {
2721 int mask;
2722 int channels;
2723 /* Turn off S/PDIF if it was on */
2724 if (state->card->ac97_status & SPDIF_ON)
2725 ali_set_spdif_output(state, -1, 0);
2726 mask =
2727 val & (DSP_BIND_FRONT | DSP_BIND_SURR |
2728 DSP_BIND_CENTER_LFE);
2729 switch (mask) {
2730 case DSP_BIND_FRONT:
2731 channels = 2;
2732 break;
2733 case DSP_BIND_FRONT | DSP_BIND_SURR:
2734 channels = 4;
2735 break;
2736 case DSP_BIND_FRONT | DSP_BIND_SURR | DSP_BIND_CENTER_LFE:
2737 channels = 6;
2738 break;
2739 default:
2740 val = DSP_BIND_FRONT;
2741 channels = 2;
2742 break;
2743 }
2744 ali_set_dac_channels(state, channels);
2745 /* check that they really got turned on */
2746 if (!state->card->ac97_status & SURR_ON)
2747 val &= ~DSP_BIND_SURR;
2748 if (!state->card->
2749 ac97_status & CENTER_LFE_ON)
2750 val &= ~DSP_BIND_CENTER_LFE;
2751 }
2752 }
2753 return put_user(val, p);
2754 case SNDCTL_DSP_MAPINBUF:
2755 case SNDCTL_DSP_MAPOUTBUF:
2756 case SNDCTL_DSP_SETSYNCRO:
2757 case SOUND_PCM_WRITE_FILTER:
2758 case SOUND_PCM_READ_FILTER:
2759 return -EINVAL;
2760 }
2761 return -EINVAL;
2762}
2763
2764static int ali_open(struct inode *inode, struct file *file)
2765{
2766 int i = 0;
2767 struct ali_card *card = devs;
2768 struct ali_state *state = NULL;
2769 struct dmabuf *dmabuf = NULL;
2770 unsigned int i_scr;
2771
2772 /* find an available virtual channel (instance of /dev/dsp) */
2773
2774 while (card != NULL) {
2775
2776 /*
2777 * If we are initializing and then fail, card could go
2778 * away unuexpectedly while we are in the for() loop.
2779 * So, check for card on each iteration before we check
2780 * for card->initializing to avoid a possible oops.
2781 * This usually only matters for times when the driver is
2782 * autoloaded by kmod.
2783 */
2784 for (i = 0; i < 50 && card && card->initializing; i++) {
2785 set_current_state(TASK_UNINTERRUPTIBLE);
2786 schedule_timeout(HZ / 20);
2787 }
2788
2789 for (i = 0; i < NR_HW_CH && card && !card->initializing; i++) {
2790 if (card->states[i] == NULL) {
2791 state = card->states[i] = (struct ali_state *) kmalloc(sizeof(struct ali_state), GFP_KERNEL);
2792 if (state == NULL)
2793 return -ENOMEM;
2794 memset(state, 0, sizeof(struct ali_state));
2795 dmabuf = &state->dmabuf;
2796 goto found_virt;
2797 }
2798 }
2799 card = card->next;
2800 }
2801
2802 /* no more virtual channel avaiable */
2803 if (!state)
2804 return -ENODEV;
2805found_virt:
2806 /* initialize the virtual channel */
2807
2808 state->virt = i;
2809 state->card = card;
2810 state->magic = ALI5455_STATE_MAGIC;
2811 init_waitqueue_head(&dmabuf->wait);
2812 mutex_init(&state->open_mutex);
2813 file->private_data = state;
2814 dmabuf->trigger = 0;
2815 /* allocate hardware channels */
2816 if (file->f_mode & FMODE_READ) {
2817 if ((dmabuf->read_channel =
2818 card->alloc_rec_pcm_channel(card)) == NULL) {
2819 kfree(card->states[i]);
2820 card->states[i] = NULL;
2821 return -EBUSY;
2822 }
2823 dmabuf->trigger |= PCM_ENABLE_INPUT;
2824 ali_set_adc_rate(state, 8000);
2825 }
2826 if (file->f_mode & FMODE_WRITE) {
2827 if (codec_independent_spdif_locked > 0) {
2828 if ((dmabuf->codec_spdifout_channel = card->alloc_codec_spdifout_channel(card)) == NULL) {
2829 kfree(card->states[i]);
2830 card->states[i] = NULL;
2831 return -EBUSY;
2832 }
2833 dmabuf->trigger |= SPDIF_ENABLE_OUTPUT;
2834 ali_set_codecspdifout_rate(state, codec_independent_spdif_locked); //It must add
2835 i_scr = inl(state->card->iobase + ALI_SCR);
2836 if ((i_scr & 0x00300000) == 0x00100000) {
2837 ali_set_spdif_output(state, AC97_EA_SPSA_7_8, codec_independent_spdif_locked);
2838 } else {
2839 if ((i_scr & 0x00300000) == 0x00200000) {
2840 ali_set_spdif_output(state, AC97_EA_SPSA_6_9, codec_independent_spdif_locked);
2841 } else {
2842 if ((i_scr & 0x00300000) == 0x00300000) {
2843 ali_set_spdif_output(state, AC97_EA_SPSA_10_11, codec_independent_spdif_locked);
2844 } else {
2845 ali_set_spdif_output(state, AC97_EA_SPSA_7_8, codec_independent_spdif_locked);
2846 }
2847 }
2848
2849 }
2850 } else {
2851 if (controller_independent_spdif_locked > 0) {
2852 if ((dmabuf->controller_spdifout_channel = card->alloc_controller_spdifout_channel(card)) == NULL) {
2853 kfree(card->states[i]);
2854 card->states[i] = NULL;
2855 return -EBUSY;
2856 }
2857 dmabuf->trigger |= SPDIF_ENABLE_OUTPUT;
2858 ali_set_spdifout_rate(state, controller_independent_spdif_locked);
2859 } else {
2860 if ((dmabuf->write_channel = card->alloc_pcm_channel(card)) == NULL) {
2861 kfree(card->states[i]);
2862 card->states[i] = NULL;
2863 return -EBUSY;
2864 }
2865 /* Initialize to 8kHz? What if we don't support 8kHz? */
2866 /* Let's change this to check for S/PDIF stuff */
2867
2868 dmabuf->trigger |= PCM_ENABLE_OUTPUT;
2869 if (codec_pcmout_share_spdif_locked) {
2870 ali_set_dac_rate(state, codec_pcmout_share_spdif_locked);
2871 ali_set_spdif_output(state, AC97_EA_SPSA_3_4, codec_pcmout_share_spdif_locked);
2872 } else {
2873 ali_set_dac_rate(state, 8000);
2874 }
2875 }
2876
2877 }
2878 }
2879
2880 /* set default sample format. According to OSS Programmer's Guide /dev/dsp
2881 should be default to unsigned 8-bits, mono, with sample rate 8kHz and
2882 /dev/dspW will accept 16-bits sample, but we don't support those so we
2883 set it immediately to stereo and 16bit, which is all we do support */
2884 dmabuf->fmt |= ALI5455_FMT_16BIT | ALI5455_FMT_STEREO;
2885 dmabuf->ossfragsize = 0;
2886 dmabuf->ossmaxfrags = 0;
2887 dmabuf->subdivision = 0;
2888 state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
2889 outl(0x00000000, card->iobase + ALI_INTERRUPTCR);
2890 outl(0x00000000, card->iobase + ALI_INTERRUPTSR);
2891 return nonseekable_open(inode, file);
2892}
2893
static int ali_release(struct inode *inode, struct file *file)
{
	struct ali_state *state = (struct ali_state *) file->private_data;
	struct ali_card *card = state->card;
	struct dmabuf *dmabuf = &state->dmabuf;
	unsigned long flags;
	lock_kernel();

	/* stop DMA state machine and free DMA buffers/channels */
	/* Drain/stop must happen before the buffer is deallocated below. */
	if (dmabuf->trigger & PCM_ENABLE_OUTPUT)
		drain_dac(state, 0);

	if (dmabuf->trigger & SPDIF_ENABLE_OUTPUT)
		drain_spdifout(state, 0);

	if (dmabuf->trigger & PCM_ENABLE_INPUT)
		stop_adc(state);

	/* Teardown is done under the card lock so the interrupt handler
	 * never sees a half-freed state. */
	spin_lock_irqsave(&card->lock, flags);
	dealloc_dmabuf(state);
	if (file->f_mode & FMODE_WRITE) {
		/* Free exactly the channel that ali_open() allocated for this
		 * mode: codec S/PDIF, controller S/PDIF, or the PCM channel. */
		if (codec_independent_spdif_locked > 0) {
			state->card->free_pcm_channel(state->card, dmabuf->codec_spdifout_channel->num);
		} else {
			if (controller_independent_spdif_locked > 0)
				state->card->free_pcm_channel(state->card,
							      dmabuf->controller_spdifout_channel->num);
			else state->card->free_pcm_channel(state->card,
							   dmabuf->write_channel->num);
		}
	}
	if (file->f_mode & FMODE_READ)
		state->card->free_pcm_channel(state->card, dmabuf->read_channel->num);

	/* Release the virtual channel slot and the state itself. */
	state->card->states[state->virt] = NULL;
	kfree(state);
	spin_unlock_irqrestore(&card->lock, flags);
	unlock_kernel();
	return 0;
}
2934
/* File operations for the /dev/dsp audio device.  no_llseek matches
   the nonseekable_open() call in ali_open(). */
static /*const */ struct file_operations ali_audio_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = ali_read,
	.write = ali_write,
	.poll = ali_poll,
	.ioctl = ali_ioctl,
	.mmap = ali_mmap,
	.open = ali_open,
	.release = ali_release,
};
2946
/* Read AC97 codec registers */
/*
 * ali_ac97_get - read one 16-bit AC97 codec register over the AC-link.
 *
 * Serialized against ali_ac97_set() by card->ac97_lock.  Returns the
 * register value, or (u16)-1 == 0xffff on timeout or when the address
 * echoed back by the controller does not match the requested register;
 * callers (e.g. the EXTENDED_ID probe in ali_ac97_init) treat 0xffff
 * as "no codec".
 */
static u16 ali_ac97_get(struct ac97_codec *dev, u8 reg)
{
	struct ali_card *card = dev->private_data;
	int count1 = 100;
	char val;
	unsigned short int data = 0, count, addr1, addr2 = 0;

	spin_lock(&card->ac97_lock);
	/* wait (up to ~100us) for any in-flight codec access to finish */
	while (count1-- && (inl(card->iobase + ALI_CAS) & 0x80000000))
		udelay(1);

	addr1 = reg;		/* remember the address we asked for */
	reg |= 0x0080;		/* bit 7 marks this as a read command */
	/* poll for command-port-ready */
	for (count = 0; count < 0x7f; count++) {
		val = inb(card->iobase + ALI_CSPSR);
		if (val & 0x08)
			break;
	}
	if (count == 0x7f)
	{
		spin_unlock(&card->ac97_lock);
		return -1;	/* timed out; reads back as 0xffff */
	}
	outw(reg, (card->iobase + ALI_CPR) + 2);
	/* poll for read-data-ready, then fetch value and echoed address */
	for (count = 0; count < 0x7f; count++) {
		val = inb(card->iobase + ALI_CSPSR);
		if (val & 0x02) {
			data = inw(card->iobase + ALI_SPR);
			addr2 = inw((card->iobase + ALI_SPR) + 2);
			break;
		}
	}
	spin_unlock(&card->ac97_lock);
	if (count == 0x7f)
		return -1;
	if (addr2 != addr1)	/* controller answered for a different register */
		return -1;
	return ((u16) data);
}
2987
/* write ac97 codec register */

/*
 * ali_ac97_set - write a 16-bit value to an AC97 codec register.
 *
 * Serialized against ali_ac97_get() by card->ac97_lock.  Timeouts are
 * only logged with a warning; the write is silently lost in that case.
 */
static void ali_ac97_set(struct ac97_codec *dev, u8 reg, u16 data)
{
	struct ali_card *card = dev->private_data;
	int count1 = 100;
	char val;
	unsigned short int count;

	spin_lock(&card->ac97_lock);
	/* wait (up to ~100us) for any in-flight codec access to finish */
	while (count1-- && (inl(card->iobase + ALI_CAS) & 0x80000000))
		udelay(1);

	/* poll for command-port-ready */
	for (count = 0; count < 0x7f; count++) {
		val = inb(card->iobase + ALI_CSPSR);
		if (val & 0x08)
			break;
	}
	if (count == 0x7f) {
		printk(KERN_WARNING "ali_ac97_set: AC97 codec register access timed out. \n");
		spin_unlock(&card->ac97_lock);
		return;
	}
	/* data first, then the register address to trigger the cycle */
	outw(data, (card->iobase + ALI_CPR));
	outb(reg, (card->iobase + ALI_CPR) + 2);
	/* poll for write completion */
	for (count = 0; count < 0x7f; count++) {
		val = inb(card->iobase + ALI_CSPSR);
		if (val & 0x01)
			break;
	}
	spin_unlock(&card->ac97_lock);
	if (count == 0x7f)
		printk(KERN_WARNING "ali_ac97_set: AC97 codec register access timed out. \n");
	return;
}
3023
3024/* OSS /dev/mixer file operation methods */
3025
/*
 * Open method for /dev/mixer: walk the card list looking for the codec
 * whose mixer minor matches this inode, waiting out any card that is
 * still probing.  Returns -ENODEV when no codec claims the minor.
 */
static int ali_open_mixdev(struct inode *inode, struct file *file)
{
	int i;
	int minor = iminor(inode);
	struct ali_card *card = devs;
	for (card = devs; card != NULL; card = card->next) {
		/*
		 * If we are initializing and then fail, card could go
		 * away unexpectedly while we are in the for() loop.
		 * So, check for card on each iteration before we check
		 * for card->initializing to avoid a possible oops.
		 * This usually only matters for times when the driver is
		 * autoloaded by kmod.
		 */
		for (i = 0; i < 50 && card && card->initializing; i++) {
			/* sleep up to 50 * 50ms for the probe to finish */
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ / 20);
		}
		for (i = 0; i < NR_AC97 && card && !card->initializing; i++)
			if (card->ac97_codec[i] != NULL
			    && card->ac97_codec[i]->dev_mixer == minor) {
				file->private_data = card->ac97_codec[i];
				return nonseekable_open(inode, file);
			}
	}
	return -ENODEV;
}
3053
3054static int ali_ioctl_mixdev(struct inode *inode,
3055 struct file *file,
3056 unsigned int cmd, unsigned long arg)
3057{
3058 struct ac97_codec *codec = (struct ac97_codec *) file->private_data;
3059 return codec->mixer_ioctl(codec, cmd, arg);
3060}
3061
/* File operations for the /dev/mixer device. */
static /*const */ struct file_operations ali_mixer_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.ioctl = ali_ioctl_mixdev,
	.open = ali_open_mixdev,
};
3068
3069/* AC97 codec initialisation. These small functions exist so we don't
3070 duplicate code between module init and apm resume */
3071
3072static inline int ali_ac97_exists(struct ali_card *card, int ac97_number)
3073{
3074 unsigned int i = 1;
3075 u32 reg = inl(card->iobase + ALI_RTSR);
3076 if (ac97_number) {
3077 while (i < 100) {
3078
3079 reg = inl(card->iobase + ALI_RTSR);
3080 if (reg & 0x40) {
3081 break;
3082 } else {
3083 outl(reg | 0x00000040,
3084 card->iobase + 0x34);
3085 udelay(1);
3086 }
3087 i++;
3088 }
3089
3090 } else {
3091 while (i < 100) {
3092 reg = inl(card->iobase + ALI_RTSR);
3093 if (reg & 0x80) {
3094 break;
3095 } else {
3096 outl(reg | 0x00000080,
3097 card->iobase + 0x34);
3098 udelay(1);
3099 }
3100 i++;
3101 }
3102 }
3103
3104 if (ac97_number)
3105 return reg & 0x40;
3106 else
3107 return reg & 0x80;
3108}
3109
/*
 * Try to enable Variable Rate Audio (VRA) in the codec's extended
 * status register.  Returns nonzero (bit 0 of the register read back)
 * when the codec accepted the request, 0 when it is fixed at 48kHz.
 */
static inline int ali_ac97_enable_variable_rate(struct ac97_codec *codec)
{
	/* write 9 first, then OR in 0xE800 -- NOTE(review): the exact
	   meaning of these extra bits is not verifiable from this file;
	   presumably VRA plus S/PDIF-related enables */
	ali_ac97_set(codec, AC97_EXTENDED_STATUS, 9);
	ali_ac97_set(codec, AC97_EXTENDED_STATUS, ali_ac97_get(codec, AC97_EXTENDED_STATUS) | 0xE800);
	return (ali_ac97_get(codec, AC97_EXTENDED_STATUS) & 1);
}
3116
3117
/*
 * Probe the codec via the generic AC97 layer, program a default mixer
 * setup, and power up the analog sections.
 *
 * Returns 0 when ac97_probe_codec() fails.  Because of the "FIXME"
 * increment at the bottom, the analog-ready timeout is never reported:
 * the function returns at least 1 once the probe succeeds.
 */
static int ali_ac97_probe_and_powerup(struct ali_card *card, struct ac97_codec *codec)
{
	/* Returns 0 on failure */
	int i;
	u16 addr;
	if (ac97_probe_codec(codec) == 0)
		return 0;
	/* probe succeeded, begin codec initialisation */
	ali_ac97_set(codec, AC97_RESET, 0xffff);
	if (card->channel[0].used == 1) {
		/* PCM-in channel in use: route line-in for recording */
		ali_ac97_set(codec, AC97_RECORD_SELECT, 0x0000);
		ali_ac97_set(codec, AC97_LINEIN_VOL, 0x0808);
		ali_ac97_set(codec, AC97_RECORD_GAIN, 0x0F0F);
	}

	if (card->channel[2].used == 1)	//if MICin then init codec
	{
		ali_ac97_set(codec, AC97_RECORD_SELECT, 0x0000);
		ali_ac97_set(codec, AC97_MIC_VOL, 0x8808);
		ali_ac97_set(codec, AC97_RECORD_GAIN, 0x0F0F);
		ali_ac97_set(codec, AC97_RECORD_GAIN_MIC, 0x0000);
	}

	/* unmute outputs (0x0000) and set aux inputs to mid level */
	ali_ac97_set(codec, AC97_MASTER_VOL_STEREO, 0x0000);
	ali_ac97_set(codec, AC97_HEADPHONE_VOL, 0x0000);
	ali_ac97_set(codec, AC97_PCMOUT_VOL, 0x0000);
	ali_ac97_set(codec, AC97_CD_VOL, 0x0808);
	ali_ac97_set(codec, AC97_VIDEO_VOL, 0x0808);
	ali_ac97_set(codec, AC97_AUX_VOL, 0x0808);
	ali_ac97_set(codec, AC97_PHONE_VOL, 0x8048);
	ali_ac97_set(codec, AC97_PCBEEP_VOL, 0x0000);
	ali_ac97_set(codec, AC97_GENERAL_PURPOSE, AC97_GP_MIX);
	ali_ac97_set(codec, AC97_MASTER_VOL_MONO, 0x0000);
	ali_ac97_set(codec, 0x38, 0x0000);
	/* NOTE(review): raw register numbers below look like the AC97
	   extended-status (0x2a) and PCM DAC rate (0x2c, 0xbb80=48000)
	   registers; the 0x28/0x2a/0x2c reads appear to be dummy reads
	   to flush the writes -- confirm against the AC97 spec */
	addr = ali_ac97_get(codec, 0x2a);
	ali_ac97_set(codec, 0x2a, addr | 0x0001);
	addr = ali_ac97_get(codec, 0x2a);
	addr = ali_ac97_get(codec, 0x28);
	ali_ac97_set(codec, 0x2c, 0xbb80);
	addr = ali_ac97_get(codec, 0x2c);
	/* power it all up */
	ali_ac97_set(codec, AC97_POWER_CONTROL,
		     ali_ac97_get(codec, AC97_POWER_CONTROL) & ~0x7f00);
	/* wait for analog ready */
	for (i = 10; i && ((ali_ac97_get(codec, AC97_POWER_CONTROL) & 0xf) != 0xf); i--) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ / 20);
	}
	/* FIXME !! i is 0 here when the analog sections never came
	   ready, but the increment below turns that into 1, so the
	   timeout is silently ignored and success is always reported. */
	i++;
	return i;
}
3170
3171
3172/* I clone ali5455(2.4.7 ) not clone i810_audio(2.4.18) */
3173
/*
 * Reset the ALI 5455 controller and select the S/PDIF routing
 * requested by the module parameters.  Always returns 1 (success).
 */
static int ali_reset_5455(struct ali_card *card)
{
	/* controller reset and FIFO setup -- magic values carried over
	   from the 2.4 ali5455 driver */
	outl(0x80000003, card->iobase + ALI_SCR);
	outl(0x83838383, card->iobase + ALI_FIFOCR1);
	outl(0x83838383, card->iobase + ALI_FIFOCR2);
	if (controller_pcmout_share_spdif_locked > 0) {
		outl((inl(card->iobase + ALI_SPDIFICS) | 0x00000001),
		     card->iobase + ALI_SPDIFICS);
		outl(0x0408000a, card->iobase + ALI_INTERFACECR);
	} else {
		if (codec_independent_spdif_locked > 0) {
			outl((inl(card->iobase + ALI_SCR) | 0x00100000), card->iobase + ALI_SCR);	// now I select slot 7 & 8
			outl(0x00200000, card->iobase + ALI_INTERFACECR);	//enable codec independent spdifout
		} else
			outl(0x04080002, card->iobase + ALI_INTERFACECR);
	}

	/* quiesce interrupt control and status registers */
	outl(0x00000000, card->iobase + ALI_INTERRUPTCR);
	outl(0x00000000, card->iobase + ALI_INTERRUPTSR);
	if (controller_independent_spdif_locked > 0)
		outl((inl(card->iobase + ALI_SPDIFICS) | 0x00000001),
		     card->iobase + ALI_SPDIFICS);
	return 1;
}
3198
3199
3200static int ali_ac97_random_init_stuff(struct ali_card
3201 *card)
3202{
3203 u32 reg = inl(card->iobase + ALI_SCR);
3204 int i = 0;
3205 reg = inl(card->iobase + ALI_SCR);
3206 if ((reg & 2) == 0) /* Cold required */
3207 reg |= 2;
3208 else
3209 reg |= 1; /* Warm */
3210 reg &= ~0x80000000; /* ACLink on */
3211 outl(reg, card->iobase + ALI_SCR);
3212
3213 while (i < 10) {
3214 if ((inl(card->iobase + 0x18) & (1 << 1)) == 0)
3215 break;
3216 current->state = TASK_UNINTERRUPTIBLE;
3217 schedule_timeout(HZ / 20);
3218 i++;
3219 }
3220 if (i == 10) {
3221 printk(KERN_ERR "ali_audio: AC'97 reset failed.\n");
3222 return 0;
3223 }
3224
3225 set_current_state(TASK_UNINTERRUPTIBLE);
3226 schedule_timeout(HZ / 2);
3227 return 1;
3228}
3229
3230/* AC97 codec initialisation. */
3231
3232static int __devinit ali_ac97_init(struct ali_card *card)
3233{
3234 int num_ac97 = 0;
3235 int total_channels = 0;
3236 struct ac97_codec *codec;
3237 u16 eid;
3238
3239 if (!ali_ac97_random_init_stuff(card))
3240 return 0;
3241
3242 /* Number of channels supported */
3243 /* What about the codec? Just because the ICH supports */
3244 /* multiple channels doesn't mean the codec does. */
3245 /* we'll have to modify this in the codec section below */
3246 /* to reflect what the codec has. */
3247 /* ICH and ICH0 only support 2 channels so don't bother */
3248 /* to check.... */
3249 inl(card->iobase + ALI_CPR);
3250 card->channels = 2;
3251
3252 for (num_ac97 = 0; num_ac97 < NR_AC97; num_ac97++) {
3253
3254 /* Assume codec isn't available until we go through the
3255 * gauntlet below */
3256 card->ac97_codec[num_ac97] = NULL;
3257 /* The ICH programmer's reference says you should */
3258 /* check the ready status before probing. So we chk */
3259 /* What do we do if it's not ready? Wait and try */
3260 /* again, or abort? */
3261 if (!ali_ac97_exists(card, num_ac97)) {
3262 if (num_ac97 == 0)
3263 printk(KERN_ERR "ali_audio: Primary codec not ready.\n");
3264 break;
3265 }
3266
3267 if ((codec = ac97_alloc_codec()) == NULL)
3268 return -ENOMEM;
3269 /* initialize some basic codec information, other fields will be filled
3270 in ac97_probe_codec */
3271 codec->private_data = card;
3272 codec->id = num_ac97;
3273 codec->codec_read = ali_ac97_get;
3274 codec->codec_write = ali_ac97_set;
3275 if (!ali_ac97_probe_and_powerup(card, codec)) {
3276 printk(KERN_ERR "ali_audio: timed out waiting for codec %d analog ready",
3277 num_ac97);
3278 kfree(codec);
3279 break; /* it didn't work */
3280 }
3281
3282 /* Store state information about S/PDIF transmitter */
3283 card->ac97_status = 0;
3284 /* Don't attempt to get eid until powerup is complete */
3285 eid = ali_ac97_get(codec, AC97_EXTENDED_ID);
3286 if (eid == 0xFFFF) {
3287 printk(KERN_ERR "ali_audio: no codec attached ?\n");
3288 kfree(codec);
3289 break;
3290 }
3291
3292 card->ac97_features = eid;
3293 /* Now check the codec for useful features to make up for
3294 the dumbness of the ali5455 hardware engine */
3295 if (!(eid & 0x0001))
3296 printk(KERN_WARNING
3297 "ali_audio: only 48Khz playback available.\n");
3298 else {
3299 if (!ali_ac97_enable_variable_rate(codec)) {
3300 printk(KERN_WARNING
3301 "ali_audio: Codec refused to allow VRA, using 48Khz only.\n");
3302 card->ac97_features &= ~1;
3303 }
3304 }
3305
3306 /* Determine how many channels the codec(s) support */
3307 /* - The primary codec always supports 2 */
3308 /* - If the codec supports AMAP, surround DACs will */
3309 /* automaticlly get assigned to slots. */
3310 /* * Check for surround DACs and increment if */
3311 /* found. */
3312 /* - Else check if the codec is revision 2.2 */
3313 /* * If surround DACs exist, assign them to slots */
3314 /* and increment channel count. */
3315
3316 /* All of this only applies to ICH2 and above. ICH */
3317 /* and ICH0 only support 2 channels. ICH2 will only */
3318 /* support multiple codecs in a "split audio" config. */
3319 /* as described above. */
3320
3321 /* TODO: Remove all the debugging messages! */
3322
3323 if ((eid & 0xc000) == 0) /* primary codec */
3324 total_channels += 2;
3325 if ((codec->dev_mixer = register_sound_mixer(&ali_mixer_fops, -1)) < 0) {
3326 printk(KERN_ERR "ali_audio: couldn't register mixer!\n");
3327 kfree(codec);
3328 break;
3329 }
3330 card->ac97_codec[num_ac97] = codec;
3331 }
3332 /* pick the minimum of channels supported by ICHx or codec(s) */
3333 card->channels = (card->channels > total_channels) ? total_channels : card->channels;
3334 return num_ac97;
3335}
3336
3337static void __devinit ali_configure_clocking(void)
3338{
3339 struct ali_card *card;
3340 struct ali_state *state;
3341 struct dmabuf *dmabuf;
3342 unsigned int i, offset, new_offset;
3343 unsigned long flags;
3344 card = devs;
3345
3346 /* We could try to set the clocking for multiple cards, but can you even have
3347 * more than one ali in a machine? Besides, clocking is global, so unless
3348 * someone actually thinks more than one ali in a machine is possible and
3349 * decides to rewrite that little bit, setting the rate for more than one card
3350 * is a waste of time.
3351 */
3352 if (card != NULL) {
3353 state = card->states[0] = (struct ali_state *)
3354 kmalloc(sizeof(struct ali_state), GFP_KERNEL);
3355 if (state == NULL)
3356 return;
3357 memset(state, 0, sizeof(struct ali_state));
3358 dmabuf = &state->dmabuf;
3359 dmabuf->write_channel = card->alloc_pcm_channel(card);
3360 state->virt = 0;
3361 state->card = card;
3362 state->magic = ALI5455_STATE_MAGIC;
3363 init_waitqueue_head(&dmabuf->wait);
3364 mutex_init(&state->open_mutex);
3365 dmabuf->fmt = ALI5455_FMT_STEREO | ALI5455_FMT_16BIT;
3366 dmabuf->trigger = PCM_ENABLE_OUTPUT;
3367 ali_set_dac_rate(state, 48000);
3368 if (prog_dmabuf(state, 0) != 0)
3369 goto config_out_nodmabuf;
3370
3371 if (dmabuf->dmasize < 16384)
3372 goto config_out;
3373
3374 dmabuf->count = dmabuf->dmasize;
3375 outb(31, card->iobase + dmabuf->write_channel->port + OFF_LVI);
3376
3377 local_irq_save(flags);
3378 start_dac(state);
3379 offset = ali_get_dma_addr(state, 0);
3380 mdelay(50);
3381 new_offset = ali_get_dma_addr(state, 0);
3382 stop_dac(state);
3383
3384 outb(2, card->iobase + dmabuf->write_channel->port + OFF_CR);
3385 local_irq_restore(flags);
3386
3387 i = new_offset - offset;
3388
3389 if (i == 0)
3390 goto config_out;
3391 i = i / 4 * 20;
3392 if (i > 48500 || i < 47500) {
3393 clocking = clocking * clocking / i;
3394 }
3395config_out:
3396 dealloc_dmabuf(state);
3397config_out_nodmabuf:
3398 state->card->free_pcm_channel(state->card, state->dmabuf. write_channel->num);
3399 kfree(state);
3400 card->states[0] = NULL;
3401 }
3402}
3403
3404/* install the driver, we do not allocate hardware channel nor DMA buffer now, they are defered
3405 until "ACCESS" time (in prog_dmabuf called by open/read/write/ioctl/mmap) */
3406
3407static int __devinit ali_probe(struct pci_dev *pci_dev,
3408 const struct pci_device_id *pci_id)
3409{
3410 struct ali_card *card;
3411 if (pci_enable_device(pci_dev))
3412 return -EIO;
3413 if (pci_set_dma_mask(pci_dev, ALI5455_DMA_MASK)) {
3414 printk(KERN_ERR "ali5455: architecture does not support"
3415 " 32bit PCI busmaster DMA\n");
3416 return -ENODEV;
3417 }
3418
3419 if ((card = kmalloc(sizeof(struct ali_card), GFP_KERNEL)) == NULL) {
3420 printk(KERN_ERR "ali_audio: out of memory\n");
3421 return -ENOMEM;
3422 }
3423 memset(card, 0, sizeof(*card));
3424 card->initializing = 1;
3425 card->iobase = pci_resource_start(pci_dev, 0);
3426 card->pci_dev = pci_dev;
3427 card->pci_id = pci_id->device;
3428 card->irq = pci_dev->irq;
3429 card->next = devs;
3430 card->magic = ALI5455_CARD_MAGIC;
3431#ifdef CONFIG_PM
3432 card->pm_suspended = 0;
3433#endif
3434 spin_lock_init(&card->lock);
3435 spin_lock_init(&card->ac97_lock);
3436 devs = card;
3437 pci_set_master(pci_dev);
3438 printk(KERN_INFO "ali: %s found at IO 0x%04lx, IRQ %d\n",
3439 card_names[pci_id->driver_data], card->iobase, card->irq);
3440 card->alloc_pcm_channel = ali_alloc_pcm_channel;
3441 card->alloc_rec_pcm_channel = ali_alloc_rec_pcm_channel;
3442 card->alloc_rec_mic_channel = ali_alloc_rec_mic_channel;
3443 card->alloc_codec_spdifout_channel = ali_alloc_codec_spdifout_channel;
3444 card->alloc_controller_spdifout_channel = ali_alloc_controller_spdifout_channel;
3445 card->free_pcm_channel = ali_free_pcm_channel;
3446 card->channel[0].offset = 0;
3447 card->channel[0].port = 0x40;
3448 card->channel[0].num = 0;
3449 card->channel[1].offset = 0;
3450 card->channel[1].port = 0x50;
3451 card->channel[1].num = 1;
3452 card->channel[2].offset = 0;
3453 card->channel[2].port = 0x60;
3454 card->channel[2].num = 2;
3455 card->channel[3].offset = 0;
3456 card->channel[3].port = 0x70;
3457 card->channel[3].num = 3;
3458 card->channel[4].offset = 0;
3459 card->channel[4].port = 0xb0;
3460 card->channel[4].num = 4;
3461 /* claim our iospace and irq */
3462 request_region(card->iobase, 256, card_names[pci_id->driver_data]);
3463 if (request_irq(card->irq, &ali_interrupt, IRQF_SHARED,
3464 card_names[pci_id->driver_data], card)) {
3465 printk(KERN_ERR "ali_audio: unable to allocate irq %d\n",
3466 card->irq);
3467 release_region(card->iobase, 256);
3468 kfree(card);
3469 return -ENODEV;
3470 }
3471
3472 if (ali_reset_5455(card) <= 0) {
3473 unregister_sound_dsp(card->dev_audio);
3474 release_region(card->iobase, 256);
3475 free_irq(card->irq, card);
3476 kfree(card);
3477 return -ENODEV;
3478 }
3479
3480 /* initialize AC97 codec and register /dev/mixer */
3481 if (ali_ac97_init(card) < 0) {
3482 release_region(card->iobase, 256);
3483 free_irq(card->irq, card);
3484 kfree(card);
3485 return -ENODEV;
3486 }
3487
3488 pci_set_drvdata(pci_dev, card);
3489
3490 if (clocking == 0) {
3491 clocking = 48000;
3492 ali_configure_clocking();
3493 }
3494
3495 /* register /dev/dsp */
3496 if ((card->dev_audio = register_sound_dsp(&ali_audio_fops, -1)) < 0) {
3497 int i;
3498 printk(KERN_ERR"ali_audio: couldn't register DSP device!\n");
3499 release_region(card->iobase, 256);
3500 free_irq(card->irq, card);
3501 for (i = 0; i < NR_AC97; i++)
3502 if (card->ac97_codec[i] != NULL) {
3503 unregister_sound_mixer(card->ac97_codec[i]->dev_mixer);
3504 kfree(card->ac97_codec[i]);
3505 }
3506 kfree(card);
3507 return -ENODEV;
3508 }
3509 card->initializing = 0;
3510 return 0;
3511}
3512
3513static void __devexit ali_remove(struct pci_dev *pci_dev)
3514{
3515 int i;
3516 struct ali_card *card = pci_get_drvdata(pci_dev);
3517 /* free hardware resources */
3518 free_irq(card->irq, devs);
3519 release_region(card->iobase, 256);
3520 /* unregister audio devices */
3521 for (i = 0; i < NR_AC97; i++)
3522 if (card->ac97_codec[i] != NULL) {
3523 unregister_sound_mixer(card->ac97_codec[i]->
3524 dev_mixer);
3525 ac97_release_codec(card->ac97_codec[i]);
3526 card->ac97_codec[i] = NULL;
3527 }
3528 unregister_sound_dsp(card->dev_audio);
3529 kfree(card);
3530}
3531
3532#ifdef CONFIG_PM
/*
 * PM suspend: stop all running DMA, remember the active sample rates
 * and mixer settings so ali_pm_resume() can restore them, then put the
 * device into D3.  Always returns 0.
 */
static int ali_pm_suspend(struct pci_dev *dev, pm_message_t pm_state)
{
	struct ali_card *card = pci_get_drvdata(dev);
	struct ali_state *state;
	unsigned long flags;
	struct dmabuf *dmabuf;
	int i, num_ac97;

	if (!card)
		return 0;
	spin_lock_irqsave(&card->lock, flags);
	card->pm_suspended = 1;
	for (i = 0; i < NR_HW_CH; i++) {
		state = card->states[i];
		if (!state)
			continue;
		/* this happens only if there are open files */
		dmabuf = &state->dmabuf;
		if (dmabuf->enable & DAC_RUNNING ||
		    (dmabuf->count
		     && (dmabuf->trigger & PCM_ENABLE_OUTPUT))) {
			state->pm_saved_dac_rate = dmabuf->rate;
			stop_dac(state);
		} else {
			state->pm_saved_dac_rate = 0;	/* 0 = was not playing */
		}
		if (dmabuf->enable & ADC_RUNNING) {
			state->pm_saved_adc_rate = dmabuf->rate;
			stop_adc(state);
		} else {
			state->pm_saved_adc_rate = 0;
		}
		/* buffer contents do not survive suspend: reset pointers */
		dmabuf->ready = 0;
		dmabuf->swptr = dmabuf->hwptr = 0;
		dmabuf->count = dmabuf->total_bytes = 0;
	}

	spin_unlock_irqrestore(&card->lock, flags);
	/* save mixer settings */
	for (num_ac97 = 0; num_ac97 < NR_AC97; num_ac97++) {
		struct ac97_codec *codec = card->ac97_codec[num_ac97];
		if (!codec)
			continue;
		for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
			if ((supported_mixer(codec, i)) && (codec->read_mixer)) {
				card->pm_saved_mixer_settings[i][num_ac97] = codec->read_mixer(codec, i);
			}
		}
	}
	pci_save_state(dev);	/* XXX do we need this? */
	pci_disable_device(dev);	/* disable busmastering */
	pci_set_power_state(dev, 3);	/* D3hot: Zzz. */
	return 0;
}
3587
3588
/*
 * PM resume: reinitialize the controller and codecs from scratch (the
 * hardware forgets its state across suspend), restore the mixer
 * settings and sample rates saved by ali_pm_suspend(), and wake any
 * readers/writers that slept through the suspend.  Always returns 0.
 */
static int ali_pm_resume(struct pci_dev *dev)
{
	int num_ac97, i = 0;
	struct ali_card *card = pci_get_drvdata(dev);
	pci_enable_device(dev);
	pci_restore_state(dev);
	/* observation of a toshiba portege 3440ct suggests that the
	   hardware has to be more or less completely reinitialized from
	   scratch after an apm suspend.  Works For Me.   -dan */
	ali_ac97_random_init_stuff(card);
	for (num_ac97 = 0; num_ac97 < NR_AC97; num_ac97++) {
		struct ac97_codec *codec = card->ac97_codec[num_ac97];
		/* check they haven't stolen the hardware while we were
		   away */
		if (!codec || !ali_ac97_exists(card, num_ac97)) {
			if (num_ac97)
				continue;
			else
				BUG();	/* losing the primary codec is fatal */
		}
		if (!ali_ac97_probe_and_powerup(card, codec))
			BUG();
		if ((card->ac97_features & 0x0001)) {
			/* at probe time we found we could do variable
			   rates, but APM suspend has made it forget
			   its magical powers */
			if (!ali_ac97_enable_variable_rate(codec))
				BUG();
		}
		/* we lost our mixer settings, so restore them */
		for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
			if (supported_mixer(codec, i)) {
				int val = card->pm_saved_mixer_settings[i][num_ac97];
				codec->mixer_state[i] = val;
				codec->write_mixer(codec, i,
						   (val & 0xff),
						   ((val >> 8) & 0xff));
			}
		}
	}

	/* we need to restore the sample rate from whatever it was */
	for (i = 0; i < NR_HW_CH; i++) {
		struct ali_state *state = card->states[i];
		if (state) {
			if (state->pm_saved_adc_rate)
				ali_set_adc_rate(state, state->pm_saved_adc_rate);
			if (state->pm_saved_dac_rate)
				ali_set_dac_rate(state, state->pm_saved_dac_rate);
		}
	}

	card->pm_suspended = 0;
	/* any processes that were reading/writing during the suspend
	   probably ended up here */
	for (i = 0; i < NR_HW_CH; i++) {
		struct ali_state *state = card->states[i];
		if (state)
			wake_up(&state->dmabuf.wait);
	}
	return 0;
}
3651#endif /* CONFIG_PM */
3652
MODULE_AUTHOR("");
MODULE_DESCRIPTION("ALI 5455 audio support");
MODULE_LICENSE("GPL");
/* global DAC clock calibration; 0 = measure at probe (see ali_probe) */
module_param(clocking, int, 0);
/* FIXME: bool? */
module_param(strict_clocking, uint, 0);
/* S/PDIF rate locks: valid values are 0 (off), 32000, 44100, 48000
   (validated in ali_init_module) */
module_param(codec_pcmout_share_spdif_locked, uint, 0);
module_param(codec_independent_spdif_locked, uint, 0);
module_param(controller_pcmout_share_spdif_locked, uint, 0);
module_param(controller_independent_spdif_locked, uint, 0);
#define ALI5455_MODULE_NAME "ali5455"
/* PCI driver glue; the PM hooks are only compiled in with CONFIG_PM */
static struct pci_driver ali_pci_driver = {
	.name = ALI5455_MODULE_NAME,
	.id_table = ali_pci_tbl,
	.probe = ali_probe,
	.remove = __devexit_p(ali_remove),
#ifdef CONFIG_PM
	.suspend = ali_pm_suspend,
	.resume = ali_pm_resume,
#endif				/* CONFIG_PM */
};
3674
3675static int __init ali_init_module(void)
3676{
3677 printk(KERN_INFO "ALI 5455 + AC97 Audio, version "
3678 DRIVER_VERSION ", " __TIME__ " " __DATE__ "\n");
3679
3680 if (codec_independent_spdif_locked > 0) {
3681 if (codec_independent_spdif_locked == 32000
3682 || codec_independent_spdif_locked == 44100
3683 || codec_independent_spdif_locked == 48000) {
3684 printk(KERN_INFO "ali_audio: Enabling S/PDIF at sample rate %dHz.\n", codec_independent_spdif_locked);
3685 } else {
3686 printk(KERN_INFO "ali_audio: S/PDIF can only be locked to 32000, 44100, or 48000Hz.\n");
3687 codec_independent_spdif_locked = 0;
3688 }
3689 }
3690 if (controller_independent_spdif_locked > 0) {
3691 if (controller_independent_spdif_locked == 32000
3692 || controller_independent_spdif_locked == 44100
3693 || controller_independent_spdif_locked == 48000) {
3694 printk(KERN_INFO "ali_audio: Enabling S/PDIF at sample rate %dHz.\n", controller_independent_spdif_locked);
3695 } else {
3696 printk(KERN_INFO "ali_audio: S/PDIF can only be locked to 32000, 44100, or 48000Hz.\n");
3697 controller_independent_spdif_locked = 0;
3698 }
3699 }
3700
3701 if (codec_pcmout_share_spdif_locked > 0) {
3702 if (codec_pcmout_share_spdif_locked == 32000
3703 || codec_pcmout_share_spdif_locked == 44100
3704 || codec_pcmout_share_spdif_locked == 48000) {
3705 printk(KERN_INFO "ali_audio: Enabling S/PDIF at sample rate %dHz.\n", codec_pcmout_share_spdif_locked);
3706 } else {
3707 printk(KERN_INFO "ali_audio: S/PDIF can only be locked to 32000, 44100, or 48000Hz.\n");
3708 codec_pcmout_share_spdif_locked = 0;
3709 }
3710 }
3711 if (controller_pcmout_share_spdif_locked > 0) {
3712 if (controller_pcmout_share_spdif_locked == 32000
3713 || controller_pcmout_share_spdif_locked == 44100
3714 || controller_pcmout_share_spdif_locked == 48000) {
3715 printk(KERN_INFO "ali_audio: Enabling controller S/PDIF at sample rate %dHz.\n", controller_pcmout_share_spdif_locked);
3716 } else {
3717 printk(KERN_INFO "ali_audio: S/PDIF can only be locked to 32000, 44100, or 48000Hz.\n");
3718 controller_pcmout_share_spdif_locked = 0;
3719 }
3720 }
3721 return pci_register_driver(&ali_pci_driver);
3722}
3723
/* Module exit: unbinding each device runs ali_remove() for it. */
static void __exit ali_cleanup_module(void)
{
	pci_unregister_driver(&ali_pci_driver);
}

module_init(ali_init_module);
module_exit(ali_cleanup_module);
3731/*
3732Local Variables:
3733c-basic-offset: 8
3734End:
3735*/
diff --git a/sound/oss/au1000.c b/sound/oss/au1000.c
deleted file mode 100644
index e3796231452a..000000000000
--- a/sound/oss/au1000.c
+++ /dev/null
@@ -1,2216 +0,0 @@
1/*
2 * au1000.c -- Sound driver for Alchemy Au1000 MIPS Internet Edge
3 * Processor.
4 *
5 * Copyright 2001 MontaVista Software Inc.
6 * Author: MontaVista Software, Inc.
7 * stevel@mvista.com or source@mvista.com
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
20 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 * You should have received a copy of the GNU General Public License along
26 * with this program; if not, write to the Free Software Foundation, Inc.,
27 * 675 Mass Ave, Cambridge, MA 02139, USA.
28 *
29 *
30 * Module command line parameters:
31 *
32 * Supported devices:
33 * /dev/dsp standard OSS /dev/dsp device
34 * /dev/mixer standard OSS /dev/mixer device
35 *
36 * Notes:
37 *
38 * 1. Much of the OSS buffer allocation, ioctl's, and mmap'ing are
39 * taken, slightly modified or not at all, from the ES1371 driver,
40 * so refer to the credits in es1371.c for those. The rest of the
41 * code (probe, open, read, write, the ISR, etc.) is new.
42 *
43 * Revision history
44 * 06.27.2001 Initial version
45 * 03.20.2002 Added mutex locks around read/write methods, to prevent
46 * simultaneous access on SMP or preemptible kernels. Also
47 * removed the counter/pointer fragment aligning at the end
48 * of read/write methods [stevel].
49 * 03.21.2002 Add support for coherent DMA on the audio read/write DMA
50 * channels [stevel].
51 *
52 */
53#include <linux/module.h>
54#include <linux/string.h>
55#include <linux/ioport.h>
56#include <linux/sched.h>
57#include <linux/delay.h>
58#include <linux/sound.h>
59#include <linux/slab.h>
60#include <linux/soundcard.h>
61#include <linux/init.h>
62#include <linux/page-flags.h>
63#include <linux/poll.h>
64#include <linux/pci.h>
65#include <linux/bitops.h>
66#include <linux/proc_fs.h>
67#include <linux/spinlock.h>
68#include <linux/smp_lock.h>
69#include <linux/ac97_codec.h>
70#include <linux/interrupt.h>
71#include <linux/mutex.h>
72
73#include <asm/io.h>
74#include <asm/uaccess.h>
75#include <asm/mach-au1x00/au1000.h>
76#include <asm/mach-au1x00/au1000_dma.h>
77
78/* --------------------------------------------------------------------- */
79
80#undef OSS_DOCUMENTED_MIXER_SEMANTICS
81#undef AU1000_DEBUG
82#undef AU1000_VERBOSE_DEBUG
83
84#define AU1000_MODULE_NAME "Au1000 audio"
85#define PFX AU1000_MODULE_NAME
86
87#ifdef AU1000_DEBUG
88#define dbg(format, arg...) printk(KERN_DEBUG PFX ": " format "\n" , ## arg)
89#else
90#define dbg(format, arg...) do {} while (0)
91#endif
92#define err(format, arg...) printk(KERN_ERR PFX ": " format "\n" , ## arg)
93#define info(format, arg...) printk(KERN_INFO PFX ": " format "\n" , ## arg)
94#define warn(format, arg...) printk(KERN_WARNING PFX ": " format "\n" , ## arg)
95
96
97/* misc stuff */
98#define POLL_COUNT 0x5000
99#define AC97_EXT_DACS (AC97_EXTID_SDAC | AC97_EXTID_CDAC | AC97_EXTID_LDAC)
100
101/* Boot options */
102static int vra = 0; // 0 = no VRA, 1 = use VRA if codec supports it
103module_param(vra, bool, 0);
104MODULE_PARM_DESC(vra, "if 1 use VRA if codec supports it");
105
106
107/* --------------------------------------------------------------------- */
108
/*
 * Driver state for the single on-chip AC'97 controller.  One static
 * instance (au1000_state, declared at the closing brace) is used.
 */
struct au1000_state {
	/* soundcore stuff */
	int dev_audio;		// handle from register_sound_dsp() (presumably; registration is outside this view)

#ifdef AU1000_DEBUG
	/* debug /proc entry */
	struct proc_dir_entry *ps;
	struct proc_dir_entry *ac97_ps;
#endif /* AU1000_DEBUG */

	struct ac97_codec codec;
	unsigned codec_base_caps;// AC'97 reg 00h, "Reset Register"
	unsigned codec_ext_caps; // AC'97 reg 28h, "Extended Audio ID"
	int no_vra;		// do not use VRA

	spinlock_t lock;	// protects DMA buffer state and codec access
	struct mutex open_mutex;
	struct mutex sem;	// serializes read()/write() paths
	mode_t open_mode;
	wait_queue_head_t open_wait;

	/* one dmabuf each for playback (dma_dac) and capture (dma_adc) */
	struct dmabuf {
		unsigned int dmanr;	// DMA Channel number
		unsigned sample_rate;	// Hz
		unsigned src_factor;	// SRC interp/decimation (no vra)
		unsigned sample_size;	// 8 or 16
		int num_channels;	// 1 = mono, 2 = stereo, 4, 6
		int dma_bytes_per_sample;// DMA bytes per audio sample frame
		int user_bytes_per_sample;// User bytes per audio sample frame
		int cnt_factor;		// user-to-DMA bytes per audio
					// sample frame
		void *rawbuf;
		dma_addr_t dmaaddr;
		unsigned buforder;
		unsigned numfrag;	// # of DMA fragments in DMA buffer
		unsigned fragshift;
		void *nextIn;		// ptr to next-in to DMA buffer
		void *nextOut;		// ptr to next-out from DMA buffer
		int count;		// current byte count in DMA buffer
		unsigned total_bytes;	// total bytes written or read
		unsigned error;		// over/underrun
		wait_queue_head_t wait;
		/* redundant, but makes calculations easier */
		unsigned fragsize;	// user perception of fragment size
		unsigned dma_fragsize;	// DMA (real) fragment size
		unsigned dmasize;	// Total DMA buffer size
					// (mult. of DMA fragsize)
		/* OSS stuff */
		unsigned mapped:1;	// buffer mmap'ed by user
		unsigned ready:1;	// buffer programmed (prog_dmabuf done)
		unsigned stopped:1;	// DMA currently halted
		unsigned ossfragshift;
		int ossmaxfrags;
		unsigned subdivision;
	} dma_dac , dma_adc;
} au1000_state;
165
166/* --------------------------------------------------------------------- */
167
168
/*
 * Integer base-2 logarithm: returns floor(log2(x)) for x >= 1,
 * and 0 for x == 0 (same as the original binary-search version).
 */
static inline unsigned ld2(unsigned int x)
{
	unsigned bits = 0;

	while (x > 1) {
		x >>= 1;
		bits++;
	}
	return bits;
}
193
194/* --------------------------------------------------------------------- */
195
196static void au1000_delay(int msec)
197{
198 unsigned long tmo;
199 signed long tmo2;
200
201 if (in_interrupt())
202 return;
203
204 tmo = jiffies + (msec * HZ) / 1000;
205 for (;;) {
206 tmo2 = tmo - jiffies;
207 if (tmo2 <= 0)
208 break;
209 schedule_timeout(tmo2);
210 }
211}
212
213
214/* --------------------------------------------------------------------- */
215
216static u16 rdcodec(struct ac97_codec *codec, u8 addr)
217{
218 struct au1000_state *s = (struct au1000_state *)codec->private_data;
219 unsigned long flags;
220 u32 cmd;
221 u16 data;
222 int i;
223
224 spin_lock_irqsave(&s->lock, flags);
225
226 for (i = 0; i < POLL_COUNT; i++)
227 if (!(au_readl(AC97C_STATUS) & AC97C_CP))
228 break;
229 if (i == POLL_COUNT)
230 err("rdcodec: codec cmd pending expired!");
231
232 cmd = (u32) addr & AC97C_INDEX_MASK;
233 cmd |= AC97C_READ; // read command
234 au_writel(cmd, AC97C_CMD);
235
236 /* now wait for the data */
237 for (i = 0; i < POLL_COUNT; i++)
238 if (!(au_readl(AC97C_STATUS) & AC97C_CP))
239 break;
240 if (i == POLL_COUNT) {
241 err("rdcodec: read poll expired!");
242 return 0;
243 }
244
245 data = au_readl(AC97C_CMD) & 0xffff;
246
247 spin_unlock_irqrestore(&s->lock, flags);
248
249 return data;
250}
251
252
253static void wrcodec(struct ac97_codec *codec, u8 addr, u16 data)
254{
255 struct au1000_state *s = (struct au1000_state *)codec->private_data;
256 unsigned long flags;
257 u32 cmd;
258 int i;
259
260 spin_lock_irqsave(&s->lock, flags);
261
262 for (i = 0; i < POLL_COUNT; i++)
263 if (!(au_readl(AC97C_STATUS) & AC97C_CP))
264 break;
265 if (i == POLL_COUNT)
266 err("wrcodec: codec cmd pending expired!");
267
268 cmd = (u32) addr & AC97C_INDEX_MASK;
269 cmd &= ~AC97C_READ; // write command
270 cmd |= ((u32) data << AC97C_WD_BIT); // OR in the data word
271 au_writel(cmd, AC97C_CMD);
272
273 spin_unlock_irqrestore(&s->lock, flags);
274}
275
276static void waitcodec(struct ac97_codec *codec)
277{
278 u16 temp;
279 int i;
280
281 /* codec_wait is used to wait for a ready state after
282 an AC97C_RESET. */
283 au1000_delay(10);
284
285 // first poll the CODEC_READY tag bit
286 for (i = 0; i < POLL_COUNT; i++)
287 if (au_readl(AC97C_STATUS) & AC97C_READY)
288 break;
289 if (i == POLL_COUNT) {
290 err("waitcodec: CODEC_READY poll expired!");
291 return;
292 }
293 // get AC'97 powerdown control/status register
294 temp = rdcodec(codec, AC97_POWER_CONTROL);
295
296 // If anything is powered down, power'em up
297 if (temp & 0x7f00) {
298 // Power on
299 wrcodec(codec, AC97_POWER_CONTROL, 0);
300 au1000_delay(100);
301 // Reread
302 temp = rdcodec(codec, AC97_POWER_CONTROL);
303 }
304
305 // Check if Codec REF,ANL,DAC,ADC ready
306 if ((temp & 0x7f0f) != 0x000f)
307 err("codec reg 26 status (0x%x) not ready!!", temp);
308}
309
310
311/* --------------------------------------------------------------------- */
312
/*
 * Program the capture sample rate.  Stop the ADC before calling.
 * Without VRA the hardware runs at 48 kHz and software decimation
 * (src_factor) approximates the requested rate; with VRA the rate is
 * written to the codec and read back, since codecs may round it.
 */
static void set_adc_rate(struct au1000_state *s, unsigned rate)
{
	struct dmabuf *adc = &s->dma_adc;
	struct dmabuf *dac = &s->dma_dac;
	unsigned adc_rate, dac_rate;
	u16 ac97_extstat;

	if (s->no_vra) {
		// calc SRC factor (rounded 48000/rate)
		adc->src_factor = ((96000 / rate) + 1) >> 1;
		adc->sample_rate = 48000 / adc->src_factor;
		return;
	}

	adc->src_factor = 1;

	ac97_extstat = rdcodec(&s->codec, AC97_EXTENDED_STATUS);

	// clamp to the AC'97 maximum
	rate = rate > 48000 ? 48000 : rate;

	// enable VRA
	wrcodec(&s->codec, AC97_EXTENDED_STATUS,
		ac97_extstat | AC97_EXTSTAT_VRA);
	// now write the sample rate
	wrcodec(&s->codec, AC97_PCM_LR_ADC_RATE, (u16) rate);
	// read it back for actual supported rate
	adc_rate = rdcodec(&s->codec, AC97_PCM_LR_ADC_RATE);

#ifdef AU1000_VERBOSE_DEBUG
	dbg("%s: set to %d Hz", __FUNCTION__, adc_rate);
#endif

	// some codec's don't allow unequal DAC and ADC rates, in which case
	// writing one rate reg actually changes both.
	dac_rate = rdcodec(&s->codec, AC97_PCM_FRONT_DAC_RATE);
	if (dac->num_channels > 2)
		wrcodec(&s->codec, AC97_PCM_SURR_DAC_RATE, dac_rate);
	if (dac->num_channels > 4)
		wrcodec(&s->codec, AC97_PCM_LFE_DAC_RATE, dac_rate);

	// record what the codec actually accepted
	adc->sample_rate = adc_rate;
	dac->sample_rate = dac_rate;
}
357
/*
 * Program the playback sample rate.  Stop the DAC before calling.
 * Mirrors set_adc_rate(): software interpolation when VRA is disabled,
 * codec-programmed rate (read back for the real value) otherwise.
 */
static void set_dac_rate(struct au1000_state *s, unsigned rate)
{
	struct dmabuf *dac = &s->dma_dac;
	struct dmabuf *adc = &s->dma_adc;
	unsigned adc_rate, dac_rate;
	u16 ac97_extstat;

	if (s->no_vra) {
		// calc SRC factor (rounded 48000/rate)
		dac->src_factor = ((96000 / rate) + 1) >> 1;
		dac->sample_rate = 48000 / dac->src_factor;
		return;
	}

	dac->src_factor = 1;

	ac97_extstat = rdcodec(&s->codec, AC97_EXTENDED_STATUS);

	// clamp to the AC'97 maximum
	rate = rate > 48000 ? 48000 : rate;

	// enable VRA
	wrcodec(&s->codec, AC97_EXTENDED_STATUS,
		ac97_extstat | AC97_EXTSTAT_VRA);
	// now write the sample rate
	wrcodec(&s->codec, AC97_PCM_FRONT_DAC_RATE, (u16) rate);
	// I don't support different sample rates for multichannel,
	// so make these channels the same.
	if (dac->num_channels > 2)
		wrcodec(&s->codec, AC97_PCM_SURR_DAC_RATE, (u16) rate);
	if (dac->num_channels > 4)
		wrcodec(&s->codec, AC97_PCM_LFE_DAC_RATE, (u16) rate);
	// read it back for actual supported rate
	dac_rate = rdcodec(&s->codec, AC97_PCM_FRONT_DAC_RATE);

#ifdef AU1000_VERBOSE_DEBUG
	dbg("%s: set to %d Hz", __FUNCTION__, dac_rate);
#endif

	// some codec's don't allow unequal DAC and ADC rates, in which case
	// writing one rate reg actually changes both.
	adc_rate = rdcodec(&s->codec, AC97_PCM_LR_ADC_RATE);

	// record what the codec actually accepted
	dac->sample_rate = dac_rate;
	adc->sample_rate = adc_rate;
}
404
405static void stop_dac(struct au1000_state *s)
406{
407 struct dmabuf *db = &s->dma_dac;
408 unsigned long flags;
409
410 if (db->stopped)
411 return;
412
413 spin_lock_irqsave(&s->lock, flags);
414
415 disable_dma(db->dmanr);
416
417 db->stopped = 1;
418
419 spin_unlock_irqrestore(&s->lock, flags);
420}
421
422static void stop_adc(struct au1000_state *s)
423{
424 struct dmabuf *db = &s->dma_adc;
425 unsigned long flags;
426
427 if (db->stopped)
428 return;
429
430 spin_lock_irqsave(&s->lock, flags);
431
432 disable_dma(db->dmanr);
433
434 db->stopped = 1;
435
436 spin_unlock_irqrestore(&s->lock, flags);
437}
438
439
440static void set_xmit_slots(int num_channels)
441{
442 u32 ac97_config = au_readl(AC97C_CONFIG) & ~AC97C_XMIT_SLOTS_MASK;
443
444 switch (num_channels) {
445 case 1: // mono
446 case 2: // stereo, slots 3,4
447 ac97_config |= (0x3 << AC97C_XMIT_SLOTS_BIT);
448 break;
449 case 4: // stereo with surround, slots 3,4,7,8
450 ac97_config |= (0x33 << AC97C_XMIT_SLOTS_BIT);
451 break;
452 case 6: // stereo with surround and center/LFE, slots 3,4,6,7,8,9
453 ac97_config |= (0x7b << AC97C_XMIT_SLOTS_BIT);
454 break;
455 }
456
457 au_writel(ac97_config, AC97C_CONFIG);
458}
459
460static void set_recv_slots(int num_channels)
461{
462 u32 ac97_config = au_readl(AC97C_CONFIG) & ~AC97C_RECV_SLOTS_MASK;
463
464 /*
465 * Always enable slots 3 and 4 (stereo). Slot 6 is
466 * optional Mic ADC, which I don't support yet.
467 */
468 ac97_config |= (0x3 << AC97C_RECV_SLOTS_BIT);
469
470 au_writel(ac97_config, AC97C_CONFIG);
471}
472
/*
 * Start playback DMA, programming the channel's ping-pong buffer pair
 * with the two fragments beginning at nextOut.  No-op unless the DAC
 * was stopped.
 */
static void start_dac(struct au1000_state *s)
{
	struct dmabuf *db = &s->dma_dac;
	unsigned long flags;
	unsigned long buf1, buf2;

	if (!db->stopped)
		return;

	spin_lock_irqsave(&s->lock, flags);

	au_readl(AC97C_STATUS);	// read status to clear sticky bits

	// reset Buffer 1 and 2 pointers to nextOut and nextOut+dma_fragsize
	buf1 = virt_to_phys(db->nextOut);
	buf2 = buf1 + db->dma_fragsize;
	if (buf2 >= db->dmaaddr + db->dmasize)
		buf2 -= db->dmasize;	// wrap to ring start

	set_xmit_slots(db->num_channels);

	init_dma(db->dmanr);
	// put buf1 (next fragment to play) on whichever HW buffer is active
	if (get_dma_active_buffer(db->dmanr) == 0) {
		clear_dma_done0(db->dmanr);	// clear DMA done bit
		set_dma_addr0(db->dmanr, buf1);
		set_dma_addr1(db->dmanr, buf2);
	} else {
		clear_dma_done1(db->dmanr);	// clear DMA done bit
		set_dma_addr1(db->dmanr, buf1);
		set_dma_addr0(db->dmanr, buf2);
	}
	// NOTE(review): count appears to be in halfword units (fragsize
	// is bytes, shifted right by one) — confirm against au1000_dma.h
	set_dma_count(db->dmanr, db->dma_fragsize>>1);
	enable_dma_buffers(db->dmanr);

	start_dma(db->dmanr);

#ifdef AU1000_VERBOSE_DEBUG
	dump_au1000_dma_channel(db->dmanr);
#endif

	db->stopped = 0;

	spin_unlock_irqrestore(&s->lock, flags);
}
517
/*
 * Start capture DMA, programming the channel's ping-pong buffer pair
 * with the two fragments beginning at nextIn.  Mirrors start_dac().
 * No-op unless the ADC was stopped.
 */
static void start_adc(struct au1000_state *s)
{
	struct dmabuf *db = &s->dma_adc;
	unsigned long flags;
	unsigned long buf1, buf2;

	if (!db->stopped)
		return;

	spin_lock_irqsave(&s->lock, flags);

	au_readl(AC97C_STATUS);	// read status to clear sticky bits

	// reset Buffer 1 and 2 pointers to nextIn and nextIn+dma_fragsize
	buf1 = virt_to_phys(db->nextIn);
	buf2 = buf1 + db->dma_fragsize;
	if (buf2 >= db->dmaaddr + db->dmasize)
		buf2 -= db->dmasize;	// wrap to ring start

	set_recv_slots(db->num_channels);

	init_dma(db->dmanr);
	// put buf1 (next fragment to fill) on whichever HW buffer is active
	if (get_dma_active_buffer(db->dmanr) == 0) {
		clear_dma_done0(db->dmanr);	// clear DMA done bit
		set_dma_addr0(db->dmanr, buf1);
		set_dma_addr1(db->dmanr, buf2);
	} else {
		clear_dma_done1(db->dmanr);	// clear DMA done bit
		set_dma_addr1(db->dmanr, buf1);
		set_dma_addr0(db->dmanr, buf2);
	}
	// NOTE(review): count appears to be in halfword units — confirm
	set_dma_count(db->dmanr, db->dma_fragsize>>1);
	enable_dma_buffers(db->dmanr);

	start_dma(db->dmanr);

#ifdef AU1000_VERBOSE_DEBUG
	dump_au1000_dma_channel(db->dmanr);
#endif

	db->stopped = 0;

	spin_unlock_irqrestore(&s->lock, flags);
}
562
563/* --------------------------------------------------------------------- */
564
565#define DMABUF_DEFAULTORDER (17-PAGE_SHIFT)
566#define DMABUF_MINORDER 1
567
/*
 * Free the DMA ring buffer (if allocated) and reset the buffer state.
 * Safe to call on an already-freed dmabuf.
 */
static inline void dealloc_dmabuf(struct au1000_state *s, struct dmabuf *db)
{
	struct page *page, *pend;

	if (db->rawbuf) {
		/* undo marking the pages as reserved (set in prog_dmabuf
		   for the benefit of remap_pfn_range) */
		pend = virt_to_page(db->rawbuf +
				    (PAGE_SIZE << db->buforder) - 1);
		for (page = virt_to_page(db->rawbuf); page <= pend; page++)
			ClearPageReserved(page);
		dma_free_noncoherent(NULL,
				PAGE_SIZE << db->buforder,
				db->rawbuf,
				db->dmaaddr);
	}
	db->rawbuf = db->nextIn = db->nextOut = NULL;
	db->mapped = db->ready = 0;
}
586
/*
 * Allocate (on first use) and program the DMA ring buffer for one
 * direction: compute the user->DMA expansion factor, fragment size
 * and count from the current sample format, honoring any OSS
 * SETFRAGMENT constraints.  Returns 0 or -ENOMEM.
 */
static int prog_dmabuf(struct au1000_state *s, struct dmabuf *db)
{
	int order;
	unsigned user_bytes_per_sec;
	unsigned bufs;
	struct page *page, *pend;
	unsigned rate = db->sample_rate;

	if (!db->rawbuf) {
		db->ready = db->mapped = 0;
		// try progressively smaller allocations until one succeeds
		for (order = DMABUF_DEFAULTORDER;
		     order >= DMABUF_MINORDER; order--)
			if ((db->rawbuf = dma_alloc_noncoherent(NULL,
						PAGE_SIZE << order,
						&db->dmaaddr,
						0)))
				break;
		if (!db->rawbuf)
			return -ENOMEM;
		db->buforder = order;
		/* now mark the pages as reserved;
		   otherwise remap_pfn_range doesn't do what we want */
		pend = virt_to_page(db->rawbuf +
				    (PAGE_SIZE << db->buforder) - 1);
		for (page = virt_to_page(db->rawbuf); page <= pend; page++)
			SetPageReserved(page);
	}

	// cnt_factor = DMA bytes consumed per user byte: x2 for 8-bit
	// samples (expanded to 16), x2 for mono (duplicated to stereo),
	// x src_factor for software interpolation
	db->cnt_factor = 1;
	if (db->sample_size == 8)
		db->cnt_factor *= 2;
	if (db->num_channels == 1)
		db->cnt_factor *= 2;
	db->cnt_factor *= db->src_factor;

	db->count = 0;
	db->nextIn = db->nextOut = db->rawbuf;

	db->user_bytes_per_sample = (db->sample_size>>3) * db->num_channels;
	// DMA side is always 16-bit and at least stereo
	db->dma_bytes_per_sample = 2 * ((db->num_channels == 1) ?
					2 : db->num_channels);

	user_bytes_per_sec = rate * db->user_bytes_per_sample;
	bufs = PAGE_SIZE << db->buforder;
	if (db->ossfragshift) {
		// honor SNDCTL_DSP_SETFRAGMENT, but keep at least ~1ms
		if ((1000 << db->ossfragshift) < user_bytes_per_sec)
			db->fragshift = ld2(user_bytes_per_sec/1000);
		else
			db->fragshift = db->ossfragshift;
	} else {
		// default: ~10ms fragments, minimum 8 bytes
		db->fragshift = ld2(user_bytes_per_sec / 100 /
				    (db->subdivision ? db->subdivision : 1));
		if (db->fragshift < 3)
			db->fragshift = 3;
	}

	db->fragsize = 1 << db->fragshift;
	db->dma_fragsize = db->fragsize * db->cnt_factor;
	db->numfrag = bufs / db->dma_fragsize;

	// shrink fragments until at least 4 fit in the buffer
	while (db->numfrag < 4 && db->fragshift > 3) {
		db->fragshift--;
		db->fragsize = 1 << db->fragshift;
		db->dma_fragsize = db->fragsize * db->cnt_factor;
		db->numfrag = bufs / db->dma_fragsize;
	}

	if (db->ossmaxfrags >= 4 && db->ossmaxfrags < db->numfrag)
		db->numfrag = db->ossmaxfrags;

	db->dmasize = db->dma_fragsize * db->numfrag;
	memset(db->rawbuf, 0, bufs);

#ifdef AU1000_VERBOSE_DEBUG
	dbg("rate=%d, samplesize=%d, channels=%d",
	    rate, db->sample_size, db->num_channels);
	dbg("fragsize=%d, cnt_factor=%d, dma_fragsize=%d",
	    db->fragsize, db->cnt_factor, db->dma_fragsize);
	dbg("numfrag=%d, dmasize=%d", db->numfrag, db->dmasize);
#endif

	db->ready = 1;
	return 0;
}
671
672static inline int prog_dmabuf_adc(struct au1000_state *s)
673{
674 stop_adc(s);
675 return prog_dmabuf(s, &s->dma_adc);
676
677}
678
679static inline int prog_dmabuf_dac(struct au1000_state *s)
680{
681 stop_dac(s);
682 return prog_dmabuf(s, &s->dma_dac);
683}
684
685
/*
 * Playback DMA interrupt: advance nextOut past the completed fragment,
 * re-arm the finished hardware buffer with the fragment after next,
 * and handle underrun (count exhausted) and missed interrupts (both
 * done bits set).  Takes s->lock itself; it is dropped and retaken
 * around stop_dac()/start_dac(), which acquire it internally.
 */
static irqreturn_t dac_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct au1000_state *s = (struct au1000_state *) dev_id;
	struct dmabuf *dac = &s->dma_dac;
	unsigned long newptr;
	u32 ac97c_stat, buff_done;

	ac97c_stat = au_readl(AC97C_STATUS);
#ifdef AU1000_VERBOSE_DEBUG
	if (ac97c_stat & (AC97C_XU | AC97C_XO | AC97C_TE))
		dbg("AC97C status = 0x%08x", ac97c_stat);
#endif

	if ((buff_done = get_dma_buffer_done(dac->dmanr)) == 0) {
		/* fastpath out, to ease interrupt sharing */
		return IRQ_HANDLED;
	}

	spin_lock(&s->lock);

	if (buff_done != (DMA_D0 | DMA_D1)) {
		/* exactly one buffer finished: normal case */
		dac->nextOut += dac->dma_fragsize;
		if (dac->nextOut >= dac->rawbuf + dac->dmasize)
			dac->nextOut -= dac->dmasize;

		/* update playback pointers: newptr is the fragment after
		   the one about to play, to re-arm the idle HW buffer */
		newptr = virt_to_phys(dac->nextOut) + dac->dma_fragsize;
		if (newptr >= dac->dmaaddr + dac->dmasize)
			newptr -= dac->dmasize;

		dac->count -= dac->dma_fragsize;
		dac->total_bytes += dac->dma_fragsize;

		if (dac->count <= 0) {
			/* underrun: no more data queued, stop the DAC */
#ifdef AU1000_VERBOSE_DEBUG
			dbg("dac underrun");
#endif
			spin_unlock(&s->lock);
			stop_dac(s);
			spin_lock(&s->lock);
			dac->count = 0;
			dac->nextIn = dac->nextOut;
		} else if (buff_done == DMA_D0) {
			clear_dma_done0(dac->dmanr);	// clear DMA done bit
			set_dma_count0(dac->dmanr, dac->dma_fragsize>>1);
			set_dma_addr0(dac->dmanr, newptr);
			enable_dma_buffer0(dac->dmanr);	// reenable
		} else {
			clear_dma_done1(dac->dmanr);	// clear DMA done bit
			set_dma_count1(dac->dmanr, dac->dma_fragsize>>1);
			set_dma_addr1(dac->dmanr, newptr);
			enable_dma_buffer1(dac->dmanr);	// reenable
		}
	} else {
		// both done bits set, we missed an interrupt
		spin_unlock(&s->lock);
		stop_dac(s);
		spin_lock(&s->lock);

		// account for both completed fragments
		dac->nextOut += 2*dac->dma_fragsize;
		if (dac->nextOut >= dac->rawbuf + dac->dmasize)
			dac->nextOut -= dac->dmasize;

		dac->count -= 2*dac->dma_fragsize;
		dac->total_bytes += 2*dac->dma_fragsize;

		if (dac->count > 0) {
			// data remains: restart playback from nextOut
			spin_unlock(&s->lock);
			start_dac(s);
			spin_lock(&s->lock);
		}
	}

	/* wake up anybody listening */
	if (waitqueue_active(&dac->wait))
		wake_up(&dac->wait);

	spin_unlock(&s->lock);

	return IRQ_HANDLED;
}
768
769
/*
 * Capture DMA interrupt: advance nextIn past the filled fragment,
 * re-arm the finished hardware buffer, and handle overrun (buffer full)
 * and missed interrupts (both done bits set).
 *
 * NOTE(review): the overrun paths return IRQ_NONE even though the
 * interrupt came from this device and was acted upon (ADC stopped);
 * also adc->error is incremented after the lock is dropped in the first
 * path — both look questionable but are preserved as-is.
 */
static irqreturn_t adc_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct au1000_state *s = (struct au1000_state *) dev_id;
	struct dmabuf *adc = &s->dma_adc;
	unsigned long newptr;
	u32 ac97c_stat, buff_done;

	ac97c_stat = au_readl(AC97C_STATUS);
#ifdef AU1000_VERBOSE_DEBUG
	if (ac97c_stat & (AC97C_RU | AC97C_RO))
		dbg("AC97C status = 0x%08x", ac97c_stat);
#endif

	if ((buff_done = get_dma_buffer_done(adc->dmanr)) == 0) {
		/* fastpath out, to ease interrupt sharing */
		return IRQ_HANDLED;
	}

	spin_lock(&s->lock);

	if (buff_done != (DMA_D0 | DMA_D1)) {
		/* exactly one buffer finished: normal case */
		if (adc->count + adc->dma_fragsize > adc->dmasize) {
			// Overrun. Stop ADC and log the error
			spin_unlock(&s->lock);
			stop_adc(s);
			adc->error++;
			err("adc overrun");
			return IRQ_NONE;
		}

		adc->nextIn += adc->dma_fragsize;
		if (adc->nextIn >= adc->rawbuf + adc->dmasize)
			adc->nextIn -= adc->dmasize;

		/* update capture pointers: newptr is the fragment after
		   the one about to fill, to re-arm the idle HW buffer */
		newptr = virt_to_phys(adc->nextIn) + adc->dma_fragsize;
		if (newptr >= adc->dmaaddr + adc->dmasize)
			newptr -= adc->dmasize;

		adc->count += adc->dma_fragsize;
		adc->total_bytes += adc->dma_fragsize;

		if (buff_done == DMA_D0) {
			clear_dma_done0(adc->dmanr);	// clear DMA done bit
			set_dma_count0(adc->dmanr, adc->dma_fragsize>>1);
			set_dma_addr0(adc->dmanr, newptr);
			enable_dma_buffer0(adc->dmanr);	// reenable
		} else {
			clear_dma_done1(adc->dmanr);	// clear DMA done bit
			set_dma_count1(adc->dmanr, adc->dma_fragsize>>1);
			set_dma_addr1(adc->dmanr, newptr);
			enable_dma_buffer1(adc->dmanr);	// reenable
		}
	} else {
		// both done bits set, we missed an interrupt
		spin_unlock(&s->lock);
		stop_adc(s);
		spin_lock(&s->lock);

		if (adc->count + 2*adc->dma_fragsize > adc->dmasize) {
			// Overrun. Log the error
			adc->error++;
			err("adc overrun");
			spin_unlock(&s->lock);
			return IRQ_NONE;
		}

		// account for both completed fragments
		adc->nextIn += 2*adc->dma_fragsize;
		if (adc->nextIn >= adc->rawbuf + adc->dmasize)
			adc->nextIn -= adc->dmasize;

		adc->count += 2*adc->dma_fragsize;
		adc->total_bytes += 2*adc->dma_fragsize;

		// restart capture from nextIn
		spin_unlock(&s->lock);
		start_adc(s);
		spin_lock(&s->lock);
	}

	/* wake up anybody listening */
	if (waitqueue_active(&adc->wait))
		wake_up(&adc->wait);

	spin_unlock(&s->lock);

	return IRQ_HANDLED;
}
857
858/* --------------------------------------------------------------------- */
859
860static loff_t au1000_llseek(struct file *file, loff_t offset, int origin)
861{
862 return -ESPIPE;
863}
864
865
866static int au1000_open_mixdev(struct inode *inode, struct file *file)
867{
868 file->private_data = &au1000_state;
869 return nonseekable_open(inode, file);
870}
871
/* Close the mixer device: nothing to release. */
static int au1000_release_mixdev(struct inode *inode, struct file *file)
{
	return 0;
}
876
877static int mixdev_ioctl(struct ac97_codec *codec, unsigned int cmd,
878 unsigned long arg)
879{
880 return codec->mixer_ioctl(codec, cmd, arg);
881}
882
883static int au1000_ioctl_mixdev(struct inode *inode, struct file *file,
884 unsigned int cmd, unsigned long arg)
885{
886 struct au1000_state *s = (struct au1000_state *)file->private_data;
887 struct ac97_codec *codec = &s->codec;
888
889 return mixdev_ioctl(codec, cmd, arg);
890}
891
/* File operations for the OSS mixer device (/dev/mixer). */
static /*const */ struct file_operations au1000_mixer_fops = {
	.owner = THIS_MODULE,
	.llseek = au1000_llseek,
	.ioctl = au1000_ioctl_mixdev,
	.open = au1000_open_mixdev,
	.release = au1000_release_mixdev,
};
899
900/* --------------------------------------------------------------------- */
901
/*
 * Wait until the playback DMA buffer has drained.  Returns 0 when empty
 * (or when draining is not applicable: mmap'ed, not ready, or stopped),
 * -EBUSY if @nonblock and data remains, -ERESTARTSYS on a signal.
 */
static int drain_dac(struct au1000_state *s, int nonblock)
{
	unsigned long flags;
	int count, tmo;

	if (s->dma_dac.mapped || !s->dma_dac.ready || s->dma_dac.stopped)
		return 0;

	for (;;) {
		spin_lock_irqsave(&s->lock, flags);
		count = s->dma_dac.count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (count <= 0)
			break;
		if (signal_pending(current))
			break;
		if (nonblock)
			return -EBUSY;
		// estimate (in ms) how long the remaining bytes take to play
		tmo = 1000 * count / (s->no_vra ?
				      48000 : s->dma_dac.sample_rate);
		tmo /= s->dma_dac.dma_bytes_per_sample;
		au1000_delay(tmo);
	}
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
929
930/* --------------------------------------------------------------------- */
931
932static inline u8 S16_TO_U8(s16 ch)
933{
934 return (u8) (ch >> 8) + 0x80;
935}
936static inline s16 U8_TO_S16(u8 ch)
937{
938 return (s16) (ch - 0x80) << 8;
939}
940
941/*
942 * Translates user samples to dma buffer suitable for AC'97 DAC data:
943 * If mono, copy left channel to right channel in dma buffer.
944 * If 8 bit samples, cvt to 16-bit before writing to dma buffer.
945 * If interpolating (no VRA), duplicate every audio frame src_factor times.
946 */
947static int translate_from_user(struct dmabuf *db,
948 char* dmabuf,
949 char* userbuf,
950 int dmacount)
951{
952 int sample, i;
953 int interp_bytes_per_sample;
954 int num_samples;
955 int mono = (db->num_channels == 1);
956 char usersample[12];
957 s16 ch, dmasample[6];
958
959 if (db->sample_size == 16 && !mono && db->src_factor == 1) {
960 // no translation necessary, just copy
961 if (copy_from_user(dmabuf, userbuf, dmacount))
962 return -EFAULT;
963 return dmacount;
964 }
965
966 interp_bytes_per_sample = db->dma_bytes_per_sample * db->src_factor;
967 num_samples = dmacount / interp_bytes_per_sample;
968
969 for (sample = 0; sample < num_samples; sample++) {
970 if (copy_from_user(usersample, userbuf,
971 db->user_bytes_per_sample)) {
972 dbg("%s: fault", __FUNCTION__);
973 return -EFAULT;
974 }
975
976 for (i = 0; i < db->num_channels; i++) {
977 if (db->sample_size == 8)
978 ch = U8_TO_S16(usersample[i]);
979 else
980 ch = *((s16 *) (&usersample[i * 2]));
981 dmasample[i] = ch;
982 if (mono)
983 dmasample[i + 1] = ch; // right channel
984 }
985
986 // duplicate every audio frame src_factor times
987 for (i = 0; i < db->src_factor; i++)
988 memcpy(dmabuf, dmasample, db->dma_bytes_per_sample);
989
990 userbuf += db->user_bytes_per_sample;
991 dmabuf += interp_bytes_per_sample;
992 }
993
994 return num_samples * interp_bytes_per_sample;
995}
996
/*
 * Translates AC'97 ADC samples to user buffer:
 * If mono, send only left channel to user buffer.
 * If 8 bit samples, cvt from 16 to 8 bit before writing to user buffer.
 * If decimating (no VRA), skip over src_factor audio frames.
 *
 * Returns the number of DMA-buffer bytes consumed, or -EFAULT.
 */
static int translate_to_user(struct dmabuf *db,
			     char* userbuf,
			     char* dmabuf,
			     int dmacount)
{
	int sample, i;
	int interp_bytes_per_sample;
	int num_samples;
	int mono = (db->num_channels == 1);
	char usersample[12];

	if (db->sample_size == 16 && !mono && db->src_factor == 1) {
		// no translation necessary, just copy
		if (copy_to_user(userbuf, dmabuf, dmacount))
			return -EFAULT;
		return dmacount;
	}

	interp_bytes_per_sample = db->dma_bytes_per_sample * db->src_factor;
	num_samples = dmacount / interp_bytes_per_sample;

	for (sample = 0; sample < num_samples; sample++) {
		// take only the first frame of each src_factor-sized block
		// (decimation); mono keeps only the left channel (i == 0)
		for (i = 0; i < db->num_channels; i++) {
			if (db->sample_size == 8)
				usersample[i] =
					S16_TO_U8(*((s16 *) (&dmabuf[i * 2])));
			else
				*((s16 *) (&usersample[i * 2])) =
					*((s16 *) (&dmabuf[i * 2]));
		}

		if (copy_to_user(userbuf, usersample,
				 db->user_bytes_per_sample)) {
			dbg("%s: fault", __FUNCTION__);
			return -EFAULT;
		}

		userbuf += db->user_bytes_per_sample;
		dmabuf += interp_bytes_per_sample;
	}

	return num_samples * interp_bytes_per_sample;
}
1046
/*
 * Copy audio data to/from user buffer from/to dma buffer, taking care
 * that we wrap when reading/writing the dma buffer. Returns actual byte
 * count written to or read from the dma buffer, or a negative errno.
 *
 * @count is in DMA-buffer bytes; the translate_* helpers handle the
 * user<->DMA format expansion/contraction.
 */
static int copy_dmabuf_user(struct dmabuf *db, char* userbuf,
			    int count, int to_user)
{
	char *bufptr = to_user ? db->nextOut : db->nextIn;
	char *bufend = db->rawbuf + db->dmasize;
	int cnt, ret;

	if (bufptr + count > bufend) {
		/* the span wraps past the end of the ring: split into
		   a tail piece and a head piece */
		int partial = (int) (bufend - bufptr);
		if (to_user) {
			if ((cnt = translate_to_user(db, userbuf,
						     bufptr, partial)) < 0)
				return cnt;
			ret = cnt;
			if ((cnt = translate_to_user(db, userbuf + partial,
						     db->rawbuf,
						     count - partial)) < 0)
				return cnt;
			ret += cnt;
		} else {
			if ((cnt = translate_from_user(db, bufptr, userbuf,
						       partial)) < 0)
				return cnt;
			ret = cnt;
			if ((cnt = translate_from_user(db, db->rawbuf,
						       userbuf + partial,
						       count - partial)) < 0)
				return cnt;
			ret += cnt;
		}
	} else {
		if (to_user)
			ret = translate_to_user(db, userbuf, bufptr, count);
		else
			ret = translate_from_user(db, bufptr, userbuf, count);
	}

	return ret;
}
1091
1092
/*
 * OSS read(): copy captured samples to user space, starting the ADC on
 * demand and sleeping (unless O_NONBLOCK) until data is available.
 * Returns the number of user bytes read, or a negative errno.
 */
static ssize_t au1000_read(struct file *file, char *buffer,
			   size_t count, loff_t *ppos)
{
	struct au1000_state *s = (struct au1000_state *)file->private_data;
	struct dmabuf *db = &s->dma_adc;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	int cnt, usercnt, avail;

	if (db->mapped)
		return -ENXIO;	/* read() not allowed while mmap'ed */
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	ret = 0;

	// convert the user byte count to DMA-buffer bytes
	count *= db->cnt_factor;

	mutex_lock(&s->sem);
	add_wait_queue(&db->wait, &wait);

	while (count > 0) {
		// wait for samples in ADC dma buffer
		do {
			if (db->stopped)
				start_adc(s);
			spin_lock_irqsave(&s->lock, flags);
			avail = db->count;
			if (avail <= 0)
				__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irqrestore(&s->lock, flags);
			if (avail <= 0) {
				if (file->f_flags & O_NONBLOCK) {
					if (!ret)
						ret = -EAGAIN;
					goto out;
				}
				// drop the mutex while sleeping so the
				// other direction isn't blocked
				mutex_unlock(&s->sem);
				schedule();
				if (signal_pending(current)) {
					if (!ret)
						ret = -ERESTARTSYS;
					goto out2;
				}
				mutex_lock(&s->sem);
			}
		} while (avail <= 0);

		// copy from nextOut to user
		if ((cnt = copy_dmabuf_user(db, buffer,
					    count > avail ?
					    avail : count, 1)) < 0) {
			if (!ret)
				ret = -EFAULT;
			goto out;
		}

		spin_lock_irqsave(&s->lock, flags);
		db->count -= cnt;
		db->nextOut += cnt;
		if (db->nextOut >= db->rawbuf + db->dmasize)
			db->nextOut -= db->dmasize;
		spin_unlock_irqrestore(&s->lock, flags);

		count -= cnt;
		// report progress to the caller in user bytes
		usercnt = cnt / db->cnt_factor;
		buffer += usercnt;
		ret += usercnt;
	}			// while (count > 0)

out:
	mutex_unlock(&s->sem);
out2:
	remove_wait_queue(&db->wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;
}
1170
/*
 * write() handler for the DSP device: copy playback samples from user
 * space into the DAC DMA ring, sleeping until space is available unless
 * O_NONBLOCK is set.  Returns the number of user-visible bytes written,
 * or a negative errno.  Disallowed (-ENXIO) while the playback buffer
 * is mmap()ed.
 */
static ssize_t au1000_write(struct file *file, const char *buffer,
			    size_t count, loff_t * ppos)
{
	struct au1000_state *s = (struct au1000_state *)file->private_data;
	struct dmabuf *db = &s->dma_dac;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret = 0;
	unsigned long flags;
	int cnt, usercnt, avail;

#ifdef AU1000_VERBOSE_DEBUG
	dbg("write: count=%d", count);
#endif

	if (db->mapped)
		return -ENXIO;
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;

	/* Scale the user byte count to DMA-buffer bytes (translation may
	 * expand mono/8-bit data — see cnt_factor). */
	count *= db->cnt_factor;

	mutex_lock(&s->sem);
	add_wait_queue(&db->wait, &wait);

	while (count > 0) {
		// wait for space in playback buffer
		do {
			/* Free space is dmasize minus the queued byte
			 * count; db->count is shared with the DMA
			 * interrupt handler, so read it under the lock. */
			spin_lock_irqsave(&s->lock, flags);
			avail = (int) db->dmasize - db->count;
			if (avail <= 0)
				__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irqrestore(&s->lock, flags);
			if (avail <= 0) {
				if (file->f_flags & O_NONBLOCK) {
					if (!ret)
						ret = -EAGAIN;
					goto out;
				}
				/* Drop the mutex while sleeping so the
				 * interrupt path can drain the buffer. */
				mutex_unlock(&s->sem);
				schedule();
				if (signal_pending(current)) {
					if (!ret)
						ret = -ERESTARTSYS;
					/* mutex not held here, so skip
					 * the unlock at "out" */
					goto out2;
				}
				mutex_lock(&s->sem);
			}
		} while (avail <= 0);

		// copy from user to nextIn
		if ((cnt = copy_dmabuf_user(db, (char *) buffer,
					    count > avail ?
					    avail : count, 0)) < 0) {
			if (!ret)
				ret = -EFAULT;
			goto out;
		}

		/* Publish cnt new bytes to the ring under the lock. */
		spin_lock_irqsave(&s->lock, flags);
		db->count += cnt;
		db->nextIn += cnt;
		if (db->nextIn >= db->rawbuf + db->dmasize)
			db->nextIn -= db->dmasize;
		spin_unlock_irqrestore(&s->lock, flags);
		/* Kick the DAC if playback is not already running. */
		if (db->stopped)
			start_dac(s);

		count -= cnt;
		/* Convert DMA bytes back to user-visible bytes. */
		usercnt = cnt / db->cnt_factor;
		buffer += usercnt;
		ret += usercnt;
	} // while (count > 0)

out:
	mutex_unlock(&s->sem);
out2:
	remove_wait_queue(&db->wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;
}
1251
1252
/* No kernel lock - we have our own spinlock */
/*
 * poll() handler: report readiness of the capture and/or playback
 * buffers.  A stream whose DMA buffer has not been programmed yet
 * (!ready) makes the whole call return 0 immediately.  Readiness
 * thresholds are one full DMA fragment in each direction.
 */
static unsigned int au1000_poll(struct file *file,
				struct poll_table_struct *wait)
{
	struct au1000_state *s = (struct au1000_state *)file->private_data;
	unsigned long flags;
	unsigned int mask = 0;

	if (file->f_mode & FMODE_WRITE) {
		if (!s->dma_dac.ready)
			return 0;
		poll_wait(file, &s->dma_dac.wait, wait);
	}
	if (file->f_mode & FMODE_READ) {
		if (!s->dma_adc.ready)
			return 0;
		poll_wait(file, &s->dma_adc.wait, wait);
	}

	/* The count fields are updated from the DMA interrupt handlers,
	 * so they must be sampled under the spinlock. */
	spin_lock_irqsave(&s->lock, flags);

	if (file->f_mode & FMODE_READ) {
		/* Readable once at least one fragment has been captured. */
		if (s->dma_adc.count >= (signed)s->dma_adc.dma_fragsize)
			mask |= POLLIN | POLLRDNORM;
	}
	if (file->f_mode & FMODE_WRITE) {
		if (s->dma_dac.mapped) {
			/* mmap()ed: writable once a fragment has played. */
			if (s->dma_dac.count >=
			    (signed)s->dma_dac.dma_fragsize)
				mask |= POLLOUT | POLLWRNORM;
		} else {
			/* Writable while at least one free fragment of
			 * space remains in the playback ring. */
			if ((signed) s->dma_dac.dmasize >=
			    s->dma_dac.count + (signed)s->dma_dac.dma_fragsize)
				mask |= POLLOUT | POLLWRNORM;
		}
	}
	spin_unlock_irqrestore(&s->lock, flags);
	return mask;
}
1292
1293static int au1000_mmap(struct file *file, struct vm_area_struct *vma)
1294{
1295 struct au1000_state *s = (struct au1000_state *)file->private_data;
1296 struct dmabuf *db;
1297 unsigned long size;
1298 int ret = 0;
1299
1300 dbg("%s", __FUNCTION__);
1301
1302 lock_kernel();
1303 mutex_lock(&s->sem);
1304 if (vma->vm_flags & VM_WRITE)
1305 db = &s->dma_dac;
1306 else if (vma->vm_flags & VM_READ)
1307 db = &s->dma_adc;
1308 else {
1309 ret = -EINVAL;
1310 goto out;
1311 }
1312 if (vma->vm_pgoff != 0) {
1313 ret = -EINVAL;
1314 goto out;
1315 }
1316 size = vma->vm_end - vma->vm_start;
1317 if (size > (PAGE_SIZE << db->buforder)) {
1318 ret = -EINVAL;
1319 goto out;
1320 }
1321 if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(db->rawbuf),
1322 size, vma->vm_page_prot)) {
1323 ret = -EAGAIN;
1324 goto out;
1325 }
1326 vma->vm_flags &= ~VM_IO;
1327 db->mapped = 1;
1328out:
1329 mutex_unlock(&s->sem);
1330 unlock_kernel();
1331 return ret;
1332}
1333
1334
#ifdef AU1000_VERBOSE_DEBUG
/*
 * Lookup table mapping OSS ioctl command codes to printable names.
 * Used only by the verbose-debug trace at the top of au1000_ioctl().
 */
static struct ioctl_str_t {
	unsigned int cmd;	/* ioctl command code */
	const char *str;	/* human-readable name */
} ioctl_str[] = {
	{SNDCTL_DSP_RESET, "SNDCTL_DSP_RESET"},
	{SNDCTL_DSP_SYNC, "SNDCTL_DSP_SYNC"},
	{SNDCTL_DSP_SPEED, "SNDCTL_DSP_SPEED"},
	{SNDCTL_DSP_STEREO, "SNDCTL_DSP_STEREO"},
	{SNDCTL_DSP_GETBLKSIZE, "SNDCTL_DSP_GETBLKSIZE"},
	{SNDCTL_DSP_SAMPLESIZE, "SNDCTL_DSP_SAMPLESIZE"},
	{SNDCTL_DSP_CHANNELS, "SNDCTL_DSP_CHANNELS"},
	{SOUND_PCM_WRITE_CHANNELS, "SOUND_PCM_WRITE_CHANNELS"},
	{SOUND_PCM_WRITE_FILTER, "SOUND_PCM_WRITE_FILTER"},
	{SNDCTL_DSP_POST, "SNDCTL_DSP_POST"},
	{SNDCTL_DSP_SUBDIVIDE, "SNDCTL_DSP_SUBDIVIDE"},
	{SNDCTL_DSP_SETFRAGMENT, "SNDCTL_DSP_SETFRAGMENT"},
	{SNDCTL_DSP_GETFMTS, "SNDCTL_DSP_GETFMTS"},
	{SNDCTL_DSP_SETFMT, "SNDCTL_DSP_SETFMT"},
	{SNDCTL_DSP_GETOSPACE, "SNDCTL_DSP_GETOSPACE"},
	{SNDCTL_DSP_GETISPACE, "SNDCTL_DSP_GETISPACE"},
	{SNDCTL_DSP_NONBLOCK, "SNDCTL_DSP_NONBLOCK"},
	{SNDCTL_DSP_GETCAPS, "SNDCTL_DSP_GETCAPS"},
	{SNDCTL_DSP_GETTRIGGER, "SNDCTL_DSP_GETTRIGGER"},
	{SNDCTL_DSP_SETTRIGGER, "SNDCTL_DSP_SETTRIGGER"},
	{SNDCTL_DSP_GETIPTR, "SNDCTL_DSP_GETIPTR"},
	{SNDCTL_DSP_GETOPTR, "SNDCTL_DSP_GETOPTR"},
	{SNDCTL_DSP_MAPINBUF, "SNDCTL_DSP_MAPINBUF"},
	{SNDCTL_DSP_MAPOUTBUF, "SNDCTL_DSP_MAPOUTBUF"},
	{SNDCTL_DSP_SETSYNCRO, "SNDCTL_DSP_SETSYNCRO"},
	{SNDCTL_DSP_SETDUPLEX, "SNDCTL_DSP_SETDUPLEX"},
	{SNDCTL_DSP_GETODELAY, "SNDCTL_DSP_GETODELAY"},
	{SNDCTL_DSP_GETCHANNELMASK, "SNDCTL_DSP_GETCHANNELMASK"},
	{SNDCTL_DSP_BIND_CHANNEL, "SNDCTL_DSP_BIND_CHANNEL"},
	{OSS_GETVERSION, "OSS_GETVERSION"},
	{SOUND_PCM_READ_RATE, "SOUND_PCM_READ_RATE"},
	{SOUND_PCM_READ_CHANNELS, "SOUND_PCM_READ_CHANNELS"},
	{SOUND_PCM_READ_BITS, "SOUND_PCM_READ_BITS"},
	{SOUND_PCM_READ_FILTER, "SOUND_PCM_READ_FILTER"}
};
#endif
1376
1377// Need to hold a spin-lock before calling this!
1378static int dma_count_done(struct dmabuf *db)
1379{
1380 if (db->stopped)
1381 return 0;
1382
1383 return db->dma_fragsize - get_dma_residue(db->dmanr);
1384}
1385
1386
1387static int au1000_ioctl(struct inode *inode, struct file *file,
1388 unsigned int cmd, unsigned long arg)
1389{
1390 struct au1000_state *s = (struct au1000_state *)file->private_data;
1391 unsigned long flags;
1392 audio_buf_info abinfo;
1393 count_info cinfo;
1394 int count;
1395 int val, mapped, ret, diff;
1396
1397 mapped = ((file->f_mode & FMODE_WRITE) && s->dma_dac.mapped) ||
1398 ((file->f_mode & FMODE_READ) && s->dma_adc.mapped);
1399
1400#ifdef AU1000_VERBOSE_DEBUG
1401 for (count=0; count<sizeof(ioctl_str)/sizeof(ioctl_str[0]); count++) {
1402 if (ioctl_str[count].cmd == cmd)
1403 break;
1404 }
1405 if (count < sizeof(ioctl_str) / sizeof(ioctl_str[0]))
1406 dbg("ioctl %s, arg=0x%lx", ioctl_str[count].str, arg);
1407 else
1408 dbg("ioctl 0x%x unknown, arg=0x%lx", cmd, arg);
1409#endif
1410
1411 switch (cmd) {
1412 case OSS_GETVERSION:
1413 return put_user(SOUND_VERSION, (int *) arg);
1414
1415 case SNDCTL_DSP_SYNC:
1416 if (file->f_mode & FMODE_WRITE)
1417 return drain_dac(s, file->f_flags & O_NONBLOCK);
1418 return 0;
1419
1420 case SNDCTL_DSP_SETDUPLEX:
1421 return 0;
1422
1423 case SNDCTL_DSP_GETCAPS:
1424 return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME |
1425 DSP_CAP_TRIGGER | DSP_CAP_MMAP, (int *)arg);
1426
1427 case SNDCTL_DSP_RESET:
1428 if (file->f_mode & FMODE_WRITE) {
1429 stop_dac(s);
1430 synchronize_irq();
1431 s->dma_dac.count = s->dma_dac.total_bytes = 0;
1432 s->dma_dac.nextIn = s->dma_dac.nextOut =
1433 s->dma_dac.rawbuf;
1434 }
1435 if (file->f_mode & FMODE_READ) {
1436 stop_adc(s);
1437 synchronize_irq();
1438 s->dma_adc.count = s->dma_adc.total_bytes = 0;
1439 s->dma_adc.nextIn = s->dma_adc.nextOut =
1440 s->dma_adc.rawbuf;
1441 }
1442 return 0;
1443
1444 case SNDCTL_DSP_SPEED:
1445 if (get_user(val, (int *) arg))
1446 return -EFAULT;
1447 if (val >= 0) {
1448 if (file->f_mode & FMODE_READ) {
1449 stop_adc(s);
1450 set_adc_rate(s, val);
1451 }
1452 if (file->f_mode & FMODE_WRITE) {
1453 stop_dac(s);
1454 set_dac_rate(s, val);
1455 }
1456 if (s->open_mode & FMODE_READ)
1457 if ((ret = prog_dmabuf_adc(s)))
1458 return ret;
1459 if (s->open_mode & FMODE_WRITE)
1460 if ((ret = prog_dmabuf_dac(s)))
1461 return ret;
1462 }
1463 return put_user((file->f_mode & FMODE_READ) ?
1464 s->dma_adc.sample_rate :
1465 s->dma_dac.sample_rate,
1466 (int *)arg);
1467
1468 case SNDCTL_DSP_STEREO:
1469 if (get_user(val, (int *) arg))
1470 return -EFAULT;
1471 if (file->f_mode & FMODE_READ) {
1472 stop_adc(s);
1473 s->dma_adc.num_channels = val ? 2 : 1;
1474 if ((ret = prog_dmabuf_adc(s)))
1475 return ret;
1476 }
1477 if (file->f_mode & FMODE_WRITE) {
1478 stop_dac(s);
1479 s->dma_dac.num_channels = val ? 2 : 1;
1480 if (s->codec_ext_caps & AC97_EXT_DACS) {
1481 // disable surround and center/lfe in AC'97
1482 u16 ext_stat = rdcodec(&s->codec,
1483 AC97_EXTENDED_STATUS);
1484 wrcodec(&s->codec, AC97_EXTENDED_STATUS,
1485 ext_stat | (AC97_EXTSTAT_PRI |
1486 AC97_EXTSTAT_PRJ |
1487 AC97_EXTSTAT_PRK));
1488 }
1489 if ((ret = prog_dmabuf_dac(s)))
1490 return ret;
1491 }
1492 return 0;
1493
1494 case SNDCTL_DSP_CHANNELS:
1495 if (get_user(val, (int *) arg))
1496 return -EFAULT;
1497 if (val != 0) {
1498 if (file->f_mode & FMODE_READ) {
1499 if (val < 0 || val > 2)
1500 return -EINVAL;
1501 stop_adc(s);
1502 s->dma_adc.num_channels = val;
1503 if ((ret = prog_dmabuf_adc(s)))
1504 return ret;
1505 }
1506 if (file->f_mode & FMODE_WRITE) {
1507 switch (val) {
1508 case 1:
1509 case 2:
1510 break;
1511 case 3:
1512 case 5:
1513 return -EINVAL;
1514 case 4:
1515 if (!(s->codec_ext_caps &
1516 AC97_EXTID_SDAC))
1517 return -EINVAL;
1518 break;
1519 case 6:
1520 if ((s->codec_ext_caps &
1521 AC97_EXT_DACS) != AC97_EXT_DACS)
1522 return -EINVAL;
1523 break;
1524 default:
1525 return -EINVAL;
1526 }
1527
1528 stop_dac(s);
1529 if (val <= 2 &&
1530 (s->codec_ext_caps & AC97_EXT_DACS)) {
1531 // disable surround and center/lfe
1532 // channels in AC'97
1533 u16 ext_stat =
1534 rdcodec(&s->codec,
1535 AC97_EXTENDED_STATUS);
1536 wrcodec(&s->codec,
1537 AC97_EXTENDED_STATUS,
1538 ext_stat | (AC97_EXTSTAT_PRI |
1539 AC97_EXTSTAT_PRJ |
1540 AC97_EXTSTAT_PRK));
1541 } else if (val >= 4) {
1542 // enable surround, center/lfe
1543 // channels in AC'97
1544 u16 ext_stat =
1545 rdcodec(&s->codec,
1546 AC97_EXTENDED_STATUS);
1547 ext_stat &= ~AC97_EXTSTAT_PRJ;
1548 if (val == 6)
1549 ext_stat &=
1550 ~(AC97_EXTSTAT_PRI |
1551 AC97_EXTSTAT_PRK);
1552 wrcodec(&s->codec,
1553 AC97_EXTENDED_STATUS,
1554 ext_stat);
1555 }
1556
1557 s->dma_dac.num_channels = val;
1558 if ((ret = prog_dmabuf_dac(s)))
1559 return ret;
1560 }
1561 }
1562 return put_user(val, (int *) arg);
1563
1564 case SNDCTL_DSP_GETFMTS: /* Returns a mask */
1565 return put_user(AFMT_S16_LE | AFMT_U8, (int *) arg);
1566
1567 case SNDCTL_DSP_SETFMT: /* Selects ONE fmt */
1568 if (get_user(val, (int *) arg))
1569 return -EFAULT;
1570 if (val != AFMT_QUERY) {
1571 if (file->f_mode & FMODE_READ) {
1572 stop_adc(s);
1573 if (val == AFMT_S16_LE)
1574 s->dma_adc.sample_size = 16;
1575 else {
1576 val = AFMT_U8;
1577 s->dma_adc.sample_size = 8;
1578 }
1579 if ((ret = prog_dmabuf_adc(s)))
1580 return ret;
1581 }
1582 if (file->f_mode & FMODE_WRITE) {
1583 stop_dac(s);
1584 if (val == AFMT_S16_LE)
1585 s->dma_dac.sample_size = 16;
1586 else {
1587 val = AFMT_U8;
1588 s->dma_dac.sample_size = 8;
1589 }
1590 if ((ret = prog_dmabuf_dac(s)))
1591 return ret;
1592 }
1593 } else {
1594 if (file->f_mode & FMODE_READ)
1595 val = (s->dma_adc.sample_size == 16) ?
1596 AFMT_S16_LE : AFMT_U8;
1597 else
1598 val = (s->dma_dac.sample_size == 16) ?
1599 AFMT_S16_LE : AFMT_U8;
1600 }
1601 return put_user(val, (int *) arg);
1602
1603 case SNDCTL_DSP_POST:
1604 return 0;
1605
1606 case SNDCTL_DSP_GETTRIGGER:
1607 val = 0;
1608 spin_lock_irqsave(&s->lock, flags);
1609 if (file->f_mode & FMODE_READ && !s->dma_adc.stopped)
1610 val |= PCM_ENABLE_INPUT;
1611 if (file->f_mode & FMODE_WRITE && !s->dma_dac.stopped)
1612 val |= PCM_ENABLE_OUTPUT;
1613 spin_unlock_irqrestore(&s->lock, flags);
1614 return put_user(val, (int *) arg);
1615
1616 case SNDCTL_DSP_SETTRIGGER:
1617 if (get_user(val, (int *) arg))
1618 return -EFAULT;
1619 if (file->f_mode & FMODE_READ) {
1620 if (val & PCM_ENABLE_INPUT)
1621 start_adc(s);
1622 else
1623 stop_adc(s);
1624 }
1625 if (file->f_mode & FMODE_WRITE) {
1626 if (val & PCM_ENABLE_OUTPUT)
1627 start_dac(s);
1628 else
1629 stop_dac(s);
1630 }
1631 return 0;
1632
1633 case SNDCTL_DSP_GETOSPACE:
1634 if (!(file->f_mode & FMODE_WRITE))
1635 return -EINVAL;
1636 abinfo.fragsize = s->dma_dac.fragsize;
1637 spin_lock_irqsave(&s->lock, flags);
1638 count = s->dma_dac.count;
1639 count -= dma_count_done(&s->dma_dac);
1640 spin_unlock_irqrestore(&s->lock, flags);
1641 if (count < 0)
1642 count = 0;
1643 abinfo.bytes = (s->dma_dac.dmasize - count) /
1644 s->dma_dac.cnt_factor;
1645 abinfo.fragstotal = s->dma_dac.numfrag;
1646 abinfo.fragments = abinfo.bytes >> s->dma_dac.fragshift;
1647#ifdef AU1000_VERBOSE_DEBUG
1648 dbg("bytes=%d, fragments=%d", abinfo.bytes, abinfo.fragments);
1649#endif
1650 return copy_to_user((void *) arg, &abinfo,
1651 sizeof(abinfo)) ? -EFAULT : 0;
1652
1653 case SNDCTL_DSP_GETISPACE:
1654 if (!(file->f_mode & FMODE_READ))
1655 return -EINVAL;
1656 abinfo.fragsize = s->dma_adc.fragsize;
1657 spin_lock_irqsave(&s->lock, flags);
1658 count = s->dma_adc.count;
1659 count += dma_count_done(&s->dma_adc);
1660 spin_unlock_irqrestore(&s->lock, flags);
1661 if (count < 0)
1662 count = 0;
1663 abinfo.bytes = count / s->dma_adc.cnt_factor;
1664 abinfo.fragstotal = s->dma_adc.numfrag;
1665 abinfo.fragments = abinfo.bytes >> s->dma_adc.fragshift;
1666 return copy_to_user((void *) arg, &abinfo,
1667 sizeof(abinfo)) ? -EFAULT : 0;
1668
1669 case SNDCTL_DSP_NONBLOCK:
1670 file->f_flags |= O_NONBLOCK;
1671 return 0;
1672
1673 case SNDCTL_DSP_GETODELAY:
1674 if (!(file->f_mode & FMODE_WRITE))
1675 return -EINVAL;
1676 spin_lock_irqsave(&s->lock, flags);
1677 count = s->dma_dac.count;
1678 count -= dma_count_done(&s->dma_dac);
1679 spin_unlock_irqrestore(&s->lock, flags);
1680 if (count < 0)
1681 count = 0;
1682 count /= s->dma_dac.cnt_factor;
1683 return put_user(count, (int *) arg);
1684
1685 case SNDCTL_DSP_GETIPTR:
1686 if (!(file->f_mode & FMODE_READ))
1687 return -EINVAL;
1688 spin_lock_irqsave(&s->lock, flags);
1689 cinfo.bytes = s->dma_adc.total_bytes;
1690 count = s->dma_adc.count;
1691 if (!s->dma_adc.stopped) {
1692 diff = dma_count_done(&s->dma_adc);
1693 count += diff;
1694 cinfo.bytes += diff;
1695 cinfo.ptr = virt_to_phys(s->dma_adc.nextIn) + diff -
1696 s->dma_adc.dmaaddr;
1697 } else
1698 cinfo.ptr = virt_to_phys(s->dma_adc.nextIn) -
1699 s->dma_adc.dmaaddr;
1700 if (s->dma_adc.mapped)
1701 s->dma_adc.count &= (s->dma_adc.dma_fragsize-1);
1702 spin_unlock_irqrestore(&s->lock, flags);
1703 if (count < 0)
1704 count = 0;
1705 cinfo.blocks = count >> s->dma_adc.fragshift;
1706 return copy_to_user((void *) arg, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;
1707
1708 case SNDCTL_DSP_GETOPTR:
1709 if (!(file->f_mode & FMODE_READ))
1710 return -EINVAL;
1711 spin_lock_irqsave(&s->lock, flags);
1712 cinfo.bytes = s->dma_dac.total_bytes;
1713 count = s->dma_dac.count;
1714 if (!s->dma_dac.stopped) {
1715 diff = dma_count_done(&s->dma_dac);
1716 count -= diff;
1717 cinfo.bytes += diff;
1718 cinfo.ptr = virt_to_phys(s->dma_dac.nextOut) + diff -
1719 s->dma_dac.dmaaddr;
1720 } else
1721 cinfo.ptr = virt_to_phys(s->dma_dac.nextOut) -
1722 s->dma_dac.dmaaddr;
1723 if (s->dma_dac.mapped)
1724 s->dma_dac.count &= (s->dma_dac.dma_fragsize-1);
1725 spin_unlock_irqrestore(&s->lock, flags);
1726 if (count < 0)
1727 count = 0;
1728 cinfo.blocks = count >> s->dma_dac.fragshift;
1729 return copy_to_user((void *) arg, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;
1730
1731 case SNDCTL_DSP_GETBLKSIZE:
1732 if (file->f_mode & FMODE_WRITE)
1733 return put_user(s->dma_dac.fragsize, (int *) arg);
1734 else
1735 return put_user(s->dma_adc.fragsize, (int *) arg);
1736
1737 case SNDCTL_DSP_SETFRAGMENT:
1738 if (get_user(val, (int *) arg))
1739 return -EFAULT;
1740 if (file->f_mode & FMODE_READ) {
1741 stop_adc(s);
1742 s->dma_adc.ossfragshift = val & 0xffff;
1743 s->dma_adc.ossmaxfrags = (val >> 16) & 0xffff;
1744 if (s->dma_adc.ossfragshift < 4)
1745 s->dma_adc.ossfragshift = 4;
1746 if (s->dma_adc.ossfragshift > 15)
1747 s->dma_adc.ossfragshift = 15;
1748 if (s->dma_adc.ossmaxfrags < 4)
1749 s->dma_adc.ossmaxfrags = 4;
1750 if ((ret = prog_dmabuf_adc(s)))
1751 return ret;
1752 }
1753 if (file->f_mode & FMODE_WRITE) {
1754 stop_dac(s);
1755 s->dma_dac.ossfragshift = val & 0xffff;
1756 s->dma_dac.ossmaxfrags = (val >> 16) & 0xffff;
1757 if (s->dma_dac.ossfragshift < 4)
1758 s->dma_dac.ossfragshift = 4;
1759 if (s->dma_dac.ossfragshift > 15)
1760 s->dma_dac.ossfragshift = 15;
1761 if (s->dma_dac.ossmaxfrags < 4)
1762 s->dma_dac.ossmaxfrags = 4;
1763 if ((ret = prog_dmabuf_dac(s)))
1764 return ret;
1765 }
1766 return 0;
1767
1768 case SNDCTL_DSP_SUBDIVIDE:
1769 if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision) ||
1770 (file->f_mode & FMODE_WRITE && s->dma_dac.subdivision))
1771 return -EINVAL;
1772 if (get_user(val, (int *) arg))
1773 return -EFAULT;
1774 if (val != 1 && val != 2 && val != 4)
1775 return -EINVAL;
1776 if (file->f_mode & FMODE_READ) {
1777 stop_adc(s);
1778 s->dma_adc.subdivision = val;
1779 if ((ret = prog_dmabuf_adc(s)))
1780 return ret;
1781 }
1782 if (file->f_mode & FMODE_WRITE) {
1783 stop_dac(s);
1784 s->dma_dac.subdivision = val;
1785 if ((ret = prog_dmabuf_dac(s)))
1786 return ret;
1787 }
1788 return 0;
1789
1790 case SOUND_PCM_READ_RATE:
1791 return put_user((file->f_mode & FMODE_READ) ?
1792 s->dma_adc.sample_rate :
1793 s->dma_dac.sample_rate,
1794 (int *)arg);
1795
1796 case SOUND_PCM_READ_CHANNELS:
1797 if (file->f_mode & FMODE_READ)
1798 return put_user(s->dma_adc.num_channels, (int *)arg);
1799 else
1800 return put_user(s->dma_dac.num_channels, (int *)arg);
1801
1802 case SOUND_PCM_READ_BITS:
1803 if (file->f_mode & FMODE_READ)
1804 return put_user(s->dma_adc.sample_size, (int *)arg);
1805 else
1806 return put_user(s->dma_dac.sample_size, (int *)arg);
1807
1808 case SOUND_PCM_WRITE_FILTER:
1809 case SNDCTL_DSP_SETSYNCRO:
1810 case SOUND_PCM_READ_FILTER:
1811 return -EINVAL;
1812 }
1813
1814 return mixdev_ioctl(&s->codec, cmd, arg);
1815}
1816
1817
1818static int au1000_open(struct inode *inode, struct file *file)
1819{
1820 int minor = iminor(inode);
1821 DECLARE_WAITQUEUE(wait, current);
1822 struct au1000_state *s = &au1000_state;
1823 int ret;
1824
1825#ifdef AU1000_VERBOSE_DEBUG
1826 if (file->f_flags & O_NONBLOCK)
1827 dbg("%s: non-blocking", __FUNCTION__);
1828 else
1829 dbg("%s: blocking", __FUNCTION__);
1830#endif
1831
1832 file->private_data = s;
1833 /* wait for device to become free */
1834 mutex_lock(&s->open_mutex);
1835 while (s->open_mode & file->f_mode) {
1836 if (file->f_flags & O_NONBLOCK) {
1837 mutex_unlock(&s->open_mutex);
1838 return -EBUSY;
1839 }
1840 add_wait_queue(&s->open_wait, &wait);
1841 __set_current_state(TASK_INTERRUPTIBLE);
1842 mutex_unlock(&s->open_mutex);
1843 schedule();
1844 remove_wait_queue(&s->open_wait, &wait);
1845 set_current_state(TASK_RUNNING);
1846 if (signal_pending(current))
1847 return -ERESTARTSYS;
1848 mutex_lock(&s->open_mutex);
1849 }
1850
1851 stop_dac(s);
1852 stop_adc(s);
1853
1854 if (file->f_mode & FMODE_READ) {
1855 s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags =
1856 s->dma_adc.subdivision = s->dma_adc.total_bytes = 0;
1857 s->dma_adc.num_channels = 1;
1858 s->dma_adc.sample_size = 8;
1859 set_adc_rate(s, 8000);
1860 if ((minor & 0xf) == SND_DEV_DSP16)
1861 s->dma_adc.sample_size = 16;
1862 }
1863
1864 if (file->f_mode & FMODE_WRITE) {
1865 s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags =
1866 s->dma_dac.subdivision = s->dma_dac.total_bytes = 0;
1867 s->dma_dac.num_channels = 1;
1868 s->dma_dac.sample_size = 8;
1869 set_dac_rate(s, 8000);
1870 if ((minor & 0xf) == SND_DEV_DSP16)
1871 s->dma_dac.sample_size = 16;
1872 }
1873
1874 if (file->f_mode & FMODE_READ) {
1875 if ((ret = prog_dmabuf_adc(s)))
1876 return ret;
1877 }
1878 if (file->f_mode & FMODE_WRITE) {
1879 if ((ret = prog_dmabuf_dac(s)))
1880 return ret;
1881 }
1882
1883 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
1884 mutex_unlock(&s->open_mutex);
1885 mutex_init(&s->sem);
1886 return nonseekable_open(inode, file);
1887}
1888
/*
 * release() handler: drain queued playback data, stop the stream(s),
 * free the DMA buffers, clear the open-mode bits and wake any opener
 * sleeping in au1000_open().
 */
static int au1000_release(struct inode *inode, struct file *file)
{
	struct au1000_state *s = (struct au1000_state *)file->private_data;

	lock_kernel();

	/* drain_dac() may sleep for a long time, so drop the BKL
	 * around it. */
	if (file->f_mode & FMODE_WRITE) {
		unlock_kernel();
		drain_dac(s, file->f_flags & O_NONBLOCK);
		lock_kernel();
	}

	mutex_lock(&s->open_mutex);
	if (file->f_mode & FMODE_WRITE) {
		stop_dac(s);
		dealloc_dmabuf(s, &s->dma_dac);
	}
	if (file->f_mode & FMODE_READ) {
		stop_adc(s);
		dealloc_dmabuf(s, &s->dma_adc);
	}
	/* Keep only the R/W mode bits not being closed by this file. */
	s->open_mode &= ((~file->f_mode) & (FMODE_READ|FMODE_WRITE));
	mutex_unlock(&s->open_mutex);
	/* Let a blocked au1000_open() retry. */
	wake_up(&s->open_wait);
	unlock_kernel();
	return 0;
}
1916
/* VFS entry points for the DSP (/dev/dsp) device node. */
static /*const */ struct file_operations au1000_audio_fops = {
	.owner		= THIS_MODULE,
	.llseek		= au1000_llseek,
	.read		= au1000_read,
	.write		= au1000_write,
	.poll		= au1000_poll,
	.ioctl		= au1000_ioctl,
	.mmap		= au1000_mmap,
	.open		= au1000_open,
	.release	= au1000_release,
};
1928
1929
1930/* --------------------------------------------------------------------- */
1931
1932
1933/* --------------------------------------------------------------------- */
1934
1935/*
1936 * for debugging purposes, we'll create a proc device that dumps the
1937 * CODEC chipstate
1938 */
1939
1940#ifdef AU1000_DEBUG
/*
 * proc read handler: dump the AC'97 controller registers and the full
 * CODEC register set into "buf", then apply the standard
 * read_proc_t start/eof/length bookkeeping for partial reads.
 */
static int proc_au1000_dump(char *buf, char **start, off_t fpos,
			    int length, int *eof, void *data)
{
	struct au1000_state *s = &au1000_state;
	int cnt, len = 0;

	/* print out header */
	len += sprintf(buf + len, "\n\t\tAU1000 Audio Debug\n\n");

	// print out digital controller state
	len += sprintf(buf + len, "AU1000 Audio Controller registers\n");
	len += sprintf(buf + len, "---------------------------------\n");
	len += sprintf (buf + len, "AC97C_CONFIG = %08x\n",
			au_readl(AC97C_CONFIG));
	len += sprintf (buf + len, "AC97C_STATUS = %08x\n",
			au_readl(AC97C_STATUS));
	len += sprintf (buf + len, "AC97C_CNTRL = %08x\n",
			au_readl(AC97C_CNTRL));

	/* print out CODEC state */
	len += sprintf(buf + len, "\nAC97 CODEC registers\n");
	len += sprintf(buf + len, "----------------------\n");
	for (cnt = 0; cnt <= 0x7e; cnt += 2)
		len += sprintf(buf + len, "reg %02x = %04x\n",
			       cnt, rdcodec(&s->codec, cnt));

	/* Partial-read protocol: nothing left past fpos => EOF. */
	if (fpos >= len) {
		*start = buf;
		*eof = 1;
		return 0;
	}
	/* Hand back the slice starting at fpos, at most "length" bytes;
	 * only set EOF when the remainder fits entirely. */
	*start = buf + fpos;
	if ((len -= fpos) > length)
		return length;
	*eof = 1;
	return len;

}
1979#endif /* AU1000_DEBUG */
1980
1981/* --------------------------------------------------------------------- */
1982
1983MODULE_AUTHOR("Monta Vista Software, stevel@mvista.com");
1984MODULE_DESCRIPTION("Au1000 Audio Driver");
1985
1986/* --------------------------------------------------------------------- */
1987
/*
 * Probe and initialize the Au1000 AC'97 audio device: claim the MMIO
 * region and the two DMA channels, register the OSS dsp and mixer
 * devices, reset and probe the AC'97 codec, and apply board-specific
 * codec setup.  Returns 0 on success, -1 on failure (resources are
 * unwound via the goto chain at the bottom).
 */
static int __devinit au1000_probe(void)
{
	struct au1000_state *s = &au1000_state;
	int val;
#ifdef AU1000_DEBUG
	char proc_str[80];
#endif

	memset(s, 0, sizeof(struct au1000_state));

	init_waitqueue_head(&s->dma_adc.wait);
	init_waitqueue_head(&s->dma_dac.wait);
	init_waitqueue_head(&s->open_wait);
	mutex_init(&s->open_mutex);
	spin_lock_init(&s->lock);
	s->codec.private_data = s;
	s->codec.id = 0;
	s->codec.codec_read = rdcodec;
	s->codec.codec_write = wrcodec;
	s->codec.codec_wait = waitcodec;

	/* Claim the AC'97 controller register window. */
	if (!request_mem_region(CPHYSADDR(AC97C_CONFIG),
				0x14, AU1000_MODULE_NAME)) {
		err("AC'97 ports in use");
		return -1;
	}
	// Allocate the DMA Channels
	if ((s->dma_dac.dmanr = request_au1000_dma(DMA_ID_AC97C_TX,
						   "audio DAC",
						   dac_dma_interrupt,
						   IRQF_DISABLED, s)) < 0) {
		err("Can't get DAC DMA");
		goto err_dma1;
	}
	if ((s->dma_adc.dmanr = request_au1000_dma(DMA_ID_AC97C_RX,
						   "audio ADC",
						   adc_dma_interrupt,
						   IRQF_DISABLED, s)) < 0) {
		err("Can't get ADC DMA");
		goto err_dma2;
	}

	info("DAC: DMA%d/IRQ%d, ADC: DMA%d/IRQ%d",
	     s->dma_dac.dmanr, get_dma_done_irq(s->dma_dac.dmanr),
	     s->dma_adc.dmanr, get_dma_done_irq(s->dma_adc.dmanr));

	// enable DMA coherency in read/write DMA channels
	set_dma_mode(s->dma_dac.dmanr,
		     get_dma_mode(s->dma_dac.dmanr) & ~DMA_NC);
	set_dma_mode(s->dma_adc.dmanr,
		     get_dma_mode(s->dma_adc.dmanr) & ~DMA_NC);

	/* register devices */

	if ((s->dev_audio = register_sound_dsp(&au1000_audio_fops, -1)) < 0)
		goto err_dev1;
	if ((s->codec.dev_mixer =
	     register_sound_mixer(&au1000_mixer_fops, -1)) < 0)
		goto err_dev2;

#ifdef AU1000_DEBUG
	/* initialize the debug proc device */
	s->ps = create_proc_read_entry(AU1000_MODULE_NAME, 0, NULL,
				       proc_au1000_dump, NULL);
#endif /* AU1000_DEBUG */

	// configure pins for AC'97
	au_writel(au_readl(SYS_PINFUNC) & ~0x02, SYS_PINFUNC);

	// Assert reset for 10msec to the AC'97 controller, and enable clock
	au_writel(AC97C_RS | AC97C_CE, AC97C_CNTRL);
	au1000_delay(10);
	au_writel(AC97C_CE, AC97C_CNTRL);
	au1000_delay(10);	// wait for clock to stabilize

	/* cold reset the AC'97 */
	au_writel(AC97C_RESET, AC97C_CONFIG);
	au1000_delay(10);
	au_writel(0, AC97C_CONFIG);
	/* need to delay around 500msec(bleech) to give
	   some CODECs enough time to wakeup */
	au1000_delay(500);

	/* warm reset the AC'97 to start the bitclk */
	au_writel(AC97C_SG | AC97C_SYNC, AC97C_CONFIG);
	udelay(100);
	au_writel(0, AC97C_CONFIG);

	/* codec init */
	if (!ac97_probe_codec(&s->codec))
		goto err_dev3;

	s->codec_base_caps = rdcodec(&s->codec, AC97_RESET);
	s->codec_ext_caps = rdcodec(&s->codec, AC97_EXTENDED_ID);
	info("AC'97 Base/Extended ID = %04x/%04x",
	     s->codec_base_caps, s->codec_ext_caps);

	/*
	 * On the Pb1000, audio playback is on the AUX_OUT
	 * channel (which defaults to LNLVL_OUT in AC'97
	 * rev 2.2) so make sure this channel is listed
	 * as supported (soundcard.h calls this channel
	 * ALTPCM). ac97_codec.c does not handle detection
	 * of this channel correctly.
	 */
	s->codec.supported_mixers |= SOUND_MASK_ALTPCM;
	/*
	 * Now set AUX_OUT's default volume.
	 */
	val = 0x4343;
	mixdev_ioctl(&s->codec, SOUND_MIXER_WRITE_ALTPCM,
		     (unsigned long) &val);

	if (!(s->codec_ext_caps & AC97_EXTID_VRA)) {
		// codec does not support VRA
		s->no_vra = 1;
	} else if (!vra) {
		// Boot option says disable VRA
		u16 ac97_extstat = rdcodec(&s->codec, AC97_EXTENDED_STATUS);
		wrcodec(&s->codec, AC97_EXTENDED_STATUS,
			ac97_extstat & ~AC97_EXTSTAT_VRA);
		s->no_vra = 1;
	}
	if (s->no_vra)
		info("no VRA, interpolating and decimating");

	/* set mic to be the recording source */
	val = SOUND_MASK_MIC;
	mixdev_ioctl(&s->codec, SOUND_MIXER_WRITE_RECSRC,
		     (unsigned long) &val);

#ifdef AU1000_DEBUG
	sprintf(proc_str, "driver/%s/%d/ac97", AU1000_MODULE_NAME,
		s->codec.id);
	s->ac97_ps = create_proc_read_entry (proc_str, 0, NULL,
					     ac97_read_proc, &s->codec);
#endif

#ifdef CONFIG_MIPS_XXS1500
	/* deassert eapd */
	wrcodec(&s->codec, AC97_POWER_CONTROL,
		rdcodec(&s->codec, AC97_POWER_CONTROL) & ~0x8000);
	/* mute a number of signals which seem to be causing problems
	 * if not muted.
	 */
	wrcodec(&s->codec, AC97_PCBEEP_VOL, 0x8000);
	wrcodec(&s->codec, AC97_PHONE_VOL, 0x8008);
	wrcodec(&s->codec, AC97_MIC_VOL, 0x8008);
	wrcodec(&s->codec, AC97_LINEIN_VOL, 0x8808);
	wrcodec(&s->codec, AC97_CD_VOL, 0x8808);
	wrcodec(&s->codec, AC97_VIDEO_VOL, 0x8808);
	wrcodec(&s->codec, AC97_AUX_VOL, 0x8808);
	wrcodec(&s->codec, AC97_PCMOUT_VOL, 0x0808);
	wrcodec(&s->codec, AC97_GENERAL_PURPOSE, 0x2000);
#endif

	return 0;

	/* Error unwind: release resources in reverse acquisition order. */
 err_dev3:
	unregister_sound_mixer(s->codec.dev_mixer);
 err_dev2:
	unregister_sound_dsp(s->dev_audio);
 err_dev1:
	free_au1000_dma(s->dma_adc.dmanr);
 err_dma2:
	free_au1000_dma(s->dma_dac.dmanr);
 err_dma1:
	release_mem_region(CPHYSADDR(AC97C_CONFIG), 0x14);
	return -1;
}
2158
/*
 * Tear down everything au1000_probe() set up, in reverse order:
 * debug proc entry, DMA channels, MMIO region, sound devices.
 */
static void au1000_remove(void)
{
	struct au1000_state *s = &au1000_state;

	/* Defensive: s points at static storage, so this never triggers. */
	if (!s)
		return;
#ifdef AU1000_DEBUG
	if (s->ps)
		remove_proc_entry(AU1000_MODULE_NAME, NULL);
#endif /* AU1000_DEBUG */
	/* Make sure no DMA interrupt handler is still running. */
	synchronize_irq();
	free_au1000_dma(s->dma_adc.dmanr);
	free_au1000_dma(s->dma_dac.dmanr);
	release_mem_region(CPHYSADDR(AC97C_CONFIG), 0x14);
	unregister_sound_dsp(s->dev_audio);
	unregister_sound_mixer(s->codec.dev_mixer);
}
2176
/* Module entry point: announce the build and run the probe. */
static int __init init_au1000(void)
{
	info("stevel@mvista.com, built " __TIME__ " on " __DATE__);
	return au1000_probe();
}
2182
/* Module exit point: release all driver resources. */
static void __exit cleanup_au1000(void)
{
	info("unloading");
	au1000_remove();
}
2188
2189module_init(init_au1000);
2190module_exit(cleanup_au1000);
2191
2192/* --------------------------------------------------------------------- */
2193
2194#ifndef MODULE
2195
2196static int __init au1000_setup(char *options)
2197{
2198 char *this_opt;
2199
2200 if (!options || !*options)
2201 return 0;
2202
2203 while ((this_opt = strsep(&options, ","))) {
2204 if (!*this_opt)
2205 continue;
2206 if (!strncmp(this_opt, "vra", 3)) {
2207 vra = 1;
2208 }
2209 }
2210
2211 return 1;
2212}
2213
2214__setup("au1000_audio=", au1000_setup);
2215
2216#endif /* MODULE */
diff --git a/sound/oss/audio_syms.c b/sound/oss/audio_syms.c
deleted file mode 100644
index 5da217fcbedd..000000000000
--- a/sound/oss/audio_syms.c
+++ /dev/null
@@ -1,16 +0,0 @@
1/*
2 * Exported symbols for audio driver.
3 */
4
5#include <linux/module.h>
6
7char audio_syms_symbol;
8
9#include "sound_config.h"
10#include "sound_calls.h"
11
12EXPORT_SYMBOL(DMAbuf_start_dma);
13EXPORT_SYMBOL(DMAbuf_open_dma);
14EXPORT_SYMBOL(DMAbuf_close_dma);
15EXPORT_SYMBOL(DMAbuf_inputintr);
16EXPORT_SYMBOL(DMAbuf_outputintr);
diff --git a/sound/oss/awe_hw.h b/sound/oss/awe_hw.h
deleted file mode 100644
index ab00c3c67e4e..000000000000
--- a/sound/oss/awe_hw.h
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * sound/oss/awe_hw.h
3 *
4 * Access routines and definitions for the low level driver for the
5 * Creative AWE32/SB32/AWE64 wave table synth.
6 * version 0.4.4; Jan. 4, 2000
7 *
8 * Copyright (C) 1996-2000 Takashi Iwai
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#ifndef AWE_HW_H_DEF
26#define AWE_HW_H_DEF
27
28/*
29 * Emu-8000 control registers
30 * name(channel) reg, port
31 */
32
33#define awe_cmd_idx(reg,ch) (((reg)<< 5) | (ch))
34
35#define Data0 0 /* 0x620: doubleword r/w */
36#define Data1 1 /* 0xA20: doubleword r/w */
37#define Data2 2 /* 0xA22: word r/w */
38#define Data3 3 /* 0xE20: word r/w */
39#define Pointer 4 /* 0xE22 register pointer r/w */
40
41#define AWE_CPF(ch) awe_cmd_idx(0,ch), Data0 /* DW: current pitch and fractional address */
42#define AWE_PTRX(ch) awe_cmd_idx(1,ch), Data0 /* DW: pitch target and reverb send */
43#define AWE_CVCF(ch) awe_cmd_idx(2,ch), Data0 /* DW: current volume and filter cutoff */
44#define AWE_VTFT(ch) awe_cmd_idx(3,ch), Data0 /* DW: volume and filter cutoff targets */
45#define AWE_0080(ch) awe_cmd_idx(4,ch), Data0 /* DW: ?? */
46#define AWE_00A0(ch) awe_cmd_idx(5,ch), Data0 /* DW: ?? */
47#define AWE_PSST(ch) awe_cmd_idx(6,ch), Data0 /* DW: pan send and loop start address */
48#define AWE_CSL(ch) awe_cmd_idx(7,ch), Data0 /* DW: chorus send and loop end address */
49#define AWE_CCCA(ch) awe_cmd_idx(0,ch), Data1 /* DW: Q, control bits, and current address */
50#define AWE_HWCF4 awe_cmd_idx(1,9), Data1 /* DW: config dw 4 */
51#define AWE_HWCF5 awe_cmd_idx(1,10), Data1 /* DW: config dw 5 */
52#define AWE_HWCF6 awe_cmd_idx(1,13), Data1 /* DW: config dw 6 */
53#define AWE_HWCF7 awe_cmd_idx(1,14), Data1 /* DW: config dw 7? (not documented) */
54#define AWE_SMALR awe_cmd_idx(1,20), Data1 /* DW: sound memory address for left read */
55#define AWE_SMARR awe_cmd_idx(1,21), Data1 /* DW: for right read */
56#define AWE_SMALW awe_cmd_idx(1,22), Data1 /* DW: sound memory address for left write */
57#define AWE_SMARW awe_cmd_idx(1,23), Data1 /* DW: for right write */
58#define AWE_SMLD awe_cmd_idx(1,26), Data1 /* W: sound memory left data */
59#define AWE_SMRD awe_cmd_idx(1,26), Data2 /* W: right data */
60#define AWE_WC awe_cmd_idx(1,27), Data2 /* W: sample counter */
61#define AWE_WC_Cmd awe_cmd_idx(1,27)
62#define AWE_WC_Port Data2
63#define AWE_HWCF1 awe_cmd_idx(1,29), Data1 /* W: config w 1 */
64#define AWE_HWCF2 awe_cmd_idx(1,30), Data1 /* W: config w 2 */
65#define AWE_HWCF3 awe_cmd_idx(1,31), Data1 /* W: config w 3 */
66#define AWE_INIT1(ch) awe_cmd_idx(2,ch), Data1 /* W: init array 1 */
67#define AWE_INIT2(ch) awe_cmd_idx(2,ch), Data2 /* W: init array 2 */
68#define AWE_INIT3(ch) awe_cmd_idx(3,ch), Data1 /* W: init array 3 */
69#define AWE_INIT4(ch) awe_cmd_idx(3,ch), Data2 /* W: init array 4 */
70#define AWE_ENVVOL(ch) awe_cmd_idx(4,ch), Data1 /* W: volume envelope delay */
71#define AWE_DCYSUSV(ch) awe_cmd_idx(5,ch), Data1 /* W: volume envelope sustain and decay */
72#define AWE_ENVVAL(ch) awe_cmd_idx(6,ch), Data1 /* W: modulation envelope delay */
73#define AWE_DCYSUS(ch) awe_cmd_idx(7,ch), Data1 /* W: modulation envelope sustain and decay */
74#define AWE_ATKHLDV(ch) awe_cmd_idx(4,ch), Data2 /* W: volume envelope attack and hold */
75#define AWE_LFO1VAL(ch) awe_cmd_idx(5,ch), Data2 /* W: LFO#1 Delay */
76#define AWE_ATKHLD(ch) awe_cmd_idx(6,ch), Data2 /* W: modulation envelope attack and hold */
77#define AWE_LFO2VAL(ch) awe_cmd_idx(7,ch), Data2 /* W: LFO#2 Delay */
78#define AWE_IP(ch) awe_cmd_idx(0,ch), Data3 /* W: initial pitch */
79#define AWE_IFATN(ch) awe_cmd_idx(1,ch), Data3 /* W: initial filter cutoff and attenuation */
80#define AWE_PEFE(ch) awe_cmd_idx(2,ch), Data3 /* W: pitch and filter envelope heights */
81#define AWE_FMMOD(ch) awe_cmd_idx(3,ch), Data3 /* W: vibrato and filter modulation freq */
82#define AWE_TREMFRQ(ch) awe_cmd_idx(4,ch), Data3 /* W: LFO#1 tremolo amount and freq */
83#define AWE_FM2FRQ2(ch) awe_cmd_idx(5,ch), Data3 /* W: LFO#2 vibrato amount and freq */
84
85/* used during detection (returns ROM version?; not documented in ADIP) */
86#define AWE_U1 0xE0, Data3 /* (R)(W) used in initialization */
87#define AWE_U2(ch) 0xC0+(ch), Data3 /* (W)(W) used in init envelope */
88
89
90#define AWE_MAX_VOICES 32
91#define AWE_NORMAL_VOICES 30 /*30&31 are reserved for DRAM refresh*/
92
93#define AWE_MAX_CHANNELS 32 /* max midi channels (must >= voices) */
94#define AWE_MAX_LAYERS AWE_MAX_VOICES /* maximum number of multiple layers */
95
96#define AWE_DRAM_OFFSET 0x200000
97#define AWE_MAX_DRAM_SIZE (28 * 1024) /* 28 MB is max onboard memory */
98
99#endif
diff --git a/sound/oss/awe_wave.c b/sound/oss/awe_wave.c
deleted file mode 100644
index 1b968f7ecb3f..000000000000
--- a/sound/oss/awe_wave.c
+++ /dev/null
@@ -1,6148 +0,0 @@
1/*
2 * sound/oss/awe_wave.c
3 *
4 * The low level driver for the AWE32/SB32/AWE64 wave table synth.
5 * version 0.4.4; Jan. 4, 2000
6 *
7 * Copyright (C) 1996-2000 Takashi Iwai
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24/*
25 * Changelog:
26 * Aug 18, 2003, Adam Belay <ambx1@neo.rr.com>
27 * - detection code rewrite
28 */
29
30#include <linux/awe_voice.h>
31#include <linux/init.h>
32#include <linux/module.h>
33#include <linux/string.h>
34#include <linux/pnp.h>
35
36#include "sound_config.h"
37
38#include "awe_wave.h"
39#include "awe_hw.h"
40
41#ifdef AWE_HAS_GUS_COMPATIBILITY
42#include "tuning.h"
43#include <linux/ultrasound.h>
44#endif
45
46/*
47 * debug message
48 */
49
50#ifdef AWE_DEBUG_ON
51#define DEBUG(LVL,XXX) {if (ctrls[AWE_MD_DEBUG_MODE] > LVL) { XXX; }}
52#define ERRMSG(XXX) {if (ctrls[AWE_MD_DEBUG_MODE]) { XXX; }}
53#define FATALERR(XXX) XXX
54#else
55#define DEBUG(LVL,XXX) /**/
56#define ERRMSG(XXX) XXX
57#define FATALERR(XXX) XXX
58#endif
59
60/*
61 * bank and voice record
62 */
63
64typedef struct _sf_list sf_list;
65typedef struct _awe_voice_list awe_voice_list;
66typedef struct _awe_sample_list awe_sample_list;
67
68/* soundfont record */
69struct _sf_list {
70 unsigned short sf_id; /* id number */
71 unsigned short type; /* lock & shared flags */
72 int num_info; /* current info table index */
73 int num_sample; /* current sample table index */
74 int mem_ptr; /* current word byte pointer */
75 awe_voice_list *infos, *last_infos; /* instruments */
76 awe_sample_list *samples, *last_samples; /* samples */
77#ifdef AWE_ALLOW_SAMPLE_SHARING
78 sf_list *shared; /* shared list */
79 unsigned char name[AWE_PATCH_NAME_LEN]; /* sharing id */
80#endif
81 sf_list *next, *prev;
82};
83
84/* instrument list */
85struct _awe_voice_list {
86 awe_voice_info v; /* instrument information */
87 sf_list *holder; /* parent sf_list of this record */
88 unsigned char bank, instr; /* preset number information */
89 char type, disabled; /* type=normal/mapped, disabled=boolean */
90 awe_voice_list *next; /* linked list with same sf_id */
91 awe_voice_list *next_instr; /* instrument list */
92 awe_voice_list *next_bank; /* hash table list */
93};
94
95/* voice list type */
96#define V_ST_NORMAL 0
97#define V_ST_MAPPED 1
98
99/* sample list */
100struct _awe_sample_list {
101 awe_sample_info v; /* sample information */
102 sf_list *holder; /* parent sf_list of this record */
103 awe_sample_list *next; /* linked list with same sf_id */
104};
105
106/* sample and information table */
107static int current_sf_id; /* current number of fonts */
108static int locked_sf_id; /* locked position */
109static sf_list *sfhead, *sftail; /* linked-lists */
110
111#define awe_free_mem_ptr() (sftail ? sftail->mem_ptr : 0)
112#define awe_free_info() (sftail ? sftail->num_info : 0)
113#define awe_free_sample() (sftail ? sftail->num_sample : 0)
114
115#define AWE_MAX_PRESETS 256
116#define AWE_DEFAULT_PRESET 0
117#define AWE_DEFAULT_BANK 0
118#define AWE_DEFAULT_DRUM 0
119#define AWE_DRUM_BANK 128
120
121#define MAX_LAYERS AWE_MAX_VOICES
122
123/* preset table index */
124static awe_voice_list *preset_table[AWE_MAX_PRESETS];
125
126/*
127 * voice table
128 */
129
130/* effects table */
131typedef struct FX_Rec { /* channel effects */
132 unsigned char flags[AWE_FX_END];
133 short val[AWE_FX_END];
134} FX_Rec;
135
136
137/* channel parameters */
138typedef struct _awe_chan_info {
139 int channel; /* channel number */
140 int bank; /* current tone bank */
141 int instr; /* current program */
142 int bender; /* midi pitchbend (-8192 - 8192) */
143 int bender_range; /* midi bender range (x100) */
144 int panning; /* panning (0-127) */
145 int main_vol; /* channel volume (0-127) */
146 int expression_vol; /* midi expression (0-127) */
147 int chan_press; /* channel pressure */
148 int sustained; /* sustain status in MIDI */
149 FX_Rec fx; /* effects */
150 FX_Rec fx_layer[MAX_LAYERS]; /* layer effects */
151} awe_chan_info;
152
153/* voice parameters */
154typedef struct _voice_info {
155 int state;
156#define AWE_ST_OFF (1<<0) /* no sound */
157#define AWE_ST_ON (1<<1) /* playing */
158#define AWE_ST_STANDBY (1<<2) /* stand by for playing */
159#define AWE_ST_SUSTAINED (1<<3) /* sustained */
160#define AWE_ST_MARK (1<<4) /* marked for allocation */
161#define AWE_ST_DRAM (1<<5) /* DRAM read/write */
162#define AWE_ST_FM (1<<6) /* reserved for FM */
163#define AWE_ST_RELEASED (1<<7) /* released */
164
165 int ch; /* midi channel */
166 int key; /* internal key for search */
167 int layer; /* layer number (for channel mode only) */
168 int time; /* allocated time */
169 awe_chan_info *cinfo; /* channel info */
170
171 int note; /* midi key (0-127) */
172 int velocity; /* midi velocity (0-127) */
173 int sostenuto; /* sostenuto on/off */
174 awe_voice_info *sample; /* assigned voice */
175
176 /* EMU8000 parameters */
177 int apitch; /* pitch parameter */
178 int avol; /* volume parameter */
179 int apan; /* panning parameter */
180 int acutoff; /* cutoff parameter */
181 short aaux; /* aux word */
182} voice_info;
183
184/* voice information */
185static voice_info voices[AWE_MAX_VOICES];
186
187#define IS_NO_SOUND(v) (voices[v].state & (AWE_ST_OFF|AWE_ST_RELEASED|AWE_ST_STANDBY|AWE_ST_SUSTAINED))
188#define IS_NO_EFFECT(v) (voices[v].state != AWE_ST_ON)
189#define IS_PLAYING(v) (voices[v].state & (AWE_ST_ON|AWE_ST_SUSTAINED|AWE_ST_RELEASED))
190#define IS_EMPTY(v) (voices[v].state & (AWE_ST_OFF|AWE_ST_MARK|AWE_ST_DRAM|AWE_ST_FM))
191
192
193/* MIDI channel effects information (for hw control) */
194static awe_chan_info channels[AWE_MAX_CHANNELS];
195
196
197/*
198 * global variables
199 */
200
201#ifndef AWE_DEFAULT_BASE_ADDR
202#define AWE_DEFAULT_BASE_ADDR 0 /* autodetect */
203#endif
204
205#ifndef AWE_DEFAULT_MEM_SIZE
206#define AWE_DEFAULT_MEM_SIZE -1 /* autodetect */
207#endif
208
209static int io = AWE_DEFAULT_BASE_ADDR; /* Emu8000 base address */
210static int memsize = AWE_DEFAULT_MEM_SIZE; /* memory size in Kbytes */
211#ifdef CONFIG_PNP
212static int isapnp = -1;
213#else
214static int isapnp;
215#endif
216
217MODULE_AUTHOR("Takashi Iwai <iwai@ww.uni-erlangen.de>");
218MODULE_DESCRIPTION("SB AWE32/64 WaveTable driver");
219MODULE_LICENSE("GPL");
220
221module_param(io, int, 0);
222MODULE_PARM_DESC(io, "base i/o port of Emu8000");
223module_param(memsize, int, 0);
224MODULE_PARM_DESC(memsize, "onboard DRAM size in Kbytes");
225module_param(isapnp, bool, 0);
226MODULE_PARM_DESC(isapnp, "use ISAPnP detection");
227
228/* DRAM start offset */
229static int awe_mem_start = AWE_DRAM_OFFSET;
230
231/* maximum channels for playing */
232static int awe_max_voices = AWE_MAX_VOICES;
233
234static int patch_opened; /* sample already loaded? */
235
236static char atten_relative = FALSE;
237static short atten_offset;
238
239static int awe_present = FALSE; /* awe device present? */
240static int awe_busy = FALSE; /* awe device opened? */
241
242static int my_dev = -1;
243
244#define DEFAULT_DRUM_FLAGS ((1 << 9) | (1 << 25))
245#define IS_DRUM_CHANNEL(c) (drum_flags & (1 << (c)))
246#define DRUM_CHANNEL_ON(c) (drum_flags |= (1 << (c)))
247#define DRUM_CHANNEL_OFF(c) (drum_flags &= ~(1 << (c)))
248static unsigned int drum_flags = DEFAULT_DRUM_FLAGS; /* channel flags */
249
250static int playing_mode = AWE_PLAY_INDIRECT;
251#define SINGLE_LAYER_MODE() (playing_mode == AWE_PLAY_INDIRECT || playing_mode == AWE_PLAY_DIRECT)
252#define MULTI_LAYER_MODE() (playing_mode == AWE_PLAY_MULTI || playing_mode == AWE_PLAY_MULTI2)
253
254static int current_alloc_time; /* voice allocation index for channel mode */
255
256static struct synth_info awe_info = {
257 "AWE32 Synth", /* name */
258 0, /* device */
259 SYNTH_TYPE_SAMPLE, /* synth_type */
260 SAMPLE_TYPE_AWE32, /* synth_subtype */
261 0, /* perc_mode (obsolete) */
262 AWE_MAX_VOICES, /* nr_voices */
263 0, /* nr_drums (obsolete) */
264 400 /* instr_bank_size */
265};
266
267
268static struct voice_alloc_info *voice_alloc; /* set at initialization */
269
270
271/*
272 * function prototypes
273 */
274
275static int awe_request_region(void);
276static void awe_release_region(void);
277
278static void awe_reset_samples(void);
279/* emu8000 chip i/o access */
280static void setup_ports(int p1, int p2, int p3);
281static void awe_poke(unsigned short cmd, unsigned short port, unsigned short data);
282static void awe_poke_dw(unsigned short cmd, unsigned short port, unsigned int data);
283static unsigned short awe_peek(unsigned short cmd, unsigned short port);
284static unsigned int awe_peek_dw(unsigned short cmd, unsigned short port);
285static void awe_wait(unsigned short delay);
286
287/* initialize emu8000 chip */
288static void awe_initialize(void);
289
290/* set voice parameters */
291static void awe_init_ctrl_parms(int init_all);
292static void awe_init_voice_info(awe_voice_info *vp);
293static void awe_init_voice_parm(awe_voice_parm *pp);
294#ifdef AWE_HAS_GUS_COMPATIBILITY
295static int freq_to_note(int freq);
296static int calc_rate_offset(int Hz);
297/*static int calc_parm_delay(int msec);*/
298static int calc_parm_hold(int msec);
299static int calc_parm_attack(int msec);
300static int calc_parm_decay(int msec);
301static int calc_parm_search(int msec, short *table);
302#endif /* gus compat */
303
304/* turn on/off note */
305static void awe_note_on(int voice);
306static void awe_note_off(int voice);
307static void awe_terminate(int voice);
308static void awe_exclusive_off(int voice);
309static void awe_note_off_all(int do_sustain);
310
311/* calculate voice parameters */
312typedef void (*fx_affect_func)(int voice, int forced);
313static void awe_set_pitch(int voice, int forced);
314static void awe_set_voice_pitch(int voice, int forced);
315static void awe_set_volume(int voice, int forced);
316static void awe_set_voice_vol(int voice, int forced);
317static void awe_set_pan(int voice, int forced);
318static void awe_fx_fmmod(int voice, int forced);
319static void awe_fx_tremfrq(int voice, int forced);
320static void awe_fx_fm2frq2(int voice, int forced);
321static void awe_fx_filterQ(int voice, int forced);
322static void awe_calc_pitch(int voice);
323#ifdef AWE_HAS_GUS_COMPATIBILITY
324static void awe_calc_pitch_from_freq(int voice, int freq);
325#endif
326static void awe_calc_volume(int voice);
327static void awe_update_volume(void);
328static void awe_change_master_volume(short val);
329static void awe_voice_init(int voice, int init_all);
330static void awe_channel_init(int ch, int init_all);
331static void awe_fx_init(int ch);
332static void awe_send_effect(int voice, int layer, int type, int val);
333static void awe_modwheel_change(int voice, int value);
334
335/* sequencer interface */
336static int awe_open(int dev, int mode);
337static void awe_close(int dev);
338static int awe_ioctl(int dev, unsigned int cmd, void __user * arg);
339static int awe_kill_note(int dev, int voice, int note, int velocity);
340static int awe_start_note(int dev, int v, int note_num, int volume);
341static int awe_set_instr(int dev, int voice, int instr_no);
342static int awe_set_instr_2(int dev, int voice, int instr_no);
343static void awe_reset(int dev);
344static void awe_hw_control(int dev, unsigned char *event);
345static int awe_load_patch(int dev, int format, const char __user *addr,
346 int offs, int count, int pmgr_flag);
347static void awe_aftertouch(int dev, int voice, int pressure);
348static void awe_controller(int dev, int voice, int ctrl_num, int value);
349static void awe_panning(int dev, int voice, int value);
350static void awe_volume_method(int dev, int mode);
351static void awe_bender(int dev, int voice, int value);
352static int awe_alloc(int dev, int chn, int note, struct voice_alloc_info *alloc);
353static void awe_setup_voice(int dev, int voice, int chn);
354
355#define awe_key_pressure(dev,voice,key,press) awe_start_note(dev,voice,(key)+128,press)
356
357/* hardware controls */
358#ifdef AWE_HAS_GUS_COMPATIBILITY
359static void awe_hw_gus_control(int dev, int cmd, unsigned char *event);
360#endif
361static void awe_hw_awe_control(int dev, int cmd, unsigned char *event);
362static void awe_voice_change(int voice, fx_affect_func func);
363static void awe_sostenuto_on(int voice, int forced);
364static void awe_sustain_off(int voice, int forced);
365static void awe_terminate_and_init(int voice, int forced);
366
367/* voice search */
368static int awe_search_key(int bank, int preset, int note);
369static awe_voice_list *awe_search_instr(int bank, int preset, int note);
370static int awe_search_multi_voices(awe_voice_list *rec, int note, int velocity, awe_voice_info **vlist);
371static void awe_alloc_multi_voices(int ch, int note, int velocity, int key);
372static void awe_alloc_one_voice(int voice, int note, int velocity);
373static int awe_clear_voice(void);
374
375/* load / remove patches */
376static int awe_open_patch(awe_patch_info *patch, const char __user *addr, int count);
377static int awe_close_patch(awe_patch_info *patch, const char __user *addr, int count);
378static int awe_unload_patch(awe_patch_info *patch, const char __user *addr, int count);
379static int awe_load_info(awe_patch_info *patch, const char __user *addr, int count);
380static int awe_remove_info(awe_patch_info *patch, const char __user *addr, int count);
381static int awe_load_data(awe_patch_info *patch, const char __user *addr, int count);
382static int awe_replace_data(awe_patch_info *patch, const char __user *addr, int count);
383static int awe_load_map(awe_patch_info *patch, const char __user *addr, int count);
384#ifdef AWE_HAS_GUS_COMPATIBILITY
385static int awe_load_guspatch(const char __user *addr, int offs, int size, int pmgr_flag);
386#endif
387/*static int awe_probe_info(awe_patch_info *patch, const char __user *addr, int count);*/
388static int awe_probe_data(awe_patch_info *patch, const char __user *addr, int count);
389static sf_list *check_patch_opened(int type, char *name);
390static int awe_write_wave_data(const char __user *addr, int offset, awe_sample_list *sp, int channels);
391static int awe_create_sf(int type, char *name);
392static void awe_free_sf(sf_list *sf);
393static void add_sf_info(sf_list *sf, awe_voice_list *rec);
394static void add_sf_sample(sf_list *sf, awe_sample_list *smp);
395static void purge_old_list(awe_voice_list *rec, awe_voice_list *next);
396static void add_info_list(awe_voice_list *rec);
397static void awe_remove_samples(int sf_id);
398static void rebuild_preset_list(void);
399static short awe_set_sample(awe_voice_list *rec);
400static awe_sample_list *search_sample_index(sf_list *sf, int sample);
401
402static int is_identical_holder(sf_list *sf1, sf_list *sf2);
403#ifdef AWE_ALLOW_SAMPLE_SHARING
404static int is_identical_name(unsigned char *name, sf_list *p);
405static int is_shared_sf(unsigned char *name);
406static int info_duplicated(sf_list *sf, awe_voice_list *rec);
407#endif /* allow sharing */
408
409/* lowlevel functions */
410static void awe_init_audio(void);
411static void awe_init_dma(void);
412static void awe_init_array(void);
413static void awe_send_array(unsigned short *data);
414static void awe_tweak_voice(int voice);
415static void awe_tweak(void);
416static void awe_init_fm(void);
417static int awe_open_dram_for_write(int offset, int channels);
418static void awe_open_dram_for_check(void);
419static void awe_close_dram(void);
420/*static void awe_write_dram(unsigned short c);*/
421static int awe_detect_base(int addr);
422static int awe_detect(void);
423static void awe_check_dram(void);
424static int awe_load_chorus_fx(awe_patch_info *patch, const char __user *addr, int count);
425static void awe_set_chorus_mode(int mode);
426static void awe_update_chorus_mode(void);
427static int awe_load_reverb_fx(awe_patch_info *patch, const char __user *addr, int count);
428static void awe_set_reverb_mode(int mode);
429static void awe_update_reverb_mode(void);
430static void awe_equalizer(int bass, int treble);
431static void awe_update_equalizer(void);
432
433#ifdef CONFIG_AWE32_MIXER
434static void attach_mixer(void);
435static void unload_mixer(void);
436#endif
437
438#ifdef CONFIG_AWE32_MIDIEMU
439static void attach_midiemu(void);
440static void unload_midiemu(void);
441#endif
442
443#define limitvalue(x, a, b) if ((x) < (a)) (x) = (a); else if ((x) > (b)) (x) = (b)
444
445/*
446 * control parameters
447 */
448
449
450#ifdef AWE_USE_NEW_VOLUME_CALC
451#define DEF_VOLUME_CALC TRUE
452#else
453#define DEF_VOLUME_CALC FALSE
454#endif /* new volume */
455
456#define DEF_ZERO_ATTEN 32 /* 12dB below */
457#define DEF_MOD_SENSE 18
458#define DEF_CHORUS_MODE 2
459#define DEF_REVERB_MODE 4
460#define DEF_BASS_LEVEL 5
461#define DEF_TREBLE_LEVEL 9
462
463static struct CtrlParmsDef {
464 int value;
465 int init_each_time;
466 void (*update)(void);
467} ctrl_parms[AWE_MD_END] = {
468 {0,0, NULL}, {0,0, NULL}, /* <-- not used */
469 {AWE_VERSION_NUMBER, FALSE, NULL},
470 {TRUE, FALSE, NULL}, /* exclusive */
471 {TRUE, FALSE, NULL}, /* realpan */
472 {AWE_DEFAULT_BANK, FALSE, NULL}, /* gusbank */
473 {FALSE, TRUE, NULL}, /* keep effect */
474 {DEF_ZERO_ATTEN, FALSE, awe_update_volume}, /* zero_atten */
475 {FALSE, FALSE, NULL}, /* chn_prior */
476 {DEF_MOD_SENSE, FALSE, NULL}, /* modwheel sense */
477 {AWE_DEFAULT_PRESET, FALSE, NULL}, /* def_preset */
478 {AWE_DEFAULT_BANK, FALSE, NULL}, /* def_bank */
479 {AWE_DEFAULT_DRUM, FALSE, NULL}, /* def_drum */
480 {FALSE, FALSE, NULL}, /* toggle_drum_bank */
481 {DEF_VOLUME_CALC, FALSE, awe_update_volume}, /* new_volume_calc */
482 {DEF_CHORUS_MODE, FALSE, awe_update_chorus_mode}, /* chorus mode */
483 {DEF_REVERB_MODE, FALSE, awe_update_reverb_mode}, /* reverb mode */
484 {DEF_BASS_LEVEL, FALSE, awe_update_equalizer}, /* bass level */
485 {DEF_TREBLE_LEVEL, FALSE, awe_update_equalizer}, /* treble level */
486 {0, FALSE, NULL}, /* debug mode */
487 {FALSE, FALSE, NULL}, /* pan exchange */
488};
489
490static int ctrls[AWE_MD_END];
491
492
493/*
494 * synth operation table
495 */
496
497static struct synth_operations awe_operations =
498{
499 .owner = THIS_MODULE,
500 .id = "EMU8K",
501 .info = &awe_info,
502 .midi_dev = 0,
503 .synth_type = SYNTH_TYPE_SAMPLE,
504 .synth_subtype = SAMPLE_TYPE_AWE32,
505 .open = awe_open,
506 .close = awe_close,
507 .ioctl = awe_ioctl,
508 .kill_note = awe_kill_note,
509 .start_note = awe_start_note,
510 .set_instr = awe_set_instr_2,
511 .reset = awe_reset,
512 .hw_control = awe_hw_control,
513 .load_patch = awe_load_patch,
514 .aftertouch = awe_aftertouch,
515 .controller = awe_controller,
516 .panning = awe_panning,
517 .volume_method = awe_volume_method,
518 .bender = awe_bender,
519 .alloc_voice = awe_alloc,
520 .setup_voice = awe_setup_voice
521};
522
523static void free_tables(void)
524{
525 if (sftail) {
526 sf_list *p, *prev;
527 for (p = sftail; p; p = prev) {
528 prev = p->prev;
529 awe_free_sf(p);
530 }
531 }
532 sfhead = sftail = NULL;
533}
534
535/*
536 * clear sample tables
537 */
538
/*
 * Forget all loaded soundfont data: clear the preset hash index, free
 * the instrument/sample lists, and reset the load-state bookkeeping so
 * the driver behaves as if no patch had ever been loaded.
 */
static void
awe_reset_samples(void)
{
	/* free all bank tables */
	memset(preset_table, 0, sizeof(preset_table));
	free_tables();

	current_sf_id = 0;	/* no fonts loaded */
	locked_sf_id = 0;	/* no locked position */
	patch_opened = 0;	/* no patch load in progress */
}
550
551
552/*
553 * EMU register access
554 */
555
556/* select a given AWE32 pointer */
557static int awe_ports[5];
558static int port_setuped = FALSE;
559static int awe_cur_cmd = -1;
560#define awe_set_cmd(cmd) \
561if (awe_cur_cmd != cmd) { outw(cmd, awe_ports[Pointer]); awe_cur_cmd = cmd; }
562
563/* write 16bit data */
/*
 * Write a 16bit value to the EMU8000 register selected by cmd/port.
 * awe_set_cmd() latches the register pointer only when it differs from
 * the cached awe_cur_cmd, saving an I/O write on repeated access.
 */
static void
awe_poke(unsigned short cmd, unsigned short port, unsigned short data)
{
	awe_set_cmd(cmd);
	outw(data, awe_ports[port]);
}
570
571/* write 32bit data */
/*
 * Write a 32bit value to the EMU8000 as two consecutive 16bit I/O
 * writes: low word at the port address, high word at address + 2.
 */
static void
awe_poke_dw(unsigned short cmd, unsigned short port, unsigned int data)
{
	unsigned short addr = awe_ports[port];
	awe_set_cmd(cmd);
	outw(data, addr);		/* write lower 16 bits */
	outw(data >> 16, addr + 2);	/* write higher 16 bits */
}
580
581/* read 16bit data */
582static unsigned short
583awe_peek(unsigned short cmd, unsigned short port)
584{
585 unsigned short k;
586 awe_set_cmd(cmd);
587 k = inw(awe_ports[port]);
588 return k;
589}
590
591/* read 32bit data */
592static unsigned int
593awe_peek_dw(unsigned short cmd, unsigned short port)
594{
595 unsigned int k1, k2;
596 unsigned short addr = awe_ports[port];
597 awe_set_cmd(cmd);
598 k1 = inw(addr);
599 k2 = inw(addr + 2);
600 k1 |= k2 << 16;
601 return k1;
602}
603
604/* wait delay number of AWE32 44100Hz clocks */
#ifdef WAIT_BY_LOOP /* wait by loop -- that's not good.. */
/*
 * Busy-wait until 'delay' ticks of the EMU8000 44100Hz sample counter
 * have elapsed.  The 65536-iteration caps guard against a counter that
 * is not running; the first loop handles the case where the target
 * value wrapped past 0xffff.
 */
static void
awe_wait(unsigned short delay)
{
	unsigned short clock, target;
	unsigned short port = awe_ports[AWE_WC_Port];
	int counter;

	/* sample counter */
	awe_set_cmd(AWE_WC_Cmd);
	clock = (unsigned short)inw(port);
	target = clock + delay;
	counter = 0;
	/* target wrapped: first wait for the counter itself to wrap */
	if (target < clock) {
		for (; (unsigned short)inw(port) > target; counter++)
			if (counter > 65536)
				break;
	}
	for (; (unsigned short)inw(port) < target; counter++)
		if (counter > 65536)
			break;
}
#else

/* sleep (interruptibly) for 'delay' samples at 44100Hz, rounded up to
 * whole jiffies */
static void awe_wait(unsigned short delay)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule_timeout((HZ*(unsigned long)delay + 44099)/44100);
}
/*
static void awe_wait(unsigned short delay)
{
	udelay(((unsigned long)delay * 1000000L + 44099) / 44100);
}
*/
#endif /* wait by loop */
641
642/* write a word data */
643#define awe_write_dram(c) awe_poke(AWE_SMLD, c)
644
645/*
646 * AWE32 voice parameters
647 */
648
/* initialize voice_info record to default values: full key/velocity
 * range, no tuning or attenuation, and a freshly reset parameter set
 * (via awe_init_voice_parm() at the end) */
static void
awe_init_voice_info(awe_voice_info *vp)
{
	vp->sample = 0;
	vp->rate_offset = 0;

	vp->start = 0;
	vp->end = 0;
	vp->loopstart = 0;
	vp->loopend = 0;
	vp->mode = 0;
	vp->root = 60;		/* MIDI note 60 (middle C) as root key */
	vp->tune = 0;
	vp->low = 0;		/* accept the full key range 0..127 */
	vp->high = 127;
	vp->vellow = 0;		/* accept the full velocity range 0..127 */
	vp->velhigh = 127;

	/* -1 presumably means "not fixed / use the channel value" --
	 * TODO confirm against the users of these fields */
	vp->fixkey = -1;
	vp->fixvel = -1;
	vp->fixpan = -1;
	vp->pan = -1;

	vp->exclusiveClass = 0;
	vp->amplitude = 127;	/* maximum amplitude */
	vp->attenuation = 0;	/* no extra attenuation */
	vp->scaleTuning = 100;	/* 100 cents per key, i.e. normal scale */

	awe_init_voice_parm(&vp->parm);
}
680
681/* initialize voice_parm record:
682 * Env1/2: delay=0, attack=0, hold=0, sustain=0, decay=0, release=0.
683 * Vibrato and Tremolo effects are zero.
684 * Cutoff is maximum.
685 * Chorus and Reverb effects are zero.
686 */
static void
awe_init_voice_parm(awe_voice_parm *pp)
{
	/* raw EMU8000 register encodings; per the header comment these
	 * correspond to zero envelope times and zero effect amounts.
	 * NOTE(review): 0x8000 appears to encode "no delay" and 0x7f7f /
	 * 0x807f the two packed 7bit fields -- confirm against ADIP. */
	pp->moddelay = 0x8000;
	pp->modatkhld = 0x7f7f;
	pp->moddcysus = 0x7f7f;
	pp->modrelease = 0x807f;
	pp->modkeyhold = 0;
	pp->modkeydecay = 0;

	pp->voldelay = 0x8000;
	pp->volatkhld = 0x7f7f;
	pp->voldcysus = 0x7f7f;
	pp->volrelease = 0x807f;
	pp->volkeyhold = 0;
	pp->volkeydecay = 0;

	pp->lfo1delay = 0x8000;	/* LFO delays: same "no delay" encoding */
	pp->lfo2delay = 0x8000;
	pp->pefe = 0;		/* no pitch/filter envelope effect */

	pp->fmmod = 0;		/* no vibrato / filter modulation */
	pp->tremfrq = 0;
	pp->fm2frq2 = 0;

	pp->cutoff = 0xff;	/* filter cutoff wide open (maximum) */
	pp->filterQ = 0;	/* no resonance */

	pp->chorus = 0;		/* effect sends off */
	pp->reverb = 0;
}
718
719
720#ifdef AWE_HAS_GUS_COMPATIBILITY
721
/* convert frequency mHz to abstract cents (= midi key * 100);
 * clamps to 12799 (just under key 128) for out-of-range input */
static int
freq_to_note(int mHz)
{
	/* abscents = log(mHz/8176) / log(2) * 1200 */
	unsigned int max_val = (unsigned int)0xffffffff / 10000;
	int i, times;
	unsigned int base;
	unsigned int freq;
	int note, tune;

	if (mHz == 0)
		return 0;
	if (mHz < 0)
		return 12799; /* maximum */

	freq = mHz;
	note = 0;
	/* octave search: 8176 mHz is MIDI key 0; each doubling adds 12 keys */
	for (base = 8176 * 2; freq >= base; base *= 2) {
		note += 12;
		if (note >= 128) /* over maximum */
			return 12799;
	}
	base /= 2;

	/* to avoid overflow... rescale so freq * times fits in 32 bits */
	times = 10000;
	while (freq > max_val) {
		max_val *= 10;
		times /= 10;
		base /= 10;
	}

	/* ratio within the octave scaled by 10000, then table lookups:
	 * semitone_tuning/cent_tuning come from tuning.h */
	freq = freq * times / base;
	for (i = 0; i < 12; i++) {
		if (freq < semitone_tuning[i+1])
			break;
		note++;
	}

	tune = 0;
	freq = freq * 10000 / semitone_tuning[i];
	for (i = 0; i < 100; i++) {
		if (freq < cent_tuning[i+1])
			break;
		tune++;
	}

	return note * 100 + tune;
}
772
773
774/* convert Hz to AWE32 rate offset:
775 * sample pitch offset for the specified sample rate
776 * rate=44100 is no offset, each 4096 is 1 octave (twice).
777 * eg, when rate is 22050, this offset becomes -4096.
778 */
static int
calc_rate_offset(int Hz)
{
	/* offset = log(Hz / 44100) / log(2) * 4096 */
	int freq, base, i;

	/* maybe smaller than max (44100Hz) */
	if (Hz <= 0 || Hz >= 44100) return 0;

	/* count whole octaves below 44100Hz, accumulated in cents
	 * (1200 cents per octave) */
	base = 0;
	for (freq = Hz * 2; freq < 44100; freq *= 2)
		base++;
	base *= 1200;

	/* remaining within-octave ratio scaled by 10000 for the
	 * tuning-table lookups (tables from tuning.h) */
	freq = 44100 * 10000 / (freq/2);
	for (i = 0; i < 12; i++) {
		if (freq < semitone_tuning[i+1])
			break;
		base += 100;	/* one semitone = 100 cents */
	}
	freq = freq * 10000 / semitone_tuning[i];
	for (i = 0; i < 100; i++) {
		if (freq < cent_tuning[i+1])
			break;
		base++;
	}
	/* negative offset: sample rate below 44100Hz; 4096 = one octave */
	return -base * 4096 / 1200;
}
807
808
809/*
810 * convert envelope time parameter to AWE32 raw parameter
811 */
812
813/* attack & decay/release time table (msec) */
814static short attack_time_tbl[128] = {
81532767, 32767, 5989, 4235, 2994, 2518, 2117, 1780, 1497, 1373, 1259, 1154, 1058, 970, 890, 816,
816707, 691, 662, 634, 607, 581, 557, 533, 510, 489, 468, 448, 429, 411, 393, 377,
817361, 345, 331, 317, 303, 290, 278, 266, 255, 244, 234, 224, 214, 205, 196, 188,
818180, 172, 165, 158, 151, 145, 139, 133, 127, 122, 117, 112, 107, 102, 98, 94,
81990, 86, 82, 79, 75, 72, 69, 66, 63, 61, 58, 56, 53, 51, 49, 47,
82045, 43, 41, 39, 37, 36, 34, 33, 31, 30, 29, 28, 26, 25, 24, 23,
82122, 21, 20, 19, 19, 18, 17, 16, 16, 15, 15, 14, 13, 13, 12, 12,
82211, 11, 10, 10, 10, 9, 9, 8, 8, 8, 8, 7, 7, 7, 6, 0,
823};
824
/* decay/release time in msec, indexed by the 7-bit raw envelope
 * decay/release parameter (0..127); searched by calc_parm_decay()
 * via calc_parm_search().  Monotonically non-increasing. */
static short decay_time_tbl[128] = {
32767, 32767, 22614, 15990, 11307, 9508, 7995, 6723, 5653, 5184, 4754, 4359, 3997, 3665, 3361, 3082,
2828, 2765, 2648, 2535, 2428, 2325, 2226, 2132, 2042, 1955, 1872, 1793, 1717, 1644, 1574, 1507,
1443, 1382, 1324, 1267, 1214, 1162, 1113, 1066, 978, 936, 897, 859, 822, 787, 754, 722,
691, 662, 634, 607, 581, 557, 533, 510, 489, 468, 448, 429, 411, 393, 377, 361,
345, 331, 317, 303, 290, 278, 266, 255, 244, 234, 224, 214, 205, 196, 188, 180,
172, 165, 158, 151, 145, 139, 133, 127, 122, 117, 112, 107, 102, 98, 94, 90,
86, 82, 79, 75, 72, 69, 66, 63, 61, 58, 56, 53, 51, 49, 47, 45,
43, 41, 39, 37, 36, 34, 33, 31, 30, 29, 28, 26, 25, 24, 23, 22,
};
835
836#define calc_parm_delay(msec) (0x8000 - (msec) * 1000 / 725);
837
/* convert envelope hold time (msec) to the AWE32 raw hold parameter,
 * clamped to the valid register range [1, 127].
 * NOTE(review): the original comment said "0x8000 - msec/92", which
 * looks like a leftover from the delay formula -- the code actually
 * computes (0x7f*92 - msec)/92. */
static int
calc_parm_hold(int msec)
{
	int raw = (0x7f * 92 - msec) / 92;

	if (raw < 1)
		return 1;
	return (raw > 127) ? 127 : raw;
}
847
/* attack time: search from time table
 * Convert an attack time in msec to the 7-bit raw attack parameter
 * by binary-searching attack_time_tbl (see calc_parm_search). */
static int
calc_parm_attack(int msec)
{
	return calc_parm_search(msec, attack_time_tbl);
}
854
/* decay/release time: search from time table
 * Convert a decay/release time in msec to the 7-bit raw parameter
 * by binary-searching decay_time_tbl (see calc_parm_search). */
static int
calc_parm_decay(int msec)
{
	return calc_parm_search(msec, decay_time_tbl);
}
861
/* binary-search a descending msec table for the smallest index in
 * [1, 127] whose entry does not exceed msec; returns that index.
 * The table must hold at least 128 entries sorted non-increasing. */
static int
calc_parm_search(int msec, short *table)
{
	int lo = 1, hi = 127;

	while (lo < hi) {
		int probe = (lo + hi) >> 1;

		if ((int)table[probe] > msec)
			lo = probe + 1;
		else
			hi = probe;
	}
	return lo;
}
876#endif /* AWE_HAS_GUS_COMPATIBILITY */
877
878
879/*
880 * effects table
881 */
882
/* set an effect value */
/* per-parameter effect flags stored in FX_Rec.flags[]:
 * OFF = no effect, SET = replace raw value, ADD = offset raw value */
#define FX_FLAG_OFF	0
#define FX_FLAG_SET	1
#define FX_FLAG_ADD	2

/* store value and mark the parameter as replaced / offset / cleared */
#define FX_SET(rec,type,value) \
	((rec)->flags[type] = FX_FLAG_SET, (rec)->val[type] = (value))
#define FX_ADD(rec,type,value) \
	((rec)->flags[type] = FX_FLAG_ADD, (rec)->val[type] = (value))
#define FX_UNSET(rec,type) \
	((rec)->flags[type] = FX_FLAG_OFF, (rec)->val[type] = 0)

/* check the effect value is set (non-zero flag = SET or ADD) */
#define FX_ON(rec,type)	((rec)->flags[type])

/* parameter value encodings used in parm_defs[].type */
#define PARM_BYTE	0
#define PARM_WORD	1
#define PARM_SIGN	2
901
/* one descriptor per AWE_FX_* effect parameter, indexed by the
 * effect type.  Gives the value encoding, the clamping range used by
 * FX_BYTE()/FX_WORD(), and an optional callback applied to playing
 * voices when the parameter changes in realtime (NULL = takes effect
 * on the next note only). */
static struct PARM_DEFS {
	int type;	/* byte or word */
	int low, high;	/* value range */
	fx_affect_func realtime;	/* realtime paramater change */
} parm_defs[] = {
	{PARM_WORD, 0, 0x8000, NULL},	/* env1 delay */
	{PARM_BYTE, 1, 0x7f, NULL},	/* env1 attack */
	{PARM_BYTE, 0, 0x7e, NULL},	/* env1 hold */
	{PARM_BYTE, 1, 0x7f, NULL},	/* env1 decay */
	{PARM_BYTE, 1, 0x7f, NULL},	/* env1 release */
	{PARM_BYTE, 0, 0x7f, NULL},	/* env1 sustain */
	{PARM_BYTE, 0, 0xff, NULL},	/* env1 pitch */
	{PARM_BYTE, 0, 0xff, NULL},	/* env1 cutoff */

	{PARM_WORD, 0, 0x8000, NULL},	/* env2 delay */
	{PARM_BYTE, 1, 0x7f, NULL},	/* env2 attack */
	{PARM_BYTE, 0, 0x7e, NULL},	/* env2 hold */
	{PARM_BYTE, 1, 0x7f, NULL},	/* env2 decay */
	{PARM_BYTE, 1, 0x7f, NULL},	/* env2 release */
	{PARM_BYTE, 0, 0x7f, NULL},	/* env2 sustain */

	{PARM_WORD, 0, 0x8000, NULL},	/* lfo1 delay */
	{PARM_BYTE, 0, 0xff, awe_fx_tremfrq},	/* lfo1 freq */
	{PARM_SIGN, -128, 127, awe_fx_tremfrq},	/* lfo1 volume */
	{PARM_SIGN, -128, 127, awe_fx_fmmod},	/* lfo1 pitch */
	{PARM_BYTE, 0, 0xff, awe_fx_fmmod},	/* lfo1 cutoff */

	{PARM_WORD, 0, 0x8000, NULL},	/* lfo2 delay */
	{PARM_BYTE, 0, 0xff, awe_fx_fm2frq2},	/* lfo2 freq */
	{PARM_SIGN, -128, 127, awe_fx_fm2frq2},	/* lfo2 pitch */

	{PARM_WORD, 0, 0xffff, awe_set_voice_pitch},	/* initial pitch */
	{PARM_BYTE, 0, 0xff, NULL},	/* chorus */
	{PARM_BYTE, 0, 0xff, NULL},	/* reverb */
	{PARM_BYTE, 0, 0xff, awe_set_volume},	/* initial cutoff */
	{PARM_BYTE, 0, 15, awe_fx_filterQ},	/* initial resonance */

	{PARM_WORD, 0, 0xffff, NULL},	/* sample start */
	{PARM_WORD, 0, 0xffff, NULL},	/* loop start */
	{PARM_WORD, 0, 0xffff, NULL},	/* loop end */
	{PARM_WORD, 0, 0xffff, NULL},	/* coarse sample start */
	{PARM_WORD, 0, 0xffff, NULL},	/* coarse loop start */
	{PARM_WORD, 0, 0xffff, NULL},	/* coarse loop end */
	{PARM_BYTE, 0, 0xff, awe_set_volume},	/* initial attenuation */
};
947
948
949static unsigned char
950FX_BYTE(FX_Rec *rec, FX_Rec *lay, int type, unsigned char value)
951{
952 int effect = 0;
953 int on = 0;
954 if (lay && (on = FX_ON(lay, type)) != 0)
955 effect = lay->val[type];
956 if (!on && (on = FX_ON(rec, type)) != 0)
957 effect = rec->val[type];
958 if (on == FX_FLAG_ADD) {
959 if (parm_defs[type].type == PARM_SIGN) {
960 if (value > 0x7f)
961 effect += (int)value - 0x100;
962 else
963 effect += (int)value;
964 } else {
965 effect += (int)value;
966 }
967 }
968 if (on) {
969 if (effect < parm_defs[type].low)
970 effect = parm_defs[type].low;
971 else if (effect > parm_defs[type].high)
972 effect = parm_defs[type].high;
973 return (unsigned char)effect;
974 }
975 return value;
976}
977
978/* get word effect value */
979static unsigned short
980FX_WORD(FX_Rec *rec, FX_Rec *lay, int type, unsigned short value)
981{
982 int effect = 0;
983 int on = 0;
984 if (lay && (on = FX_ON(lay, type)) != 0)
985 effect = lay->val[type];
986 if (!on && (on = FX_ON(rec, type)) != 0)
987 effect = rec->val[type];
988 if (on == FX_FLAG_ADD)
989 effect += (int)value;
990 if (on) {
991 if (effect < parm_defs[type].low)
992 effect = parm_defs[type].low;
993 else if (effect > parm_defs[type].high)
994 effect = parm_defs[type].high;
995 return (unsigned short)effect;
996 }
997 return value;
998}
999
1000/* get word (upper=type1/lower=type2) effect value */
1001static unsigned short
1002FX_COMB(FX_Rec *rec, FX_Rec *lay, int type1, int type2, unsigned short value)
1003{
1004 unsigned short tmp;
1005 tmp = FX_BYTE(rec, lay, type1, (unsigned char)(value >> 8));
1006 tmp <<= 8;
1007 tmp |= FX_BYTE(rec, lay, type2, (unsigned char)(value & 0xff));
1008 return tmp;
1009}
1010
/* address offset: build a sample-address offset from a coarse (hi,
 * 32K-word units) and fine (lo) effect parameter pair, layer record
 * taking precedence over the channel record.  For 16-bit samples the
 * byte offset is halved to a word offset.
 * NOTE(review): `addr << 15` on a negative coarse offset relies on
 * the compiler's arithmetic-shift behavior (implementation-defined
 * in strict C) -- fine for the kernel's gcc, but worth confirming
 * before reuse elsewhere. */
static int
FX_OFFSET(FX_Rec *rec, FX_Rec *lay, int lo, int hi, int mode)
{
	int addr = 0;
	/* coarse part, sign-extended from the stored 16-bit value */
	if (lay && FX_ON(lay, hi))
		addr = (short)lay->val[hi];
	else if (FX_ON(rec, hi))
		addr = (short)rec->val[hi];
	addr = addr << 15;
	/* fine part, also sign-extended */
	if (lay && FX_ON(lay, lo))
		addr += (short)lay->val[lo];
	else if (FX_ON(rec, lo))
		addr += (short)rec->val[lo];
	/* 16-bit samples: convert byte offset to word offset */
	if (!(mode & AWE_SAMPLE_8BITS))
		addr /= 2;
	return addr;
}
1029
1030
1031/*
1032 * turn on/off sample
1033 */
1034
/* table for volume target calculation: indexed by the low nibble of
 * the attenuation (avol % 0x10); the result is then shifted right by
 * avol >> 4 in awe_note_on() to form the VTFT volume target. */
static unsigned short voltarget[16] = {
	0xEAC0, 0XE0C8, 0XD740, 0XCE20, 0XC560, 0XBD08, 0XB500, 0XAD58,
	0XA5F8, 0X9EF0, 0X9830, 0X91C0, 0X8B90, 0X85A8, 0X8000, 0X7A90
};
1040
/* start a note on the given voice: silence the channel, then program
 * the EMU8000 pitch, modulation/volume envelopes, LFOs, pan, sample
 * addresses, chorus/reverb and filter registers from the assigned
 * sample and the per-channel (and per-layer) effect records, and
 * finally trigger the volume envelope.  The register write ORDER
 * matters to the chip; do not reorder the pokes.
 * A sample must already be assigned to voices[voice] by the caller. */
static void
awe_note_on(int voice)
{
	unsigned int temp;
	int addr;
	int vtarget, ftarget, ptarget, pitch;
	awe_voice_info *vp;
	awe_voice_parm_block *parm;
	FX_Rec *fx = &voices[voice].cinfo->fx;
	FX_Rec *fx_lay = NULL;
	if (voices[voice].layer < MAX_LAYERS)
		fx_lay = &voices[voice].cinfo->fx_layer[voices[voice].layer];

	/* A voice sample must assigned before calling */
	if ((vp = voices[voice].sample) == NULL || vp->index == 0)
		return;

	parm = (awe_voice_parm_block*)&vp->parm;

	/* channel to be silent and idle */
	awe_poke(AWE_DCYSUSV(voice), 0x0080);
	awe_poke(AWE_VTFT(voice), 0x0000FFFF);
	awe_poke(AWE_CVCF(voice), 0x0000FFFF);
	awe_poke(AWE_PTRX(voice), 0);
	awe_poke(AWE_CPF(voice), 0);

	/* set pitch offset */
	awe_set_pitch(voice, TRUE);

	/* modulation & volume envelope */
	/* modatk >= 0x80 and a full delay mean "no modulation attack":
	 * pre-load the envelope and compute the pitch/filter targets
	 * as if the envelope had already peaked */
	if (parm->modatk >= 0x80 && parm->moddelay >= 0x8000) {
		awe_poke(AWE_ENVVAL(voice), 0xBFFF);
		pitch = (parm->env1pit<<4) + voices[voice].apitch;
		if (pitch > 0xffff) pitch = 0xffff;
		/* calculate filter target */
		ftarget = parm->cutoff + parm->env1fc;
		limitvalue(ftarget, 0, 255);
		ftarget <<= 8;
	} else {
		awe_poke(AWE_ENVVAL(voice),
			 FX_WORD(fx, fx_lay, AWE_FX_ENV1_DELAY, parm->moddelay));
		ftarget = parm->cutoff;
		ftarget <<= 8;
		pitch = voices[voice].apitch;
	}

	/* calcualte pitch target */
	/* piecewise 2^x approximation: top nibble selects the octave,
	 * the next three bits refine it */
	if (pitch != 0xffff) {
		ptarget = 1 << (pitch >> 12);
		if (pitch & 0x800) ptarget += (ptarget*0x102e)/0x2710;
		if (pitch & 0x400) ptarget += (ptarget*0x764)/0x2710;
		if (pitch & 0x200) ptarget += (ptarget*0x389)/0x2710;
		ptarget += (ptarget>>1);
		if (ptarget > 0xffff) ptarget = 0xffff;

	} else ptarget = 0xffff;
	if (parm->modatk >= 0x80)
		awe_poke(AWE_ATKHLD(voice),
			 FX_BYTE(fx, fx_lay, AWE_FX_ENV1_HOLD, parm->modhld) << 8 | 0x7f);
	else
		awe_poke(AWE_ATKHLD(voice),
			 FX_COMB(fx, fx_lay, AWE_FX_ENV1_HOLD, AWE_FX_ENV1_ATTACK,
				 vp->parm.modatkhld));
	awe_poke(AWE_DCYSUS(voice),
		 FX_COMB(fx, fx_lay, AWE_FX_ENV1_SUSTAIN, AWE_FX_ENV1_DECAY,
			 vp->parm.moddcysus));

	/* same "no attack" shortcut for the volume envelope; the volume
	 * target is looked up from voltarget[] by the attenuation */
	if (parm->volatk >= 0x80 && parm->voldelay >= 0x8000) {
		awe_poke(AWE_ENVVOL(voice), 0xBFFF);
		vtarget = voltarget[voices[voice].avol%0x10]>>(voices[voice].avol>>4);
	} else {
		awe_poke(AWE_ENVVOL(voice),
			 FX_WORD(fx, fx_lay, AWE_FX_ENV2_DELAY, vp->parm.voldelay));
		vtarget = 0;
	}
	if (parm->volatk >= 0x80)
		awe_poke(AWE_ATKHLDV(voice),
			 FX_BYTE(fx, fx_lay, AWE_FX_ENV2_HOLD, parm->volhld) << 8 | 0x7f);
	else
		awe_poke(AWE_ATKHLDV(voice),
			 FX_COMB(fx, fx_lay, AWE_FX_ENV2_HOLD, AWE_FX_ENV2_ATTACK,
				 vp->parm.volatkhld));
	/* decay/sustain parameter for volume envelope must be set at last */

	/* cutoff and volume */
	awe_set_volume(voice, TRUE);

	/* modulation envelope heights */
	awe_poke(AWE_PEFE(voice),
		 FX_COMB(fx, fx_lay, AWE_FX_ENV1_PITCH, AWE_FX_ENV1_CUTOFF,
			 vp->parm.pefe));

	/* lfo1/2 delay */
	awe_poke(AWE_LFO1VAL(voice),
		 FX_WORD(fx, fx_lay, AWE_FX_LFO1_DELAY, vp->parm.lfo1delay));
	awe_poke(AWE_LFO2VAL(voice),
		 FX_WORD(fx, fx_lay, AWE_FX_LFO2_DELAY, vp->parm.lfo2delay));

	/* lfo1 pitch & cutoff shift */
	awe_fx_fmmod(voice, TRUE);
	/* lfo1 volume & freq */
	awe_fx_tremfrq(voice, TRUE);
	/* lfo2 pitch & freq */
	awe_fx_fm2frq2(voice, TRUE);
	/* pan & loop start */
	awe_set_pan(voice, TRUE);

	/* chorus & loop end (chorus 8bit, MSB) */
	addr = vp->loopend - 1;
	addr += FX_OFFSET(fx, fx_lay, AWE_FX_LOOP_END,
			  AWE_FX_COARSE_LOOP_END, vp->mode);
	temp = FX_BYTE(fx, fx_lay, AWE_FX_CHORUS, vp->parm.chorus);
	temp = (temp <<24) | (unsigned int)addr;
	awe_poke_dw(AWE_CSL(voice), temp);
	DEBUG(4,printk("AWE32: [-- loopend=%x/%x]\n", vp->loopend, addr));

	/* Q & current address (Q 4bit value, MSB) */
	addr = vp->start - 1;
	addr += FX_OFFSET(fx, fx_lay, AWE_FX_SAMPLE_START,
			  AWE_FX_COARSE_SAMPLE_START, vp->mode);
	temp = FX_BYTE(fx, fx_lay, AWE_FX_FILTERQ, vp->parm.filterQ);
	temp = (temp<<28) | (unsigned int)addr;
	awe_poke_dw(AWE_CCCA(voice), temp);
	DEBUG(4,printk("AWE32: [-- startaddr=%x/%x]\n", vp->start, addr));

	/* clear unknown registers */
	awe_poke_dw(AWE_00A0(voice), 0);
	awe_poke_dw(AWE_0080(voice), 0);

	/* reset volume */
	awe_poke_dw(AWE_VTFT(voice), (vtarget<<16)|ftarget);
	awe_poke_dw(AWE_CVCF(voice), (vtarget<<16)|ftarget);

	/* set reverb */
	temp = FX_BYTE(fx, fx_lay, AWE_FX_REVERB, vp->parm.reverb);
	temp = (temp << 8) | (ptarget << 16) | voices[voice].aaux;
	awe_poke_dw(AWE_PTRX(voice), temp);
	awe_poke_dw(AWE_CPF(voice), ptarget << 16);
	/* turn on envelope */
	awe_poke(AWE_DCYSUSV(voice),
		 FX_COMB(fx, fx_lay, AWE_FX_ENV2_SUSTAIN, AWE_FX_ENV2_DECAY,
			 vp->parm.voldcysus));

	voices[voice].state = AWE_ST_ON;

	/* clear voice position for the next note on this channel */
	if (SINGLE_LAYER_MODE()) {
		FX_UNSET(fx, AWE_FX_SAMPLE_START);
		FX_UNSET(fx, AWE_FX_COARSE_SAMPLE_START);
	}
}
1192
1193
/* turn off the voice: enter the release phase of both envelopes
 * (0x8000 flag = start release) using the sample's release times,
 * possibly overridden by the effect records, and mark the voice
 * AWE_ST_RELEASED.  With no sample assigned the voice is simply
 * marked AWE_ST_OFF. */
static void
awe_note_off(int voice)
{
	awe_voice_info *vp;
	unsigned short tmp;
	FX_Rec *fx = &voices[voice].cinfo->fx;
	FX_Rec *fx_lay = NULL;
	if (voices[voice].layer < MAX_LAYERS)
		fx_lay = &voices[voice].cinfo->fx_layer[voices[voice].layer];

	if ((vp = voices[voice].sample) == NULL) {
		voices[voice].state = AWE_ST_OFF;
		return;
	}

	/* modulation envelope release */
	tmp = 0x8000 | FX_BYTE(fx, fx_lay, AWE_FX_ENV1_RELEASE,
			       (unsigned char)vp->parm.modrelease);
	awe_poke(AWE_DCYSUS(voice), tmp);
	/* volume envelope release */
	tmp = 0x8000 | FX_BYTE(fx, fx_lay, AWE_FX_ENV2_RELEASE,
			       (unsigned char)vp->parm.volrelease);
	awe_poke(AWE_DCYSUSV(voice), tmp);
	voices[voice].state = AWE_ST_RELEASED;
}
1218
/* force to terminate the voice (no releasing echo): write the
 * immediate-stop value to DCYSUSV, re-apply the chip tweaks for the
 * now-idle voice, and mark it AWE_ST_OFF. */
static void
awe_terminate(int voice)
{
	awe_poke(AWE_DCYSUSV(voice), 0x807F);
	awe_tweak_voice(voice);
	voices[voice].state = AWE_ST_OFF;
}
1227
1228/* turn off other voices with the same exclusive class (for drums) */
1229static void
1230awe_exclusive_off(int voice)
1231{
1232 int i, exclass;
1233
1234 if (voices[voice].sample == NULL)
1235 return;
1236 if ((exclass = voices[voice].sample->exclusiveClass) == 0)
1237 return; /* not exclusive */
1238
1239 /* turn off voices with the same class */
1240 for (i = 0; i < awe_max_voices; i++) {
1241 if (i != voice && IS_PLAYING(i) &&
1242 voices[i].sample && voices[i].ch == voices[voice].ch &&
1243 voices[i].sample->exclusiveClass == exclass) {
1244 DEBUG(4,printk("AWE32: [exoff(%d)]\n", i));
1245 awe_terminate(i);
1246 awe_voice_init(i, TRUE);
1247 }
1248 }
1249}
1250
1251
1252/*
1253 * change the parameters of an audible voice
1254 */
1255
/* change pitch: write the precomputed raw pitch (voices[].apitch,
 * see awe_calc_pitch) to the IP register.  Skipped for silent voices
 * unless `forced` is set. */
static void
awe_set_pitch(int voice, int forced)
{
	if (IS_NO_EFFECT(voice) && !forced) return;
	awe_poke(AWE_IP(voice), voices[voice].apitch);
	DEBUG(3,printk("AWE32: [-- pitch=%x]\n", voices[voice].apitch));
}
1264
/* calculate & change pitch: recompute apitch from the current note,
 * bender and effect state, then push it to the hardware. */
static void
awe_set_voice_pitch(int voice, int forced)
{
	awe_calc_pitch(voice);
	awe_set_pitch(voice, forced);
}
1272
1273/* change volume & cutoff */
1274static void
1275awe_set_volume(int voice, int forced)
1276{
1277 awe_voice_info *vp;
1278 unsigned short tmp2;
1279 FX_Rec *fx = &voices[voice].cinfo->fx;
1280 FX_Rec *fx_lay = NULL;
1281 if (voices[voice].layer < MAX_LAYERS)
1282 fx_lay = &voices[voice].cinfo->fx_layer[voices[voice].layer];
1283
1284 if (!IS_PLAYING(voice) && !forced) return;
1285 if ((vp = voices[voice].sample) == NULL || vp->index == 0)
1286 return;
1287
1288 tmp2 = FX_BYTE(fx, fx_lay, AWE_FX_CUTOFF,
1289 (unsigned char)voices[voice].acutoff);
1290 tmp2 = (tmp2 << 8);
1291 tmp2 |= FX_BYTE(fx, fx_lay, AWE_FX_ATTEN,
1292 (unsigned char)voices[voice].avol);
1293 awe_poke(AWE_IFATN(voice), tmp2);
1294}
1295
/* calculate & change volume: recompute avol/acutoff for the voice and
 * push them to the hardware; no-op for an unassigned (empty) voice. */
static void
awe_set_voice_vol(int voice, int forced)
{
	if (IS_EMPTY(voice))
		return;
	awe_calc_volume(voice);
	awe_set_volume(voice, forced);
}
1305
1306
1307/* change pan; this could make a click noise.. */
1308static void
1309awe_set_pan(int voice, int forced)
1310{
1311 unsigned int temp;
1312 int addr;
1313 awe_voice_info *vp;
1314 FX_Rec *fx = &voices[voice].cinfo->fx;
1315 FX_Rec *fx_lay = NULL;
1316 if (voices[voice].layer < MAX_LAYERS)
1317 fx_lay = &voices[voice].cinfo->fx_layer[voices[voice].layer];
1318
1319 if (IS_NO_EFFECT(voice) && !forced) return;
1320 if ((vp = voices[voice].sample) == NULL || vp->index == 0)
1321 return;
1322
1323 /* pan & loop start (pan 8bit, MSB, 0:right, 0xff:left) */
1324 if (vp->fixpan > 0) /* 0-127 */
1325 temp = 255 - (int)vp->fixpan * 2;
1326 else {
1327 int pos = 0;
1328 if (vp->pan >= 0) /* 0-127 */
1329 pos = (int)vp->pan * 2 - 128;
1330 pos += voices[voice].cinfo->panning; /* -128 - 127 */
1331 temp = 127 - pos;
1332 }
1333 limitvalue(temp, 0, 255);
1334 if (ctrls[AWE_MD_PAN_EXCHANGE]) {
1335 temp = 255 - temp;
1336 }
1337 if (forced || temp != voices[voice].apan) {
1338 voices[voice].apan = temp;
1339 if (temp == 0)
1340 voices[voice].aaux = 0xff;
1341 else
1342 voices[voice].aaux = (-temp) & 0xff;
1343 addr = vp->loopstart - 1;
1344 addr += FX_OFFSET(fx, fx_lay, AWE_FX_LOOP_START,
1345 AWE_FX_COARSE_LOOP_START, vp->mode);
1346 temp = (temp<<24) | (unsigned int)addr;
1347 awe_poke_dw(AWE_PSST(voice), temp);
1348 DEBUG(4,printk("AWE32: [-- loopstart=%x/%x]\n", vp->loopstart, addr));
1349 }
1350}
1351
/* effects change during playing */
/* set lfo1 pitch & cutoff modulation: write the FMMOD register from
 * the sample's fmmod value, with effect overrides applied.  Skipped
 * for silent voices unless `forced` is set. */
static void
awe_fx_fmmod(int voice, int forced)
{
	awe_voice_info *vp;
	FX_Rec *fx = &voices[voice].cinfo->fx;
	FX_Rec *fx_lay = NULL;
	if (voices[voice].layer < MAX_LAYERS)
		fx_lay = &voices[voice].cinfo->fx_layer[voices[voice].layer];

	if (IS_NO_EFFECT(voice) && !forced) return;
	if ((vp = voices[voice].sample) == NULL || vp->index == 0)
		return;
	awe_poke(AWE_FMMOD(voice),
		 FX_COMB(fx, fx_lay, AWE_FX_LFO1_PITCH, AWE_FX_LFO1_CUTOFF,
			 vp->parm.fmmod));
}
1369
/* set tremolo (lfo1) volume & frequency: write the TREMFRQ register
 * from the sample's tremfrq value, with effect overrides applied.
 * Skipped for silent voices unless `forced` is set. */
static void
awe_fx_tremfrq(int voice, int forced)
{
	awe_voice_info *vp;
	FX_Rec *fx = &voices[voice].cinfo->fx;
	FX_Rec *fx_lay = NULL;
	if (voices[voice].layer < MAX_LAYERS)
		fx_lay = &voices[voice].cinfo->fx_layer[voices[voice].layer];

	if (IS_NO_EFFECT(voice) && !forced) return;
	if ((vp = voices[voice].sample) == NULL || vp->index == 0)
		return;
	awe_poke(AWE_TREMFRQ(voice),
		 FX_COMB(fx, fx_lay, AWE_FX_LFO1_VOLUME, AWE_FX_LFO1_FREQ,
			 vp->parm.tremfrq));
}
1387
/* set lfo2 pitch & frequency: write the FM2FRQ2 register from the
 * sample's fm2frq2 value, with effect overrides applied.  Skipped
 * for silent voices unless `forced` is set. */
static void
awe_fx_fm2frq2(int voice, int forced)
{
	awe_voice_info *vp;
	FX_Rec *fx = &voices[voice].cinfo->fx;
	FX_Rec *fx_lay = NULL;
	if (voices[voice].layer < MAX_LAYERS)
		fx_lay = &voices[voice].cinfo->fx_layer[voices[voice].layer];

	if (IS_NO_EFFECT(voice) && !forced) return;
	if ((vp = voices[voice].sample) == NULL || vp->index == 0)
		return;
	awe_poke(AWE_FM2FRQ2(voice),
		 FX_COMB(fx, fx_lay, AWE_FX_LFO2_PITCH, AWE_FX_LFO2_FREQ,
			 vp->parm.fm2frq2));
}
1405
1406
/* Q & current address (Q 4bit value, MSB) */
/* update the filter resonance during playback: read back CCCA, keep
 * the 24-bit current address, and replace the Q nibble in the top
 * four bits with the (effect-adjusted) sample filterQ. */
static void
awe_fx_filterQ(int voice, int forced)
{
	unsigned int addr;
	awe_voice_info *vp;
	FX_Rec *fx = &voices[voice].cinfo->fx;
	FX_Rec *fx_lay = NULL;
	if (voices[voice].layer < MAX_LAYERS)
		fx_lay = &voices[voice].cinfo->fx_layer[voices[voice].layer];

	if (IS_NO_EFFECT(voice) && !forced) return;
	if ((vp = voices[voice].sample) == NULL || vp->index == 0)
		return;

	addr = awe_peek_dw(AWE_CCCA(voice)) & 0xffffff;
	addr |= (FX_BYTE(fx, fx_lay, AWE_FX_FILTERQ, vp->parm.filterQ) << 28);
	awe_poke_dw(AWE_CCCA(voice), addr);
}
1426
1427/*
1428 * calculate pitch offset
1429 *
1430 * 0xE000 is no pitch offset at 44100Hz sample.
1431 * Every 4096 is one octave.
1432 */
1433
/* compute voices[voice].apitch, the raw EMU8000 pitch value, from the
 * MIDI note (or the sample's fixed key), the sample's root key, tune
 * and scale-tuning, the channel pitch bender, and any initial-pitch
 * effect.  0xe000 = unshifted 44100Hz root; 4096 units per octave.
 * Loads the sample onto the chip first if it is not resident yet. */
static void
awe_calc_pitch(int voice)
{
	voice_info *vp = &voices[voice];
	awe_voice_info *ap;
	awe_chan_info *cp = voices[voice].cinfo;
	int offset;

	/* search voice information */
	if ((ap = vp->sample) == NULL)
		return;
	if (ap->index == 0) {
		/* sample not yet on the chip: load it now, bail out on failure */
		DEBUG(3,printk("AWE32: set sample (%d)\n", ap->sample));
		if (awe_set_sample((awe_voice_list*)ap) == 0)
			return;
	}

	/* calculate offset */
	if (ap->fixkey >= 0) {
		/* sample plays at a fixed key regardless of the note */
		DEBUG(3,printk("AWE32: p-> fixkey(%d) tune(%d)\n", ap->fixkey, ap->tune));
		offset = (ap->fixkey - ap->root) * 4096 / 12;
	} else {
		DEBUG(3,printk("AWE32: p(%d)-> root(%d) tune(%d)\n", vp->note, ap->root, ap->tune));
		offset = (vp->note - ap->root) * 4096 / 12;
		DEBUG(4,printk("AWE32: p-> ofs=%d\n", offset));
	}
	/* scaleTuning: percentage of normal keyboard scaling */
	offset = (offset * ap->scaleTuning) / 100;
	DEBUG(4,printk("AWE32: p-> scale* ofs=%d\n", offset));
	offset += ap->tune * 4096 / 1200;	/* tune is in cents */
	DEBUG(4,printk("AWE32: p-> tune+ ofs=%d\n", offset));
	if (cp->bender != 0) {
		DEBUG(3,printk("AWE32: p-> bend(%d) %d\n", voice, cp->bender));
		/* (819200: 1 semitone) ==> (4096: 12 semitones) */
		offset += cp->bender * cp->bender_range / 2400;
	}

	/* add initial pitch correction */
	if (FX_ON(&cp->fx_layer[vp->layer], AWE_FX_INIT_PITCH))
		offset += cp->fx_layer[vp->layer].val[AWE_FX_INIT_PITCH];
	else if (FX_ON(&cp->fx, AWE_FX_INIT_PITCH))
		offset += cp->fx.val[AWE_FX_INIT_PITCH];

	/* 0xe000: root pitch */
	vp->apitch = 0xe000 + ap->rate_offset + offset;
	DEBUG(4,printk("AWE32: p-> sum aofs=%x, rate_ofs=%d\n", vp->apitch, ap->rate_offset));
	/* clamp to the 16-bit register range */
	if (vp->apitch > 0xffff)
		vp->apitch = 0xffff;
	if (vp->apitch < 0)
		vp->apitch = 0;
}
1484
1485
1486#ifdef AWE_HAS_GUS_COMPATIBILITY
/* calculate MIDI key and semitone from the specified frequency */
/* GUS-compatibility variant of awe_calc_pitch: derive apitch from a
 * frequency in Hz (via freq_to_note, which returns note*100+cents)
 * instead of a MIDI note number.  Loads the sample first if needed. */
static void
awe_calc_pitch_from_freq(int voice, int freq)
{
	voice_info *vp = &voices[voice];
	awe_voice_info *ap;
	FX_Rec *fx = &voices[voice].cinfo->fx;
	FX_Rec *fx_lay = NULL;
	int offset;
	int note;

	if (voices[voice].layer < MAX_LAYERS)
		fx_lay = &voices[voice].cinfo->fx_layer[voices[voice].layer];

	/* search voice information */
	if ((ap = vp->sample) == NULL)
		return;
	if (ap->index == 0) {
		DEBUG(3,printk("AWE32: set sample (%d)\n", ap->sample));
		if (awe_set_sample((awe_voice_list*)ap) == 0)
			return;
	}
	/* note is in cents here (note*100+cents), hence /1200 per octave */
	note = freq_to_note(freq);
	offset = (note - ap->root * 100 + ap->tune) * 4096 / 1200;
	offset = (offset * ap->scaleTuning) / 100;
	if (fx_lay && FX_ON(fx_lay, AWE_FX_INIT_PITCH))
		offset += fx_lay->val[AWE_FX_INIT_PITCH];
	else if (FX_ON(fx, AWE_FX_INIT_PITCH))
		offset += fx->val[AWE_FX_INIT_PITCH];
	/* 0xe000 = root pitch; clamp to the 16-bit register range */
	vp->apitch = 0xe000 + ap->rate_offset + offset;
	if (vp->apitch > 0xffff)
		vp->apitch = 0xffff;
	if (vp->apitch < 0)
		vp->apitch = 0;
}
1522#endif /* AWE_HAS_GUS_COMPATIBILITY */
1523
1524
1525/*
1526 * calculate volume attenuation
1527 *
1528 * Voice volume is controlled by volume attenuation parameter.
1529 * So volume becomes maximum when avol is 0 (no attenuation), and
1530 * minimum when 255 (-96dB or silence).
1531 */
1532
/* volume (0-127) to attenuation: 0 = max volume, 255 = silence.
 * Used by the classic (non-AWE_MD_NEW_VOLUME_CALC) path in
 * awe_calc_volume() and by awe_change_master_volume(). */
static int vol_table[128] = {
	255,111,95,86,79,74,70,66,63,61,58,56,54,52,50,49,
	47,46,45,43,42,41,40,39,38,37,36,35,34,34,33,32,
	31,31,30,29,29,28,27,27,26,26,25,24,24,23,23,22,
	22,21,21,21,20,20,19,19,18,18,18,17,17,16,16,16,
	15,15,15,14,14,14,13,13,13,12,12,12,11,11,11,10,
	10,10,10,9,9,9,8,8,8,8,7,7,7,7,6,6,
	6,6,5,5,5,5,5,4,4,4,4,3,3,3,3,3,
	2,2,2,2,2,1,1,1,1,1,0,0,0,0,0,0,
};
1543
1544/* tables for volume->attenuation calculation */
/* main-volume (0-127) component of the attenuation in the
 * AWE_MD_NEW_VOLUME_CALC path of awe_calc_volume(). */
static unsigned char voltab1[128] = {
   0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
   0x63, 0x2b, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22,
   0x21, 0x20, 0x1f, 0x1e, 0x1e, 0x1d, 0x1c, 0x1b, 0x1b, 0x1a,
   0x19, 0x19, 0x18, 0x17, 0x17, 0x16, 0x16, 0x15, 0x15, 0x14,
   0x14, 0x13, 0x13, 0x13, 0x12, 0x12, 0x11, 0x11, 0x11, 0x10,
   0x10, 0x10, 0x0f, 0x0f, 0x0f, 0x0e, 0x0e, 0x0e, 0x0e, 0x0d,
   0x0d, 0x0d, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0b, 0x0b, 0x0b,
   0x0b, 0x0a, 0x0a, 0x0a, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09,
   0x08, 0x08, 0x08, 0x08, 0x08, 0x07, 0x07, 0x07, 0x07, 0x06,
   0x06, 0x06, 0x06, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05, 0x04,
   0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x02,
   0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01,
   0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
1560
/* velocity (0-127) component of the attenuation in the
 * AWE_MD_NEW_VOLUME_CALC path of awe_calc_volume(). */
static unsigned char voltab2[128] = {
   0x32, 0x31, 0x30, 0x2f, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x2a,
   0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x24, 0x23, 0x22, 0x21,
   0x21, 0x20, 0x1f, 0x1e, 0x1e, 0x1d, 0x1c, 0x1c, 0x1b, 0x1a,
   0x1a, 0x19, 0x19, 0x18, 0x18, 0x17, 0x16, 0x16, 0x15, 0x15,
   0x14, 0x14, 0x13, 0x13, 0x13, 0x12, 0x12, 0x11, 0x11, 0x10,
   0x10, 0x10, 0x0f, 0x0f, 0x0f, 0x0e, 0x0e, 0x0e, 0x0d, 0x0d,
   0x0d, 0x0c, 0x0c, 0x0c, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a, 0x0a,
   0x0a, 0x0a, 0x09, 0x09, 0x09, 0x09, 0x09, 0x08, 0x08, 0x08,
   0x08, 0x08, 0x07, 0x07, 0x07, 0x07, 0x07, 0x06, 0x06, 0x06,
   0x06, 0x06, 0x06, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
   0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03,
   0x03, 0x03, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01,
   0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
};
1576
/* expression-controller (0-127) scaling of the remaining headroom in
 * the AWE_MD_NEW_VOLUME_CALC path of awe_calc_volume(). */
static unsigned char expressiontab[128] = {
   0x7f, 0x6c, 0x62, 0x5a, 0x54, 0x50, 0x4b, 0x48, 0x45, 0x42,
   0x40, 0x3d, 0x3b, 0x39, 0x38, 0x36, 0x34, 0x33, 0x31, 0x30,
   0x2f, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25,
   0x24, 0x24, 0x23, 0x22, 0x21, 0x21, 0x20, 0x1f, 0x1e, 0x1e,
   0x1d, 0x1d, 0x1c, 0x1b, 0x1b, 0x1a, 0x1a, 0x19, 0x18, 0x18,
   0x17, 0x17, 0x16, 0x16, 0x15, 0x15, 0x15, 0x14, 0x14, 0x13,
   0x13, 0x12, 0x12, 0x11, 0x11, 0x11, 0x10, 0x10, 0x0f, 0x0f,
   0x0f, 0x0e, 0x0e, 0x0e, 0x0d, 0x0d, 0x0d, 0x0c, 0x0c, 0x0c,
   0x0b, 0x0b, 0x0b, 0x0a, 0x0a, 0x0a, 0x09, 0x09, 0x09, 0x09,
   0x08, 0x08, 0x08, 0x07, 0x07, 0x07, 0x07, 0x06, 0x06, 0x06,
   0x06, 0x05, 0x05, 0x05, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03,
   0x03, 0x03, 0x03, 0x02, 0x02, 0x02, 0x02, 0x01, 0x01, 0x01,
   0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
1592
1593static void
1594awe_calc_volume(int voice)
1595{
1596 voice_info *vp = &voices[voice];
1597 awe_voice_info *ap;
1598 awe_chan_info *cp = voices[voice].cinfo;
1599 int vol;
1600
1601 /* search voice information */
1602 if ((ap = vp->sample) == NULL)
1603 return;
1604
1605 ap = vp->sample;
1606 if (ap->index == 0) {
1607 DEBUG(3,printk("AWE32: set sample (%d)\n", ap->sample));
1608 if (awe_set_sample((awe_voice_list*)ap) == 0)
1609 return;
1610 }
1611
1612 if (ctrls[AWE_MD_NEW_VOLUME_CALC]) {
1613 int main_vol = cp->main_vol * ap->amplitude / 127;
1614 limitvalue(vp->velocity, 0, 127);
1615 limitvalue(main_vol, 0, 127);
1616 limitvalue(cp->expression_vol, 0, 127);
1617
1618 vol = voltab1[main_vol] + voltab2[vp->velocity];
1619 vol = (vol * 8) / 3;
1620 vol += ap->attenuation;
1621 if (cp->expression_vol < 127)
1622 vol += ((0x100 - vol) * expressiontab[cp->expression_vol])/128;
1623 vol += atten_offset;
1624 if (atten_relative)
1625 vol += ctrls[AWE_MD_ZERO_ATTEN];
1626 limitvalue(vol, 0, 255);
1627 vp->avol = vol;
1628
1629 } else {
1630 /* 0 - 127 */
1631 vol = (vp->velocity * cp->main_vol * cp->expression_vol) / (127*127);
1632 vol = vol * ap->amplitude / 127;
1633
1634 if (vol < 0) vol = 0;
1635 if (vol > 127) vol = 127;
1636
1637 /* calc to attenuation */
1638 vol = vol_table[vol];
1639 vol += (int)ap->attenuation;
1640 vol += atten_offset;
1641 if (atten_relative)
1642 vol += ctrls[AWE_MD_ZERO_ATTEN];
1643 if (vol > 255) vol = 255;
1644
1645 vp->avol = vol;
1646 }
1647 if (cp->bank != AWE_DRUM_BANK && ((awe_voice_parm_block*)(&ap->parm))->volatk < 0x7d) {
1648 int atten;
1649 if (vp->velocity < 70) atten = 70;
1650 else atten = vp->velocity;
1651 vp->acutoff = (atten * ap->parm.cutoff + 0xa0) >> 7;
1652 } else {
1653 vp->acutoff = ap->parm.cutoff;
1654 }
1655 DEBUG(3,printk("AWE32: [-- voice(%d) vol=%x]\n", voice, vol));
1656}
1657
/* change master volume (0-127): convert to a global attenuation
 * offset via vol_table, switch to relative-attenuation mode, and
 * re-apply the volume of every voice. */
static void
awe_change_master_volume(short val)
{
	limitvalue(val, 0, 127);
	atten_offset = vol_table[val];
	atten_relative = TRUE;
	awe_update_volume();
}
1667
/* update volumes of all available channels: recompute and force-write
 * the volume of every hardware voice. */
static void awe_update_volume(void)
{
	int i;
	for (i = 0; i < awe_max_voices; i++)
		awe_set_voice_vol(i, TRUE);
}
1675
/* set sostenuto on: mark the voice so release_voice() will sustain it
 * instead of releasing.  No-op for silent voices unless forced. */
static void awe_sostenuto_on(int voice, int forced)
{
	if (IS_NO_EFFECT(voice) && !forced) return;
	voices[voice].sostenuto = 127;
}
1682
1683
/* drop sustain: when the sustain pedal is lifted, release any voice
 * held in AWE_ST_SUSTAINED and reset its effects and state. */
static void awe_sustain_off(int voice, int forced)
{
	if (voices[voice].state == AWE_ST_SUSTAINED) {
		awe_note_off(voice);
		awe_fx_init(voices[voice].ch);
		awe_voice_init(voice, FALSE);
	}
}
1693
1694
/* terminate and initialize voice: hard-stop the voice (no release
 * tail), clear its channel effects, and fully re-initialize it. */
static void awe_terminate_and_init(int voice, int forced)
{
	awe_terminate(voice);
	awe_fx_init(voices[voice].ch);
	awe_voice_init(voice, TRUE);
}
1702
1703
1704/*
1705 * synth operation routines
1706 */
1707
/* voice search keys stored in voices[].key:
 * direct mode binds a key to a hardware voice, channel mode encodes
 * channel (high byte) and note+1 (low byte) so a channel's voices can
 * be matched with KEY_CHAN_MATCH. */
#define AWE_VOICE_KEY(v)	(0x8000 | (v))
#define AWE_CHAN_KEY(c,n)	(((c) << 8) | ((n) + 1))
#define KEY_CHAN_MATCH(key,c)	(((key) >> 8) == (c))
1711
/* initialize the voice */
/* reset a voice's bookkeeping: search key (per playing mode), the
 * allocation map entry and timestamp; with init_all also reset the
 * note state and the cached emu8000 parameters. */
static void
awe_voice_init(int voice, int init_all)
{
	voice_info *vp = &voices[voice];

	/* reset voice search key */
	if (playing_mode == AWE_PLAY_DIRECT)
		vp->key = AWE_VOICE_KEY(voice);
	else
		vp->key = 0;

	/* clear voice mapping */
	voice_alloc->map[voice] = 0;

	/* touch the timing flag */
	vp->time = current_alloc_time;

	/* initialize other parameters if necessary */
	if (init_all) {
		vp->note = -1;
		vp->velocity = 0;
		vp->sostenuto = 0;

		vp->sample = NULL;
		vp->cinfo = &channels[voice];	/* default 1:1 voice->channel map */
		vp->ch = voice;
		vp->state = AWE_ST_OFF;

		/* emu8000 parameters */
		vp->apitch = 0;
		vp->avol = 255;		/* full attenuation = silent */
		vp->apan = -1;		/* force the next pan write */
	}
}
1747
1748/* clear effects */
1749static void awe_fx_init(int ch)
1750{
1751 if (SINGLE_LAYER_MODE() && !ctrls[AWE_MD_KEEP_EFFECT]) {
1752 memset(&channels[ch].fx, 0, sizeof(channels[ch].fx));
1753 memset(&channels[ch].fx_layer, 0, sizeof(&channels[ch].fx_layer));
1754 }
1755}
1756
/* initialize channel info */
/* reset a MIDI channel's state.  With init_all the persistent setup
 * (pan, bender range, main volume, default instrument/bank -- drums
 * on drum channels in multi-layer mode) is also restored; otherwise
 * only the per-note runtime state and effects are cleared. */
static void awe_channel_init(int ch, int init_all)
{
	awe_chan_info *cp = &channels[ch];
	cp->channel = ch;
	if (init_all) {
		cp->panning = 0; /* zero center */
		cp->bender_range = 200; /* sense * 100 */
		cp->main_vol = 127;
		if (MULTI_LAYER_MODE() && IS_DRUM_CHANNEL(ch)) {
			cp->instr = ctrls[AWE_MD_DEF_DRUM];
			cp->bank = AWE_DRUM_BANK;
		} else {
			cp->instr = ctrls[AWE_MD_DEF_PRESET];
			cp->bank = ctrls[AWE_MD_DEF_BANK];
		}
	}

	cp->bender = 0; /* zero tune skew */
	cp->expression_vol = 127;
	cp->chan_press = 0;
	cp->sustained = 0;

	if (! ctrls[AWE_MD_KEEP_EFFECT]) {
		memset(&cp->fx, 0, sizeof(cp->fx));
		memset(&cp->fx_layer, 0, sizeof(cp->fx_layer));
	}
}
1785
1786
/* change the voice parameters; voice = channel */
/* apply a realtime effect callback: in direct mode the argument IS the
 * hardware voice; otherwise it is a channel, and the callback runs on
 * every hardware voice whose key matches that channel. */
static void awe_voice_change(int voice, fx_affect_func func)
{
	int i;
	switch (playing_mode) {
	case AWE_PLAY_DIRECT:
		func(voice, FALSE);
		break;
	case AWE_PLAY_INDIRECT:
		/* one voice per channel, found via its voice key */
		for (i = 0; i < awe_max_voices; i++)
			if (voices[i].key == AWE_VOICE_KEY(voice))
				func(i, FALSE);
		break;
	default:
		/* multi-voice per channel: match on the channel byte */
		for (i = 0; i < awe_max_voices; i++)
			if (KEY_CHAN_MATCH(voices[i].key, voice))
				func(i, FALSE);
		break;
	}
}
1807
1808
1809/*
1810 * device open / close
1811 */
1812
/* open device:
 * reset status of all voices, and clear sample position flag
 * Returns 0 on success, -EBUSY if the synth is already open.
 * NOTE(review): the awe_busy test-and-set is not obviously atomic
 * against concurrent opens -- presumably serialized by the OSS layer. */
static int
awe_open(int dev, int mode)
{
	if (awe_busy)
		return -EBUSY;

	awe_busy = TRUE;

	/* set default mode */
	awe_init_ctrl_parms(FALSE);
	atten_relative = TRUE;
	atten_offset = 0;
	drum_flags = DEFAULT_DRUM_FLAGS;
	playing_mode = AWE_PLAY_INDIRECT;

	/* reset voices & channels */
	awe_reset(dev);

	patch_opened = 0;

	return 0;
}
1838
1839
/* close device:
 * reset all voices again (terminate sounds)
 * and mark the synth free for the next opener. */
static void
awe_close(int dev)
{
	awe_reset(dev);
	awe_busy = FALSE;
}
1849
1850
1851/* set miscellaneous mode parameters
1852 */
1853static void
1854awe_init_ctrl_parms(int init_all)
1855{
1856 int i;
1857 for (i = 0; i < AWE_MD_END; i++) {
1858 if (init_all || ctrl_parms[i].init_each_time)
1859 ctrls[i] = ctrl_parms[i].value;
1860 }
1861}
1862
1863
1864/* sequencer I/O control:
1865 */
1866static int
1867awe_ioctl(int dev, unsigned int cmd, void __user *arg)
1868{
1869 switch (cmd) {
1870 case SNDCTL_SYNTH_INFO:
1871 if (playing_mode == AWE_PLAY_DIRECT)
1872 awe_info.nr_voices = awe_max_voices;
1873 else
1874 awe_info.nr_voices = AWE_MAX_CHANNELS;
1875 if (copy_to_user(arg, &awe_info, sizeof(awe_info)))
1876 return -EFAULT;
1877 return 0;
1878 break;
1879
1880 case SNDCTL_SEQ_RESETSAMPLES:
1881 awe_reset(dev);
1882 awe_reset_samples();
1883 return 0;
1884 break;
1885
1886 case SNDCTL_SEQ_PERCMODE:
1887 /* what's this? */
1888 return 0;
1889 break;
1890
1891 case SNDCTL_SYNTH_MEMAVL:
1892 return memsize - awe_free_mem_ptr() * 2;
1893 break;
1894
1895 default:
1896 printk(KERN_WARNING "AWE32: unsupported ioctl %d\n", cmd);
1897 return -EINVAL;
1898 break;
1899 }
1900}
1901
1902
1903static int voice_in_range(int voice)
1904{
1905 if (playing_mode == AWE_PLAY_DIRECT) {
1906 if (voice < 0 || voice >= awe_max_voices)
1907 return FALSE;
1908 } else {
1909 if (voice < 0 || voice >= AWE_MAX_CHANNELS)
1910 return FALSE;
1911 }
1912 return TRUE;
1913}
1914
1915static void release_voice(int voice, int do_sustain)
1916{
1917 if (IS_NO_SOUND(voice))
1918 return;
1919 if (do_sustain && (voices[voice].cinfo->sustained == 127 ||
1920 voices[voice].sostenuto == 127))
1921 voices[voice].state = AWE_ST_SUSTAINED;
1922 else {
1923 awe_note_off(voice);
1924 awe_fx_init(voices[voice].ch);
1925 awe_voice_init(voice, FALSE);
1926 }
1927}
1928
1929/* release all notes */
1930static void awe_note_off_all(int do_sustain)
1931{
1932 int i;
1933 for (i = 0; i < awe_max_voices; i++)
1934 release_voice(i, do_sustain);
1935}
1936
/* kill a voice:
 * not terminate, just release the voice.
 * The (voice, note) pair is translated to the internal key that was
 * used when the note started, then every hw voice with that key is
 * released (sustain pedal honored).
 */
static int
awe_kill_note(int dev, int voice, int note, int velocity)
{
	int i, v2, key;

	DEBUG(2,printk("AWE32: [off(%d) nt=%d vl=%d]\n", voice, note, velocity));
	if (! voice_in_range(voice))
		return -EINVAL;

	switch (playing_mode) {
	case AWE_PLAY_DIRECT:
	case AWE_PLAY_INDIRECT:
		/* voice addresses the hw voice directly */
		key = AWE_VOICE_KEY(voice);
		break;

	case AWE_PLAY_MULTI2:
		/* map the logical voice to its channel and clear the map */
		v2 = voice_alloc->map[voice] >> 8;
		voice_alloc->map[voice] = 0;
		voice = v2;
		if (voice < 0 || voice >= AWE_MAX_CHANNELS)
			return -EINVAL;
		/* continue to below */
		/* fall through */
	default:
		key = AWE_CHAN_KEY(voice, note);
		break;
	}

	for (i = 0; i < awe_max_voices; i++) {
		if (voices[i].key == key)
			release_voice(i, TRUE);
	}
	return 0;
}
1973
1974
1975static void start_or_volume_change(int voice, int velocity)
1976{
1977 voices[voice].velocity = velocity;
1978 awe_calc_volume(voice);
1979 if (voices[voice].state == AWE_ST_STANDBY)
1980 awe_note_on(voice);
1981 else if (voices[voice].state == AWE_ST_ON)
1982 awe_set_volume(voice, FALSE);
1983}
1984
/* put a voice in the given state, recompute its pitch and volume
 * parameters, and start it sounding when the state is AWE_ST_ON.
 */
static void set_and_start_voice(int voice, int state)
{
	/* calculate pitch & volume parameters */
	voices[voice].state = state;
	awe_calc_pitch(voice);
	awe_calc_volume(voice);
	if (state == AWE_ST_ON)
		awe_note_on(voice);
}
1994
/* start a voice:
 * if note is 255, identical with aftertouch function.
 * Otherwise, start a voice with specified note and volume.
 * Flow: map (voice, note) to an internal key, optionally handle a
 * pure volume change, stop colliding notes, allocate hw voices,
 * then set pitch/volume and trigger the freshly allocated voices.
 */
static int
awe_start_note(int dev, int voice, int note, int velocity)
{
	int i, key, state, volonly;

	DEBUG(2,printk("AWE32: [on(%d) nt=%d vl=%d]\n", voice, note, velocity));
	if (! voice_in_range(voice))
		return -EINVAL;

	/* velocity 0 arms the voice without sounding it */
	if (velocity == 0)
		state = AWE_ST_STANDBY; /* stand by for playing */
	else
		state = AWE_ST_ON;	/* really play */
	volonly = FALSE;

	switch (playing_mode) {
	case AWE_PLAY_DIRECT:
	case AWE_PLAY_INDIRECT:
		key = AWE_VOICE_KEY(voice);
		if (note == 255)
			volonly = TRUE;	/* aftertouch-style volume update */
		break;

	case AWE_PLAY_MULTI2:
		/* translate logical voice to its mapped channel first */
		voice = voice_alloc->map[voice] >> 8;
		if (voice < 0 || voice >= AWE_MAX_CHANNELS)
			return -EINVAL;
		/* continue to below */
		/* fall through */
	default:
		if (note >= 128) { /* key volume mode */
			note -= 128;
			volonly = TRUE;
		}
		key = AWE_CHAN_KEY(voice, note);
		break;
	}

	/* dynamic volume change */
	if (volonly) {
		for (i = 0; i < awe_max_voices; i++) {
			if (voices[i].key == key)
				start_or_volume_change(i, velocity);
		}
		return 0;
	}

	/* if the same note still playing, stop it */
	if (playing_mode != AWE_PLAY_DIRECT || ctrls[AWE_MD_EXCLUSIVE_SOUND]) {
		for (i = 0; i < awe_max_voices; i++)
			if (voices[i].key == key) {
				if (voices[i].state == AWE_ST_ON) {
					awe_note_off(i);
					awe_voice_init(i, FALSE);
				} else if (voices[i].state == AWE_ST_STANDBY)
					awe_voice_init(i, TRUE);
			}
	}

	/* allocate voices */
	if (playing_mode == AWE_PLAY_DIRECT)
		awe_alloc_one_voice(voice, note, velocity);
	else
		awe_alloc_multi_voices(voice, note, velocity, key);

	/* turn off other voices exlusively (for drums) */
	for (i = 0; i < awe_max_voices; i++)
		if (voices[i].key == key)
			awe_exclusive_off(i);

	/* set up pitch and volume parameters, then trigger the
	 * voices the allocator left in the OFF state */
	for (i = 0; i < awe_max_voices; i++) {
		if (voices[i].key == key && voices[i].state == AWE_ST_OFF)
			set_and_start_voice(i, state);
	}

	return 0;
}
2076
2077
2078/* calculate hash key */
2079static int
2080awe_search_key(int bank, int preset, int note)
2081{
2082 unsigned int key;
2083
2084#if 1 /* new hash table */
2085 if (bank == AWE_DRUM_BANK)
2086 key = preset + note + 128;
2087 else
2088 key = bank + preset;
2089#else
2090 key = preset;
2091#endif
2092 key %= AWE_MAX_PRESETS;
2093
2094 return (int)key;
2095}
2096
2097
2098/* search instrument from hash table */
2099static awe_voice_list *
2100awe_search_instr(int bank, int preset, int note)
2101{
2102 awe_voice_list *p;
2103 int key, key2;
2104
2105 key = awe_search_key(bank, preset, note);
2106 for (p = preset_table[key]; p; p = p->next_bank) {
2107 if (p->instr == preset && p->bank == bank)
2108 return p;
2109 }
2110 key2 = awe_search_key(bank, preset, 0); /* search default */
2111 if (key == key2)
2112 return NULL;
2113 for (p = preset_table[key2]; p; p = p->next_bank) {
2114 if (p->instr == preset && p->bank == bank)
2115 return p;
2116 }
2117 return NULL;
2118}
2119
2120
2121/* assign the instrument to a voice */
2122static int
2123awe_set_instr_2(int dev, int voice, int instr_no)
2124{
2125 if (playing_mode == AWE_PLAY_MULTI2) {
2126 voice = voice_alloc->map[voice] >> 8;
2127 if (voice < 0 || voice >= AWE_MAX_CHANNELS)
2128 return -EINVAL;
2129 }
2130 return awe_set_instr(dev, voice, instr_no);
2131}
2132
2133/* assign the instrument to a channel; voice is the channel number */
2134static int
2135awe_set_instr(int dev, int voice, int instr_no)
2136{
2137 awe_chan_info *cinfo;
2138
2139 if (! voice_in_range(voice))
2140 return -EINVAL;
2141
2142 if (instr_no < 0 || instr_no >= AWE_MAX_PRESETS)
2143 return -EINVAL;
2144
2145 cinfo = &channels[voice];
2146 cinfo->instr = instr_no;
2147 DEBUG(2,printk("AWE32: [program(%d) %d]\n", voice, instr_no));
2148
2149 return 0;
2150}
2151
2152
/* reset all voices; terminate sounds and initialize parameters.
 * Also restores default main/expression volume on the 16 sequencer
 * channel-info slots and re-tweaks the chip.
 */
static void
awe_reset(int dev)
{
	int i;
	current_alloc_time = 0;
	/* don't turn off voice 31 and 32. they are used also for FM voices */
	for (i = 0; i < awe_max_voices; i++) {
		awe_terminate(i);
		awe_voice_init(i, TRUE);
	}
	for (i = 0; i < AWE_MAX_CHANNELS; i++)
		awe_channel_init(i, TRUE);
	/* reset the OSS sequencer per-channel controller state */
	for (i = 0; i < 16; i++) {
		awe_operations.chn_info[i].controllers[CTL_MAIN_VOLUME] = 127;
		awe_operations.chn_info[i].controllers[CTL_EXPRESSION] = 127;
	}
	awe_init_fm();
	awe_tweak();
}
2173
2174
/* hardware specific control:
 * GUS specific and AWE32 specific controls are available.
 * Bit _AWE_MODE_FLAG in the command byte selects the AWE-native
 * command set; otherwise the event is dispatched to the GUS
 * compatibility handler (when compiled in).
 */
static void
awe_hw_control(int dev, unsigned char *event)
{
	int cmd = event[2];
	if (cmd & _AWE_MODE_FLAG)
		awe_hw_awe_control(dev, cmd & _AWE_MODE_VALUE_MASK, event);
#ifdef AWE_HAS_GUS_COMPATIBILITY
	else
		awe_hw_gus_control(dev, cmd & _AWE_MODE_VALUE_MASK, event);
#endif
}
2189
2190
2191#ifdef AWE_HAS_GUS_COMPATIBILITY
2192
/* GUS compatible controls.
 * Decodes the parameters packed in the event bytes and emulates the
 * subset of GUS voice commands that map onto the EMU8000.
 * Only active in single-layer mode.
 */
static void
awe_hw_gus_control(int dev, int cmd, unsigned char *event)
{
	int voice, i, key;
	unsigned short p1;
	short p2;
	int plong;

	if (MULTI_LAYER_MODE())
		return;
	if (cmd == _GUS_NUMVOICES)
		return;

	voice = event[3];
	if (! voice_in_range(voice))
		return;

	/* event[4..7] carry the parameters; p2 is decoded for symmetry
	 * but not used by any of the commands handled below */
	p1 = *(unsigned short *) &event[4];
	p2 = *(short *) &event[6];
	plong = *(int*) &event[4];

	/* commands that act on the channel state directly */
	switch (cmd) {
	case _GUS_VOICESAMPLE:
		awe_set_instr(dev, voice, p1);
		return;

	case _GUS_VOICEBALA:
		/* 0 to 15 --> -128 to 127 */
		awe_panning(dev, voice, ((int)p1 << 4) - 128);
		return;

	case _GUS_VOICEVOL:
	case _GUS_VOICEVOL2:
		/* not supported yet */
		return;

	case _GUS_RAMPRANGE:
	case _GUS_RAMPRATE:
	case _GUS_RAMPMODE:
	case _GUS_RAMPON:
	case _GUS_RAMPOFF:
		/* volume ramping not supported */
		return;

	case _GUS_VOLUME_SCALE:
		return;

	case _GUS_VOICE_POS:
		/* 32bit sample position split into fine/coarse effects */
		FX_SET(&channels[voice].fx, AWE_FX_SAMPLE_START,
		       (short)(plong & 0x7fff));
		FX_SET(&channels[voice].fx, AWE_FX_COARSE_SAMPLE_START,
		       (plong >> 15) & 0xffff);
		return;
	}

	/* remaining commands act on every hw voice keyed to this channel */
	key = AWE_VOICE_KEY(voice);
	for (i = 0; i < awe_max_voices; i++) {
		if (voices[i].key == key) {
			switch (cmd) {
			case _GUS_VOICEON:
				awe_note_on(i);
				break;

			case _GUS_VOICEOFF:
				awe_terminate(i);
				awe_fx_init(voices[i].ch);
				awe_voice_init(i, TRUE);
				break;

			case _GUS_VOICEFADE:
				awe_note_off(i);
				awe_fx_init(voices[i].ch);
				awe_voice_init(i, FALSE);
				break;

			case _GUS_VOICEFREQ:
				awe_calc_pitch_from_freq(i, plong);
				break;
			}
		}
	}
}
2276
2277#endif /* gus_compat */
2278
2279
/* AWE32 specific controls.
 * Dispatches the driver's private command set; parameters p1/p2 are
 * packed little-endian-style into event[4..7].
 */
static void
awe_hw_awe_control(int dev, int cmd, unsigned char *event)
{
	int voice;
	unsigned short p1;
	short p2;
	int i;

	voice = event[3];
	if (! voice_in_range(voice))
		return;

	/* in multi2 mode the event addresses a logical voice; map it
	 * to its channel before acting on channel state */
	if (playing_mode == AWE_PLAY_MULTI2) {
		voice = voice_alloc->map[voice] >> 8;
		if (voice < 0 || voice >= AWE_MAX_CHANNELS)
			return;
	}

	p1 = *(unsigned short *) &event[4];
	p2 = *(short *) &event[6];

	switch (cmd) {
	case _AWE_DEBUG_MODE:
		ctrls[AWE_MD_DEBUG_MODE] = p1;
		printk(KERN_DEBUG "AWE32: debug mode = %d\n", ctrls[AWE_MD_DEBUG_MODE]);
		break;
	case _AWE_REVERB_MODE:
		ctrls[AWE_MD_REVERB_MODE] = p1;
		awe_update_reverb_mode();
		break;

	case _AWE_CHORUS_MODE:
		ctrls[AWE_MD_CHORUS_MODE] = p1;
		awe_update_chorus_mode();
		break;

	case _AWE_REMOVE_LAST_SAMPLES:
		/* unload everything above the locked soundfont */
		DEBUG(0,printk("AWE32: remove last samples\n"));
		awe_reset(0);
		if (locked_sf_id > 0)
			awe_remove_samples(locked_sf_id);
		break;

	case _AWE_INITIALIZE_CHIP:
		awe_initialize();
		break;

	case _AWE_SEND_EFFECT:
		/* upper byte of p1 (when >= 0x100) selects the layer */
		i = -1;
		if (p1 >= 0x100) {
			i = (p1 >> 8);
			if (i < 0 || i >= MAX_LAYERS)
				break;
		}
		awe_send_effect(voice, i, p1, p2);
		break;

	case _AWE_RESET_CHANNEL:
		/* p1 != 0 keeps the persistent channel settings */
		awe_channel_init(voice, !p1);
		break;

	case _AWE_TERMINATE_ALL:
		awe_reset(0);
		break;

	case _AWE_TERMINATE_CHANNEL:
		awe_voice_change(voice, awe_terminate_and_init);
		break;

	case _AWE_RELEASE_ALL:
		awe_note_off_all(FALSE);
		break;
	case _AWE_NOTEOFF_ALL:
		awe_note_off_all(TRUE);
		break;

	case _AWE_INITIAL_VOLUME:
		DEBUG(0,printk("AWE32: init attenuation %d\n", p1));
		atten_relative = (char)p2;
		atten_offset = (short)p1;
		awe_update_volume();
		break;

	case _AWE_CHN_PRESSURE:
		channels[voice].chan_press = p1;
		awe_modwheel_change(voice, p1);
		break;

	case _AWE_CHANNEL_MODE:
		DEBUG(0,printk("AWE32: channel mode = %d\n", p1));
		playing_mode = p1;
		awe_reset(0);
		break;

	case _AWE_DRUM_CHANNELS:
		/* full 32bit drum-channel bitmask lives in event[4..7] */
		DEBUG(0,printk("AWE32: drum flags = %x\n", p1));
		drum_flags = *(unsigned int*)&event[4];
		break;

	case _AWE_MISC_MODE:
		/* set an arbitrary control parameter and run its updater */
		DEBUG(0,printk("AWE32: ctrl parms = %d %d\n", p1, p2));
		if (p1 > AWE_MD_VERSION && p1 < AWE_MD_END) {
			ctrls[p1] = p2;
			if (ctrl_parms[p1].update)
				ctrl_parms[p1].update();
		}
		break;

	case _AWE_EQUALIZER:
		ctrls[AWE_MD_BASS_LEVEL] = p1;
		ctrls[AWE_MD_TREBLE_LEVEL] = p2;
		awe_update_equalizer();
		break;

	default:
		DEBUG(0,printk("AWE32: hw control cmd=%d voice=%d\n", cmd, voice));
		break;
	}
}
2400
2401
2402/* change effects */
2403static void
2404awe_send_effect(int voice, int layer, int type, int val)
2405{
2406 awe_chan_info *cinfo;
2407 FX_Rec *fx;
2408 int mode;
2409
2410 cinfo = &channels[voice];
2411 if (layer >= 0 && layer < MAX_LAYERS)
2412 fx = &cinfo->fx_layer[layer];
2413 else
2414 fx = &cinfo->fx;
2415
2416 if (type & 0x40)
2417 mode = FX_FLAG_OFF;
2418 else if (type & 0x80)
2419 mode = FX_FLAG_ADD;
2420 else
2421 mode = FX_FLAG_SET;
2422 type &= 0x3f;
2423
2424 if (type >= 0 && type < AWE_FX_END) {
2425 DEBUG(2,printk("AWE32: effects (%d) %d %d\n", voice, type, val));
2426 if (mode == FX_FLAG_SET)
2427 FX_SET(fx, type, val);
2428 else if (mode == FX_FLAG_ADD)
2429 FX_ADD(fx, type, val);
2430 else
2431 FX_UNSET(fx, type);
2432 if (mode != FX_FLAG_OFF && parm_defs[type].realtime) {
2433 DEBUG(2,printk("AWE32: fx_realtime (%d)\n", voice));
2434 awe_voice_change(voice, parm_defs[type].realtime);
2435 }
2436 }
2437}
2438
2439
2440/* change modulation wheel; voice is already mapped on multi2 mode */
2441static void
2442awe_modwheel_change(int voice, int value)
2443{
2444 int i;
2445 awe_chan_info *cinfo;
2446
2447 cinfo = &channels[voice];
2448 i = value * ctrls[AWE_MD_MOD_SENSE] / 1200;
2449 FX_ADD(&cinfo->fx, AWE_FX_LFO1_PITCH, i);
2450 awe_voice_change(voice, awe_fx_fmmod);
2451 FX_ADD(&cinfo->fx, AWE_FX_LFO2_PITCH, i);
2452 awe_voice_change(voice, awe_fx_fm2frq2);
2453}
2454
2455
2456/* voice pressure change */
2457static void
2458awe_aftertouch(int dev, int voice, int pressure)
2459{
2460 int note;
2461
2462 DEBUG(2,printk("AWE32: [after(%d) %d]\n", voice, pressure));
2463 if (! voice_in_range(voice))
2464 return;
2465
2466 switch (playing_mode) {
2467 case AWE_PLAY_DIRECT:
2468 case AWE_PLAY_INDIRECT:
2469 awe_start_note(dev, voice, 255, pressure);
2470 break;
2471 case AWE_PLAY_MULTI2:
2472 note = (voice_alloc->map[voice] & 0xff) - 1;
2473 awe_key_pressure(dev, voice, note + 0x80, pressure);
2474 break;
2475 }
2476}
2477
2478
/* voice control change.
 * Handles MIDI controllers and the OSS SEQ1-v2 control aliases for
 * a channel. In multi2 mode the voice number is first mapped to its
 * real channel.
 */
static void
awe_controller(int dev, int voice, int ctrl_num, int value)
{
	awe_chan_info *cinfo;

	if (! voice_in_range(voice))
		return;

	if (playing_mode == AWE_PLAY_MULTI2) {
		voice = voice_alloc->map[voice] >> 8;
		if (voice < 0 || voice >= AWE_MAX_CHANNELS)
			return;
	}

	cinfo = &channels[voice];

	switch (ctrl_num) {
	case CTL_BANK_SELECT: /* MIDI control #0 */
		DEBUG(2,printk("AWE32: [bank(%d) %d]\n", voice, value));
		/* drum channels ignore bank changes unless explicitly
		 * allowed by the toggle-drum-bank control */
		if (MULTI_LAYER_MODE() && IS_DRUM_CHANNEL(voice) &&
		    !ctrls[AWE_MD_TOGGLE_DRUM_BANK])
			break;
		if (value < 0 || value > 255)
			break;
		cinfo->bank = value;
		if (cinfo->bank == AWE_DRUM_BANK)
			DRUM_CHANNEL_ON(cinfo->channel);
		else
			DRUM_CHANNEL_OFF(cinfo->channel);
		awe_set_instr(dev, voice, cinfo->instr);
		break;

	case CTL_MODWHEEL: /* MIDI control #1 */
		DEBUG(2,printk("AWE32: [modwheel(%d) %d]\n", voice, value));
		awe_modwheel_change(voice, value);
		break;

	case CTRL_PITCH_BENDER: /* SEQ1 V2 contorl */
		DEBUG(2,printk("AWE32: [bend(%d) %d]\n", voice, value));
		/* zero centered */
		cinfo->bender = value;
		awe_voice_change(voice, awe_set_voice_pitch);
		break;

	case CTRL_PITCH_BENDER_RANGE: /* SEQ1 V2 control */
		DEBUG(2,printk("AWE32: [range(%d) %d]\n", voice, value));
		/* value = sense x 100 */
		cinfo->bender_range = value;
		/* no audible pitch change yet.. */
		break;

	case CTL_EXPRESSION: /* MIDI control #11 */
		/* scale the 14bit MIDI value down to 0-127 first */
		if (SINGLE_LAYER_MODE())
			value /= 128;
		/* fall through */
	case CTRL_EXPRESSION: /* SEQ1 V2 control */
		DEBUG(2,printk("AWE32: [expr(%d) %d]\n", voice, value));
		/* 0 - 127 */
		cinfo->expression_vol = value;
		awe_voice_change(voice, awe_set_voice_vol);
		break;

	case CTL_PAN: /* MIDI control #10 */
		DEBUG(2,printk("AWE32: [pan(%d) %d]\n", voice, value));
		/* (0-127) -> signed 8bit */
		cinfo->panning = value * 2 - 128;
		if (ctrls[AWE_MD_REALTIME_PAN])
			awe_voice_change(voice, awe_set_pan);
		break;

	case CTL_MAIN_VOLUME: /* MIDI control #7 */
		/* scale the 14bit MIDI value down to 0-100 first */
		if (SINGLE_LAYER_MODE())
			value = (value * 100) / 16383;
		/* fall through */
	case CTRL_MAIN_VOLUME: /* SEQ1 V2 control */
		DEBUG(2,printk("AWE32: [mainvol(%d) %d]\n", voice, value));
		/* 0 - 127 */
		cinfo->main_vol = value;
		awe_voice_change(voice, awe_set_voice_vol);
		break;

	case CTL_EXT_EFF_DEPTH: /* reverb effects: 0-127 */
		DEBUG(2,printk("AWE32: [reverb(%d) %d]\n", voice, value));
		FX_SET(&cinfo->fx, AWE_FX_REVERB, value * 2);
		break;

	case CTL_CHORUS_DEPTH: /* chorus effects: 0-127 */
		DEBUG(2,printk("AWE32: [chorus(%d) %d]\n", voice, value));
		FX_SET(&cinfo->fx, AWE_FX_CHORUS, value * 2);
		break;

	case 120: /* all sounds off */
		awe_note_off_all(FALSE);
		break;
	case 123: /* all notes off */
		awe_note_off_all(TRUE);
		break;

	case CTL_SUSTAIN: /* MIDI control #64 */
		cinfo->sustained = value;
		if (value != 127)
			awe_voice_change(voice, awe_sustain_off);
		break;

	case CTL_SOSTENUTO: /* MIDI control #66 */
		if (value == 127)
			awe_voice_change(voice, awe_sostenuto_on);
		else
			awe_voice_change(voice, awe_sustain_off);
		break;

	default:
		DEBUG(0,printk("AWE32: [control(%d) ctrl=%d val=%d]\n",
			       voice, ctrl_num, value));
		break;
	}
}
2595
2596
2597/* voice pan change (value = -128 - 127) */
2598static void
2599awe_panning(int dev, int voice, int value)
2600{
2601 awe_chan_info *cinfo;
2602
2603 if (! voice_in_range(voice))
2604 return;
2605
2606 if (playing_mode == AWE_PLAY_MULTI2) {
2607 voice = voice_alloc->map[voice] >> 8;
2608 if (voice < 0 || voice >= AWE_MAX_CHANNELS)
2609 return;
2610 }
2611
2612 cinfo = &channels[voice];
2613 cinfo->panning = value;
2614 DEBUG(2,printk("AWE32: [pan(%d) %d]\n", voice, cinfo->panning));
2615 if (ctrls[AWE_MD_REALTIME_PAN])
2616 awe_voice_change(voice, awe_set_pan);
2617}
2618
2619
/* volume mode change -- not implemented, only logged for debugging */
static void
awe_volume_method(int dev, int mode)
{
	DEBUG(0,printk("AWE32: [volmethod mode=%d]\n", mode));
}
2627
2628
2629/* pitch wheel change: 0-16384 */
2630static void
2631awe_bender(int dev, int voice, int value)
2632{
2633 awe_chan_info *cinfo;
2634
2635 if (! voice_in_range(voice))
2636 return;
2637
2638 if (playing_mode == AWE_PLAY_MULTI2) {
2639 voice = voice_alloc->map[voice] >> 8;
2640 if (voice < 0 || voice >= AWE_MAX_CHANNELS)
2641 return;
2642 }
2643
2644 /* convert to zero centered value */
2645 cinfo = &channels[voice];
2646 cinfo->bender = value - 8192;
2647 DEBUG(2,printk("AWE32: [bend(%d) %d]\n", voice, cinfo->bender));
2648 awe_voice_change(voice, awe_set_voice_pitch);
2649}
2650
2651
2652/*
2653 * load a sound patch:
2654 * three types of patches are accepted: AWE, GUS, and SYSEX.
2655 */
2656
/* load a sound patch record from user space and dispatch it by type.
 * GUS and SYSEX patches are delegated/ignored; AWE patches have a
 * fixed-size header (awe_patch_info) followed by patch.len bytes.
 * NOTE(review): the header copy starts `offs` bytes in on both the
 * kernel and user side -- assumes the caller already consumed the
 * first offs bytes of the header; confirm against the sequencer core.
 */
static int
awe_load_patch(int dev, int format, const char __user *addr,
	       int offs, int count, int pmgr_flag)
{
	awe_patch_info patch;
	int rc = 0;

#ifdef AWE_HAS_GUS_COMPATIBILITY
	if (format == GUS_PATCH) {
		return awe_load_guspatch(addr, offs, count, pmgr_flag);
	} else
#endif
	if (format == SYSEX_PATCH) {
		/* no system exclusive message supported yet */
		return 0;
	} else if (format != AWE_PATCH) {
		printk(KERN_WARNING "AWE32 Error: Invalid patch format (key) 0x%x\n", format);
		return -EINVAL;
	}

	/* validate and fetch the patch header */
	if (count < AWE_PATCH_INFO_SIZE) {
		printk(KERN_WARNING "AWE32 Error: Patch header too short\n");
		return -EINVAL;
	}
	if (copy_from_user(((char*)&patch) + offs, addr + offs,
			   AWE_PATCH_INFO_SIZE - offs))
		return -EFAULT;

	/* the payload must be at least as long as the header claims */
	count -= AWE_PATCH_INFO_SIZE;
	if (count < patch.len) {
		printk(KERN_WARNING "AWE32: sample: Patch record too short (%d<%d)\n",
		       count, patch.len);
		return -EINVAL;
	}

	switch (patch.type) {
	case AWE_LOAD_INFO:
		rc = awe_load_info(&patch, addr, count);
		break;
	case AWE_LOAD_DATA:
		rc = awe_load_data(&patch, addr, count);
		break;
	case AWE_OPEN_PATCH:
		rc = awe_open_patch(&patch, addr, count);
		break;
	case AWE_CLOSE_PATCH:
		rc = awe_close_patch(&patch, addr, count);
		break;
	case AWE_UNLOAD_PATCH:
		rc = awe_unload_patch(&patch, addr, count);
		break;
	case AWE_REPLACE_DATA:
		rc = awe_replace_data(&patch, addr, count);
		break;
	case AWE_MAP_PRESET:
		rc = awe_load_map(&patch, addr, count);
		break;
	/* case AWE_PROBE_INFO:
		rc = awe_probe_info(&patch, addr, count);
		break;*/
	case AWE_PROBE_DATA:
		rc = awe_probe_data(&patch, addr, count);
		break;
	case AWE_REMOVE_INFO:
		rc = awe_remove_info(&patch, addr, count);
		break;
	case AWE_LOAD_CHORUS_FX:
		rc = awe_load_chorus_fx(&patch, addr, count);
		break;
	case AWE_LOAD_REVERB_FX:
		rc = awe_load_reverb_fx(&patch, addr, count);
		break;

	default:
		printk(KERN_WARNING "AWE32 Error: unknown patch format type %d\n",
		       patch.type);
		rc = -EINVAL;
	}

	return rc;
}
2738
2739
2740/* create an sf list record */
2741static int
2742awe_create_sf(int type, char *name)
2743{
2744 sf_list *rec;
2745
2746 /* terminate sounds */
2747 awe_reset(0);
2748 rec = (sf_list *)kmalloc(sizeof(*rec), GFP_KERNEL);
2749 if (rec == NULL)
2750 return 1; /* no memory */
2751 rec->sf_id = current_sf_id + 1;
2752 rec->type = type;
2753 if (/*current_sf_id == 0 ||*/ (type & AWE_PAT_LOCKED) != 0)
2754 locked_sf_id = current_sf_id + 1;
2755 rec->num_info = awe_free_info();
2756 rec->num_sample = awe_free_sample();
2757 rec->mem_ptr = awe_free_mem_ptr();
2758 rec->infos = rec->last_infos = NULL;
2759 rec->samples = rec->last_samples = NULL;
2760
2761 /* add to linked-list */
2762 rec->next = NULL;
2763 rec->prev = sftail;
2764 if (sftail)
2765 sftail->next = rec;
2766 else
2767 sfhead = rec;
2768 sftail = rec;
2769 current_sf_id++;
2770
2771#ifdef AWE_ALLOW_SAMPLE_SHARING
2772 rec->shared = NULL;
2773 if (name)
2774 memcpy(rec->name, name, AWE_PATCH_NAME_LEN);
2775 else
2776 strcpy(rec->name, "*TEMPORARY*");
2777 if (current_sf_id > 1 && name && (type & AWE_PAT_SHARED) != 0) {
2778 /* is the current font really a shared font? */
2779 if (is_shared_sf(rec->name)) {
2780 /* check if the shared font is already installed */
2781 sf_list *p;
2782 for (p = rec->prev; p; p = p->prev) {
2783 if (is_identical_name(rec->name, p)) {
2784 rec->shared = p;
2785 break;
2786 }
2787 }
2788 }
2789 }
2790#endif /* allow sharing */
2791
2792 return 0;
2793}
2794
2795
2796#ifdef AWE_ALLOW_SAMPLE_SHARING
2797
2798/* check if the given name is a valid shared name */
2799#define ASC_TO_KEY(c) ((c) - 'A' + 1)
2800static int is_shared_sf(unsigned char *name)
2801{
2802 static unsigned char id_head[4] = {
2803 ASC_TO_KEY('A'), ASC_TO_KEY('W'), ASC_TO_KEY('E'),
2804 AWE_MAJOR_VERSION,
2805 };
2806 if (memcmp(name, id_head, 4) == 0)
2807 return TRUE;
2808 return FALSE;
2809}
2810
2811/* check if the given name matches to the existing list */
2812static int is_identical_name(unsigned char *name, sf_list *p)
2813{
2814 char *id = p->name;
2815 if (is_shared_sf(id) && memcmp(id, name, AWE_PATCH_NAME_LEN) == 0)
2816 return TRUE;
2817 return FALSE;
2818}
2819
2820/* check if the given voice info exists */
2821static int info_duplicated(sf_list *sf, awe_voice_list *rec)
2822{
2823 /* search for all sharing lists */
2824 for (; sf; sf = sf->shared) {
2825 awe_voice_list *p;
2826 for (p = sf->infos; p; p = p->next) {
2827 if (p->type == V_ST_NORMAL &&
2828 p->bank == rec->bank &&
2829 p->instr == rec->instr &&
2830 p->v.low == rec->v.low &&
2831 p->v.high == rec->v.high &&
2832 p->v.sample == rec->v.sample)
2833 return TRUE;
2834 }
2835 }
2836 return FALSE;
2837}
2838
2839#endif /* AWE_ALLOW_SAMPLE_SHARING */
2840
2841
2842/* free sf_list record */
2843/* linked-list in this function is not cared */
2844static void
2845awe_free_sf(sf_list *sf)
2846{
2847 if (sf->infos) {
2848 awe_voice_list *p, *next;
2849 for (p = sf->infos; p; p = next) {
2850 next = p->next;
2851 kfree(p);
2852 }
2853 }
2854 if (sf->samples) {
2855 awe_sample_list *p, *next;
2856 for (p = sf->samples; p; p = next) {
2857 next = p->next;
2858 kfree(p);
2859 }
2860 }
2861 kfree(sf);
2862}
2863
2864
/* open patch; create sf list and set opened flag.
 * When sample sharing is enabled and the new patch has the same
 * shared name as the current tail, the data is appended to the
 * existing record instead of creating a new one.
 * Returns the current soundfont id (>0) or a negative errno.
 */
static int
awe_open_patch(awe_patch_info *patch, const char __user *addr, int count)
{
	awe_open_parm parm;
	int shared;

	if (copy_from_user(&parm, addr + AWE_PATCH_INFO_SIZE, sizeof(parm)))
		return -EFAULT;
	shared = FALSE;

#ifdef AWE_ALLOW_SAMPLE_SHARING
	if (sftail && (parm.type & AWE_PAT_SHARED) != 0) {
		/* is the previous font the same font? */
		if (is_identical_name(parm.name, sftail)) {
			/* then append to the previous */
			shared = TRUE;
			awe_reset(0);
			if (parm.type & AWE_PAT_LOCKED)
				locked_sf_id = current_sf_id;
		}
	}
#endif /* allow sharing */
	if (! shared) {
		if (awe_create_sf(parm.type, parm.name)) {
			printk(KERN_ERR "AWE32: can't open: failed to alloc new list\n");
			return -ENOMEM;
		}
	}
	patch_opened = TRUE;
	return current_sf_id;
}
2897
2898/* check if the patch is already opened */
2899static sf_list *
2900check_patch_opened(int type, char *name)
2901{
2902 if (! patch_opened) {
2903 if (awe_create_sf(type, name)) {
2904 printk(KERN_ERR "AWE32: failed to alloc new list\n");
2905 return NULL;
2906 }
2907 patch_opened = TRUE;
2908 return sftail;
2909 }
2910 return sftail;
2911}
2912
2913/* close the patch; if no voice is loaded, remove the patch */
2914static int
2915awe_close_patch(awe_patch_info *patch, const char __user *addr, int count)
2916{
2917 if (patch_opened && sftail) {
2918 /* if no voice is loaded, release the current patch */
2919 if (sftail->infos == NULL) {
2920 awe_reset(0);
2921 awe_remove_samples(current_sf_id - 1);
2922 }
2923 }
2924 patch_opened = 0;
2925 return 0;
2926}
2927
2928
2929/* remove the latest patch */
2930static int
2931awe_unload_patch(awe_patch_info *patch, const char __user *addr, int count)
2932{
2933 if (current_sf_id > 0 && current_sf_id > locked_sf_id) {
2934 awe_reset(0);
2935 awe_remove_samples(current_sf_id - 1);
2936 }
2937 return 0;
2938}
2939
2940/* allocate voice info list records */
2941static awe_voice_list *
2942alloc_new_info(void)
2943{
2944 awe_voice_list *newlist;
2945
2946 newlist = kmalloc(sizeof(*newlist), GFP_KERNEL);
2947 if (newlist == NULL) {
2948 printk(KERN_ERR "AWE32: can't alloc info table\n");
2949 return NULL;
2950 }
2951 return newlist;
2952}
2953
2954/* allocate sample info list records */
2955static awe_sample_list *
2956alloc_new_sample(void)
2957{
2958 awe_sample_list *newlist;
2959
2960 newlist = (awe_sample_list *)kmalloc(sizeof(*newlist), GFP_KERNEL);
2961 if (newlist == NULL) {
2962 printk(KERN_ERR "AWE32: can't alloc sample table\n");
2963 return NULL;
2964 }
2965 return newlist;
2966}
2967
/* load voice map:
 * install a V_ST_MAPPED record that redirects one (bank, instr, key)
 * preset to another. Duplicate mappings are silently accepted.
 */
static int
awe_load_map(awe_patch_info *patch, const char __user *addr, int count)
{
	awe_voice_map map;
	awe_voice_list *rec, *p;
	sf_list *sf;

	/* get the link info */
	if (count < sizeof(map)) {
		printk(KERN_WARNING "AWE32 Error: invalid patch info length\n");
		return -EINVAL;
	}
	if (copy_from_user(&map, addr + AWE_PATCH_INFO_SIZE, sizeof(map)))
		return -EFAULT;

	/* check if the identical mapping already exists */
	p = awe_search_instr(map.map_bank, map.map_instr, map.map_key);
	for (; p; p = p->next_instr) {
		/* mapped entries store the source triple in start/end/fixkey */
		if (p->type == V_ST_MAPPED &&
		    p->v.start == map.src_instr &&
		    p->v.end == map.src_bank &&
		    p->v.fixkey == map.src_key)
			return 0; /* already present! */
	}

	if ((sf = check_patch_opened(AWE_PAT_TYPE_MAP, NULL)) == NULL)
		return -ENOMEM;

	if ((rec = alloc_new_info()) == NULL)
		return -ENOMEM;

	rec->bank = map.map_bank;
	rec->instr = map.map_instr;
	rec->type = V_ST_MAPPED;
	rec->disabled = FALSE;
	awe_init_voice_info(&rec->v);
	/* a non-negative map_key restricts the mapping to that one key */
	if (map.map_key >= 0) {
		rec->v.low = map.map_key;
		rec->v.high = map.map_key;
	}
	rec->v.start = map.src_instr;
	rec->v.end = map.src_bank;
	rec->v.fixkey = map.src_key;
	add_sf_info(sf, rec);
	add_info_list(rec);

	return 0;
}
3017
#if 0
/* probe preset in the current list -- nothing to be loaded.
 * (Currently compiled out; kept for reference. Returns 0 when a
 * matching normal-voice record is found in the current font chain,
 * otherwise -EINVAL.)
 */
static int
awe_probe_info(awe_patch_info *patch, const char __user *addr, int count)
{
#ifdef AWE_ALLOW_SAMPLE_SHARING
	awe_voice_map map;
	awe_voice_list *p;

	if (! patch_opened)
		return -EINVAL;

	/* get the link info */
	if (count < sizeof(map)) {
		printk(KERN_WARNING "AWE32 Error: invalid patch info length\n");
		return -EINVAL;
	}
	if (copy_from_user(&map, addr + AWE_PATCH_INFO_SIZE, sizeof(map)))
		return -EFAULT;

	/* check if the identical mapping already exists */
	if (sftail == NULL)
		return -EINVAL;
	p = awe_search_instr(map.src_bank, map.src_instr, map.src_key);
	for (; p; p = p->next_instr) {
		if (p->type == V_ST_NORMAL &&
		    is_identical_holder(p->holder, sftail) &&
		    p->v.low <= map.src_key &&
		    p->v.high >= map.src_key)
			return 0; /* already present! */
	}
#endif /* allow sharing */
	return -EINVAL;
}
#endif
3053
/* probe sample in the current list -- nothing to be loaded.
 * Returns 0 when the sample indexed by patch->optarg already exists
 * in the opened font (sharing build only), else -EINVAL.
 */
static int
awe_probe_data(awe_patch_info *patch, const char __user *addr, int count)
{
#ifdef AWE_ALLOW_SAMPLE_SHARING
	if (! patch_opened)
		return -EINVAL;

	/* search the specified sample by optarg */
	if (search_sample_index(sftail, patch->optarg) != NULL)
		return 0;
#endif /* allow sharing */
	return -EINVAL;
}
3068
3069
3070/* remove the present instrument layers */
3071static int
3072remove_info(sf_list *sf, int bank, int instr)
3073{
3074 awe_voice_list *prev, *next, *p;
3075 int removed = 0;
3076
3077 prev = NULL;
3078 for (p = sf->infos; p; p = next) {
3079 next = p->next;
3080 if (p->type == V_ST_NORMAL &&
3081 p->bank == bank && p->instr == instr) {
3082 /* remove this layer */
3083 if (prev)
3084 prev->next = next;
3085 else
3086 sf->infos = next;
3087 if (p == sf->last_infos)
3088 sf->last_infos = prev;
3089 sf->num_info--;
3090 removed++;
3091 kfree(p);
3092 } else
3093 prev = p;
3094 }
3095 if (removed)
3096 rebuild_preset_list();
3097 return removed;
3098}
3099
/* load voice information data */
/*
 * Parse an AWE patch record from user space and append its voice
 * layers to the currently opened soundfont.
 *
 * Expected layout in user memory: patch header (AWE_PATCH_INFO_SIZE)
 * followed by an awe_voice_rec_hdr, followed by hdr.nvoices
 * awe_voice_info records.  Returns 0 on success, negative errno on
 * failure.  Layers added before a mid-loop -EFAULT/-ENOMEM remain
 * registered with the soundfont.
 */
static int
awe_load_info(awe_patch_info *patch, const char __user *addr, int count)
{
	int offset;
	awe_voice_rec_hdr hdr;
	int i;
	int total_size;
	sf_list *sf;
	awe_voice_list *rec;

	if (count < AWE_VOICE_REC_SIZE) {
		printk(KERN_WARNING "AWE32 Error: invalid patch info length\n");
		return -EINVAL;
	}

	offset = AWE_PATCH_INFO_SIZE;
	if (copy_from_user((char*)&hdr, addr + offset, AWE_VOICE_REC_SIZE))
		return -EFAULT;
	offset += AWE_VOICE_REC_SIZE;

	/* bound the user-supplied voice count to 1..99 */
	if (hdr.nvoices <= 0 || hdr.nvoices >= 100) {
		printk(KERN_WARNING "AWE32 Error: Invalid voice number %d\n", hdr.nvoices);
		return -EINVAL;
	}
	total_size = AWE_VOICE_REC_SIZE + AWE_VOICE_INFO_SIZE * hdr.nvoices;
	if (count < total_size) {
		printk(KERN_WARNING "AWE32 Error: patch length(%d) is smaller than nvoices(%d)\n",
		       count, hdr.nvoices);
		return -EINVAL;
	}

	/* make sure a soundfont is open to receive the layers */
	if ((sf = check_patch_opened(AWE_PAT_TYPE_MISC, NULL)) == NULL)
		return -ENOMEM;

	switch (hdr.write_mode) {
	case AWE_WR_EXCLUSIVE:
		/* exclusive mode - if the instrument already exists,
		   return error */
		for (rec = sf->infos; rec; rec = rec->next) {
			if (rec->type == V_ST_NORMAL &&
			    rec->bank == hdr.bank &&
			    rec->instr == hdr.instr)
				return -EINVAL;
		}
		break;
	case AWE_WR_REPLACE:
		/* replace mode - remove the instrument if it already exists */
		remove_info(sf, hdr.bank, hdr.instr);
		break;
	}

	/* append new layers */
	for (i = 0; i < hdr.nvoices; i++) {
		rec = alloc_new_info();
		if (rec == NULL)
			return -ENOMEM;

		rec->bank = hdr.bank;
		rec->instr = hdr.instr;
		rec->type = V_ST_NORMAL;
		rec->disabled = FALSE;

		/* copy awe_voice_info parameters */
		if (copy_from_user(&rec->v, addr + offset, AWE_VOICE_INFO_SIZE)) {
			kfree(rec);
			return -EFAULT;
		}
		offset += AWE_VOICE_INFO_SIZE;
#ifdef AWE_ALLOW_SAMPLE_SHARING
		/* skip layers that duplicate one already present in the
		   shared-font chain */
		if (sf && sf->shared) {
			if (info_duplicated(sf, rec)) {
				kfree(rec);
				continue;
			}
		}
#endif /* allow sharing */
		if (rec->v.mode & AWE_MODE_INIT_PARM)
			awe_init_voice_parm(&rec->v.parm);
		add_sf_info(sf, rec);	/* attach to the soundfont */
		awe_set_sample(rec);	/* resolve sample offsets now */
		add_info_list(rec);	/* expose in the preset search table */
	}

	return 0;
}
3186
3187
3188/* remove instrument layers */
3189static int
3190awe_remove_info(awe_patch_info *patch, const char __user *addr, int count)
3191{
3192 unsigned char bank, instr;
3193 sf_list *sf;
3194
3195 if (! patch_opened || (sf = sftail) == NULL) {
3196 printk(KERN_WARNING "AWE32: remove_info: patch not opened\n");
3197 return -EINVAL;
3198 }
3199
3200 bank = ((unsigned short)patch->optarg >> 8) & 0xff;
3201 instr = (unsigned short)patch->optarg & 0xff;
3202 if (! remove_info(sf, bank, instr))
3203 return -EINVAL;
3204 return 0;
3205}
3206
3207
/* load wave sample data */
/*
 * Load one wave sample (awe_sample_info header plus 16-bit sample
 * words) from user space into sound DRAM and register it with the
 * open soundfont.  Returns 0 on success or a negative errno.  A
 * sample id that already exists in a shared font is silently
 * accepted (returns 0 without loading).
 */
static int
awe_load_data(awe_patch_info *patch, const char __user *addr, int count)
{
	int offset, size;
	int rc;
	awe_sample_info tmprec;
	awe_sample_list *rec;
	sf_list *sf;

	if ((sf = check_patch_opened(AWE_PAT_TYPE_MISC, NULL)) == NULL)
		return -ENOMEM;

	/* payload length in 16-bit words after the sample header */
	size = (count - AWE_SAMPLE_INFO_SIZE) / 2;
	offset = AWE_PATCH_INFO_SIZE;
	if (copy_from_user(&tmprec, addr + offset, AWE_SAMPLE_INFO_SIZE))
		return -EFAULT;
	offset += AWE_SAMPLE_INFO_SIZE;
	if (size != tmprec.size) {
		printk(KERN_WARNING "AWE32: load: sample size differed (%d != %d)\n",
		       tmprec.size, size);
		return -EINVAL;
	}

	if (search_sample_index(sf, tmprec.sample) != NULL) {
#ifdef AWE_ALLOW_SAMPLE_SHARING
		/* if shared sample, skip this data */
		if (sf->type & AWE_PAT_SHARED)
			return 0;
#endif /* allow sharing */
		DEBUG(1,printk("AWE32: sample data %d already present\n", tmprec.sample));
		return -EINVAL;
	}

	if ((rec = alloc_new_sample()) == NULL)
		return -ENOMEM;

	memcpy(&rec->v, &tmprec, sizeof(tmprec));

	if (rec->v.size > 0) {
		/* stream the words into DRAM; rc is the true size written */
		if ((rc = awe_write_wave_data(addr, offset, rec, -1)) < 0) {
			kfree(rec);
			return rc;
		}
		sf->mem_ptr += rc;
	}

	add_sf_sample(sf, rec);
	return 0;
}
3258
3259
3260/* replace wave sample data */
3261static int
3262awe_replace_data(awe_patch_info *patch, const char __user *addr, int count)
3263{
3264 int offset;
3265 int size;
3266 int rc;
3267 int channels;
3268 awe_sample_info cursmp;
3269 int save_mem_ptr;
3270 sf_list *sf;
3271 awe_sample_list *rec;
3272
3273 if (! patch_opened || (sf = sftail) == NULL) {
3274 printk(KERN_WARNING "AWE32: replace: patch not opened\n");
3275 return -EINVAL;
3276 }
3277
3278 size = (count - AWE_SAMPLE_INFO_SIZE) / 2;
3279 offset = AWE_PATCH_INFO_SIZE;
3280 if (copy_from_user(&cursmp, addr + offset, AWE_SAMPLE_INFO_SIZE))
3281 return -EFAULT;
3282 offset += AWE_SAMPLE_INFO_SIZE;
3283 if (cursmp.size == 0 || size != cursmp.size) {
3284 printk(KERN_WARNING "AWE32: replace: invalid sample size (%d!=%d)\n",
3285 cursmp.size, size);
3286 return -EINVAL;
3287 }
3288 channels = patch->optarg;
3289 if (channels <= 0 || channels > AWE_NORMAL_VOICES) {
3290 printk(KERN_WARNING "AWE32: replace: invalid channels %d\n", channels);
3291 return -EINVAL;
3292 }
3293
3294 for (rec = sf->samples; rec; rec = rec->next) {
3295 if (rec->v.sample == cursmp.sample)
3296 break;
3297 }
3298 if (rec == NULL) {
3299 printk(KERN_WARNING "AWE32: replace: cannot find existing sample data %d\n",
3300 cursmp.sample);
3301 return -EINVAL;
3302 }
3303
3304 if (rec->v.size != cursmp.size) {
3305 printk(KERN_WARNING "AWE32: replace: exiting size differed (%d!=%d)\n",
3306 rec->v.size, cursmp.size);
3307 return -EINVAL;
3308 }
3309
3310 save_mem_ptr = awe_free_mem_ptr();
3311 sftail->mem_ptr = rec->v.start - awe_mem_start;
3312 memcpy(&rec->v, &cursmp, sizeof(cursmp));
3313 rec->v.sf_id = current_sf_id;
3314 if ((rc = awe_write_wave_data(addr, offset, rec, channels)) < 0)
3315 return rc;
3316 sftail->mem_ptr = save_mem_ptr;
3317
3318 return 0;
3319}
3320
3321
3322/*----------------------------------------------------------------*/
3323
/* Shared state for streaming sample words out of the user buffer:
 * base address, byte offset of the wave data area, and the sample's
 * mode flags (drive the 8-bit and unsigned conversions in
 * readbuf_word()).  File-scope statics -- not reentrant. */
static const char __user *readbuf_addr;
static int readbuf_offs;
static int readbuf_flags;
3327
3328/* initialize read buffer */
3329static int
3330readbuf_init(const char __user *addr, int offset, awe_sample_info *sp)
3331{
3332 readbuf_addr = addr;
3333 readbuf_offs = offset;
3334 readbuf_flags = sp->mode_flags;
3335 return 0;
3336}
3337
/* read directly from user buffer */
/*
 * Fetch sample word `pos` from the user buffer registered by
 * readbuf_init(), widening 8-bit samples to 16 bits and flipping
 * unsigned samples to signed according to the mode flags.
 * NOTE(review): the get_user() return value is ignored, so a fault
 * yields an unchecked word instead of an error -- confirm intended.
 */
static unsigned short
readbuf_word(int pos)
{
	unsigned short c;
	/* read from user buffer */
	if (readbuf_flags & AWE_SAMPLE_8BITS) {
		unsigned char cc;
		get_user(cc, (unsigned char __user *)(readbuf_addr + readbuf_offs + pos));
		c = (unsigned short)cc << 8; /* convert 8bit -> 16bit */
	} else {
		get_user(c, (unsigned short __user *)(readbuf_addr + readbuf_offs + pos * 2));
	}
	if (readbuf_flags & AWE_SAMPLE_UNSIGNED)
		c ^= 0x8000; /* unsigned -> signed */
	return c;
}
3355
/* The direct user-space reader needs no caching and no teardown, so
 * the "cached" variant and the end hook are aliases / no-ops here. */
#define readbuf_word_cache readbuf_word
#define readbuf_end() /**/

/*----------------------------------------------------------------*/

/* word offsets/size of the silent terminating loop appended after a
 * sample when AWE_SAMPLE_NO_BLANK is set */
#define BLANK_LOOP_START 8
#define BLANK_LOOP_END 40
#define BLANK_LOOP_SIZE 48
3364
/* loading onto memory - return the actual written size */
/*
 * Stream one sample's wave data from user space into sound DRAM,
 * expanding reverse/bidirectional loops and appending the optional
 * blank terminating loop.  On success returns the true number of
 * words written (the caller accounts it into sf->mem_ptr); on
 * failure returns a negative errno.  Side effects: rewrites sp's
 * start/end/loop offsets from sample-relative to absolute DRAM
 * addresses and stores the total size in the obsolete checksum
 * field.
 */
static int
awe_write_wave_data(const char __user *addr, int offset, awe_sample_list *list, int channels)
{
	int i, truesize, dram_offset;
	awe_sample_info *sp = &list->v;
	int rc;

	/* be sure loop points start < end */
	if (sp->loopstart > sp->loopend) {
		int tmp = sp->loopstart;
		sp->loopstart = sp->loopend;
		sp->loopend = tmp;
	}

	/* compute true data size to be loaded */
	truesize = sp->size;
	if (sp->mode_flags & (AWE_SAMPLE_BIDIR_LOOP|AWE_SAMPLE_REVERSE_LOOP))
		truesize += sp->loopend - sp->loopstart;	/* reversed copy of the loop */
	if (sp->mode_flags & AWE_SAMPLE_NO_BLANK)
		truesize += BLANK_LOOP_SIZE;	/* room for the silent tail loop */
	if (awe_free_mem_ptr() + truesize >= memsize/2) {
		DEBUG(-1,printk("AWE32 Error: Sample memory full\n"));
		return -ENOSPC;
	}

	/* recalculate address offset */
	sp->end -= sp->start;
	sp->loopstart -= sp->start;
	sp->loopend -= sp->start;

	/* rebase everything onto the next free DRAM address */
	dram_offset = awe_free_mem_ptr() + awe_mem_start;
	sp->start = dram_offset;
	sp->end += dram_offset;
	sp->loopstart += dram_offset;
	sp->loopend += dram_offset;

	/* set the total size (store onto obsolete checksum value) */
	if (sp->size == 0)
		sp->checksum = 0;
	else
		sp->checksum = truesize;

	if ((rc = awe_open_dram_for_write(dram_offset, channels)) != 0)
		return rc;

	if (readbuf_init(addr, offset, sp) < 0)
		return -ENOSPC;

	for (i = 0; i < sp->size; i++) {
		unsigned short c;
		c = readbuf_word(i);
		awe_write_dram(c);
		/* NOTE(review): sp->loopend was rebased to an absolute DRAM
		 * address above while i is a word index -- confirm this
		 * comparison is intended. */
		if (i == sp->loopend &&
		    (sp->mode_flags & (AWE_SAMPLE_BIDIR_LOOP|AWE_SAMPLE_REVERSE_LOOP))) {
			int looplen = sp->loopend - sp->loopstart;
			/* copy reverse loop */
			int k;
			for (k = 1; k <= looplen; k++) {
				c = readbuf_word_cache(i - k);
				awe_write_dram(c);
			}
			if (sp->mode_flags & AWE_SAMPLE_BIDIR_LOOP) {
				sp->end += looplen;
			} else {
				sp->start += looplen;
				sp->end += looplen;
			}
		}
	}
	readbuf_end();

	/* if no blank loop is attached in the sample, add it */
	if (sp->mode_flags & AWE_SAMPLE_NO_BLANK) {
		for (i = 0; i < BLANK_LOOP_SIZE; i++)
			awe_write_dram(0);
		if (sp->mode_flags & AWE_SAMPLE_SINGLESHOT) {
			sp->loopstart = sp->end + BLANK_LOOP_START;
			sp->loopend = sp->end + BLANK_LOOP_END;
		}
	}

	awe_close_dram();

	/* initialize FM */
	awe_init_fm();

	return truesize;
}
3454
3455
3456/*----------------------------------------------------------------*/
3457
3458#ifdef AWE_HAS_GUS_COMPATIBILITY
3459
/* calculate GUS envelope time:
 * is this correct? i have no idea..
 *
 * The GUS rate byte packs a 2-bit range selector (bits 6-7) and a
 * 6-bit mantissa (bits 0-5); the distance travelled is |end - start|.
 * A zero mantissa previously caused a division by zero on
 * user-supplied patch data; it is now clamped to 1.
 */
static int
calc_gus_envelope_time(int rate, int start, int end)
{
	int r, p, t;
	r = (3 - ((rate >> 6) & 3)) * 3;
	p = rate & 0x3f;
	if (p == 0)
		p = 1;	/* guard: avoid divide-by-zero on degenerate rate */
	t = end - start;
	if (t < 0) t = -t;
	if (13 > r)
		t = t << (13 - r);
	else
		t = t >> (r - 13);
	return (t * 10) / (p * 441);
}
3477
/* Map a GUS volume byte (0..255) onto the driver's attenuation
 * table; sustain is expressed as 0x7f minus that attenuation. */
#define calc_gus_sustain(val) (0x7f - vol_table[(val)/2])
#define calc_gus_attenuation(val) vol_table[(val)/2]
3480
3481/* load GUS patch */
3482static int
3483awe_load_guspatch(const char __user *addr, int offs, int size, int pmgr_flag)
3484{
3485 struct patch_info patch;
3486 awe_voice_info *rec;
3487 awe_sample_info *smp;
3488 awe_voice_list *vrec;
3489 awe_sample_list *smprec;
3490 int sizeof_patch;
3491 int note, rc;
3492 sf_list *sf;
3493
3494 sizeof_patch = (int)((long)&patch.data[0] - (long)&patch); /* header size */
3495 if (size < sizeof_patch) {
3496 printk(KERN_WARNING "AWE32 Error: Patch header too short\n");
3497 return -EINVAL;
3498 }
3499 if (copy_from_user(((char*)&patch) + offs, addr + offs, sizeof_patch - offs))
3500 return -EFAULT;
3501 size -= sizeof_patch;
3502 if (size < patch.len) {
3503 printk(KERN_WARNING "AWE32 Error: Patch record too short (%d<%d)\n",
3504 size, patch.len);
3505 return -EINVAL;
3506 }
3507 if ((sf = check_patch_opened(AWE_PAT_TYPE_GUS, NULL)) == NULL)
3508 return -ENOMEM;
3509 if ((smprec = alloc_new_sample()) == NULL)
3510 return -ENOMEM;
3511 if ((vrec = alloc_new_info()) == NULL) {
3512 kfree(smprec);
3513 return -ENOMEM;
3514 }
3515
3516 smp = &smprec->v;
3517 smp->sample = sf->num_sample;
3518 smp->start = 0;
3519 smp->end = patch.len;
3520 smp->loopstart = patch.loop_start;
3521 smp->loopend = patch.loop_end;
3522 smp->size = patch.len;
3523
3524 /* set up mode flags */
3525 smp->mode_flags = 0;
3526 if (!(patch.mode & WAVE_16_BITS))
3527 smp->mode_flags |= AWE_SAMPLE_8BITS;
3528 if (patch.mode & WAVE_UNSIGNED)
3529 smp->mode_flags |= AWE_SAMPLE_UNSIGNED;
3530 smp->mode_flags |= AWE_SAMPLE_NO_BLANK;
3531 if (!(patch.mode & (WAVE_LOOPING|WAVE_BIDIR_LOOP|WAVE_LOOP_BACK)))
3532 smp->mode_flags |= AWE_SAMPLE_SINGLESHOT;
3533 if (patch.mode & WAVE_BIDIR_LOOP)
3534 smp->mode_flags |= AWE_SAMPLE_BIDIR_LOOP;
3535 if (patch.mode & WAVE_LOOP_BACK)
3536 smp->mode_flags |= AWE_SAMPLE_REVERSE_LOOP;
3537
3538 DEBUG(0,printk("AWE32: [sample %d mode %x]\n", patch.instr_no, smp->mode_flags));
3539 if (patch.mode & WAVE_16_BITS) {
3540 /* convert to word offsets */
3541 smp->size /= 2;
3542 smp->end /= 2;
3543 smp->loopstart /= 2;
3544 smp->loopend /= 2;
3545 }
3546 smp->checksum_flag = 0;
3547 smp->checksum = 0;
3548
3549 if ((rc = awe_write_wave_data(addr, sizeof_patch, smprec, -1)) < 0) {
3550 kfree(vrec);
3551 return rc;
3552 }
3553 sf->mem_ptr += rc;
3554 add_sf_sample(sf, smprec);
3555
3556 /* set up voice info */
3557 rec = &vrec->v;
3558 awe_init_voice_info(rec);
3559 rec->sample = sf->num_info; /* the last sample */
3560 rec->rate_offset = calc_rate_offset(patch.base_freq);
3561 note = freq_to_note(patch.base_note);
3562 rec->root = note / 100;
3563 rec->tune = -(note % 100);
3564 rec->low = freq_to_note(patch.low_note) / 100;
3565 rec->high = freq_to_note(patch.high_note) / 100;
3566 DEBUG(1,printk("AWE32: [gus base offset=%d, note=%d, range=%d-%d(%d-%d)]\n",
3567 rec->rate_offset, note,
3568 rec->low, rec->high,
3569 patch.low_note, patch.high_note));
3570 /* panning position; -128 - 127 => 0-127 */
3571 rec->pan = (patch.panning + 128) / 2;
3572
3573 /* detuning is ignored */
3574 /* 6points volume envelope */
3575 if (patch.mode & WAVE_ENVELOPES) {
3576 int attack, hold, decay, release;
3577 attack = calc_gus_envelope_time
3578 (patch.env_rate[0], 0, patch.env_offset[0]);
3579 hold = calc_gus_envelope_time
3580 (patch.env_rate[1], patch.env_offset[0],
3581 patch.env_offset[1]);
3582 decay = calc_gus_envelope_time
3583 (patch.env_rate[2], patch.env_offset[1],
3584 patch.env_offset[2]);
3585 release = calc_gus_envelope_time
3586 (patch.env_rate[3], patch.env_offset[1],
3587 patch.env_offset[4]);
3588 release += calc_gus_envelope_time
3589 (patch.env_rate[4], patch.env_offset[3],
3590 patch.env_offset[4]);
3591 release += calc_gus_envelope_time
3592 (patch.env_rate[5], patch.env_offset[4],
3593 patch.env_offset[5]);
3594 rec->parm.volatkhld = (calc_parm_hold(hold) << 8) |
3595 calc_parm_attack(attack);
3596 rec->parm.voldcysus = (calc_gus_sustain(patch.env_offset[2]) << 8) |
3597 calc_parm_decay(decay);
3598 rec->parm.volrelease = 0x8000 | calc_parm_decay(release);
3599 DEBUG(2,printk("AWE32: [gusenv atk=%d, hld=%d, dcy=%d, rel=%d]\n", attack, hold, decay, release));
3600 rec->attenuation = calc_gus_attenuation(patch.env_offset[0]);
3601 }
3602
3603 /* tremolo effect */
3604 if (patch.mode & WAVE_TREMOLO) {
3605 int rate = (patch.tremolo_rate * 1000 / 38) / 42;
3606 rec->parm.tremfrq = ((patch.tremolo_depth / 2) << 8) | rate;
3607 DEBUG(2,printk("AWE32: [gusenv tremolo rate=%d, dep=%d, tremfrq=%x]\n",
3608 patch.tremolo_rate, patch.tremolo_depth,
3609 rec->parm.tremfrq));
3610 }
3611 /* vibrato effect */
3612 if (patch.mode & WAVE_VIBRATO) {
3613 int rate = (patch.vibrato_rate * 1000 / 38) / 42;
3614 rec->parm.fm2frq2 = ((patch.vibrato_depth / 6) << 8) | rate;
3615 DEBUG(2,printk("AWE32: [gusenv vibrato rate=%d, dep=%d, tremfrq=%x]\n",
3616 patch.tremolo_rate, patch.tremolo_depth,
3617 rec->parm.tremfrq));
3618 }
3619
3620 /* scale_freq, scale_factor, volume, and fractions not implemented */
3621
3622 /* append to the tail of the list */
3623 vrec->bank = ctrls[AWE_MD_GUS_BANK];
3624 vrec->instr = patch.instr_no;
3625 vrec->disabled = FALSE;
3626 vrec->type = V_ST_NORMAL;
3627
3628 add_sf_info(sf, vrec);
3629 add_info_list(vrec);
3630
3631 /* set the voice index */
3632 awe_set_sample(vrec);
3633
3634 return 0;
3635}
3636
3637#endif /* AWE_HAS_GUS_COMPATIBILITY */
3638
3639/*
3640 * sample and voice list handlers
3641 */
3642
3643/* append this to the current sf list */
3644static void add_sf_info(sf_list *sf, awe_voice_list *rec)
3645{
3646 if (sf == NULL)
3647 return;
3648 rec->holder = sf;
3649 rec->v.sf_id = sf->sf_id;
3650 if (sf->last_infos)
3651 sf->last_infos->next = rec;
3652 else
3653 sf->infos = rec;
3654 sf->last_infos = rec;
3655 rec->next = NULL;
3656 sf->num_info++;
3657}
3658
3659/* prepend this sample to sf list */
3660static void add_sf_sample(sf_list *sf, awe_sample_list *rec)
3661{
3662 if (sf == NULL)
3663 return;
3664 rec->holder = sf;
3665 rec->v.sf_id = sf->sf_id;
3666 if (sf->last_samples)
3667 sf->last_samples->next = rec;
3668 else
3669 sf->samples = rec;
3670 sf->last_samples = rec;
3671 rec->next = NULL;
3672 sf->num_sample++;
3673}
3674
/* purge the old records which don't belong with the same file id */
/*
 * After rec has replaced cur ("next") at the head of a preset chain,
 * drop stale entries owned by other soundfonts: for the drum bank
 * only entries with the identical note range are removed, for
 * melodic banks the whole remainder is cut off when its holder
 * differs from rec's.
 */
static void purge_old_list(awe_voice_list *rec, awe_voice_list *next)
{
	rec->next_instr = next;
	if (rec->bank == AWE_DRUM_BANK) {
		/* remove samples with the same note range */
		awe_voice_list *cur, *prev = rec;
		int low = rec->v.low;
		int high = rec->v.high;
		for (cur = next; cur; cur = cur->next_instr) {
			if (cur->v.low == low &&
			    cur->v.high == high &&
			    ! is_identical_holder(cur->holder, rec->holder))
				prev->next_instr = cur->next_instr;	/* unlink */
			else
				prev = cur;
		}
	} else {
		if (! is_identical_holder(next->holder, rec->holder))
			/* remove all samples */
			rec->next_instr = NULL;
	}
}
3698
/* prepend to top of the preset table */
/*
 * Insert a voice record into the preset search table.  If an entry
 * for the same (bank, instr) already heads a chain in the hash
 * bucket, the record replaces it and stale entries from other
 * soundfonts are purged; otherwise it starts a new chain at the head
 * of its bucket.  Disabled records are ignored.
 */
static void add_info_list(awe_voice_list *rec)
{
	awe_voice_list *prev, *cur;
	int key;

	if (rec->disabled)
		return;

	key = awe_search_key(rec->bank, rec->instr, rec->v.low);
	prev = NULL;
	for (cur = preset_table[key]; cur; cur = cur->next_bank) {
		/* search the first record with the same bank number */
		if (cur->instr == rec->instr && cur->bank == rec->bank) {
			/* replace the list with the new record */
			rec->next_bank = cur->next_bank;
			if (prev)
				prev->next_bank = rec;
			else
				preset_table[key] = rec;
			purge_old_list(rec, cur);
			return;
		}
		prev = cur;
	}

	/* this is the first bank record.. just add this */
	rec->next_instr = NULL;
	rec->next_bank = preset_table[key];
	preset_table[key] = rec;
}
3730
/* remove samples later than the specified sf_id */
/*
 * Unload every soundfont newer than sf_id and make the newest
 * remaining font current.  sf_id <= 0 resets everything via
 * awe_reset_samples().  Also lowers locked_sf_id when it pointed
 * past the new current id, then rebuilds the preset search table.
 */
static void
awe_remove_samples(int sf_id)
{
	sf_list *p, *prev;

	if (sf_id <= 0) {
		awe_reset_samples();
		return;
	}
	/* already removed? */
	if (current_sf_id <= sf_id)
		return;

	/* walk backwards from the tail, freeing fonts newer than sf_id */
	for (p = sftail; p; p = prev) {
		if (p->sf_id <= sf_id)
			break;
		prev = p->prev;
		awe_free_sf(p);
	}
	sftail = p;
	if (sftail) {
		sf_id = sftail->sf_id;
		sftail->next = NULL;
	} else {
		/* everything was freed */
		sf_id = 0;
		sfhead = NULL;
	}
	current_sf_id = sf_id;
	if (locked_sf_id > sf_id)
		locked_sf_id = sf_id;

	rebuild_preset_list();
}
3765
3766/* rebuild preset search list */
3767static void rebuild_preset_list(void)
3768{
3769 sf_list *p;
3770 awe_voice_list *rec;
3771
3772 memset(preset_table, 0, sizeof(preset_table));
3773
3774 for (p = sfhead; p; p = p->next) {
3775 for (rec = p->infos; rec; rec = rec->next)
3776 add_info_list(rec);
3777 }
3778}
3779
3780/* compare the given sf_id pair */
3781static int is_identical_holder(sf_list *sf1, sf_list *sf2)
3782{
3783 if (sf1 == NULL || sf2 == NULL)
3784 return FALSE;
3785 if (sf1 == sf2)
3786 return TRUE;
3787#ifdef AWE_ALLOW_SAMPLE_SHARING
3788 {
3789 /* compare with the sharing id */
3790 sf_list *p;
3791 int counter = 0;
3792 if (sf1->sf_id < sf2->sf_id) { /* make sure id1 > id2 */
3793 sf_list *tmp; tmp = sf1; sf1 = sf2; sf2 = tmp;
3794 }
3795 for (p = sf1->shared; p; p = p->shared) {
3796 if (counter++ > current_sf_id)
3797 break; /* strange sharing loop.. quit */
3798 if (p == sf2)
3799 return TRUE;
3800 }
3801 }
3802#endif /* allow sharing */
3803 return FALSE;
3804}
3805
3806/* search the sample index matching with the given sample id */
3807static awe_sample_list *
3808search_sample_index(sf_list *sf, int sample)
3809{
3810 awe_sample_list *p;
3811#ifdef AWE_ALLOW_SAMPLE_SHARING
3812 int counter = 0;
3813 while (sf) {
3814 for (p = sf->samples; p; p = p->next) {
3815 if (p->v.sample == sample)
3816 return p;
3817 }
3818 sf = sf->shared;
3819 if (counter++ > current_sf_id)
3820 break; /* strange sharing loop.. quit */
3821 }
3822#else
3823 if (sf) {
3824 for (p = sf->samples; p; p = p->next) {
3825 if (p->v.sample == sample)
3826 return p;
3827 }
3828 }
3829#endif
3830 return NULL;
3831}
3832
3833/* search the specified sample */
3834/* non-zero = found */
3835static short
3836awe_set_sample(awe_voice_list *rec)
3837{
3838 awe_sample_list *smp;
3839 awe_voice_info *vp = &rec->v;
3840
3841 vp->index = 0;
3842 if ((smp = search_sample_index(rec->holder, vp->sample)) == NULL)
3843 return 0;
3844
3845 /* set the actual sample offsets */
3846 vp->start += smp->v.start;
3847 vp->end += smp->v.end;
3848 vp->loopstart += smp->v.loopstart;
3849 vp->loopend += smp->v.loopend;
3850 /* copy mode flags */
3851 vp->mode = smp->v.mode_flags;
3852 /* set flag */
3853 vp->index = 1;
3854
3855 return 1;
3856}
3857
3858
3859/*
3860 * voice allocation
3861 */
3862
3863/* look for all voices associated with the specified note & velocity */
3864static int
3865awe_search_multi_voices(awe_voice_list *rec, int note, int velocity,
3866 awe_voice_info **vlist)
3867{
3868 int nvoices;
3869
3870 nvoices = 0;
3871 for (; rec; rec = rec->next_instr) {
3872 if (note >= rec->v.low &&
3873 note <= rec->v.high &&
3874 velocity >= rec->v.vellow &&
3875 velocity <= rec->v.velhigh) {
3876 if (rec->type == V_ST_MAPPED) {
3877 /* mapper */
3878 vlist[0] = &rec->v;
3879 return -1;
3880 }
3881 vlist[nvoices++] = &rec->v;
3882 if (nvoices >= AWE_MAX_VOICES)
3883 break;
3884 }
3885 }
3886 return nvoices;
3887}
3888
/* store the voice list from the specified note and velocity.
   if the preset is mapped, seek for the destination preset, and rewrite
   the note number if necessary.
 */
/*
 * Fill vlist with the voice infos matching (bank, instr, *note,
 * velocity), falling back first to the configured default drumset /
 * default bank and then to drumset 0 / bank 0.  A mapper entry
 * redirects the search to another (bank, instr) and optionally a
 * fixed key, restarting the loop (at most ~5 levels deep).  Returns
 * the number of voices found (0 on failure); *note may be rewritten
 * by a mapper's fixkey.
 */
static int
really_alloc_voices(int bank, int instr, int *note, int velocity, awe_voice_info **vlist)
{
	int nvoices;
	awe_voice_list *vrec;
	int level = 0;	/* mapper indirection depth */

	for (;;) {
		vrec = awe_search_instr(bank, instr, *note);
		nvoices = awe_search_multi_voices(vrec, *note, velocity, vlist);
		if (nvoices == 0) {
			if (bank == AWE_DRUM_BANK)
				/* search default drumset */
				vrec = awe_search_instr(bank, ctrls[AWE_MD_DEF_DRUM], *note);
			else
				/* search default preset */
				vrec = awe_search_instr(ctrls[AWE_MD_DEF_BANK], instr, *note);
			nvoices = awe_search_multi_voices(vrec, *note, velocity, vlist);
		}
		if (nvoices == 0) {
			if (bank == AWE_DRUM_BANK && ctrls[AWE_MD_DEF_DRUM] != 0)
				/* search default drumset */
				vrec = awe_search_instr(bank, 0, *note);
			else if (bank != AWE_DRUM_BANK && ctrls[AWE_MD_DEF_BANK] != 0)
				/* search default preset */
				vrec = awe_search_instr(0, instr, *note);
			nvoices = awe_search_multi_voices(vrec, *note, velocity, vlist);
		}
		if (nvoices < 0) { /* mapping */
			int key = vlist[0]->fixkey;
			instr = vlist[0]->start;	/* mapper's dest instr lives in start */
			bank = vlist[0]->end;		/* ... and dest bank in end */
			if (level++ > 5) {
				printk(KERN_ERR "AWE32: too deep mapping level\n");
				return 0;
			}
			if (key >= 0)
				*note = key;
		} else
			break;
	}

	return nvoices;
}
3937
/* allocate voices corresponding note and velocity; supports multiple insts. */
/*
 * Note-on voice allocation: look up all sample layers matching
 * (channel, note, velocity), steal a hardware voice for each via
 * awe_clear_voice(), and temporarily mark them with AWE_ST_MARK so
 * the next steal in the same loop does not pick a voice just
 * allocated; the marks are turned into AWE_ST_OFF afterwards.
 */
static void
awe_alloc_multi_voices(int ch, int note, int velocity, int key)
{
	int i, v, nvoices, bank;
	awe_voice_info *vlist[AWE_MAX_VOICES];

	if (MULTI_LAYER_MODE() && IS_DRUM_CHANNEL(ch))
		bank = AWE_DRUM_BANK; /* always search drumset */
	else
		bank = channels[ch].bank;

	/* check the possible voices; note may be changeable if mapped */
	nvoices = really_alloc_voices(bank, channels[ch].instr,
				      &note, velocity, vlist);

	/* set the voices */
	current_alloc_time++;
	for (i = 0; i < nvoices; i++) {
		v = awe_clear_voice();
		voices[v].key = key;
		voices[v].ch = ch;
		voices[v].note = note;
		voices[v].velocity = velocity;
		voices[v].time = current_alloc_time;
		voices[v].cinfo = &channels[ch];
		voices[v].sample = vlist[i];
		voices[v].state = AWE_ST_MARK;
		voices[v].layer = nvoices - i - 1; /* in reverse order */
	}

	/* clear the mark in allocated voices */
	for (i = 0; i < awe_max_voices; i++) {
		if (voices[i].state == AWE_ST_MARK)
			voices[i].state = AWE_ST_OFF;

	}
}
3976
3977
/* search an empty voice.
   if no empty voice is found, at least terminate a voice
 */
/*
 * Voice stealing: scan all voices and, per state class (off,
 * released, sustained, playing -- in that priority order), remember
 * the best candidate: lowest volume target (only meaningful when
 * AWE_CHECK_VTARGET is compiled in; otherwise vtarget stays 0xffff
 * and allocation age alone decides) and oldest allocation time.
 * The first class with a candidate wins; its voice is terminated if
 * still sounding, re-initialized, and its index returned.
 */
static int
awe_clear_voice(void)
{
	enum {
		OFF=0, RELEASED, SUSTAINED, PLAYING, END
	};
	struct voice_candidate_t {
		int best;	/* candidate voice index, -1 = none */
		int time;	/* allocation time of the candidate */
		int vtarget;	/* volume target of the candidate */
	} candidate[END];
	int i, type, vtarget;

	vtarget = 0xffff;
	for (type = OFF; type < END; type++) {
		candidate[type].best = -1;
		candidate[type].time = current_alloc_time + 1;
		candidate[type].vtarget = vtarget;
	}

	for (i = 0; i < awe_max_voices; i++) {
		if (voices[i].state & AWE_ST_OFF)
			type = OFF;
		else if (voices[i].state & AWE_ST_RELEASED)
			type = RELEASED;
		else if (voices[i].state & AWE_ST_SUSTAINED)
			type = SUSTAINED;
		else if (voices[i].state & ~AWE_ST_MARK)
			type = PLAYING;
		else
			continue;	/* only the MARK bit set: skip */
#ifdef AWE_CHECK_VTARGET
		/* get current volume */
		vtarget = (awe_peek_dw(AWE_VTFT(i)) >> 16) & 0xffff;
#endif
		if (candidate[type].best < 0 ||
		    vtarget < candidate[type].vtarget ||
		    (vtarget == candidate[type].vtarget &&
		     voices[i].time < candidate[type].time)) {
			candidate[type].best = i;
			candidate[type].time = voices[i].time;
			candidate[type].vtarget = vtarget;
		}
	}

	for (type = OFF; type < END; type++) {
		if ((i = candidate[type].best) >= 0) {
			if (voices[i].state != AWE_ST_OFF)
				awe_terminate(i);
			awe_voice_init(i, TRUE);
			return i;
		}
	}
	return 0;
}
4036
4037
4038/* search sample for the specified note & velocity and set it on the voice;
4039 * note that voice is the voice index (not channel index)
4040 */
4041static void
4042awe_alloc_one_voice(int voice, int note, int velocity)
4043{
4044 int ch, nvoices, bank;
4045 awe_voice_info *vlist[AWE_MAX_VOICES];
4046
4047 ch = voices[voice].ch;
4048 if (MULTI_LAYER_MODE() && IS_DRUM_CHANNEL(voice))
4049 bank = AWE_DRUM_BANK; /* always search drumset */
4050 else
4051 bank = voices[voice].cinfo->bank;
4052
4053 nvoices = really_alloc_voices(bank, voices[voice].cinfo->instr,
4054 &note, velocity, vlist);
4055 if (nvoices > 0) {
4056 voices[voice].time = ++current_alloc_time;
4057 voices[voice].sample = vlist[0]; /* use the first one */
4058 voices[voice].layer = 0;
4059 voices[voice].note = note;
4060 voices[voice].velocity = velocity;
4061 }
4062}
4063
4064
4065/*
4066 * sequencer2 functions
4067 */
4068
4069/* search an empty voice; used by sequencer2 */
4070static int
4071awe_alloc(int dev, int chn, int note, struct voice_alloc_info *alloc)
4072{
4073 playing_mode = AWE_PLAY_MULTI2;
4074 awe_info.nr_voices = AWE_MAX_CHANNELS;
4075 return awe_clear_voice();
4076}
4077
4078
4079/* set up voice; used by sequencer2 */
4080static void
4081awe_setup_voice(int dev, int voice, int chn)
4082{
4083 struct channel_info *info;
4084 if (synth_devs[dev] == NULL ||
4085 (info = &synth_devs[dev]->chn_info[chn]) == NULL)
4086 return;
4087
4088 if (voice < 0 || voice >= awe_max_voices)
4089 return;
4090
4091 DEBUG(2,printk("AWE32: [setup(%d) ch=%d]\n", voice, chn));
4092 channels[chn].expression_vol = info->controllers[CTL_EXPRESSION];
4093 channels[chn].main_vol = info->controllers[CTL_MAIN_VOLUME];
4094 channels[chn].panning =
4095 info->controllers[CTL_PAN] * 2 - 128; /* signed 8bit */
4096 channels[chn].bender = info->bender_value; /* zero center */
4097 channels[chn].bank = info->controllers[CTL_BANK_SELECT];
4098 channels[chn].sustained = info->controllers[CTL_SUSTAIN];
4099 if (info->controllers[CTL_EXT_EFF_DEPTH]) {
4100 FX_SET(&channels[chn].fx, AWE_FX_REVERB,
4101 info->controllers[CTL_EXT_EFF_DEPTH] * 2);
4102 }
4103 if (info->controllers[CTL_CHORUS_DEPTH]) {
4104 FX_SET(&channels[chn].fx, AWE_FX_CHORUS,
4105 info->controllers[CTL_CHORUS_DEPTH] * 2);
4106 }
4107 awe_set_instr(dev, chn, info->pgm_num);
4108}
4109
4110
4111#ifdef CONFIG_AWE32_MIXER
4112/*
4113 * AWE32 mixer device control
4114 */
4115
/* forward declaration; handler is defined below */
static int awe_mixer_ioctl(int dev, unsigned int cmd, void __user *arg);

/* mixer device number allocated at attach time; -1 = not registered */
static int my_mixerdev = -1;

/* OSS mixer operations exposing the AWE32 equalizer controls */
static struct mixer_operations awe_mixer_operations = {
	.owner = THIS_MODULE,
	.id = "AWE",
	.name = "AWE32 Equalizer",
	.ioctl = awe_mixer_ioctl,
};
4126
4127static void __init attach_mixer(void)
4128{
4129 if ((my_mixerdev = sound_alloc_mixerdev()) >= 0) {
4130 mixer_devs[my_mixerdev] = &awe_mixer_operations;
4131 }
4132}
4133
4134static void unload_mixer(void)
4135{
4136 if (my_mixerdev >= 0)
4137 sound_unload_mixerdev(my_mixerdev);
4138}
4139
/*
 * OSS mixer ioctl handler: supports bass/treble (mapped to the
 * 0..11 equalizer steps) and master volume (attenuation table
 * lookup).  The stereo value read from user space is averaged into
 * one 0..100 level; on read-back the level is duplicated into both
 * channel bytes.  Returns the non-negative level on success,
 * -EINVAL on an unknown ioctl class, -EFAULT on copy failure.
 * NOTE(review): the write path scales with *12/100 while the read
 * path uses *100/24 -- confirm the asymmetry is intended.
 */
static int
awe_mixer_ioctl(int dev, unsigned int cmd, void __user * arg)
{
	int i, level, value;

	/* only the 'M' (mixer) ioctl class is handled */
	if (((cmd >> 8) & 0xff) != 'M')
		return -EINVAL;

	if (get_user(level, (int __user *)arg))
		return -EFAULT;
	level = ((level & 0xff) + (level >> 8)) / 2;	/* average L/R bytes */
	DEBUG(0,printk("AWEMix: cmd=%x val=%d\n", cmd & 0xff, level));

	if (_SIOC_DIR(cmd) & _SIOC_WRITE) {
		switch (cmd & 0xff) {
		case SOUND_MIXER_BASS:
			value = level * 12 / 100;	/* 0-100 -> 0-11 */
			if (value >= 12)
				value = 11;
			ctrls[AWE_MD_BASS_LEVEL] = value;
			awe_update_equalizer();
			break;
		case SOUND_MIXER_TREBLE:
			value = level * 12 / 100;	/* 0-100 -> 0-11 */
			if (value >= 12)
				value = 11;
			ctrls[AWE_MD_TREBLE_LEVEL] = value;
			awe_update_equalizer();
			break;
		case SOUND_MIXER_VOLUME:
			level = level * 127 / 100;	/* 0-100 -> 0-127 */
			if (level >= 128) level = 127;
			atten_relative = FALSE;
			atten_offset = vol_table[level];
			awe_update_volume();
			break;
		}
	}
	/* read-back: convert the stored control back to a 0-100 level */
	switch (cmd & 0xff) {
	case SOUND_MIXER_BASS:
		level = ctrls[AWE_MD_BASS_LEVEL] * 100 / 24;
		level = (level << 8) | level;
		break;
	case SOUND_MIXER_TREBLE:
		level = ctrls[AWE_MD_TREBLE_LEVEL] * 100 / 24;
		level = (level << 8) | level;
		break;
	case SOUND_MIXER_VOLUME:
		value = atten_offset;
		if (atten_relative)
			value += ctrls[AWE_MD_ZERO_ATTEN];
		/* reverse-lookup the attenuation in vol_table */
		for (i = 127; i > 0; i--) {
			if (value <= vol_table[i])
				break;
		}
		level = i * 100 / 127;
		level = (level << 8) | level;
		break;
	case SOUND_MIXER_DEVMASK:
		level = SOUND_MASK_BASS|SOUND_MASK_TREBLE|SOUND_MASK_VOLUME;
		break;
	default:
		level = 0;
		break;
	}
	if (put_user(level, (int __user *)arg))
		return -EFAULT;
	return level;
}
4209#endif /* CONFIG_AWE32_MIXER */
4210
4211
4212/*
4213 * initialization of Emu8000
4214 */
4215
/* initialize audio channels */
/*
 * Bring all Emu8000 voice channels to a known silent state: first
 * shut down every envelope engine (DCYSUSV = 0x80), then zero all
 * per-voice envelope/LFO/filter/address registers, and finally clear
 * the current pitch/filter (CPF/CVCF) registers.  The three separate
 * passes preserve the original register write order -- presumably
 * required by the chip's init sequence; do not reorder without
 * checking the EMU8000 documentation.
 */
static void
awe_init_audio(void)
{
	int ch;

	/* turn off envelope engines */
	for (ch = 0; ch < AWE_MAX_VOICES; ch++) {
		awe_poke(AWE_DCYSUSV(ch), 0x80);
	}

	/* reset all other parameters to zero */
	for (ch = 0; ch < AWE_MAX_VOICES; ch++) {
		awe_poke(AWE_ENVVOL(ch), 0);
		awe_poke(AWE_ENVVAL(ch), 0);
		awe_poke(AWE_DCYSUS(ch), 0);
		awe_poke(AWE_ATKHLDV(ch), 0);
		awe_poke(AWE_LFO1VAL(ch), 0);
		awe_poke(AWE_ATKHLD(ch), 0);
		awe_poke(AWE_LFO2VAL(ch), 0);
		awe_poke(AWE_IP(ch), 0);
		awe_poke(AWE_IFATN(ch), 0);
		awe_poke(AWE_PEFE(ch), 0);
		awe_poke(AWE_FMMOD(ch), 0);
		awe_poke(AWE_TREMFRQ(ch), 0);
		awe_poke(AWE_FM2FRQ2(ch), 0);
		awe_poke_dw(AWE_PTRX(ch), 0);
		awe_poke_dw(AWE_VTFT(ch), 0);
		awe_poke_dw(AWE_PSST(ch), 0);
		awe_poke_dw(AWE_CSL(ch), 0);
		awe_poke_dw(AWE_CCCA(ch), 0);
	}

	/* clear current pitch/filter registers last */
	for (ch = 0; ch < AWE_MAX_VOICES; ch++) {
		awe_poke_dw(AWE_CPF(ch), 0);
		awe_poke_dw(AWE_CVCF(ch), 0);
	}
}
4254
4255
/* initialize DMA address: clear the four sound-memory address registers
 * (read and write ports) used for DRAM access */
static void
awe_init_dma(void)
{
	awe_poke_dw(AWE_SMALR, 0);
	awe_poke_dw(AWE_SMARR, 0);
	awe_poke_dw(AWE_SMALW, 0);
	awe_poke_dw(AWE_SMARW, 0);
}
4265
4266
/* initialization arrays; from ADIP */
/* each table holds one 16-bit word per voice for the four INIT register
 * banks (4 x 32 voices = 128 words); sent to the chip by awe_send_array() */

static unsigned short init1[128] = {
	0x03ff, 0x0030, 0x07ff, 0x0130, 0x0bff, 0x0230, 0x0fff, 0x0330,
	0x13ff, 0x0430, 0x17ff, 0x0530, 0x1bff, 0x0630, 0x1fff, 0x0730,
	0x23ff, 0x0830, 0x27ff, 0x0930, 0x2bff, 0x0a30, 0x2fff, 0x0b30,
	0x33ff, 0x0c30, 0x37ff, 0x0d30, 0x3bff, 0x0e30, 0x3fff, 0x0f30,

	0x43ff, 0x0030, 0x47ff, 0x0130, 0x4bff, 0x0230, 0x4fff, 0x0330,
	0x53ff, 0x0430, 0x57ff, 0x0530, 0x5bff, 0x0630, 0x5fff, 0x0730,
	0x63ff, 0x0830, 0x67ff, 0x0930, 0x6bff, 0x0a30, 0x6fff, 0x0b30,
	0x73ff, 0x0c30, 0x77ff, 0x0d30, 0x7bff, 0x0e30, 0x7fff, 0x0f30,

	0x83ff, 0x0030, 0x87ff, 0x0130, 0x8bff, 0x0230, 0x8fff, 0x0330,
	0x93ff, 0x0430, 0x97ff, 0x0530, 0x9bff, 0x0630, 0x9fff, 0x0730,
	0xa3ff, 0x0830, 0xa7ff, 0x0930, 0xabff, 0x0a30, 0xafff, 0x0b30,
	0xb3ff, 0x0c30, 0xb7ff, 0x0d30, 0xbbff, 0x0e30, 0xbfff, 0x0f30,

	0xc3ff, 0x0030, 0xc7ff, 0x0130, 0xcbff, 0x0230, 0xcfff, 0x0330,
	0xd3ff, 0x0430, 0xd7ff, 0x0530, 0xdbff, 0x0630, 0xdfff, 0x0730,
	0xe3ff, 0x0830, 0xe7ff, 0x0930, 0xebff, 0x0a30, 0xefff, 0x0b30,
	0xf3ff, 0x0c30, 0xf7ff, 0x0d30, 0xfbff, 0x0e30, 0xffff, 0x0f30,
};

/* second pass: identical to init1 except the high bit of every even
 * word is set */
static unsigned short init2[128] = {
	0x03ff, 0x8030, 0x07ff, 0x8130, 0x0bff, 0x8230, 0x0fff, 0x8330,
	0x13ff, 0x8430, 0x17ff, 0x8530, 0x1bff, 0x8630, 0x1fff, 0x8730,
	0x23ff, 0x8830, 0x27ff, 0x8930, 0x2bff, 0x8a30, 0x2fff, 0x8b30,
	0x33ff, 0x8c30, 0x37ff, 0x8d30, 0x3bff, 0x8e30, 0x3fff, 0x8f30,

	0x43ff, 0x8030, 0x47ff, 0x8130, 0x4bff, 0x8230, 0x4fff, 0x8330,
	0x53ff, 0x8430, 0x57ff, 0x8530, 0x5bff, 0x8630, 0x5fff, 0x8730,
	0x63ff, 0x8830, 0x67ff, 0x8930, 0x6bff, 0x8a30, 0x6fff, 0x8b30,
	0x73ff, 0x8c30, 0x77ff, 0x8d30, 0x7bff, 0x8e30, 0x7fff, 0x8f30,

	0x83ff, 0x8030, 0x87ff, 0x8130, 0x8bff, 0x8230, 0x8fff, 0x8330,
	0x93ff, 0x8430, 0x97ff, 0x8530, 0x9bff, 0x8630, 0x9fff, 0x8730,
	0xa3ff, 0x8830, 0xa7ff, 0x8930, 0xabff, 0x8a30, 0xafff, 0x8b30,
	0xb3ff, 0x8c30, 0xb7ff, 0x8d30, 0xbbff, 0x8e30, 0xbfff, 0x8f30,

	0xc3ff, 0x8030, 0xc7ff, 0x8130, 0xcbff, 0x8230, 0xcfff, 0x8330,
	0xd3ff, 0x8430, 0xd7ff, 0x8530, 0xdbff, 0x8630, 0xdfff, 0x8730,
	0xe3ff, 0x8830, 0xe7ff, 0x8930, 0xebff, 0x8a30, 0xefff, 0x8b30,
	0xf3ff, 0x8c30, 0xf7ff, 0x8d30, 0xfbff, 0x8e30, 0xffff, 0x8f30,
};

/* third pass; opaque coefficient data taken verbatim from ADIP */
static unsigned short init3[128] = {
	0x0C10, 0x8470, 0x14FE, 0xB488, 0x167F, 0xA470, 0x18E7, 0x84B5,
	0x1B6E, 0x842A, 0x1F1D, 0x852A, 0x0DA3, 0x8F7C, 0x167E, 0xF254,
	0x0000, 0x842A, 0x0001, 0x852A, 0x18E6, 0x8BAA, 0x1B6D, 0xF234,
	0x229F, 0x8429, 0x2746, 0x8529, 0x1F1C, 0x86E7, 0x229E, 0xF224,

	0x0DA4, 0x8429, 0x2C29, 0x8529, 0x2745, 0x87F6, 0x2C28, 0xF254,
	0x383B, 0x8428, 0x320F, 0x8528, 0x320E, 0x8F02, 0x1341, 0xF264,
	0x3EB6, 0x8428, 0x3EB9, 0x8528, 0x383A, 0x8FA9, 0x3EB5, 0xF294,
	0x3EB7, 0x8474, 0x3EBA, 0x8575, 0x3EB8, 0xC4C3, 0x3EBB, 0xC5C3,

	0x0000, 0xA404, 0x0001, 0xA504, 0x141F, 0x8671, 0x14FD, 0x8287,
	0x3EBC, 0xE610, 0x3EC8, 0x8C7B, 0x031A, 0x87E6, 0x3EC8, 0x86F7,
	0x3EC0, 0x821E, 0x3EBE, 0xD208, 0x3EBD, 0x821F, 0x3ECA, 0x8386,
	0x3EC1, 0x8C03, 0x3EC9, 0x831E, 0x3ECA, 0x8C4C, 0x3EBF, 0x8C55,

	0x3EC9, 0xC208, 0x3EC4, 0xBC84, 0x3EC8, 0x8EAD, 0x3EC8, 0xD308,
	0x3EC2, 0x8F7E, 0x3ECB, 0x8219, 0x3ECB, 0xD26E, 0x3EC5, 0x831F,
	0x3EC6, 0xC308, 0x3EC3, 0xB2FF, 0x3EC9, 0x8265, 0x3EC9, 0x8319,
	0x1342, 0xD36E, 0x3EC7, 0xB3FF, 0x0000, 0x8365, 0x1420, 0x9570,
};

/* fourth pass; opaque coefficient data taken verbatim from ADIP */
static unsigned short init4[128] = {
	0x0C10, 0x8470, 0x14FE, 0xB488, 0x167F, 0xA470, 0x18E7, 0x84B5,
	0x1B6E, 0x842A, 0x1F1D, 0x852A, 0x0DA3, 0x0F7C, 0x167E, 0x7254,
	0x0000, 0x842A, 0x0001, 0x852A, 0x18E6, 0x0BAA, 0x1B6D, 0x7234,
	0x229F, 0x8429, 0x2746, 0x8529, 0x1F1C, 0x06E7, 0x229E, 0x7224,

	0x0DA4, 0x8429, 0x2C29, 0x8529, 0x2745, 0x07F6, 0x2C28, 0x7254,
	0x383B, 0x8428, 0x320F, 0x8528, 0x320E, 0x0F02, 0x1341, 0x7264,
	0x3EB6, 0x8428, 0x3EB9, 0x8528, 0x383A, 0x0FA9, 0x3EB5, 0x7294,
	0x3EB7, 0x8474, 0x3EBA, 0x8575, 0x3EB8, 0x44C3, 0x3EBB, 0x45C3,

	0x0000, 0xA404, 0x0001, 0xA504, 0x141F, 0x0671, 0x14FD, 0x0287,
	0x3EBC, 0xE610, 0x3EC8, 0x0C7B, 0x031A, 0x07E6, 0x3EC8, 0x86F7,
	0x3EC0, 0x821E, 0x3EBE, 0xD208, 0x3EBD, 0x021F, 0x3ECA, 0x0386,
	0x3EC1, 0x0C03, 0x3EC9, 0x031E, 0x3ECA, 0x8C4C, 0x3EBF, 0x0C55,

	0x3EC9, 0xC208, 0x3EC4, 0xBC84, 0x3EC8, 0x0EAD, 0x3EC8, 0xD308,
	0x3EC2, 0x8F7E, 0x3ECB, 0x0219, 0x3ECB, 0xD26E, 0x3EC5, 0x031F,
	0x3EC6, 0xC308, 0x3EC3, 0x32FF, 0x3EC9, 0x0265, 0x3EC9, 0x8319,
	0x1342, 0xD36E, 0x3EC7, 0x33FF, 0x0000, 0x8365, 0x1420, 0x9570,
};
4356
4357
/* send initialization arrays to start up; the pass order, the pause after
 * the first pass, and the HWCF4-6 writes between passes follow ADIP */
static void
awe_init_array(void)
{
	awe_send_array(init1);
	awe_wait(1024);
	awe_send_array(init2);
	awe_send_array(init3);
	awe_poke_dw(AWE_HWCF4, 0);
	awe_poke_dw(AWE_HWCF5, 0x83);
	awe_poke_dw(AWE_HWCF6, 0x8000);
	awe_send_array(init4);
}
4371
4372/* send an initialization array */
4373static void
4374awe_send_array(unsigned short *data)
4375{
4376 int i;
4377 unsigned short *p;
4378
4379 p = data;
4380 for (i = 0; i < AWE_MAX_VOICES; i++, p++)
4381 awe_poke(AWE_INIT1(i), *p);
4382 for (i = 0; i < AWE_MAX_VOICES; i++, p++)
4383 awe_poke(AWE_INIT2(i), *p);
4384 for (i = 0; i < AWE_MAX_VOICES; i++, p++)
4385 awe_poke(AWE_INIT3(i), *p);
4386 for (i = 0; i < AWE_MAX_VOICES; i++, p++)
4387 awe_poke(AWE_INIT4(i), *p);
4388}
4389
4390
4391/*
4392 * set up awe32 channels to some known state.
4393 */
4394
/* set the envelope & LFO parameters of one voice to the default values;
 * see ADIP.  Leaves the voice silent (attenuation at minimum volume)
 * with no pitch shift or modulation. */
static void
awe_tweak_voice(int i)
{
	/* set all mod/vol envelope shape to minimum */
	awe_poke(AWE_ENVVOL(i), 0x8000);
	awe_poke(AWE_ENVVAL(i), 0x8000);
	awe_poke(AWE_DCYSUS(i), 0x7F7F);
	awe_poke(AWE_ATKHLDV(i), 0x7F7F);
	awe_poke(AWE_ATKHLD(i), 0x7F7F);
	awe_poke(AWE_PEFE(i), 0);	/* mod envelope height to zero */
	awe_poke(AWE_LFO1VAL(i), 0x8000);	/* no delay for LFO1 */
	awe_poke(AWE_LFO2VAL(i), 0x8000);
	awe_poke(AWE_IP(i), 0xE000);	/* no pitch shift */
	awe_poke(AWE_IFATN(i), 0xFF00);	/* volume to minimum */
	awe_poke(AWE_FMMOD(i), 0);
	awe_poke(AWE_TREMFRQ(i), 0);
	awe_poke(AWE_FM2FRQ2(i), 0);
}
4414
4415static void
4416awe_tweak(void)
4417{
4418 int i;
4419 /* reset all channels */
4420 for (i = 0; i < awe_max_voices; i++)
4421 awe_tweak_voice(i);
4422}
4423
4424
4425/*
4426 * initializes the FM section of AWE32;
4427 * see Vince Vu's unofficial AWE32 programming guide
4428 */
4429
4430static void
4431awe_init_fm(void)
4432{
4433#ifndef AWE_ALWAYS_INIT_FM
4434 /* if no extended memory is on board.. */
4435 if (memsize <= 0)
4436 return;
4437#endif
4438 DEBUG(3,printk("AWE32: initializing FM\n"));
4439
4440 /* Initialize the last two channels for DRAM refresh and producing
4441 the reverb and chorus effects for Yamaha OPL-3 synthesizer */
4442
4443 /* 31: FM left channel, 0xffffe0-0xffffe8 */
4444 awe_poke(AWE_DCYSUSV(30), 0x80);
4445 awe_poke_dw(AWE_PSST(30), 0xFFFFFFE0); /* full left */
4446 awe_poke_dw(AWE_CSL(30), 0x00FFFFE8 |
4447 (DEF_FM_CHORUS_DEPTH << 24));
4448 awe_poke_dw(AWE_PTRX(30), (DEF_FM_REVERB_DEPTH << 8));
4449 awe_poke_dw(AWE_CPF(30), 0);
4450 awe_poke_dw(AWE_CCCA(30), 0x00FFFFE3);
4451
4452 /* 32: FM right channel, 0xfffff0-0xfffff8 */
4453 awe_poke(AWE_DCYSUSV(31), 0x80);
4454 awe_poke_dw(AWE_PSST(31), 0x00FFFFF0); /* full right */
4455 awe_poke_dw(AWE_CSL(31), 0x00FFFFF8 |
4456 (DEF_FM_CHORUS_DEPTH << 24));
4457 awe_poke_dw(AWE_PTRX(31), (DEF_FM_REVERB_DEPTH << 8));
4458 awe_poke_dw(AWE_CPF(31), 0x8000);
4459 awe_poke_dw(AWE_CCCA(31), 0x00FFFFF3);
4460
4461 /* skew volume & cutoff */
4462 awe_poke_dw(AWE_VTFT(30), 0x8000FFFF);
4463 awe_poke_dw(AWE_VTFT(31), 0x8000FFFF);
4464
4465 voices[30].state = AWE_ST_FM;
4466 voices[31].state = AWE_ST_FM;
4467
4468 /* change maximum channels to 30 */
4469 awe_max_voices = AWE_NORMAL_VOICES;
4470 if (playing_mode == AWE_PLAY_DIRECT)
4471 awe_info.nr_voices = awe_max_voices;
4472 else
4473 awe_info.nr_voices = AWE_MAX_CHANNELS;
4474 voice_alloc->max_voice = awe_max_voices;
4475}
4476
4477/*
4478 * AWE32 DRAM access routines
4479 */
4480
4481/* open DRAM write accessing mode */
4482static int
4483awe_open_dram_for_write(int offset, int channels)
4484{
4485 int vidx[AWE_NORMAL_VOICES];
4486 int i;
4487
4488 if (channels < 0 || channels >= AWE_NORMAL_VOICES) {
4489 channels = AWE_NORMAL_VOICES;
4490 for (i = 0; i < AWE_NORMAL_VOICES; i++)
4491 vidx[i] = i;
4492 } else {
4493 for (i = 0; i < channels; i++) {
4494 vidx[i] = awe_clear_voice();
4495 voices[vidx[i]].state = AWE_ST_MARK;
4496 }
4497 }
4498
4499 /* use all channels for DMA transfer */
4500 for (i = 0; i < channels; i++) {
4501 if (vidx[i] < 0) continue;
4502 awe_poke(AWE_DCYSUSV(vidx[i]), 0x80);
4503 awe_poke_dw(AWE_VTFT(vidx[i]), 0);
4504 awe_poke_dw(AWE_CVCF(vidx[i]), 0);
4505 awe_poke_dw(AWE_PTRX(vidx[i]), 0x40000000);
4506 awe_poke_dw(AWE_CPF(vidx[i]), 0x40000000);
4507 awe_poke_dw(AWE_PSST(vidx[i]), 0);
4508 awe_poke_dw(AWE_CSL(vidx[i]), 0);
4509 awe_poke_dw(AWE_CCCA(vidx[i]), 0x06000000);
4510 voices[vidx[i]].state = AWE_ST_DRAM;
4511 }
4512 /* point channels 31 & 32 to ROM samples for DRAM refresh */
4513 awe_poke_dw(AWE_VTFT(30), 0);
4514 awe_poke_dw(AWE_PSST(30), 0x1d8);
4515 awe_poke_dw(AWE_CSL(30), 0x1e0);
4516 awe_poke_dw(AWE_CCCA(30), 0x1d8);
4517 awe_poke_dw(AWE_VTFT(31), 0);
4518 awe_poke_dw(AWE_PSST(31), 0x1d8);
4519 awe_poke_dw(AWE_CSL(31), 0x1e0);
4520 awe_poke_dw(AWE_CCCA(31), 0x1d8);
4521 voices[30].state = AWE_ST_FM;
4522 voices[31].state = AWE_ST_FM;
4523
4524 /* if full bit is on, not ready to write on */
4525 if (awe_peek_dw(AWE_SMALW) & 0x80000000) {
4526 for (i = 0; i < channels; i++) {
4527 awe_poke_dw(AWE_CCCA(vidx[i]), 0);
4528 voices[vidx[i]].state = AWE_ST_OFF;
4529 }
4530 printk("awe: not ready to write..\n");
4531 return -EPERM;
4532 }
4533
4534 /* set address to write */
4535 awe_poke_dw(AWE_SMALW, offset);
4536
4537 return 0;
4538}
4539
4540/* open DRAM for RAM size detection */
4541static void
4542awe_open_dram_for_check(void)
4543{
4544 int i;
4545 for (i = 0; i < AWE_NORMAL_VOICES; i++) {
4546 awe_poke(AWE_DCYSUSV(i), 0x80);
4547 awe_poke_dw(AWE_VTFT(i), 0);
4548 awe_poke_dw(AWE_CVCF(i), 0);
4549 awe_poke_dw(AWE_PTRX(i), 0x40000000);
4550 awe_poke_dw(AWE_CPF(i), 0x40000000);
4551 awe_poke_dw(AWE_PSST(i), 0);
4552 awe_poke_dw(AWE_CSL(i), 0);
4553 if (i & 1) /* DMA write */
4554 awe_poke_dw(AWE_CCCA(i), 0x06000000);
4555 else /* DMA read */
4556 awe_poke_dw(AWE_CCCA(i), 0x04000000);
4557 voices[i].state = AWE_ST_DRAM;
4558 }
4559}
4560
4561
4562/* close dram access */
4563static void
4564awe_close_dram(void)
4565{
4566 int i;
4567 /* wait until FULL bit in SMAxW register be false */
4568 for (i = 0; i < 10000; i++) {
4569 if (!(awe_peek_dw(AWE_SMALW) & 0x80000000))
4570 break;
4571 awe_wait(10);
4572 }
4573
4574 for (i = 0; i < AWE_NORMAL_VOICES; i++) {
4575 if (voices[i].state == AWE_ST_DRAM) {
4576 awe_poke_dw(AWE_CCCA(i), 0);
4577 awe_poke(AWE_DCYSUSV(i), 0x807F);
4578 voices[i].state = AWE_ST_OFF;
4579 }
4580 }
4581}
4582
4583
4584/*
4585 * check dram size on AWE board
4586 */
4587
4588/* any three numbers you like */
4589#define UNIQUE_ID1 0x1234
4590#define UNIQUE_ID2 0x4321
4591#define UNIQUE_ID3 0xABCD
4592
4593static void __init
4594awe_check_dram(void)
4595{
4596 if (awe_present) /* already initialized */
4597 return;
4598
4599 if (memsize >= 0) { /* given by config file or module option */
4600 memsize *= 1024; /* convert to Kbytes */
4601 return;
4602 }
4603
4604 awe_open_dram_for_check();
4605
4606 memsize = 0;
4607
4608 /* set up unique two id numbers */
4609 awe_poke_dw(AWE_SMALW, AWE_DRAM_OFFSET);
4610 awe_poke(AWE_SMLD, UNIQUE_ID1);
4611 awe_poke(AWE_SMLD, UNIQUE_ID2);
4612
4613 while (memsize < AWE_MAX_DRAM_SIZE) {
4614 awe_wait(5);
4615 /* read a data on the DRAM start address */
4616 awe_poke_dw(AWE_SMALR, AWE_DRAM_OFFSET);
4617 awe_peek(AWE_SMLD); /* discard stale data */
4618 if (awe_peek(AWE_SMLD) != UNIQUE_ID1)
4619 break;
4620 if (awe_peek(AWE_SMLD) != UNIQUE_ID2)
4621 break;
4622 memsize += 512; /* increment 512kbytes */
4623 /* Write a unique data on the test address;
4624 * if the address is out of range, the data is written on
4625 * 0x200000(=AWE_DRAM_OFFSET). Then the two id words are
4626 * broken by this data.
4627 */
4628 awe_poke_dw(AWE_SMALW, AWE_DRAM_OFFSET + memsize*512L);
4629 awe_poke(AWE_SMLD, UNIQUE_ID3);
4630 awe_wait(5);
4631 /* read a data on the just written DRAM address */
4632 awe_poke_dw(AWE_SMALR, AWE_DRAM_OFFSET + memsize*512L);
4633 awe_peek(AWE_SMLD); /* discard stale data */
4634 if (awe_peek(AWE_SMLD) != UNIQUE_ID3)
4635 break;
4636 }
4637 awe_close_dram();
4638
4639 DEBUG(0,printk("AWE32: %d Kbytes memory detected\n", memsize));
4640
4641 /* convert to Kbytes */
4642 memsize *= 1024;
4643}
4644
4645
4646/*----------------------------------------------------------------*/
4647
4648/*
4649 * chorus and reverb controls; from VV's guide
4650 */
4651
4652/* 5 parameters for each chorus mode; 3 x 16bit, 2 x 32bit */
4653static char chorus_defined[AWE_CHORUS_NUMBERS];
4654static awe_chorus_fx_rec chorus_parm[AWE_CHORUS_NUMBERS] = {
4655 {0xE600, 0x03F6, 0xBC2C ,0x00000000, 0x0000006D}, /* chorus 1 */
4656 {0xE608, 0x031A, 0xBC6E, 0x00000000, 0x0000017C}, /* chorus 2 */
4657 {0xE610, 0x031A, 0xBC84, 0x00000000, 0x00000083}, /* chorus 3 */
4658 {0xE620, 0x0269, 0xBC6E, 0x00000000, 0x0000017C}, /* chorus 4 */
4659 {0xE680, 0x04D3, 0xBCA6, 0x00000000, 0x0000005B}, /* feedback */
4660 {0xE6E0, 0x044E, 0xBC37, 0x00000000, 0x00000026}, /* flanger */
4661 {0xE600, 0x0B06, 0xBC00, 0x0000E000, 0x00000083}, /* short delay */
4662 {0xE6C0, 0x0B06, 0xBC00, 0x0000E000, 0x00000083}, /* short delay + feedback */
4663};
4664
4665static int
4666awe_load_chorus_fx(awe_patch_info *patch, const char __user *addr, int count)
4667{
4668 if (patch->optarg < AWE_CHORUS_PREDEFINED || patch->optarg >= AWE_CHORUS_NUMBERS) {
4669 printk(KERN_WARNING "AWE32 Error: invalid chorus mode %d for uploading\n", patch->optarg);
4670 return -EINVAL;
4671 }
4672 if (count < sizeof(awe_chorus_fx_rec)) {
4673 printk(KERN_WARNING "AWE32 Error: too short chorus fx parameters\n");
4674 return -EINVAL;
4675 }
4676 if (copy_from_user(&chorus_parm[patch->optarg], addr + AWE_PATCH_INFO_SIZE,
4677 sizeof(awe_chorus_fx_rec)))
4678 return -EFAULT;
4679 chorus_defined[patch->optarg] = TRUE;
4680 return 0;
4681}
4682
/* program the chorus engine from chorus_parm[effect]; rejects invalid
 * modes and user-definable modes that have not been uploaded yet */
static void
awe_set_chorus_mode(int effect)
{
	if (effect < 0 || effect >= AWE_CHORUS_NUMBERS ||
	    (effect >= AWE_CHORUS_PREDEFINED && !chorus_defined[effect]))
		return;
	awe_poke(AWE_INIT3(9), chorus_parm[effect].feedback);
	awe_poke(AWE_INIT3(12), chorus_parm[effect].delay_offset);
	awe_poke(AWE_INIT4(3), chorus_parm[effect].lfo_depth);
	awe_poke_dw(AWE_HWCF4, chorus_parm[effect].delay);
	awe_poke_dw(AWE_HWCF5, chorus_parm[effect].lfo_freq);
	awe_poke_dw(AWE_HWCF6, 0x8000);
	awe_poke_dw(AWE_HWCF7, 0x0000);
}

/* re-apply the chorus mode currently selected in the control table */
static void
awe_update_chorus_mode(void)
{
	awe_set_chorus_mode(ctrls[AWE_MD_CHORUS_MODE]);
}
4703
4704/*----------------------------------------------------------------*/
4705
4706/* reverb mode settings; write the following 28 data of 16 bit length
4707 * on the corresponding ports in the reverb_cmds array
4708 */
4709static char reverb_defined[AWE_CHORUS_NUMBERS];
4710static awe_reverb_fx_rec reverb_parm[AWE_REVERB_NUMBERS] = {
4711{{ /* room 1 */
4712 0xB488, 0xA450, 0x9550, 0x84B5, 0x383A, 0x3EB5, 0x72F4,
4713 0x72A4, 0x7254, 0x7204, 0x7204, 0x7204, 0x4416, 0x4516,
4714 0xA490, 0xA590, 0x842A, 0x852A, 0x842A, 0x852A, 0x8429,
4715 0x8529, 0x8429, 0x8529, 0x8428, 0x8528, 0x8428, 0x8528,
4716}},
4717{{ /* room 2 */
4718 0xB488, 0xA458, 0x9558, 0x84B5, 0x383A, 0x3EB5, 0x7284,
4719 0x7254, 0x7224, 0x7224, 0x7254, 0x7284, 0x4448, 0x4548,
4720 0xA440, 0xA540, 0x842A, 0x852A, 0x842A, 0x852A, 0x8429,
4721 0x8529, 0x8429, 0x8529, 0x8428, 0x8528, 0x8428, 0x8528,
4722}},
4723{{ /* room 3 */
4724 0xB488, 0xA460, 0x9560, 0x84B5, 0x383A, 0x3EB5, 0x7284,
4725 0x7254, 0x7224, 0x7224, 0x7254, 0x7284, 0x4416, 0x4516,
4726 0xA490, 0xA590, 0x842C, 0x852C, 0x842C, 0x852C, 0x842B,
4727 0x852B, 0x842B, 0x852B, 0x842A, 0x852A, 0x842A, 0x852A,
4728}},
4729{{ /* hall 1 */
4730 0xB488, 0xA470, 0x9570, 0x84B5, 0x383A, 0x3EB5, 0x7284,
4731 0x7254, 0x7224, 0x7224, 0x7254, 0x7284, 0x4448, 0x4548,
4732 0xA440, 0xA540, 0x842B, 0x852B, 0x842B, 0x852B, 0x842A,
4733 0x852A, 0x842A, 0x852A, 0x8429, 0x8529, 0x8429, 0x8529,
4734}},
4735{{ /* hall 2 */
4736 0xB488, 0xA470, 0x9570, 0x84B5, 0x383A, 0x3EB5, 0x7254,
4737 0x7234, 0x7224, 0x7254, 0x7264, 0x7294, 0x44C3, 0x45C3,
4738 0xA404, 0xA504, 0x842A, 0x852A, 0x842A, 0x852A, 0x8429,
4739 0x8529, 0x8429, 0x8529, 0x8428, 0x8528, 0x8428, 0x8528,
4740}},
4741{{ /* plate */
4742 0xB4FF, 0xA470, 0x9570, 0x84B5, 0x383A, 0x3EB5, 0x7234,
4743 0x7234, 0x7234, 0x7234, 0x7234, 0x7234, 0x4448, 0x4548,
4744 0xA440, 0xA540, 0x842A, 0x852A, 0x842A, 0x852A, 0x8429,
4745 0x8529, 0x8429, 0x8529, 0x8428, 0x8528, 0x8428, 0x8528,
4746}},
4747{{ /* delay */
4748 0xB4FF, 0xA470, 0x9500, 0x84B5, 0x333A, 0x39B5, 0x7204,
4749 0x7204, 0x7204, 0x7204, 0x7204, 0x72F4, 0x4400, 0x4500,
4750 0xA4FF, 0xA5FF, 0x8420, 0x8520, 0x8420, 0x8520, 0x8420,
4751 0x8520, 0x8420, 0x8520, 0x8420, 0x8520, 0x8420, 0x8520,
4752}},
4753{{ /* panning delay */
4754 0xB4FF, 0xA490, 0x9590, 0x8474, 0x333A, 0x39B5, 0x7204,
4755 0x7204, 0x7204, 0x7204, 0x7204, 0x72F4, 0x4400, 0x4500,
4756 0xA4FF, 0xA5FF, 0x8420, 0x8520, 0x8420, 0x8520, 0x8420,
4757 0x8520, 0x8420, 0x8520, 0x8420, 0x8520, 0x8420, 0x8520,
4758}},
4759};
4760
/* target registers for the 28 reverb parameters; each AWE_INITn() macro
 * apparently expands to a cmd/port pair filling both struct members —
 * see the three-argument awe_poke() call in awe_set_reverb_mode() */
static struct ReverbCmdPair {
	unsigned short cmd, port;
} reverb_cmds[28] = {
  {AWE_INIT1(0x03)}, {AWE_INIT1(0x05)}, {AWE_INIT4(0x1F)}, {AWE_INIT1(0x07)},
  {AWE_INIT2(0x14)}, {AWE_INIT2(0x16)}, {AWE_INIT1(0x0F)}, {AWE_INIT1(0x17)},
  {AWE_INIT1(0x1F)}, {AWE_INIT2(0x07)}, {AWE_INIT2(0x0F)}, {AWE_INIT2(0x17)},
  {AWE_INIT2(0x1D)}, {AWE_INIT2(0x1F)}, {AWE_INIT3(0x01)}, {AWE_INIT3(0x03)},
  {AWE_INIT1(0x09)}, {AWE_INIT1(0x0B)}, {AWE_INIT1(0x11)}, {AWE_INIT1(0x13)},
  {AWE_INIT1(0x19)}, {AWE_INIT1(0x1B)}, {AWE_INIT2(0x01)}, {AWE_INIT2(0x03)},
  {AWE_INIT2(0x09)}, {AWE_INIT2(0x0B)}, {AWE_INIT2(0x11)}, {AWE_INIT2(0x13)},
};
4772
4773static int
4774awe_load_reverb_fx(awe_patch_info *patch, const char __user *addr, int count)
4775{
4776 if (patch->optarg < AWE_REVERB_PREDEFINED || patch->optarg >= AWE_REVERB_NUMBERS) {
4777 printk(KERN_WARNING "AWE32 Error: invalid reverb mode %d for uploading\n", patch->optarg);
4778 return -EINVAL;
4779 }
4780 if (count < sizeof(awe_reverb_fx_rec)) {
4781 printk(KERN_WARNING "AWE32 Error: too short reverb fx parameters\n");
4782 return -EINVAL;
4783 }
4784 if (copy_from_user(&reverb_parm[patch->optarg], addr + AWE_PATCH_INFO_SIZE,
4785 sizeof(awe_reverb_fx_rec)))
4786 return -EFAULT;
4787 reverb_defined[patch->optarg] = TRUE;
4788 return 0;
4789}
4790
/* program the reverb engine: write the 28 parameter words of the chosen
 * preset to their registers; rejects invalid modes and user-definable
 * modes that have not been uploaded yet */
static void
awe_set_reverb_mode(int effect)
{
	int i;
	if (effect < 0 || effect >= AWE_REVERB_NUMBERS ||
	    (effect >= AWE_REVERB_PREDEFINED && !reverb_defined[effect]))
		return;
	for (i = 0; i < 28; i++)
		awe_poke(reverb_cmds[i].cmd, reverb_cmds[i].port,
			 reverb_parm[effect].parms[i]);
}

/* re-apply the reverb mode currently selected in the control table */
static void
awe_update_reverb_mode(void)
{
	awe_set_reverb_mode(ctrls[AWE_MD_REVERB_MODE]);
}
4808
4809/*
4810 * treble/bass equalizer control
4811 */
4812
4813static unsigned short bass_parm[12][3] = {
4814 {0xD26A, 0xD36A, 0x0000}, /* -12 dB */
4815 {0xD25B, 0xD35B, 0x0000}, /* -8 */
4816 {0xD24C, 0xD34C, 0x0000}, /* -6 */
4817 {0xD23D, 0xD33D, 0x0000}, /* -4 */
4818 {0xD21F, 0xD31F, 0x0000}, /* -2 */
4819 {0xC208, 0xC308, 0x0001}, /* 0 (HW default) */
4820 {0xC219, 0xC319, 0x0001}, /* +2 */
4821 {0xC22A, 0xC32A, 0x0001}, /* +4 */
4822 {0xC24C, 0xC34C, 0x0001}, /* +6 */
4823 {0xC26E, 0xC36E, 0x0001}, /* +8 */
4824 {0xC248, 0xC348, 0x0002}, /* +10 */
4825 {0xC26A, 0xC36A, 0x0002}, /* +12 dB */
4826};
4827
4828static unsigned short treble_parm[12][9] = {
4829 {0x821E, 0xC26A, 0x031E, 0xC36A, 0x021E, 0xD208, 0x831E, 0xD308, 0x0001}, /* -12 dB */
4830 {0x821E, 0xC25B, 0x031E, 0xC35B, 0x021E, 0xD208, 0x831E, 0xD308, 0x0001},
4831 {0x821E, 0xC24C, 0x031E, 0xC34C, 0x021E, 0xD208, 0x831E, 0xD308, 0x0001},
4832 {0x821E, 0xC23D, 0x031E, 0xC33D, 0x021E, 0xD208, 0x831E, 0xD308, 0x0001},
4833 {0x821E, 0xC21F, 0x031E, 0xC31F, 0x021E, 0xD208, 0x831E, 0xD308, 0x0001},
4834 {0x821E, 0xD208, 0x031E, 0xD308, 0x021E, 0xD208, 0x831E, 0xD308, 0x0002},
4835 {0x821E, 0xD208, 0x031E, 0xD308, 0x021D, 0xD219, 0x831D, 0xD319, 0x0002},
4836 {0x821E, 0xD208, 0x031E, 0xD308, 0x021C, 0xD22A, 0x831C, 0xD32A, 0x0002},
4837 {0x821E, 0xD208, 0x031E, 0xD308, 0x021A, 0xD24C, 0x831A, 0xD34C, 0x0002},
4838 {0x821E, 0xD208, 0x031E, 0xD308, 0x0219, 0xD26E, 0x8319, 0xD36E, 0x0002}, /* +8 (HW default) */
4839 {0x821D, 0xD219, 0x031D, 0xD319, 0x0219, 0xD26E, 0x8319, 0xD36E, 0x0002},
4840 {0x821C, 0xD22A, 0x031C, 0xD32A, 0x0219, 0xD26E, 0x8319, 0xD36E, 0x0002}, /* +12 dB */
4841};
4842
4843
4844/*
4845 * set Emu8000 digital equalizer; from 0 to 11 [-12dB - 12dB]
4846 */
4847static void
4848awe_equalizer(int bass, int treble)
4849{
4850 unsigned short w;
4851
4852 if (bass < 0 || bass > 11 || treble < 0 || treble > 11)
4853 return;
4854 awe_poke(AWE_INIT4(0x01), bass_parm[bass][0]);
4855 awe_poke(AWE_INIT4(0x11), bass_parm[bass][1]);
4856 awe_poke(AWE_INIT3(0x11), treble_parm[treble][0]);
4857 awe_poke(AWE_INIT3(0x13), treble_parm[treble][1]);
4858 awe_poke(AWE_INIT3(0x1B), treble_parm[treble][2]);
4859 awe_poke(AWE_INIT4(0x07), treble_parm[treble][3]);
4860 awe_poke(AWE_INIT4(0x0B), treble_parm[treble][4]);
4861 awe_poke(AWE_INIT4(0x0D), treble_parm[treble][5]);
4862 awe_poke(AWE_INIT4(0x17), treble_parm[treble][6]);
4863 awe_poke(AWE_INIT4(0x19), treble_parm[treble][7]);
4864 w = bass_parm[bass][2] + treble_parm[treble][8];
4865 awe_poke(AWE_INIT4(0x15), (unsigned short)(w + 0x0262));
4866 awe_poke(AWE_INIT4(0x1D), (unsigned short)(w + 0x8362));
4867}
4868
4869static void awe_update_equalizer(void)
4870{
4871 awe_equalizer(ctrls[AWE_MD_BASS_LEVEL], ctrls[AWE_MD_TREBLE_LEVEL]);
4872}
4873
4874
4875/*----------------------------------------------------------------*/
4876
#ifdef CONFIG_AWE32_MIDIEMU

/*
 * Emu8000 MIDI Emulation
 */

/*
 * midi queue record
 */

/* queue type: what the byte parser is currently collecting */
enum { Q_NONE, Q_VARLEN, Q_READ, Q_SYSEX, };

#define MAX_MIDIBUF	64

/* midi status: per-stream parser state (see get_midi_char/queue_read) */
typedef struct MidiStatus {
	int queue;	/* queue type (Q_NONE/Q_READ/Q_SYSEX) */
	int qlen;	/* expected number of data bytes */
	int read;	/* chars read so far */
	int status;	/* current status (index into status_event[]) */
	int chan;	/* current channel (low nibble of status byte) */
	unsigned char buf[MAX_MIDIBUF];	/* collected data bytes */
} MidiStatus;

/* MIDI mode type */
enum { MODE_GM, MODE_GS, MODE_XG, };

/* NRPN / CC -> Emu8000 parameter converter */
typedef struct {
	int control;		/* NRPN/CC number to match */
	int awe_effect;		/* target Emu8000 effect parameter */
	unsigned short (*convert)(int val);	/* value conversion hook */
} ConvTable;
4911
4912
4913/*
4914 * prototypes
4915 */
4916
4917static int awe_midi_open(int dev, int mode, void (*input)(int,unsigned char), void (*output)(int));
4918static void awe_midi_close(int dev);
4919static int awe_midi_ioctl(int dev, unsigned cmd, void __user * arg);
4920static int awe_midi_outputc(int dev, unsigned char midi_byte);
4921
4922static void init_midi_status(MidiStatus *st);
4923static void clear_rpn(void);
4924static void get_midi_char(MidiStatus *st, int c);
4925/*static void queue_varlen(MidiStatus *st, int c);*/
4926static void special_event(MidiStatus *st, int c);
4927static void queue_read(MidiStatus *st, int c);
4928static void midi_note_on(MidiStatus *st);
4929static void midi_note_off(MidiStatus *st);
4930static void midi_key_pressure(MidiStatus *st);
4931static void midi_channel_pressure(MidiStatus *st);
4932static void midi_pitch_wheel(MidiStatus *st);
4933static void midi_program_change(MidiStatus *st);
4934static void midi_control_change(MidiStatus *st);
4935static void midi_select_bank(MidiStatus *st, int val);
4936static void midi_nrpn_event(MidiStatus *st);
4937static void midi_rpn_event(MidiStatus *st);
4938static void midi_detune(int chan, int coarse, int fine);
4939static void midi_system_exclusive(MidiStatus *st);
4940static int send_converted_effect(ConvTable *table, int num_tables, MidiStatus *st, int type, int val);
4941static int add_converted_effect(ConvTable *table, int num_tables, MidiStatus *st, int type, int val);
4942static int xg_control_change(MidiStatus *st, int cmd, int val);
4943
4944#define numberof(ary) (sizeof(ary)/sizeof(ary[0]))
4945
4946
4947/*
4948 * OSS Midi device record
4949 */
4950
4951static struct midi_operations awe_midi_operations =
4952{
4953 .owner = THIS_MODULE,
4954 .info = {"AWE Midi Emu", 0, 0, SNDCARD_SB},
4955 .in_info = {0},
4956 .open = awe_midi_open, /*open*/
4957 .close = awe_midi_close, /*close*/
4958 .ioctl = awe_midi_ioctl, /*ioctl*/
4959 .outputc = awe_midi_outputc, /*outputc*/
4960};
4961
4962static int my_mididev = -1;
4963
4964static void __init attach_midiemu(void)
4965{
4966 if ((my_mididev = sound_alloc_mididev()) < 0)
4967 printk ("Sound: Too many midi devices detected\n");
4968 else
4969 midi_devs[my_mididev] = &awe_midi_operations;
4970}
4971
4972static void unload_midiemu(void)
4973{
4974 if (my_mididev >= 0)
4975 sound_unload_mididev(my_mididev);
4976}
4977
4978
4979/*
4980 * open/close midi device
4981 */
4982
4983static int midi_opened = FALSE;
4984
4985static int midi_mode;
4986static int coarsetune, finetune;
4987
4988static int xg_mapping = TRUE;
4989static int xg_bankmode;
4990
4991/* effect sensitivity */
4992
4993#define FX_CUTOFF 0
4994#define FX_RESONANCE 1
4995#define FX_ATTACK 2
4996#define FX_RELEASE 3
4997#define FX_VIBRATE 4
4998#define FX_VIBDEPTH 5
4999#define FX_VIBDELAY 6
5000#define FX_NUMS 7
5001
5002#define DEF_FX_CUTOFF 170
5003#define DEF_FX_RESONANCE 6
5004#define DEF_FX_ATTACK 50
5005#define DEF_FX_RELEASE 50
5006#define DEF_FX_VIBRATE 30
5007#define DEF_FX_VIBDEPTH 4
5008#define DEF_FX_VIBDELAY 1500
5009
5010/* effect sense: */
5011static int gs_sense[] =
5012{
5013 DEF_FX_CUTOFF, DEF_FX_RESONANCE, DEF_FX_ATTACK, DEF_FX_RELEASE,
5014 DEF_FX_VIBRATE, DEF_FX_VIBDEPTH, DEF_FX_VIBDELAY
5015};
5016static int xg_sense[] =
5017{
5018 DEF_FX_CUTOFF, DEF_FX_RESONANCE, DEF_FX_ATTACK, DEF_FX_RELEASE,
5019 DEF_FX_VIBRATE, DEF_FX_VIBDEPTH, DEF_FX_VIBDELAY
5020};
5021
5022
5023/* current status */
5024static MidiStatus curst;
5025
5026
5027static int
5028awe_midi_open (int dev, int mode,
5029 void (*input)(int,unsigned char),
5030 void (*output)(int))
5031{
5032 if (midi_opened)
5033 return -EBUSY;
5034
5035 midi_opened = TRUE;
5036
5037 midi_mode = MODE_GM;
5038
5039 curst.queue = Q_NONE;
5040 curst.qlen = 0;
5041 curst.read = 0;
5042 curst.status = 0;
5043 curst.chan = 0;
5044 memset(curst.buf, 0, sizeof(curst.buf));
5045
5046 init_midi_status(&curst);
5047
5048 return 0;
5049}
5050
/* close the emulated midi device */
static void
awe_midi_close (int dev)
{
	midi_opened = FALSE;
}


/* no ioctls are supported by the emulation */
static int
awe_midi_ioctl (int dev, unsigned cmd, void __user *arg)
{
	return -EPERM;
}

/* feed one byte of the MIDI stream into the parser; always reports the
 * byte as consumed (returns 1) */
static int
awe_midi_outputc (int dev, unsigned char midi_byte)
{
	if (! midi_opened)
		return 1;

	/* force to change playing mode */
	playing_mode = AWE_PLAY_MULTI;

	get_midi_char(&curst, midi_byte);
	return 1;
}
5076
5077
5078/*
5079 * initialize
5080 */
5081
5082static void init_midi_status(MidiStatus *st)
5083{
5084 clear_rpn();
5085 coarsetune = 0;
5086 finetune = 0;
5087}
5088
5089
5090/*
5091 * RPN & NRPN
5092 */
5093
5094#define MAX_MIDI_CHANNELS 16
5095
5096/* RPN & NRPN */
5097static unsigned char nrpn[MAX_MIDI_CHANNELS]; /* current event is NRPN? */
5098static int msb_bit; /* current event is msb for RPN/NRPN */
5099/* RPN & NRPN indeces */
5100static unsigned char rpn_msb[MAX_MIDI_CHANNELS], rpn_lsb[MAX_MIDI_CHANNELS];
5101/* RPN & NRPN values */
5102static int rpn_val[MAX_MIDI_CHANNELS];
5103
5104static void clear_rpn(void)
5105{
5106 int i;
5107 for (i = 0; i < MAX_MIDI_CHANNELS; i++) {
5108 nrpn[i] = 0;
5109 rpn_msb[i] = 127;
5110 rpn_lsb[i] = 127;
5111 rpn_val[i] = 0;
5112 }
5113 msb_bit = 0;
5114}
5115
5116
5117/*
5118 * process midi queue
5119 */
5120
5121/* status event types */
5122typedef void (*StatusEvent)(MidiStatus *st);
5123static struct StatusEventList {
5124 StatusEvent process;
5125 int qlen;
5126} status_event[8] = {
5127 {midi_note_off, 2},
5128 {midi_note_on, 2},
5129 {midi_key_pressure, 2},
5130 {midi_control_change, 2},
5131 {midi_program_change, 1},
5132 {midi_channel_pressure, 1},
5133 {midi_pitch_wheel, 2},
5134 {NULL, 0},
5135};
5136
5137
5138/* read a char from fifo and process it */
5139static void get_midi_char(MidiStatus *st, int c)
5140{
5141 if (c == 0xfe) {
5142 /* ignore active sense */
5143 st->queue = Q_NONE;
5144 return;
5145 }
5146
5147 switch (st->queue) {
5148 /* case Q_VARLEN: queue_varlen(st, c); break;*/
5149 case Q_READ:
5150 case Q_SYSEX:
5151 queue_read(st, c);
5152 break;
5153 case Q_NONE:
5154 st->read = 0;
5155 if ((c & 0xf0) == 0xf0) {
5156 special_event(st, c);
5157 } else if (c & 0x80) { /* status change */
5158 st->status = (c >> 4) & 0x07;
5159 st->chan = c & 0x0f;
5160 st->queue = Q_READ;
5161 st->qlen = status_event[st->status].qlen;
5162 if (st->qlen == 0)
5163 st->queue = Q_NONE;
5164 }
5165 break;
5166 }
5167}
5168
/* 0xfx events: set up the parser for system messages; unhandled system
 * bytes (0xf4-0xf6, realtime 0xf8-0xff) are silently dropped */
static void special_event(MidiStatus *st, int c)
{
	switch (c) {
	case 0xf0: /* system exclusive */
		st->queue = Q_SYSEX;
		st->qlen = 0;	/* open-ended; terminated by 0xf7 */
		break;
	case 0xf1: /* MTC quarter frame */
	case 0xf3: /* song select */
		st->queue = Q_READ;
		st->qlen = 1;
		break;
	case 0xf2: /* song position */
		st->queue = Q_READ;
		st->qlen = 2;
		break;
	}
}

#if 0
/* read variable length value (currently unused; kept disabled) */
static void queue_varlen(MidiStatus *st, int c)
{
	st->qlen += (c & 0x7f);
	if (c & 0x80) {
		st->qlen <<= 7;
		return;
	}
	if (st->qlen <= 0) {
		st->qlen = 0;
		st->queue = Q_NONE;
	}
	st->queue = Q_READ;
	st->read = 0;
}
#endif


/* read a char: collect data bytes until the message is complete, then
 * dispatch it.  Bytes beyond MAX_MIDIBUF are counted but dropped. */
static void queue_read(MidiStatus *st, int c)
{
	if (st->read < MAX_MIDIBUF) {
		/* data bytes carry 7 bits except inside sysex */
		if (st->queue != Q_SYSEX)
			c &= 0x7f;
		st->buf[st->read] = (unsigned char)c;
	}
	st->read++;
	if (st->queue == Q_SYSEX && c == 0xf7) {
		midi_system_exclusive(st);
		st->queue = Q_NONE;
	} else if (st->queue == Q_READ && st->read >= st->qlen) {
		if (status_event[st->status].process)
			status_event[st->status].process(st);
		st->queue = Q_NONE;
	}
}
5226
5227
5228/*
5229 * status events
5230 */
5231
5232/* note on */
5233static void midi_note_on(MidiStatus *st)
5234{
5235 DEBUG(2,printk("midi: note_on (%d) %d %d\n", st->chan, st->buf[0], st->buf[1]));
5236 if (st->buf[1] == 0)
5237 midi_note_off(st);
5238 else
5239 awe_start_note(0, st->chan, st->buf[0], st->buf[1]);
5240}
5241
/* note off */
/* buf[0] = key number, buf[1] = release velocity */
static void midi_note_off(MidiStatus *st)
{
	DEBUG(2,printk("midi: note_off (%d) %d %d\n", st->chan, st->buf[0], st->buf[1]));
	awe_kill_note(0, st->chan, st->buf[0], st->buf[1]);
}
5248
/* key pressure change */
/* polyphonic aftertouch: buf[0] = key, buf[1] = pressure value */
static void midi_key_pressure(MidiStatus *st)
{
	awe_key_pressure(0, st->chan, st->buf[0], st->buf[1]);
}
5254
/* channel pressure change */
/*
 * Channel aftertouch: record the pressure and route it to the
 * modulation-wheel effect (channel pressure acts like mod depth here).
 */
static void midi_channel_pressure(MidiStatus *st)
{
	channels[st->chan].chan_press = st->buf[0];
	awe_modwheel_change(st->chan, st->buf[0]);
}
5261
5262/* pitch wheel change */
5263static void midi_pitch_wheel(MidiStatus *st)
5264{
5265 int val = (int)st->buf[1] * 128 + st->buf[0];
5266 awe_bender(0, st->chan, val);
5267}
5268
/* program change */
/*
 * Select an instrument, applying mode-specific drum-channel remapping:
 * GS maps drum preset 127 back to 0; XG (with mapping enabled) offsets
 * drum presets by 64.
 */
static void midi_program_change(MidiStatus *st)
{
	int preset;
	preset = st->buf[0];
	if (midi_mode == MODE_GS && IS_DRUM_CHANNEL(st->chan) && preset == 127)
		preset = 0;
	else if (midi_mode == MODE_XG && xg_mapping && IS_DRUM_CHANNEL(st->chan))
		preset += 64;

	awe_set_instr(0, st->chan, preset);
}
5281
/* convenience wrappers around awe_send_effect(); voice -1 presumably
 * means "all voices on the channel" -- the 0x80/0x40 bits appear to be
 * add/unset flags understood by awe_send_effect() (TODO confirm). */
#define send_effect(chan,type,val) awe_send_effect(chan,-1,type,val)
#define add_effect(chan,type,val) awe_send_effect(chan,-1,(type)|0x80,val)
#define unset_effect(chan,type) awe_send_effect(chan,-1,(type)|0x40,0)
5285
/* midi control change */
/*
 * Dispatch a MIDI control-change message.  Controls 0x20-0x3f are the
 * LSB halves of controls 0x00-0x1f; the file-scope msb_bit flag records
 * which half arrived so midi_select_bank()/midi_rpn_event() can tell
 * them apart.  In XG mode, controls with a direct AWE mapping are
 * consumed by xg_control_change() first.
 */
static void midi_control_change(MidiStatus *st)
{
	int cmd = st->buf[0];
	int val = st->buf[1];

	DEBUG(2,printk("midi: control (%d) %d %d\n", st->chan, cmd, val));
	if (midi_mode == MODE_XG) {
		if (xg_control_change(st, cmd, val))
			return;
	}

	/* controls #32 - #63 are LSB of #0 - #31 */
	msb_bit = 1;
	if (cmd >= 0x20 && cmd < 0x40) {
		msb_bit = 0;
		cmd -= 0x20;
	}

	switch (cmd) {
	case CTL_SOFT_PEDAL:
		/* emulate the soft pedal by pulling the filter cutoff down */
		if (val == 127)
			add_effect(st->chan, AWE_FX_CUTOFF, -160);
		else
			unset_effect(st->chan, AWE_FX_CUTOFF);
		break;

	case CTL_BANK_SELECT:
		midi_select_bank(st, val);
		break;

	/* set RPN/NRPN parameter */
	case CTL_REGIST_PARM_NUM_MSB:
		nrpn[st->chan]=0; rpn_msb[st->chan]=val;
		break;
	case CTL_REGIST_PARM_NUM_LSB:
		nrpn[st->chan]=0; rpn_lsb[st->chan]=val;
		break;
	case CTL_NONREG_PARM_NUM_MSB:
		nrpn[st->chan]=1; rpn_msb[st->chan]=val;
		break;
	case CTL_NONREG_PARM_NUM_LSB:
		nrpn[st->chan]=1; rpn_lsb[st->chan]=val;
		break;

	/* send RPN/NRPN entry */
	case CTL_DATA_ENTRY:
		/* MSB replaces the 14-bit value; LSB ORs in the low 7 bits */
		if (msb_bit)
			rpn_val[st->chan] = val * 128;
		else
			rpn_val[st->chan] |= val;
		if (nrpn[st->chan])
			midi_nrpn_event(st);
		else
			midi_rpn_event(st);
		break;

	/* increase/decrease data entry */
	case CTL_DATA_INCREMENT:
		rpn_val[st->chan]++;
		midi_rpn_event(st);
		break;
	case CTL_DATA_DECREMENT:
		rpn_val[st->chan]--;
		midi_rpn_event(st);
		break;

	/* default */
	default:
		/* everything else is forwarded to the synth engine as-is */
		awe_controller(0, st->chan, cmd, val);
		break;
	}
}
5359
/* tone bank change */
/*
 * Handle CTL_BANK_SELECT.  In XG mode the MSB selects the bank "mode"
 * (127 remaps the channel to drums) and the LSB is only honored when
 * xg_bankmode is zero; in GS mode the LSB is ignored entirely.  The
 * msb_bit global set by midi_control_change() says which half this is.
 */
static void midi_select_bank(MidiStatus *st, int val)
{
	if (midi_mode == MODE_XG && msb_bit) {
		xg_bankmode = val;
		/* XG MSB value; not normal bank selection */
		switch (val) {
		case 127: /* remap to drum channel */
			awe_controller(0, st->chan, CTL_BANK_SELECT, 128);
			break;
		default: /* remap to normal channel */
			awe_controller(0, st->chan, CTL_BANK_SELECT, val);
			break;
		}
		return;
	} else if (midi_mode == MODE_GS && !msb_bit)
		/* ignore LSB bank in GS mode (used for mapping) */
		return;

	/* normal bank controls; accept both MSB and LSB */
	if (! IS_DRUM_CHANNEL(st->chan)) {
		if (midi_mode == MODE_XG) {
			if (xg_bankmode) return;
			/* XG banks 64/126 have no AWE equivalent: use bank 0 */
			if (val == 64 || val == 126)
				val = 0;
		} else if (midi_mode == MODE_GS && val == 127)
			val = 0;
		awe_controller(0, st->chan, CTL_BANK_SELECT, val);
	}
}
5390
5391
5392/*
5393 * RPN events
5394 */
5395
/*
 * Apply the currently addressed registered parameter (RPN) after a
 * data-entry / increment / decrement control.  The parameter number is
 * (rpn_msb << 8) | rpn_lsb; the value is in rpn_val[chan].
 */
static void midi_rpn_event(MidiStatus *st)
{
	int type;
	type = (rpn_msb[st->chan]<<8) | rpn_lsb[st->chan];
	switch (type) {
	case 0x0000: /* Pitch bend sensitivity */
		/* MSB only / 1 semitone per 128 */
		if (msb_bit) {
			channels[st->chan].bender_range =
				rpn_val[st->chan] * 100 / 128;
		}
		break;

	case 0x0001: /* fine tuning: */
		/* MSB/LSB, 8192=center, 100/8192 cent step */
		finetune = rpn_val[st->chan] - 8192;
		midi_detune(st->chan, coarsetune, finetune);
		break;

	case 0x0002: /* coarse tuning */
		/* MSB only / 8192=center, 1 semitone per 128 */
		if (msb_bit) {
			coarsetune = rpn_val[st->chan] - 8192;
			midi_detune(st->chan, coarsetune, finetune);
		}
		break;

	case 0x7F7F: /* "lock-in" RPN */
		/* RPN null: deselects the current parameter; nothing to do */
		break;
	}
}
5427
5428
5429/* tuning:
5430 * coarse = -8192 to 8192 (100 cent per 128)
5431 * fine = -8192 to 8192 (max=100cent)
5432 */
5433static void midi_detune(int chan, int coarse, int fine)
5434{
5435 /* 4096 = 1200 cents in AWE parameter */
5436 int val;
5437 val = coarse * 4096 / (12 * 128);
5438 val += fine / 24;
5439 if (val)
5440 send_effect(chan, AWE_FX_INIT_PITCH, val);
5441 else
5442 unset_effect(chan, AWE_FX_INIT_PITCH);
5443}
5444
5445
5446/*
5447 * system exclusive message
5448 * GM/GS/XG macros are accepted
5449 */
5450
/*
 * Parse a completed system-exclusive message held in st->buf.
 * Only the GM-on, XG-on and a handful of GS messages are recognized;
 * everything else is ignored.  GM/GS resets re-run init_midi_status().
 */
static void midi_system_exclusive(MidiStatus *st)
{
	/* GM on */
	static unsigned char gm_on_macro[] = {
		0x7e,0x7f,0x09,0x01,
	};
	/* XG on */
	static unsigned char xg_on_macro[] = {
		0x43,0x10,0x4c,0x00,0x00,0x7e,0x00,
	};
	/* GS prefix
	 * drum channel: XX=0x1?(channel), YY=0x15, ZZ=on/off
	 * reverb mode: XX=0x01, YY=0x30, ZZ=0-7
	 * chorus mode: XX=0x01, YY=0x38, ZZ=0-7
	 */
	static unsigned char gs_pfx_macro[] = {
		0x41,0x10,0x42,0x12,0x40,/*XX,YY,ZZ*/
	};

#if 0
	/* SC88 system mode set
	 * single module mode: XX=1
	 * double module mode: XX=0
	 */
	static unsigned char gs_mode_macro[] = {
		0x41,0x10,0x42,0x12,0x00,0x00,0x7F,/*ZZ*/
	};
	/* SC88 display macro: XX=01:bitmap, 00:text
	 */
	static unsigned char gs_disp_macro[] = {
		0x41,0x10,0x45,0x12,0x10,/*XX,00*/
	};
#endif

	/* GM on */
	if (memcmp(st->buf, gm_on_macro, sizeof(gm_on_macro)) == 0) {
		/* don't downgrade a GS/XG session to plain GM */
		if (midi_mode != MODE_GS && midi_mode != MODE_XG)
			midi_mode = MODE_GM;
		init_midi_status(st);
	}

	/* GS macros */
	else if (memcmp(st->buf, gs_pfx_macro, sizeof(gs_pfx_macro)) == 0) {
		if (midi_mode != MODE_GS && midi_mode != MODE_XG)
			midi_mode = MODE_GS;

		if (st->buf[5] == 0x00 && st->buf[6] == 0x7f && st->buf[7] == 0x00) {
			/* GS reset */
			init_midi_status(st);
		}

		else if ((st->buf[5] & 0xf0) == 0x10 && st->buf[6] == 0x15) {
			/* drum pattern */
			/* GS part numbering: part 0 is channel 9, parts 1-9
			 * map to channels 0-8 */
			int p = st->buf[5] & 0x0f;
			if (p == 0) p = 9;
			else if (p < 10) p--;
			if (st->buf[7] == 0)
				DRUM_CHANNEL_OFF(p);
			else
				DRUM_CHANNEL_ON(p);

		} else if ((st->buf[5] & 0xf0) == 0x10 && st->buf[6] == 0x21) {
			/* program */
			int p = st->buf[5] & 0x0f;
			if (p == 0) p = 9;
			else if (p < 10) p--;
			if (! IS_DRUM_CHANNEL(p))
				awe_set_instr(0, p, st->buf[7]);

		} else if (st->buf[5] == 0x01 && st->buf[6] == 0x30) {
			/* reverb mode */
			awe_set_reverb_mode(st->buf[7]);

		} else if (st->buf[5] == 0x01 && st->buf[6] == 0x38) {
			/* chorus mode */
			awe_set_chorus_mode(st->buf[7]);

		} else if (st->buf[5] == 0x00 && st->buf[6] == 0x04) {
			/* master volume */
			awe_change_master_volume(st->buf[7]);

		}
	}

	/* XG on */
	else if (memcmp(st->buf, xg_on_macro, sizeof(xg_on_macro)) == 0) {
		midi_mode = MODE_XG;
		xg_mapping = TRUE;
		xg_bankmode = 0;
	}
}
5542
5543
5544/*----------------------------------------------------------------*/
5545
5546/*
5547 * convert NRPN/control values
5548 */
5549
5550static int send_converted_effect(ConvTable *table, int num_tables, MidiStatus *st, int type, int val)
5551{
5552 int i, cval;
5553 for (i = 0; i < num_tables; i++) {
5554 if (table[i].control == type) {
5555 cval = table[i].convert(val);
5556 send_effect(st->chan, table[i].awe_effect, cval);
5557 return TRUE;
5558 }
5559 }
5560 return FALSE;
5561}
5562
5563static int add_converted_effect(ConvTable *table, int num_tables, MidiStatus *st, int type, int val)
5564{
5565 int i, cval;
5566 for (i = 0; i < num_tables; i++) {
5567 if (table[i].control == type) {
5568 cval = table[i].convert(val);
5569 add_effect(st->chan, table[i].awe_effect|0x80, cval);
5570 return TRUE;
5571 }
5572 }
5573 return FALSE;
5574}
5575
5576
5577/*
5578 * AWE32 NRPN effects
5579 */
5580
/* converter prototypes (definitions below) */
static unsigned short fx_delay(int val);
static unsigned short fx_attack(int val);
static unsigned short fx_hold(int val);
static unsigned short fx_decay(int val);
static unsigned short fx_the_value(int val);
static unsigned short fx_twice_value(int val);
static unsigned short fx_conv_pitch(int val);
static unsigned short fx_conv_Q(int val);

/* function for each NRPN */ /* [range] units */
#define fx_env1_delay fx_delay /* [0,5900] 4msec */
#define fx_env1_attack fx_attack /* [0,5940] 1msec */
#define fx_env1_hold fx_hold /* [0,8191] 1msec */
#define fx_env1_decay fx_decay /* [0,5940] 4msec */
#define fx_env1_release fx_decay /* [0,5940] 4msec */
#define fx_env1_sustain fx_the_value /* [0,127] 0.75dB */
#define fx_env1_pitch fx_the_value /* [-127,127] 9.375cents */
#define fx_env1_cutoff fx_the_value /* [-127,127] 56.25cents */

#define fx_env2_delay fx_delay /* [0,5900] 4msec */
#define fx_env2_attack fx_attack /* [0,5940] 1msec */
#define fx_env2_hold fx_hold /* [0,8191] 1msec */
#define fx_env2_decay fx_decay /* [0,5940] 4msec */
#define fx_env2_release fx_decay /* [0,5940] 4msec */
#define fx_env2_sustain fx_the_value /* [0,127] 0.75dB */

#define fx_lfo1_delay fx_delay /* [0,5900] 4msec */
#define fx_lfo1_freq fx_twice_value /* [0,127] 84mHz */
#define fx_lfo1_volume fx_twice_value /* [0,127] 0.1875dB */
#define fx_lfo1_pitch fx_the_value /* [-127,127] 9.375cents */
#define fx_lfo1_cutoff fx_twice_value /* [-64,63] 56.25cents */

#define fx_lfo2_delay fx_delay /* [0,5900] 4msec */
#define fx_lfo2_freq fx_twice_value /* [0,127] 84mHz */
#define fx_lfo2_pitch fx_the_value /* [-127,127] 9.375cents */

#define fx_init_pitch fx_conv_pitch /* [-8192,8192] cents */
#define fx_chorus fx_the_value /* [0,255] -- */
#define fx_reverb fx_the_value /* [0,255] -- */
#define fx_cutoff fx_twice_value /* [0,127] 62Hz */
#define fx_filterQ fx_conv_Q /* [0,127] -- */
5622
/* thin casting wrappers around the shared envelope-parameter
 * converters (calc_parm_*) used elsewhere in the driver */
static unsigned short fx_delay(int val)
{
	return (unsigned short)calc_parm_delay(val);
}

static unsigned short fx_attack(int val)
{
	return (unsigned short)calc_parm_attack(val);
}

static unsigned short fx_hold(int val)
{
	return (unsigned short)calc_parm_hold(val);
}

static unsigned short fx_decay(int val)
{
	return (unsigned short)calc_parm_decay(val);
}
5642
/* identity converter: pass the value through, truncated to one byte */
static unsigned short fx_the_value(int val)
{
	unsigned short out = (unsigned short)val;
	return out & 0xffu;
}
5647
/* double the value, truncated to one byte */
static unsigned short fx_twice_value(int val)
{
	unsigned short doubled = (unsigned short)(val * 2);
	return doubled & 0xffu;
}
5652
/* cents -> AWE pitch units (4096 units per 1200 cents / one octave);
 * the intermediate (short) cast preserves the sign bits in the
 * returned 16-bit pattern, matching the original behavior */
static unsigned short fx_conv_pitch(int val)
{
	int units = val * 4096 / 1200;
	return (unsigned short)(short)units;
}
5657
/* scale the 0..127 controller range down to the 0..15 filter-Q range */
static unsigned short fx_conv_Q(int val)
{
	int q = val / 8;
	return (unsigned short)(q & 0xff);
}
5662
5663
/* NRPN LSB number -> AWE effect parameter plus value converter */
static ConvTable awe_effects[] =
{
	{ 0, AWE_FX_LFO1_DELAY, fx_lfo1_delay},
	{ 1, AWE_FX_LFO1_FREQ, fx_lfo1_freq},
	{ 2, AWE_FX_LFO2_DELAY, fx_lfo2_delay},
	{ 3, AWE_FX_LFO2_FREQ, fx_lfo2_freq},

	{ 4, AWE_FX_ENV1_DELAY, fx_env1_delay},
	{ 5, AWE_FX_ENV1_ATTACK,fx_env1_attack},
	{ 6, AWE_FX_ENV1_HOLD, fx_env1_hold},
	{ 7, AWE_FX_ENV1_DECAY, fx_env1_decay},
	{ 8, AWE_FX_ENV1_SUSTAIN, fx_env1_sustain},
	{ 9, AWE_FX_ENV1_RELEASE, fx_env1_release},

	{10, AWE_FX_ENV2_DELAY, fx_env2_delay},
	{11, AWE_FX_ENV2_ATTACK, fx_env2_attack},
	{12, AWE_FX_ENV2_HOLD, fx_env2_hold},
	{13, AWE_FX_ENV2_DECAY, fx_env2_decay},
	{14, AWE_FX_ENV2_SUSTAIN, fx_env2_sustain},
	{15, AWE_FX_ENV2_RELEASE, fx_env2_release},

	{16, AWE_FX_INIT_PITCH, fx_init_pitch},
	{17, AWE_FX_LFO1_PITCH, fx_lfo1_pitch},
	{18, AWE_FX_LFO2_PITCH, fx_lfo2_pitch},
	{19, AWE_FX_ENV1_PITCH, fx_env1_pitch},
	{20, AWE_FX_LFO1_VOLUME, fx_lfo1_volume},
	{21, AWE_FX_CUTOFF, fx_cutoff},
	{22, AWE_FX_FILTERQ, fx_filterQ},
	{23, AWE_FX_LFO1_CUTOFF, fx_lfo1_cutoff},
	{24, AWE_FX_ENV1_CUTOFF, fx_env1_cutoff},
	{25, AWE_FX_CHORUS, fx_chorus},
	{26, AWE_FX_REVERB, fx_reverb},
};

static int num_awe_effects = numberof(awe_effects);
5699
5700
5701/*
5702 * GS(SC88) NRPN effects; still experimental
5703 */
5704
/* cutoff: quarter semitone step, max=255 */
/* All gs_* converters center around controller value 64 and scale by
 * the per-effect gs_sense[] sensitivity; negative intermediate results
 * wrap in the unsigned short return, which downstream code appears to
 * rely on for signed parameters. */
static unsigned short gs_cutoff(int val)
{
	return (val - 64) * gs_sense[FX_CUTOFF] / 50;
}

/* resonance: 0 to 15(max) */
static unsigned short gs_filterQ(int val)
{
	return (val - 64) * gs_sense[FX_RESONANCE] / 50;
}

/* attack: */
static unsigned short gs_attack(int val)
{
	return -(val - 64) * gs_sense[FX_ATTACK] / 50;
}

/* decay: */
/* NOTE(review): uses the FX_RELEASE sensitivity rather than a decay
 * specific one -- confirm this is intentional (no FX_DECAY entry?) */
static unsigned short gs_decay(int val)
{
	return -(val - 64) * gs_sense[FX_RELEASE] / 50;
}

/* release: */
static unsigned short gs_release(int val)
{
	return -(val - 64) * gs_sense[FX_RELEASE] / 50;
}

/* vibrato freq: 0.042Hz step, max=255 */
static unsigned short gs_vib_rate(int val)
{
	return (val - 64) * gs_sense[FX_VIBRATE] / 50;
}

/* vibrato depth: max=127, 1 octave */
static unsigned short gs_vib_depth(int val)
{
	return (val - 64) * gs_sense[FX_VIBDEPTH] / 50;
}

/* vibrato delay: -0.725msec step */
static unsigned short gs_vib_delay(int val)
{
	return -(val - 64) * gs_sense[FX_VIBDELAY] / 50;
}
5752
/* GS NRPN LSB number -> AWE effect parameter plus value converter */
static ConvTable gs_effects[] =
{
	{32, AWE_FX_CUTOFF, gs_cutoff},
	{33, AWE_FX_FILTERQ, gs_filterQ},
	{99, AWE_FX_ENV2_ATTACK, gs_attack},
	{100, AWE_FX_ENV2_DECAY, gs_decay},
	{102, AWE_FX_ENV2_RELEASE, gs_release},
	{8, AWE_FX_LFO1_FREQ, gs_vib_rate},
	{9, AWE_FX_LFO1_VOLUME, gs_vib_depth},
	{10, AWE_FX_LFO1_DELAY, gs_vib_delay},
};

static int num_gs_effects = numberof(gs_effects);
5766
5767
5768/*
5769 * NRPN events: accept as AWE32/SC88 specific controls
5770 */
5771
/*
 * Dispatch an NRPN data entry: MSB 127 selects the AWE32-specific
 * effect table (complete on the LSB half of the 14-bit value), MSB 1
 * selects the SC88/GS table (MSB half only).
 */
static void midi_nrpn_event(MidiStatus *st)
{
	if (rpn_msb[st->chan] == 127 && rpn_lsb[st->chan] <= 26) {
		if (! msb_bit) /* both MSB/LSB necessary */
			send_converted_effect(awe_effects, num_awe_effects,
					      st, rpn_lsb[st->chan],
					      rpn_val[st->chan] - 8192);
	} else if (rpn_msb[st->chan] == 1) {
		if (msb_bit) /* only MSB is valid */
			add_converted_effect(gs_effects, num_gs_effects,
					     st, rpn_lsb[st->chan],
					     rpn_val[st->chan] / 128);
	}
}
5786
5787
5788/*
5789 * XG control effects; still experimental
5790 */
5791
/* cutoff: quarter semitone step, max=255 */
/* The xg_* converters mirror the gs_* ones but center the divisor at
 * 64 and use the xg_sense[] sensitivity table. */
static unsigned short xg_cutoff(int val)
{
	return (val - 64) * xg_sense[FX_CUTOFF] / 64;
}

/* resonance: 0(open) to 15(most nasal) */
static unsigned short xg_filterQ(int val)
{
	return (val - 64) * xg_sense[FX_RESONANCE] / 64;
}

/* attack: */
static unsigned short xg_attack(int val)
{
	return -(val - 64) * xg_sense[FX_ATTACK] / 64;
}

/* release: */
static unsigned short xg_release(int val)
{
	return -(val - 64) * xg_sense[FX_RELEASE] / 64;
}
5815
/* XG controller number -> AWE effect parameter plus value converter */
static ConvTable xg_effects[] =
{
	{71, AWE_FX_CUTOFF, xg_cutoff},
	{74, AWE_FX_FILTERQ, xg_filterQ},
	{72, AWE_FX_ENV2_RELEASE, xg_release},
	{73, AWE_FX_ENV2_ATTACK, xg_attack},
};

static int num_xg_effects = numberof(xg_effects);
5825
/* try to handle an XG control change via the conversion table;
 * returns TRUE when consumed, FALSE to fall back to generic handling */
static int xg_control_change(MidiStatus *st, int cmd, int val)
{
	return add_converted_effect(xg_effects, num_xg_effects, st, cmd, val);
}
5830
5831#endif /* CONFIG_AWE32_MIDIEMU */
5832
5833
5834/*----------------------------------------------------------------*/
5835
5836
5837/*
5838 * initialization of AWE driver
5839 */
5840
/*
 * Bring the EMU8000 up from cold: hardware config registers, audio
 * channels, DMA, init arrays, DRAM sizing and FM section, then enable
 * audio output and load the default control parameters.  The poke
 * sequence is order-sensitive; audio is kept disabled (HWCF3=0) during
 * setup to reduce clicking.
 */
static void
awe_initialize(void)
{
	DEBUG(0,printk("AWE32: initializing..\n"));

	/* initialize hardware configuration */
	awe_poke(AWE_HWCF1, 0x0059);
	awe_poke(AWE_HWCF2, 0x0020);

	/* disable audio; this seems to reduce a clicking noise a bit.. */
	awe_poke(AWE_HWCF3, 0);

	/* initialize audio channels */
	awe_init_audio();

	/* initialize DMA */
	awe_init_dma();

	/* initialize init array */
	awe_init_array();

	/* check DRAM memory size */
	awe_check_dram();

	/* initialize the FM section of the AWE32 */
	awe_init_fm();

	/* set up voice envelopes */
	awe_tweak();

	/* enable audio */
	awe_poke(AWE_HWCF3, 0x0004);

	/* set default values */
	awe_init_ctrl_parms(TRUE);

	/* set equalizer */
	awe_update_equalizer();

	/* set reverb & chorus modes */
	awe_update_reverb_mode();
	awe_update_chorus_mode();
}
5884
5885
5886/*
5887 * Core Device Management Functions
5888 */
5889
5890/* store values to i/o port array */
5891static void setup_ports(int port1, int port2, int port3)
5892{
5893 awe_ports[0] = port1;
5894 if (port2 == 0)
5895 port2 = port1 + 0x400;
5896 awe_ports[1] = port2;
5897 awe_ports[2] = port2 + 2;
5898 if (port3 == 0)
5899 port3 = port1 + 0x800;
5900 awe_ports[3] = port3;
5901 awe_ports[4] = port3 + 2;
5902
5903 port_setuped = TRUE;
5904}
5905
5906/*
5907 * port request
5908 * 0x620-623, 0xA20-A23, 0xE20-E23
5909 */
5910
/*
 * Reserve the three 4-byte port ranges used by the driver.  Returns 1
 * on success, 0 on failure; on failure any already-claimed regions are
 * released (goto-based unwind).  awe_ports[2]/[4] overlap [1]/[3]+2 and
 * need no separate reservation.
 */
static int
awe_request_region(void)
{
	if (! port_setuped)
		return 0;
	if (! request_region(awe_ports[0], 4, "sound driver (AWE32)"))
		return 0;
	if (! request_region(awe_ports[1], 4, "sound driver (AWE32)"))
		goto err_out;
	if (! request_region(awe_ports[3], 4, "sound driver (AWE32)"))
		goto err_out1;
	return 1;
err_out1:
	release_region(awe_ports[1], 4);
err_out:
	release_region(awe_ports[0], 4);
	return 0;
}
5929
/* release the three port ranges claimed by awe_request_region() */
static void
awe_release_region(void)
{
	if (! port_setuped) return;
	release_region(awe_ports[0], 4);
	release_region(awe_ports[1], 4);
	release_region(awe_ports[3], 4);
}
5938
/*
 * Register the synth device and bring the hardware up.  Requires
 * setup_ports() to have been called first.  Returns 1 on success,
 * 0 on failure (ports busy or no free synth slot).  Idempotent: a
 * second call while awe_present is a no-op.
 */
static int awe_attach_device(void)
{
	if (awe_present) return 0; /* for OSS38.. called twice? */

	/* reserve I/O ports for awedrv */
	if (! awe_request_region()) {
		printk(KERN_ERR "AWE32: I/O area already used.\n");
		return 0;
	}

	/* set buffers to NULL */
	sfhead = sftail = NULL;

	my_dev = sound_alloc_synthdev();
	if (my_dev == -1) {
		printk(KERN_ERR "AWE32 Error: too many synthesizers\n");
		awe_release_region();
		return 0;
	}

	voice_alloc = &awe_operations.alloc;
	voice_alloc->max_voice = awe_max_voices;
	synth_devs[my_dev] = &awe_operations;

#ifdef CONFIG_AWE32_MIXER
	attach_mixer();
#endif
#ifdef CONFIG_AWE32_MIDIEMU
	attach_midiemu();
#endif

	/* clear all samples */
	awe_reset_samples();

	/* initialize AWE32 hardware */
	awe_initialize();

	sprintf(awe_info.name, "AWE32-%s (RAM%dk)",
		AWEDRV_VERSION, memsize/1024);
	printk(KERN_INFO "<SoundBlaster EMU8000 (RAM%dk)>\n", memsize/1024);

	awe_present = TRUE;

	return 1;
}
5984
/*
 * Undo awe_attach_device(): reset samples, release ports, free sample
 * tables and unregister the synth.  Safe to call when not attached.
 * (Name misspelling "dettach" is kept -- it is referenced elsewhere.)
 */
static void awe_dettach_device(void)
{
	if (awe_present) {
		awe_reset_samples();
		awe_release_region();
		free_tables();
#ifdef CONFIG_AWE32_MIXER
		unload_mixer();
#endif
#ifdef CONFIG_AWE32_MIDIEMU
		unload_midiemu();
#endif
		sound_unload_synthdev(my_dev);
		awe_present = FALSE;
	}
}
6001
6002
6003/*
6004 * Legacy device Probing
6005 */
6006
6007/* detect emu8000 chip on the specified address; from VV's guide */
6008
/*
 * Probe for an EMU8000 at the given base address by checking the three
 * signature register patterns ("from VV's guide").  Side effect: calls
 * setup_ports(), so awe_ports[] is left pointing at this base even when
 * the probe fails.  Returns 1 when the chip is present.
 */
static int __init
awe_detect_base(int addr)
{
	setup_ports(addr, 0, 0);
	if ((awe_peek(AWE_U1) & 0x000F) != 0x000C)
		return 0;
	if ((awe_peek(AWE_HWCF1) & 0x007E) != 0x0058)
		return 0;
	if ((awe_peek(AWE_HWCF2) & 0x0003) != 0x0003)
		return 0;
	DEBUG(0,printk("AWE32 found at %x\n", addr));
	return 1;
}
6022
6023static int __init awe_detect_legacy_devices(void)
6024{
6025 int base;
6026 for (base = 0x620; base <= 0x680; base += 0x20)
6027 if (awe_detect_base(base)) {
6028 awe_attach_device();
6029 return 1;
6030 }
6031 DEBUG(0,printk("AWE32 Legacy detection failed\n"));
6032 return 0;
6033}
6034
6035
6036/*
6037 * PnP device Probing
6038 */
6039
/* PnP IDs of the supported Creative wavetable logical devices */
static struct pnp_device_id awe_pnp_ids[] = {
	{.id = "CTL0021", .driver_data = 0}, /* AWE32 WaveTable */
	{.id = "CTL0022", .driver_data = 0}, /* AWE64 WaveTable */
	{.id = "CTL0023", .driver_data = 0}, /* AWE64 Gold WaveTable */
	{ } /* terminator */
};

MODULE_DEVICE_TABLE(pnp, awe_pnp_ids);
6048
6049static int awe_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
6050{
6051 int io1, io2, io3;
6052
6053 if (awe_present) {
6054 printk(KERN_ERR "AWE32: This driver only supports one AWE32 device, skipping.\n");
6055 }
6056
6057 if (!pnp_port_valid(dev,0) ||
6058 !pnp_port_valid(dev,1) ||
6059 !pnp_port_valid(dev,2)) {
6060 printk(KERN_ERR "AWE32: The PnP device does not have the required resources.\n");
6061 return -EINVAL;
6062 }
6063 io1 = pnp_port_start(dev,0);
6064 io2 = pnp_port_start(dev,1);
6065 io3 = pnp_port_start(dev,2);
6066 printk(KERN_INFO "AWE32: A PnP Wave Table was detected at IO's %#x,%#x,%#x.\n",
6067 io1, io2, io3);
6068 setup_ports(io1, io2, io3);
6069
6070 awe_attach_device();
6071 return 0;
6072}
6073
/* PnP remove callback: tear the (single) attached device down */
static void awe_pnp_remove(struct pnp_dev *dev)
{
	awe_dettach_device();
}
6078
/* PnP driver glue; registered by awe_detect_pnp_devices() */
static struct pnp_driver awe_pnp_driver = {
	.name		= "AWE32",
	.id_table	= awe_pnp_ids,
	.probe		= awe_pnp_probe,
	.remove		= awe_pnp_remove,
};
6085
6086static int __init awe_detect_pnp_devices(void)
6087{
6088 int ret;
6089
6090 ret = pnp_register_driver(&awe_pnp_driver);
6091 if (ret<0)
6092 printk(KERN_ERR "AWE32: PnP support is unavailable.\n");
6093 return ret;
6094}
6095
6096
6097/*
6098 * device / lowlevel (module) interface
6099 */
6100
/*
 * Top-level probe: try PnP first (unless isapnp=0), then the legacy
 * fixed I/O addresses.  Returns 1 on success, 0 when nothing found.
 * NOTE(review): with PnP enabled, a successful driver registration is
 * treated as success even before any device has actually been probed
 * (probe runs via the PnP core) -- confirm this is the intended
 * module-load semantics.
 */
static int __init
awe_detect(void)
{
	printk(KERN_INFO "AWE32: Probing for WaveTable...\n");
	if (isapnp) {
		if (awe_detect_pnp_devices()>=0)
			return 1;
	} else
		printk(KERN_INFO "AWE32: Skipping PnP detection.\n");

	if (awe_detect_legacy_devices())
		return 1;

	return 0;
}
6116
6117static int __init attach_awe(void)
6118{
6119 return awe_detect() ? 0 : -ENODEV;
6120}
6121
/* module exit: unregister the PnP driver and detach the device */
static void __exit unload_awe(void)
{
	/* NOTE(review): the PnP driver is unregistered unconditionally,
	 * even when isapnp=0 and awe_detect() never registered it --
	 * verify pnp_unregister_driver() tolerates that. */
	pnp_unregister_driver(&awe_pnp_driver);
	awe_dettach_device();
}
6127
6128
6129module_init(attach_awe);
6130module_exit(unload_awe);
6131
6132#ifndef MODULE
6133static int __init setup_awe(char *str)
6134{
6135 /* io, memsize, isapnp */
6136 int ints[4];
6137
6138 str = get_options(str, ARRAY_SIZE(ints), ints);
6139
6140 io = ints[1];
6141 memsize = ints[2];
6142 isapnp = ints[3];
6143
6144 return 1;
6145}
6146
6147__setup("awe=", setup_awe);
6148#endif
diff --git a/sound/oss/awe_wave.h b/sound/oss/awe_wave.h
deleted file mode 100644
index fe584810608f..000000000000
--- a/sound/oss/awe_wave.h
+++ /dev/null
@@ -1,77 +0,0 @@
1/*
2 * sound/oss/awe_wave.h
3 *
4 * Configuration of AWE32/SB32/AWE64 wave table synth driver.
5 * version 0.4.4; Jan. 4, 2000
6 *
7 * Copyright (C) 1996-1998 Takashi Iwai
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24/*
25 * chorus & reverb effects send for FM chip: from 0 to 0xff
26 * larger numbers often cause weird sounds.
27 */
28
29#define DEF_FM_CHORUS_DEPTH 0x10
30#define DEF_FM_REVERB_DEPTH 0x10
31
32
33/*
34 * other compile conditions
35 */
36
37/* initialize FM passthrough even without extended RAM */
38#undef AWE_ALWAYS_INIT_FM
39
40/* debug on */
41#define AWE_DEBUG_ON
42
43/* GUS compatible mode */
44#define AWE_HAS_GUS_COMPATIBILITY
45
46/* add MIDI emulation by wavetable */
47#define CONFIG_AWE32_MIDIEMU
48
49/* add mixer control of emu8000 equalizer */
50#undef CONFIG_AWE32_MIXER
51
52/* use new volume calculation method as default */
53#define AWE_USE_NEW_VOLUME_CALC
54
55/* check current volume target for searching empty voices */
56#define AWE_CHECK_VTARGET
57
58/* allow sample sharing */
59#define AWE_ALLOW_SAMPLE_SHARING
60
61/*
62 * AWE32 card configuration:
63 * uncomment the following lines *ONLY* when auto detection doesn't
64 * work properly on your machine.
65 */
66
67/*#define AWE_DEFAULT_BASE_ADDR 0x620*/ /* base port address */
68/*#define AWE_DEFAULT_MEM_SIZE 512*/ /* kbytes */
69
70/*
71 * AWE driver version number
72 */
73#define AWE_MAJOR_VERSION 0
74#define AWE_MINOR_VERSION 4
75#define AWE_TINY_VERSION 4
76#define AWE_VERSION_NUMBER ((AWE_MAJOR_VERSION<<16)|(AWE_MINOR_VERSION<<8)|AWE_TINY_VERSION)
77#define AWEDRV_VERSION "0.4.4"
diff --git a/sound/oss/cmpci.c b/sound/oss/cmpci.c
deleted file mode 100644
index 20628aa07a25..000000000000
--- a/sound/oss/cmpci.c
+++ /dev/null
@@ -1,3380 +0,0 @@
1/*
2 * cmpci.c -- C-Media PCI audio driver.
3 *
4 * Copyright (C) 1999 C-media support (support@cmedia.com.tw)
5 *
6 * Based on the PCI drivers by Thomas Sailer (sailer@ife.ee.ethz.ch)
7 *
8 * For update, visit:
9 * http://www.cmedia.com.tw
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 * Special thanks to David C. Niemi, Jan Pfeifer
26 *
27 *
28 * Module command line parameters:
29 * none so far
30 *
31 *
32 * Supported devices:
33 * /dev/dsp standard /dev/dsp device, (mostly) OSS compatible
34 * /dev/mixer standard /dev/mixer device, (mostly) OSS compatible
35 * /dev/midi simple MIDI UART interface, no ioctl
36 *
37 * The card has both an FM and a Wavetable synth, but I have to figure
38 * out first how to drive them...
39 *
40 * Revision history
41 * 06.05.98 0.1 Initial release
42 * 10.05.98 0.2 Fixed many bugs, esp. ADC rate calculation
43 * First stab at a simple midi interface (no bells&whistles)
44 * 13.05.98 0.3 Fix stupid cut&paste error: set_adc_rate was called instead of
45 * set_dac_rate in the FMODE_WRITE case in cm_open
46 * Fix hwptr out of bounds (now mpg123 works)
47 * 14.05.98 0.4 Don't allow excessive interrupt rates
48 * 08.06.98 0.5 First release using Alan Cox' soundcore instead of miscdevice
49 * 03.08.98 0.6 Do not include modversions.h
50 * Now mixer behaviour can basically be selected between
51 * "OSS documented" and "OSS actual" behaviour
52 * 31.08.98 0.7 Fix realplayer problems - dac.count issues
53 * 10.12.98 0.8 Fix drain_dac trying to wait on not yet initialized DMA
54 * 16.12.98 0.9 Fix a few f_file & FMODE_ bugs
55 * 06.01.99 0.10 remove the silly SA_INTERRUPT flag.
56 * hopefully killed the egcs section type conflict
57 * 12.03.99 0.11 cinfo.blocks should be reset after GETxPTR ioctl.
58 * reported by Johan Maes <joma@telindus.be>
59 * 22.03.99 0.12 return EAGAIN instead of EBUSY when O_NONBLOCK
60 * read/write cannot be executed
61 * 18.08.99 1.5 Only deallocate DMA buffer when unloading.
62 * 02.09.99 1.6 Enable SPDIF LOOP
63 * Change the mixer read back
64 * 21.09.99 2.33 Use RCS version as driver version.
65 * Add support for modem, S/PDIF loop and 4 channels.
66 * (8738 only)
67 * Fix bug cause x11amp cannot play.
68 *
69 * Fixes:
70 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
71 * 18/05/2001 - .bss nitpicks, fix a bug in set_dac_channels where it
72 * was calling prog_dmabuf with s->lock held, call missing
73 * unlock_kernel in cm_midi_release
74 * 08/10/2001 - use set_current_state in some more places
75 *
76 * Carlos Eduardo Gorges <carlos@techlinux.com.br>
77 * Fri May 25 2001
78 * - SMP support ( spin[un]lock* revision )
79 * - speaker mixer support
80 * Mon Aug 13 2001
81 * - optimizations and cleanups
82 *
83 * 03/01/2003 - open_mode fixes from Georg Acher <acher@in.tum.de>
84 * Simon Braunschmidt <brasimon@web.de>
85 * Sat Jan 31 2004
86 * - provide support for opl3 FM by releasing IO range after initialization
87 *
88 * ChenLi Tien <cltien@cmedia.com.tw>
89 * Mar 9 2004
90 * - Fix S/PDIF out if spdif_loop enabled
91 * - Load opl3 driver if enabled (fmio in proper range)
92 * - Load mpu401 if enabled (mpuio in proper range)
93 * Apr 5 2004
94 * - Fix DUAL_DAC dma synchronization bug
95 * - Check exist FM/MPU401 I/O before activate.
96 * - Add AFTM_S16_BE format support, so MPlayer/Xine can play AC3/mutlichannel
97 * on Mac
98 * - Change to support kernel 2.6 so only small patch needed
99 * - All parameters default to 0
100 * - Add spdif_out to send PCM through S/PDIF out jack
101 * - Add hw_copy to get 4-spaker output for general PCM/analog output
102 *
103 * Stefan Thater <stefan.thaeter@gmx.de>
104 * Apr 5 2004
105 * - Fix mute single channel for CD/Line-in/AUX-in
106 */
107/*****************************************************************************/
108
109#include <linux/module.h>
110#include <linux/string.h>
111#include <linux/interrupt.h>
112#include <linux/ioport.h>
113#include <linux/sched.h>
114#include <linux/delay.h>
115#include <linux/sound.h>
116#include <linux/slab.h>
117#include <linux/soundcard.h>
118#include <linux/pci.h>
119#include <linux/init.h>
120#include <linux/poll.h>
121#include <linux/spinlock.h>
122#include <linux/smp_lock.h>
123#include <linux/bitops.h>
124#include <linux/wait.h>
125#include <linux/dma-mapping.h>
126
127#include <asm/io.h>
128#include <asm/page.h>
129#include <asm/uaccess.h>
130
131#ifdef CONFIG_SOUND_CMPCI_MIDI
132#include "sound_config.h"
133#include "mpu401.h"
134#endif
135#ifdef CONFIG_SOUND_CMPCI_FM
136#include "opl3.h"
137#endif
138#ifdef CONFIG_SOUND_CMPCI_JOYSTICK
139#include <linux/gameport.h>
140#include <linux/mutex.h>
141
142#endif
143
144/* --------------------------------------------------------------------- */
145#undef OSS_DOCUMENTED_MIXER_SEMANTICS
146#undef DMABYTEIO
147#define DBG(x) {}
148/* --------------------------------------------------------------------- */
149
150#define CM_MAGIC ((PCI_VENDOR_ID_CMEDIA<<16)|PCI_DEVICE_ID_CMEDIA_CM8338A)
151
152/* CM8338 registers definition ****************/
153
154#define CODEC_CMI_FUNCTRL0 (0x00)
155#define CODEC_CMI_FUNCTRL1 (0x04)
156#define CODEC_CMI_CHFORMAT (0x08)
157#define CODEC_CMI_INT_HLDCLR (0x0C)
158#define CODEC_CMI_INT_STATUS (0x10)
159#define CODEC_CMI_LEGACY_CTRL (0x14)
160#define CODEC_CMI_MISC_CTRL (0x18)
161#define CODEC_CMI_TDMA_POS (0x1C)
162#define CODEC_CMI_MIXER (0x20)
163#define CODEC_SB16_DATA (0x22)
164#define CODEC_SB16_ADDR (0x23)
165#define CODEC_CMI_MIXER1 (0x24)
166#define CODEC_CMI_MIXER2 (0x25)
167#define CODEC_CMI_AUX_VOL (0x26)
168#define CODEC_CMI_MISC (0x27)
169#define CODEC_CMI_AC97 (0x28)
170
171#define CODEC_CMI_CH0_FRAME1 (0x80)
172#define CODEC_CMI_CH0_FRAME2 (0x84)
173#define CODEC_CMI_CH1_FRAME1 (0x88)
174#define CODEC_CMI_CH1_FRAME2 (0x8C)
175
176#define CODEC_CMI_SPDIF_CTRL (0x90)
177#define CODEC_CMI_MISC_CTRL2 (0x92)
178
179#define CODEC_CMI_EXT_REG (0xF0)
180
181/* Mixer registers for SB16 ******************/
182
183#define DSP_MIX_DATARESETIDX ((unsigned char)(0x00))
184
185#define DSP_MIX_MASTERVOLIDX_L ((unsigned char)(0x30))
186#define DSP_MIX_MASTERVOLIDX_R ((unsigned char)(0x31))
187#define DSP_MIX_VOICEVOLIDX_L ((unsigned char)(0x32))
188#define DSP_MIX_VOICEVOLIDX_R ((unsigned char)(0x33))
189#define DSP_MIX_FMVOLIDX_L ((unsigned char)(0x34))
190#define DSP_MIX_FMVOLIDX_R ((unsigned char)(0x35))
191#define DSP_MIX_CDVOLIDX_L ((unsigned char)(0x36))
192#define DSP_MIX_CDVOLIDX_R ((unsigned char)(0x37))
193#define DSP_MIX_LINEVOLIDX_L ((unsigned char)(0x38))
194#define DSP_MIX_LINEVOLIDX_R ((unsigned char)(0x39))
195
196#define DSP_MIX_MICVOLIDX ((unsigned char)(0x3A))
197#define DSP_MIX_SPKRVOLIDX ((unsigned char)(0x3B))
198
199#define DSP_MIX_OUTMIXIDX ((unsigned char)(0x3C))
200
201#define DSP_MIX_ADCMIXIDX_L ((unsigned char)(0x3D))
202#define DSP_MIX_ADCMIXIDX_R ((unsigned char)(0x3E))
203
204#define DSP_MIX_INGAINIDX_L ((unsigned char)(0x3F))
205#define DSP_MIX_INGAINIDX_R ((unsigned char)(0x40))
206#define DSP_MIX_OUTGAINIDX_L ((unsigned char)(0x41))
207#define DSP_MIX_OUTGAINIDX_R ((unsigned char)(0x42))
208
209#define DSP_MIX_AGCIDX ((unsigned char)(0x43))
210
211#define DSP_MIX_TREBLEIDX_L ((unsigned char)(0x44))
212#define DSP_MIX_TREBLEIDX_R ((unsigned char)(0x45))
213#define DSP_MIX_BASSIDX_L ((unsigned char)(0x46))
214#define DSP_MIX_BASSIDX_R ((unsigned char)(0x47))
215#define DSP_MIX_EXTENSION ((unsigned char)(0xf0))
216// pseudo register for AUX
217#define DSP_MIX_AUXVOL_L ((unsigned char)(0x50))
218#define DSP_MIX_AUXVOL_R ((unsigned char)(0x51))
219
220// I/O length
221#define CM_EXTENT_CODEC 0x100
222#define CM_EXTENT_MIDI 0x2
223#define CM_EXTENT_SYNTH 0x4
224#define CM_EXTENT_GAME 0x8
225
226// Function Control Register 0 (00h)
227#define CHADC0 0x01
228#define CHADC1 0x02
229#define PAUSE0 0x04
230#define PAUSE1 0x08
231
232// Function Control Register 0+2 (02h)
233#define CHEN0 0x01
234#define CHEN1 0x02
235#define RST_CH0 0x04
236#define RST_CH1 0x08
237
238// Function Control Register 1 (04h)
239#define JYSTK_EN 0x02
240#define UART_EN 0x04
241#define SPDO2DAC 0x40
242#define SPDFLOOP 0x80
243
244// Function Control Register 1+1 (05h)
245#define SPDF_0 0x01
246#define SPDF_1 0x02
247#define ASFC 0x1c
248#define DSFC 0xe0
249#define SPDIF2DAC (SPDF_1 << 8 | SPDO2DAC)
250
251// Channel Format Register (08h)
252#define CM_CFMT_STEREO 0x01
253#define CM_CFMT_16BIT 0x02
254#define CM_CFMT_MASK 0x03
255#define POLVALID 0x20
256#define INVSPDIFI 0x80
257
258// Channel Format Register+2 (0ah)
259#define SPD24SEL 0x20
260
261// Channel Format Register+3 (0bh)
262#define CHB3D 0x20
263#define CHB3D5C 0x80
264
265// Interrupt Hold/Clear Register+2 (0eh)
266#define CH0_INT_EN 0x01
267#define CH1_INT_EN 0x02
268
269// Interrupt Register (10h)
270#define CHINT0 0x01
271#define CHINT1 0x02
272#define CH0BUSY 0x04
273#define CH1BUSY 0x08
274
275// Legacy Control/Status Register+1 (15h)
276#define EXBASEN 0x10
277#define BASE2LIN 0x20
278#define CENTR2LIN 0x40
279#define CB2LIN (BASE2LIN | CENTR2LIN)
280#define CHB3D6C 0x80
281
282// Legacy Control/Status Register+2 (16h)
283#define DAC2SPDO 0x20
284#define SPDCOPYRHT 0x40
285#define ENSPDOUT 0x80
286
287// Legacy Control/Status Register+3 (17h)
288#define FMSEL 0x03
289#define VSBSEL 0x0c
290#define VMPU 0x60
291#define NXCHG 0x80
292
293// Miscellaneous Control Register (18h)
294#define REAR2LIN 0x20
295#define MUTECH1 0x40
296#define ENCENTER 0x80
297
298// Miscellaneous Control Register+1 (19h)
299#define SELSPDIFI2 0x01
300#define SPDF_AC97 0x80
301
302// Miscellaneous Control Register+2 (1ah)
303#define AC3_EN 0x04
304#define FM_EN 0x08
305#define SPD32SEL 0x20
306#define XCHGDAC 0x40
307#define ENDBDAC 0x80
308
309// Miscellaneous Control Register+3 (1bh)
310#define SPDIFI48K 0x01
311#define SPDO5V 0x02
312#define N4SPK3D 0x04
313#define RESET 0x40
314#define PWD 0x80
315#define SPDIF48K (SPDIFI48K << 24 | SPDF_AC97 << 8)
316
317// Mixer1 (24h)
318#define CDPLAY 0x01
319#define X3DEN 0x02
320#define REAR2FRONT 0x10
321#define SPK4 0x20
322#define WSMUTE 0x40
323#define FMMUTE 0x80
324
325// Miscellaneous Register (27h)
326#define SPDVALID 0x02
327#define CENTR2MIC 0x04
328
329// Miscellaneous Register2 (92h)
330#define SPD32KFMT 0x10
331
332#define CM_CFMT_DACSHIFT 2
333#define CM_CFMT_ADCSHIFT 0
334#define CM_FREQ_DACSHIFT 5
335#define CM_FREQ_ADCSHIFT 2
336#define RSTDAC RST_CH1
337#define RSTADC RST_CH0
338#define ENDAC CHEN1
339#define ENADC CHEN0
340#define PAUSEDAC PAUSE1
341#define PAUSEADC PAUSE0
342#define CODEC_CMI_ADC_FRAME1 CODEC_CMI_CH0_FRAME1
343#define CODEC_CMI_ADC_FRAME2 CODEC_CMI_CH0_FRAME2
344#define CODEC_CMI_DAC_FRAME1 CODEC_CMI_CH1_FRAME1
345#define CODEC_CMI_DAC_FRAME2 CODEC_CMI_CH1_FRAME2
346#define DACINT CHINT1
347#define ADCINT CHINT0
348#define DACBUSY CH1BUSY
349#define ADCBUSY CH0BUSY
350#define ENDACINT CH1_INT_EN
351#define ENADCINT CH0_INT_EN
352
/* bytes per sample for the four CM_CFMT_* format codes
 * (index = stereo bit | 16-bit bit): mono8, stereo8, mono16, stereo16 */
static const unsigned sample_size[] = { 1, 2, 2, 4 };
/* log2 of sample_size[]: shift counts for sample<->byte conversion */
static const unsigned sample_shift[] = { 0, 1, 1, 2 };

#define SND_DEV_DSP16 5

#define NR_DEVICE 3		/* maximum number of devices */

/* In dual-DAC mode the second DAC borrows the ADC channel's hardware slot,
 * so the "dac1" helpers are simply aliases for the ADC counterparts. */
#define set_dac1_rate set_adc_rate
#define set_dac1_rate_unlocked set_adc_rate_unlocked
#define stop_dac1 stop_adc
#define stop_dac1_unlocked stop_adc_unlocked
#define get_dmadac1 get_dmaadc

/* running index of probed cards — NOTE(review): consumer not visible in
 * this chunk; presumably used when registering each device instance */
static unsigned int devindex = 0;
367
368//*********************************************/
369
/* Per-card driver state. */
struct cm_state {
	/* magic: CM_MAGIC, sanity tag for state pointers */
	unsigned int magic;

	/* list of cmedia devices (linked into the file-scope 'devs' list) */
	struct list_head devs;

	/* the corresponding pci_dev structure */
	struct pci_dev *dev;

	int dev_audio;			/* soundcore stuff */
	int dev_mixer;

	unsigned int iosb, iobase, iosynth,
			 iomidi, iogame, irq;	/* hardware resources */
	unsigned short deviceid;		/* pci_id */

	struct { /* mixer stuff */
		unsigned int modcnt;	/* modification counter */
		unsigned short vol[13];	/* stored mixer volume settings */
	} mix;

	/* current sample rates in Hz (see set_adc_rate/set_dac_rate) */
	unsigned int rateadc, ratedac;		/* wave stuff */
	/* fmt shadows CODEC_CMI_CHFORMAT (see set_fmt_unlocked);
	 * enable: channel enable bits — usage not visible in this chunk */
	unsigned char fmt, enable;

	spinlock_t lock;		/* guards hardware register access */
	struct mutex open_mutex;	/* presumably serializes open/release — confirm against open code */
	mode_t open_mode;
	wait_queue_head_t open_wait;

	/* one DMA ring descriptor per direction */
	struct dmabuf {
		void *rawbuf;		/* kernel virtual address of the ring */
		dma_addr_t dmaaddr;	/* bus address programmed into FRAME1 */
		unsigned buforder;	/* log2 pages allocated */
		unsigned numfrag;
		unsigned fragshift;
		unsigned hwptr, swptr;	/* hardware / software ring positions */
		unsigned total_bytes;
		int count;		/* bytes queued (written, not yet played) */
		unsigned error; /* over/underrun */
		wait_queue_head_t wait;

		unsigned fragsize; /* redundant, but makes calculations easier */
		unsigned dmasize;
		unsigned fragsamples;
		unsigned dmasamples;

		unsigned mapped:1; /* OSS stuff */
		unsigned ready:1;
		unsigned endcleared:1;
		unsigned enabled:1;
		unsigned ossfragshift;
		int ossmaxfrags;
		unsigned subdivision;
	} dma_dac, dma_adc;

#ifdef CONFIG_SOUND_CMPCI_MIDI
	int midi_devc;
	struct address_info mpu_data;
#endif
#ifdef CONFIG_SOUND_CMPCI_JOYSTICK
	struct gameport *gameport;
#endif

	/* 0 = CMI8338; 33/37/39/55 = CMI8738 revisions (compared throughout) */
	int chip_version;
	int max_channels;
	int curr_channels;
	int capability; /* HW capability, various for chip versions (CAN_* flags) */

	int status; /* HW or SW state (DO_* flags) */

	int spdif_counter; /* spdif frame counter, 0..383 (see trans_ac3) */
};
443
444/* flags used for capability */
445#define CAN_AC3_HW 0x00000001 /* 037 or later */
446#define CAN_AC3_SW 0x00000002 /* 033 or later */
447#define CAN_AC3 (CAN_AC3_HW | CAN_AC3_SW)
448#define CAN_DUAL_DAC 0x00000004 /* 033 or later */
449#define CAN_MULTI_CH_HW 0x00000008 /* 039 or later */
450#define CAN_MULTI_CH (CAN_MULTI_CH_HW | CAN_DUAL_DAC)
451#define CAN_LINE_AS_REAR 0x00000010 /* 033 or later */
452#define CAN_LINE_AS_BASS 0x00000020 /* 039 or later */
453#define CAN_MIC_AS_BASS 0x00000040 /* 039 or later */
454
455/* flags used for status */
456#define DO_AC3_HW 0x00000001
457#define DO_AC3_SW 0x00000002
458#define DO_AC3 (DO_AC3_HW | DO_AC3_SW)
459#define DO_DUAL_DAC 0x00000004
460#define DO_MULTI_CH_HW 0x00000008
461#define DO_MULTI_CH (DO_MULTI_CH_HW | DO_DUAL_DAC)
462#define DO_LINE_AS_REAR 0x00000010 /* 033 or later */
463#define DO_LINE_AS_BASS 0x00000020 /* 039 or later */
464#define DO_MIC_AS_BASS 0x00000040 /* 039 or later */
465#define DO_SPDIF_OUT 0x00000100
466#define DO_SPDIF_IN 0x00000200
467#define DO_SPDIF_LOOP 0x00000400
468#define DO_BIGENDIAN_W 0x00001000 /* used in PowerPC */
469#define DO_BIGENDIAN_R 0x00002000 /* used in PowerPC */
470
/* all probed cards, linked via cm_state.devs */
static LIST_HEAD(devs);

/* Module parameters — every one defaults to 0 (disabled/auto). */
static int mpuio;
static int fmio;
static int joystick;
static int spdif_inverse;
static int spdif_loop;
static int spdif_out;
static int use_line_as_rear;
static int use_line_as_bass;
static int use_mic_as_bass;
static int mic_boost;
static int hw_copy;
module_param(mpuio, int, 0);
module_param(fmio, int, 0);
module_param(joystick, bool, 0);
module_param(spdif_inverse, bool, 0);
module_param(spdif_loop, bool, 0);
module_param(spdif_out, bool, 0);
module_param(use_line_as_rear, bool, 0);
module_param(use_line_as_bass, bool, 0);
module_param(use_mic_as_bass, bool, 0);
module_param(mic_boost, bool, 0);
module_param(hw_copy, bool, 0);
MODULE_PARM_DESC(mpuio, "(0x330, 0x320, 0x310, 0x300) Base of MPU-401, 0 to disable");
MODULE_PARM_DESC(fmio, "(0x388, 0x3C8, 0x3E0) Base of OPL3, 0 to disable");
MODULE_PARM_DESC(joystick, "(1/0) Enable joystick interface, still need joystick driver");
MODULE_PARM_DESC(spdif_inverse, "(1/0) Invert S/PDIF-in signal");
MODULE_PARM_DESC(spdif_loop, "(1/0) Route S/PDIF-in to S/PDIF-out directly");
MODULE_PARM_DESC(spdif_out, "(1/0) Send PCM to S/PDIF-out (PCM volume will not function)");
MODULE_PARM_DESC(use_line_as_rear, "(1/0) Use line-in jack as rear-out");
MODULE_PARM_DESC(use_line_as_bass, "(1/0) Use line-in jack as bass/center");
MODULE_PARM_DESC(use_mic_as_bass, "(1/0) Use mic-in jack as bass/center");
MODULE_PARM_DESC(mic_boost, "(1/0) Enable microphone boost");
MODULE_PARM_DESC(hw_copy, "Copy front channel to surround channel");
506
507/* --------------------------------------------------------------------- */
508
/*
 * Integer base-2 logarithm: returns floor(log2(x)) for x >= 1, and 0 for
 * x == 0.  Implemented as a branch cascade over halving bit-widths.
 */
static inline unsigned ld2(unsigned int x)
{
	unsigned result = 0;

	if (x >= 0x10000) {
		x >>= 16;
		result += 16;
	}
	if (x >= 0x100) {
		x >>= 8;
		result += 8;
	}
	if (x >= 0x10) {
		x >>= 4;
		result += 4;
	}
	if (x >= 0x4) {
		x >>= 2;
		result += 2;
	}
	if (x >= 0x2)
		result += 1;

	return result;
}
527
528/* --------------------------------------------------------------------- */
529
/* Read-modify-write a byte-wide I/O port: port = (port & mask) | value. */
static void maskb(unsigned int addr, unsigned int mask, unsigned int value)
{
	unsigned int cur = inb(addr);

	outb((cur & mask) | value, addr);
}
534
/* Read-modify-write a word-wide I/O port: port = (port & mask) | value. */
static void maskw(unsigned int addr, unsigned int mask, unsigned int value)
{
	unsigned int cur = inw(addr);

	outw((cur & mask) | value, addr);
}
539
/* Read-modify-write a dword-wide I/O port: port = (port & mask) | value. */
static void maskl(unsigned int addr, unsigned int mask, unsigned int value)
{
	unsigned int cur = inl(addr);

	outl((cur & mask) | value, addr);
}
544
/* Program the secondary DAC (dual-DAC mode, which reuses the channel-0 /
 * ADC frame registers).  addr == 0 keeps the previously programmed base
 * address and only reloads the sample count (see set_dmadac's call). */
static void set_dmadac1(struct cm_state *s, unsigned int addr, unsigned int count)
{
	if (addr)
		outl(addr, s->iobase + CODEC_CMI_ADC_FRAME1);
	/* frame count register takes count - 1 */
	outw(count - 1, s->iobase + CODEC_CMI_ADC_FRAME2);
	/* clear CHADC0: channel 0 plays back instead of capturing */
	maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~CHADC0, 0);
}
552
/* Program channel 0 for capture: DMA base address, sample count - 1, and
 * set CHADC0 so the channel records rather than plays. */
static void set_dmaadc(struct cm_state *s, unsigned int addr, unsigned int count)
{
	outl(addr, s->iobase + CODEC_CMI_ADC_FRAME1);
	outw(count - 1, s->iobase + CODEC_CMI_ADC_FRAME2);
	maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~0, CHADC0);
}
559
/* Program channel 1 for playback: DMA base address, sample count - 1,
 * clear CHADC1 (playback direction).  In dual-DAC mode the second DAC
 * gets the same count but keeps its own base address (addr passed as 0). */
static void set_dmadac(struct cm_state *s, unsigned int addr, unsigned int count)
{
	outl(addr, s->iobase + CODEC_CMI_DAC_FRAME1);
	outw(count - 1, s->iobase + CODEC_CMI_DAC_FRAME2);
	maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~CHADC1, 0);
	if (s->status & DO_DUAL_DAC)
		set_dmadac1(s, 0, count);
}
568
/* Reload the second count field at ADC FRAME2+2 (register takes count - 1).
 * NOTE(review): presumably the per-interrupt sample count — confirm
 * against the CMI8738 datasheet. */
static void set_countadc(struct cm_state *s, unsigned count)
{
	outw(count - 1, s->iobase + CODEC_CMI_ADC_FRAME2 + 2);
}
573
/* Reload the DAC count field at FRAME2+2 (register takes count - 1);
 * mirrored to the second DAC channel in dual-DAC mode. */
static void set_countdac(struct cm_state *s, unsigned count)
{
	outw(count - 1, s->iobase + CODEC_CMI_DAC_FRAME2 + 2);
	if (s->status & DO_DUAL_DAC)
		set_countadc(s, count);
}
580
/* Current playback position as a byte offset into the DMA ring.
 * FRAME2 apparently counts remaining samples down, so convert:
 * bytes_left = (remaining + 1) << sample_shift; pos = dmasize - bytes_left. */
static unsigned get_dmadac(struct cm_state *s)
{
	unsigned int curr_addr;

	curr_addr = inw(s->iobase + CODEC_CMI_DAC_FRAME2) + 1;
	curr_addr <<= sample_shift[(s->fmt >> CM_CFMT_DACSHIFT) & CM_CFMT_MASK];
	curr_addr = s->dma_dac.dmasize - curr_addr;

	return curr_addr;
}
591
/* Current capture position as a byte offset into the DMA ring;
 * same down-counter conversion as get_dmadac(), using the ADC format. */
static unsigned get_dmaadc(struct cm_state *s)
{
	unsigned int curr_addr;

	curr_addr = inw(s->iobase + CODEC_CMI_ADC_FRAME2) + 1;
	curr_addr <<= sample_shift[(s->fmt >> CM_CFMT_ADCSHIFT) & CM_CFMT_MASK];
	curr_addr = s->dma_adc.dmasize - curr_addr;

	return curr_addr;
}
602
/*
 * Write one SB16-style mixer register.  AUX volume is not a real SB16
 * register on this chip: it lives in CODEC_CMI_AUX_VOL (left = low
 * nibble, right = high nibble), handled here as "pseudo registers".
 * A few SB16 bits likewise have no hardware backing and are stored in
 * CODEC_CMI_MIXER2 ("pseudo bits") before the SB16 data write completes
 * the indirect access.  The udelay(10) calls pace the index/data cycle.
 */
static void wrmixer(struct cm_state *s, unsigned char idx, unsigned char data)
{
	unsigned char regval, pseudo;

	// pseudo register
	if (idx == DSP_MIX_AUXVOL_L) {
		/* left channel: high nibble of data -> low nibble of AUX_VOL */
		data >>= 4;
		data &= 0x0f;
		regval = inb(s->iobase + CODEC_CMI_AUX_VOL) & ~0x0f;
		outb(regval | data, s->iobase + CODEC_CMI_AUX_VOL);
		return;
	}
	if (idx == DSP_MIX_AUXVOL_R) {
		/* right channel: high nibble of data -> high nibble of AUX_VOL */
		data &= 0xf0;
		regval = inb(s->iobase + CODEC_CMI_AUX_VOL) & ~0xf0;
		outb(regval | data, s->iobase + CODEC_CMI_AUX_VOL);
		return;
	}
	outb(idx, s->iobase + CODEC_SB16_ADDR);
	udelay(10);
	// pseudo bits
	if (idx == DSP_MIX_OUTMIXIDX) {
		pseudo = data & ~0x1f;
		pseudo >>= 1;
		regval = inb(s->iobase + CODEC_CMI_MIXER2) & ~0x30;
		outb(regval | pseudo, s->iobase + CODEC_CMI_MIXER2);
	}
	if (idx == DSP_MIX_ADCMIXIDX_L) {
		pseudo = data & 0x80;
		pseudo >>= 1;
		regval = inb(s->iobase + CODEC_CMI_MIXER2) & ~0x40;
		outb(regval | pseudo, s->iobase + CODEC_CMI_MIXER2);
	}
	if (idx == DSP_MIX_ADCMIXIDX_R) {
		pseudo = data & 0x80;
		regval = inb(s->iobase + CODEC_CMI_MIXER2) & ~0x80;
		outb(regval | pseudo, s->iobase + CODEC_CMI_MIXER2);
	}
	outb(data, s->iobase + CODEC_SB16_DATA);
	udelay(10);
}
644
645static unsigned char rdmixer(struct cm_state *s, unsigned char idx)
646{
647 unsigned char v, pseudo;
648
649 // pseudo register
650 if (idx == DSP_MIX_AUXVOL_L) {
651 v = inb(s->iobase + CODEC_CMI_AUX_VOL) & 0x0f;
652 v <<= 4;
653 return v;
654 }
655 if (idx == DSP_MIX_AUXVOL_L) {
656 v = inb(s->iobase + CODEC_CMI_AUX_VOL) & 0xf0;
657 return v;
658 }
659 outb(idx, s->iobase + CODEC_SB16_ADDR);
660 udelay(10);
661 v = inb(s->iobase + CODEC_SB16_DATA);
662 udelay(10);
663 // pseudo bits
664 if (idx == DSP_MIX_OUTMIXIDX) {
665 pseudo = inb(s->iobase + CODEC_CMI_MIXER2) & 0x30;
666 pseudo <<= 1;
667 v |= pseudo;
668 }
669 if (idx == DSP_MIX_ADCMIXIDX_L) {
670 pseudo = inb(s->iobase + CODEC_CMI_MIXER2) & 0x40;
671 pseudo <<= 1;
672 v |= pseudo;
673 }
674 if (idx == DSP_MIX_ADCMIXIDX_R) {
675 pseudo = inb(s->iobase + CODEC_CMI_MIXER2) & 0x80;
676 v |= pseudo;
677 }
678 return v;
679}
680
/*
 * Update the channel format register: fmt = (fmt & mask) | data, keeping
 * the shadow copy s->fmt in sync.  Caller must hold s->lock (set_fmt()
 * is the locked wrapper).
 */
static void set_fmt_unlocked(struct cm_state *s, unsigned char mask, unsigned char data)
{
	if (mask && s->chip_version > 0) {	/* 8338 cannot keep this */
		/* re-read the live register so we merge into current state */
		s->fmt = inb(s->iobase + CODEC_CMI_CHFORMAT);
		udelay(10);
	}
	s->fmt = (s->fmt & mask) | data;
	outb(s->fmt, s->iobase + CODEC_CMI_CHFORMAT);
	udelay(10);
}
691
/* Locked wrapper around set_fmt_unlocked(). */
static void set_fmt(struct cm_state *s, unsigned char mask, unsigned char data)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	set_fmt_unlocked(s,mask,data);
	spin_unlock_irqrestore(&s->lock, flags);
}
700
/* Read-modify-write an SB16 indirect mixer register:
 * reg[idx] = (reg[idx] & mask) | data.  udelay(10) paces the indirect
 * index/data accesses, matching the rest of this driver. */
static void frobindir(struct cm_state *s, unsigned char idx, unsigned char mask, unsigned char data)
{
	outb(idx, s->iobase + CODEC_SB16_ADDR);
	udelay(10);
	outb((inb(s->iobase + CODEC_SB16_DATA) & mask) | data, s->iobase + CODEC_SB16_DATA);
	udelay(10);
}
708
/* Supported sample rates with the half-way boundaries to their neighbours
 * — a requested rate in (lower, upper] snaps to this entry — and the
 * 3-bit hardware frequency code for the FUNCTRL1 ASFC/DSFC fields. */
static struct {
	unsigned rate;		/* nominal rate in Hz */
	unsigned lower;		/* exclusive lower bound for snapping */
	unsigned upper;		/* inclusive upper bound for snapping */
	unsigned char freq;	/* hardware frequency code */
} rate_lookup[] =
{
	{ 5512, (0 + 5512) / 2, (5512 + 8000) / 2, 0 },
	{ 8000, (5512 + 8000) / 2, (8000 + 11025) / 2, 4 },
	{ 11025, (8000 + 11025) / 2, (11025 + 16000) / 2, 1 },
	{ 16000, (11025 + 16000) / 2, (16000 + 22050) / 2, 5 },
	{ 22050, (16000 + 22050) / 2, (22050 + 32000) / 2, 2 },
	{ 32000, (22050 + 32000) / 2, (32000 + 44100) / 2, 6 },
	{ 44100, (32000 + 44100) / 2, (44100 + 48000) / 2, 3 },
	{ 48000, (44100 + 48000) / 2, 48000, 7 }
};
725
/* Set or clear the SPDCOPYRHT (copyright assertion) bit for S/PDIF. */
static void set_spdif_copyright(struct cm_state *s, int spdif_copyright)
{
	maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 2, ~SPDCOPYRHT, spdif_copyright ? SPDCOPYRHT : 0);
}
731
732static void set_spdif_loop(struct cm_state *s, int spdif_loop)
733{
734 /* enable SPDIF loop */
735 if (spdif_loop) {
736 s->status |= DO_SPDIF_LOOP;
737 /* turn on spdif-in to spdif-out */
738 maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0, SPDFLOOP);
739 } else {
740 s->status &= ~DO_SPDIF_LOOP;
741 /* turn off spdif-in to spdif-out */
742 maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~SPDFLOOP, 0);
743 }
744}
745
/* Configure S/PDIF monitoring: channel == 2 routes S/PDIF out to the DAC
 * (SPDO2DAC); channel == 0 disables monitoring.  On 039+ chips the CDPLAY
 * mixer bit follows whether any monitoring channel is active. */
static void set_spdif_monitor(struct cm_state *s, int channel)
{
	// SPDO2DAC
	maskw(s->iobase + CODEC_CMI_FUNCTRL1, ~SPDO2DAC, channel == 2 ? SPDO2DAC : 0);
	// CDPLAY
	if (s->chip_version >= 39)
		maskb(s->iobase + CODEC_CMI_MIXER1, ~CDPLAY, channel ? CDPLAY : 0);
}
754
/* Select the S/PDIF output drive level (SPDO5V = 5 V, else default).
 * Not available on the 8338 (chip_version 0). */
static void set_spdifout_level(struct cm_state *s, int level5v)
{
	/* SPDO5V */
	if (s->chip_version > 0)
		maskb(s->iobase + CODEC_CMI_MISC_CTRL + 3, ~SPDO5V, level5v ? SPDO5V : 0);
}
761
762static void set_spdifin_inverse(struct cm_state *s, int spdif_inverse)
763{
764 if (s->chip_version == 0) /* 8338 has not this feature */
765 return;
766 if (spdif_inverse) {
767 /* turn on spdif-in inverse */
768 if (s->chip_version >= 39)
769 maskb(s->iobase + CODEC_CMI_CHFORMAT, ~0, INVSPDIFI);
770 else
771 maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0, 1);
772 } else {
773 /* turn off spdif-ininverse */
774 if (s->chip_version >= 39)
775 maskb(s->iobase + CODEC_CMI_CHFORMAT, ~INVSPDIFI, 0);
776 else
777 maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~1, 0);
778 }
779}
780
/* Select the secondary S/PDIF input connector (SELSPDIFI2, 039+ only). */
static void set_spdifin_channel2(struct cm_state *s, int channel2)
{
	/* SELSPDIFI2 */
	if (s->chip_version >= 39)
		maskb(s->iobase + CODEC_CMI_MISC_CTRL + 1, ~SELSPDIFI2, channel2 ? SELSPDIFI2 : 0);
}
787
/* Set or clear the S/PDIF validity flag (SPDVALID) in the misc register. */
static void set_spdifin_valid(struct cm_state *s, int valid)
{
	/* SPDVALID */
	maskb(s->iobase + CODEC_CMI_MISC, ~SPDVALID, valid ? SPDVALID : 0);
}
793
/*
 * Enable S/PDIF output for 44.1/48 kHz PCM, or disable it for any other
 * rate.  Tracks the result in s->status (DO_SPDIF_OUT).  Caller must hold
 * s->lock (set_spdifout() is the locked wrapper).
 */
static void set_spdifout_unlocked(struct cm_state *s, unsigned rate)
{
	/* note: this normalization is redundant — the next test already
	 * rejects every other rate — kept byte-identical here */
	if (rate != 48000 && rate != 44100)
		rate = 0;
	if (rate == 48000 || rate == 44100) {
		/* S/PDIF loopback and S/PDIF output are mutually exclusive */
		set_spdif_loop(s, 0);
		// SPDF_1
		maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~0, SPDF_1);
		// SPDIFI48K SPDF_AC97
		maskl(s->iobase + CODEC_CMI_MISC_CTRL, ~SPDIF48K, rate == 48000 ? SPDIF48K : 0);
		if (s->chip_version >= 55)
			// SPD32KFMT
			maskb(s->iobase + CODEC_CMI_MISC_CTRL2, ~SPD32KFMT, rate == 48000 ? SPD32KFMT : 0);
		if (s->chip_version > 0)
			// ENSPDOUT
			maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 2, ~0, ENSPDOUT);
		// monitor SPDIF out
		set_spdif_monitor(s, 2);
		s->status |= DO_SPDIF_OUT;
	} else {
		maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~SPDF_1, 0);
		maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 2, ~ENSPDOUT, 0);
		// monitor none
		set_spdif_monitor(s, 0);
		s->status &= ~DO_SPDIF_OUT;
	}
}
821
/* Locked wrapper around set_spdifout_unlocked(). */
static void set_spdifout(struct cm_state *s, unsigned rate)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	set_spdifout_unlocked(s,rate);
	spin_unlock_irqrestore(&s->lock, flags);
}
830
/*
 * Enable S/PDIF input at 44.1/48 kHz, or disable it for any other rate;
 * tracks the result in s->status (DO_SPDIF_IN).  Caller must hold s->lock.
 */
static void set_spdifin_unlocked(struct cm_state *s, unsigned rate)
{
	if (rate == 48000 || rate == 44100) {
		// SPDF_1
		maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~0, SPDF_1);
		// SPDIFI48K SPDF_AC97
		maskl(s->iobase + CODEC_CMI_MISC_CTRL, ~SPDIF48K, rate == 48000 ? SPDIF48K : 0);
		s->status |= DO_SPDIF_IN;
	} else {
		maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~SPDF_1, 0);
		s->status &= ~DO_SPDIF_IN;
	}
}
844
/* Locked wrapper around set_spdifin_unlocked(). */
static void set_spdifin(struct cm_state *s, unsigned rate)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	set_spdifin_unlocked(s,rate);
	spin_unlock_irqrestore(&s->lock, flags);
}
853
/* Parity of bits 4..30 of an S/PDIF subframe word: returns 1 if the
 * number of set bits in that range is odd, 0 if even.  Bits 0..3
 * (preamble) and bit 31 (the parity bit itself) are excluded. */
static unsigned parity(unsigned data)
{
	unsigned ones = 0;
	int bit;

	for (bit = 4; bit <= 30; bit++)
		ones += (data >> bit) & 1;

	return ones & 1;
}
869
/*
 * Enable or disable AC-3 (non-audio) passthrough mode.  A non-zero rate
 * other than 44100 is coerced to 48000; rate 0 disables AC-3.  The exact
 * register bits differ per chip revision (033/037/039/055), and hardware
 * passthrough (CAN_AC3_HW) is preferred over the software fallback that
 * formats subframes in trans_ac3().  Caller must hold s->lock
 * (set_ac3() is the locked wrapper).
 */
static void set_ac3_unlocked(struct cm_state *s, unsigned rate)
{
	if (!(s->capability & CAN_AC3))
		return;
	/* enable AC3 */
	if (rate && rate != 44100)
		rate = 48000;
	if (rate == 48000 || rate == 44100) {
		// mute DAC
		maskb(s->iobase + CODEC_CMI_MIXER1, ~0, WSMUTE);
		if (s->chip_version >= 39)
			maskb(s->iobase + CODEC_CMI_MISC_CTRL, ~0, MUTECH1);
		// AC3EN for 039, 0x04
		if (s->chip_version >= 39) {
			maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0, AC3_EN);
			if (s->chip_version == 55)
				maskb(s->iobase + CODEC_CMI_SPDIF_CTRL, ~2, 0);
		// AC3EN for 037, 0x10
		} else if (s->chip_version == 37)
			maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0, 0x10);
		if (s->capability & CAN_AC3_HW) {
			// SPD24SEL for 039, 0x20, but cannot be set
			if (s->chip_version == 39)
				maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0, SPD24SEL);
			// SPD24SEL for 037, 0x02
			else if (s->chip_version == 37)
				maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~0, 0x02);
			if (s->chip_version >= 39)
				maskb(s->iobase + CODEC_CMI_MIXER1, ~CDPLAY, 0);

			s->status |= DO_AC3_HW;
		} else {
			// SPD32SEL for 037 & 039
			maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0, SPD32SEL);
			// set 176K sample rate to fix 033 HW bug
			if (s->chip_version == 33) {
				if (rate == 48000)
					maskb(s->iobase + CODEC_CMI_CHFORMAT + 1, ~0, 0x08);
				else
					maskb(s->iobase + CODEC_CMI_CHFORMAT + 1, ~0x08, 0);
			}
			s->status |= DO_AC3_SW;
		}
	} else {
		/* disable: unmute, clear all AC-3 related bits, restore CDPLAY */
		maskb(s->iobase + CODEC_CMI_MIXER1, ~WSMUTE, 0);
		if (s->chip_version >= 39)
			maskb(s->iobase + CODEC_CMI_MISC_CTRL, ~MUTECH1, 0);
		maskb(s->iobase + CODEC_CMI_CHFORMAT + 2, ~(SPD24SEL|0x12), 0);
		maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~(SPD32SEL|AC3_EN), 0);
		if (s->chip_version == 33)
			maskb(s->iobase + CODEC_CMI_CHFORMAT + 1, ~0x08, 0);
		if (s->chip_version >= 39)
			maskb(s->iobase + CODEC_CMI_MIXER1, ~0, CDPLAY);
		s->status &= ~DO_AC3;
	}
	/* restart the 384-subframe S/PDIF block counter (see trans_ac3) */
	s->spdif_counter = 0;
}
927
/* Repurpose the line-in jack as a rear speaker output (SPK4 bit);
 * tracked in s->status as DO_LINE_AS_REAR.  No-op without the capability. */
static void set_line_as_rear(struct cm_state *s, int use_line_as_rear)
{
	if (!(s->capability & CAN_LINE_AS_REAR))
		return;
	if (use_line_as_rear) {
		maskb(s->iobase + CODEC_CMI_MIXER1, ~0, SPK4);
		s->status |= DO_LINE_AS_REAR;
	} else {
		maskb(s->iobase + CODEC_CMI_MIXER1, ~SPK4, 0);
		s->status &= ~DO_LINE_AS_REAR;
	}
}
940
/* Repurpose the line-in jack as bass/center output (CB2LIN = BASE2LIN |
 * CENTR2LIN); tracked as DO_LINE_AS_BASS.  No-op without the capability. */
static void set_line_as_bass(struct cm_state *s, int use_line_as_bass)
{
	if (!(s->capability & CAN_LINE_AS_BASS))
		return;
	if (use_line_as_bass) {
		maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~0, CB2LIN);
		s->status |= DO_LINE_AS_BASS;
	} else {
		maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~CB2LIN, 0);
		s->status &= ~DO_LINE_AS_BASS;
	}
}
953
/* Repurpose the mic-in jack as bass/center output; tracked as
 * DO_MIC_AS_BASS.  The literal 0x04 in CODEC_CMI_MISC is the CENTR2MIC
 * bit defined above.  No-op without the capability. */
static void set_mic_as_bass(struct cm_state *s, int use_mic_as_bass)
{
	if (!(s->capability & CAN_MIC_AS_BASS))
		return;
	if (use_mic_as_bass) {
		maskb(s->iobase + CODEC_CMI_MISC, ~0, 0x04);
		s->status |= DO_MIC_AS_BASS;
	} else {
		maskb(s->iobase + CODEC_CMI_MISC, ~0x04, 0);
		s->status &= ~DO_MIC_AS_BASS;
	}
}
966
967static void set_hw_copy(struct cm_state *s, int hw_copy)
968{
969 if (s->max_channels > 2 && hw_copy)
970 maskb(s->iobase + CODEC_CMI_MISC_CTRL + 3, ~0, N4SPK3D);
971 else
972 maskb(s->iobase + CODEC_CMI_MISC_CTRL + 3, ~N4SPK3D, 0);
973}
974
/* Locked entry point for AC-3 passthrough: configure S/PDIF output first,
 * then the AC-3 specific bits, under a single lock acquisition. */
static void set_ac3(struct cm_state *s, unsigned rate)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	set_spdifout_unlocked(s, rate);
	set_ac3_unlocked(s, rate);
	spin_unlock_irqrestore(&s->lock, flags);
}
984
985static int trans_ac3(struct cm_state *s, void *dest, const char __user *source, int size)
986{
987 int i = size / 2;
988 unsigned long data;
989 unsigned short data16;
990 unsigned long *dst = (unsigned long *) dest;
991 unsigned short __user *src = (unsigned short __user *)source;
992 int err;
993
994 do {
995 if ((err = __get_user(data16, src++)))
996 return err;
997 data = (unsigned long)le16_to_cpu(data16);
998 data <<= 12; // ok for 16-bit data
999 if (s->spdif_counter == 2 || s->spdif_counter == 3)
1000 data |= 0x40000000; // indicate AC-3 raw data
1001 if (parity(data))
1002 data |= 0x80000000; // parity
1003 if (s->spdif_counter == 0)
1004 data |= 3; // preamble 'M'
1005 else if (s->spdif_counter & 1)
1006 data |= 5; // odd, 'W'
1007 else
1008 data |= 9; // even, 'M'
1009 *dst++ = cpu_to_le32(data);
1010 s->spdif_counter++;
1011 if (s->spdif_counter == 384)
1012 s->spdif_counter = 0;
1013 } while (--i);
1014
1015 return 0;
1016}
1017
/*
 * Clamp @rate to 8000..48000 Hz, snap it to the nearest rate_lookup[]
 * entry, remember it in s->rateadc, and program the ASFC frequency-code
 * field of FUNCTRL1+1.  Caller must hold s->lock.
 */
static void set_adc_rate_unlocked(struct cm_state *s, unsigned rate)
{
	unsigned char freq = 4;	/* fallback code if no table entry matches */
	int i;

	if (rate > 48000)
		rate = 48000;
	if (rate < 8000)
		rate = 8000;
	for (i = 0; i < sizeof(rate_lookup) / sizeof(rate_lookup[0]); i++) {
		if (rate > rate_lookup[i].lower && rate <= rate_lookup[i].upper) {
			rate = rate_lookup[i].rate;
			freq = rate_lookup[i].freq;
			break;
		}
	}
	s->rateadc = rate;
	freq <<= CM_FREQ_ADCSHIFT;

	maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~ASFC, freq);
}
1039
1040static void set_adc_rate(struct cm_state *s, unsigned rate)
1041{
1042 unsigned long flags;
1043 unsigned char freq = 4;
1044 int i;
1045
1046 if (rate > 48000)
1047 rate = 48000;
1048 if (rate < 8000)
1049 rate = 8000;
1050 for (i = 0; i < sizeof(rate_lookup) / sizeof(rate_lookup[0]); i++) {
1051 if (rate > rate_lookup[i].lower && rate <= rate_lookup[i].upper) {
1052 rate = rate_lookup[i].rate;
1053 freq = rate_lookup[i].freq;
1054 break;
1055 }
1056 }
1057 s->rateadc = rate;
1058 freq <<= CM_FREQ_ADCSHIFT;
1059
1060 spin_lock_irqsave(&s->lock, flags);
1061 maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~ASFC, freq);
1062 spin_unlock_irqrestore(&s->lock, flags);
1063}
1064
/*
 * Set the playback (DAC) sample rate, taking the channel lock internally.
 * The rate is clamped to 8000..48000 Hz, snapped via rate_lookup[],
 * stored in s->ratedac, and the freq-select code is written to the DSFC
 * field of FUNCTRL1.  Afterwards the S/PDIF output (for <=2 channels)
 * and the secondary DAC (in dual-DAC mode) are kept in sync.
 */
static void set_dac_rate(struct cm_state *s, unsigned rate)
{
	unsigned long flags;
	unsigned char freq = 4;		/* fallback freq-select code if no table entry matches */
	int i;

	if (rate > 48000)
		rate = 48000;
	if (rate < 8000)
		rate = 8000;
	for (i = 0; i < sizeof(rate_lookup) / sizeof(rate_lookup[0]); i++) {
		if (rate > rate_lookup[i].lower && rate <= rate_lookup[i].upper) {
			rate = rate_lookup[i].rate;
			freq = rate_lookup[i].freq;
			break;
		}
	}
	s->ratedac = rate;
	freq <<= CM_FREQ_DACSHIFT;

	spin_lock_irqsave(&s->lock, flags);
	maskb(s->iobase + CODEC_CMI_FUNCTRL1 + 1, ~DSFC, freq);
	spin_unlock_irqrestore(&s->lock, flags);

	/* keep the S/PDIF transmitter and dual-DAC partner at the same rate */
	if (s->curr_channels <= 2 && spdif_out)
		set_spdifout(s, rate);
	if (s->status & DO_DUAL_DAC)
		set_dac1_rate(s, rate);
}
1094
1095/* --------------------------------------------------------------------- */
/* Pulse the ADC bus-master reset bit: assert RSTADC, wait 10us, deassert. */
static inline void reset_adc(struct cm_state *s)
{
	/* reset bus master */
	outb(s->enable | RSTADC, s->iobase + CODEC_CMI_FUNCTRL0 + 2);
	udelay(10);
	outb(s->enable & ~RSTADC, s->iobase + CODEC_CMI_FUNCTRL0 + 2);
}
1103
/* Pulse the DAC bus-master reset bit; in dual-DAC mode the second DAC
 * runs on the capture channel, so reset that one too. */
static inline void reset_dac(struct cm_state *s)
{
	/* reset bus master */
	outb(s->enable | RSTDAC, s->iobase + CODEC_CMI_FUNCTRL0 + 2);
	udelay(10);
	outb(s->enable & ~RSTDAC, s->iobase + CODEC_CMI_FUNCTRL0 + 2);
	if (s->status & DO_DUAL_DAC)
		reset_adc(s);
}
1113
/* Set the PAUSEADC bit to suspend the capture channel without resetting it. */
static inline void pause_adc(struct cm_state *s)
{
	maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~0, PAUSEADC);
}
1118
/* Set the PAUSEDAC bit to suspend playback; in dual-DAC mode also pause
 * the capture channel that carries the second DAC. */
static inline void pause_dac(struct cm_state *s)
{
	maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~0, PAUSEDAC);
	if (s->status & DO_DUAL_DAC)
		pause_adc(s);
}
1125
/* Clear ENADC in the cached enable mask, write it out, then reset the
 * ADC bus master so the channel stops cleanly. */
static inline void disable_adc(struct cm_state *s)
{
	/* disable channel */
	s->enable &= ~ENADC;
	outb(s->enable, s->iobase + CODEC_CMI_FUNCTRL0 + 2);
	reset_adc(s);
}
1133
/* Clear ENDAC, write the enable mask out, reset the DAC bus master,
 * and in dual-DAC mode shut down the capture channel as well. */
static inline void disable_dac(struct cm_state *s)
{
	/* disable channel */
	s->enable &= ~ENDAC;
	outb(s->enable, s->iobase + CODEC_CMI_FUNCTRL0 + 2);
	reset_dac(s);
	if (s->status & DO_DUAL_DAC)
		disable_adc(s);
}
1143
/* Turn the capture channel on (if not already enabled) and clear any
 * pending PAUSEADC so it actually runs. */
static inline void enable_adc(struct cm_state *s)
{
	if (!(s->enable & ENADC)) {
		/* enable channel */
		s->enable |= ENADC;
		outb(s->enable, s->iobase + CODEC_CMI_FUNCTRL0 + 2);
	}
	maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~PAUSEADC, 0);
}
1153
/* Turn the playback channel on (if not already enabled), clear PAUSEDAC,
 * and in dual-DAC mode bring up the capture channel that carries the
 * second DAC.  Caller must hold s->lock. */
static inline void enable_dac_unlocked(struct cm_state *s)
{
	if (!(s->enable & ENDAC)) {
		/* enable channel */
		s->enable |= ENDAC;
		outb(s->enable, s->iobase + CODEC_CMI_FUNCTRL0 + 2);
	}
	maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~PAUSEDAC, 0);

	if (s->status & DO_DUAL_DAC)
		enable_adc(s);
}
1166
/* Stop capture: mask the ADC interrupt, then disable and reset the
 * channel.  No-op if the channel is not enabled.  Caller holds s->lock. */
static inline void stop_adc_unlocked(struct cm_state *s)
{
	if (s->enable & ENADC) {
		/* disable interrupt */
		maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~ENADCINT, 0);
		disable_adc(s);
	}
}
1175
/* Locked wrapper around stop_adc_unlocked(). */
static inline void stop_adc(struct cm_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	stop_adc_unlocked(s);
	spin_unlock_irqrestore(&s->lock, flags);

}
1185
/* Stop playback: mask the DAC interrupt, disable and reset the channel,
 * and in dual-DAC mode stop the secondary DAC too.  Caller holds s->lock. */
static inline void stop_dac_unlocked(struct cm_state *s)
{
	if (s->enable & ENDAC) {
		/* disable interrupt */
		maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~ENDACINT, 0);
		disable_dac(s);
	}
	if (s->status & DO_DUAL_DAC)
		stop_dac1_unlocked(s);
}
1196
/* Locked wrapper around stop_dac_unlocked(). */
static inline void stop_dac(struct cm_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	stop_dac_unlocked(s);
	spin_unlock_irqrestore(&s->lock, flags);
}
1205
/* Start capture if the buffer is programmed and there is room for at
 * least two fragments (mmap'ed buffers always qualify): unmask the ADC
 * interrupt and enable the channel.  Caller holds s->lock. */
static inline void start_adc_unlocked(struct cm_state *s)
{
	if ((s->dma_adc.mapped || s->dma_adc.count < (signed)(s->dma_adc.dmasize - 2*s->dma_adc.fragsize))
	    && s->dma_adc.ready) {
		/* enable interrupt */
		maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~0, ENADCINT);
		enable_adc(s);
	}
}
1215
/* Locked wrapper around start_adc_unlocked(). */
static void start_adc(struct cm_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	start_adc_unlocked(s);
	spin_unlock_irqrestore(&s->lock, flags);
}
1224
/* Start the secondary DAC used in dual-DAC mode.  The second DAC is
 * driven through the capture channel's buffer and interrupt (dma_adc /
 * ENADCINT), which is why those are tested here.  Caller holds s->lock. */
static void start_dac1_unlocked(struct cm_state *s)
{
	if ((s->dma_adc.mapped || s->dma_adc.count > 0) && s->dma_adc.ready) {
		/* enable interrupt */
		maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~0, ENADCINT);
		enable_dac_unlocked(s);
	}
}
1233
/* Start playback if the buffer is programmed and holds data (or is
 * mmap'ed): unmask the DAC interrupt and enable the channel.  In
 * dual-DAC mode also start the secondary DAC.  Caller holds s->lock. */
static void start_dac_unlocked(struct cm_state *s)
{
	if ((s->dma_dac.mapped || s->dma_dac.count > 0) && s->dma_dac.ready) {
		/* enable interrupt */
		maskb(s->iobase + CODEC_CMI_INT_HLDCLR + 2, ~0, ENDACINT);
		enable_dac_unlocked(s);
	}
	if (s->status & DO_DUAL_DAC)
		start_dac1_unlocked(s);
}
1244
/* Locked wrapper around start_dac_unlocked(). */
static void start_dac(struct cm_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	start_dac_unlocked(s);
	spin_unlock_irqrestore(&s->lock, flags);
}
1253
1254static int prog_dmabuf(struct cm_state *s, unsigned rec);
1255
/*
 * Switch the playback path between stereo/mono and multi-channel modes.
 *
 * For channels > 2 (only with 16-bit stereo DAC format and within
 * s->max_channels) the function either programs native multi-channel
 * hardware (CAN_MULTI_CH_HW) or falls back to dual-DAC mode
 * (CAN_DUAL_DAC), where a second DMA buffer is prepared and the FM
 * synth is muted.  Otherwise it tears the multi-channel setup back down.
 * Jack redirection (line/mic as rear/bass) and 4-speaker duplication are
 * adjusted to match the selected mode.
 *
 * Returns the resulting channel count, or a negative error from
 * prog_dmabuf() when the secondary buffer cannot be set up.
 *
 * NOTE(review): fmmute is function-static, so the saved FM-mute state is
 * shared across all card instances — confirm this is acceptable for
 * multi-card setups.
 */
static int set_dac_channels(struct cm_state *s, int channels)
{
	unsigned long flags;
	static unsigned int fmmute = 0;

	spin_lock_irqsave(&s->lock, flags);

	if ((channels > 2) && (channels <= s->max_channels)
	 && (((s->fmt >> CM_CFMT_DACSHIFT) & CM_CFMT_MASK) == (CM_CFMT_STEREO | CM_CFMT_16BIT))) {
		/* S/PDIF cannot carry the multi-channel stream */
		set_spdifout_unlocked(s, 0);
		if (s->capability & CAN_MULTI_CH_HW) {
			// NXCHG
			maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3, ~0, NXCHG);
			// CHB3D or CHB3D5C
			maskb(s->iobase + CODEC_CMI_CHFORMAT + 3, ~(CHB3D5C|CHB3D), channels > 4 ? CHB3D5C : CHB3D);
			// CHB3D6C
			maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~CHB3D6C, channels == 6 ? CHB3D6C : 0);
			// ENCENTER
			maskb(s->iobase + CODEC_CMI_MISC_CTRL, ~ENCENTER, channels == 6 ? ENCENTER : 0);
			s->status |= DO_MULTI_CH_HW;
		} else if (s->capability & CAN_DUAL_DAC) {
			unsigned char fmtm = ~0, fmts = 0;
			ssize_t ret;

			// ENDBDAC, turn on double DAC mode
			// XCHGDAC, CH0 -> back, CH1->front
			maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0, ENDBDAC|XCHGDAC);
			// mute FM
			fmmute = inb(s->iobase + CODEC_CMI_MIXER1) & FMMUTE;
			maskb(s->iobase + CODEC_CMI_MIXER1, ~0, FMMUTE);
			s->status |= DO_DUAL_DAC;
			// prepare secondary buffer
			/* prog_dmabuf() sleeps/allocates, so drop the lock around it */
			spin_unlock_irqrestore(&s->lock, flags);
			ret = prog_dmabuf(s, 1);
			if (ret) return ret;
			spin_lock_irqsave(&s->lock, flags);

			// copy the hw state
			fmtm &= ~((CM_CFMT_STEREO | CM_CFMT_16BIT) << CM_CFMT_DACSHIFT);
			fmtm &= ~((CM_CFMT_STEREO | CM_CFMT_16BIT) << CM_CFMT_ADCSHIFT);
			// the HW only support 16-bit stereo
			fmts |= CM_CFMT_16BIT << CM_CFMT_DACSHIFT;
			fmts |= CM_CFMT_16BIT << CM_CFMT_ADCSHIFT;
			fmts |= CM_CFMT_STEREO << CM_CFMT_DACSHIFT;
			fmts |= CM_CFMT_STEREO << CM_CFMT_ADCSHIFT;

			set_fmt_unlocked(s, fmtm, fmts);
			set_adc_rate_unlocked(s, s->ratedac);
		}
		// disable 4 speaker mode (analog duplicate)
		set_hw_copy(s, 0);
		s->curr_channels = channels;

		// enable jack redirect
		set_line_as_rear(s, use_line_as_rear);
		if (channels > 4) {
			set_line_as_bass(s, use_line_as_bass);
			set_mic_as_bass(s, use_mic_as_bass);
		}
	} else {
		/* tear down whichever multi-channel mode was active */
		if (s->status & DO_MULTI_CH_HW) {
			maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3, ~NXCHG, 0);
			maskb(s->iobase + CODEC_CMI_CHFORMAT + 3, ~(CHB3D5C|CHB3D), 0);
			maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 1, ~CHB3D6C, 0);
		} else if (s->status & DO_DUAL_DAC) {
			maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~ENDBDAC, 0);
			/* restore the FM mute bit saved when dual-DAC was enabled */
			maskb(s->iobase + CODEC_CMI_MIXER1, ~FMMUTE, fmmute);
		}
		// enable 4 speaker mode (analog duplicate)
		set_hw_copy(s, hw_copy);
		s->status &= ~DO_MULTI_CH;
		s->curr_channels = s->fmt & (CM_CFMT_STEREO << CM_CFMT_DACSHIFT) ? 2 : 1;
		// disable jack redirect
		set_line_as_rear(s, hw_copy ? use_line_as_rear : 0);
		set_line_as_bass(s, 0);
		set_mic_as_bass(s, 0);
	}
	spin_unlock_irqrestore(&s->lock, flags);
	return s->curr_channels;
}
1336
1337/* --------------------------------------------------------------------- */
1338
1339#define DMABUF_DEFAULTORDER (16-PAGE_SHIFT)
1340#define DMABUF_MINORDER 1
1341
/* Release a DMA buffer: un-reserve its pages (the inverse of the
 * SetPageReserved done at allocation time, needed for mmap) and free the
 * consistent DMA memory.  Safe to call when no buffer is allocated. */
static void dealloc_dmabuf(struct cm_state *s, struct dmabuf *db)
{
	struct page *pstart, *pend;

	if (db->rawbuf) {
		/* undo marking the pages as reserved */
		pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
		for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++)
			ClearPageReserved(pstart);
		pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
	}
	db->rawbuf = NULL;
	db->mapped = db->ready = 0;
}
1356
1357/* Ch1 is used for playback, Ch0 is used for recording */
1358
/*
 * Allocate (if needed) and program the DMA buffer for one direction.
 * rec != 0 selects the capture side (dma_adc/rateadc), otherwise playback
 * (dma_dac/ratedac).  The corresponding channel is stopped first, the
 * fragment geometry is derived from the OSS fragment settings and the
 * data rate, the buffer is filled with silence, and the hardware DMA
 * address and sample counts are programmed under the lock.
 *
 * Returns 0 on success, -ENOMEM if no DMA buffer could be allocated.
 */
static int prog_dmabuf(struct cm_state *s, unsigned rec)
{
	struct dmabuf *db = rec ? &s->dma_adc : &s->dma_dac;
	unsigned rate = rec ? s->rateadc : s->ratedac;
	int order;
	unsigned bytepersec;
	unsigned bufs;
	struct page *pstart, *pend;
	unsigned char fmt;
	unsigned long flags;

	fmt = s->fmt;
	if (rec) {
		stop_adc(s);
		fmt >>= CM_CFMT_ADCSHIFT;
	} else {
		stop_dac(s);
		fmt >>= CM_CFMT_DACSHIFT;
	}

	fmt &= CM_CFMT_MASK;
	db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
	if (!db->rawbuf) {
		db->ready = db->mapped = 0;
		/* try the largest buffer first, halving until allocation succeeds */
		for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
			if ((db->rawbuf = pci_alloc_consistent(s->dev, PAGE_SIZE << order, &db->dmaaddr)))
				break;
		if (!db->rawbuf || !db->dmaaddr)
			return -ENOMEM;
		db->buforder = order;
		/* now mark the pages as reserved; otherwise remap_pfn_range doesn't do what we want */
		pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
		for (pstart = virt_to_page(db->rawbuf); pstart <= pend; pstart++)
			SetPageReserved(pstart);
	}
	bytepersec = rate << sample_shift[fmt];
	bufs = PAGE_SIZE << db->buforder;
	if (db->ossfragshift) {
		/* honour SNDCTL_DSP_SETFRAGMENT, but keep fragments >= ~1ms of data */
		if ((1000 << db->ossfragshift) < bytepersec)
			db->fragshift = ld2(bytepersec/1000);
		else
			db->fragshift = db->ossfragshift;
	} else {
		db->fragshift = ld2(bytepersec/100/(db->subdivision ? db->subdivision : 1));
		if (db->fragshift < 3)
			db->fragshift = 3;
	}
	db->numfrag = bufs >> db->fragshift;
	/* shrink fragments until at least 4 fit in the buffer */
	while (db->numfrag < 4 && db->fragshift > 3) {
		db->fragshift--;
		db->numfrag = bufs >> db->fragshift;
	}
	db->fragsize = 1 << db->fragshift;
	if (db->ossmaxfrags >= 4 && db->ossmaxfrags < db->numfrag)
		db->numfrag = db->ossmaxfrags;
	/* to make fragsize >= 4096 */
	db->fragsamples = db->fragsize >> sample_shift[fmt];
	db->dmasize = db->numfrag << db->fragshift;
	db->dmasamples = db->dmasize >> sample_shift[fmt];
	/* silence: 0x80 for unsigned 8-bit data, 0 for 16-bit */
	memset(db->rawbuf, (fmt & CM_CFMT_16BIT) ? 0 : 0x80, db->dmasize);
	spin_lock_irqsave(&s->lock, flags);
	if (rec) {
		/* in dual-DAC mode the "capture" buffer feeds the second DAC */
		if (s->status & DO_DUAL_DAC)
			set_dmadac1(s, db->dmaaddr, db->dmasize >> sample_shift[fmt]);
		else
			set_dmaadc(s, db->dmaaddr, db->dmasize >> sample_shift[fmt]);
		/* program sample counts */
		/* NOTE(review): this programs the DAC count even in the plain
		 * capture case — confirm whether set_countadc was intended here
		 * (cm_read's recovery path uses set_countadc). */
		set_countdac(s, db->fragsamples);
	} else {
		set_dmadac(s, db->dmaaddr, db->dmasize >> sample_shift[fmt]);
		/* program sample counts */
		set_countdac(s, db->fragsamples);
	}
	spin_unlock_irqrestore(&s->lock, flags);
	db->enabled = 1;
	db->ready = 1;
	return 0;
}
1437
/* Pre-fill the next playback fragment (starting at the software pointer)
 * with silence so the hardware plays silence instead of stale data on
 * underrun.  Wraps around the end of the ring buffer, and in dual-DAC
 * mode clears the same region of the secondary (dma_adc) buffer too. */
static inline void clear_advance(struct cm_state *s)
{
	/* silence byte: 0 for 16-bit data, 0x80 for unsigned 8-bit */
	unsigned char c = (s->fmt & (CM_CFMT_16BIT << CM_CFMT_DACSHIFT)) ? 0 : 0x80;
	unsigned char *buf = s->dma_dac.rawbuf;
	unsigned char *buf1 = s->dma_adc.rawbuf;
	unsigned bsize = s->dma_dac.dmasize;
	unsigned bptr = s->dma_dac.swptr;
	unsigned len = s->dma_dac.fragsize;

	if (bptr + len > bsize) {
		/* fragment wraps: clear to the end, then continue from the start */
		unsigned x = bsize - bptr;
		memset(buf + bptr, c, x);
		if (s->status & DO_DUAL_DAC)
			memset(buf1 + bptr, c, x);
		bptr = 0;
		len -= x;
	}
	memset(buf + bptr, c, len);
	if (s->status & DO_DUAL_DAC)
		memset(buf1 + bptr, c, len);
}
1459
1460/* call with spinlock held! */
1461static void cm_update_ptr(struct cm_state *s)
1462{
1463 unsigned hwptr;
1464 int diff;
1465
1466 /* update ADC pointer */
1467 if (s->dma_adc.ready) {
1468 if (s->status & DO_DUAL_DAC) {
1469 /* the dac part will finish for this */
1470 } else {
1471 hwptr = get_dmaadc(s) % s->dma_adc.dmasize;
1472 diff = (s->dma_adc.dmasize + hwptr - s->dma_adc.hwptr) % s->dma_adc.dmasize;
1473 s->dma_adc.hwptr = hwptr;
1474 s->dma_adc.total_bytes += diff;
1475 s->dma_adc.count += diff;
1476 if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
1477 wake_up(&s->dma_adc.wait);
1478 if (!s->dma_adc.mapped) {
1479 if (s->dma_adc.count > (signed)(s->dma_adc.dmasize - ((3 * s->dma_adc.fragsize) >> 1))) {
1480 pause_adc(s);
1481 s->dma_adc.error++;
1482 }
1483 }
1484 }
1485 }
1486 /* update DAC pointer */
1487 if (s->dma_dac.ready) {
1488 hwptr = get_dmadac(s) % s->dma_dac.dmasize;
1489 diff = (s->dma_dac.dmasize + hwptr - s->dma_dac.hwptr) % s->dma_dac.dmasize;
1490 s->dma_dac.hwptr = hwptr;
1491 s->dma_dac.total_bytes += diff;
1492 if (s->status & DO_DUAL_DAC) {
1493 s->dma_adc.hwptr = hwptr;
1494 s->dma_adc.total_bytes += diff;
1495 }
1496 if (s->dma_dac.mapped) {
1497 s->dma_dac.count += diff;
1498 if (s->status & DO_DUAL_DAC)
1499 s->dma_adc.count += diff;
1500 if (s->dma_dac.count >= (signed)s->dma_dac.fragsize)
1501 wake_up(&s->dma_dac.wait);
1502 } else {
1503 s->dma_dac.count -= diff;
1504 if (s->status & DO_DUAL_DAC)
1505 s->dma_adc.count -= diff;
1506 if (s->dma_dac.count <= 0) {
1507 pause_dac(s);
1508 s->dma_dac.error++;
1509 } else if (s->dma_dac.count <= (signed)s->dma_dac.fragsize && !s->dma_dac.endcleared) {
1510 clear_advance(s);
1511 s->dma_dac.endcleared = 1;
1512 if (s->status & DO_DUAL_DAC)
1513 s->dma_adc.endcleared = 1;
1514 }
1515 if (s->dma_dac.count + (signed)s->dma_dac.fragsize <= (signed)s->dma_dac.dmasize)
1516 wake_up(&s->dma_dac.wait);
1517 }
1518 }
1519}
1520
/*
 * Interrupt handler.  Bit 31 of the interrupt status register says
 * whether this card raised the interrupt at all (fast exit for shared
 * IRQ lines).  ADC/DAC interrupts are acknowledged by toggling their
 * enable bits in INT_HLDCLR, then the DMA pointers are updated.  A UART
 * interrupt is forwarded to the MPU-401 code when MIDI support is built.
 */
static irqreturn_t cm_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct cm_state *s = (struct cm_state *)dev_id;
	unsigned int intsrc, intstat;
	unsigned char mask = 0;

	/* fastpath out, to ease interrupt sharing */
	intsrc = inl(s->iobase + CODEC_CMI_INT_STATUS);
	if (!(intsrc & 0x80000000))
		return IRQ_NONE;
	spin_lock(&s->lock);
	intstat = inb(s->iobase + CODEC_CMI_INT_HLDCLR + 2);
	/* acknowledge interrupt */
	if (intsrc & ADCINT)
		mask |= ENADCINT;
	if (intsrc & DACINT)
		mask |= ENDACINT;
	/* clear then re-set the enable bits to ack the pending interrupts */
	outb(intstat & ~mask, s->iobase + CODEC_CMI_INT_HLDCLR + 2);
	outb(intstat | mask, s->iobase + CODEC_CMI_INT_HLDCLR + 2);
	cm_update_ptr(s);
	spin_unlock(&s->lock);
#ifdef CONFIG_SOUND_CMPCI_MIDI
	if (intsrc & 0x00010000) { // UART interrupt
		if (s->midi_devc && intchk_mpu401((void *)s->midi_devc))
			mpuintr(irq, (void *)s->midi_devc, regs);
		else
			inb(s->iomidi);// dummy read
	}
#endif
	return IRQ_HANDLED;
}
1552
1553/* --------------------------------------------------------------------- */
1554
static const char invalid_magic[] = KERN_CRIT "cmpci: invalid magic value\n";

/* Sanity-check a cm_state pointer against its magic field.
 * NOTE: this macro expands to a statement that makes the *calling*
 * function return -ENXIO when the check fails. */
#define VALIDATE_STATE(s) \
({ \
	if (!(s) || (s)->magic != CM_MAGIC) { \
		printk(invalid_magic); \
		return -ENXIO; \
	} \
})
1564
1565/* --------------------------------------------------------------------- */
1566
/* Mixer register layout types, interpreted by mixer_ioctl(): MT_4 writes
 * a 4-bit level into the low nibble; MT_5MUTE / MT_5MUTEMONO write a
 * 5-bit level shifted into the top bits (mono variants drive one
 * register for both channels); MT_6MUTE writes a wider 6-bit-style
 * level.  MT_4MUTEMONO is the 4-bit mono variant. */
#define MT_4 1
#define MT_5MUTE 2
#define MT_4MUTEMONO 3
#define MT_6MUTE 4
#define MT_5MUTEMONO 5

/* Per-OSS-device mixer description: left/right register indices, the
 * layout type above, and the bits this source contributes to the
 * record-select (rec) and output-select (play) registers. */
static const struct {
	unsigned left;
	unsigned right;
	unsigned type;
	unsigned rec;
	unsigned play;
} mixtable[SOUND_MIXER_NRDEVICES] = {
	[SOUND_MIXER_CD] = { DSP_MIX_CDVOLIDX_L, DSP_MIX_CDVOLIDX_R, MT_5MUTE, 0x04, 0x06 },
	[SOUND_MIXER_LINE] = { DSP_MIX_LINEVOLIDX_L, DSP_MIX_LINEVOLIDX_R, MT_5MUTE, 0x10, 0x18 },
	[SOUND_MIXER_MIC] = { DSP_MIX_MICVOLIDX, DSP_MIX_MICVOLIDX, MT_5MUTEMONO, 0x01, 0x01 },
	[SOUND_MIXER_SYNTH] = { DSP_MIX_FMVOLIDX_L, DSP_MIX_FMVOLIDX_R, MT_5MUTE, 0x40, 0x00 },
	[SOUND_MIXER_VOLUME] = { DSP_MIX_MASTERVOLIDX_L, DSP_MIX_MASTERVOLIDX_R, MT_5MUTE, 0x00, 0x00 },
	[SOUND_MIXER_PCM] = { DSP_MIX_VOICEVOLIDX_L, DSP_MIX_VOICEVOLIDX_R, MT_5MUTE, 0x00, 0x00 },
	[SOUND_MIXER_LINE1] = { DSP_MIX_AUXVOL_L, DSP_MIX_AUXVOL_R, MT_5MUTE, 0x80, 0x60 },
	[SOUND_MIXER_SPEAKER]= { DSP_MIX_SPKRVOLIDX, DSP_MIX_SPKRVOLIDX, MT_5MUTEMONO, 0x00, 0x01 }
};
1589
/* 1-based index into the cached volume array s->mix.vol[] for each OSS
 * mixer device; 0 means the device has no cached volume slot. */
static const unsigned char volidx[SOUND_MIXER_NRDEVICES] =
{
	[SOUND_MIXER_CD] = 1,
	[SOUND_MIXER_LINE] = 2,
	[SOUND_MIXER_MIC] = 3,
	[SOUND_MIXER_SYNTH] = 4,
	[SOUND_MIXER_VOLUME] = 5,
	[SOUND_MIXER_PCM] = 6,
	[SOUND_MIXER_LINE1] = 7,
	[SOUND_MIXER_SPEAKER]= 8
};
1601
1602static unsigned mixer_outmask(struct cm_state *s)
1603{
1604 unsigned long flags;
1605 int i, j, k;
1606
1607 spin_lock_irqsave(&s->lock, flags);
1608 j = rdmixer(s, DSP_MIX_OUTMIXIDX);
1609 spin_unlock_irqrestore(&s->lock, flags);
1610 for (k = i = 0; i < SOUND_MIXER_NRDEVICES; i++)
1611 if (j & mixtable[i].play)
1612 k |= 1 << i;
1613 return k;
1614}
1615
1616static unsigned mixer_recmask(struct cm_state *s)
1617{
1618 unsigned long flags;
1619 int i, j, k;
1620
1621 spin_lock_irqsave(&s->lock, flags);
1622 j = rdmixer(s, DSP_MIX_ADCMIXIDX_L);
1623 spin_unlock_irqrestore(&s->lock, flags);
1624 for (k = i = 0; i < SOUND_MIXER_NRDEVICES; i++)
1625 if (j & mixtable[i].rec)
1626 k |= 1 << i;
1627 return k;
1628}
1629
1630static int mixer_ioctl(struct cm_state *s, unsigned int cmd, unsigned long arg)
1631{
1632 unsigned long flags;
1633 int i, val, j;
1634 unsigned char l, r, rl, rr;
1635 void __user *argp = (void __user *)arg;
1636 int __user *p = argp;
1637
1638 VALIDATE_STATE(s);
1639 if (cmd == SOUND_MIXER_INFO) {
1640 mixer_info info;
1641 memset(&info, 0, sizeof(info));
1642 strlcpy(info.id, "cmpci", sizeof(info.id));
1643 strlcpy(info.name, "C-Media PCI", sizeof(info.name));
1644 info.modify_counter = s->mix.modcnt;
1645 if (copy_to_user(argp, &info, sizeof(info)))
1646 return -EFAULT;
1647 return 0;
1648 }
1649 if (cmd == SOUND_OLD_MIXER_INFO) {
1650 _old_mixer_info info;
1651 memset(&info, 0, sizeof(info));
1652 strlcpy(info.id, "cmpci", sizeof(info.id));
1653 strlcpy(info.name, "C-Media cmpci", sizeof(info.name));
1654 if (copy_to_user(argp, &info, sizeof(info)))
1655 return -EFAULT;
1656 return 0;
1657 }
1658 if (cmd == OSS_GETVERSION)
1659 return put_user(SOUND_VERSION, p);
1660 if (_IOC_TYPE(cmd) != 'M' || _SIOC_SIZE(cmd) != sizeof(int))
1661 return -EINVAL;
1662 if (_SIOC_DIR(cmd) == _SIOC_READ) {
1663 switch (_IOC_NR(cmd)) {
1664 case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */
1665 val = mixer_recmask(s);
1666 return put_user(val, p);
1667
1668 case SOUND_MIXER_OUTSRC: /* Arg contains a bit for each recording source */
1669 val = mixer_outmask(s);
1670 return put_user(val, p);
1671
1672 case SOUND_MIXER_DEVMASK: /* Arg contains a bit for each supported device */
1673 for (val = i = 0; i < SOUND_MIXER_NRDEVICES; i++)
1674 if (mixtable[i].type)
1675 val |= 1 << i;
1676 return put_user(val, p);
1677
1678 case SOUND_MIXER_RECMASK: /* Arg contains a bit for each supported recording source */
1679 for (val = i = 0; i < SOUND_MIXER_NRDEVICES; i++)
1680 if (mixtable[i].rec)
1681 val |= 1 << i;
1682 return put_user(val, p);
1683
1684 case SOUND_MIXER_OUTMASK: /* Arg contains a bit for each supported recording source */
1685 for (val = i = 0; i < SOUND_MIXER_NRDEVICES; i++)
1686 if (mixtable[i].play)
1687 val |= 1 << i;
1688 return put_user(val, p);
1689
1690 case SOUND_MIXER_STEREODEVS: /* Mixer channels supporting stereo */
1691 for (val = i = 0; i < SOUND_MIXER_NRDEVICES; i++)
1692 if (mixtable[i].type && mixtable[i].type != MT_4MUTEMONO)
1693 val |= 1 << i;
1694 return put_user(val, p);
1695
1696 case SOUND_MIXER_CAPS:
1697 return put_user(0, p);
1698
1699 default:
1700 i = _IOC_NR(cmd);
1701 if (i >= SOUND_MIXER_NRDEVICES || !mixtable[i].type)
1702 return -EINVAL;
1703 if (!volidx[i])
1704 return -EINVAL;
1705 return put_user(s->mix.vol[volidx[i]-1], p);
1706 }
1707 }
1708 if (_SIOC_DIR(cmd) != (_SIOC_READ|_SIOC_WRITE))
1709 return -EINVAL;
1710 s->mix.modcnt++;
1711 switch (_IOC_NR(cmd)) {
1712 case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */
1713 if (get_user(val, p))
1714 return -EFAULT;
1715 i = hweight32(val);
1716 for (j = i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
1717 if (!(val & (1 << i)))
1718 continue;
1719 if (!mixtable[i].rec) {
1720 val &= ~(1 << i);
1721 continue;
1722 }
1723 j |= mixtable[i].rec;
1724 }
1725 spin_lock_irqsave(&s->lock, flags);
1726 wrmixer(s, DSP_MIX_ADCMIXIDX_L, j);
1727 wrmixer(s, DSP_MIX_ADCMIXIDX_R, (j & 1) | (j>>1) | (j & 0x80));
1728 spin_unlock_irqrestore(&s->lock, flags);
1729 return 0;
1730
1731 case SOUND_MIXER_OUTSRC: /* Arg contains a bit for each recording source */
1732 if (get_user(val, p))
1733 return -EFAULT;
1734 for (j = i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
1735 if (!(val & (1 << i)))
1736 continue;
1737 if (!mixtable[i].play) {
1738 val &= ~(1 << i);
1739 continue;
1740 }
1741 j |= mixtable[i].play;
1742 }
1743 spin_lock_irqsave(&s->lock, flags);
1744 wrmixer(s, DSP_MIX_OUTMIXIDX, j);
1745 spin_unlock_irqrestore(&s->lock, flags);
1746 return 0;
1747
1748 default:
1749 i = _IOC_NR(cmd);
1750 if (i >= SOUND_MIXER_NRDEVICES || !mixtable[i].type)
1751 return -EINVAL;
1752 if (get_user(val, p))
1753 return -EFAULT;
1754 l = val & 0xff;
1755 r = (val >> 8) & 0xff;
1756 if (l > 100)
1757 l = 100;
1758 if (r > 100)
1759 r = 100;
1760 spin_lock_irqsave(&s->lock, flags);
1761 switch (mixtable[i].type) {
1762 case MT_4:
1763 if (l >= 10)
1764 l -= 10;
1765 if (r >= 10)
1766 r -= 10;
1767 frobindir(s, mixtable[i].left, 0xf0, l / 6);
1768 frobindir(s, mixtable[i].right, 0xf0, l / 6);
1769 break;
1770
1771 case MT_4MUTEMONO:
1772 rl = (l < 4 ? 0 : (l - 5) / 3) & 31;
1773 rr = (rl >> 2) & 7;
1774 wrmixer(s, mixtable[i].left, rl<<3);
1775 if (i == SOUND_MIXER_MIC)
1776 maskb(s->iobase + CODEC_CMI_MIXER2, ~0x0e, rr<<1);
1777 break;
1778
1779 case MT_5MUTEMONO:
1780 rl = l < 4 ? 0 : (l - 5) / 3;
1781 wrmixer(s, mixtable[i].left, rl<<3);
1782 l = rdmixer(s, DSP_MIX_OUTMIXIDX) & ~mixtable[i].play;
1783 r = rl ? mixtable[i].play : 0;
1784 wrmixer(s, DSP_MIX_OUTMIXIDX, l | r);
1785 /* for recording */
1786 if (i == SOUND_MIXER_MIC) {
1787 if (s->chip_version >= 37) {
1788 rr = rl >> 1;
1789 maskb(s->iobase + CODEC_CMI_MIXER2, ~0x0e, (rr&0x07)<<1);
1790 frobindir(s, DSP_MIX_EXTENSION, ~0x01, rr>>3);
1791 } else {
1792 rr = rl >> 2;
1793 maskb(s->iobase + CODEC_CMI_MIXER2, ~0x0e, rr<<1);
1794 }
1795 }
1796 break;
1797
1798 case MT_5MUTE:
1799 rl = l < 4 ? 0 : (l - 5) / 3;
1800 rr = r < 4 ? 0 : (r - 5) / 3;
1801 wrmixer(s, mixtable[i].left, rl<<3);
1802 wrmixer(s, mixtable[i].right, rr<<3);
1803 l = rdmixer(s, DSP_MIX_OUTMIXIDX);
1804 l &= ~mixtable[i].play;
1805 r = (rl|rr) ? mixtable[i].play : 0;
1806 wrmixer(s, DSP_MIX_OUTMIXIDX, l | r);
1807 break;
1808
1809 case MT_6MUTE:
1810 if (l < 6)
1811 rl = 0x00;
1812 else
1813 rl = l * 2 / 3;
1814 if (r < 6)
1815 rr = 0x00;
1816 else
1817 rr = r * 2 / 3;
1818 wrmixer(s, mixtable[i].left, rl);
1819 wrmixer(s, mixtable[i].right, rr);
1820 break;
1821 }
1822 spin_unlock_irqrestore(&s->lock, flags);
1823
1824 if (!volidx[i])
1825 return -EINVAL;
1826 s->mix.vol[volidx[i]-1] = val;
1827 return put_user(s->mix.vol[volidx[i]-1], p);
1828 }
1829}
1830
1831/* --------------------------------------------------------------------- */
1832
1833static int cm_open_mixdev(struct inode *inode, struct file *file)
1834{
1835 int minor = iminor(inode);
1836 struct list_head *list;
1837 struct cm_state *s;
1838
1839 for (list = devs.next; ; list = list->next) {
1840 if (list == &devs)
1841 return -ENODEV;
1842 s = list_entry(list, struct cm_state, devs);
1843 if (s->dev_mixer == minor)
1844 break;
1845 }
1846 VALIDATE_STATE(s);
1847 file->private_data = s;
1848 return nonseekable_open(inode, file);
1849}
1850
/* Mixer-device release: nothing to tear down, just validate the state. */
static int cm_release_mixdev(struct inode *inode, struct file *file)
{
	struct cm_state *s = (struct cm_state *)file->private_data;

	VALIDATE_STATE(s);
	return 0;
}
1858
1859static int cm_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1860{
1861 return mixer_ioctl((struct cm_state *)file->private_data, cmd, arg);
1862}
1863
/* File operations for the OSS mixer device node. */
static /*const*/ struct file_operations cm_mixer_fops = {
	.owner	 = THIS_MODULE,
	.llseek	 = no_llseek,
	.ioctl	 = cm_ioctl_mixdev,
	.open	 = cm_open_mixdev,
	.release = cm_release_mixdev,
};
1871
1872
1873/* --------------------------------------------------------------------- */
1874
/*
 * Wait until the playback DMA buffer has drained (count <= 0).
 * Returns 0 when drained, -EBUSY immediately for nonblocking callers
 * with data still queued, or -ERESTARTSYS if interrupted by a signal.
 * mmap'ed or unprogrammed buffers are considered drained.
 */
static int drain_dac(struct cm_state *s, int nonblock)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int count, tmo;

	if (s->dma_dac.mapped || !s->dma_dac.ready)
		return 0;
	add_wait_queue(&s->dma_dac.wait, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&s->lock, flags);
		count = s->dma_dac.count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (count <= 0)
			break;
		if (signal_pending(current))
			break;
		if (nonblock) {
			remove_wait_queue(&s->dma_dac.wait, &wait);
			set_current_state(TASK_RUNNING);
			return -EBUSY;
		}
		/* sleep ~1.5x the time the remaining data takes to play,
		 * scaled down by the sample size */
		tmo = 3 * HZ * (count + s->dma_dac.fragsize) / 2 / s->ratedac;
		tmo >>= sample_shift[(s->fmt >> CM_CFMT_DACSHIFT) & CM_CFMT_MASK];
		if (!schedule_timeout(tmo + 1))
			DBG(printk(KERN_DEBUG "cmpci: dma timed out??\n");)
	}
	remove_wait_queue(&s->dma_dac.wait, &wait);
	set_current_state(TASK_RUNNING);
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
1909
1910/* --------------------------------------------------------------------- */
1911
/*
 * read() handler for the audio device: copy captured data from the ADC
 * DMA ring buffer to user space.
 *
 * Loops copying up to the contiguous run between the software pointer
 * and the end of the ring.  When no data is available it starts the ADC
 * (if enabled) and either returns -EAGAIN (O_NONBLOCK) or sleeps on the
 * ADC wait queue; a one-second sleep timeout triggers a chip-lockup
 * recovery that reprograms the channel.  Returns bytes read, or a
 * negative error (only if nothing was read yet).
 */
static ssize_t cm_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	struct cm_state *s = (struct cm_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	unsigned swptr;
	int cnt;

	VALIDATE_STATE(s);
	if (s->dma_adc.mapped)
		return -ENXIO;
	if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
		return ret;
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	ret = 0;

	add_wait_queue(&s->dma_adc.wait, &wait);
	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		swptr = s->dma_adc.swptr;
		/* contiguous run from swptr to end of ring, capped by data available */
		cnt = s->dma_adc.dmasize-swptr;
		if (s->dma_adc.count < cnt)
			cnt = s->dma_adc.count;
		if (cnt <= 0)
			__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			/* no data yet: kick the ADC and wait (or bail if nonblocking) */
			if (s->dma_adc.enabled)
				start_adc(s);
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				goto out;
			}
			if (!schedule_timeout(HZ)) {
				/* no interrupt within 1s: assume the chip locked up
				 * and reprogram the capture channel from scratch */
				printk(KERN_DEBUG "cmpci: read: chip lockup? dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
				       s->dma_adc.dmasize, s->dma_adc.fragsize, s->dma_adc.count,
				       s->dma_adc.hwptr, s->dma_adc.swptr);
				spin_lock_irqsave(&s->lock, flags);
				stop_adc_unlocked(s);
				set_dmaadc(s, s->dma_adc.dmaaddr, s->dma_adc.dmasamples);
				/* program sample counts */
				set_countadc(s, s->dma_adc.fragsamples);
				s->dma_adc.count = s->dma_adc.hwptr = s->dma_adc.swptr = 0;
				spin_unlock_irqrestore(&s->lock, flags);
			}
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				goto out;
			}
			continue;
		}
		if (s->status & DO_BIGENDIAN_R) {
			/* byte-swap each 16-bit sample on the way to user space */
			int i, err;
			unsigned char *src;
			char __user *dst = buffer;
			unsigned char data[2];

			src = (unsigned char *) (s->dma_adc.rawbuf + swptr);
			// copy left/right sample at one time
			for (i = 0; i < cnt / 2; i++) {
				data[0] = src[1];
				data[1] = src[0];
				if ((err = __put_user(data[0], dst++))) {
					ret = err;
					goto out;
				}
				if ((err = __put_user(data[1], dst++))) {
					ret = err;
					goto out;
				}
				src += 2;
			}
		} else if (copy_to_user(buffer, s->dma_adc.rawbuf + swptr, cnt)) {
			if (!ret)
				ret = -EFAULT;
			goto out;
		}
		/* advance the software pointer and account the consumed bytes */
		swptr = (swptr + cnt) % s->dma_adc.dmasize;
		spin_lock_irqsave(&s->lock, flags);
		s->dma_adc.swptr = swptr;
		s->dma_adc.count -= cnt;
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		if (s->dma_adc.enabled)
			start_adc_unlocked(s);
		spin_unlock_irqrestore(&s->lock, flags);
	}
out:
	remove_wait_queue(&s->dma_adc.wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;
}
2011
2012static ssize_t cm_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
2013{
2014 struct cm_state *s = (struct cm_state *)file->private_data;
2015 DECLARE_WAITQUEUE(wait, current);
2016 ssize_t ret;
2017 unsigned long flags;
2018 unsigned swptr;
2019 int cnt;
2020
2021 VALIDATE_STATE(s);
2022 if (s->dma_dac.mapped)
2023 return -ENXIO;
2024 if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
2025 return ret;
2026 if (!access_ok(VERIFY_READ, buffer, count))
2027 return -EFAULT;
2028 if (s->status & DO_DUAL_DAC) {
2029 if (s->dma_adc.mapped)
2030 return -ENXIO;
2031 if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
2032 return ret;
2033 }
2034 if (!access_ok(VERIFY_READ, buffer, count))
2035 return -EFAULT;
2036 ret = 0;
2037
2038 add_wait_queue(&s->dma_dac.wait, &wait);
2039 while (count > 0) {
2040 spin_lock_irqsave(&s->lock, flags);
2041 if (s->dma_dac.count < 0) {
2042 s->dma_dac.count = 0;
2043 s->dma_dac.swptr = s->dma_dac.hwptr;
2044 }
2045 if (s->status & DO_DUAL_DAC) {
2046 s->dma_adc.swptr = s->dma_dac.swptr;
2047 s->dma_adc.count = s->dma_dac.count;
2048 s->dma_adc.endcleared = s->dma_dac.endcleared;
2049 }
2050 swptr = s->dma_dac.swptr;
2051 cnt = s->dma_dac.dmasize-swptr;
2052 if (s->status & DO_AC3_SW) {
2053 if (s->dma_dac.count + 2 * cnt > s->dma_dac.dmasize)
2054 cnt = (s->dma_dac.dmasize - s->dma_dac.count) / 2;
2055 } else {
2056 if (s->dma_dac.count + cnt > s->dma_dac.dmasize)
2057 cnt = s->dma_dac.dmasize - s->dma_dac.count;
2058 }
2059 if (cnt <= 0)
2060 __set_current_state(TASK_INTERRUPTIBLE);
2061 spin_unlock_irqrestore(&s->lock, flags);
2062 if (cnt > count)
2063 cnt = count;
2064 if ((s->status & DO_DUAL_DAC) && (cnt > count / 2))
2065 cnt = count / 2;
2066 if (cnt <= 0) {
2067 if (s->dma_dac.enabled)
2068 start_dac(s);
2069 if (file->f_flags & O_NONBLOCK) {
2070 if (!ret)
2071 ret = -EAGAIN;
2072 goto out;
2073 }
2074 if (!schedule_timeout(HZ)) {
2075 printk(KERN_DEBUG "cmpci: write: chip lockup? dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
2076 s->dma_dac.dmasize, s->dma_dac.fragsize, s->dma_dac.count,
2077 s->dma_dac.hwptr, s->dma_dac.swptr);
2078 spin_lock_irqsave(&s->lock, flags);
2079 stop_dac_unlocked(s);
2080 set_dmadac(s, s->dma_dac.dmaaddr, s->dma_dac.dmasamples);
2081 /* program sample counts */
2082 set_countdac(s, s->dma_dac.fragsamples);
2083 s->dma_dac.count = s->dma_dac.hwptr = s->dma_dac.swptr = 0;
2084 if (s->status & DO_DUAL_DAC) {
2085 set_dmadac1(s, s->dma_adc.dmaaddr, s->dma_adc.dmasamples);
2086 s->dma_adc.count = s->dma_adc.hwptr = s->dma_adc.swptr = 0;
2087 }
2088 spin_unlock_irqrestore(&s->lock, flags);
2089 }
2090 if (signal_pending(current)) {
2091 if (!ret)
2092 ret = -ERESTARTSYS;
2093 goto out;
2094 }
2095 continue;
2096 }
2097 if (s->status & DO_AC3_SW) {
2098 int err;
2099
2100 // clip exceeded data, caught by 033 and 037
2101 if (swptr + 2 * cnt > s->dma_dac.dmasize)
2102 cnt = (s->dma_dac.dmasize - swptr) / 2;
2103 if ((err = trans_ac3(s, s->dma_dac.rawbuf + swptr, buffer, cnt))) {
2104 ret = err;
2105 goto out;
2106 }
2107 swptr = (swptr + 2 * cnt) % s->dma_dac.dmasize;
2108 } else if ((s->status & DO_DUAL_DAC) && (s->status & DO_BIGENDIAN_W)) {
2109 int i, err;
2110 const char __user *src = buffer;
2111 unsigned char *dst0, *dst1;
2112 unsigned char data[8];
2113
2114 dst0 = (unsigned char *) (s->dma_dac.rawbuf + swptr);
2115 dst1 = (unsigned char *) (s->dma_adc.rawbuf + swptr);
2116 // copy left/right sample at one time
2117 for (i = 0; i < cnt / 4; i++) {
2118 if ((err = __get_user(data[0], src++))) {
2119 ret = err;
2120 goto out;
2121 }
2122 if ((err = __get_user(data[1], src++))) {
2123 ret = err;
2124 goto out;
2125 }
2126 if ((err = __get_user(data[2], src++))) {
2127 ret = err;
2128 goto out;
2129 }
2130 if ((err = __get_user(data[3], src++))) {
2131 ret = err;
2132 goto out;
2133 }
2134 if ((err = __get_user(data[4], src++))) {
2135 ret = err;
2136 goto out;
2137 }
2138 if ((err = __get_user(data[5], src++))) {
2139 ret = err;
2140 goto out;
2141 }
2142 if ((err = __get_user(data[6], src++))) {
2143 ret = err;
2144 goto out;
2145 }
2146 if ((err = __get_user(data[7], src++))) {
2147 ret = err;
2148 goto out;
2149 }
2150 dst0[0] = data[1];
2151 dst0[1] = data[0];
2152 dst0[2] = data[3];
2153 dst0[3] = data[2];
2154 dst1[0] = data[5];
2155 dst1[1] = data[4];
2156 dst1[2] = data[7];
2157 dst1[3] = data[6];
2158 dst0 += 4;
2159 dst1 += 4;
2160 }
2161 swptr = (swptr + cnt) % s->dma_dac.dmasize;
2162 } else if (s->status & DO_DUAL_DAC) {
2163 int i, err;
2164 unsigned long __user *src = (unsigned long __user *) buffer;
2165 unsigned long *dst0, *dst1;
2166
2167 dst0 = (unsigned long *) (s->dma_dac.rawbuf + swptr);
2168 dst1 = (unsigned long *) (s->dma_adc.rawbuf + swptr);
2169 // copy left/right sample at one time
2170 for (i = 0; i < cnt / 4; i++) {
2171 if ((err = __get_user(*dst0++, src++))) {
2172 ret = err;
2173 goto out;
2174 }
2175 if ((err = __get_user(*dst1++, src++))) {
2176 ret = err;
2177 goto out;
2178 }
2179 }
2180 swptr = (swptr + cnt) % s->dma_dac.dmasize;
2181 } else if (s->status & DO_BIGENDIAN_W) {
2182 int i, err;
2183 const char __user *src = buffer;
2184 unsigned char *dst;
2185 unsigned char data[2];
2186
2187 dst = (unsigned char *) (s->dma_dac.rawbuf + swptr);
2188 // swap hi/lo bytes for each sample
2189 for (i = 0; i < cnt / 2; i++) {
2190 if ((err = __get_user(data[0], src++))) {
2191 ret = err;
2192 goto out;
2193 }
2194 if ((err = __get_user(data[1], src++))) {
2195 ret = err;
2196 goto out;
2197 }
2198 dst[0] = data[1];
2199 dst[1] = data[0];
2200 dst += 2;
2201 }
2202 swptr = (swptr + cnt) % s->dma_dac.dmasize;
2203 } else {
2204 if (copy_from_user(s->dma_dac.rawbuf + swptr, buffer, cnt)) {
2205 if (!ret)
2206 ret = -EFAULT;
2207 goto out;
2208 }
2209 swptr = (swptr + cnt) % s->dma_dac.dmasize;
2210 }
2211 spin_lock_irqsave(&s->lock, flags);
2212 s->dma_dac.swptr = swptr;
2213 s->dma_dac.count += cnt;
2214 if (s->status & DO_AC3_SW)
2215 s->dma_dac.count += cnt;
2216 s->dma_dac.endcleared = 0;
2217 spin_unlock_irqrestore(&s->lock, flags);
2218 count -= cnt;
2219 buffer += cnt;
2220 ret += cnt;
2221 if (s->status & DO_DUAL_DAC) {
2222 count -= cnt;
2223 buffer += cnt;
2224 ret += cnt;
2225 }
2226 if (s->dma_dac.enabled)
2227 start_dac(s);
2228 }
2229out:
2230 remove_wait_queue(&s->dma_dac.wait, &wait);
2231 set_current_state(TASK_RUNNING);
2232 return ret;
2233}
2234
2235static unsigned int cm_poll(struct file *file, struct poll_table_struct *wait)
2236{
2237 struct cm_state *s = (struct cm_state *)file->private_data;
2238 unsigned long flags;
2239 unsigned int mask = 0;
2240
2241 VALIDATE_STATE(s);
2242 if (file->f_mode & FMODE_WRITE) {
2243 if (!s->dma_dac.ready && prog_dmabuf(s, 0))
2244 return 0;
2245 poll_wait(file, &s->dma_dac.wait, wait);
2246 }
2247 if (file->f_mode & FMODE_READ) {
2248 if (!s->dma_adc.ready && prog_dmabuf(s, 1))
2249 return 0;
2250 poll_wait(file, &s->dma_adc.wait, wait);
2251 }
2252 spin_lock_irqsave(&s->lock, flags);
2253 cm_update_ptr(s);
2254 if (file->f_mode & FMODE_READ) {
2255 if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
2256 mask |= POLLIN | POLLRDNORM;
2257 }
2258 if (file->f_mode & FMODE_WRITE) {
2259 if (s->dma_dac.mapped) {
2260 if (s->dma_dac.count >= (signed)s->dma_dac.fragsize)
2261 mask |= POLLOUT | POLLWRNORM;
2262 } else {
2263 if ((signed)s->dma_dac.dmasize >= s->dma_dac.count + (signed)s->dma_dac.fragsize)
2264 mask |= POLLOUT | POLLWRNORM;
2265 }
2266 }
2267 spin_unlock_irqrestore(&s->lock, flags);
2268 return mask;
2269}
2270
2271static int cm_mmap(struct file *file, struct vm_area_struct *vma)
2272{
2273 struct cm_state *s = (struct cm_state *)file->private_data;
2274 struct dmabuf *db;
2275 int ret = -EINVAL;
2276 unsigned long size;
2277
2278 VALIDATE_STATE(s);
2279 lock_kernel();
2280 if (vma->vm_flags & VM_WRITE) {
2281 if ((ret = prog_dmabuf(s, 0)) != 0)
2282 goto out;
2283 db = &s->dma_dac;
2284 } else if (vma->vm_flags & VM_READ) {
2285 if ((ret = prog_dmabuf(s, 1)) != 0)
2286 goto out;
2287 db = &s->dma_adc;
2288 } else
2289 goto out;
2290 ret = -EINVAL;
2291 if (vma->vm_pgoff != 0)
2292 goto out;
2293 size = vma->vm_end - vma->vm_start;
2294 if (size > (PAGE_SIZE << db->buforder))
2295 goto out;
2296 ret = -EINVAL;
2297 if (remap_pfn_range(vma, vma->vm_start,
2298 virt_to_phys(db->rawbuf) >> PAGE_SHIFT,
2299 size, vma->vm_page_prot))
2300 goto out;
2301 db->mapped = 1;
2302 ret = 0;
2303out:
2304 unlock_kernel();
2305 return ret;
2306}
2307
2308#define SNDCTL_SPDIF_COPYRIGHT _SIOW('S', 0, int) // set/reset S/PDIF copy protection
2309#define SNDCTL_SPDIF_LOOP _SIOW('S', 1, int) // set/reset S/PDIF loop
2310#define SNDCTL_SPDIF_MONITOR _SIOW('S', 2, int) // set S/PDIF monitor
2311#define SNDCTL_SPDIF_LEVEL _SIOW('S', 3, int) // set/reset S/PDIF out level
2312#define SNDCTL_SPDIF_INV _SIOW('S', 4, int) // set/reset S/PDIF in inverse
2313#define SNDCTL_SPDIF_SEL2 _SIOW('S', 5, int) // set S/PDIF in #2
2314#define SNDCTL_SPDIF_VALID _SIOW('S', 6, int) // set S/PDIF valid
2315#define SNDCTL_SPDIFOUT _SIOW('S', 7, int) // set S/PDIF out
#define SNDCTL_SPDIFIN		_SIOW('S', 8, int)	// set S/PDIF in
2317
/*
 * ioctl() handler for the DSP device: implements the standard OSS
 * SNDCTL_DSP_* interface plus the driver-private SNDCTL_SPDIF*
 * controls defined above.  Any command not handled here falls through
 * to mixer_ioctl().
 */
static int cm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	struct cm_state *s = (struct cm_state *)file->private_data;
	unsigned long flags;
	audio_buf_info abinfo;
	count_info cinfo;
	int val, mapped, ret;
	unsigned char fmtm, fmtd;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	VALIDATE_STATE(s);
	/* true if either open direction is currently mmap()ed
	 * (computed here but not consulted by the cases below) */
	mapped = ((file->f_mode & FMODE_WRITE) && s->dma_dac.mapped) ||
		((file->f_mode & FMODE_READ) && s->dma_adc.mapped);
	switch (cmd) {
	case OSS_GETVERSION:
		return put_user(SOUND_VERSION, p);

	case SNDCTL_DSP_SYNC:
		/* block until queued playback data has drained */
		if (file->f_mode & FMODE_WRITE)
			return drain_dac(s, 0/*file->f_flags & O_NONBLOCK*/);
		return 0;

	case SNDCTL_DSP_SETDUPLEX:
		return 0;

	case SNDCTL_DSP_GETCAPS:
		return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME | DSP_CAP_TRIGGER | DSP_CAP_MMAP | DSP_CAP_BIND, p);

	case SNDCTL_DSP_RESET:
		/* stop DMA and zero all ring-buffer bookkeeping */
		if (file->f_mode & FMODE_WRITE) {
			stop_dac(s);
			synchronize_irq(s->irq);
			s->dma_dac.swptr = s->dma_dac.hwptr = s->dma_dac.count = s->dma_dac.total_bytes = 0;
			if (s->status & DO_DUAL_DAC)
				s->dma_adc.swptr = s->dma_adc.hwptr = s->dma_adc.count = s->dma_adc.total_bytes = 0;
		}
		if (file->f_mode & FMODE_READ) {
			stop_adc(s);
			synchronize_irq(s->irq);
			s->dma_adc.swptr = s->dma_adc.hwptr = s->dma_adc.count = s->dma_adc.total_bytes = 0;
		}
		return 0;

	case SNDCTL_DSP_SPEED:
		/* set sample rate; negative val just queries the current rate */
		if (get_user(val, p))
			return -EFAULT;
		if (val >= 0) {
			if (file->f_mode & FMODE_READ) {
				spin_lock_irqsave(&s->lock, flags);
				stop_adc_unlocked(s);
				s->dma_adc.ready = 0;
				set_adc_rate_unlocked(s, val);
				spin_unlock_irqrestore(&s->lock, flags);
			}
			if (file->f_mode & FMODE_WRITE) {
				stop_dac(s);
				s->dma_dac.ready = 0;
				if (s->status & DO_DUAL_DAC)
					s->dma_adc.ready = 0;
				set_dac_rate(s, val);
			}
		}
		return put_user((file->f_mode & FMODE_READ) ? s->rateadc : s->ratedac, p);

	case SNDCTL_DSP_STEREO:
		/* fmtd = bits to set, fmtm = mask of bits to keep */
		if (get_user(val, p))
			return -EFAULT;
		fmtd = 0;
		fmtm = ~0;
		if (file->f_mode & FMODE_READ) {
			stop_adc(s);
			s->dma_adc.ready = 0;
			if (val)
				fmtd |= CM_CFMT_STEREO << CM_CFMT_ADCSHIFT;
			else
				fmtm &= ~(CM_CFMT_STEREO << CM_CFMT_ADCSHIFT);
		}
		if (file->f_mode & FMODE_WRITE) {
			stop_dac(s);
			s->dma_dac.ready = 0;
			if (val)
				fmtd |= CM_CFMT_STEREO << CM_CFMT_DACSHIFT;
			else
				fmtm &= ~(CM_CFMT_STEREO << CM_CFMT_DACSHIFT);
			if (s->status & DO_DUAL_DAC) {
				s->dma_adc.ready = 0;
				if (val)
					fmtd |= CM_CFMT_STEREO << CM_CFMT_ADCSHIFT;
				else
					fmtm &= ~(CM_CFMT_STEREO << CM_CFMT_ADCSHIFT);
			}
		}
		set_fmt(s, fmtm, fmtd);
		return 0;

	case SNDCTL_DSP_CHANNELS:
		/* like DSP_STEREO but takes a channel count; >2 channels
		 * are routed through set_dac_channels() on capable chips */
		if (get_user(val, p))
			return -EFAULT;
		if (val != 0) {
			fmtd = 0;
			fmtm = ~0;
			if (file->f_mode & FMODE_READ) {
				stop_adc(s);
				s->dma_adc.ready = 0;
				if (val >= 2)
					fmtd |= CM_CFMT_STEREO << CM_CFMT_ADCSHIFT;
				else
					fmtm &= ~(CM_CFMT_STEREO << CM_CFMT_ADCSHIFT);
			}
			if (file->f_mode & FMODE_WRITE) {
				stop_dac(s);
				s->dma_dac.ready = 0;
				if (val >= 2)
					fmtd |= CM_CFMT_STEREO << CM_CFMT_DACSHIFT;
				else
					fmtm &= ~(CM_CFMT_STEREO << CM_CFMT_DACSHIFT);
				if (s->status & DO_DUAL_DAC) {
					s->dma_adc.ready = 0;
					if (val >= 2)
						fmtd |= CM_CFMT_STEREO << CM_CFMT_ADCSHIFT;
					else
						fmtm &= ~(CM_CFMT_STEREO << CM_CFMT_ADCSHIFT);
				}
			}
			set_fmt(s, fmtm, fmtd);
			if ((s->capability & CAN_MULTI_CH)
			     && (file->f_mode & FMODE_WRITE)) {
				val = set_dac_channels(s, val);
				return put_user(val, p);
			}
		}
		return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (CM_CFMT_STEREO << CM_CFMT_ADCSHIFT)
					   : (CM_CFMT_STEREO << CM_CFMT_DACSHIFT))) ? 2 : 1, p);

	case SNDCTL_DSP_GETFMTS: /* Returns a mask */
		return put_user(AFMT_S16_BE|AFMT_S16_LE|AFMT_U8|
				((s->capability & CAN_AC3) ? AFMT_AC3 : 0), p);

	case SNDCTL_DSP_SETFMT: /* Selects ONE fmt*/
		if (get_user(val, p))
			return -EFAULT;
		if (val != AFMT_QUERY) {
			fmtd = 0;
			fmtm = ~0;
			if (file->f_mode & FMODE_READ) {
				stop_adc(s);
				s->dma_adc.ready = 0;
				if (val == AFMT_S16_BE || val == AFMT_S16_LE)
					fmtd |= CM_CFMT_16BIT << CM_CFMT_ADCSHIFT;
				else
					fmtm &= ~(CM_CFMT_16BIT << CM_CFMT_ADCSHIFT);
				/* big-endian input needs byte swapping in cm_read() */
				if (val == AFMT_S16_BE)
					s->status |= DO_BIGENDIAN_R;
				else
					s->status &= ~DO_BIGENDIAN_R;
			}
			if (file->f_mode & FMODE_WRITE) {
				stop_dac(s);
				s->dma_dac.ready = 0;
				if (val == AFMT_S16_BE || val == AFMT_S16_LE || val == AFMT_AC3)
					fmtd |= CM_CFMT_16BIT << CM_CFMT_DACSHIFT;
				else
					fmtm &= ~(CM_CFMT_16BIT << CM_CFMT_DACSHIFT);
				/* AC3 passthrough is always stereo/48kHz */
				if (val == AFMT_AC3) {
					fmtd |= CM_CFMT_STEREO << CM_CFMT_DACSHIFT;
					set_ac3(s, 48000);
				} else
					set_ac3(s, 0);
				if (s->status & DO_DUAL_DAC) {
					s->dma_adc.ready = 0;
					if (val == AFMT_S16_BE || val == AFMT_S16_LE)
						fmtd |= CM_CFMT_STEREO << CM_CFMT_ADCSHIFT;
					else
						fmtm &= ~(CM_CFMT_STEREO << CM_CFMT_ADCSHIFT);
				}
				if (val == AFMT_S16_BE)
					s->status |= DO_BIGENDIAN_W;
				else
					s->status &= ~DO_BIGENDIAN_W;
			}
			set_fmt(s, fmtm, fmtd);
		}
		if (s->status & DO_AC3) return put_user(AFMT_AC3, p);
		return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (CM_CFMT_16BIT << CM_CFMT_ADCSHIFT)
					   : (CM_CFMT_16BIT << CM_CFMT_DACSHIFT))) ? val : AFMT_U8, p);

	case SNDCTL_DSP_POST:
		return 0;

	case SNDCTL_DSP_GETTRIGGER:
		/* report which directions are currently running */
		val = 0;
		if (s->status & DO_DUAL_DAC) {
			/* dual-DAC playback uses both DMA engines */
			if (file->f_mode & FMODE_WRITE &&
			    (s->enable & ENDAC) &&
			    (s->enable & ENADC))
				val |= PCM_ENABLE_OUTPUT;
			return put_user(val, p);
		}
		if (file->f_mode & FMODE_READ && s->enable & ENADC)
			val |= PCM_ENABLE_INPUT;
		if (file->f_mode & FMODE_WRITE && s->enable & ENDAC)
			val |= PCM_ENABLE_OUTPUT;
		return put_user(val, p);

	case SNDCTL_DSP_SETTRIGGER:
		/* start/stop DMA for each direction on demand */
		if (get_user(val, p))
			return -EFAULT;
		if (file->f_mode & FMODE_READ) {
			if (val & PCM_ENABLE_INPUT) {
				if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
					return ret;
				s->dma_adc.enabled = 1;
				start_adc(s);
			} else {
				s->dma_adc.enabled = 0;
				stop_adc(s);
			}
		}
		if (file->f_mode & FMODE_WRITE) {
			if (val & PCM_ENABLE_OUTPUT) {
				if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
					return ret;
				if (s->status & DO_DUAL_DAC) {
					if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
						return ret;
				}
				s->dma_dac.enabled = 1;
				start_dac(s);
			} else {
				s->dma_dac.enabled = 0;
				stop_dac(s);
			}
		}
		return 0;

	case SNDCTL_DSP_GETOSPACE:
		/* free space statistics for the playback ring */
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		if (!(s->enable & ENDAC) && (val = prog_dmabuf(s, 0)) != 0)
			return val;
		spin_lock_irqsave(&s->lock, flags);
		cm_update_ptr(s);
		abinfo.fragsize = s->dma_dac.fragsize;
		abinfo.bytes = s->dma_dac.dmasize - s->dma_dac.count;
		abinfo.fragstotal = s->dma_dac.numfrag;
		abinfo.fragments = abinfo.bytes >> s->dma_dac.fragshift;
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_GETISPACE:
		/* available-data statistics for the capture ring */
		if (!(file->f_mode & FMODE_READ))
			return -EINVAL;
		if (!(s->enable & ENADC) && (val = prog_dmabuf(s, 1)) != 0)
			return val;
		spin_lock_irqsave(&s->lock, flags);
		cm_update_ptr(s);
		abinfo.fragsize = s->dma_adc.fragsize;
		abinfo.bytes = s->dma_adc.count;
		abinfo.fragstotal = s->dma_adc.numfrag;
		abinfo.fragments = abinfo.bytes >> s->dma_adc.fragshift;
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_NONBLOCK:
		file->f_flags |= O_NONBLOCK;
		return 0;

	case SNDCTL_DSP_GETODELAY:
		/* bytes still queued for playback */
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		spin_lock_irqsave(&s->lock, flags);
		cm_update_ptr(s);
		val = s->dma_dac.count;
		spin_unlock_irqrestore(&s->lock, flags);
		return put_user(val, p);

	case SNDCTL_DSP_GETIPTR:
		if (!(file->f_mode & FMODE_READ))
			return -EINVAL;
		spin_lock_irqsave(&s->lock, flags);
		cm_update_ptr(s);
		cinfo.bytes = s->dma_adc.total_bytes;
		cinfo.blocks = s->dma_adc.count >> s->dma_adc.fragshift;
		cinfo.ptr = s->dma_adc.hwptr;
		/* mmap()ed: fold count back into the current fragment */
		if (s->dma_adc.mapped)
			s->dma_adc.count &= s->dma_adc.fragsize-1;
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user(argp, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_GETOPTR:
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		spin_lock_irqsave(&s->lock, flags);
		cm_update_ptr(s);
		cinfo.bytes = s->dma_dac.total_bytes;
		cinfo.blocks = s->dma_dac.count >> s->dma_dac.fragshift;
		cinfo.ptr = s->dma_dac.hwptr;
		if (s->dma_dac.mapped)
			s->dma_dac.count &= s->dma_dac.fragsize-1;
		if (s->status & DO_DUAL_DAC) {
			if (s->dma_adc.mapped)
				s->dma_adc.count &= s->dma_adc.fragsize-1;
		}
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user(argp, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_GETBLKSIZE:
		/* dual-DAC playback reports twice the fragment size since
		 * write() consumes data for both rings */
		if (file->f_mode & FMODE_WRITE) {
			if ((val = prog_dmabuf(s, 0)))
				return val;
			if (s->status & DO_DUAL_DAC) {
				if ((val = prog_dmabuf(s, 1)))
					return val;
				return put_user(2 * s->dma_dac.fragsize, p);
			}
			return put_user(s->dma_dac.fragsize, p);
		}
		if ((val = prog_dmabuf(s, 1)))
			return val;
		return put_user(s->dma_adc.fragsize, p);

	case SNDCTL_DSP_SETFRAGMENT:
		/* low 16 bits: log2 fragment size; high 16 bits: max fragments.
		 * Values are clamped to sane hardware limits. */
		if (get_user(val, p))
			return -EFAULT;
		if (file->f_mode & FMODE_READ) {
			s->dma_adc.ossfragshift = val & 0xffff;
			s->dma_adc.ossmaxfrags = (val >> 16) & 0xffff;
			if (s->dma_adc.ossfragshift < 4)
				s->dma_adc.ossfragshift = 4;
			if (s->dma_adc.ossfragshift > 15)
				s->dma_adc.ossfragshift = 15;
			if (s->dma_adc.ossmaxfrags < 4)
				s->dma_adc.ossmaxfrags = 4;
		}
		if (file->f_mode & FMODE_WRITE) {
			s->dma_dac.ossfragshift = val & 0xffff;
			s->dma_dac.ossmaxfrags = (val >> 16) & 0xffff;
			if (s->dma_dac.ossfragshift < 4)
				s->dma_dac.ossfragshift = 4;
			if (s->dma_dac.ossfragshift > 15)
				s->dma_dac.ossfragshift = 15;
			if (s->dma_dac.ossmaxfrags < 4)
				s->dma_dac.ossmaxfrags = 4;
			if (s->status & DO_DUAL_DAC) {
				s->dma_adc.ossfragshift = s->dma_dac.ossfragshift;
				s->dma_adc.ossmaxfrags = s->dma_dac.ossmaxfrags;
			}
		}
		return 0;

	case SNDCTL_DSP_SUBDIVIDE:
		/* may only be set once per open, before buffers are used */
		if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision) ||
		    (file->f_mode & FMODE_WRITE && s->dma_dac.subdivision))
			return -EINVAL;
		if (get_user(val, p))
			return -EFAULT;
		if (val != 1 && val != 2 && val != 4)
			return -EINVAL;
		if (file->f_mode & FMODE_READ)
			s->dma_adc.subdivision = val;
		if (file->f_mode & FMODE_WRITE) {
			s->dma_dac.subdivision = val;
			if (s->status & DO_DUAL_DAC)
				s->dma_adc.subdivision = val;
		}
		return 0;

	case SOUND_PCM_READ_RATE:
		return put_user((file->f_mode & FMODE_READ) ? s->rateadc : s->ratedac, p);

	case SOUND_PCM_READ_CHANNELS:
		return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (CM_CFMT_STEREO << CM_CFMT_ADCSHIFT) : (CM_CFMT_STEREO << CM_CFMT_DACSHIFT))) ? 2 : 1, p);

	case SOUND_PCM_READ_BITS:
		return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (CM_CFMT_16BIT << CM_CFMT_ADCSHIFT) : (CM_CFMT_16BIT << CM_CFMT_DACSHIFT))) ? 16 : 8, p);

	case SOUND_PCM_READ_FILTER:
		return put_user((file->f_mode & FMODE_READ) ? s->rateadc : s->ratedac, p);

	case SNDCTL_DSP_GETCHANNELMASK:
		return put_user(DSP_BIND_FRONT|DSP_BIND_SURR|DSP_BIND_CENTER_LFE|DSP_BIND_SPDIF, p);

	case SNDCTL_DSP_BIND_CHANNEL:
		/* query or set the output routing (front/surround/LFE/SPDIF) */
		if (get_user(val, p))
			return -EFAULT;
		if (val == DSP_BIND_QUERY) {
			val = DSP_BIND_FRONT;
			if (s->status & DO_SPDIF_OUT)
				val |= DSP_BIND_SPDIF;
			else {
				if (s->curr_channels == 4)
					val |= DSP_BIND_SURR;
				if (s->curr_channels > 4)
					val |= DSP_BIND_CENTER_LFE;
			}
		} else {
			if (file->f_mode & FMODE_READ) {
				stop_adc(s);
				s->dma_adc.ready = 0;
				if (val & DSP_BIND_SPDIF) {
					set_spdifin(s, s->rateadc);
					/* drop the flag if the chip refused SPDIF */
					if (!(s->status & DO_SPDIF_OUT))
						val &= ~DSP_BIND_SPDIF;
				}
			}
			if (file->f_mode & FMODE_WRITE) {
				stop_dac(s);
				s->dma_dac.ready = 0;
				if (val & DSP_BIND_SPDIF) {
					set_spdifout(s, s->ratedac);
					set_dac_channels(s, s->fmt & (CM_CFMT_STEREO << CM_CFMT_DACSHIFT) ? 2 : 1);
					if (!(s->status & DO_SPDIF_OUT))
						val &= ~DSP_BIND_SPDIF;
				} else {
					int channels;
					int mask;

					/* map the requested speaker set to a channel count */
					mask = val & (DSP_BIND_FRONT|DSP_BIND_SURR|DSP_BIND_CENTER_LFE);
					switch (mask) {
					case DSP_BIND_FRONT:
						channels = 2;
						break;
					case DSP_BIND_FRONT|DSP_BIND_SURR:
						channels = 4;
						break;
					case DSP_BIND_FRONT|DSP_BIND_SURR|DSP_BIND_CENTER_LFE:
						channels = 6;
						break;
					default:
						channels = s->fmt & (CM_CFMT_STEREO << CM_CFMT_DACSHIFT) ? 2 : 1;
						break;
					}
					set_dac_channels(s, channels);
				}
			}
		}
		return put_user(val, p);

	case SOUND_PCM_WRITE_FILTER:
	case SNDCTL_DSP_MAPINBUF:
	case SNDCTL_DSP_MAPOUTBUF:
	case SNDCTL_DSP_SETSYNCRO:
		return -EINVAL;
	/* driver-private S/PDIF controls */
	case SNDCTL_SPDIF_COPYRIGHT:
		if (get_user(val, p))
			return -EFAULT;
		set_spdif_copyright(s, val);
		return 0;
	case SNDCTL_SPDIF_LOOP:
		if (get_user(val, p))
			return -EFAULT;
		set_spdif_loop(s, val);
		return 0;
	case SNDCTL_SPDIF_MONITOR:
		if (get_user(val, p))
			return -EFAULT;
		set_spdif_monitor(s, val);
		return 0;
	case SNDCTL_SPDIF_LEVEL:
		if (get_user(val, p))
			return -EFAULT;
		set_spdifout_level(s, val);
		return 0;
	case SNDCTL_SPDIF_INV:
		if (get_user(val, p))
			return -EFAULT;
		set_spdifin_inverse(s, val);
		return 0;
	case SNDCTL_SPDIF_SEL2:
		if (get_user(val, p))
			return -EFAULT;
		set_spdifin_channel2(s, val);
		return 0;
	case SNDCTL_SPDIF_VALID:
		if (get_user(val, p))
			return -EFAULT;
		set_spdifin_valid(s, val);
		return 0;
	case SNDCTL_SPDIFOUT:
		if (get_user(val, p))
			return -EFAULT;
		set_spdifout(s, val ? s->ratedac : 0);
		return 0;
	case SNDCTL_SPDIFIN:
		if (get_user(val, p))
			return -EFAULT;
		set_spdifin(s, val ? s->rateadc : 0);
		return 0;
	}
	/* not a DSP ioctl: hand off to the mixer */
	return mixer_ioctl(s, cmd, arg);
}
2810
/*
 * open() handler: locate the cm_state whose audio minor matches,
 * sleep until the requested access mode(s) are free, then initialise
 * the opened direction(s) to OSS defaults (8 kHz, mono, 8-bit unless
 * the 16-bit minor was opened).
 */
static int cm_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	DECLARE_WAITQUEUE(wait, current);
	unsigned char fmtm = ~0, fmts = 0;
	struct list_head *list;
	struct cm_state *s;

	/* find the card: the low 4 bits of the minor select the subdevice */
	for (list = devs.next; ; list = list->next) {
		if (list == &devs)
			return -ENODEV;
		s = list_entry(list, struct cm_state, devs);
		if (!((s->dev_audio ^ minor) & ~0xf))
			break;
	}
	VALIDATE_STATE(s);
	file->private_data = s;
	/* wait for device to become free */
	mutex_lock(&s->open_mutex);
	while (s->open_mode & file->f_mode) {
		if (file->f_flags & O_NONBLOCK) {
			mutex_unlock(&s->open_mutex);
			return -EBUSY;
		}
		/* drop the mutex while sleeping so the holder can release */
		add_wait_queue(&s->open_wait, &wait);
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&s->open_mutex);
		schedule();
		remove_wait_queue(&s->open_wait, &wait);
		set_current_state(TASK_RUNNING);
		if (signal_pending(current))
			return -ERESTARTSYS;
		mutex_lock(&s->open_mutex);
	}
	if (file->f_mode & FMODE_READ) {
		s->status &= ~DO_BIGENDIAN_R;
		/* reset capture format bits; 16-bit only on the dsp16 minor */
		fmtm &= ~((CM_CFMT_STEREO | CM_CFMT_16BIT) << CM_CFMT_ADCSHIFT);
		if ((minor & 0xf) == SND_DEV_DSP16)
			fmts |= CM_CFMT_16BIT << CM_CFMT_ADCSHIFT;
		s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0;
		s->dma_adc.enabled = 1;
		set_adc_rate(s, 8000);
		// spdif-in is turned off by default
		set_spdifin(s, 0);
	}
	if (file->f_mode & FMODE_WRITE) {
		s->status &= ~DO_BIGENDIAN_W;
		/* reset playback format bits; 16-bit only on the dsp16 minor */
		fmtm &= ~((CM_CFMT_STEREO | CM_CFMT_16BIT) << CM_CFMT_DACSHIFT);
		if ((minor & 0xf) == SND_DEV_DSP16)
			fmts |= CM_CFMT_16BIT << CM_CFMT_DACSHIFT;
		s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0;
		s->dma_dac.enabled = 1;
		set_dac_rate(s, 8000);
		// clear previous multichannel, spdif, ac3 state
		set_spdifout(s, 0);
		set_ac3(s, 0);
		set_dac_channels(s, 1);
	}
	set_fmt(s, fmtm, fmts);
	s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
	mutex_unlock(&s->open_mutex);
	return nonseekable_open(inode, file);
}
2874
/*
 * release() handler: drain and stop whichever directions this file had
 * open, free their DMA buffers, restore chip defaults, and wake any
 * opener blocked in cm_open().
 */
static int cm_release(struct inode *inode, struct file *file)
{
	struct cm_state *s = (struct cm_state *)file->private_data;

	VALIDATE_STATE(s);
	lock_kernel();
	/* let queued playback finish first (honours O_NONBLOCK) */
	if (file->f_mode & FMODE_WRITE)
		drain_dac(s, file->f_flags & O_NONBLOCK);
	mutex_lock(&s->open_mutex);
	if (file->f_mode & FMODE_WRITE) {
		stop_dac(s);

		dealloc_dmabuf(s, &s->dma_dac);
		/* dual-DAC playback borrowed the ADC buffer too */
		if (s->status & DO_DUAL_DAC)
			dealloc_dmabuf(s, &s->dma_adc);

		/* undo any multichannel / AC3 / SPDIF-out configuration */
		if (s->status & DO_MULTI_CH)
			set_dac_channels(s, 1);
		if (s->status & DO_AC3)
			set_ac3(s, 0);
		if (s->status & DO_SPDIF_OUT)
			set_spdifout(s, 0);
		/* enable SPDIF loop */
		set_spdif_loop(s, spdif_loop);
		s->status &= ~DO_BIGENDIAN_W;
	}
	if (file->f_mode & FMODE_READ) {
		stop_adc(s);
		dealloc_dmabuf(s, &s->dma_adc);
		s->status &= ~DO_BIGENDIAN_R;
	}
	s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE));
	mutex_unlock(&s->open_mutex);
	/* let a blocked cm_open() retry */
	wake_up(&s->open_wait);
	unlock_kernel();
	return 0;
}
2912
/* file_operations for the /dev/dsp audio device nodes */
static /*const*/ struct file_operations cm_audio_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cm_read,
	.write		= cm_write,
	.poll		= cm_poll,
	.ioctl		= cm_ioctl,
	.mmap		= cm_mmap,
	.open		= cm_open,
	.release	= cm_release,
};
2924
2925/* --------------------------------------------------------------------- */
2926
/* default mixer volumes applied once at probe time
 * (0x4f4f = left/right both at 79 of 100) */
static struct initvol {
	int mixch;	/* SOUND_MIXER_WRITE_* ioctl selector */
	int vol;	/* packed left/right volume */
} initvol[] __devinitdata = {
	{ SOUND_MIXER_WRITE_CD, 0x4f4f },
	{ SOUND_MIXER_WRITE_LINE, 0x4f4f },
	{ SOUND_MIXER_WRITE_MIC, 0x4f4f },
	{ SOUND_MIXER_WRITE_SYNTH, 0x4f4f },
	{ SOUND_MIXER_WRITE_VOLUME, 0x4f4f },
	{ SOUND_MIXER_WRITE_PCM, 0x4f4f }
};
2938
2939/* check chip version and capability */
2940static int query_chip(struct cm_state *s)
2941{
2942 int ChipVersion = -1;
2943 unsigned char RegValue;
2944
2945 // check reg 0Ch, bit 24-31
2946 RegValue = inb(s->iobase + CODEC_CMI_INT_HLDCLR + 3);
2947 if (RegValue == 0) {
2948 // check reg 08h, bit 24-28
2949 RegValue = inb(s->iobase + CODEC_CMI_CHFORMAT + 3);
2950 RegValue &= 0x1f;
2951 if (RegValue == 0) {
2952 ChipVersion = 33;
2953 s->max_channels = 4;
2954 s->capability |= CAN_AC3_SW;
2955 s->capability |= CAN_DUAL_DAC;
2956 } else {
2957 ChipVersion = 37;
2958 s->max_channels = 4;
2959 s->capability |= CAN_AC3_HW;
2960 s->capability |= CAN_DUAL_DAC;
2961 }
2962 } else {
2963 // check reg 0Ch, bit 26
2964 if (RegValue & (1 << (26-24))) {
2965 ChipVersion = 39;
2966 if (RegValue & (1 << (24-24)))
2967 s->max_channels = 6;
2968 else
2969 s->max_channels = 4;
2970 s->capability |= CAN_AC3_HW;
2971 s->capability |= CAN_DUAL_DAC;
2972 s->capability |= CAN_MULTI_CH_HW;
2973 s->capability |= CAN_LINE_AS_BASS;
2974 s->capability |= CAN_MIC_AS_BASS;
2975 } else {
2976 ChipVersion = 55; // 4 or 6 channels
2977 s->max_channels = 6;
2978 s->capability |= CAN_AC3_HW;
2979 s->capability |= CAN_DUAL_DAC;
2980 s->capability |= CAN_MULTI_CH_HW;
2981 s->capability |= CAN_LINE_AS_BASS;
2982 s->capability |= CAN_MIC_AS_BASS;
2983 }
2984 }
2985 s->capability |= CAN_LINE_AS_REAR;
2986 return ChipVersion;
2987}
2988
2989#ifdef CONFIG_SOUND_CMPCI_JOYSTICK
2990static int __devinit cm_create_gameport(struct cm_state *s, int io_port)
2991{
2992 struct gameport *gp;
2993
2994 if (!request_region(io_port, CM_EXTENT_GAME, "cmpci GAME")) {
2995 printk(KERN_ERR "cmpci: gameport io ports 0x%#x in use\n", io_port);
2996 return -EBUSY;
2997 }
2998
2999 if (!(s->gameport = gp = gameport_allocate_port())) {
3000 printk(KERN_ERR "cmpci: can not allocate memory for gameport\n");
3001 release_region(io_port, CM_EXTENT_GAME);
3002 return -ENOMEM;
3003 }
3004
3005 gameport_set_name(gp, "C-Media GP");
3006 gameport_set_phys(gp, "pci%s/gameport0", pci_name(s->dev));
3007 gp->dev.parent = &s->dev->dev;
3008 gp->io = io_port;
3009
3010 /* enable joystick */
3011 maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0, 0x02);
3012
3013 gameport_register_port(gp);
3014
3015 return 0;
3016}
3017
/*
 * Tear down the gameport created by cm_create_gameport(): unregister
 * it, clear the joystick enable bit, and release its I/O region.
 * No-op when no gameport was registered.
 */
static void __devexit cm_free_gameport(struct cm_state *s)
{
	if (s->gameport) {
		/* save the io base: unregistering frees s->gameport */
		int gpio = s->gameport->io;

		gameport_unregister_port(s->gameport);
		s->gameport = NULL;
		/* disable the joystick function */
		maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0x02, 0);
		release_region(gpio, CM_EXTENT_GAME);
	}
}
3029#else
/* joystick support compiled out: provide no-op stubs */
static inline int cm_create_gameport(struct cm_state *s, int io_port) { return -ENOSYS; }
static inline void cm_free_gameport(struct cm_state *s) { }
3032#endif
3033
/* Append the option's name to the local `options' string when the
 * option is enabled.  Wrapped in do { } while (0) so the bare `if'
 * cannot capture a following `else' at the call site (dangling-else
 * hazard in the original). */
#define echo_option(x) \
	do { if (x) strcat(options, #x " "); } while (0)
3036
3037static int __devinit cm_probe(struct pci_dev *pcidev, const struct pci_device_id *pciid)
3038{
3039 struct cm_state *s;
3040 mm_segment_t fs;
3041 int i, val, ret;
3042 unsigned char reg_mask;
3043 int timeout;
3044 struct resource *ports;
3045 struct {
3046 unsigned short deviceid;
3047 char *devicename;
3048 } devicetable[] = {
3049 { PCI_DEVICE_ID_CMEDIA_CM8338A, "CM8338A" },
3050 { PCI_DEVICE_ID_CMEDIA_CM8338B, "CM8338B" },
3051 { PCI_DEVICE_ID_CMEDIA_CM8738, "CM8738" },
3052 { PCI_DEVICE_ID_CMEDIA_CM8738B, "CM8738B" },
3053 };
3054 char *devicename = "unknown";
3055 char options[256];
3056
3057 if ((ret = pci_enable_device(pcidev)))
3058 return ret;
3059 if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_IO))
3060 return -ENODEV;
3061 if (pcidev->irq == 0)
3062 return -ENODEV;
3063 i = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
3064 if (i) {
3065 printk(KERN_WARNING "cmpci: architecture does not support 32bit PCI busmaster DMA\n");
3066 return i;
3067 }
3068 s = kmalloc(sizeof(*s), GFP_KERNEL);
3069 if (!s) {
3070 printk(KERN_WARNING "cmpci: out of memory\n");
3071 return -ENOMEM;
3072 }
3073 /* search device name */
3074 for (i = 0; i < sizeof(devicetable) / sizeof(devicetable[0]); i++) {
3075 if (devicetable[i].deviceid == pcidev->device) {
3076 devicename = devicetable[i].devicename;
3077 break;
3078 }
3079 }
3080 memset(s, 0, sizeof(struct cm_state));
3081 init_waitqueue_head(&s->dma_adc.wait);
3082 init_waitqueue_head(&s->dma_dac.wait);
3083 init_waitqueue_head(&s->open_wait);
3084 mutex_init(&s->open_mutex);
3085 spin_lock_init(&s->lock);
3086 s->magic = CM_MAGIC;
3087 s->dev = pcidev;
3088 s->iobase = pci_resource_start(pcidev, 0);
3089 s->iosynth = fmio;
3090 s->iomidi = mpuio;
3091#ifdef CONFIG_SOUND_CMPCI_MIDI
3092 s->midi_devc = 0;
3093#endif
3094 s->status = 0;
3095 if (s->iobase == 0)
3096 return -ENODEV;
3097 s->irq = pcidev->irq;
3098
3099 if (!request_region(s->iobase, CM_EXTENT_CODEC, "cmpci")) {
3100 printk(KERN_ERR "cmpci: io ports %#x-%#x in use\n", s->iobase, s->iobase+CM_EXTENT_CODEC-1);
3101 ret = -EBUSY;
3102 goto err_region5;
3103 }
3104 /* dump parameters */
3105 strcpy(options, "cmpci: ");
3106 echo_option(joystick);
3107 echo_option(spdif_inverse);
3108 echo_option(spdif_loop);
3109 echo_option(spdif_out);
3110 echo_option(use_line_as_rear);
3111 echo_option(use_line_as_bass);
3112 echo_option(use_mic_as_bass);
3113 echo_option(mic_boost);
3114 echo_option(hw_copy);
3115 printk(KERN_INFO "%s\n", options);
3116
3117 /* initialize codec registers */
3118 outb(0, s->iobase + CODEC_CMI_INT_HLDCLR + 2); /* disable ints */
3119 outb(0, s->iobase + CODEC_CMI_FUNCTRL0 + 2); /* disable channels */
3120 /* reset mixer */
3121 wrmixer(s, DSP_MIX_DATARESETIDX, 0);
3122
3123 /* request irq */
3124 if ((ret = request_irq(s->irq, cm_interrupt, IRQF_SHARED, "cmpci", s))) {
3125 printk(KERN_ERR "cmpci: irq %u in use\n", s->irq);
3126 goto err_irq;
3127 }
3128 printk(KERN_INFO "cmpci: found %s adapter at io %#x irq %u\n",
3129 devicename, s->iobase, s->irq);
3130 /* register devices */
3131 if ((s->dev_audio = register_sound_dsp(&cm_audio_fops, -1)) < 0) {
3132 ret = s->dev_audio;
3133 goto err_dev1;
3134 }
3135 if ((s->dev_mixer = register_sound_mixer(&cm_mixer_fops, -1)) < 0) {
3136 ret = s->dev_mixer;
3137 goto err_dev2;
3138 }
3139 pci_set_master(pcidev); /* enable bus mastering */
3140 /* initialize the chips */
3141 fs = get_fs();
3142 set_fs(KERNEL_DS);
3143 /* set mixer output */
3144 frobindir(s, DSP_MIX_OUTMIXIDX, 0x1f, 0x1f);
3145 /* set mixer input */
3146 val = SOUND_MASK_LINE|SOUND_MASK_SYNTH|SOUND_MASK_CD|SOUND_MASK_MIC;
3147 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long)&val);
3148 for (i = 0; i < sizeof(initvol)/sizeof(initvol[0]); i++) {
3149 val = initvol[i].vol;
3150 mixer_ioctl(s, initvol[i].mixch, (unsigned long)&val);
3151 }
3152 set_fs(fs);
3153 /* use channel 1 for playback, channel 0 for record */
3154 maskb(s->iobase + CODEC_CMI_FUNCTRL0, ~CHADC1, CHADC0);
3155 /* turn off VMIC3 - mic boost */
3156 if (mic_boost)
3157 maskb(s->iobase + CODEC_CMI_MIXER2, ~1, 0);
3158 else
3159 maskb(s->iobase + CODEC_CMI_MIXER2, ~0, 1);
3160 s->deviceid = pcidev->device;
3161
3162 if (pcidev->device == PCI_DEVICE_ID_CMEDIA_CM8738
3163 || pcidev->device == PCI_DEVICE_ID_CMEDIA_CM8738B) {
3164
3165 /* chip version and hw capability check */
3166 s->chip_version = query_chip(s);
3167 printk(KERN_INFO "cmpci: chip version = 0%d\n", s->chip_version);
3168
3169 /* set SPDIF-in inverse before enable SPDIF loop */
3170 set_spdifin_inverse(s, spdif_inverse);
3171
3172 /* use SPDIF in #1 */
3173 set_spdifin_channel2(s, 0);
3174 } else {
3175 s->chip_version = 0;
3176 /* 8338 will fall here */
3177 s->max_channels = 4;
3178 s->capability |= CAN_DUAL_DAC;
3179 s->capability |= CAN_LINE_AS_REAR;
3180 }
3181 /* enable SPDIF loop */
3182 set_spdif_loop(s, spdif_loop);
3183
3184 // enable 4 speaker mode (analog duplicate)
3185 set_hw_copy(s, hw_copy);
3186
3187 reg_mask = 0;
3188#ifdef CONFIG_SOUND_CMPCI_FM
3189 /* disable FM */
3190 maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~8, 0);
3191 if (s->iosynth) {
3192 /* don't enable OPL3 if there is one */
3193 if (opl3_detect(s->iosynth, NULL)) {
3194 s->iosynth = 0;
3195 } else {
3196 /* set IO based at 0x388 */
3197 switch (s->iosynth) {
3198 case 0x388:
3199 reg_mask = 0;
3200 break;
3201 case 0x3C8:
3202 reg_mask = 0x01;
3203 break;
3204 case 0x3E0:
3205 reg_mask = 0x02;
3206 break;
3207 case 0x3E8:
3208 reg_mask = 0x03;
3209 break;
3210 default:
3211 s->iosynth = 0;
3212 break;
3213 }
3214 maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3, ~0x03, reg_mask);
3215 /* enable FM */
3216 if (s->iosynth) {
3217 maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~0, 8);
3218 if (opl3_detect(s->iosynth, NULL))
3219 ret = opl3_init(s->iosynth, NULL, THIS_MODULE);
3220 else {
3221 maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~8, 0);
3222 s->iosynth = 0;
3223 }
3224 }
3225 }
3226 }
3227#endif
3228#ifdef CONFIG_SOUND_CMPCI_MIDI
3229 switch (s->iomidi) {
3230 case 0x330:
3231 reg_mask = 0;
3232 break;
3233 case 0x320:
3234 reg_mask = 0x20;
3235 break;
3236 case 0x310:
3237 reg_mask = 0x40;
3238 break;
3239 case 0x300:
3240 reg_mask = 0x60;
3241 break;
3242 default:
3243 s->iomidi = 0;
3244 goto skip_mpu;
3245 }
3246 ports = request_region(s->iomidi, 2, "mpu401");
3247 if (!ports)
3248 goto skip_mpu;
3249 /* disable MPU-401 */
3250 maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0x04, 0);
3251 s->mpu_data.name = "cmpci mpu";
3252 s->mpu_data.io_base = s->iomidi;
3253 s->mpu_data.irq = -s->irq; // tell mpu401 to share irq
3254 if (probe_mpu401(&s->mpu_data, ports)) {
3255 release_region(s->iomidi, 2);
3256 s->iomidi = 0;
3257 goto skip_mpu;
3258 }
3259 maskb(s->iobase + CODEC_CMI_LEGACY_CTRL + 3, ~0x60, reg_mask);
3260 /* enable MPU-401 */
3261 maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0, 0x04);
3262 /* clear all previously received interrupt */
3263 for (timeout = 900000; timeout > 0; timeout--) {
3264 if ((inb(s->iomidi + 1) && 0x80) == 0)
3265 inb(s->iomidi);
3266 else
3267 break;
3268 }
3269 if (!probe_mpu401(&s->mpu_data, ports)) {
3270 release_region(s->iomidi, 2);
3271 s->iomidi = 0;
3272 maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0, 0x04);
3273 } else {
3274 attach_mpu401(&s->mpu_data, THIS_MODULE);
3275 s->midi_devc = s->mpu_data.slots[1];
3276 }
3277skip_mpu:
3278#endif
3279 /* disable joystick port */
3280 maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0x02, 0);
3281 if (joystick)
3282 cm_create_gameport(s, 0x200);
3283
3284 /* store it in the driver field */
3285 pci_set_drvdata(pcidev, s);
3286 /* put it into driver list */
3287 list_add_tail(&s->devs, &devs);
3288 /* increment devindex */
3289 if (devindex < NR_DEVICE-1)
3290 devindex++;
3291 return 0;
3292
3293err_dev2:
3294 unregister_sound_dsp(s->dev_audio);
3295err_dev1:
3296 printk(KERN_ERR "cmpci: cannot register misc device\n");
3297 free_irq(s->irq, s);
3298err_irq:
3299 release_region(s->iobase, CM_EXTENT_CODEC);
3300err_region5:
3301 kfree(s);
3302 return ret;
3303}
3304
3305/* --------------------------------------------------------------------- */
3306
3307MODULE_AUTHOR("ChenLi Tien, cltien@cmedia.com.tw");
3308MODULE_DESCRIPTION("CM8x38 Audio Driver");
3309MODULE_LICENSE("GPL");
3310
/*
 * PCI remove callback: tear down one cmpci card in the reverse order of
 * cm_probe().  The ordering below is deliberate — interrupts are masked at
 * the chip and synchronize_irq() is allowed to drain any in-flight handler
 * before the IRQ is freed and the state structure is released.
 */
static void __devexit cm_remove(struct pci_dev *dev)
{
	struct cm_state *s = pci_get_drvdata(dev);

	/* probe may have failed before drvdata was set */
	if (!s)
		return;

	cm_free_gameport(s);

#ifdef CONFIG_SOUND_CMPCI_FM
	if (s->iosynth) {
		/* disable FM */
		maskb(s->iobase + CODEC_CMI_MISC_CTRL + 2, ~8, 0);
	}
#endif
#ifdef CONFIG_SOUND_CMPCI_MIDI
	if (s->iomidi) {
		unload_mpu401(&s->mpu_data);
		/* disable MPU-401 */
		maskb(s->iobase + CODEC_CMI_FUNCTRL1, ~0x04, 0);
	}
#endif
	set_spdif_loop(s, 0);
	/* unlink from the global device list before the struct goes away */
	list_del(&s->devs);
	outb(0, s->iobase + CODEC_CMI_INT_HLDCLR + 2); /* disable ints */
	/* wait for a possibly running interrupt handler to finish */
	synchronize_irq(s->irq);
	outb(0, s->iobase + CODEC_CMI_FUNCTRL0 + 2); /* disable channels */
	free_irq(s->irq, s);

	/* reset mixer */
	wrmixer(s, DSP_MIX_DATARESETIDX, 0);

	release_region(s->iobase, CM_EXTENT_CODEC);
	unregister_sound_dsp(s->dev_audio);
	unregister_sound_mixer(s->dev_mixer);
	kfree(s);
	pci_set_drvdata(dev, NULL);
}
3349
3350static struct pci_device_id id_table[] __devinitdata = {
3351 { PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738B, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
3352 { PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8738, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
3353 { PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338A, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
3354 { PCI_VENDOR_ID_CMEDIA, PCI_DEVICE_ID_CMEDIA_CM8338B, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
3355 { 0, }
3356};
3357
3358MODULE_DEVICE_TABLE(pci, id_table);
3359
/*
 * PCI driver glue: binds cm_probe()/cm_remove() to the C-Media device IDs
 * listed in id_table.  Registered from init_cmpci() below.
 */
static struct pci_driver cm_driver = {
	.name = "cmpci",
	.id_table = id_table,
	.probe = cm_probe,
	.remove = __devexit_p(cm_remove)
};
3366
3367static int __init init_cmpci(void)
3368{
3369 printk(KERN_INFO "cmpci: version $Revision: 6.82 $ time " __TIME__ " " __DATE__ "\n");
3370 return pci_register_driver(&cm_driver);
3371}
3372
/*
 * Module exit point: detach from the PCI core, which invokes cm_remove()
 * for every bound device before this returns.
 */
static void __exit cleanup_cmpci(void)
{
	printk(KERN_INFO "cmpci: unloading\n");
	pci_unregister_driver(&cm_driver);
}
3378
3379module_init(init_cmpci);
3380module_exit(cleanup_cmpci);
diff --git a/sound/oss/cs4281/Makefile b/sound/oss/cs4281/Makefile
deleted file mode 100644
index 6d527e8530d6..000000000000
--- a/sound/oss/cs4281/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
1# Makefile for Cirrus Logic-Crystal CS4281
2#
3
4obj-$(CONFIG_SOUND_CS4281) += cs4281.o
5
6cs4281-objs += cs4281m.o
diff --git a/sound/oss/cs4281/cs4281_hwdefs.h b/sound/oss/cs4281/cs4281_hwdefs.h
deleted file mode 100644
index 701d595e33f5..000000000000
--- a/sound/oss/cs4281/cs4281_hwdefs.h
+++ /dev/null
@@ -1,1234 +0,0 @@
1//****************************************************************************
2//
3// HWDEFS.H - Definitions of the registers and data structures used by the
4// CS4281
5//
6// Copyright (c) 1999,2000,2001 Crystal Semiconductor Corp.
7//
8//****************************************************************************
9
10#ifndef _H_HWDEFS
11#define _H_HWDEFS
12
13//****************************************************************************
14//
15// The following define the offsets of the registers located in the PCI
16// configuration space of the CS4281 part.
17//
18//****************************************************************************
19#define PCICONFIG_DEVID_VENID 0x00000000L
20#define PCICONFIG_STATUS_COMMAND 0x00000004L
21#define PCICONFIG_CLASS_REVISION 0x00000008L
22#define PCICONFIG_LATENCY_TIMER 0x0000000CL
23#define PCICONFIG_BA0 0x00000010L
24#define PCICONFIG_BA1 0x00000014L
25#define PCICONFIG_SUBSYSID_SUBSYSVENID 0x0000002CL
26#define PCICONFIG_INTERRUPT 0x0000003CL
27
28//****************************************************************************
29//
30// The following define the offsets of the registers accessed via base address
31// register zero on the CS4281 part.
32//
33//****************************************************************************
34#define BA0_HISR 0x00000000L
35#define BA0_HICR 0x00000008L
36#define BA0_HIMR 0x0000000CL
37#define BA0_IIER 0x00000010L
38#define BA0_HDSR0 0x000000F0L
39#define BA0_HDSR1 0x000000F4L
40#define BA0_HDSR2 0x000000F8L
41#define BA0_HDSR3 0x000000FCL
42#define BA0_DCA0 0x00000110L
43#define BA0_DCC0 0x00000114L
44#define BA0_DBA0 0x00000118L
45#define BA0_DBC0 0x0000011CL
46#define BA0_DCA1 0x00000120L
47#define BA0_DCC1 0x00000124L
48#define BA0_DBA1 0x00000128L
49#define BA0_DBC1 0x0000012CL
50#define BA0_DCA2 0x00000130L
51#define BA0_DCC2 0x00000134L
52#define BA0_DBA2 0x00000138L
53#define BA0_DBC2 0x0000013CL
54#define BA0_DCA3 0x00000140L
55#define BA0_DCC3 0x00000144L
56#define BA0_DBA3 0x00000148L
57#define BA0_DBC3 0x0000014CL
58#define BA0_DMR0 0x00000150L
59#define BA0_DCR0 0x00000154L
60#define BA0_DMR1 0x00000158L
61#define BA0_DCR1 0x0000015CL
62#define BA0_DMR2 0x00000160L
63#define BA0_DCR2 0x00000164L
64#define BA0_DMR3 0x00000168L
65#define BA0_DCR3 0x0000016CL
66#define BA0_DLMR 0x00000170L
67#define BA0_DLSR 0x00000174L
68#define BA0_FCR0 0x00000180L
69#define BA0_FCR1 0x00000184L
70#define BA0_FCR2 0x00000188L
71#define BA0_FCR3 0x0000018CL
72#define BA0_FPDR0 0x00000190L
73#define BA0_FPDR1 0x00000194L
74#define BA0_FPDR2 0x00000198L
75#define BA0_FPDR3 0x0000019CL
76#define BA0_FCHS 0x0000020CL
77#define BA0_FSIC0 0x00000210L
78#define BA0_FSIC1 0x00000214L
79#define BA0_FSIC2 0x00000218L
80#define BA0_FSIC3 0x0000021CL
81#define BA0_PCICFG00 0x00000300L
82#define BA0_PCICFG04 0x00000304L
83#define BA0_PCICFG08 0x00000308L
84#define BA0_PCICFG0C 0x0000030CL
85#define BA0_PCICFG10 0x00000310L
86#define BA0_PCICFG14 0x00000314L
87#define BA0_PCICFG18 0x00000318L
88#define BA0_PCICFG1C 0x0000031CL
89#define BA0_PCICFG20 0x00000320L
90#define BA0_PCICFG24 0x00000324L
91#define BA0_PCICFG28 0x00000328L
92#define BA0_PCICFG2C 0x0000032CL
93#define BA0_PCICFG30 0x00000330L
94#define BA0_PCICFG34 0x00000334L
95#define BA0_PCICFG38 0x00000338L
96#define BA0_PCICFG3C 0x0000033CL
97#define BA0_PCICFG40 0x00000340L
98#define BA0_PMCS 0x00000344L
99#define BA0_CWPR 0x000003E0L
100#define BA0_EPPMC 0x000003E4L
101#define BA0_GPIOR 0x000003E8L
102#define BA0_SPMC 0x000003ECL
103#define BA0_CFLR 0x000003F0L
104#define BA0_IISR 0x000003F4L
105#define BA0_TMS 0x000003F8L
106#define BA0_SSVID 0x000003FCL
107#define BA0_CLKCR1 0x00000400L
108#define BA0_FRR 0x00000410L
109#define BA0_SLT12O 0x0000041CL
110#define BA0_SERMC 0x00000420L
111#define BA0_SERC1 0x00000428L
112#define BA0_SERC2 0x0000042CL
113#define BA0_SLT12M 0x0000045CL
114#define BA0_ACCTL 0x00000460L
115#define BA0_ACSTS 0x00000464L
116#define BA0_ACOSV 0x00000468L
117#define BA0_ACCAD 0x0000046CL
118#define BA0_ACCDA 0x00000470L
119#define BA0_ACISV 0x00000474L
120#define BA0_ACSAD 0x00000478L
121#define BA0_ACSDA 0x0000047CL
122#define BA0_JSPT 0x00000480L
123#define BA0_JSCTL 0x00000484L
124#define BA0_MIDCR 0x00000490L
125#define BA0_MIDCMD 0x00000494L
126#define BA0_MIDSR 0x00000494L
127#define BA0_MIDWP 0x00000498L
128#define BA0_MIDRP 0x0000049CL
129#define BA0_AODSD1 0x000004A8L
130#define BA0_AODSD2 0x000004ACL
131#define BA0_CFGI 0x000004B0L
132#define BA0_SLT12M2 0x000004DCL
133#define BA0_ACSTS2 0x000004E4L
134#define BA0_ACISV2 0x000004F4L
135#define BA0_ACSAD2 0x000004F8L
136#define BA0_ACSDA2 0x000004FCL
137#define BA0_IOTGP 0x00000500L
138#define BA0_IOTSB 0x00000504L
139#define BA0_IOTFM 0x00000508L
140#define BA0_IOTDMA 0x0000050CL
141#define BA0_IOTAC0 0x00000500L
142#define BA0_IOTAC1 0x00000504L
143#define BA0_IOTAC2 0x00000508L
144#define BA0_IOTAC3 0x0000050CL
145#define BA0_IOTPCP 0x0000052CL
146#define BA0_IOTCC 0x00000530L
147#define BA0_IOTCR 0x0000058CL
148#define BA0_PCPRR 0x00000600L
149#define BA0_PCPGR 0x00000604L
150#define BA0_PCPCR 0x00000608L
151#define BA0_PCPCIEN 0x00000608L
152#define BA0_SBMAR 0x00000700L
153#define BA0_SBMDR 0x00000704L
154#define BA0_SBRR 0x00000708L
155#define BA0_SBRDP 0x0000070CL
156#define BA0_SBWDP 0x00000710L
157#define BA0_SBWBS 0x00000710L
158#define BA0_SBRBS 0x00000714L
159#define BA0_FMSR 0x00000730L
160#define BA0_B0AP 0x00000730L
161#define BA0_FMDP 0x00000734L
162#define BA0_B1AP 0x00000738L
163#define BA0_B1DP 0x0000073CL
164#define BA0_SSPM 0x00000740L
165#define BA0_DACSR 0x00000744L
166#define BA0_ADCSR 0x00000748L
167#define BA0_SSCR 0x0000074CL
168#define BA0_FMLVC 0x00000754L
169#define BA0_FMRVC 0x00000758L
170#define BA0_SRCSA 0x0000075CL
171#define BA0_PPLVC 0x00000760L
172#define BA0_PPRVC 0x00000764L
173#define BA0_PASR 0x00000768L
174#define BA0_CASR 0x0000076CL
175
176//****************************************************************************
177//
178// The following define the offsets of the AC97 shadow registers, which appear
179// as a virtual extension to the base address register zero memory range.
180//
181//****************************************************************************
182#define AC97_REG_OFFSET_MASK 0x0000007EL
183#define AC97_CODEC_NUMBER_MASK 0x00003000L
184
185#define BA0_AC97_RESET 0x00001000L
186#define BA0_AC97_MASTER_VOLUME 0x00001002L
187#define BA0_AC97_HEADPHONE_VOLUME 0x00001004L
188#define BA0_AC97_MASTER_VOLUME_MONO 0x00001006L
189#define BA0_AC97_MASTER_TONE 0x00001008L
190#define BA0_AC97_PC_BEEP_VOLUME 0x0000100AL
191#define BA0_AC97_PHONE_VOLUME 0x0000100CL
192#define BA0_AC97_MIC_VOLUME 0x0000100EL
193#define BA0_AC97_LINE_IN_VOLUME 0x00001010L
194#define BA0_AC97_CD_VOLUME 0x00001012L
195#define BA0_AC97_VIDEO_VOLUME 0x00001014L
196#define BA0_AC97_AUX_VOLUME 0x00001016L
197#define BA0_AC97_PCM_OUT_VOLUME 0x00001018L
198#define BA0_AC97_RECORD_SELECT 0x0000101AL
199#define BA0_AC97_RECORD_GAIN 0x0000101CL
200#define BA0_AC97_RECORD_GAIN_MIC 0x0000101EL
201#define BA0_AC97_GENERAL_PURPOSE 0x00001020L
202#define BA0_AC97_3D_CONTROL 0x00001022L
203#define BA0_AC97_MODEM_RATE 0x00001024L
204#define BA0_AC97_POWERDOWN 0x00001026L
205#define BA0_AC97_EXT_AUDIO_ID 0x00001028L
206#define BA0_AC97_EXT_AUDIO_POWER 0x0000102AL
207#define BA0_AC97_PCM_FRONT_DAC_RATE 0x0000102CL
208#define BA0_AC97_PCM_SURR_DAC_RATE 0x0000102EL
209#define BA0_AC97_PCM_LFE_DAC_RATE 0x00001030L
210#define BA0_AC97_PCM_LR_ADC_RATE 0x00001032L
211#define BA0_AC97_MIC_ADC_RATE 0x00001034L
212#define BA0_AC97_6CH_VOL_C_LFE 0x00001036L
213#define BA0_AC97_6CH_VOL_SURROUND 0x00001038L
214#define BA0_AC97_RESERVED_3A 0x0000103AL
215#define BA0_AC97_EXT_MODEM_ID 0x0000103CL
216#define BA0_AC97_EXT_MODEM_POWER 0x0000103EL
217#define BA0_AC97_LINE1_CODEC_RATE 0x00001040L
218#define BA0_AC97_LINE2_CODEC_RATE 0x00001042L
219#define BA0_AC97_HANDSET_CODEC_RATE 0x00001044L
220#define BA0_AC97_LINE1_CODEC_LEVEL 0x00001046L
221#define BA0_AC97_LINE2_CODEC_LEVEL 0x00001048L
222#define BA0_AC97_HANDSET_CODEC_LEVEL 0x0000104AL
223#define BA0_AC97_GPIO_PIN_CONFIG 0x0000104CL
224#define BA0_AC97_GPIO_PIN_TYPE 0x0000104EL
225#define BA0_AC97_GPIO_PIN_STICKY 0x00001050L
226#define BA0_AC97_GPIO_PIN_WAKEUP 0x00001052L
227#define BA0_AC97_GPIO_PIN_STATUS 0x00001054L
228#define BA0_AC97_MISC_MODEM_AFE_STAT 0x00001056L
229#define BA0_AC97_RESERVED_58 0x00001058L
230#define BA0_AC97_CRYSTAL_REV_N_FAB_ID 0x0000105AL
231#define BA0_AC97_TEST_AND_MISC_CTRL 0x0000105CL
232#define BA0_AC97_AC_MODE 0x0000105EL
233#define BA0_AC97_MISC_CRYSTAL_CONTROL 0x00001060L
234#define BA0_AC97_LINE1_HYPRID_CTRL 0x00001062L
235#define BA0_AC97_VENDOR_RESERVED_64 0x00001064L
236#define BA0_AC97_VENDOR_RESERVED_66 0x00001066L
237#define BA0_AC97_SPDIF_CONTROL 0x00001068L
238#define BA0_AC97_VENDOR_RESERVED_6A 0x0000106AL
239#define BA0_AC97_VENDOR_RESERVED_6C 0x0000106CL
240#define BA0_AC97_VENDOR_RESERVED_6E 0x0000106EL
241#define BA0_AC97_VENDOR_RESERVED_70 0x00001070L
242#define BA0_AC97_VENDOR_RESERVED_72 0x00001072L
243#define BA0_AC97_VENDOR_RESERVED_74 0x00001074L
244#define BA0_AC97_CAL_ADDRESS 0x00001076L
245#define BA0_AC97_CAL_DATA 0x00001078L
246#define BA0_AC97_VENDOR_RESERVED_7A 0x0000107AL
247#define BA0_AC97_VENDOR_ID1 0x0000107CL
248#define BA0_AC97_VENDOR_ID2 0x0000107EL
249
250//****************************************************************************
251//
252// The following define the offsets of the registers and memories accessed via
253// base address register one on the CS4281 part.
254//
255//****************************************************************************
256
257//****************************************************************************
258//
259// The following defines are for the flags in the PCI device ID/vendor ID
260// register.
261//
262//****************************************************************************
263#define PDV_VENID_MASK 0x0000FFFFL
264#define PDV_DEVID_MASK 0xFFFF0000L
265#define PDV_VENID_SHIFT 0L
266#define PDV_DEVID_SHIFT 16L
267#define VENID_CIRRUS_LOGIC 0x1013L
268#define DEVID_CS4281 0x6005L
269
270//****************************************************************************
271//
272// The following defines are for the flags in the PCI status and command
273// register.
274//
275//****************************************************************************
276#define PSC_IO_SPACE_ENABLE 0x00000001L
277#define PSC_MEMORY_SPACE_ENABLE 0x00000002L
278#define PSC_BUS_MASTER_ENABLE 0x00000004L
279#define PSC_SPECIAL_CYCLES 0x00000008L
280#define PSC_MWI_ENABLE 0x00000010L
281#define PSC_VGA_PALETTE_SNOOP 0x00000020L
282#define PSC_PARITY_RESPONSE 0x00000040L
283#define PSC_WAIT_CONTROL 0x00000080L
284#define PSC_SERR_ENABLE 0x00000100L
285#define PSC_FAST_B2B_ENABLE 0x00000200L
286#define PSC_UDF_MASK 0x007F0000L
287#define PSC_FAST_B2B_CAPABLE 0x00800000L
288#define PSC_PARITY_ERROR_DETECTED 0x01000000L
289#define PSC_DEVSEL_TIMING_MASK 0x06000000L
290#define PSC_TARGET_ABORT_SIGNALLED 0x08000000L
291#define PSC_RECEIVED_TARGET_ABORT 0x10000000L
292#define PSC_RECEIVED_MASTER_ABORT 0x20000000L
293#define PSC_SIGNALLED_SERR 0x40000000L
294#define PSC_DETECTED_PARITY_ERROR 0x80000000L
295#define PSC_UDF_SHIFT 16L
296#define PSC_DEVSEL_TIMING_SHIFT 25L
297
298//****************************************************************************
299//
300// The following defines are for the flags in the PCI class/revision ID
301// register.
302//
303//****************************************************************************
304#define PCR_REVID_MASK 0x000000FFL
305#define PCR_INTERFACE_MASK 0x0000FF00L
306#define PCR_SUBCLASS_MASK 0x00FF0000L
307#define PCR_CLASS_MASK 0xFF000000L
308#define PCR_REVID_SHIFT 0L
309#define PCR_INTERFACE_SHIFT 8L
310#define PCR_SUBCLASS_SHIFT 16L
311#define PCR_CLASS_SHIFT 24L
312
313//****************************************************************************
314//
315// The following defines are for the flags in the PCI latency timer register.
316//
317//****************************************************************************
318#define PLT_CACHE_LINE_SIZE_MASK 0x000000FFL
319#define PLT_LATENCY_TIMER_MASK 0x0000FF00L
320#define PLT_HEADER_TYPE_MASK 0x00FF0000L
321#define PLT_BIST_MASK 0xFF000000L
322#define PLT_CACHE_LINE_SIZE_SHIFT 0L
323#define PLT_LATENCY_TIMER_SHIFT 8L
324#define PLT_HEADER_TYPE_SHIFT 16L
325#define PLT_BIST_SHIFT 24L
326
327//****************************************************************************
328//
329// The following defines are for the flags in the PCI base address registers.
330//
331//****************************************************************************
332#define PBAR_MEMORY_SPACE_INDICATOR 0x00000001L
333#define PBAR_LOCATION_TYPE_MASK 0x00000006L
334#define PBAR_NOT_PREFETCHABLE 0x00000008L
335#define PBAR_ADDRESS_MASK 0xFFFFFFF0L
336#define PBAR_LOCATION_TYPE_SHIFT 1L
337
338//****************************************************************************
339//
340// The following defines are for the flags in the PCI subsystem ID/subsystem
341// vendor ID register.
342//
343//****************************************************************************
344#define PSS_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFFL
345#define PSS_SUBSYSTEM_ID_MASK 0xFFFF0000L
346#define PSS_SUBSYSTEM_VENDOR_ID_SHIFT 0L
347#define PSS_SUBSYSTEM_ID_SHIFT 16L
348
349//****************************************************************************
350//
351// The following defines are for the flags in the PCI interrupt register.
352//
353//****************************************************************************
354#define PI_LINE_MASK 0x000000FFL
355#define PI_PIN_MASK 0x0000FF00L
356#define PI_MIN_GRANT_MASK 0x00FF0000L
357#define PI_MAX_LATENCY_MASK 0xFF000000L
358#define PI_LINE_SHIFT 0L
359#define PI_PIN_SHIFT 8L
360#define PI_MIN_GRANT_SHIFT 16L
361#define PI_MAX_LATENCY_SHIFT 24L
362
363//****************************************************************************
364//
365// The following defines are for the flags in the host interrupt status
366// register.
367//
368//****************************************************************************
369#define HISR_HVOLMASK 0x00000003L
370#define HISR_VDNI 0x00000001L
371#define HISR_VUPI 0x00000002L
372#define HISR_GP1I 0x00000004L
373#define HISR_GP3I 0x00000008L
374#define HISR_GPSI 0x00000010L
375#define HISR_GPPI 0x00000020L
376#define HISR_DMAI 0x00040000L
377#define HISR_FIFOI 0x00100000L
378#define HISR_HVOL 0x00200000L
379#define HISR_MIDI 0x00400000L
380#define HISR_SBINT 0x00800000L
381#define HISR_INTENA 0x80000000L
382#define HISR_DMA_MASK 0x00000F00L
383#define HISR_FIFO_MASK 0x0000F000L
384#define HISR_DMA_SHIFT 8L
385#define HISR_FIFO_SHIFT 12L
386#define HISR_FIFO0 0x00001000L
387#define HISR_FIFO1 0x00002000L
388#define HISR_FIFO2 0x00004000L
389#define HISR_FIFO3 0x00008000L
390#define HISR_DMA0 0x00000100L
391#define HISR_DMA1 0x00000200L
392#define HISR_DMA2 0x00000400L
393#define HISR_DMA3 0x00000800L
394#define HISR_RESERVED 0x40000000L
395
396//****************************************************************************
397//
398// The following defines are for the flags in the host interrupt control
399// register.
400//
401//****************************************************************************
402#define HICR_IEV 0x00000001L
403#define HICR_CHGM 0x00000002L
404
405//****************************************************************************
406//
407// The following defines are for the flags in the DMA Mode Register n
408// (DMRn)
409//
410//****************************************************************************
411#define DMRn_TR_MASK 0x0000000CL
412#define DMRn_TR_SHIFT 2L
413#define DMRn_AUTO 0x00000010L
414#define DMRn_TR_READ 0x00000008L
415#define DMRn_TR_WRITE 0x00000004L
416#define DMRn_TYPE_MASK 0x000000C0L
417#define DMRn_TYPE_SHIFT 6L
418#define DMRn_SIZE8 0x00010000L
419#define DMRn_MONO 0x00020000L
420#define DMRn_BEND 0x00040000L
421#define DMRn_USIGN 0x00080000L
422#define DMRn_SIZE20 0x00100000L
423#define DMRn_SWAPC 0x00400000L
424#define DMRn_CBC 0x01000000L
425#define DMRn_TBC 0x02000000L
426#define DMRn_POLL 0x10000000L
427#define DMRn_DMA 0x20000000L
428#define DMRn_FSEL_MASK 0xC0000000L
429#define DMRn_FSEL_SHIFT 30L
430#define DMRn_FSEL0 0x00000000L
431#define DMRn_FSEL1 0x40000000L
432#define DMRn_FSEL2 0x80000000L
433#define DMRn_FSEL3 0xC0000000L
434
435//****************************************************************************
436//
437// The following defines are for the flags in the DMA Command Register n
438// (DCRn)
439//
440//****************************************************************************
441#define DCRn_HTCIE 0x00020000L
442#define DCRn_TCIE 0x00010000L
443#define DCRn_MSK 0x00000001L
444
445//****************************************************************************
446//
447// The following defines are for the flags in the FIFO Control
448// register n.(FCRn)
449//
450//****************************************************************************
451#define FCRn_OF_MASK 0x0000007FL
452#define FCRn_OF_SHIFT 0L
453#define FCRn_SZ_MASK 0x00007F00L
454#define FCRn_SZ_SHIFT 8L
455#define FCRn_LS_MASK 0x001F0000L
456#define FCRn_LS_SHIFT 16L
457#define FCRn_RS_MASK 0x1F000000L
458#define FCRn_RS_SHIFT 24L
459#define FCRn_FEN 0x80000000L
460#define FCRn_PSH 0x20000000L
461#define FCRn_DACZ 0x40000000L
462
463//****************************************************************************
464//
465// The following defines are for the flags in the serial port Power Management
466// control register.(SPMC)
467//
468//****************************************************************************
469#define SPMC_RSTN 0x00000001L
470#define SPMC_ASYN 0x00000002L
471#define SPMC_WUP1 0x00000004L
472#define SPMC_WUP2 0x00000008L
473#define SPMC_ASDI2E 0x00000100L
474#define SPMC_ESSPD 0x00000200L
475#define SPMC_GISPEN 0x00004000L
476#define SPMC_GIPPEN 0x00008000L
477
478//****************************************************************************
479//
480// The following defines are for the flags in the Configuration Load register.
481// (CFLR)
482//
483//****************************************************************************
484#define CFLR_CLOCK_SOURCE_MASK 0x00000003L
485#define CFLR_CLOCK_SOURCE_AC97 0x00000001L
486
487#define CFLR_CB0_MASK 0x000000FFL
488#define CFLR_CB1_MASK 0x0000FF00L
489#define CFLR_CB2_MASK 0x00FF0000L
490#define CFLR_CB3_MASK 0xFF000000L
491#define CFLR_CB0_SHIFT 0L
492#define CFLR_CB1_SHIFT 8L
493#define CFLR_CB2_SHIFT 16L
494#define CFLR_CB3_SHIFT 24L
495
496#define IOTCR_DMA0 0x00000000L
497#define IOTCR_DMA1 0x00000400L
498#define IOTCR_DMA2 0x00000800L
499#define IOTCR_DMA3 0x00000C00L
500#define IOTCR_CCLS 0x00000100L
501#define IOTCR_PCPCI 0x00000200L
502#define IOTCR_DDMA 0x00000300L
503
504#define SBWBS_WBB 0x00000080L
505
506//****************************************************************************
507//
508// The following defines are for the flags in the SRC Slot Assignment Register
509// (SRCSA)
510//
511//****************************************************************************
512#define SRCSA_PLSS_MASK 0x0000001FL
513#define SRCSA_PLSS_SHIFT 0L
514#define SRCSA_PRSS_MASK 0x00001F00L
515#define SRCSA_PRSS_SHIFT 8L
516#define SRCSA_CLSS_MASK 0x001F0000L
517#define SRCSA_CLSS_SHIFT 16L
518#define SRCSA_CRSS_MASK 0x1F000000L
519#define SRCSA_CRSS_SHIFT 24L
520
521//****************************************************************************
522//
523// The following defines are for the flags in the Sound System Power Management
524// register.(SSPM)
525//
526//****************************************************************************
527#define SSPM_FPDN 0x00000080L
528#define SSPM_MIXEN 0x00000040L
529#define SSPM_CSRCEN 0x00000020L
530#define SSPM_PSRCEN 0x00000010L
531#define SSPM_JSEN 0x00000008L
532#define SSPM_ACLEN 0x00000004L
533#define SSPM_FMEN 0x00000002L
534
535//****************************************************************************
536//
537// The following defines are for the flags in the Sound System Control
538// Register. (SSCR)
539//
540//****************************************************************************
541#define SSCR_SB 0x00000004L
542#define SSCR_HVC 0x00000008L
543#define SSCR_LPFIFO 0x00000040L
544#define SSCR_LPSRC 0x00000080L
545#define SSCR_XLPSRC 0x00000100L
546#define SSCR_MVMD 0x00010000L
547#define SSCR_MVAD 0x00020000L
548#define SSCR_MVLD 0x00040000L
549#define SSCR_MVCS 0x00080000L
550
551//****************************************************************************
552//
553// The following defines are for the flags in the Clock Control Register 1.
554// (CLKCR1)
555//
556//****************************************************************************
557#define CLKCR1_DLLSS_MASK 0x0000000CL
558#define CLKCR1_DLLSS_SHIFT 2L
559#define CLKCR1_DLLP 0x00000010L
560#define CLKCR1_SWCE 0x00000020L
561#define CLKCR1_DLLOS 0x00000040L
562#define CLKCR1_CKRA 0x00010000L
563#define CLKCR1_CKRN 0x00020000L
564#define CLKCR1_DLLRDY 0x01000000L
565#define CLKCR1_CLKON 0x02000000L
566
567//****************************************************************************
568//
569// The following defines are for the flags in the Sound Blaster Read Buffer
570// Status.(SBRBS)
571//
572//****************************************************************************
573#define SBRBS_RD_MASK 0x0000007FL
574#define SBRBS_RD_SHIFT 0L
575#define SBRBS_RBF 0x00000080L
576
577//****************************************************************************
578//
579// The following defines are for the flags in the serial port master control
580// register.(SERMC)
581//
582//****************************************************************************
583#define SERMC_MSPE 0x00000001L
584#define SERMC_PTC_MASK 0x0000000EL
585#define SERMC_PTC_SHIFT 1L
586#define SERMC_PTC_AC97 0x00000002L
587#define SERMC_PLB 0x00000010L
588#define SERMC_PXLB 0x00000020L
589#define SERMC_LOFV 0x00080000L
590#define SERMC_SLB 0x00100000L
591#define SERMC_SXLB 0x00200000L
592#define SERMC_ODSEN1 0x01000000L
593#define SERMC_ODSEN2 0x02000000L
594
595//****************************************************************************
596//
597// The following defines are for the flags in the General Purpose I/O Register.
598// (GPIOR)
599//
600//****************************************************************************
601#define GPIOR_VDNS 0x00000001L
602#define GPIOR_VUPS 0x00000002L
603#define GPIOR_GP1S 0x00000004L
604#define GPIOR_GP3S 0x00000008L
605#define GPIOR_GPSS 0x00000010L
606#define GPIOR_GPPS 0x00000020L
607#define GPIOR_GP1D 0x00000400L
608#define GPIOR_GP3D 0x00000800L
609#define GPIOR_VDNLT 0x00010000L
610#define GPIOR_VDNPO 0x00020000L
611#define GPIOR_VDNST 0x00040000L
612#define GPIOR_VDNW 0x00080000L
613#define GPIOR_VUPLT 0x00100000L
614#define GPIOR_VUPPO 0x00200000L
615#define GPIOR_VUPST 0x00400000L
616#define GPIOR_VUPW 0x00800000L
617#define GPIOR_GP1OE 0x01000000L
618#define GPIOR_GP1PT 0x02000000L
619#define GPIOR_GP1ST 0x04000000L
620#define GPIOR_GP1W 0x08000000L
621#define GPIOR_GP3OE 0x10000000L
622#define GPIOR_GP3PT 0x20000000L
623#define GPIOR_GP3ST 0x40000000L
624#define GPIOR_GP3W 0x80000000L
625
626//****************************************************************************
627//
628// The following defines are for the flags in the clock control register 1.
629//
630//****************************************************************************
631#define CLKCR1_PLLSS_MASK 0x0000000CL
632#define CLKCR1_PLLSS_SERIAL 0x00000000L
633#define CLKCR1_PLLSS_CRYSTAL 0x00000004L
634#define CLKCR1_PLLSS_PCI 0x00000008L
635#define CLKCR1_PLLSS_RESERVED 0x0000000CL
636#define CLKCR1_PLLP 0x00000010L
637#define CLKCR1_SWCE 0x00000020L
638#define CLKCR1_PLLOS 0x00000040L
639
640//****************************************************************************
641//
642// The following defines are for the flags in the feature reporting register.
643//
644//****************************************************************************
645#define FRR_FAB_MASK 0x00000003L
646#define FRR_MASK_MASK 0x0000001CL
647#define FRR_ID_MASK 0x00003000L
648#define FRR_FAB_SHIFT 0L
649#define FRR_MASK_SHIFT 2L
650#define FRR_ID_SHIFT 12L
651
652//****************************************************************************
653//
654// The following defines are for the flags in the serial port 1 configuration
655// register.
656//
657//****************************************************************************
658#define SERC1_VALUE 0x00000003L
659#define SERC1_SO1EN 0x00000001L
660#define SERC1_SO1F_MASK 0x0000000EL
661#define SERC1_SO1F_CS423X 0x00000000L
662#define SERC1_SO1F_AC97 0x00000002L
663#define SERC1_SO1F_DAC 0x00000004L
664#define SERC1_SO1F_SPDIF 0x00000006L
665
666//****************************************************************************
667//
668// The following defines are for the flags in the serial port 2 configuration
669// register.
670//
671//****************************************************************************
672#define SERC2_VALUE 0x00000003L
673#define SERC2_SI1EN 0x00000001L
674#define SERC2_SI1F_MASK 0x0000000EL
675#define SERC2_SI1F_CS423X 0x00000000L
676#define SERC2_SI1F_AC97 0x00000002L
677#define SERC2_SI1F_ADC 0x00000004L
678#define SERC2_SI1F_SPDIF 0x00000006L
679
680//****************************************************************************
681//
682// The following defines are for the flags in the AC97 control register.
683//
684//****************************************************************************
685#define ACCTL_ESYN 0x00000002L
686#define ACCTL_VFRM 0x00000004L
687#define ACCTL_DCV 0x00000008L
688#define ACCTL_CRW 0x00000010L
689#define ACCTL_TC 0x00000040L
690
691//****************************************************************************
692//
693// The following defines are for the flags in the AC97 status register.
694//
695//****************************************************************************
696#define ACSTS_CRDY 0x00000001L
697#define ACSTS_VSTS 0x00000002L
698
699//****************************************************************************
700//
701// The following defines are for the flags in the AC97 output slot valid
702// register.
703//
704//****************************************************************************
705#define ACOSV_SLV3 0x00000001L
706#define ACOSV_SLV4 0x00000002L
707#define ACOSV_SLV5 0x00000004L
708#define ACOSV_SLV6 0x00000008L
709#define ACOSV_SLV7 0x00000010L
710#define ACOSV_SLV8 0x00000020L
711#define ACOSV_SLV9 0x00000040L
712#define ACOSV_SLV10 0x00000080L
713#define ACOSV_SLV11 0x00000100L
714#define ACOSV_SLV12 0x00000200L
715
716//****************************************************************************
717//
718// The following defines are for the flags in the AC97 command address
719// register.
720//
721//****************************************************************************
722#define ACCAD_CI_MASK 0x0000007FL
723#define ACCAD_CI_SHIFT 0L
724
725//****************************************************************************
726//
727// The following defines are for the flags in the AC97 command data register.
728//
729//****************************************************************************
730#define ACCDA_CD_MASK 0x0000FFFFL
731#define ACCDA_CD_SHIFT 0L
732
733//****************************************************************************
734//
735// The following defines are for the flags in the AC97 input slot valid
736// register.
737//
738//****************************************************************************
739#define ACISV_ISV3 0x00000001L
740#define ACISV_ISV4 0x00000002L
741#define ACISV_ISV5 0x00000004L
742#define ACISV_ISV6 0x00000008L
743#define ACISV_ISV7 0x00000010L
744#define ACISV_ISV8 0x00000020L
745#define ACISV_ISV9 0x00000040L
746#define ACISV_ISV10 0x00000080L
747#define ACISV_ISV11 0x00000100L
748#define ACISV_ISV12 0x00000200L
749
750//****************************************************************************
751//
752// The following defines are for the flags in the AC97 status address
753// register.
754//
755//****************************************************************************
756#define ACSAD_SI_MASK 0x0000007FL
757#define ACSAD_SI_SHIFT 0L
758
759//****************************************************************************
760//
761// The following defines are for the flags in the AC97 status data register.
762//
763//****************************************************************************
764#define ACSDA_SD_MASK 0x0000FFFFL
765#define ACSDA_SD_SHIFT 0L
766
767//****************************************************************************
768//
769// The following defines are for the flags in the I/O trap address and control
770// registers (all 12).
771//
772//****************************************************************************
773#define IOTAC_SA_MASK 0x0000FFFFL
774#define IOTAC_MSK_MASK 0x000F0000L
775#define IOTAC_IODC_MASK 0x06000000L
776#define IOTAC_IODC_16_BIT 0x00000000L
777#define IOTAC_IODC_10_BIT 0x02000000L
778#define IOTAC_IODC_12_BIT 0x04000000L
779#define IOTAC_WSPI 0x08000000L
780#define IOTAC_RSPI 0x10000000L
781#define IOTAC_WSE 0x20000000L
782#define IOTAC_WE 0x40000000L
783#define IOTAC_RE 0x80000000L
784#define IOTAC_SA_SHIFT 0L
785#define IOTAC_MSK_SHIFT 16L
786
787//****************************************************************************
788//
789// The following defines are for the flags in the PC/PCI master enable
790// register.
791//
792//****************************************************************************
793#define PCPCIEN_EN 0x00000001L
794
795//****************************************************************************
796//
797// The following defines are for the flags in the joystick poll/trigger
798// register.
799//
800//****************************************************************************
801#define JSPT_CAX 0x00000001L
802#define JSPT_CAY 0x00000002L
803#define JSPT_CBX 0x00000004L
804#define JSPT_CBY 0x00000008L
805#define JSPT_BA1 0x00000010L
806#define JSPT_BA2 0x00000020L
807#define JSPT_BB1 0x00000040L
808#define JSPT_BB2 0x00000080L
809
810//****************************************************************************
811//
812// The following defines are for the flags in the joystick control register.
813// The TBF bit has been moved from MIDSR register to JSCTL register bit 8.
814//
815//****************************************************************************
816#define JSCTL_SP_MASK 0x00000003L
817#define JSCTL_SP_SLOW 0x00000000L
818#define JSCTL_SP_MEDIUM_SLOW 0x00000001L
819#define JSCTL_SP_MEDIUM_FAST 0x00000002L
820#define JSCTL_SP_FAST 0x00000003L
821#define JSCTL_ARE 0x00000004L
822#define JSCTL_TBF 0x00000100L
823
824
825//****************************************************************************
826//
827// The following defines are for the flags in the MIDI control register.
828//
829//****************************************************************************
830#define MIDCR_TXE 0x00000001L
831#define MIDCR_RXE 0x00000002L
832#define MIDCR_RIE 0x00000004L
833#define MIDCR_TIE 0x00000008L
834#define MIDCR_MLB 0x00000010L
835#define MIDCR_MRST 0x00000020L
836
837//****************************************************************************
838//
839// The following defines are for the flags in the MIDI status register.
840//
841//****************************************************************************
842#define MIDSR_RBE 0x00000080L
843#define MIDSR_RDA 0x00008000L
844
845//****************************************************************************
846//
847// The following defines are for the flags in the MIDI write port register.
848//
849//****************************************************************************
850#define MIDWP_MWD_MASK 0x000000FFL
851#define MIDWP_MWD_SHIFT 0L
852
853//****************************************************************************
854//
855// The following defines are for the flags in the MIDI read port register.
856//
857//****************************************************************************
858#define MIDRP_MRD_MASK 0x000000FFL
859#define MIDRP_MRD_SHIFT 0L
860
861//****************************************************************************
862//
863// The following defines are for the flags in the configuration interface
864// register.
865//
866//****************************************************************************
867#define CFGI_CLK 0x00000001L
868#define CFGI_DOUT 0x00000002L
869#define CFGI_DIN_EEN 0x00000004L
870#define CFGI_EELD 0x00000008L
871
872//****************************************************************************
873//
874// The following defines are for the flags in the subsystem ID and vendor ID
875// register.
876//
877//****************************************************************************
878#define SSVID_VID_MASK 0x0000FFFFL
879#define SSVID_SID_MASK 0xFFFF0000L
880#define SSVID_VID_SHIFT 0L
881#define SSVID_SID_SHIFT 16L
882
883//****************************************************************************
884//
885// The following defines are for the flags in the GPIO pin interface register.
886//
887//****************************************************************************
888#define GPIOR_VOLDN 0x00000001L
889#define GPIOR_VOLUP 0x00000002L
890#define GPIOR_SI2D 0x00000004L
891#define GPIOR_SI2OE 0x00000008L
892
893//****************************************************************************
894//
895// The following defines are for the flags in the AC97 status register 2.
896//
897//****************************************************************************
898#define ACSTS2_CRDY 0x00000001L
899#define ACSTS2_VSTS 0x00000002L
900
901//****************************************************************************
902//
903// The following defines are for the flags in the AC97 input slot valid
904// register 2.
905//
906//****************************************************************************
907#define ACISV2_ISV3 0x00000001L
908#define ACISV2_ISV4 0x00000002L
909#define ACISV2_ISV5 0x00000004L
910#define ACISV2_ISV6 0x00000008L
911#define ACISV2_ISV7 0x00000010L
912#define ACISV2_ISV8 0x00000020L
913#define ACISV2_ISV9 0x00000040L
914#define ACISV2_ISV10 0x00000080L
915#define ACISV2_ISV11 0x00000100L
916#define ACISV2_ISV12 0x00000200L
917
918//****************************************************************************
919//
920// The following defines are for the flags in the AC97 status address
921// register 2.
922//
923//****************************************************************************
924#define ACSAD2_SI_MASK 0x0000007FL
925#define ACSAD2_SI_SHIFT 0L
926
927//****************************************************************************
928//
929// The following defines are for the flags in the AC97 status data register 2.
930//
931//****************************************************************************
932#define ACSDA2_SD_MASK 0x0000FFFFL
933#define ACSDA2_SD_SHIFT 0L
934
935//****************************************************************************
936//
937// The following defines are for the flags in the I/O trap control register.
938//
939//****************************************************************************
940#define IOTCR_ITD 0x00000001L
941#define IOTCR_HRV 0x00000002L
942#define IOTCR_SRV 0x00000004L
943#define IOTCR_DTI 0x00000008L
944#define IOTCR_DFI 0x00000010L
945#define IOTCR_DDP 0x00000020L
946#define IOTCR_JTE 0x00000040L
947#define IOTCR_PPE 0x00000080L
948
949//****************************************************************************
950//
951// The following defines are for the flags in the I/O trap address and control
952// registers for Hardware Master Volume.
953//
954//****************************************************************************
955#define IOTGP_SA_MASK 0x0000FFFFL
956#define IOTGP_MSK_MASK 0x000F0000L
957#define IOTGP_IODC_MASK 0x06000000L
958#define IOTGP_IODC_16_BIT 0x00000000L
959#define IOTGP_IODC_10_BIT 0x02000000L
960#define IOTGP_IODC_12_BIT 0x04000000L
961#define IOTGP_WSPI 0x08000000L
962#define IOTGP_RSPI 0x10000000L
963#define IOTGP_WSE 0x20000000L
964#define IOTGP_WE 0x40000000L
965#define IOTGP_RE 0x80000000L
966#define IOTGP_SA_SHIFT 0L
967#define IOTGP_MSK_SHIFT 16L
968
969//****************************************************************************
970//
971// The following defines are for the flags in the I/O trap address and control
972// registers for Sound Blaster
973//
974//****************************************************************************
975#define IOTSB_SA_MASK 0x0000FFFFL
976#define IOTSB_MSK_MASK 0x000F0000L
977#define IOTSB_IODC_MASK 0x06000000L
978#define IOTSB_IODC_16_BIT 0x00000000L
979#define IOTSB_IODC_10_BIT 0x02000000L
980#define IOTSB_IODC_12_BIT 0x04000000L
981#define IOTSB_WSPI 0x08000000L
982#define IOTSB_RSPI 0x10000000L
983#define IOTSB_WSE 0x20000000L
984#define IOTSB_WE 0x40000000L
985#define IOTSB_RE 0x80000000L
986#define IOTSB_SA_SHIFT 0L
987#define IOTSB_MSK_SHIFT 16L
988
989//****************************************************************************
990//
991// The following defines are for the flags in the I/O trap address and control
992// registers for FM.
993//
994//****************************************************************************
995#define IOTFM_SA_MASK 0x0000FFFFL
996#define IOTFM_MSK_MASK 0x000F0000L
997#define IOTFM_IODC_MASK 0x06000000L
998#define IOTFM_IODC_16_BIT 0x00000000L
999#define IOTFM_IODC_10_BIT 0x02000000L
1000#define IOTFM_IODC_12_BIT 0x04000000L
1001#define IOTFM_WSPI 0x08000000L
1002#define IOTFM_RSPI 0x10000000L
1003#define IOTFM_WSE 0x20000000L
1004#define IOTFM_WE 0x40000000L
1005#define IOTFM_RE 0x80000000L
1006#define IOTFM_SA_SHIFT 0L
1007#define IOTFM_MSK_SHIFT 16L
1008
1009//****************************************************************************
1010//
1011// The following defines are for the flags in the PC/PCI request register.
1012//
1013//****************************************************************************
1014#define PCPRR_RDC_MASK 0x00000007L
1015#define PCPRR_REQ 0x00008000L
1016#define PCPRR_RDC_SHIFT 0L
1017
1018//****************************************************************************
1019//
1020// The following defines are for the flags in the PC/PCI grant register.
1021//
1022//****************************************************************************
1023#define PCPGR_GDC_MASK 0x00000007L
1024#define PCPGR_VL 0x00008000L
1025#define PCPGR_GDC_SHIFT 0L
1026
1027//****************************************************************************
1028//
1029// The following defines are for the flags in the PC/PCI Control Register.
1030//
1031//****************************************************************************
1032#define PCPCR_EN 0x00000001L
1033
1034//****************************************************************************
1035//
1036// The following defines are for the flags in the debug index register.
1037//
1038//****************************************************************************
1039#define DREG_REGID_MASK 0x0000007FL
1040#define DREG_DEBUG 0x00000080L
1041#define DREG_RGBK_MASK 0x00000700L
1042#define DREG_TRAP 0x00000800L
1043#if !defined(NO_CS4612)
1044#if !defined(NO_CS4615)
1045#define DREG_TRAPX 0x00001000L
1046#endif
1047#endif
1048#define DREG_REGID_SHIFT 0L
1049#define DREG_RGBK_SHIFT 8L
1050#define DREG_RGBK_REGID_MASK 0x0000077FL
1051#define DREG_REGID_R0 0x00000010L
1052#define DREG_REGID_R1 0x00000011L
1053#define DREG_REGID_R2 0x00000012L
1054#define DREG_REGID_R3 0x00000013L
1055#define DREG_REGID_R4 0x00000014L
1056#define DREG_REGID_R5 0x00000015L
1057#define DREG_REGID_R6 0x00000016L
1058#define DREG_REGID_R7 0x00000017L
1059#define DREG_REGID_R8 0x00000018L
1060#define DREG_REGID_R9 0x00000019L
1061#define DREG_REGID_RA 0x0000001AL
1062#define DREG_REGID_RB 0x0000001BL
1063#define DREG_REGID_RC 0x0000001CL
1064#define DREG_REGID_RD 0x0000001DL
1065#define DREG_REGID_RE 0x0000001EL
1066#define DREG_REGID_RF 0x0000001FL
1067#define DREG_REGID_RA_BUS_LOW 0x00000020L
1068#define DREG_REGID_RA_BUS_HIGH 0x00000038L
1069#define DREG_REGID_YBUS_LOW 0x00000050L
1070#define DREG_REGID_YBUS_HIGH 0x00000058L
1071#define DREG_REGID_TRAP_0 0x00000100L
1072#define DREG_REGID_TRAP_1 0x00000101L
1073#define DREG_REGID_TRAP_2 0x00000102L
1074#define DREG_REGID_TRAP_3 0x00000103L
1075#define DREG_REGID_TRAP_4 0x00000104L
1076#define DREG_REGID_TRAP_5 0x00000105L
1077#define DREG_REGID_TRAP_6 0x00000106L
1078#define DREG_REGID_TRAP_7 0x00000107L
1079#define DREG_REGID_INDIRECT_ADDRESS 0x0000010EL
1080#define DREG_REGID_TOP_OF_STACK 0x0000010FL
1081#if !defined(NO_CS4612)
1082#if !defined(NO_CS4615)
1083#define DREG_REGID_TRAP_8 0x00000110L
1084#define DREG_REGID_TRAP_9 0x00000111L
1085#define DREG_REGID_TRAP_10 0x00000112L
1086#define DREG_REGID_TRAP_11 0x00000113L
1087#define DREG_REGID_TRAP_12 0x00000114L
1088#define DREG_REGID_TRAP_13 0x00000115L
1089#define DREG_REGID_TRAP_14 0x00000116L
1090#define DREG_REGID_TRAP_15 0x00000117L
1091#define DREG_REGID_TRAP_16 0x00000118L
1092#define DREG_REGID_TRAP_17 0x00000119L
1093#define DREG_REGID_TRAP_18 0x0000011AL
1094#define DREG_REGID_TRAP_19 0x0000011BL
1095#define DREG_REGID_TRAP_20 0x0000011CL
1096#define DREG_REGID_TRAP_21 0x0000011DL
1097#define DREG_REGID_TRAP_22 0x0000011EL
1098#define DREG_REGID_TRAP_23 0x0000011FL
1099#endif
1100#endif
1101#define DREG_REGID_RSA0_LOW 0x00000200L
1102#define DREG_REGID_RSA0_HIGH 0x00000201L
1103#define DREG_REGID_RSA1_LOW 0x00000202L
1104#define DREG_REGID_RSA1_HIGH 0x00000203L
1105#define DREG_REGID_RSA2 0x00000204L
1106#define DREG_REGID_RSA3 0x00000205L
1107#define DREG_REGID_RSI0_LOW 0x00000206L
1108#define DREG_REGID_RSI0_HIGH 0x00000207L
1109#define DREG_REGID_RSI1 0x00000208L
1110#define DREG_REGID_RSI2 0x00000209L
1111#define DREG_REGID_SAGUSTATUS 0x0000020AL
1112#define DREG_REGID_RSCONFIG01_LOW 0x0000020BL
1113#define DREG_REGID_RSCONFIG01_HIGH 0x0000020CL
1114#define DREG_REGID_RSCONFIG23_LOW 0x0000020DL
1115#define DREG_REGID_RSCONFIG23_HIGH 0x0000020EL
1116#define DREG_REGID_RSDMA01E 0x0000020FL
1117#define DREG_REGID_RSDMA23E 0x00000210L
1118#define DREG_REGID_RSD0_LOW 0x00000211L
1119#define DREG_REGID_RSD0_HIGH 0x00000212L
1120#define DREG_REGID_RSD1_LOW 0x00000213L
1121#define DREG_REGID_RSD1_HIGH 0x00000214L
1122#define DREG_REGID_RSD2_LOW 0x00000215L
1123#define DREG_REGID_RSD2_HIGH 0x00000216L
1124#define DREG_REGID_RSD3_LOW 0x00000217L
1125#define DREG_REGID_RSD3_HIGH 0x00000218L
1126#define DREG_REGID_SRAR_HIGH 0x0000021AL
1127#define DREG_REGID_SRAR_LOW 0x0000021BL
1128#define DREG_REGID_DMA_STATE 0x0000021CL
1129#define DREG_REGID_CURRENT_DMA_STREAM 0x0000021DL
1130#define DREG_REGID_NEXT_DMA_STREAM 0x0000021EL
1131#define DREG_REGID_CPU_STATUS 0x00000300L
1132#define DREG_REGID_MAC_MODE 0x00000301L
1133#define DREG_REGID_STACK_AND_REPEAT 0x00000302L
1134#define DREG_REGID_INDEX0 0x00000304L
1135#define DREG_REGID_INDEX1 0x00000305L
1136#define DREG_REGID_DMA_STATE_0_3 0x00000400L
1137#define DREG_REGID_DMA_STATE_4_7 0x00000404L
1138#define DREG_REGID_DMA_STATE_8_11 0x00000408L
1139#define DREG_REGID_DMA_STATE_12_15 0x0000040CL
1140#define DREG_REGID_DMA_STATE_16_19 0x00000410L
1141#define DREG_REGID_DMA_STATE_20_23 0x00000414L
1142#define DREG_REGID_DMA_STATE_24_27 0x00000418L
1143#define DREG_REGID_DMA_STATE_28_31 0x0000041CL
1144#define DREG_REGID_DMA_STATE_32_35 0x00000420L
1145#define DREG_REGID_DMA_STATE_36_39 0x00000424L
1146#define DREG_REGID_DMA_STATE_40_43 0x00000428L
1147#define DREG_REGID_DMA_STATE_44_47 0x0000042CL
1148#define DREG_REGID_DMA_STATE_48_51 0x00000430L
1149#define DREG_REGID_DMA_STATE_52_55 0x00000434L
1150#define DREG_REGID_DMA_STATE_56_59 0x00000438L
1151#define DREG_REGID_DMA_STATE_60_63 0x0000043CL
1152#define DREG_REGID_DMA_STATE_64_67 0x00000440L
1153#define DREG_REGID_DMA_STATE_68_71 0x00000444L
1154#define DREG_REGID_DMA_STATE_72_75 0x00000448L
1155#define DREG_REGID_DMA_STATE_76_79 0x0000044CL
1156#define DREG_REGID_DMA_STATE_80_83 0x00000450L
1157#define DREG_REGID_DMA_STATE_84_87 0x00000454L
1158#define DREG_REGID_DMA_STATE_88_91 0x00000458L
1159#define DREG_REGID_DMA_STATE_92_95 0x0000045CL
1160#define DREG_REGID_TRAP_SELECT 0x00000500L
1161#define DREG_REGID_TRAP_WRITE_0 0x00000500L
1162#define DREG_REGID_TRAP_WRITE_1 0x00000501L
1163#define DREG_REGID_TRAP_WRITE_2 0x00000502L
1164#define DREG_REGID_TRAP_WRITE_3 0x00000503L
1165#define DREG_REGID_TRAP_WRITE_4 0x00000504L
1166#define DREG_REGID_TRAP_WRITE_5 0x00000505L
1167#define DREG_REGID_TRAP_WRITE_6 0x00000506L
1168#define DREG_REGID_TRAP_WRITE_7 0x00000507L
1169#if !defined(NO_CS4612)
1170#if !defined(NO_CS4615)
1171#define DREG_REGID_TRAP_WRITE_8 0x00000510L
1172#define DREG_REGID_TRAP_WRITE_9 0x00000511L
1173#define DREG_REGID_TRAP_WRITE_10 0x00000512L
1174#define DREG_REGID_TRAP_WRITE_11 0x00000513L
1175#define DREG_REGID_TRAP_WRITE_12 0x00000514L
1176#define DREG_REGID_TRAP_WRITE_13 0x00000515L
1177#define DREG_REGID_TRAP_WRITE_14 0x00000516L
1178#define DREG_REGID_TRAP_WRITE_15 0x00000517L
1179#define DREG_REGID_TRAP_WRITE_16 0x00000518L
1180#define DREG_REGID_TRAP_WRITE_17 0x00000519L
1181#define DREG_REGID_TRAP_WRITE_18 0x0000051AL
1182#define DREG_REGID_TRAP_WRITE_19 0x0000051BL
1183#define DREG_REGID_TRAP_WRITE_20 0x0000051CL
1184#define DREG_REGID_TRAP_WRITE_21 0x0000051DL
1185#define DREG_REGID_TRAP_WRITE_22 0x0000051EL
1186#define DREG_REGID_TRAP_WRITE_23 0x0000051FL
1187#endif
1188#endif
1189#define DREG_REGID_MAC0_ACC0_LOW 0x00000600L
1190#define DREG_REGID_MAC0_ACC1_LOW 0x00000601L
1191#define DREG_REGID_MAC0_ACC2_LOW 0x00000602L
1192#define DREG_REGID_MAC0_ACC3_LOW 0x00000603L
1193#define DREG_REGID_MAC1_ACC0_LOW 0x00000604L
1194#define DREG_REGID_MAC1_ACC1_LOW 0x00000605L
1195#define DREG_REGID_MAC1_ACC2_LOW 0x00000606L
1196#define DREG_REGID_MAC1_ACC3_LOW 0x00000607L
1197#define DREG_REGID_MAC0_ACC0_MID 0x00000608L
1198#define DREG_REGID_MAC0_ACC1_MID 0x00000609L
1199#define DREG_REGID_MAC0_ACC2_MID 0x0000060AL
1200#define DREG_REGID_MAC0_ACC3_MID 0x0000060BL
1201#define DREG_REGID_MAC1_ACC0_MID 0x0000060CL
1202#define DREG_REGID_MAC1_ACC1_MID 0x0000060DL
1203#define DREG_REGID_MAC1_ACC2_MID 0x0000060EL
1204#define DREG_REGID_MAC1_ACC3_MID 0x0000060FL
1205#define DREG_REGID_MAC0_ACC0_HIGH 0x00000610L
1206#define DREG_REGID_MAC0_ACC1_HIGH 0x00000611L
1207#define DREG_REGID_MAC0_ACC2_HIGH 0x00000612L
1208#define DREG_REGID_MAC0_ACC3_HIGH 0x00000613L
1209#define DREG_REGID_MAC1_ACC0_HIGH 0x00000614L
1210#define DREG_REGID_MAC1_ACC1_HIGH 0x00000615L
1211#define DREG_REGID_MAC1_ACC2_HIGH 0x00000616L
1212#define DREG_REGID_MAC1_ACC3_HIGH 0x00000617L
1213#define DREG_REGID_RSHOUT_LOW 0x00000620L
1214#define DREG_REGID_RSHOUT_MID 0x00000628L
1215#define DREG_REGID_RSHOUT_HIGH 0x00000630L
1216
1217//****************************************************************************
1218//
1219// The following defines are for the flags in the AC97 S/PDIF Control register.
1220//
1221//****************************************************************************
1222#define SPDIF_CONTROL_SPDIF_EN 0x00008000L
1223#define SPDIF_CONTROL_VAL 0x00004000L
1224#define SPDIF_CONTROL_COPY 0x00000004L
1225#define SPDIF_CONTROL_CC0 0x00000010L
1226#define SPDIF_CONTROL_CC1 0x00000020L
1227#define SPDIF_CONTROL_CC2 0x00000040L
1228#define SPDIF_CONTROL_CC3 0x00000080L
1229#define SPDIF_CONTROL_CC4 0x00000100L
1230#define SPDIF_CONTROL_CC5 0x00000200L
1231#define SPDIF_CONTROL_CC6 0x00000400L
1232#define SPDIF_CONTROL_L 0x00000800L
1233
1234#endif // _H_HWDEFS
diff --git a/sound/oss/cs4281/cs4281_wrapper-24.c b/sound/oss/cs4281/cs4281_wrapper-24.c
deleted file mode 100644
index 4559f02c9969..000000000000
--- a/sound/oss/cs4281/cs4281_wrapper-24.c
+++ /dev/null
@@ -1,41 +0,0 @@
1/*******************************************************************************
2*
3* "cs4281_wrapper.c" -- Cirrus Logic-Crystal CS4281 linux audio driver.
4*
5* Copyright (C) 2000,2001 Cirrus Logic Corp.
6* -- tom woller (twoller@crystal.cirrus.com) or
7* (audio@crystal.cirrus.com).
8*
9* This program is free software; you can redistribute it and/or modify
10* it under the terms of the GNU General Public License as published by
11* the Free Software Foundation; either version 2 of the License, or
12* (at your option) any later version.
13*
14* This program is distributed in the hope that it will be useful,
15* but WITHOUT ANY WARRANTY; without even the implied warranty of
16* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17* GNU General Public License for more details.
18*
19* You should have received a copy of the GNU General Public License
20* along with this program; if not, write to the Free Software
21* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22*
23* 12/20/00 trw - new file.
24*
25*******************************************************************************/
26
27#include <linux/spinlock.h>
28
29static int cs4281_resume_null(struct pci_dev *pcidev) { return 0; }
30static int cs4281_suspend_null(struct pci_dev *pcidev, pm_message_t state) { return 0; }
31
32#define free_dmabuf(state, dmabuf) \
33 pci_free_consistent(state->pcidev, \
34 PAGE_SIZE << (dmabuf)->buforder, \
35 (dmabuf)->rawbuf, (dmabuf)->dmaaddr);
36#define free_dmabuf2(state, dmabuf) \
37 pci_free_consistent((state)->pcidev, \
38 PAGE_SIZE << (state)->buforder_tmpbuff, \
39 (state)->tmpbuff, (state)->dmaaddr_tmpbuff);
40#define cs4x_pgoff(vma) ((vma)->vm_pgoff)
41
diff --git a/sound/oss/cs4281/cs4281m.c b/sound/oss/cs4281/cs4281m.c
deleted file mode 100644
index 0400a416dc93..000000000000
--- a/sound/oss/cs4281/cs4281m.c
+++ /dev/null
@@ -1,4487 +0,0 @@
1/*******************************************************************************
2*
3* "cs4281.c" -- Cirrus Logic-Crystal CS4281 linux audio driver.
4*
5* Copyright (C) 2000,2001 Cirrus Logic Corp.
6* -- adapted from drivers by Thomas Sailer,
7* -- but don't bug him; Problems should go to:
8* -- tom woller (twoller@crystal.cirrus.com) or
9* (audio@crystal.cirrus.com).
10*
11* This program is free software; you can redistribute it and/or modify
12* it under the terms of the GNU General Public License as published by
13* the Free Software Foundation; either version 2 of the License, or
14* (at your option) any later version.
15*
16* This program is distributed in the hope that it will be useful,
17* but WITHOUT ANY WARRANTY; without even the implied warranty of
18* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19* GNU General Public License for more details.
20*
21* You should have received a copy of the GNU General Public License
22* along with this program; if not, write to the Free Software
23* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24*
25* Module command line parameters:
26* none
27*
28* Supported devices:
29* /dev/dsp standard /dev/dsp device, (mostly) OSS compatible
30* /dev/mixer standard /dev/mixer device, (mostly) OSS compatible
31* /dev/midi simple MIDI UART interface, no ioctl
32*
33* Modification History
34* 08/20/00 trw - silence and no stopping DAC until release
35* 08/23/00 trw - added CS_DBG statements, fix interrupt hang issue on DAC stop.
36* 09/18/00 trw - added 16bit only record with conversion
37* 09/24/00 trw - added Enhanced Full duplex (separate simultaneous
38* capture/playback rates)
39* 10/03/00 trw - fixed mmap (fixed GRECORD and the XMMS mmap test plugin
40* libOSSm.so)
41* 10/11/00 trw - modified for 2.4.0-test9 kernel enhancements (NR_MAP removal)
42* 11/03/00 trw - fixed interrupt loss/stutter, added debug.
43* 11/10/00 bkz - added __devinit to cs4281_hw_init()
44* 11/10/00 trw - fixed SMP and capture spinlock hang.
45* 12/04/00 trw - cleaned up CSDEBUG flags and added "defaultorder" moduleparm.
46* 12/05/00 trw - fixed polling (myth2), and added underrun swptr fix.
47* 12/08/00 trw - added PM support.
48* 12/14/00 trw - added wrapper code, builds under 2.4.0, 2.2.17-20, 2.2.17-8
49* (RH/Dell base), 2.2.18, 2.2.12. cleaned up code mods by ident.
50* 12/19/00 trw - added PM support for 2.2 base (apm_callback). other PM cleanup.
51* 12/21/00 trw - added fractional "defaultorder" inputs. if >100 then use
52* defaultorder-100 as power of 2 for the buffer size. example:
53* 106 = 2^(106-100) = 2^6 = 64 bytes for the buffer size.
54*
55*******************************************************************************/
56
57/* uncomment the following line to disable building PM support into the driver */
58//#define NOT_CS4281_PM 1
59
60#include <linux/list.h>
61#include <linux/module.h>
62#include <linux/string.h>
63#include <linux/ioport.h>
64#include <linux/sched.h>
65#include <linux/delay.h>
66#include <linux/sound.h>
67#include <linux/slab.h>
68#include <linux/soundcard.h>
69#include <linux/pci.h>
70#include <linux/bitops.h>
71#include <linux/init.h>
72#include <linux/interrupt.h>
73#include <linux/poll.h>
74#include <linux/fs.h>
75#include <linux/wait.h>
76
77#include <asm/current.h>
78#include <asm/io.h>
79#include <asm/dma.h>
80#include <asm/page.h>
81#include <asm/uaccess.h>
82
83//#include "cs_dm.h"
84#include "cs4281_hwdefs.h"
85#include "cs4281pm.h"
86
87struct cs4281_state;
88
89static void stop_dac(struct cs4281_state *s);
90static void stop_adc(struct cs4281_state *s);
91static void start_dac(struct cs4281_state *s);
92static void start_adc(struct cs4281_state *s);
93#undef OSS_DOCUMENTED_MIXER_SEMANTICS
94
95// ---------------------------------------------------------------------
96
97#ifndef PCI_VENDOR_ID_CIRRUS
98#define PCI_VENDOR_ID_CIRRUS 0x1013
99#endif
100#ifndef PCI_DEVICE_ID_CRYSTAL_CS4281
101#define PCI_DEVICE_ID_CRYSTAL_CS4281 0x6005
102#endif
103
104#define CS4281_MAGIC ((PCI_DEVICE_ID_CRYSTAL_CS4281<<16) | PCI_VENDOR_ID_CIRRUS)
105#define CS4281_CFLR_DEFAULT 0x00000001 /* CFLR must be in AC97 link mode */
106
107// buffer order determines the size of the dma buffer for the driver.
108// under Linux, a smaller buffer allows more responsiveness from many of the
109// applications (e.g. games). A larger buffer allows some of the apps (esound)
110// to not underrun the dma buffer as easily. As default, use 32k (order=3)
111// rather than 64k as some of the games work more responsively.
112// log base 2( buff sz = 32k).
113static unsigned long defaultorder = 3;
114module_param(defaultorder, ulong, 0);
115
116//
117// Turn on/off debugging compilation by commenting out "#define CSDEBUG"
118//
119#define CSDEBUG 1
120#if CSDEBUG
121#define CSDEBUG_INTERFACE 1
122#else
123#undef CSDEBUG_INTERFACE
124#endif
125//
126// cs_debugmask areas
127//
128#define CS_INIT 0x00000001 // initialization and probe functions
129#define CS_ERROR 0x00000002 // tmp debugging bit placeholder
130#define CS_INTERRUPT 0x00000004 // interrupt handler (separate from all other)
131#define CS_FUNCTION 0x00000008 // enter/leave functions
132#define CS_WAVE_WRITE 0x00000010 // write information for wave
133#define CS_WAVE_READ 0x00000020 // read information for wave
134#define CS_MIDI_WRITE 0x00000040 // write information for midi
135#define CS_MIDI_READ 0x00000080 // read information for midi
136#define CS_MPU401_WRITE 0x00000100 // write information for mpu401
137#define CS_MPU401_READ 0x00000200 // read information for mpu401
138#define CS_OPEN 0x00000400 // all open functions in the driver
139#define CS_RELEASE 0x00000800 // all release functions in the driver
140#define CS_PARMS 0x00001000 // functional and operational parameters
141#define CS_IOCTL 0x00002000 // ioctl (non-mixer)
142#define CS_PM 0x00004000 // power management
143#define CS_TMP 0x10000000 // tmp debug mask bit
144
145#define CS_IOCTL_CMD_SUSPEND 0x1 // suspend
146#define CS_IOCTL_CMD_RESUME 0x2 // resume
147//
148// CSDEBUG is usual mode is set to 1, then use the
149// cs_debuglevel and cs_debugmask to turn on or off debugging.
150// Debug level of 1 has been defined to be kernel errors and info
151// that should be printed on any released driver.
152//
153#if CSDEBUG
154#define CS_DBGOUT(mask,level,x) if((cs_debuglevel >= (level)) && ((mask) & cs_debugmask) ) {x;}
155#else
156#define CS_DBGOUT(mask,level,x)
157#endif
158
159#if CSDEBUG
160static unsigned long cs_debuglevel = 1; // levels range from 1-9
161static unsigned long cs_debugmask = CS_INIT | CS_ERROR; // use CS_DBGOUT with various mask values
162module_param(cs_debuglevel, ulong, 0);
163module_param(cs_debugmask, ulong, 0);
164#endif
165#define CS_TRUE 1
166#define CS_FALSE 0
167
168// MIDI buffer sizes
169#define MIDIINBUF 500
170#define MIDIOUTBUF 500
171
172#define FMODE_MIDI_SHIFT 3
173#define FMODE_MIDI_READ (FMODE_READ << FMODE_MIDI_SHIFT)
174#define FMODE_MIDI_WRITE (FMODE_WRITE << FMODE_MIDI_SHIFT)
175
176#define CS4281_MAJOR_VERSION 1
177#define CS4281_MINOR_VERSION 13
178#ifdef __ia64__
179#define CS4281_ARCH 64 //architecture key
180#else
181#define CS4281_ARCH 32 //architecture key
182#endif
183
184#define CS_TYPE_ADC 0
185#define CS_TYPE_DAC 1
186
187
188static const char invalid_magic[] =
189 KERN_CRIT "cs4281: invalid magic value\n";
190
191#define VALIDATE_STATE(s) \
192({ \
193 if (!(s) || (s)->magic != CS4281_MAGIC) { \
194 printk(invalid_magic); \
195 return -ENXIO; \
196 } \
197})
198
199//LIST_HEAD(cs4281_devs);
200static struct list_head cs4281_devs = { &cs4281_devs, &cs4281_devs };
201
202struct cs4281_state;
203
204#include "cs4281_wrapper-24.c"
205
206struct cs4281_state {
207 // magic
208 unsigned int magic;
209
210 // we keep the cards in a linked list
211 struct cs4281_state *next;
212
213 // pcidev is needed to turn off the DDMA controller at driver shutdown
214 struct pci_dev *pcidev;
215 struct list_head list;
216
217 // soundcore stuff
218 int dev_audio;
219 int dev_mixer;
220 int dev_midi;
221
222 // hardware resources
223 unsigned int pBA0phys, pBA1phys;
224 char __iomem *pBA0;
225 char __iomem *pBA1;
226 unsigned int irq;
227
228 // mixer registers
229 struct {
230 unsigned short vol[10];
231 unsigned int recsrc;
232 unsigned int modcnt;
233 unsigned short micpreamp;
234 } mix;
235
236 // wave stuff
237 struct properties {
238 unsigned fmt;
239 unsigned fmt_original; // original requested format
240 unsigned channels;
241 unsigned rate;
242 unsigned char clkdiv;
243 } prop_dac, prop_adc;
244 unsigned conversion:1; // conversion from 16 to 8 bit in progress
245 void *tmpbuff; // tmp buffer for sample conversions
246 unsigned ena;
247 spinlock_t lock;
248 struct mutex open_sem;
249 struct mutex open_sem_adc;
250 struct mutex open_sem_dac;
251 mode_t open_mode;
252 wait_queue_head_t open_wait;
253 wait_queue_head_t open_wait_adc;
254 wait_queue_head_t open_wait_dac;
255
256 dma_addr_t dmaaddr_tmpbuff;
257 unsigned buforder_tmpbuff; // Log base 2 of 'rawbuf' size in bytes..
258 struct dmabuf {
259 void *rawbuf; // Physical address of
260 dma_addr_t dmaaddr;
261 unsigned buforder; // Log base 2 of 'rawbuf' size in bytes..
262 unsigned numfrag; // # of 'fragments' in the buffer.
263 unsigned fragshift; // Log base 2 of fragment size.
264 unsigned hwptr, swptr;
265 unsigned total_bytes; // # bytes process since open.
266 unsigned blocks; // last returned blocks value GETOPTR
267 unsigned wakeup; // interrupt occurred on block
268 int count;
269 unsigned underrun; // underrun flag
270 unsigned error; // over/underrun
271 wait_queue_head_t wait;
272 // redundant, but makes calculations easier
273 unsigned fragsize; // 2**fragshift..
274 unsigned dmasize; // 2**buforder.
275 unsigned fragsamples;
276 // OSS stuff
277 unsigned mapped:1; // Buffer mapped in cs4281_mmap()?
278 unsigned ready:1; // prog_dmabuf_dac()/adc() successful?
279 unsigned endcleared:1;
280 unsigned type:1; // adc or dac buffer (CS_TYPE_XXX)
281 unsigned ossfragshift;
282 int ossmaxfrags;
283 unsigned subdivision;
284 } dma_dac, dma_adc;
285
286 // midi stuff
287 struct {
288 unsigned ird, iwr, icnt;
289 unsigned ord, owr, ocnt;
290 wait_queue_head_t iwait;
291 wait_queue_head_t owait;
292 struct timer_list timer;
293 unsigned char ibuf[MIDIINBUF];
294 unsigned char obuf[MIDIOUTBUF];
295 } midi;
296
297 struct cs4281_pm pm;
298 struct cs4281_pipeline pl[CS4281_NUMBER_OF_PIPELINES];
299};
300
301#include "cs4281pm-24.c"
302
303#if CSDEBUG
304
305// DEBUG ROUTINES
306
307#define SOUND_MIXER_CS_GETDBGLEVEL _SIOWR('M',120, int)
308#define SOUND_MIXER_CS_SETDBGLEVEL _SIOWR('M',121, int)
309#define SOUND_MIXER_CS_GETDBGMASK _SIOWR('M',122, int)
310#define SOUND_MIXER_CS_SETDBGMASK _SIOWR('M',123, int)
311
312#define SOUND_MIXER_CS_APM _SIOWR('M',124, int)
313
314
315static void cs_printioctl(unsigned int x)
316{
317 unsigned int i;
318 unsigned char vidx;
319 // Index of mixtable1[] member is Device ID
320 // and must be <= SOUND_MIXER_NRDEVICES.
321 // Value of array member is index into s->mix.vol[]
322 static const unsigned char mixtable1[SOUND_MIXER_NRDEVICES] = {
323 [SOUND_MIXER_PCM] = 1, // voice
324 [SOUND_MIXER_LINE1] = 2, // AUX
325 [SOUND_MIXER_CD] = 3, // CD
326 [SOUND_MIXER_LINE] = 4, // Line
327 [SOUND_MIXER_SYNTH] = 5, // FM
328 [SOUND_MIXER_MIC] = 6, // Mic
329 [SOUND_MIXER_SPEAKER] = 7, // Speaker
330 [SOUND_MIXER_RECLEV] = 8, // Recording level
331 [SOUND_MIXER_VOLUME] = 9 // Master Volume
332 };
333
334 switch (x) {
335 case SOUND_MIXER_CS_GETDBGMASK:
336 CS_DBGOUT(CS_IOCTL, 4,
337 printk("SOUND_MIXER_CS_GETDBGMASK:\n"));
338 break;
339 case SOUND_MIXER_CS_GETDBGLEVEL:
340 CS_DBGOUT(CS_IOCTL, 4,
341 printk("SOUND_MIXER_CS_GETDBGLEVEL:\n"));
342 break;
343 case SOUND_MIXER_CS_SETDBGMASK:
344 CS_DBGOUT(CS_IOCTL, 4,
345 printk("SOUND_MIXER_CS_SETDBGMASK:\n"));
346 break;
347 case SOUND_MIXER_CS_SETDBGLEVEL:
348 CS_DBGOUT(CS_IOCTL, 4,
349 printk("SOUND_MIXER_CS_SETDBGLEVEL:\n"));
350 break;
351 case OSS_GETVERSION:
352 CS_DBGOUT(CS_IOCTL, 4, printk("OSS_GETVERSION:\n"));
353 break;
354 case SNDCTL_DSP_SYNC:
355 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SYNC:\n"));
356 break;
357 case SNDCTL_DSP_SETDUPLEX:
358 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETDUPLEX:\n"));
359 break;
360 case SNDCTL_DSP_GETCAPS:
361 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETCAPS:\n"));
362 break;
363 case SNDCTL_DSP_RESET:
364 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_RESET:\n"));
365 break;
366 case SNDCTL_DSP_SPEED:
367 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SPEED:\n"));
368 break;
369 case SNDCTL_DSP_STEREO:
370 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_STEREO:\n"));
371 break;
372 case SNDCTL_DSP_CHANNELS:
373 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_CHANNELS:\n"));
374 break;
375 case SNDCTL_DSP_GETFMTS:
376 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETFMTS:\n"));
377 break;
378 case SNDCTL_DSP_SETFMT:
379 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETFMT:\n"));
380 break;
381 case SNDCTL_DSP_POST:
382 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_POST:\n"));
383 break;
384 case SNDCTL_DSP_GETTRIGGER:
385 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETTRIGGER:\n"));
386 break;
387 case SNDCTL_DSP_SETTRIGGER:
388 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETTRIGGER:\n"));
389 break;
390 case SNDCTL_DSP_GETOSPACE:
391 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETOSPACE:\n"));
392 break;
393 case SNDCTL_DSP_GETISPACE:
394 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETISPACE:\n"));
395 break;
396 case SNDCTL_DSP_NONBLOCK:
397 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_NONBLOCK:\n"));
398 break;
399 case SNDCTL_DSP_GETODELAY:
400 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETODELAY:\n"));
401 break;
402 case SNDCTL_DSP_GETIPTR:
403 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETIPTR:\n"));
404 break;
405 case SNDCTL_DSP_GETOPTR:
406 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETOPTR:\n"));
407 break;
408 case SNDCTL_DSP_GETBLKSIZE:
409 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_GETBLKSIZE:\n"));
410 break;
411 case SNDCTL_DSP_SETFRAGMENT:
412 CS_DBGOUT(CS_IOCTL, 4,
413 printk("SNDCTL_DSP_SETFRAGMENT:\n"));
414 break;
415 case SNDCTL_DSP_SUBDIVIDE:
416 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SUBDIVIDE:\n"));
417 break;
418 case SOUND_PCM_READ_RATE:
419 CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_RATE:\n"));
420 break;
421 case SOUND_PCM_READ_CHANNELS:
422 CS_DBGOUT(CS_IOCTL, 4,
423 printk("SOUND_PCM_READ_CHANNELS:\n"));
424 break;
425 case SOUND_PCM_READ_BITS:
426 CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_BITS:\n"));
427 break;
428 case SOUND_PCM_WRITE_FILTER:
429 CS_DBGOUT(CS_IOCTL, 4,
430 printk("SOUND_PCM_WRITE_FILTER:\n"));
431 break;
432 case SNDCTL_DSP_SETSYNCRO:
433 CS_DBGOUT(CS_IOCTL, 4, printk("SNDCTL_DSP_SETSYNCRO:\n"));
434 break;
435 case SOUND_PCM_READ_FILTER:
436 CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_PCM_READ_FILTER:\n"));
437 break;
438 case SOUND_MIXER_PRIVATE1:
439 CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE1:\n"));
440 break;
441 case SOUND_MIXER_PRIVATE2:
442 CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE2:\n"));
443 break;
444 case SOUND_MIXER_PRIVATE3:
445 CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE3:\n"));
446 break;
447 case SOUND_MIXER_PRIVATE4:
448 CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE4:\n"));
449 break;
450 case SOUND_MIXER_PRIVATE5:
451 CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_PRIVATE5:\n"));
452 break;
453 case SOUND_MIXER_INFO:
454 CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_INFO:\n"));
455 break;
456 case SOUND_OLD_MIXER_INFO:
457 CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_OLD_MIXER_INFO:\n"));
458 break;
459
460 default:
461 switch (_IOC_NR(x)) {
462 case SOUND_MIXER_VOLUME:
463 CS_DBGOUT(CS_IOCTL, 4,
464 printk("SOUND_MIXER_VOLUME:\n"));
465 break;
466 case SOUND_MIXER_SPEAKER:
467 CS_DBGOUT(CS_IOCTL, 4,
468 printk("SOUND_MIXER_SPEAKER:\n"));
469 break;
470 case SOUND_MIXER_RECLEV:
471 CS_DBGOUT(CS_IOCTL, 4,
472 printk("SOUND_MIXER_RECLEV:\n"));
473 break;
474 case SOUND_MIXER_MIC:
475 CS_DBGOUT(CS_IOCTL, 4,
476 printk("SOUND_MIXER_MIC:\n"));
477 break;
478 case SOUND_MIXER_SYNTH:
479 CS_DBGOUT(CS_IOCTL, 4,
480 printk("SOUND_MIXER_SYNTH:\n"));
481 break;
482 case SOUND_MIXER_RECSRC:
483 CS_DBGOUT(CS_IOCTL, 4,
484 printk("SOUND_MIXER_RECSRC:\n"));
485 break;
486 case SOUND_MIXER_DEVMASK:
487 CS_DBGOUT(CS_IOCTL, 4,
488 printk("SOUND_MIXER_DEVMASK:\n"));
489 break;
490 case SOUND_MIXER_RECMASK:
491 CS_DBGOUT(CS_IOCTL, 4,
492 printk("SOUND_MIXER_RECMASK:\n"));
493 break;
494 case SOUND_MIXER_STEREODEVS:
495 CS_DBGOUT(CS_IOCTL, 4,
496 printk("SOUND_MIXER_STEREODEVS:\n"));
497 break;
498 case SOUND_MIXER_CAPS:
499 CS_DBGOUT(CS_IOCTL, 4, printk("SOUND_MIXER_CAPS:\n"));
500 break;
501 default:
502 i = _IOC_NR(x);
503 if (i >= SOUND_MIXER_NRDEVICES
504 || !(vidx = mixtable1[i])) {
505 CS_DBGOUT(CS_IOCTL, 4, printk
506 ("UNKNOWN IOCTL: 0x%.8x NR=%d\n",
507 x, i));
508 } else {
509 CS_DBGOUT(CS_IOCTL, 4, printk
510 ("SOUND_MIXER_IOCTL AC9x: 0x%.8x NR=%d\n",
511 x, i));
512 }
513 break;
514 }
515 }
516}
517#endif
518static int prog_dmabuf_adc(struct cs4281_state *s);
519static void prog_codec(struct cs4281_state *s, unsigned type);
520
521// ---------------------------------------------------------------------
522//
523// Hardware Interfaces For the CS4281
524//
525
526
527//******************************************************************************
528// "delayus()-- Delay for the specified # of microseconds.
529//******************************************************************************
530static void delayus(struct cs4281_state *s, u32 delay)
531{
532 u32 j;
533 if ((delay > 9999) && (s->pm.flags & CS4281_PM_IDLE)) {
534 j = (delay * HZ) / 1000000; /* calculate delay in jiffies */
535 if (j < 1)
536 j = 1; /* minimum one jiffy. */
537 current->state = TASK_UNINTERRUPTIBLE;
538 schedule_timeout(j);
539 } else
540 udelay(delay);
541 return;
542}
543
544
545//******************************************************************************
546// "cs4281_read_ac97" -- Reads a word from the specified location in the
547// CS4281's address space(based on the BA0 register).
548//
549// 1. Write ACCAD = Command Address Register = 46Ch for AC97 register address
550// 2. Write ACCDA = Command Data Register = 470h for data to write to AC97 register,
551// 0h for reads.
552// 3. Write ACCTL = Control Register = 460h for initiating the write
553// 4. Read ACCTL = 460h, DCV should be reset by now and 460h = 17h
554// 5. if DCV not cleared, break and return error
555// 6. Read ACSTS = Status Register = 464h, check VSTS bit
556//****************************************************************************
557static int cs4281_read_ac97(struct cs4281_state *card, u32 offset,
558 u32 * value)
559{
560 u32 count, status;
561
562 // Make sure that there is not data sitting
563 // around from a previous uncompleted access.
564 // ACSDA = Status Data Register = 47Ch
565 status = readl(card->pBA0 + BA0_ACSDA);
566
567 // Setup the AC97 control registers on the CS4281 to send the
568 // appropriate command to the AC97 to perform the read.
569 // ACCAD = Command Address Register = 46Ch
570 // ACCDA = Command Data Register = 470h
571 // ACCTL = Control Register = 460h
572 // bit DCV - will clear when process completed
573 // bit CRW - Read command
574 // bit VFRM - valid frame enabled
575 // bit ESYN - ASYNC generation enabled
576
577 // Get the actual AC97 register from the offset
578 writel(offset - BA0_AC97_RESET, card->pBA0 + BA0_ACCAD);
579 writel(0, card->pBA0 + BA0_ACCDA);
580 writel(ACCTL_DCV | ACCTL_CRW | ACCTL_VFRM | ACCTL_ESYN,
581 card->pBA0 + BA0_ACCTL);
582
583 // Wait for the read to occur.
584 for (count = 0; count < 10; count++) {
585 // First, we want to wait for a short time.
586 udelay(25);
587
588 // Now, check to see if the read has completed.
589 // ACCTL = 460h, DCV should be reset by now and 460h = 17h
590 if (!(readl(card->pBA0 + BA0_ACCTL) & ACCTL_DCV))
591 break;
592 }
593
594 // Make sure the read completed.
595 if (readl(card->pBA0 + BA0_ACCTL) & ACCTL_DCV)
596 return 1;
597
598 // Wait for the valid status bit to go active.
599 for (count = 0; count < 10; count++) {
600 // Read the AC97 status register.
601 // ACSTS = Status Register = 464h
602 status = readl(card->pBA0 + BA0_ACSTS);
603
604 // See if we have valid status.
605 // VSTS - Valid Status
606 if (status & ACSTS_VSTS)
607 break;
608 // Wait for a short while.
609 udelay(25);
610 }
611
612 // Make sure we got valid status.
613 if (!(status & ACSTS_VSTS))
614 return 1;
615
616 // Read the data returned from the AC97 register.
617 // ACSDA = Status Data Register = 474h
618 *value = readl(card->pBA0 + BA0_ACSDA);
619
620 // Success.
621 return (0);
622}
623
624
625//****************************************************************************
626//
627// "cs4281_write_ac97()"-- writes a word to the specified location in the
628// CS461x's address space (based on the part's base address zero register).
629//
630// 1. Write ACCAD = Command Address Register = 46Ch for AC97 register address
631// 2. Write ACCDA = Command Data Register = 470h for data to write to AC97 reg.
632// 3. Write ACCTL = Control Register = 460h for initiating the write
633// 4. Read ACCTL = 460h, DCV should be reset by now and 460h = 07h
634// 5. if DCV not cleared, break and return error
635//
636//****************************************************************************
637static int cs4281_write_ac97(struct cs4281_state *card, u32 offset,
638 u32 value)
639{
640 u32 count, status=0;
641
642 CS_DBGOUT(CS_FUNCTION, 2,
643 printk(KERN_INFO "cs4281: cs_4281_write_ac97()+ \n"));
644
645 // Setup the AC97 control registers on the CS4281 to send the
646 // appropriate command to the AC97 to perform the read.
647 // ACCAD = Command Address Register = 46Ch
648 // ACCDA = Command Data Register = 470h
649 // ACCTL = Control Register = 460h
650 // set DCV - will clear when process completed
651 // reset CRW - Write command
652 // set VFRM - valid frame enabled
653 // set ESYN - ASYNC generation enabled
654 // set RSTN - ARST# inactive, AC97 codec not reset
655
656 // Get the actual AC97 register from the offset
657
658 writel(offset - BA0_AC97_RESET, card->pBA0 + BA0_ACCAD);
659 writel(value, card->pBA0 + BA0_ACCDA);
660 writel(ACCTL_DCV | ACCTL_VFRM | ACCTL_ESYN,
661 card->pBA0 + BA0_ACCTL);
662
663 // Wait for the write to occur.
664 for (count = 0; count < 100; count++) {
665 // First, we want to wait for a short time.
666 udelay(25);
667 // Now, check to see if the write has completed.
668 // ACCTL = 460h, DCV should be reset by now and 460h = 07h
669 status = readl(card->pBA0 + BA0_ACCTL);
670 if (!(status & ACCTL_DCV))
671 break;
672 }
673
674 // Make sure the write completed.
675 if (status & ACCTL_DCV) {
676 CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
677 "cs4281: cs_4281_write_ac97()- unable to write. ACCTL_DCV active\n"));
678 return 1;
679 }
680 CS_DBGOUT(CS_FUNCTION, 2,
681 printk(KERN_INFO "cs4281: cs_4281_write_ac97()- 0\n"));
682 // Success.
683 return 0;
684}
685
686
687//******************************************************************************
688// "Init4281()" -- Bring up the part.
689//******************************************************************************
690static __devinit int cs4281_hw_init(struct cs4281_state *card)
691{
692 u32 ac97_slotid;
693 u32 temp1, temp2;
694
695 CS_DBGOUT(CS_FUNCTION, 2,
696 printk(KERN_INFO "cs4281: cs4281_hw_init()+ \n"));
697#ifndef NOT_CS4281_PM
698 if(!card)
699 return 1;
700#endif
701 temp2 = readl(card->pBA0 + BA0_CFLR);
702 CS_DBGOUT(CS_INIT | CS_ERROR | CS_PARMS, 4, printk(KERN_INFO
703 "cs4281: cs4281_hw_init() CFLR 0x%x\n", temp2));
704 if(temp2 != CS4281_CFLR_DEFAULT)
705 {
706 CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_INFO
707 "cs4281: cs4281_hw_init() CFLR invalid - resetting from 0x%x to 0x%x\n",
708 temp2,CS4281_CFLR_DEFAULT));
709 writel(CS4281_CFLR_DEFAULT, card->pBA0 + BA0_CFLR);
710 temp2 = readl(card->pBA0 + BA0_CFLR);
711 if(temp2 != CS4281_CFLR_DEFAULT)
712 {
713 CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_INFO
714 "cs4281: cs4281_hw_init() Invalid hardware - unable to configure CFLR\n"));
715 return 1;
716 }
717 }
718
719 //***************************************7
720 // Set up the Sound System Configuration
721 //***************************************
722
723 // Set the 'Configuration Write Protect' register
724 // to 4281h. Allows vendor-defined configuration
725 // space between 0e4h and 0ffh to be written.
726
727 writel(0x4281, card->pBA0 + BA0_CWPR); // (3e0h)
728
729 // (0), Blast the clock control register to zero so that the
730 // PLL starts out in a known state, and blast the master serial
731 // port control register to zero so that the serial ports also
732 // start out in a known state.
733
734 writel(0, card->pBA0 + BA0_CLKCR1); // (400h)
735 writel(0, card->pBA0 + BA0_SERMC); // (420h)
736
737
738 // (1), Make ESYN go to zero to turn off
739 // the Sync pulse on the AC97 link.
740
741 writel(0, card->pBA0 + BA0_ACCTL);
742 udelay(50);
743
744
745 // (2) Drive the ARST# pin low for a minimum of 1uS (as defined in
746 // the AC97 spec) and then drive it high. This is done for non
747 // AC97 modes since there might be logic external to the CS461x
748 // that uses the ARST# line for a reset.
749
750 writel(0, card->pBA0 + BA0_SPMC); // (3ech)
751 udelay(100);
752 writel(SPMC_RSTN, card->pBA0 + BA0_SPMC);
753 delayus(card,50000); // Wait 50 ms for ABITCLK to become stable.
754
755 // (3) Turn on the Sound System Clocks.
756 writel(CLKCR1_PLLP, card->pBA0 + BA0_CLKCR1); // (400h)
757 delayus(card,50000); // Wait for the PLL to stabilize.
758 // Turn on clocking of the core (CLKCR1(400h) = 0x00000030)
759 writel(CLKCR1_PLLP | CLKCR1_SWCE, card->pBA0 + BA0_CLKCR1);
760
761 // (4) Power on everything for now..
762 writel(0x7E, card->pBA0 + BA0_SSPM); // (740h)
763
764 // (5) Wait for clock stabilization.
765 for (temp1 = 0; temp1 < 1000; temp1++) {
766 udelay(1000);
767 if (readl(card->pBA0 + BA0_CLKCR1) & CLKCR1_DLLRDY)
768 break;
769 }
770 if (!(readl(card->pBA0 + BA0_CLKCR1) & CLKCR1_DLLRDY)) {
771 CS_DBGOUT(CS_ERROR, 1, printk(KERN_ERR
772 "cs4281: DLLRDY failed!\n"));
773 return -EIO;
774 }
775 // (6) Enable ASYNC generation.
776 writel(ACCTL_ESYN, card->pBA0 + BA0_ACCTL); // (460h)
777
778 // Now wait 'for a short while' to allow the AC97
779 // part to start generating bit clock. (so we don't
780 // Try to start the PLL without an input clock.)
781 delayus(card,50000);
782
783 // Set the serial port timing configuration, so that the
784 // clock control circuit gets its clock from the right place.
785 writel(SERMC_PTC_AC97, card->pBA0 + BA0_SERMC); // (420h)=2.
786
787 // (7) Wait for the codec ready signal from the AC97 codec.
788
789 for (temp1 = 0; temp1 < 1000; temp1++) {
790 // Delay a mil to let things settle out and
791 // to prevent retrying the read too quickly.
792 udelay(1000);
793 if (readl(card->pBA0 + BA0_ACSTS) & ACSTS_CRDY) // If ready, (464h)
794 break; // exit the 'for' loop.
795 }
796 if (!(readl(card->pBA0 + BA0_ACSTS) & ACSTS_CRDY)) // If never came ready,
797 {
798 CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_ERR
799 "cs4281: ACSTS never came ready!\n"));
800 return -EIO; // exit initialization.
801 }
802 // (8) Assert the 'valid frame' signal so we can
803 // begin sending commands to the AC97 codec.
804 writel(ACCTL_VFRM | ACCTL_ESYN, card->pBA0 + BA0_ACCTL); // (460h)
805
806 // (9), Wait until CODEC calibration is finished.
807 // Print an error message if it doesn't.
808 for (temp1 = 0; temp1 < 1000; temp1++) {
809 delayus(card,10000);
810 // Read the AC97 Powerdown Control/Status Register.
811 cs4281_read_ac97(card, BA0_AC97_POWERDOWN, &temp2);
812 if ((temp2 & 0x0000000F) == 0x0000000F)
813 break;
814 }
815 if ((temp2 & 0x0000000F) != 0x0000000F) {
816 CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_ERR
817 "cs4281: Codec failed to calibrate. Status = %.8x.\n",
818 temp2));
819 return -EIO;
820 }
821 // (10), Set the serial port timing configuration, so that the
822 // clock control circuit gets its clock from the right place.
823 writel(SERMC_PTC_AC97, card->pBA0 + BA0_SERMC); // (420h)=2.
824
825
826 // (11) Wait until we've sampled input slots 3 & 4 as valid, meaning
827 // that the codec is pumping ADC data across the AC link.
828 for (temp1 = 0; temp1 < 1000; temp1++) {
829 // Delay a mil to let things settle out and
830 // to prevent retrying the read too quickly.
831 delayus(card,1000); //(test)
832
833 // Read the input slot valid register; See
834 // if input slots 3 and 4 are valid yet.
835 if (
836 (readl(card->pBA0 + BA0_ACISV) &
837 (ACISV_ISV3 | ACISV_ISV4)) ==
838 (ACISV_ISV3 | ACISV_ISV4)) break; // Exit the 'for' if slots are valid.
839 }
840 // If we never got valid data, exit initialization.
841 if ((readl(card->pBA0 + BA0_ACISV) & (ACISV_ISV3 | ACISV_ISV4))
842 != (ACISV_ISV3 | ACISV_ISV4)) {
843 CS_DBGOUT(CS_FUNCTION, 2,
844 printk(KERN_ERR
845 "cs4281: Never got valid data!\n"));
846 return -EIO; // If no valid data, exit initialization.
847 }
848 // (12), Start digital data transfer of audio data to the codec.
849 writel(ACOSV_SLV3 | ACOSV_SLV4, card->pBA0 + BA0_ACOSV); // (468h)
850
851
852 //**************************************
853 // Unmute the Master and Alternate
854 // (headphone) volumes. Set to max.
855 //**************************************
856 cs4281_write_ac97(card, BA0_AC97_HEADPHONE_VOLUME, 0);
857 cs4281_write_ac97(card, BA0_AC97_MASTER_VOLUME, 0);
858
859 //******************************************
860 // Power on the DAC(AddDACUser()from main())
861 //******************************************
862 cs4281_read_ac97(card, BA0_AC97_POWERDOWN, &temp1);
863 cs4281_write_ac97(card, BA0_AC97_POWERDOWN, temp1 &= 0xfdff);
864
865 // Wait until we sample a DAC ready state.
866 for (temp2 = 0; temp2 < 32; temp2++) {
867 // Let's wait a mil to let things settle.
868 delayus(card,1000);
869 // Read the current state of the power control reg.
870 cs4281_read_ac97(card, BA0_AC97_POWERDOWN, &temp1);
871 // If the DAC ready state bit is set, stop waiting.
872 if (temp1 & 0x2)
873 break;
874 }
875
876 //******************************************
877 // Power on the ADC(AddADCUser()from main())
878 //******************************************
879 cs4281_read_ac97(card, BA0_AC97_POWERDOWN, &temp1);
880 cs4281_write_ac97(card, BA0_AC97_POWERDOWN, temp1 &= 0xfeff);
881
882 // Wait until we sample ADC ready state.
883 for (temp2 = 0; temp2 < 32; temp2++) {
884 // Let's wait a mil to let things settle.
885 delayus(card,1000);
886 // Read the current state of the power control reg.
887 cs4281_read_ac97(card, BA0_AC97_POWERDOWN, &temp1);
888 // If the ADC ready state bit is set, stop waiting.
889 if (temp1 & 0x1)
890 break;
891 }
892 // Set up 4281 Register contents that
893 // don't change for boot duration.
894
895 // For playback, we map AC97 slot 3 and 4(Left
896 // & Right PCM playback) to DMA Channel 0.
897 // Set the fifo to be 15 bytes at offset zero.
898
899 ac97_slotid = 0x01000f00; // FCR0.RS[4:0]=1(=>slot4, right PCM playback).
900 // FCR0.LS[4:0]=0(=>slot3, left PCM playback).
901 // FCR0.SZ[6-0]=15; FCR0.OF[6-0]=0.
902 writel(ac97_slotid, card->pBA0 + BA0_FCR0); // (180h)
903 writel(ac97_slotid | FCRn_FEN, card->pBA0 + BA0_FCR0); // Turn on FIFO Enable.
904
905 // For capture, we map AC97 slot 10 and 11(Left
906 // and Right PCM Record) to DMA Channel 1.
907 // Set the fifo to be 15 bytes at offset sixteen.
908 ac97_slotid = 0x0B0A0f10; // FCR1.RS[4:0]=11(=>slot11, right PCM record).
909 // FCR1.LS[4:0]=10(=>slot10, left PCM record).
910 // FCR1.SZ[6-0]=15; FCR1.OF[6-0]=16.
911 writel(ac97_slotid | FCRn_PSH, card->pBA0 + BA0_FCR1); // (184h)
912 writel(ac97_slotid | FCRn_FEN, card->pBA0 + BA0_FCR1); // Turn on FIFO Enable.
913
914 // Map the Playback SRC to the same AC97 slots(3 & 4--
915 // --Playback left & right)as DMA channel 0.
916 // Map the record SRC to the same AC97 slots(10 & 11--
917 // -- Record left & right) as DMA channel 1.
918
919 ac97_slotid = 0x0b0a0100; // SCRSA.PRSS[4:0]=1(=>slot4, right PCM playback).
920 // SCRSA.PLSS[4:0]=0(=>slot3, left PCM playback).
921 // SCRSA.CRSS[4:0]=11(=>slot11, right PCM record)
922 // SCRSA.CLSS[4:0]=10(=>slot10, left PCM record).
923 writel(ac97_slotid, card->pBA0 + BA0_SRCSA); // (75ch)
924
925 // Set 'Half Terminal Count Interrupt Enable' and 'Terminal
926 // Count Interrupt Enable' in DMA Control Registers 0 & 1.
927 // Set 'MSK' flag to 1 to keep the DMA engines paused.
928 temp1 = (DCRn_HTCIE | DCRn_TCIE | DCRn_MSK); // (00030001h)
929 writel(temp1, card->pBA0 + BA0_DCR0); // (154h
930 writel(temp1, card->pBA0 + BA0_DCR1); // (15ch)
931
932 // Set 'Auto-Initialize Control' to 'enabled'; For playback,
933 // set 'Transfer Type Control'(TR[1:0]) to 'read transfer',
934 // for record, set Transfer Type Control to 'write transfer'.
935 // All other bits set to zero; Some will be changed @ transfer start.
936 temp1 = (DMRn_DMA | DMRn_AUTO | DMRn_TR_READ); // (20000018h)
937 writel(temp1, card->pBA0 + BA0_DMR0); // (150h)
938 temp1 = (DMRn_DMA | DMRn_AUTO | DMRn_TR_WRITE); // (20000014h)
939 writel(temp1, card->pBA0 + BA0_DMR1); // (158h)
940
941 // Enable DMA interrupts generally, and
942 // DMA0 & DMA1 interrupts specifically.
943 temp1 = readl(card->pBA0 + BA0_HIMR) & 0xfffbfcff;
944 writel(temp1, card->pBA0 + BA0_HIMR);
945
946 CS_DBGOUT(CS_FUNCTION, 2,
947 printk(KERN_INFO "cs4281: cs4281_hw_init()- 0\n"));
948 return 0;
949}
950
951#ifndef NOT_CS4281_PM
/*
 * printpm() -- Dump the saved power-management state (struct cs4281_pm)
 * to the kernel log at debug level 9.  Debug aid only; reads the pm
 * shadow fields, no hardware access and no side effects beyond printk.
 */
static void printpm(struct cs4281_state *s)
{
	CS_DBGOUT(CS_PM, 9, printk("pm struct:\n"));
	CS_DBGOUT(CS_PM, 9, printk("flags:0x%x u32CLKCR1_SAVE: 0%x u32SSPMValue: 0x%x\n",
		(unsigned)s->pm.flags,s->pm.u32CLKCR1_SAVE,s->pm.u32SSPMValue));
	CS_DBGOUT(CS_PM, 9, printk("u32PPLVCvalue: 0x%x u32PPRVCvalue: 0x%x\n",
		s->pm.u32PPLVCvalue,s->pm.u32PPRVCvalue));
	CS_DBGOUT(CS_PM, 9, printk("u32FMLVCvalue: 0x%x u32FMRVCvalue: 0x%x\n",
		s->pm.u32FMLVCvalue,s->pm.u32FMRVCvalue));
	CS_DBGOUT(CS_PM, 9, printk("u32GPIORvalue: 0x%x u32JSCTLvalue: 0x%x\n",
		s->pm.u32GPIORvalue,s->pm.u32JSCTLvalue));
	CS_DBGOUT(CS_PM, 9, printk("u32SSCR: 0x%x u32SRCSA: 0x%x\n",
		s->pm.u32SSCR,s->pm.u32SRCSA));
	CS_DBGOUT(CS_PM, 9, printk("u32DacASR: 0x%x u32AdcASR: 0x%x\n",
		s->pm.u32DacASR,s->pm.u32AdcASR));
	CS_DBGOUT(CS_PM, 9, printk("u32DacSR: 0x%x u32AdcSR: 0x%x\n",
		s->pm.u32DacSR,s->pm.u32AdcSR));
	CS_DBGOUT(CS_PM, 9, printk("u32MIDCR_Save: 0x%x\n",
		s->pm.u32MIDCR_Save));

}
973static void printpipe(struct cs4281_pipeline *pl)
974{
975
976 CS_DBGOUT(CS_PM, 9, printk("pm struct:\n"));
977 CS_DBGOUT(CS_PM, 9, printk("flags:0x%x number: 0%x\n",
978 (unsigned)pl->flags,pl->number));
979 CS_DBGOUT(CS_PM, 9, printk("u32DBAnValue: 0%x u32DBCnValue: 0x%x\n",
980 pl->u32DBAnValue,pl->u32DBCnValue));
981 CS_DBGOUT(CS_PM, 9, printk("u32DMRnValue: 0x%x u32DCRnValue: 0x%x\n",
982 pl->u32DMRnValue,pl->u32DCRnValue));
983 CS_DBGOUT(CS_PM, 9, printk("u32DBAnAddress: 0x%x u32DBCnAddress: 0x%x\n",
984 pl->u32DBAnAddress,pl->u32DBCnAddress));
985 CS_DBGOUT(CS_PM, 9, printk("u32DCAnAddress: 0x%x u32DCCnAddress: 0x%x\n",
986 pl->u32DCCnAddress,pl->u32DCCnAddress));
987 CS_DBGOUT(CS_PM, 9, printk("u32DMRnAddress: 0x%x u32DCRnAddress: 0x%x\n",
988 pl->u32DMRnAddress,pl->u32DCRnAddress));
989 CS_DBGOUT(CS_PM, 9, printk("u32HDSRnAddress: 0x%x u32DBAn_Save: 0x%x\n",
990 pl->u32HDSRnAddress,pl->u32DBAn_Save));
991 CS_DBGOUT(CS_PM, 9, printk("u32DBCn_Save: 0x%x u32DMRn_Save: 0x%x\n",
992 pl->u32DBCn_Save,pl->u32DMRn_Save));
993 CS_DBGOUT(CS_PM, 9, printk("u32DCRn_Save: 0x%x u32DCCn_Save: 0x%x\n",
994 pl->u32DCRn_Save,pl->u32DCCn_Save));
995 CS_DBGOUT(CS_PM, 9, printk("u32DCAn_Save: 0x%x\n",
996 pl->u32DCAn_Save));
997 CS_DBGOUT(CS_PM, 9, printk("u32FCRn_Save: 0x%x u32FSICn_Save: 0x%x\n",
998 pl->u32FCRn_Save,pl->u32FSICn_Save));
999 CS_DBGOUT(CS_PM, 9, printk("u32FCRnValue: 0x%x u32FSICnValue: 0x%x\n",
1000 pl->u32FCRnValue,pl->u32FSICnValue));
1001 CS_DBGOUT(CS_PM, 9, printk("u32FCRnAddress: 0x%x u32FSICnAddress: 0x%x\n",
1002 pl->u32FCRnAddress,pl->u32FSICnAddress));
1003 CS_DBGOUT(CS_PM, 9, printk("u32FPDRnValue: 0x%x u32FPDRnAddress: 0x%x\n",
1004 pl->u32FPDRnValue,pl->u32FPDRnAddress));
1005}
1006static void printpipelines(struct cs4281_state *s)
1007{
1008 int i;
1009 for(i=0;i<CS4281_NUMBER_OF_PIPELINES;i++)
1010 {
1011 if(s->pl[i].flags & CS4281_PIPELINE_VALID)
1012 {
1013 printpipe(&s->pl[i]);
1014 }
1015 }
1016}
1017/****************************************************************************
1018*
1019* Suspend - save the ac97 regs, mute the outputs and power down the part.
1020*
1021****************************************************************************/
/*
 * cs4281_ac97_suspend() -- Save the AC97 codec state ahead of a suspend:
 * record the current DMA hardware pointers, stop the DAC/ADC, snapshot a
 * range of codec registers plus the volume/powerdown registers, mute all
 * outputs, and finally power the codec down.  Restored by
 * cs4281_ac97_resume().
 */
static void cs4281_ac97_suspend(struct cs4281_state *s)
{
	int Count,i;

	CS_DBGOUT(CS_PM, 9, printk("cs4281: cs4281_ac97_suspend()+\n"));
/*
* change the state, save the current hwptr, then stop the dac/adc
*/
	s->pm.flags &= ~CS4281_PM_IDLE;
	s->pm.flags |= CS4281_PM_SUSPENDING;
	s->pm.u32hwptr_playback = readl(s->pBA0 + BA0_DCA0);
	s->pm.u32hwptr_capture = readl(s->pBA0 + BA0_DCA1);
	stop_dac(s);
	stop_adc(s);

	/* Snapshot the even-numbered AC97 registers 0x02 through
	 * CS4281_AC97_HIGHESTREGTORESTORE into pm.ac97[] so the resume
	 * path can write them back in the same order. */
	for(Count = 0x2, i=0; (Count <= CS4281_AC97_HIGHESTREGTORESTORE)
			&& (i < CS4281_AC97_NUMBER_RESTORE_REGS);
		Count += 2, i++)
	{
		cs4281_read_ac97(s, BA0_AC97_RESET + Count, &s->pm.ac97[i]);
	}
/*
* Save the ac97 volume registers as well as the current powerdown state.
* Now, mute the all the outputs (master, headphone, and mono), as well
* as the PCM volume, in preparation for powering down the entire part.
*/
	cs4281_read_ac97(s, BA0_AC97_MASTER_VOLUME, &s->pm.u32AC97_master_volume);
	cs4281_read_ac97(s, BA0_AC97_HEADPHONE_VOLUME, &s->pm.u32AC97_headphone_volume);
	cs4281_read_ac97(s, BA0_AC97_MASTER_VOLUME_MONO, &s->pm.u32AC97_master_volume_mono);
	cs4281_read_ac97(s, BA0_AC97_PCM_OUT_VOLUME, &s->pm.u32AC97_pcm_out_volume);

	/* 0x8000 = mute bit (bit 15) in the AC97 volume registers. */
	cs4281_write_ac97(s, BA0_AC97_MASTER_VOLUME, 0x8000);
	cs4281_write_ac97(s, BA0_AC97_HEADPHONE_VOLUME, 0x8000);
	cs4281_write_ac97(s, BA0_AC97_MASTER_VOLUME_MONO, 0x8000);
	cs4281_write_ac97(s, BA0_AC97_PCM_OUT_VOLUME, 0x8000);

	cs4281_read_ac97(s, BA0_AC97_POWERDOWN, &s->pm.u32AC97_powerdown);
	cs4281_read_ac97(s, BA0_AC97_GENERAL_PURPOSE, &s->pm.u32AC97_general_purpose);

/*
* And power down everything on the AC97 codec.
*/
	cs4281_write_ac97(s, BA0_AC97_POWERDOWN, 0xff00);
	CS_DBGOUT(CS_PM, 9, printk("cs4281: cs4281_ac97_suspend()-\n"));
}
1067
1068/****************************************************************************
1069*
1070* Resume - power up the part and restore its registers..
1071*
1072****************************************************************************/
/*
 * cs4281_ac97_resume() -- Restore the AC97 codec state saved by
 * cs4281_ac97_suspend(): general purpose register first (mic select),
 * then the powerdown register, then the saved block of codec registers.
 * The ordering below is deliberate -- see the inline comments.
 */
static void cs4281_ac97_resume(struct cs4281_state *s)
{
	int Count,i;

	CS_DBGOUT(CS_PM, 9, printk("cs4281: cs4281_ac97_resume()+\n"));

/* do not save the power state registers at this time
	//
	// If we saved away the power control registers, write them into the
	// shadows so those saved values get restored instead of the current
	// shadowed value.
	//
	if( bPowerStateSaved )
	{
		PokeShadow( 0x26, ulSaveReg0x26 );
		bPowerStateSaved = FALSE;
	}
*/

//
// First, we restore the state of the general purpose register. This
// contains the mic select (mic1 or mic2) and if we restore this after
// we restore the mic volume/boost state and mic2 was selected at
// suspend time, we will end up with a brief period of time where mic1
// is selected with the volume/boost settings for mic2, causing
// acoustic feedback. So we restore the general purpose register
// first, thereby getting the correct mic selected before we restore
// the mic volume/boost.
//
	cs4281_write_ac97(s, BA0_AC97_GENERAL_PURPOSE, s->pm.u32AC97_general_purpose);

//
// Now, while the outputs are still muted, restore the state of power
// on the AC97 part.
//
	cs4281_write_ac97(s, BA0_AC97_POWERDOWN, s->pm.u32AC97_powerdown);

/*
* Restore just the first set of registers, from register number
* 0x02 to the register number that ulHighestRegToRestore specifies.
* (Writes back the pm.ac97[] snapshot taken in cs4281_ac97_suspend().)
*/
	for( Count = 0x2, i=0;
		(Count <= CS4281_AC97_HIGHESTREGTORESTORE)
			&& (i < CS4281_AC97_NUMBER_RESTORE_REGS);
		Count += 2, i++)
	{
		cs4281_write_ac97(s, BA0_AC97_RESET + Count, s->pm.ac97[i]);
	}
	CS_DBGOUT(CS_PM, 9, printk("cs4281: cs4281_ac97_resume()-\n"));
}
1123
1124/* do not save the power state registers at this time
1125****************************************************************************
1126*
1127* SavePowerState - Save the power registers away.
1128*
1129****************************************************************************
1130void
1131HWAC97codec::SavePowerState(void)
1132{
1133 ENTRY(TM_OBJECTCALLS, "HWAC97codec::SavePowerState()\r\n");
1134
1135 ulSaveReg0x26 = PeekShadow(0x26);
1136
1137 //
1138 // Note that we have saved registers that need to be restored during a
1139 // resume instead of ulAC97Regs[].
1140 //
1141 bPowerStateSaved = TRUE;
1142
1143} // SavePowerState
1144*/
1145
1146static void cs4281_SuspendFIFO(struct cs4281_state *s, struct cs4281_pipeline *pl)
1147{
1148 /*
1149 * We need to save the contents of the BASIC FIFO Registers.
1150 */
1151 pl->u32FCRn_Save = readl(s->pBA0 + pl->u32FCRnAddress);
1152 pl->u32FSICn_Save = readl(s->pBA0 + pl->u32FSICnAddress);
1153}
1154static void cs4281_ResumeFIFO(struct cs4281_state *s, struct cs4281_pipeline *pl)
1155{
1156 /*
1157 * We need to restore the contents of the BASIC FIFO Registers.
1158 */
1159 writel(pl->u32FCRn_Save,s->pBA0 + pl->u32FCRnAddress);
1160 writel(pl->u32FSICn_Save,s->pBA0 + pl->u32FSICnAddress);
1161}
1162static void cs4281_SuspendDMAengine(struct cs4281_state *s, struct cs4281_pipeline *pl)
1163{
1164 //
1165 // We need to save the contents of the BASIC DMA Registers.
1166 //
1167 pl->u32DBAn_Save = readl(s->pBA0 + pl->u32DBAnAddress);
1168 pl->u32DBCn_Save = readl(s->pBA0 + pl->u32DBCnAddress);
1169 pl->u32DMRn_Save = readl(s->pBA0 + pl->u32DMRnAddress);
1170 pl->u32DCRn_Save = readl(s->pBA0 + pl->u32DCRnAddress);
1171 pl->u32DCCn_Save = readl(s->pBA0 + pl->u32DCCnAddress);
1172 pl->u32DCAn_Save = readl(s->pBA0 + pl->u32DCAnAddress);
1173}
1174static void cs4281_ResumeDMAengine(struct cs4281_state *s, struct cs4281_pipeline *pl)
1175{
1176 //
1177 // We need to save the contents of the BASIC DMA Registers.
1178 //
1179 writel( pl->u32DBAn_Save, s->pBA0 + pl->u32DBAnAddress);
1180 writel( pl->u32DBCn_Save, s->pBA0 + pl->u32DBCnAddress);
1181 writel( pl->u32DMRn_Save, s->pBA0 + pl->u32DMRnAddress);
1182 writel( pl->u32DCRn_Save, s->pBA0 + pl->u32DCRnAddress);
1183 writel( pl->u32DCCn_Save, s->pBA0 + pl->u32DCCnAddress);
1184 writel( pl->u32DCAn_Save, s->pBA0 + pl->u32DCAnAddress);
1185}
1186
1187static int cs4281_suspend(struct cs4281_state *s)
1188{
1189 int i;
1190 u32 u32CLKCR1;
1191 struct cs4281_pm *pm = &s->pm;
1192 CS_DBGOUT(CS_PM | CS_FUNCTION, 9,
1193 printk("cs4281: cs4281_suspend()+ flags=%d\n",
1194 (unsigned)s->pm.flags));
1195/*
1196* check the current state, only suspend if IDLE
1197*/
1198 if(!(s->pm.flags & CS4281_PM_IDLE))
1199 {
1200 CS_DBGOUT(CS_PM | CS_ERROR, 2,
1201 printk("cs4281: cs4281_suspend() unable to suspend, not IDLE\n"));
1202 return 1;
1203 }
1204 s->pm.flags &= ~CS4281_PM_IDLE;
1205 s->pm.flags |= CS4281_PM_SUSPENDING;
1206
1207//
1208// Gershwin CLKRUN - Set CKRA
1209//
1210 u32CLKCR1 = readl(s->pBA0 + BA0_CLKCR1);
1211
1212 pm->u32CLKCR1_SAVE = u32CLKCR1;
1213 if(!(u32CLKCR1 & 0x00010000 ) )
1214 writel(u32CLKCR1 | 0x00010000, s->pBA0 + BA0_CLKCR1);
1215
1216//
1217// First, turn on the clocks (yikes) to the devices, so that they will
1218// respond when we try to save their state.
1219//
1220 if(!(u32CLKCR1 & CLKCR1_SWCE))
1221 {
1222 writel(u32CLKCR1 | CLKCR1_SWCE , s->pBA0 + BA0_CLKCR1);
1223 }
1224
1225 //
1226 // Save the power state
1227 //
1228 pm->u32SSPMValue = readl(s->pBA0 + BA0_SSPM);
1229
1230 //
1231 // Disable interrupts.
1232 //
1233 writel(HICR_CHGM, s->pBA0 + BA0_HICR);
1234
1235 //
1236 // Save the PCM Playback Left and Right Volume Control.
1237 //
1238 pm->u32PPLVCvalue = readl(s->pBA0 + BA0_PPLVC);
1239 pm->u32PPRVCvalue = readl(s->pBA0 + BA0_PPRVC);
1240
1241 //
1242 // Save the FM Synthesis Left and Right Volume Control.
1243 //
1244 pm->u32FMLVCvalue = readl(s->pBA0 + BA0_FMLVC);
1245 pm->u32FMRVCvalue = readl(s->pBA0 + BA0_FMRVC);
1246
1247 //
1248 // Save the GPIOR value.
1249 //
1250 pm->u32GPIORvalue = readl(s->pBA0 + BA0_GPIOR);
1251
1252 //
1253 // Save the JSCTL value.
1254 //
1255 pm->u32JSCTLvalue = readl(s->pBA0 + BA0_GPIOR);
1256
1257 //
1258 // Save Sound System Control Register
1259 //
1260 pm->u32SSCR = readl(s->pBA0 + BA0_SSCR);
1261
1262 //
1263 // Save SRC Slot Assinment register
1264 //
1265 pm->u32SRCSA = readl(s->pBA0 + BA0_SRCSA);
1266
1267 //
1268 // Save sample rate
1269 //
1270 pm->u32DacASR = readl(s->pBA0 + BA0_PASR);
1271 pm->u32AdcASR = readl(s->pBA0 + BA0_CASR);
1272 pm->u32DacSR = readl(s->pBA0 + BA0_DACSR);
1273 pm->u32AdcSR = readl(s->pBA0 + BA0_ADCSR);
1274
1275 //
1276 // Loop through all of the PipeLines
1277 //
1278 for(i = 0; i < CS4281_NUMBER_OF_PIPELINES; i++)
1279 {
1280 if(s->pl[i].flags & CS4281_PIPELINE_VALID)
1281 {
1282 //
1283 // Ask the DMAengines and FIFOs to Suspend.
1284 //
1285 cs4281_SuspendDMAengine(s,&s->pl[i]);
1286 cs4281_SuspendFIFO(s,&s->pl[i]);
1287 }
1288 }
1289 //
1290 // We need to save the contents of the Midi Control Register.
1291 //
1292 pm->u32MIDCR_Save = readl(s->pBA0 + BA0_MIDCR);
1293/*
1294* save off the AC97 part information
1295*/
1296 cs4281_ac97_suspend(s);
1297
1298 //
1299 // Turn off the serial ports.
1300 //
1301 writel(0, s->pBA0 + BA0_SERMC);
1302
1303 //
1304 // Power off FM, Joystick, AC link,
1305 //
1306 writel(0, s->pBA0 + BA0_SSPM);
1307
1308 //
1309 // DLL off.
1310 //
1311 writel(0, s->pBA0 + BA0_CLKCR1);
1312
1313 //
1314 // AC link off.
1315 //
1316 writel(0, s->pBA0 + BA0_SPMC);
1317
1318 //
1319 // Put the chip into D3(hot) state.
1320 //
1321 // PokeBA0(BA0_PMCS, 0x00000003);
1322
1323 //
1324 // Gershwin CLKRUN - Clear CKRA
1325 //
1326 u32CLKCR1 = readl(s->pBA0 + BA0_CLKCR1);
1327 writel(u32CLKCR1 & 0xFFFEFFFF, s->pBA0 + BA0_CLKCR1);
1328
1329#ifdef CSDEBUG
1330 printpm(s);
1331 printpipelines(s);
1332#endif
1333
1334 s->pm.flags &= ~CS4281_PM_SUSPENDING;
1335 s->pm.flags |= CS4281_PM_SUSPENDED;
1336
1337 CS_DBGOUT(CS_PM | CS_FUNCTION, 9,
1338 printk("cs4281: cs4281_suspend()- flags=%d\n",
1339 (unsigned)s->pm.flags));
1340 return 0;
1341}
1342
/*
 * cs4281_resume() -- Full-chip resume path; the inverse of
 * cs4281_suspend().  Only proceeds from the SUSPENDED state (returns 1
 * otherwise, -1 if hardware re-init fails, 0 on success).  Re-runs
 * cs4281_hw_init(), restores every saved register and pipeline, resumes
 * the AC97 codec, re-enables interrupts, and restarts the DAC/ADC at
 * the hardware pointers saved at suspend time.
 */
static int cs4281_resume(struct cs4281_state *s)
{
	int i;
	unsigned temp1;
	u32 u32CLKCR1;
	struct cs4281_pm *pm = &s->pm;
	CS_DBGOUT(CS_PM | CS_FUNCTION, 4,
		printk( "cs4281: cs4281_resume()+ flags=%d\n",
			(unsigned)s->pm.flags));
	if(!(s->pm.flags & CS4281_PM_SUSPENDED))
	{
		CS_DBGOUT(CS_PM | CS_ERROR, 2,
			printk("cs4281: cs4281_resume() unable to resume, not SUSPENDED\n"));
		return 1;
	}
	s->pm.flags &= ~CS4281_PM_SUSPENDED;
	s->pm.flags |= CS4281_PM_RESUMING;

//
// Gershwin CLKRUN - Set CKRA
//
	u32CLKCR1 = readl(s->pBA0 + BA0_CLKCR1);
	writel(u32CLKCR1 | 0x00010000, s->pBA0 + BA0_CLKCR1);

	//
	// set the power state.
	//
	//old PokeBA0(BA0_PMCS, 0);

	//
	// Program the clock circuit and serial ports.
	// (temp1 holds cs4281_hw_init()'s int return code.)
	//
	temp1 = cs4281_hw_init(s);
	if (temp1) {
		CS_DBGOUT(CS_ERROR | CS_INIT, 1,
			printk(KERN_ERR
				"cs4281: resume cs4281_hw_init() error.\n"));
		return -1;
	}

	//
	// restore the Power state
	//
	writel(pm->u32SSPMValue, s->pBA0 + BA0_SSPM);

	//
	// Set post SRC mix setting (FM or ALT48K)
	// NOTE(review): u32SSPM_BITS is not written by cs4281_suspend() in
	// this file -- presumably it is maintained elsewhere; verify.
	//
	writel(pm->u32SSPM_BITS, s->pBA0 + BA0_SSPM);

	//
	// Loop through all of the PipeLines
	//
	for(i = 0; i < CS4281_NUMBER_OF_PIPELINES; i++)
	{
		if(s->pl[i].flags & CS4281_PIPELINE_VALID)
		{
			//
			// Ask the DMAengines and FIFOs to Resume.
			//
			cs4281_ResumeDMAengine(s,&s->pl[i]);
			cs4281_ResumeFIFO(s,&s->pl[i]);
		}
	}
	//
	// We need to restore the contents of the Midi Control Register.
	//
	writel(pm->u32MIDCR_Save, s->pBA0 + BA0_MIDCR);

	cs4281_ac97_resume(s);
	//
	// Restore the PCM Playback Left and Right Volume Control.
	//
	writel(pm->u32PPLVCvalue, s->pBA0 + BA0_PPLVC);
	writel(pm->u32PPRVCvalue, s->pBA0 + BA0_PPRVC);

	//
	// Restore the FM Synthesis Left and Right Volume Control.
	//
	writel(pm->u32FMLVCvalue, s->pBA0 + BA0_FMLVC);
	writel(pm->u32FMRVCvalue, s->pBA0 + BA0_FMRVC);

	//
	// Restore the JSCTL value.
	//
	writel(pm->u32JSCTLvalue, s->pBA0 + BA0_JSCTL);

	//
	// Restore the GPIOR register value.
	//
	writel(pm->u32GPIORvalue, s->pBA0 + BA0_GPIOR);

	//
	// Restore Sound System Control Register
	//
	writel(pm->u32SSCR, s->pBA0 + BA0_SSCR);

	//
	// Restore SRC Slot Assignment register
	//
	writel(pm->u32SRCSA, s->pBA0 + BA0_SRCSA);

	//
	// Restore sample rate
	//
	writel(pm->u32DacASR, s->pBA0 + BA0_PASR);
	writel(pm->u32AdcASR, s->pBA0 + BA0_CASR);
	writel(pm->u32DacSR, s->pBA0 + BA0_DACSR);
	writel(pm->u32AdcSR, s->pBA0 + BA0_ADCSR);

	//
	// Restore CFL1/2 registers we saved to compensate for OEM bugs.
	//
	// PokeBA0(BA0_CFLR, ulConfig);

	//
	// Gershwin CLKRUN - Clear CKRA
	//
	writel(pm->u32CLKCR1_SAVE, s->pBA0 + BA0_CLKCR1);

	//
	// Enable interrupts on the part.
	//
	writel(HICR_IEV | HICR_CHGM, s->pBA0 + BA0_HICR);

#ifdef CSDEBUG
	printpm(s);
	printpipelines(s);
#endif
/*
* change the state, restore the current hwptrs, then stop the dac/adc
*/
	s->pm.flags |= CS4281_PM_IDLE;
	s->pm.flags &= ~(CS4281_PM_SUSPENDING | CS4281_PM_SUSPENDED
			| CS4281_PM_RESUMING | CS4281_PM_RESUMED);

	writel(s->pm.u32hwptr_playback, s->pBA0 + BA0_DCA0);
	writel(s->pm.u32hwptr_capture, s->pBA0 + BA0_DCA1);
	start_dac(s);
	start_adc(s);

	CS_DBGOUT(CS_PM | CS_FUNCTION, 9, printk("cs4281: cs4281_resume()- flags=%d\n",
		(unsigned)s->pm.flags));
	return 0;
}
1488
1489#endif
1490
1491//******************************************************************************
1492// "cs4281_play_rate()" --
1493//******************************************************************************
1494static void cs4281_play_rate(struct cs4281_state *card, u32 playrate)
1495{
1496 u32 DACSRvalue = 1;
1497
1498 // Based on the sample rate, program the DACSR register.
1499 if (playrate == 8000)
1500 DACSRvalue = 5;
1501 if (playrate == 11025)
1502 DACSRvalue = 4;
1503 else if (playrate == 22050)
1504 DACSRvalue = 2;
1505 else if (playrate == 44100)
1506 DACSRvalue = 1;
1507 else if ((playrate <= 48000) && (playrate >= 6023))
1508 DACSRvalue = 24576000 / (playrate * 16);
1509 else if (playrate < 6023)
1510 // Not allowed by open.
1511 return;
1512 else if (playrate > 48000)
1513 // Not allowed by open.
1514 return;
1515 CS_DBGOUT(CS_WAVE_WRITE | CS_PARMS, 2, printk(KERN_INFO
1516 "cs4281: cs4281_play_rate(): DACSRvalue=0x%.8x playrate=%d\n",
1517 DACSRvalue, playrate));
1518 // Write the 'sample rate select code'
1519 // to the 'DAC Sample Rate' register.
1520 writel(DACSRvalue, card->pBA0 + BA0_DACSR); // (744h)
1521}
1522
1523//******************************************************************************
1524// "cs4281_record_rate()" -- Initialize the record sample rate converter.
1525//******************************************************************************
1526static void cs4281_record_rate(struct cs4281_state *card, u32 outrate)
1527{
1528 u32 ADCSRvalue = 1;
1529
1530 //
1531 // Based on the sample rate, program the ADCSR register
1532 //
1533 if (outrate == 8000)
1534 ADCSRvalue = 5;
1535 if (outrate == 11025)
1536 ADCSRvalue = 4;
1537 else if (outrate == 22050)
1538 ADCSRvalue = 2;
1539 else if (outrate == 44100)
1540 ADCSRvalue = 1;
1541 else if ((outrate <= 48000) && (outrate >= 6023))
1542 ADCSRvalue = 24576000 / (outrate * 16);
1543 else if (outrate < 6023) {
1544 // Not allowed by open.
1545 return;
1546 } else if (outrate > 48000) {
1547 // Not allowed by open.
1548 return;
1549 }
1550 CS_DBGOUT(CS_WAVE_READ | CS_PARMS, 2, printk(KERN_INFO
1551 "cs4281: cs4281_record_rate(): ADCSRvalue=0x%.8x outrate=%d\n",
1552 ADCSRvalue, outrate));
1553 // Write the 'sample rate select code
1554 // to the 'ADC Sample Rate' register.
1555 writel(ADCSRvalue, card->pBA0 + BA0_ADCSR); // (748h)
1556}
1557
1558
1559
1560static void stop_dac(struct cs4281_state *s)
1561{
1562 unsigned long flags;
1563 unsigned temp1;
1564
1565 CS_DBGOUT(CS_WAVE_WRITE, 3, printk(KERN_INFO "cs4281: stop_dac():\n"));
1566 spin_lock_irqsave(&s->lock, flags);
1567 s->ena &= ~FMODE_WRITE;
1568 temp1 = readl(s->pBA0 + BA0_DCR0) | DCRn_MSK;
1569 writel(temp1, s->pBA0 + BA0_DCR0);
1570
1571 spin_unlock_irqrestore(&s->lock, flags);
1572}
1573
1574
// start_dac() -- Start playback if it is not already running and the DAC
// DMA buffer is mapped, or has pending data and is programmed.  When PM
// support is compiled in, also requires the driver to be in the IDLE
// power state.  Clears the DMA0 mask to start transfers, enables chip
// interrupts, and un-attenuates the PCM playback volume registers.
// Takes the card spinlock with interrupts disabled.
static void start_dac(struct cs4281_state *s)
{
	unsigned long flags;
	unsigned temp1;

	CS_DBGOUT(CS_FUNCTION, 3, printk(KERN_INFO "cs4281: start_dac()+\n"));
	spin_lock_irqsave(&s->lock, flags);
	// NOTE: the closing ')' of this condition lives inside the #ifndef /
	// #else arms below -- edit with care.
	if (!(s->ena & FMODE_WRITE) && (s->dma_dac.mapped ||
					(s->dma_dac.count > 0
					 && s->dma_dac.ready))
#ifndef NOT_CS4281_PM
	    && (s->pm.flags & CS4281_PM_IDLE))
#else
)
#endif
	{
		s->ena |= FMODE_WRITE;
		temp1 = readl(s->pBA0 + BA0_DCR0) & ~DCRn_MSK;	// Clear DMA0 channel mask.
		writel(temp1, s->pBA0 + BA0_DCR0);	// Start DMA'ing.
		writel(HICR_IEV | HICR_CHGM, s->pBA0 + BA0_HICR);	// Enable interrupts.

		// Set PCM playback right/left volume control to 7
		// (presumably a low attenuation value -- see datasheet).
		writel(7, s->pBA0 + BA0_PPRVC);
		writel(7, s->pBA0 + BA0_PPLVC);
		CS_DBGOUT(CS_WAVE_WRITE | CS_PARMS, 8, printk(KERN_INFO
			"cs4281: start_dac(): writel 0x%x start dma\n", temp1));

	}
	spin_unlock_irqrestore(&s->lock, flags);
	CS_DBGOUT(CS_FUNCTION, 3,
		  printk(KERN_INFO "cs4281: start_dac()-\n"));
}
1606
1607
1608static void stop_adc(struct cs4281_state *s)
1609{
1610 unsigned long flags;
1611 unsigned temp1;
1612
1613 CS_DBGOUT(CS_FUNCTION, 3,
1614 printk(KERN_INFO "cs4281: stop_adc()+\n"));
1615
1616 spin_lock_irqsave(&s->lock, flags);
1617 s->ena &= ~FMODE_READ;
1618
1619 if (s->conversion == 1) {
1620 s->conversion = 0;
1621 s->prop_adc.fmt = s->prop_adc.fmt_original;
1622 }
1623 temp1 = readl(s->pBA0 + BA0_DCR1) | DCRn_MSK;
1624 writel(temp1, s->pBA0 + BA0_DCR1);
1625 spin_unlock_irqrestore(&s->lock, flags);
1626 CS_DBGOUT(CS_FUNCTION, 3,
1627 printk(KERN_INFO "cs4281: stop_adc()-\n"));
1628}
1629
1630
// start_adc() -- Start capture if it is not already running and the ADC
// DMA buffer is mapped or has room for at least two fragments, and is
// programmed.  When PM support is compiled in, also requires the IDLE
// power state.  8-bit formats are silently converted to 16-bit capture
// (s->conversion = 1) because of a chip truncation/distortion issue; the
// data is converted back to 8 bits when copied to the user buffer.
//
// NOTE(review): unlike start_dac(), the enabling condition and the
// format rework below are evaluated WITHOUT holding s->lock; only the
// final register writes are under the lock.  Verify this is intentional.
static void start_adc(struct cs4281_state *s)
{
	unsigned long flags;
	unsigned temp1;

	CS_DBGOUT(CS_FUNCTION, 2,
		  printk(KERN_INFO "cs4281: start_adc()+\n"));

	// NOTE: the closing ')' of this condition lives inside the
	// #ifndef / #else arms below -- edit with care.
	if (!(s->ena & FMODE_READ) &&
	    (s->dma_adc.mapped || s->dma_adc.count <=
	     (signed) (s->dma_adc.dmasize - 2 * s->dma_adc.fragsize))
	    && s->dma_adc.ready
#ifndef NOT_CS4281_PM
	    && (s->pm.flags & CS4281_PM_IDLE))
#else
)
#endif
	{
		if (s->prop_adc.fmt & AFMT_S8 || s->prop_adc.fmt & AFMT_U8) {
			//
			// now only use 16 bit capture, due to truncation issue
			// in the chip, noticable distortion occurs.
			// allocate buffer and then convert from 16 bit to
			// 8 bit for the user buffer.
			//
			s->prop_adc.fmt_original = s->prop_adc.fmt;
			if (s->prop_adc.fmt & AFMT_S8) {
				s->prop_adc.fmt &= ~AFMT_S8;
				s->prop_adc.fmt |= AFMT_S16_LE;
			}
			if (s->prop_adc.fmt & AFMT_U8) {
				s->prop_adc.fmt &= ~AFMT_U8;
				s->prop_adc.fmt |= AFMT_U16_LE;
			}
			//
			// prog_dmabuf_adc performs a stop_adc() but that is
			// ok since we really haven't started the DMA yet.
			//
			prog_codec(s, CS_TYPE_ADC);

			if (prog_dmabuf_adc(s) != 0) {
				CS_DBGOUT(CS_ERROR, 2, printk(KERN_INFO
					"cs4281: start_adc(): error in prog_dmabuf_adc\n"));
			}
			s->conversion = 1;
		}
		spin_lock_irqsave(&s->lock, flags);
		s->ena |= FMODE_READ;
		temp1 = readl(s->pBA0 + BA0_DCR1) & ~DCRn_MSK;	// Clear DMA1 channel mask bit.
		writel(temp1, s->pBA0 + BA0_DCR1);	// Start recording
		writel(HICR_IEV | HICR_CHGM, s->pBA0 + BA0_HICR);	// Enable interrupts.
		spin_unlock_irqrestore(&s->lock, flags);

		CS_DBGOUT(CS_PARMS, 6, printk(KERN_INFO
			"cs4281: start_adc(): writel 0x%x \n", temp1));
	}
	CS_DBGOUT(CS_FUNCTION, 2,
		  printk(KERN_INFO "cs4281: start_adc()-\n"));

}
1691
1692
1693// ---------------------------------------------------------------------
1694
1695#define DMABUF_MINORDER 1 // ==> min buffer size = 8K.
1696
1697
1698static void dealloc_dmabuf(struct cs4281_state *s, struct dmabuf *db)
1699{
1700 struct page *map, *mapend;
1701
1702 if (db->rawbuf) {
1703 // Undo prog_dmabuf()'s marking the pages as reserved
1704 mapend =
1705 virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) -
1706 1);
1707 for (map = virt_to_page(db->rawbuf); map <= mapend; map++)
1708 ClearPageReserved(map);
1709 free_dmabuf(s, db);
1710 }
1711 if (s->tmpbuff && (db->type == CS_TYPE_ADC)) {
1712 // Undo prog_dmabuf()'s marking the pages as reserved
1713 mapend =
1714 virt_to_page(s->tmpbuff +
1715 (PAGE_SIZE << s->buforder_tmpbuff) - 1);
1716 for (map = virt_to_page(s->tmpbuff); map <= mapend; map++)
1717 ClearPageReserved(map);
1718 free_dmabuf2(s, db);
1719 }
1720 s->tmpbuff = NULL;
1721 db->rawbuf = NULL;
1722 db->mapped = db->ready = 0;
1723}
1724
// prog_dmabuf() -- Allocate (if needed) and program a DMA buffer for the
// DAC or ADC side: reset the ring-buffer bookkeeping, allocate a coherent
// buffer (and, for ADC, the tmpbuff used for 16->8 bit conversion), mark
// the pages reserved so cs4281_mmap()'s remap_pfn_range() works, then
// compute fragment size/count from the current format, channel count and
// rate.  Returns 0 on success or -ENOMEM.
static int prog_dmabuf(struct cs4281_state *s, struct dmabuf *db)
{
	int order;
	unsigned bytespersec, temp1;
	unsigned bufs, sample_shift = 0;
	struct page *map, *mapend;
	unsigned long df;

	CS_DBGOUT(CS_FUNCTION, 2,
		  printk(KERN_INFO "cs4281: prog_dmabuf()+\n"));
	db->hwptr = db->swptr = db->total_bytes = db->count = db->error =
	    db->endcleared = db->blocks = db->wakeup = db->underrun = 0;
/*
* check for order within limits, but do not overwrite value, check
* later for a fractional defaultorder (i.e. 100+).
*/
	if((defaultorder > 0) && (defaultorder < 12))
		df = defaultorder;
	else
		df = 1;

	if (!db->rawbuf) {
		db->ready = db->mapped = 0;
		// Try the requested order first, falling back to smaller
		// allocations down to DMABUF_MINORDER.
		for (order = df; order >= DMABUF_MINORDER; order--)
			if ( (db->rawbuf = (void *) pci_alloc_consistent(
				s->pcidev, PAGE_SIZE << order, &db-> dmaaddr)))
				    break;
		if (!db->rawbuf) {
			CS_DBGOUT(CS_ERROR, 1, printk(KERN_ERR
				"cs4281: prog_dmabuf(): unable to allocate rawbuf\n"));
			return -ENOMEM;
		}
		db->buforder = order;
		// Now mark the pages as reserved; otherwise the
		// remap_pfn_range() in cs4281_mmap doesn't work.
		// 1. get index to last page in mem_map array for rawbuf.
		mapend = virt_to_page(db->rawbuf +
			(PAGE_SIZE << db->buforder) - 1);

		// 2. mark each physical page in range as 'reserved'.
		for (map = virt_to_page(db->rawbuf); map <= mapend; map++)
			SetPageReserved(map);
	}
	// ADC side additionally needs tmpbuff for the 8-bit capture
	// conversion path (see start_adc()).
	if (!s->tmpbuff && (db->type == CS_TYPE_ADC)) {
		for (order = df; order >= DMABUF_MINORDER;
		     order--)
			if ( (s->tmpbuff = (void *) pci_alloc_consistent(
				s->pcidev, PAGE_SIZE << order,
				&s->dmaaddr_tmpbuff)))
				break;
		if (!s->tmpbuff) {
			CS_DBGOUT(CS_ERROR, 1, printk(KERN_ERR
				"cs4281: prog_dmabuf(): unable to allocate tmpbuff\n"));
			return -ENOMEM;
		}
		s->buforder_tmpbuff = order;
		// Now mark the pages as reserved; otherwise the
		// remap_pfn_range() in cs4281_mmap doesn't work.
		// 1. get index to last page in mem_map array for rawbuf.
		mapend = virt_to_page(s->tmpbuff +
			(PAGE_SIZE << s->buforder_tmpbuff) - 1);

		// 2. mark each physical page in range as 'reserved'.
		for (map = virt_to_page(s->tmpbuff); map <= mapend; map++)
			SetPageReserved(map);
	}
	// sample_shift: +1 for 16-bit samples, +1 for stereo, so
	// bytespersec = rate << sample_shift.
	if (db->type == CS_TYPE_DAC) {
		if (s->prop_dac.fmt & (AFMT_S16_LE | AFMT_U16_LE))
			sample_shift++;
		if (s->prop_dac.channels > 1)
			sample_shift++;
		bytespersec = s->prop_dac.rate << sample_shift;
	} else			// CS_TYPE_ADC
	{
		if (s->prop_adc.fmt & (AFMT_S16_LE | AFMT_U16_LE))
			sample_shift++;
		if (s->prop_adc.channels > 1)
			sample_shift++;
		bytespersec = s->prop_adc.rate << sample_shift;
	}
	bufs = PAGE_SIZE << db->buforder;

/*
* added fractional "defaultorder" inputs. if >100 then use
* defaultorder-100 as power of 2 for the buffer size. example:
* 106 = 2^(106-100) = 2^6 = 64 bytes for the buffer size.
* NOTE(review): this overrides 'bufs' without changing the actual
* allocation size above -- testing/debug knob, use with care.
*/
	if(defaultorder >= 100)
	{
		bufs = 1 << (defaultorder-100);
	}

#define INTERRUPT_RATE_MS 100	// Interrupt rate in milliseconds.
	db->numfrag = 2;
/*
* Nominal frag size(bytes/interrupt)
*/
	temp1 = bytespersec / (1000 / INTERRUPT_RATE_MS);
	db->fragshift = 8;	// Min 256 bytes.
	while (1 << db->fragshift < temp1)	// Calc power of 2 frag size.
		db->fragshift += 1;
	db->fragsize = 1 << db->fragshift;
	db->dmasize = db->fragsize * 2;
	db->fragsamples = db->fragsize >> sample_shift;	// # samples/fragment.

// If the calculated size is larger than the allocated
// buffer, divide the allocated buffer into 2 fragments.
	if (db->dmasize > bufs) {

		db->numfrag = 2;	// Two fragments.
		db->fragsize = bufs >> 1;	// Each 1/2 the alloc'ed buffer.
		db->fragsamples = db->fragsize >> sample_shift;	// # samples/fragment.
		db->dmasize = bufs;	// Use all the alloc'ed buffer.

		db->fragshift = 0;	// Calculate 'fragshift'.
		temp1 = db->fragsize;	// update_ptr() uses it
		while ((temp1 >>= 1) > 1)	// to calc 'total-bytes'
			db->fragshift += 1;	// returned in DSP_GETI/OPTR.
	}
	CS_DBGOUT(CS_PARMS, 3, printk(KERN_INFO
		"cs4281: prog_dmabuf(): numfrag=%d fragsize=%d fragsamples=%d fragshift=%d bufs=%d fmt=0x%x ch=%d\n",
			db->numfrag, db->fragsize, db->fragsamples,
			db->fragshift, bufs,
			(db->type == CS_TYPE_DAC) ? s->prop_dac.fmt :
				s->prop_adc.fmt,
			(db->type == CS_TYPE_DAC) ? s->prop_dac.channels :
				s->prop_adc.channels));
	CS_DBGOUT(CS_FUNCTION, 2,
		  printk(KERN_INFO "cs4281: prog_dmabuf()-\n"));
	return 0;
}
1856
1857
// prog_dmabuf_adc() -- Program the capture DMA: stop the ADC, (re)build
// its DMA buffer via prog_dmabuf(), fill the buffers with silence
// (0x80 for unsigned formats, 0 for signed), and write the buffer bus
// address and sample count into the DBA1/DBC1 registers.  Returns 0 on
// success or prog_dmabuf()'s error code.
static int prog_dmabuf_adc(struct cs4281_state *s)
{
	unsigned long va;
	unsigned count;
	int c;
	stop_adc(s);
	s->dma_adc.type = CS_TYPE_ADC;
	if ((c = prog_dmabuf(s, &s->dma_adc)))
		return c;

	// Silence value: 0x80 is mid-scale for unsigned formats.
	if (s->dma_adc.rawbuf) {
		memset(s->dma_adc.rawbuf,
		       (s->prop_adc.
			fmt & (AFMT_U8 | AFMT_U16_LE)) ? 0x80 : 0,
		       s->dma_adc.dmasize);
	}
	if (s->tmpbuff) {
		memset(s->tmpbuff,
		       (s->prop_adc.
			fmt & (AFMT_U8 | AFMT_U16_LE)) ? 0x80 : 0,
		       PAGE_SIZE << s->buforder_tmpbuff);
	}

	va = virt_to_bus(s->dma_adc.rawbuf);

	// Convert the byte count to a hardware sample count: halve once
	// for 16-bit samples and once again for stereo.
	count = s->dma_adc.dmasize;

	if (s->prop_adc.
	    fmt & (AFMT_S16_LE | AFMT_U16_LE | AFMT_S16_BE | AFMT_U16_BE))
		count /= 2;	// 16-bit.

	if (s->prop_adc.channels > 1)
		count /= 2;	// Assume stereo.

	CS_DBGOUT(CS_WAVE_READ, 3, printk(KERN_INFO
		"cs4281: prog_dmabuf_adc(): count=%d va=0x%.8x\n",
			count, (unsigned) va));

	writel(va, s->pBA0 + BA0_DBA1);	// Set buffer start address.
	writel(count - 1, s->pBA0 + BA0_DBC1);	// Set count.
	s->dma_adc.ready = 1;
	return 0;
}
1901
1902
// prog_dmabuf_dac() -- Program the playback DMA: stop the DAC, (re)build
// its DMA buffer via prog_dmabuf(), fill it with silence (0x80 for
// unsigned formats, 0 for signed), and write the buffer bus address and
// sample count into the DBA0/DBC0 registers.  Returns 0 on success or
// prog_dmabuf()'s error code.
static int prog_dmabuf_dac(struct cs4281_state *s)
{
	unsigned long va;
	unsigned count;
	int c;
	stop_dac(s);
	s->dma_dac.type = CS_TYPE_DAC;
	if ((c = prog_dmabuf(s, &s->dma_dac)))
		return c;
	// Silence value: 0x80 is mid-scale for unsigned formats.
	memset(s->dma_dac.rawbuf,
	       (s->prop_dac.fmt & (AFMT_U8 | AFMT_U16_LE)) ? 0x80 : 0,
	       s->dma_dac.dmasize);

	va = virt_to_bus(s->dma_dac.rawbuf);

	// Convert the byte count to a hardware sample count: halve once
	// for 16-bit samples and once again for stereo.
	count = s->dma_dac.dmasize;
	if (s->prop_dac.
	    fmt & (AFMT_S16_LE | AFMT_U16_LE | AFMT_S16_BE | AFMT_U16_BE))
		count /= 2;	// 16-bit.

	if (s->prop_dac.channels > 1)
		count /= 2;	// Assume stereo.

	writel(va, s->pBA0 + BA0_DBA0);	// Set buffer start address.
	writel(count - 1, s->pBA0 + BA0_DBC0);	// Set count.

	CS_DBGOUT(CS_WAVE_WRITE, 3, printk(KERN_INFO
		"cs4281: prog_dmabuf_dac(): count=%d va=0x%.8x\n",
			count, (unsigned) va));

	s->dma_dac.ready = 1;
	return 0;
}
1936
1937
1938static void clear_advance(void *buf, unsigned bsize, unsigned bptr,
1939 unsigned len, unsigned char c)
1940{
1941 if (bptr + len > bsize) {
1942 unsigned x = bsize - bptr;
1943 memset(((char *) buf) + bptr, c, x);
1944 bptr = 0;
1945 len -= x;
1946 }
1947 CS_DBGOUT(CS_WAVE_WRITE, 4, printk(KERN_INFO
1948 "cs4281: clear_advance(): memset %d at %p for %d size \n",
1949 (unsigned)c, ((char *) buf) + bptr, len));
1950 memset(((char *) buf) + bptr, c, len);
1951}
1952
1953
1954
// call with spinlock held!
/*
 * Advance the software view of the ADC/DAC DMA engines.
 *
 * Reads the hardware DMA address registers (DCA1 capture / DCA0 play),
 * converts them to offsets within the raw buffers, and updates
 * hwptr/total_bytes/count for whichever direction(s) are enabled in
 * s->ena.  Wakes sleeping readers/writers as data or space becomes
 * available.  On playback underrun the buffer is refilled with silence
 * and the DAC keeps running (avoids audible pops); playback is only
 * shut down at _release time.
 *
 * @s:       device state; caller must hold s->lock
 * @intflag: non-zero when called from the interrupt handler; forces a
 *           writer wakeup even if less than half the buffer is free
 */
static void cs4281_update_ptr(struct cs4281_state *s, int intflag)
{
	int diff;
	unsigned hwptr, va;

	// update ADC pointer
	if (s->ena & FMODE_READ) {
		hwptr = readl(s->pBA0 + BA0_DCA1);	// Read capture DMA address.
		va = virt_to_bus(s->dma_adc.rawbuf);
		hwptr -= (unsigned) va;
		// bytes captured since the last update, modulo buffer size
		diff =
		    (s->dma_adc.dmasize + hwptr -
		     s->dma_adc.hwptr) % s->dma_adc.dmasize;
		s->dma_adc.hwptr = hwptr;
		s->dma_adc.total_bytes += diff;
		s->dma_adc.count += diff;
		// clamp unread count to the buffer size (overrun case)
		if (s->dma_adc.count > s->dma_adc.dmasize)
			s->dma_adc.count = s->dma_adc.dmasize;
		if (s->dma_adc.mapped) {
			// mmap()ed capture: wake only on a full fragment
			if (s->dma_adc.count >=
			    (signed) s->dma_adc.fragsize)
				wake_up(&s->dma_adc.wait);
		} else {
			// normal read(): wake as soon as any data arrives
			if (s->dma_adc.count > 0)
				wake_up(&s->dma_adc.wait);
		}
		CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO
			"cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n",
				s, s->dma_adc.hwptr, s->dma_adc.total_bytes, s->dma_adc.count));
	}
	// update DAC pointer
	//
	// check for end of buffer, means that we are going to wait for another interrupt
	// to allow silence to fill the fifos on the part, to keep pops down to a minimum.
	//
	if (s->ena & FMODE_WRITE) {
		hwptr = readl(s->pBA0 + BA0_DCA0);	// Read play DMA address.
		va = virt_to_bus(s->dma_dac.rawbuf);
		hwptr -= (unsigned) va;
		diff = (s->dma_dac.dmasize + hwptr -
			s->dma_dac.hwptr) % s->dma_dac.dmasize;
		s->dma_dac.hwptr = hwptr;
		s->dma_dac.total_bytes += diff;
		if (s->dma_dac.mapped) {
			s->dma_dac.count += diff;
			if (s->dma_dac.count >= s->dma_dac.fragsize) {
				s->dma_dac.wakeup = 1;
				wake_up(&s->dma_dac.wait);
				// NOTE(review): the &= assumes dmasize is a
				// power of two — presumably guaranteed by
				// prog_dmabuf(); confirm before relying on it.
				if (s->dma_dac.count > s->dma_dac.dmasize)
					s->dma_dac.count &=
					    s->dma_dac.dmasize - 1;
			}
		} else {
			s->dma_dac.count -= diff;
			if (s->dma_dac.count <= 0) {
				//
				// fill with silence, and do not shut down the DAC.
				// Continue to play silence until the _release.
				//
				CS_DBGOUT(CS_WAVE_WRITE, 6, printk(KERN_INFO
					"cs4281: cs4281_update_ptr(): memset %d at %p for %d size \n",
						(unsigned)(s->prop_dac.fmt &
						(AFMT_U8 | AFMT_U16_LE)) ? 0x80 : 0,
						s->dma_dac.rawbuf, s->dma_dac.dmasize));
				memset(s->dma_dac.rawbuf,
				       (s->prop_dac.
					fmt & (AFMT_U8 | AFMT_U16_LE)) ?
				       0x80 : 0, s->dma_dac.dmasize);
				if (s->dma_dac.count < 0) {
					s->dma_dac.underrun = 1;
					s->dma_dac.count = 0;
					CS_DBGOUT(CS_ERROR, 9, printk(KERN_INFO
						"cs4281: cs4281_update_ptr(): underrun\n"));
				}
			} else if (s->dma_dac.count <=
				   (signed) s->dma_dac.fragsize
				   && !s->dma_dac.endcleared) {
				// nearly out of valid data: pre-clear one
				// fragment past the software pointer so the
				// hardware plays silence if the writer is late
				clear_advance(s->dma_dac.rawbuf,
					      s->dma_dac.dmasize,
					      s->dma_dac.swptr,
					      s->dma_dac.fragsize,
					      (s->prop_dac.
					       fmt & (AFMT_U8 |
						      AFMT_U16_LE)) ? 0x80
					      : 0);
				s->dma_dac.endcleared = 1;
			}
			// wake the writer when at least half the buffer is
			// free, or unconditionally from interrupt context
			if ( (s->dma_dac.count <= (signed) s->dma_dac.dmasize/2) ||
				intflag)
			{
				wake_up(&s->dma_dac.wait);
			}
		}
		CS_DBGOUT(CS_PARMS, 8, printk(KERN_INFO
			"cs4281: cs4281_update_ptr(): s=%p hwptr=%d total_bytes=%d count=%d \n",
				s, s->dma_dac.hwptr, s->dma_dac.total_bytes, s->dma_dac.count));
	}
}
2055
2056
2057// ---------------------------------------------------------------------
2058
2059static void prog_codec(struct cs4281_state *s, unsigned type)
2060{
2061 unsigned long flags;
2062 unsigned temp1, format;
2063
2064 CS_DBGOUT(CS_FUNCTION, 2,
2065 printk(KERN_INFO "cs4281: prog_codec()+ \n"));
2066
2067 spin_lock_irqsave(&s->lock, flags);
2068 if (type == CS_TYPE_ADC) {
2069 temp1 = readl(s->pBA0 + BA0_DCR1);
2070 writel(temp1 | DCRn_MSK, s->pBA0 + BA0_DCR1); // Stop capture DMA, if active.
2071
2072 // program sampling rates
2073 // Note, for CS4281, capture & play rates can be set independently.
2074 cs4281_record_rate(s, s->prop_adc.rate);
2075
2076 // program ADC parameters
2077 format = DMRn_DMA | DMRn_AUTO | DMRn_TR_WRITE;
2078 if (s->prop_adc.
2079 fmt & (AFMT_S16_LE | AFMT_U16_LE | AFMT_S16_BE | AFMT_U16_BE)) { // 16-bit
2080 if (s->prop_adc.fmt & (AFMT_S16_BE | AFMT_U16_BE)) // Big-endian?
2081 format |= DMRn_BEND;
2082 if (s->prop_adc.fmt & (AFMT_U16_LE | AFMT_U16_BE))
2083 format |= DMRn_USIGN; // Unsigned.
2084 } else
2085 format |= DMRn_SIZE8 | DMRn_USIGN; // 8-bit, unsigned
2086 if (s->prop_adc.channels < 2)
2087 format |= DMRn_MONO;
2088
2089 writel(format, s->pBA0 + BA0_DMR1);
2090
2091 CS_DBGOUT(CS_PARMS, 2, printk(KERN_INFO
2092 "cs4281: prog_codec(): adc %s %s %s rate=%d DMR0 format=0x%.8x\n",
2093 (format & DMRn_SIZE8) ? "8" : "16",
2094 (format & DMRn_USIGN) ? "Unsigned" : "Signed",
2095 (format & DMRn_MONO) ? "Mono" : "Stereo",
2096 s->prop_adc.rate, format));
2097
2098 s->ena &= ~FMODE_READ; // not capturing data yet
2099 }
2100
2101
2102 if (type == CS_TYPE_DAC) {
2103 temp1 = readl(s->pBA0 + BA0_DCR0);
2104 writel(temp1 | DCRn_MSK, s->pBA0 + BA0_DCR0); // Stop play DMA, if active.
2105
2106 // program sampling rates
2107 // Note, for CS4281, capture & play rates can be set independently.
2108 cs4281_play_rate(s, s->prop_dac.rate);
2109
2110 // program DAC parameters
2111 format = DMRn_DMA | DMRn_AUTO | DMRn_TR_READ;
2112 if (s->prop_dac.
2113 fmt & (AFMT_S16_LE | AFMT_U16_LE | AFMT_S16_BE | AFMT_U16_BE)) { // 16-bit
2114 if (s->prop_dac.fmt & (AFMT_S16_BE | AFMT_U16_BE))
2115 format |= DMRn_BEND; // Big Endian.
2116 if (s->prop_dac.fmt & (AFMT_U16_LE | AFMT_U16_BE))
2117 format |= DMRn_USIGN; // Unsigned.
2118 } else
2119 format |= DMRn_SIZE8 | DMRn_USIGN; // 8-bit, unsigned
2120
2121 if (s->prop_dac.channels < 2)
2122 format |= DMRn_MONO;
2123
2124 writel(format, s->pBA0 + BA0_DMR0);
2125
2126
2127 CS_DBGOUT(CS_PARMS, 2, printk(KERN_INFO
2128 "cs4281: prog_codec(): dac %s %s %s rate=%d DMR0 format=0x%.8x\n",
2129 (format & DMRn_SIZE8) ? "8" : "16",
2130 (format & DMRn_USIGN) ? "Unsigned" : "Signed",
2131 (format & DMRn_MONO) ? "Mono" : "Stereo",
2132 s->prop_dac.rate, format));
2133
2134 s->ena &= ~FMODE_WRITE; // not capturing data yet
2135
2136 }
2137 spin_unlock_irqrestore(&s->lock, flags);
2138 CS_DBGOUT(CS_FUNCTION, 2,
2139 printk(KERN_INFO "cs4281: prog_codec()- \n"));
2140}
2141
2142
2143static int mixer_ioctl(struct cs4281_state *s, unsigned int cmd,
2144 unsigned long arg)
2145{
2146 // Index to mixer_src[] is value of AC97 Input Mux Select Reg.
2147 // Value of array member is recording source Device ID Mask.
2148 static const unsigned int mixer_src[8] = {
2149 SOUND_MASK_MIC, SOUND_MASK_CD, 0, SOUND_MASK_LINE1,
2150 SOUND_MASK_LINE, SOUND_MASK_VOLUME, 0, 0
2151 };
2152 void __user *argp = (void __user *)arg;
2153
2154 // Index of mixtable1[] member is Device ID
2155 // and must be <= SOUND_MIXER_NRDEVICES.
2156 // Value of array member is index into s->mix.vol[]
2157 static const unsigned char mixtable1[SOUND_MIXER_NRDEVICES] = {
2158 [SOUND_MIXER_PCM] = 1, // voice
2159 [SOUND_MIXER_LINE1] = 2, // AUX
2160 [SOUND_MIXER_CD] = 3, // CD
2161 [SOUND_MIXER_LINE] = 4, // Line
2162 [SOUND_MIXER_SYNTH] = 5, // FM
2163 [SOUND_MIXER_MIC] = 6, // Mic
2164 [SOUND_MIXER_SPEAKER] = 7, // Speaker
2165 [SOUND_MIXER_RECLEV] = 8, // Recording level
2166 [SOUND_MIXER_VOLUME] = 9 // Master Volume
2167 };
2168
2169
2170 static const unsigned mixreg[] = {
2171 BA0_AC97_PCM_OUT_VOLUME,
2172 BA0_AC97_AUX_VOLUME,
2173 BA0_AC97_CD_VOLUME,
2174 BA0_AC97_LINE_IN_VOLUME
2175 };
2176 unsigned char l, r, rl, rr, vidx;
2177 unsigned char attentbl[11] =
2178 { 63, 42, 26, 17, 14, 11, 8, 6, 4, 2, 0 };
2179 unsigned temp1;
2180 int i, val;
2181
2182 VALIDATE_STATE(s);
2183 CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
2184 "cs4281: mixer_ioctl(): s=%p cmd=0x%.8x\n", s, cmd));
2185#if CSDEBUG
2186 cs_printioctl(cmd);
2187#endif
2188#if CSDEBUG_INTERFACE
2189
2190 if ((cmd == SOUND_MIXER_CS_GETDBGMASK) ||
2191 (cmd == SOUND_MIXER_CS_SETDBGMASK) ||
2192 (cmd == SOUND_MIXER_CS_GETDBGLEVEL) ||
2193 (cmd == SOUND_MIXER_CS_SETDBGLEVEL) ||
2194 (cmd == SOUND_MIXER_CS_APM))
2195 {
2196 switch (cmd) {
2197
2198 case SOUND_MIXER_CS_GETDBGMASK:
2199 return put_user(cs_debugmask,
2200 (unsigned long __user *) argp);
2201
2202 case SOUND_MIXER_CS_GETDBGLEVEL:
2203 return put_user(cs_debuglevel,
2204 (unsigned long __user *) argp);
2205
2206 case SOUND_MIXER_CS_SETDBGMASK:
2207 if (get_user(val, (unsigned long __user *) argp))
2208 return -EFAULT;
2209 cs_debugmask = val;
2210 return 0;
2211
2212 case SOUND_MIXER_CS_SETDBGLEVEL:
2213 if (get_user(val, (unsigned long __user *) argp))
2214 return -EFAULT;
2215 cs_debuglevel = val;
2216 return 0;
2217#ifndef NOT_CS4281_PM
2218 case SOUND_MIXER_CS_APM:
2219 if (get_user(val, (unsigned long __user *) argp))
2220 return -EFAULT;
2221 if(val == CS_IOCTL_CMD_SUSPEND)
2222 cs4281_suspend(s);
2223 else if(val == CS_IOCTL_CMD_RESUME)
2224 cs4281_resume(s);
2225 else
2226 {
2227 CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
2228 "cs4281: mixer_ioctl(): invalid APM cmd (%d)\n",
2229 val));
2230 }
2231 return 0;
2232#endif
2233 default:
2234 CS_DBGOUT(CS_ERROR, 1, printk(KERN_INFO
2235 "cs4281: mixer_ioctl(): ERROR unknown debug cmd\n"));
2236 return 0;
2237 }
2238 }
2239#endif
2240
2241 if (cmd == SOUND_MIXER_PRIVATE1) {
2242 // enable/disable/query mixer preamp
2243 if (get_user(val, (int __user *) argp))
2244 return -EFAULT;
2245 if (val != -1) {
2246 cs4281_read_ac97(s, BA0_AC97_MIC_VOLUME, &temp1);
2247 temp1 = val ? (temp1 | 0x40) : (temp1 & 0xffbf);
2248 cs4281_write_ac97(s, BA0_AC97_MIC_VOLUME, temp1);
2249 }
2250 cs4281_read_ac97(s, BA0_AC97_MIC_VOLUME, &temp1);
2251 val = (temp1 & 0x40) ? 1 : 0;
2252 return put_user(val, (int __user *) argp);
2253 }
2254 if (cmd == SOUND_MIXER_PRIVATE2) {
2255 // enable/disable/query spatializer
2256 if (get_user(val, (int __user *)argp))
2257 return -EFAULT;
2258 if (val != -1) {
2259 temp1 = (val & 0x3f) >> 2;
2260 cs4281_write_ac97(s, BA0_AC97_3D_CONTROL, temp1);
2261 cs4281_read_ac97(s, BA0_AC97_GENERAL_PURPOSE,
2262 &temp1);
2263 cs4281_write_ac97(s, BA0_AC97_GENERAL_PURPOSE,
2264 temp1 | 0x2000);
2265 }
2266 cs4281_read_ac97(s, BA0_AC97_3D_CONTROL, &temp1);
2267 return put_user((temp1 << 2) | 3, (int __user *)argp);
2268 }
2269 if (cmd == SOUND_MIXER_INFO) {
2270 mixer_info info;
2271 strlcpy(info.id, "CS4281", sizeof(info.id));
2272 strlcpy(info.name, "Crystal CS4281", sizeof(info.name));
2273 info.modify_counter = s->mix.modcnt;
2274 if (copy_to_user(argp, &info, sizeof(info)))
2275 return -EFAULT;
2276 return 0;
2277 }
2278 if (cmd == SOUND_OLD_MIXER_INFO) {
2279 _old_mixer_info info;
2280 strlcpy(info.id, "CS4281", sizeof(info.id));
2281 strlcpy(info.name, "Crystal CS4281", sizeof(info.name));
2282 if (copy_to_user(argp, &info, sizeof(info)))
2283 return -EFAULT;
2284 return 0;
2285 }
2286 if (cmd == OSS_GETVERSION)
2287 return put_user(SOUND_VERSION, (int __user *) argp);
2288
2289 if (_IOC_TYPE(cmd) != 'M' || _SIOC_SIZE(cmd) != sizeof(int))
2290 return -EINVAL;
2291
2292 // If ioctl has only the SIOC_READ bit(bit 31)
2293 // on, process the only-read commands.
2294 if (_SIOC_DIR(cmd) == _SIOC_READ) {
2295 switch (_IOC_NR(cmd)) {
2296 case SOUND_MIXER_RECSRC: // Arg contains a bit for each recording source
2297 cs4281_read_ac97(s, BA0_AC97_RECORD_SELECT, &temp1);
2298 return put_user(mixer_src[temp1&7], (int __user *)argp);
2299
2300 case SOUND_MIXER_DEVMASK: // Arg contains a bit for each supported device
2301 return put_user(SOUND_MASK_PCM | SOUND_MASK_SYNTH |
2302 SOUND_MASK_CD | SOUND_MASK_LINE |
2303 SOUND_MASK_LINE1 | SOUND_MASK_MIC |
2304 SOUND_MASK_VOLUME |
2305 SOUND_MASK_RECLEV |
2306 SOUND_MASK_SPEAKER, (int __user *)argp);
2307
2308 case SOUND_MIXER_RECMASK: // Arg contains a bit for each supported recording source
2309 return put_user(SOUND_MASK_LINE | SOUND_MASK_MIC |
2310 SOUND_MASK_CD | SOUND_MASK_VOLUME |
2311 SOUND_MASK_LINE1, (int __user *) argp);
2312
2313 case SOUND_MIXER_STEREODEVS: // Mixer channels supporting stereo
2314 return put_user(SOUND_MASK_PCM | SOUND_MASK_SYNTH |
2315 SOUND_MASK_CD | SOUND_MASK_LINE |
2316 SOUND_MASK_LINE1 | SOUND_MASK_MIC |
2317 SOUND_MASK_VOLUME |
2318 SOUND_MASK_RECLEV, (int __user *)argp);
2319
2320 case SOUND_MIXER_CAPS:
2321 return put_user(SOUND_CAP_EXCL_INPUT, (int __user *)argp);
2322
2323 default:
2324 i = _IOC_NR(cmd);
2325 if (i >= SOUND_MIXER_NRDEVICES
2326 || !(vidx = mixtable1[i]))
2327 return -EINVAL;
2328 return put_user(s->mix.vol[vidx - 1], (int __user *)argp);
2329 }
2330 }
2331 // If ioctl doesn't have both the SIOC_READ and
2332 // the SIOC_WRITE bit set, return invalid.
2333 if (_SIOC_DIR(cmd) != (_SIOC_READ | _SIOC_WRITE))
2334 return -EINVAL;
2335
2336 // Increment the count of volume writes.
2337 s->mix.modcnt++;
2338
2339 // Isolate the command; it must be a write.
2340 switch (_IOC_NR(cmd)) {
2341
2342 case SOUND_MIXER_RECSRC: // Arg contains a bit for each recording source
2343 if (get_user(val, (int __user *)argp))
2344 return -EFAULT;
2345 i = hweight32(val); // i = # bits on in val.
2346 if (i != 1) // One & only 1 bit must be on.
2347 return 0;
2348 for (i = 0; i < sizeof(mixer_src) / sizeof(int); i++) {
2349 if (val == mixer_src[i]) {
2350 temp1 = (i << 8) | i;
2351 cs4281_write_ac97(s,
2352 BA0_AC97_RECORD_SELECT,
2353 temp1);
2354 return 0;
2355 }
2356 }
2357 return 0;
2358
2359 case SOUND_MIXER_VOLUME:
2360 if (get_user(val, (int __user *)argp))
2361 return -EFAULT;
2362 l = val & 0xff;
2363 if (l > 100)
2364 l = 100; // Max soundcard.h vol is 100.
2365 if (l < 6) {
2366 rl = 63;
2367 l = 0;
2368 } else
2369 rl = attentbl[(10 * l) / 100]; // Convert 0-100 vol to 63-0 atten.
2370
2371 r = (val >> 8) & 0xff;
2372 if (r > 100)
2373 r = 100; // Max right volume is 100, too
2374 if (r < 6) {
2375 rr = 63;
2376 r = 0;
2377 } else
2378 rr = attentbl[(10 * r) / 100]; // Convert volume to attenuation.
2379
2380 if ((rl > 60) && (rr > 60)) // If both l & r are 'low',
2381 temp1 = 0x8000; // turn on the mute bit.
2382 else
2383 temp1 = 0;
2384
2385 temp1 |= (rl << 8) | rr;
2386
2387 cs4281_write_ac97(s, BA0_AC97_MASTER_VOLUME, temp1);
2388 cs4281_write_ac97(s, BA0_AC97_HEADPHONE_VOLUME, temp1);
2389
2390#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
2391 s->mix.vol[8] = ((unsigned int) r << 8) | l;
2392#else
2393 s->mix.vol[8] = val;
2394#endif
2395 return put_user(s->mix.vol[8], (int __user *)argp);
2396
2397 case SOUND_MIXER_SPEAKER:
2398 if (get_user(val, (int __user *)argp))
2399 return -EFAULT;
2400 l = val & 0xff;
2401 if (l > 100)
2402 l = 100;
2403 if (l < 3) {
2404 rl = 0;
2405 l = 0;
2406 } else {
2407 rl = (l * 2 - 5) / 13; // Convert 0-100 range to 0-15.
2408 l = (rl * 13 + 5) / 2;
2409 }
2410
2411 if (rl < 3) {
2412 temp1 = 0x8000;
2413 rl = 0;
2414 } else
2415 temp1 = 0;
2416 rl = 15 - rl; // Convert volume to attenuation.
2417 temp1 |= rl << 1;
2418 cs4281_write_ac97(s, BA0_AC97_PC_BEEP_VOLUME, temp1);
2419
2420#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
2421 s->mix.vol[6] = l << 8;
2422#else
2423 s->mix.vol[6] = val;
2424#endif
2425 return put_user(s->mix.vol[6], (int __user *)argp);
2426
2427 case SOUND_MIXER_RECLEV:
2428 if (get_user(val, (int __user *)argp))
2429 return -EFAULT;
2430 l = val & 0xff;
2431 if (l > 100)
2432 l = 100;
2433 r = (val >> 8) & 0xff;
2434 if (r > 100)
2435 r = 100;
2436 rl = (l * 2 - 5) / 13; // Convert 0-100 scale to 0-15.
2437 rr = (r * 2 - 5) / 13;
2438 if (rl < 3 && rr < 3)
2439 temp1 = 0x8000;
2440 else
2441 temp1 = 0;
2442
2443 temp1 = temp1 | (rl << 8) | rr;
2444 cs4281_write_ac97(s, BA0_AC97_RECORD_GAIN, temp1);
2445
2446#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
2447 s->mix.vol[7] = ((unsigned int) r << 8) | l;
2448#else
2449 s->mix.vol[7] = val;
2450#endif
2451 return put_user(s->mix.vol[7], (int __user *)argp);
2452
2453 case SOUND_MIXER_MIC:
2454 if (get_user(val, (int __user *)argp))
2455 return -EFAULT;
2456 l = val & 0xff;
2457 if (l > 100)
2458 l = 100;
2459 if (l < 1) {
2460 l = 0;
2461 rl = 0;
2462 } else {
2463 rl = ((unsigned) l * 5 - 4) / 16; // Convert 0-100 range to 0-31.
2464 l = (rl * 16 + 4) / 5;
2465 }
2466 cs4281_read_ac97(s, BA0_AC97_MIC_VOLUME, &temp1);
2467 temp1 &= 0x40; // Isolate 20db gain bit.
2468 if (rl < 3) {
2469 temp1 |= 0x8000;
2470 rl = 0;
2471 }
2472 rl = 31 - rl; // Convert volume to attenuation.
2473 temp1 |= rl;
2474 cs4281_write_ac97(s, BA0_AC97_MIC_VOLUME, temp1);
2475
2476#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
2477 s->mix.vol[5] = val << 8;
2478#else
2479 s->mix.vol[5] = val;
2480#endif
2481 return put_user(s->mix.vol[5], (int __user *)argp);
2482
2483
2484 case SOUND_MIXER_SYNTH:
2485 if (get_user(val, (int __user *)argp))
2486 return -EFAULT;
2487 l = val & 0xff;
2488 if (l > 100)
2489 l = 100;
2490 if (get_user(val, (int __user *)argp))
2491 return -EFAULT;
2492 r = (val >> 8) & 0xff;
2493 if (r > 100)
2494 r = 100;
2495 rl = (l * 2 - 11) / 3; // Convert 0-100 range to 0-63.
2496 rr = (r * 2 - 11) / 3;
2497 if (rl < 3) // If l is low, turn on
2498 temp1 = 0x0080; // the mute bit.
2499 else
2500 temp1 = 0;
2501
2502 rl = 63 - rl; // Convert vol to attenuation.
2503 writel(temp1 | rl, s->pBA0 + BA0_FMLVC);
2504 if (rr < 3) // If rr is low, turn on
2505 temp1 = 0x0080; // the mute bit.
2506 else
2507 temp1 = 0;
2508 rr = 63 - rr; // Convert vol to attenuation.
2509 writel(temp1 | rr, s->pBA0 + BA0_FMRVC);
2510
2511#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
2512 s->mix.vol[4] = (r << 8) | l;
2513#else
2514 s->mix.vol[4] = val;
2515#endif
2516 return put_user(s->mix.vol[4], (int __user *)argp);
2517
2518
2519 default:
2520 CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO
2521 "cs4281: mixer_ioctl(): default\n"));
2522
2523 i = _IOC_NR(cmd);
2524 if (i >= SOUND_MIXER_NRDEVICES || !(vidx = mixtable1[i]))
2525 return -EINVAL;
2526 if (get_user(val, (int __user *)argp))
2527 return -EFAULT;
2528 l = val & 0xff;
2529 if (l > 100)
2530 l = 100;
2531 if (l < 1) {
2532 l = 0;
2533 rl = 31;
2534 } else
2535 rl = (attentbl[(l * 10) / 100]) >> 1;
2536
2537 r = (val >> 8) & 0xff;
2538 if (r > 100)
2539 r = 100;
2540 if (r < 1) {
2541 r = 0;
2542 rr = 31;
2543 } else
2544 rr = (attentbl[(r * 10) / 100]) >> 1;
2545 if ((rl > 30) && (rr > 30))
2546 temp1 = 0x8000;
2547 else
2548 temp1 = 0;
2549 temp1 = temp1 | (rl << 8) | rr;
2550 cs4281_write_ac97(s, mixreg[vidx - 1], temp1);
2551
2552#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
2553 s->mix.vol[vidx - 1] = ((unsigned int) r << 8) | l;
2554#else
2555 s->mix.vol[vidx - 1] = val;
2556#endif
2557#ifndef NOT_CS4281_PM
2558 CS_DBGOUT(CS_PM, 9, printk(KERN_INFO
2559 "write ac97 mixreg[%d]=0x%x mix.vol[]=0x%x\n",
2560 vidx-1,temp1,s->mix.vol[vidx-1]));
2561#endif
2562 return put_user(s->mix.vol[vidx - 1], (int __user *)argp);
2563 }
2564}
2565
2566
2567// ---------------------------------------------------------------------
2568
2569static int cs4281_open_mixdev(struct inode *inode, struct file *file)
2570{
2571 unsigned int minor = iminor(inode);
2572 struct cs4281_state *s=NULL;
2573 struct list_head *entry;
2574
2575 CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
2576 printk(KERN_INFO "cs4281: cs4281_open_mixdev()+\n"));
2577
2578 list_for_each(entry, &cs4281_devs)
2579 {
2580 s = list_entry(entry, struct cs4281_state, list);
2581 if(s->dev_mixer == minor)
2582 break;
2583 }
2584 if (!s)
2585 {
2586 CS_DBGOUT(CS_FUNCTION | CS_OPEN | CS_ERROR, 2,
2587 printk(KERN_INFO "cs4281: cs4281_open_mixdev()- -ENODEV\n"));
2588 return -ENODEV;
2589 }
2590 VALIDATE_STATE(s);
2591 file->private_data = s;
2592
2593 CS_DBGOUT(CS_FUNCTION | CS_OPEN, 4,
2594 printk(KERN_INFO "cs4281: cs4281_open_mixdev()- 0\n"));
2595
2596 return nonseekable_open(inode, file);
2597}
2598
2599
2600static int cs4281_release_mixdev(struct inode *inode, struct file *file)
2601{
2602 struct cs4281_state *s =
2603 (struct cs4281_state *) file->private_data;
2604
2605 VALIDATE_STATE(s);
2606 return 0;
2607}
2608
2609
2610static int cs4281_ioctl_mixdev(struct inode *inode, struct file *file,
2611 unsigned int cmd, unsigned long arg)
2612{
2613 return mixer_ioctl((struct cs4281_state *) file->private_data, cmd,
2614 arg);
2615}
2616
2617
// ******************************************************************************************
// Mixer file operations struct.
// ******************************************************************************************
// VFS entry points for the /dev/mixer device node; registered with
// register_sound_mixer() elsewhere in this file.
static /*const */ struct file_operations cs4281_mixer_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,	// mixer device is not seekable
	.ioctl = cs4281_ioctl_mixdev,
	.open = cs4281_open_mixdev,
	.release = cs4281_release_mixdev,
};
2628
2629// ---------------------------------------------------------------------
2630
2631
/*
 * Block until the capture buffer's unread byte count drops to zero.
 *
 * Sleeps on the ADC wait queue, re-checking the count (under s->lock)
 * each time it is woken.  The sleep timeout is a heuristic: roughly
 * 1.5x the time the remaining data plus one fragment should take at
 * the current sample rate, halved for 16-bit and again for stereo.
 *
 * Returns 0 when drained (no-op for mmap()ed buffers), -EBUSY if
 * @nonblock and data remains, -ERESTARTSYS if interrupted by a signal.
 */
static int drain_adc(struct cs4281_state *s, int nonblock)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int count;
	unsigned tmo;

	if (s->dma_adc.mapped)
		return 0;
	add_wait_queue(&s->dma_adc.wait, &wait);
	for (;;) {
		// set state before sampling count so a wakeup between the
		// check and schedule_timeout() is not lost
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&s->lock, flags);
		count = s->dma_adc.count;
		CS_DBGOUT(CS_FUNCTION, 2,
			  printk(KERN_INFO "cs4281: drain_adc() %d\n", count));
		spin_unlock_irqrestore(&s->lock, flags);
		if (count <= 0) {
			CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO
				"cs4281: drain_adc() count<0\n"));
			break;
		}
		if (signal_pending(current))
			break;
		if (nonblock) {
			remove_wait_queue(&s->dma_adc.wait, &wait);
			current->state = TASK_RUNNING;
			return -EBUSY;
		}
		// heuristic timeout: ~1.5x the expected drain time
		tmo =
		    3 * HZ * (count +
			      s->dma_adc.fragsize) / 2 / s->prop_adc.rate;
		if (s->prop_adc.fmt & (AFMT_S16_LE | AFMT_U16_LE))
			tmo >>= 1;
		if (s->prop_adc.channels > 1)
			tmo >>= 1;
		if (!schedule_timeout(tmo + 1))
			printk(KERN_DEBUG "cs4281: dma timed out??\n");
	}
	remove_wait_queue(&s->dma_adc.wait, &wait);
	current->state = TASK_RUNNING;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
2677
/*
 * Block until all queued playback data has been consumed by the DAC.
 *
 * Mirror image of drain_adc(): sleeps on the DAC wait queue until the
 * unplayed byte count (sampled under s->lock) reaches zero, using the
 * same ~1.5x-expected-time heuristic timeout.
 *
 * Returns 0 when drained (no-op for mmap()ed buffers), -EBUSY if
 * @nonblock and data remains, -ERESTARTSYS if interrupted by a signal.
 */
static int drain_dac(struct cs4281_state *s, int nonblock)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int count;
	unsigned tmo;

	if (s->dma_dac.mapped)
		return 0;
	add_wait_queue(&s->dma_dac.wait, &wait);
	for (;;) {
		// set state before sampling count so a wakeup between the
		// check and schedule_timeout() is not lost
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&s->lock, flags);
		count = s->dma_dac.count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (count <= 0)
			break;
		if (signal_pending(current))
			break;
		if (nonblock) {
			remove_wait_queue(&s->dma_dac.wait, &wait);
			current->state = TASK_RUNNING;
			return -EBUSY;
		}
		// heuristic timeout: ~1.5x the expected play-out time
		tmo =
		    3 * HZ * (count +
			      s->dma_dac.fragsize) / 2 / s->prop_dac.rate;
		if (s->prop_dac.fmt & (AFMT_S16_LE | AFMT_U16_LE))
			tmo >>= 1;
		if (s->prop_dac.channels > 1)
			tmo >>= 1;
		if (!schedule_timeout(tmo + 1))
			printk(KERN_DEBUG "cs4281: dma timed out??\n");
	}
	remove_wait_queue(&s->dma_dac.wait, &wait);
	current->state = TASK_RUNNING;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
2718
2719//****************************************************************************
2720//
2721// CopySamples copies 16-bit stereo samples from the source to the
2722// destination, possibly converting down to either 8-bit or mono or both.
2723// count specifies the number of output bytes to write.
2724//
2725// Arguments:
2726//
2727// dst - Pointer to a destination buffer.
2728// src - Pointer to a source buffer
2729// count - The number of bytes to copy into the destination buffer.
2730// iChannels - Stereo - 2
2731// Mono - 1
2732// fmt - AFMT_xxx (soundcard.h formats)
2733//
2734// NOTES: only call this routine for conversion to 8bit from 16bit
2735//
2736//****************************************************************************
2737static void CopySamples(char *dst, char *src, int count, int iChannels,
2738 unsigned fmt)
2739{
2740
2741 unsigned short *psSrc;
2742 long lAudioSample;
2743
2744 CS_DBGOUT(CS_FUNCTION, 2,
2745 printk(KERN_INFO "cs4281: CopySamples()+ "));
2746 CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
2747 " dst=%p src=%p count=%d iChannels=%d fmt=0x%x\n",
2748 dst, src, (unsigned) count, (unsigned) iChannels, (unsigned) fmt));
2749
2750 // Gershwin does format conversion in hardware so normally
2751 // we don't do any host based coversion. The data formatter
2752 // truncates 16 bit data to 8 bit and that causes some hiss.
2753 // We have already forced the HW to do 16 bit sampling and
2754 // 2 channel so that we can use software to round instead
2755 // of truncate
2756
2757 //
2758 // See if the data should be output as 8-bit unsigned stereo.
2759 // or if the data should be output at 8-bit unsigned mono.
2760 //
2761 if ( ((iChannels == 2) && (fmt & AFMT_U8)) ||
2762 ((iChannels == 1) && (fmt & AFMT_U8)) ) {
2763 //
2764 // Convert each 16-bit unsigned stereo sample to 8-bit unsigned
2765 // stereo using rounding.
2766 //
2767 psSrc = (unsigned short *) src;
2768 count = count / 2;
2769 while (count--) {
2770 lAudioSample = (long) psSrc[count] + (long) 0x80;
2771 if (lAudioSample > 0xffff) {
2772 lAudioSample = 0xffff;
2773 }
2774 dst[count] = (char) (lAudioSample >> 8);
2775 }
2776 }
2777 //
2778 // check for 8-bit signed stereo.
2779 //
2780 else if ((iChannels == 2) && (fmt & AFMT_S8)) {
2781 //
2782 // Convert each 16-bit stereo sample to 8-bit stereo using rounding.
2783 //
2784 psSrc = (short *) src;
2785 while (count--) {
2786 lAudioSample =
2787 (((long) psSrc[0] + (long) psSrc[1]) / 2);
2788 psSrc += 2;
2789 *dst++ = (char) ((short) lAudioSample >> 8);
2790 }
2791 }
2792 //
2793 // Otherwise, the data should be output as 8-bit signed mono.
2794 //
2795 else if ((iChannels == 1) && (fmt & AFMT_S8)) {
2796 //
2797 // Convert each 16-bit signed mono sample to 8-bit signed mono
2798 // using rounding.
2799 //
2800 psSrc = (short *) src;
2801 count = count / 2;
2802 while (count--) {
2803 lAudioSample =
2804 (((long) psSrc[0] + (long) psSrc[1]) / 2);
2805 if (lAudioSample > 0x7fff) {
2806 lAudioSample = 0x7fff;
2807 }
2808 psSrc += 2;
2809 *dst++ = (char) ((short) lAudioSample >> 8);
2810 }
2811 }
2812}
2813
2814//
2815// cs_copy_to_user()
2816// replacement for the standard copy_to_user, to allow for a conversion from
2817// 16 bit to 8 bit if the record conversion is active. the cs4281 has some
2818// issues with 8 bit capture, so the driver always captures data in 16 bit
2819// and then if the user requested 8 bit, converts from 16 to 8 bit.
2820//
static unsigned cs_copy_to_user(struct cs4281_state *s, void __user *dest,
				unsigned *hwsrc, unsigned cnt,
				unsigned *copied)
{
	// Copy captured data to userspace, converting 16->8 bit via
	// s->tmpbuff when s->conversion is set (see header comment above).
	// On success *copied holds the bytes placed in dest and 0 is
	// returned.  NOTE(review): declared unsigned yet returns -EFAULT
	// on fault — callers appear to test only for non-zero; confirm
	// before changing the return type.
	void *src = hwsrc;	//default to the standard destination buffer addr

	CS_DBGOUT(CS_FUNCTION, 6, printk(KERN_INFO
		"cs_copy_to_user()+ fmt=0x%x fmt_o=0x%x cnt=%d dest=%p\n",
			s->prop_adc.fmt, s->prop_adc.fmt_original,
			(unsigned) cnt, dest));

	// never copy more than one full buffer's worth
	if (cnt > s->dma_adc.dmasize) {
		cnt = s->dma_adc.dmasize;
	}
	if (!cnt) {
		*copied = 0;
		return 0;
	}
	if (s->conversion) {
		// no scratch buffer: silently pretend half the bytes were
		// copied (16-bit source -> 8-bit output halves the size)
		if (!s->tmpbuff) {
			*copied = cnt / 2;
			return 0;
		}
		CopySamples(s->tmpbuff, (void *) hwsrc, cnt,
			    (unsigned) s->prop_adc.channels,
			    s->prop_adc.fmt_original);
		src = s->tmpbuff;
		cnt = cnt / 2;
	}

	if (copy_to_user(dest, src, cnt)) {
		*copied = 0;
		return -EFAULT;
	}
	*copied = cnt;
	CS_DBGOUT(CS_FUNCTION, 2, printk(KERN_INFO
		"cs4281: cs_copy_to_user()- copied bytes is %d \n", cnt));
	return 0;
}
2860
2861// ---------------------------------------------------------------------
2862
/*
 * read() handler for the audio capture device.
 *
 * Copies captured data from the DMA ring buffer to userspace, sleeping
 * on the ADC wait queue when the ring is empty.  With 16->8 bit
 * conversion active (s->conversion), twice as many ring bytes are
 * consumed as are returned to the user.  Not supported on mmap()ed
 * buffers (-ENXIO).  Returns bytes copied, or a negative errno
 * (-EFAULT, -EAGAIN for O_NONBLOCK, -ERESTARTSYS on signal).
 */
static ssize_t cs4281_read(struct file *file, char __user *buffer, size_t count,
			   loff_t * ppos)
{
	struct cs4281_state *s =
	    (struct cs4281_state *) file->private_data;
	ssize_t ret;
	unsigned long flags;
	unsigned swptr;
	int cnt;
	unsigned copied = 0;

	CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2,
		  printk(KERN_INFO "cs4281: cs4281_read()+ %Zu \n", count));

	VALIDATE_STATE(s);
	if (s->dma_adc.mapped)
		return -ENXIO;
	if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
		return ret;
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	ret = 0;
//
// "count" is the amount of bytes to read (from app), is decremented each loop
// by the amount of bytes that have been returned to the user buffer.
// "cnt" is the running total of each read from the buffer (changes each loop)
// "buffer" points to the app's buffer
// "ret" keeps a running total of the amount of bytes that have been copied
// to the user buffer.
// "copied" is the total bytes copied into the user buffer for each loop.
//
	while (count > 0) {
		CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
			"_read() count>0 count=%Zu .count=%d .swptr=%d .hwptr=%d \n",
				count, s->dma_adc.count,
				s->dma_adc.swptr, s->dma_adc.hwptr));
		spin_lock_irqsave(&s->lock, flags);

		// get the current copy point of the sw buffer
		swptr = s->dma_adc.swptr;

		// cnt is the amount of unread bytes from the end of the
		// hw buffer to the current sw pointer
		cnt = s->dma_adc.dmasize - swptr;

		// dma_adc.count is the current total bytes that have not been read.
		// if the amount of unread bytes from the current sw pointer to the
		// end of the buffer is greater than the current total bytes that
		// have not been read, then set the "cnt" (unread bytes) to the
		// amount of unread bytes.

		if (s->dma_adc.count < cnt)
			cnt = s->dma_adc.count;
		spin_unlock_irqrestore(&s->lock, flags);
		//
		// if we are converting from 8/16 then we need to copy
		// twice the number of 16 bit bytes then 8 bit bytes.
		//
		if (s->conversion) {
			if (cnt > (count * 2))
				cnt = (count * 2);
		} else {
			if (cnt > count)
				cnt = count;
		}
		//
		// "cnt" NOW is the smaller of the amount that will be read,
		// and the amount that is requested in this read (or partial).
		// if there are no bytes in the buffer to read, then start the
		// ADC and wait for the interrupt handler to wake us up.
		//
		if (cnt <= 0) {

			// start up the dma engine and then continue back to the top of
			// the loop when wake up occurs.
			start_adc(s);
			if (file->f_flags & O_NONBLOCK)
				return ret ? ret : -EAGAIN;
			interruptible_sleep_on(&s->dma_adc.wait);
			if (signal_pending(current))
				return ret ? ret : -ERESTARTSYS;
			continue;
		}
		// there are bytes in the buffer to read.
		// copy from the hw buffer over to the user buffer.
		// user buffer is designated by "buffer"
		// virtual address to copy from is rawbuf+swptr
		// the "cnt" is the number of bytes to read.

		CS_DBGOUT(CS_WAVE_READ, 2, printk(KERN_INFO
			"_read() copy_to cnt=%d count=%Zu ", cnt, count));
		CS_DBGOUT(CS_WAVE_READ, 8, printk(KERN_INFO
			" .dmasize=%d .count=%d buffer=%p ret=%Zd\n",
				s->dma_adc.dmasize, s->dma_adc.count, buffer, ret));

		if (cs_copy_to_user
		    (s, buffer, s->dma_adc.rawbuf + swptr, cnt, &copied))
			return ret ? ret : -EFAULT;
		// advance the ring by the raw bytes consumed (cnt), but
		// advance the user buffer by the bytes delivered (copied)
		swptr = (swptr + cnt) % s->dma_adc.dmasize;
		spin_lock_irqsave(&s->lock, flags);
		s->dma_adc.swptr = swptr;
		s->dma_adc.count -= cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= copied;
		buffer += copied;
		ret += copied;
		start_adc(s);
	}
	CS_DBGOUT(CS_FUNCTION | CS_WAVE_READ, 2,
		  printk(KERN_INFO "cs4281: cs4281_read()- %Zd\n", ret));
	return ret;
}
2975
2976
/*
 * cs4281_write - OSS /dev/dsp write: copy PCM data from user space into the
 * DAC DMA ring buffer, sleeping for the interrupt handler to drain data when
 * the ring is full (unless O_NONBLOCK).  Returns the number of bytes
 * accepted, or a negative errno (partial counts are returned in preference
 * to an error once any data has been queued).
 */
static ssize_t cs4281_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t * ppos)
{
	struct cs4281_state *s =
	    (struct cs4281_state *) file->private_data;
	ssize_t ret;
	unsigned long flags;
	unsigned swptr, hwptr, busaddr;
	int cnt;

	CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2,
		  printk(KERN_INFO "cs4281: cs4281_write()+ count=%Zu\n",
			 count));
	VALIDATE_STATE(s);

	// an mmap()ed playback buffer is owned by the mapper; write() is invalid
	if (s->dma_dac.mapped)
		return -ENXIO;
	if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
		return ret;
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;
	ret = 0;
	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		// negative count means the hardware consumed more than was
		// queued; resynchronize the sw pointer to the hw pointer.
		if (s->dma_dac.count < 0) {
			s->dma_dac.count = 0;
			s->dma_dac.swptr = s->dma_dac.hwptr;
		}
		// after an underrun, re-read the current DMA position from
		// the controller (BA0_DCA0 holds a bus address) and restart
		// both pointers there.
		if (s->dma_dac.underrun) {
			s->dma_dac.underrun = 0;
			hwptr = readl(s->pBA0 + BA0_DCA0);
			busaddr = virt_to_bus(s->dma_dac.rawbuf);
			hwptr -= (unsigned) busaddr;
			s->dma_dac.swptr = s->dma_dac.hwptr = hwptr;
		}
		swptr = s->dma_dac.swptr;
		// contiguous free space from swptr to the end of the ring,
		// clipped so the queued byte count never exceeds dmasize
		cnt = s->dma_dac.dmasize - swptr;
		if (s->dma_dac.count + cnt > s->dma_dac.dmasize)
			cnt = s->dma_dac.dmasize - s->dma_dac.count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		// ring full: kick the DAC and wait for the irq handler to
		// make room, or bail out in non-blocking mode
		if (cnt <= 0) {
			start_dac(s);
			if (file->f_flags & O_NONBLOCK)
				return ret ? ret : -EAGAIN;
			interruptible_sleep_on(&s->dma_dac.wait);
			if (signal_pending(current))
				return ret ? ret : -ERESTARTSYS;
			continue;
		}
		if (copy_from_user(s->dma_dac.rawbuf + swptr, buffer, cnt))
			return ret ? ret : -EFAULT;
		swptr = (swptr + cnt) % s->dma_dac.dmasize;
		spin_lock_irqsave(&s->lock, flags);
		s->dma_dac.swptr = swptr;
		s->dma_dac.count += cnt;
		s->dma_dac.endcleared = 0;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		start_dac(s);
	}
	CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE, 2,
		  printk(KERN_INFO "cs4281: cs4281_write()- %Zd\n", ret));
	return ret;
}
3045
3046
3047static unsigned int cs4281_poll(struct file *file,
3048 struct poll_table_struct *wait)
3049{
3050 struct cs4281_state *s =
3051 (struct cs4281_state *) file->private_data;
3052 unsigned long flags;
3053 unsigned int mask = 0;
3054
3055 CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4,
3056 printk(KERN_INFO "cs4281: cs4281_poll()+\n"));
3057 VALIDATE_STATE(s);
3058 if (file->f_mode & FMODE_WRITE) {
3059 CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4,
3060 printk(KERN_INFO
3061 "cs4281: cs4281_poll() wait on FMODE_WRITE\n"));
3062 if(!s->dma_dac.ready && prog_dmabuf_dac(s))
3063 return 0;
3064 poll_wait(file, &s->dma_dac.wait, wait);
3065 }
3066 if (file->f_mode & FMODE_READ) {
3067 CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4,
3068 printk(KERN_INFO
3069 "cs4281: cs4281_poll() wait on FMODE_READ\n"));
3070 if(!s->dma_dac.ready && prog_dmabuf_adc(s))
3071 return 0;
3072 poll_wait(file, &s->dma_adc.wait, wait);
3073 }
3074 spin_lock_irqsave(&s->lock, flags);
3075 cs4281_update_ptr(s,CS_FALSE);
3076 if (file->f_mode & FMODE_WRITE) {
3077 if (s->dma_dac.mapped) {
3078 if (s->dma_dac.count >=
3079 (signed) s->dma_dac.fragsize) {
3080 if (s->dma_dac.wakeup)
3081 mask |= POLLOUT | POLLWRNORM;
3082 else
3083 mask = 0;
3084 s->dma_dac.wakeup = 0;
3085 }
3086 } else {
3087 if ((signed) (s->dma_dac.dmasize/2) >= s->dma_dac.count)
3088 mask |= POLLOUT | POLLWRNORM;
3089 }
3090 } else if (file->f_mode & FMODE_READ) {
3091 if (s->dma_adc.mapped) {
3092 if (s->dma_adc.count >= (signed) s->dma_adc.fragsize)
3093 mask |= POLLIN | POLLRDNORM;
3094 } else {
3095 if (s->dma_adc.count > 0)
3096 mask |= POLLIN | POLLRDNORM;
3097 }
3098 }
3099 spin_unlock_irqrestore(&s->lock, flags);
3100 CS_DBGOUT(CS_FUNCTION | CS_WAVE_WRITE | CS_WAVE_READ, 4,
3101 printk(KERN_INFO "cs4281: cs4281_poll()- 0x%.8x\n",
3102 mask));
3103 return mask;
3104}
3105
3106
/*
 * cs4281_mmap - map a DMA buffer into user space.
 *
 * Although the VM_READ branch below programs the ADC buffer, "db" is then
 * unconditionally overridden with the DAC buffer: only playback mapping is
 * actually supported (see the "only support PLAYBACK" note).  The mapping
 * must start at offset 0 and may not exceed the allocated buffer size.
 * Returns 0 on success or a negative errno.
 */
static int cs4281_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cs4281_state *s =
	    (struct cs4281_state *) file->private_data;
	struct dmabuf *db;
	int ret;
	unsigned long size;

	CS_DBGOUT(CS_FUNCTION | CS_PARMS | CS_OPEN, 4,
		  printk(KERN_INFO "cs4281: cs4281_mmap()+\n"));

	VALIDATE_STATE(s);
	if (vma->vm_flags & VM_WRITE) {
		if ((ret = prog_dmabuf_dac(s)) != 0)
			return ret;
		db = &s->dma_dac;
	} else if (vma->vm_flags & VM_READ) {
		// NOTE(review): this programs the ADC buffer, but db is
		// replaced with &s->dma_dac just below, so the capture
		// buffer is never the one that gets mapped.
		if ((ret = prog_dmabuf_adc(s)) != 0)
			return ret;
		db = &s->dma_adc;
	} else
		return -EINVAL;
//
// only support PLAYBACK for now
//
	db = &s->dma_dac;

	// mapping must cover the buffer from its start, no partial offsets
	if (cs4x_pgoff(vma) != 0)
		return -EINVAL;
	size = vma->vm_end - vma->vm_start;
	if (size > (PAGE_SIZE << db->buforder))
		return -EINVAL;
	if (remap_pfn_range(vma, vma->vm_start,
			     virt_to_phys(db->rawbuf) >> PAGE_SHIFT,
			     size, vma->vm_page_prot))
		return -EAGAIN;
	// mark the buffer as user-mapped: read()/write() become invalid
	db->mapped = 1;

	CS_DBGOUT(CS_FUNCTION | CS_PARMS | CS_OPEN, 4,
		  printk(KERN_INFO "cs4281: cs4281_mmap()- 0 size=%d\n",
			 (unsigned) size));

	return 0;
}
3151
3152
3153static int cs4281_ioctl(struct inode *inode, struct file *file,
3154 unsigned int cmd, unsigned long arg)
3155{
3156 struct cs4281_state *s =
3157 (struct cs4281_state *) file->private_data;
3158 unsigned long flags;
3159 audio_buf_info abinfo;
3160 count_info cinfo;
3161 int val, mapped, ret;
3162 int __user *p = (int __user *)arg;
3163
3164 CS_DBGOUT(CS_FUNCTION, 4, printk(KERN_INFO
3165 "cs4281: cs4281_ioctl(): file=%p cmd=0x%.8x\n", file, cmd));
3166#if CSDEBUG
3167 cs_printioctl(cmd);
3168#endif
3169 VALIDATE_STATE(s);
3170 mapped = ((file->f_mode & FMODE_WRITE) && s->dma_dac.mapped) ||
3171 ((file->f_mode & FMODE_READ) && s->dma_adc.mapped);
3172 switch (cmd) {
3173 case OSS_GETVERSION:
3174 CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
3175 "cs4281: cs4281_ioctl(): SOUND_VERSION=0x%.8x\n",
3176 SOUND_VERSION));
3177 return put_user(SOUND_VERSION, p);
3178
3179 case SNDCTL_DSP_SYNC:
3180 CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO
3181 "cs4281: cs4281_ioctl(): DSP_SYNC\n"));
3182 if (file->f_mode & FMODE_WRITE)
3183 return drain_dac(s,
3184 0 /*file->f_flags & O_NONBLOCK */
3185 );
3186 return 0;
3187
3188 case SNDCTL_DSP_SETDUPLEX:
3189 return 0;
3190
3191 case SNDCTL_DSP_GETCAPS:
3192 return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME |
3193 DSP_CAP_TRIGGER | DSP_CAP_MMAP,
3194 p);
3195
3196 case SNDCTL_DSP_RESET:
3197 CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO
3198 "cs4281: cs4281_ioctl(): DSP_RESET\n"));
3199 if (file->f_mode & FMODE_WRITE) {
3200 stop_dac(s);
3201 synchronize_irq(s->irq);
3202 s->dma_dac.swptr = s->dma_dac.hwptr =
3203 s->dma_dac.count = s->dma_dac.total_bytes =
3204 s->dma_dac.blocks = s->dma_dac.wakeup = 0;
3205 prog_codec(s, CS_TYPE_DAC);
3206 }
3207 if (file->f_mode & FMODE_READ) {
3208 stop_adc(s);
3209 synchronize_irq(s->irq);
3210 s->dma_adc.swptr = s->dma_adc.hwptr =
3211 s->dma_adc.count = s->dma_adc.total_bytes =
3212 s->dma_adc.blocks = s->dma_dac.wakeup = 0;
3213 prog_codec(s, CS_TYPE_ADC);
3214 }
3215 return 0;
3216
3217 case SNDCTL_DSP_SPEED:
3218 if (get_user(val, p))
3219 return -EFAULT;
3220 CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
3221 "cs4281: cs4281_ioctl(): DSP_SPEED val=%d\n", val));
3222 //
3223 // support independent capture and playback channels
3224 // assume that the file mode bit determines the
3225 // direction of the data flow.
3226 //
3227 if (file->f_mode & FMODE_READ) {
3228 if (val >= 0) {
3229 stop_adc(s);
3230 s->dma_adc.ready = 0;
3231 // program sampling rates
3232 if (val > 48000)
3233 val = 48000;
3234 if (val < 6300)
3235 val = 6300;
3236 s->prop_adc.rate = val;
3237 prog_codec(s, CS_TYPE_ADC);
3238 }
3239 }
3240 if (file->f_mode & FMODE_WRITE) {
3241 if (val >= 0) {
3242 stop_dac(s);
3243 s->dma_dac.ready = 0;
3244 // program sampling rates
3245 if (val > 48000)
3246 val = 48000;
3247 if (val < 6300)
3248 val = 6300;
3249 s->prop_dac.rate = val;
3250 prog_codec(s, CS_TYPE_DAC);
3251 }
3252 }
3253
3254 if (file->f_mode & FMODE_WRITE)
3255 val = s->prop_dac.rate;
3256 else if (file->f_mode & FMODE_READ)
3257 val = s->prop_adc.rate;
3258
3259 return put_user(val, p);
3260
3261 case SNDCTL_DSP_STEREO:
3262 if (get_user(val, p))
3263 return -EFAULT;
3264 CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
3265 "cs4281: cs4281_ioctl(): DSP_STEREO val=%d\n", val));
3266 if (file->f_mode & FMODE_READ) {
3267 stop_adc(s);
3268 s->dma_adc.ready = 0;
3269 s->prop_adc.channels = val ? 2 : 1;
3270 prog_codec(s, CS_TYPE_ADC);
3271 }
3272 if (file->f_mode & FMODE_WRITE) {
3273 stop_dac(s);
3274 s->dma_dac.ready = 0;
3275 s->prop_dac.channels = val ? 2 : 1;
3276 prog_codec(s, CS_TYPE_DAC);
3277 }
3278 return 0;
3279
3280 case SNDCTL_DSP_CHANNELS:
3281 if (get_user(val, p))
3282 return -EFAULT;
3283 CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
3284 "cs4281: cs4281_ioctl(): DSP_CHANNELS val=%d\n",
3285 val));
3286 if (val != 0) {
3287 if (file->f_mode & FMODE_READ) {
3288 stop_adc(s);
3289 s->dma_adc.ready = 0;
3290 if (val >= 2)
3291 s->prop_adc.channels = 2;
3292 else
3293 s->prop_adc.channels = 1;
3294 prog_codec(s, CS_TYPE_ADC);
3295 }
3296 if (file->f_mode & FMODE_WRITE) {
3297 stop_dac(s);
3298 s->dma_dac.ready = 0;
3299 if (val >= 2)
3300 s->prop_dac.channels = 2;
3301 else
3302 s->prop_dac.channels = 1;
3303 prog_codec(s, CS_TYPE_DAC);
3304 }
3305 }
3306
3307 if (file->f_mode & FMODE_WRITE)
3308 val = s->prop_dac.channels;
3309 else if (file->f_mode & FMODE_READ)
3310 val = s->prop_adc.channels;
3311
3312 return put_user(val, p);
3313
3314 case SNDCTL_DSP_GETFMTS: // Returns a mask
3315 CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
3316 "cs4281: cs4281_ioctl(): DSP_GETFMT val=0x%.8x\n",
3317 AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 |
3318 AFMT_U8));
3319 return put_user(AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 |
3320 AFMT_U8, p);
3321
3322 case SNDCTL_DSP_SETFMT:
3323 if (get_user(val, p))
3324 return -EFAULT;
3325 CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
3326 "cs4281: cs4281_ioctl(): DSP_SETFMT val=0x%.8x\n",
3327 val));
3328 if (val != AFMT_QUERY) {
3329 if (file->f_mode & FMODE_READ) {
3330 stop_adc(s);
3331 s->dma_adc.ready = 0;
3332 if (val != AFMT_S16_LE
3333 && val != AFMT_U16_LE && val != AFMT_S8
3334 && val != AFMT_U8)
3335 val = AFMT_U8;
3336 s->prop_adc.fmt = val;
3337 s->prop_adc.fmt_original = s->prop_adc.fmt;
3338 prog_codec(s, CS_TYPE_ADC);
3339 }
3340 if (file->f_mode & FMODE_WRITE) {
3341 stop_dac(s);
3342 s->dma_dac.ready = 0;
3343 if (val != AFMT_S16_LE
3344 && val != AFMT_U16_LE && val != AFMT_S8
3345 && val != AFMT_U8)
3346 val = AFMT_U8;
3347 s->prop_dac.fmt = val;
3348 s->prop_dac.fmt_original = s->prop_dac.fmt;
3349 prog_codec(s, CS_TYPE_DAC);
3350 }
3351 } else {
3352 if (file->f_mode & FMODE_WRITE)
3353 val = s->prop_dac.fmt_original;
3354 else if (file->f_mode & FMODE_READ)
3355 val = s->prop_adc.fmt_original;
3356 }
3357 CS_DBGOUT(CS_IOCTL | CS_PARMS, 4, printk(KERN_INFO
3358 "cs4281: cs4281_ioctl(): DSP_SETFMT return val=0x%.8x\n",
3359 val));
3360 return put_user(val, p);
3361
3362 case SNDCTL_DSP_POST:
3363 CS_DBGOUT(CS_IOCTL, 4, printk(KERN_INFO
3364 "cs4281: cs4281_ioctl(): DSP_POST\n"));
3365 return 0;
3366
3367 case SNDCTL_DSP_GETTRIGGER:
3368 val = 0;
3369 if (file->f_mode & s->ena & FMODE_READ)
3370 val |= PCM_ENABLE_INPUT;
3371 if (file->f_mode & s->ena & FMODE_WRITE)
3372 val |= PCM_ENABLE_OUTPUT;
3373 return put_user(val, p);
3374
3375 case SNDCTL_DSP_SETTRIGGER:
3376 if (get_user(val, p))
3377 return -EFAULT;
3378 if (file->f_mode & FMODE_READ) {
3379 if (val & PCM_ENABLE_INPUT) {
3380 if (!s->dma_adc.ready
3381 && (ret = prog_dmabuf_adc(s)))
3382 return ret;
3383 start_adc(s);
3384 } else
3385 stop_adc(s);
3386 }
3387 if (file->f_mode & FMODE_WRITE) {
3388 if (val & PCM_ENABLE_OUTPUT) {
3389 if (!s->dma_dac.ready
3390 && (ret = prog_dmabuf_dac(s)))
3391 return ret;
3392 start_dac(s);
3393 } else
3394 stop_dac(s);
3395 }
3396 return 0;
3397
3398 case SNDCTL_DSP_GETOSPACE:
3399 if (!(file->f_mode & FMODE_WRITE))
3400 return -EINVAL;
3401 if (!s->dma_dac.ready && (val = prog_dmabuf_dac(s)))
3402 return val;
3403 spin_lock_irqsave(&s->lock, flags);
3404 cs4281_update_ptr(s,CS_FALSE);
3405 abinfo.fragsize = s->dma_dac.fragsize;
3406 if (s->dma_dac.mapped)
3407 abinfo.bytes = s->dma_dac.dmasize;
3408 else
3409 abinfo.bytes =
3410 s->dma_dac.dmasize - s->dma_dac.count;
3411 abinfo.fragstotal = s->dma_dac.numfrag;
3412 abinfo.fragments = abinfo.bytes >> s->dma_dac.fragshift;
3413 CS_DBGOUT(CS_FUNCTION | CS_PARMS, 4, printk(KERN_INFO
3414 "cs4281: cs4281_ioctl(): GETOSPACE .fragsize=%d .bytes=%d .fragstotal=%d .fragments=%d\n",
3415 abinfo.fragsize,abinfo.bytes,abinfo.fragstotal,
3416 abinfo.fragments));
3417 spin_unlock_irqrestore(&s->lock, flags);
3418 return copy_to_user(p, &abinfo,
3419 sizeof(abinfo)) ? -EFAULT : 0;
3420
3421 case SNDCTL_DSP_GETISPACE:
3422 if (!(file->f_mode & FMODE_READ))
3423 return -EINVAL;
3424 if (!s->dma_adc.ready && (val = prog_dmabuf_adc(s)))
3425 return val;
3426 spin_lock_irqsave(&s->lock, flags);
3427 cs4281_update_ptr(s,CS_FALSE);
3428 if (s->conversion) {
3429 abinfo.fragsize = s->dma_adc.fragsize / 2;
3430 abinfo.bytes = s->dma_adc.count / 2;
3431 abinfo.fragstotal = s->dma_adc.numfrag;
3432 abinfo.fragments =
3433 abinfo.bytes >> (s->dma_adc.fragshift - 1);
3434 } else {
3435 abinfo.fragsize = s->dma_adc.fragsize;
3436 abinfo.bytes = s->dma_adc.count;
3437 abinfo.fragstotal = s->dma_adc.numfrag;
3438 abinfo.fragments =
3439 abinfo.bytes >> s->dma_adc.fragshift;
3440 }
3441 spin_unlock_irqrestore(&s->lock, flags);
3442 return copy_to_user(p, &abinfo,
3443 sizeof(abinfo)) ? -EFAULT : 0;
3444
3445 case SNDCTL_DSP_NONBLOCK:
3446 file->f_flags |= O_NONBLOCK;
3447 return 0;
3448
3449 case SNDCTL_DSP_GETODELAY:
3450 if (!(file->f_mode & FMODE_WRITE))
3451 return -EINVAL;
3452 if(!s->dma_dac.ready && prog_dmabuf_dac(s))
3453 return 0;
3454 spin_lock_irqsave(&s->lock, flags);
3455 cs4281_update_ptr(s,CS_FALSE);
3456 val = s->dma_dac.count;
3457 spin_unlock_irqrestore(&s->lock, flags);
3458 return put_user(val, p);
3459
3460 case SNDCTL_DSP_GETIPTR:
3461 if (!(file->f_mode & FMODE_READ))
3462 return -EINVAL;
3463 if(!s->dma_adc.ready && prog_dmabuf_adc(s))
3464 return 0;
3465 spin_lock_irqsave(&s->lock, flags);
3466 cs4281_update_ptr(s,CS_FALSE);
3467 cinfo.bytes = s->dma_adc.total_bytes;
3468 if (s->dma_adc.mapped) {
3469 cinfo.blocks =
3470 (cinfo.bytes >> s->dma_adc.fragshift) -
3471 s->dma_adc.blocks;
3472 s->dma_adc.blocks =
3473 cinfo.bytes >> s->dma_adc.fragshift;
3474 } else {
3475 if (s->conversion) {
3476 cinfo.blocks =
3477 s->dma_adc.count /
3478 2 >> (s->dma_adc.fragshift - 1);
3479 } else
3480 cinfo.blocks =
3481 s->dma_adc.count >> s->dma_adc.
3482 fragshift;
3483 }
3484 if (s->conversion)
3485 cinfo.ptr = s->dma_adc.hwptr / 2;
3486 else
3487 cinfo.ptr = s->dma_adc.hwptr;
3488 if (s->dma_adc.mapped)
3489 s->dma_adc.count &= s->dma_adc.fragsize - 1;
3490 spin_unlock_irqrestore(&s->lock, flags);
3491 if (copy_to_user(p, &cinfo, sizeof(cinfo)))
3492 return -EFAULT;
3493 return 0;
3494
3495 case SNDCTL_DSP_GETOPTR:
3496 if (!(file->f_mode & FMODE_WRITE))
3497 return -EINVAL;
3498 if(!s->dma_dac.ready && prog_dmabuf_dac(s))
3499 return 0;
3500 spin_lock_irqsave(&s->lock, flags);
3501 cs4281_update_ptr(s,CS_FALSE);
3502 cinfo.bytes = s->dma_dac.total_bytes;
3503 if (s->dma_dac.mapped) {
3504 cinfo.blocks =
3505 (cinfo.bytes >> s->dma_dac.fragshift) -
3506 s->dma_dac.blocks;
3507 s->dma_dac.blocks =
3508 cinfo.bytes >> s->dma_dac.fragshift;
3509 } else {
3510 cinfo.blocks =
3511 s->dma_dac.count >> s->dma_dac.fragshift;
3512 }
3513 cinfo.ptr = s->dma_dac.hwptr;
3514 if (s->dma_dac.mapped)
3515 s->dma_dac.count &= s->dma_dac.fragsize - 1;
3516 spin_unlock_irqrestore(&s->lock, flags);
3517 if (copy_to_user(p, &cinfo, sizeof(cinfo)))
3518 return -EFAULT;
3519 return 0;
3520
3521 case SNDCTL_DSP_GETBLKSIZE:
3522 if (file->f_mode & FMODE_WRITE) {
3523 if ((val = prog_dmabuf_dac(s)))
3524 return val;
3525 return put_user(s->dma_dac.fragsize, p);
3526 }
3527 if ((val = prog_dmabuf_adc(s)))
3528 return val;
3529 if (s->conversion)
3530 return put_user(s->dma_adc.fragsize / 2, p);
3531 else
3532 return put_user(s->dma_adc.fragsize, p);
3533
3534 case SNDCTL_DSP_SETFRAGMENT:
3535 if (get_user(val, p))
3536 return -EFAULT;
3537 return 0; // Say OK, but do nothing.
3538
3539 case SNDCTL_DSP_SUBDIVIDE:
3540 if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision)
3541 || (file->f_mode & FMODE_WRITE
3542 && s->dma_dac.subdivision)) return -EINVAL;
3543 if (get_user(val, p))
3544 return -EFAULT;
3545 if (val != 1 && val != 2 && val != 4)
3546 return -EINVAL;
3547 if (file->f_mode & FMODE_READ)
3548 s->dma_adc.subdivision = val;
3549 else if (file->f_mode & FMODE_WRITE)
3550 s->dma_dac.subdivision = val;
3551 return 0;
3552
3553 case SOUND_PCM_READ_RATE:
3554 if (file->f_mode & FMODE_READ)
3555 return put_user(s->prop_adc.rate, p);
3556 else if (file->f_mode & FMODE_WRITE)
3557 return put_user(s->prop_dac.rate, p);
3558
3559 case SOUND_PCM_READ_CHANNELS:
3560 if (file->f_mode & FMODE_READ)
3561 return put_user(s->prop_adc.channels, p);
3562 else if (file->f_mode & FMODE_WRITE)
3563 return put_user(s->prop_dac.channels, p);
3564
3565 case SOUND_PCM_READ_BITS:
3566 if (file->f_mode & FMODE_READ)
3567 return
3568 put_user(
3569 (s->prop_adc.
3570 fmt & (AFMT_S8 | AFMT_U8)) ? 8 : 16,
3571 p);
3572 else if (file->f_mode & FMODE_WRITE)
3573 return
3574 put_user(
3575 (s->prop_dac.
3576 fmt & (AFMT_S8 | AFMT_U8)) ? 8 : 16,
3577 p);
3578
3579 case SOUND_PCM_WRITE_FILTER:
3580 case SNDCTL_DSP_SETSYNCRO:
3581 case SOUND_PCM_READ_FILTER:
3582 return -EINVAL;
3583 }
3584 return mixer_ioctl(s, cmd, arg);
3585}
3586
3587
3588static int cs4281_release(struct inode *inode, struct file *file)
3589{
3590 struct cs4281_state *s =
3591 (struct cs4281_state *) file->private_data;
3592
3593 CS_DBGOUT(CS_FUNCTION | CS_RELEASE, 2, printk(KERN_INFO
3594 "cs4281: cs4281_release(): inode=%p file=%p f_mode=%d\n",
3595 inode, file, file->f_mode));
3596
3597 VALIDATE_STATE(s);
3598
3599 if (file->f_mode & FMODE_WRITE) {
3600 drain_dac(s, file->f_flags & O_NONBLOCK);
3601 mutex_lock(&s->open_sem_dac);
3602 stop_dac(s);
3603 dealloc_dmabuf(s, &s->dma_dac);
3604 s->open_mode &= ~FMODE_WRITE;
3605 mutex_unlock(&s->open_sem_dac);
3606 wake_up(&s->open_wait_dac);
3607 }
3608 if (file->f_mode & FMODE_READ) {
3609 drain_adc(s, file->f_flags & O_NONBLOCK);
3610 mutex_lock(&s->open_sem_adc);
3611 stop_adc(s);
3612 dealloc_dmabuf(s, &s->dma_adc);
3613 s->open_mode &= ~FMODE_READ;
3614 mutex_unlock(&s->open_sem_adc);
3615 wake_up(&s->open_wait_adc);
3616 }
3617 return 0;
3618}
3619
/*
 * cs4281_open - open the audio device.
 *
 * Looks up the cs4281_state whose audio minor matches (low 4 bits
 * ignored), then serializes against an existing opener of the same
 * direction: playback and capture each have their own open semaphore and
 * wait queue, so full duplex via two opens is possible.  On success the
 * defaults (8 kHz, mono, unsigned 8-bit) are programmed for each opened
 * direction and the DMA buffer is allocated.  Returns 0, or a negative
 * errno (-ENODEV, -EBUSY, -ERESTARTSYS, -ENOMEM).
 */
static int cs4281_open(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	struct cs4281_state *s=NULL;
	struct list_head *entry;

	CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
		"cs4281: cs4281_open(): inode=%p file=%p f_mode=0x%x\n",
			inode, file, file->f_mode));

	// match on the device's minor number; the low 4 bits select the
	// OSS device type and are masked off for the comparison
	list_for_each(entry, &cs4281_devs)
	{
		s = list_entry(entry, struct cs4281_state, list);

		if (!((s->dev_audio ^ minor) & ~0xf))
			break;
	}
	if (entry == &cs4281_devs)
		return -ENODEV;
	if (!s) {
		CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
			"cs4281: cs4281_open(): Error - unable to find audio state struct\n"));
		return -ENODEV;
	}
	VALIDATE_STATE(s);
	file->private_data = s;

	// wait for device to become free
	if (!(file->f_mode & (FMODE_WRITE | FMODE_READ))) {
		CS_DBGOUT(CS_FUNCTION | CS_OPEN | CS_ERROR, 2, printk(KERN_INFO
			 "cs4281: cs4281_open(): Error - must open READ and/or WRITE\n"));
		return -ENODEV;
	}
	// block (or fail with -EBUSY for O_NONBLOCK) while another opener
	// holds the playback side; the semaphore is dropped around the
	// sleep so release() can make progress
	if (file->f_mode & FMODE_WRITE) {
		mutex_lock(&s->open_sem_dac);
		while (s->open_mode & FMODE_WRITE) {
			if (file->f_flags & O_NONBLOCK) {
				mutex_unlock(&s->open_sem_dac);
				return -EBUSY;
			}
			mutex_unlock(&s->open_sem_dac);
			interruptible_sleep_on(&s->open_wait_dac);

			if (signal_pending(current))
				return -ERESTARTSYS;
			mutex_lock(&s->open_sem_dac);
		}
	}
	// same serialization for the capture side
	if (file->f_mode & FMODE_READ) {
		mutex_lock(&s->open_sem_adc);
		while (s->open_mode & FMODE_READ) {
			if (file->f_flags & O_NONBLOCK) {
				mutex_unlock(&s->open_sem_adc);
				return -EBUSY;
			}
			mutex_unlock(&s->open_sem_adc);
			interruptible_sleep_on(&s->open_wait_adc);

			if (signal_pending(current))
				return -ERESTARTSYS;
			mutex_lock(&s->open_sem_adc);
		}
	}
	s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
	if (file->f_mode & FMODE_READ) {
		// OSS default capture format: 8 kHz mono unsigned 8-bit
		s->prop_adc.fmt = AFMT_U8;
		s->prop_adc.fmt_original = s->prop_adc.fmt;
		s->prop_adc.channels = 1;
		s->prop_adc.rate = 8000;
		s->prop_adc.clkdiv = 96 | 0x80;
		s->conversion = 0;
		s->ena &= ~FMODE_READ;
		s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags =
		    s->dma_adc.subdivision = 0;
		mutex_unlock(&s->open_sem_adc);

		if (prog_dmabuf_adc(s)) {
			CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR
				"cs4281: adc Program dmabufs failed.\n"));
			cs4281_release(inode, file);
			return -ENOMEM;
		}
		prog_codec(s, CS_TYPE_ADC);
	}
	if (file->f_mode & FMODE_WRITE) {
		// OSS default playback format: 8 kHz mono unsigned 8-bit
		s->prop_dac.fmt = AFMT_U8;
		s->prop_dac.fmt_original = s->prop_dac.fmt;
		s->prop_dac.channels = 1;
		s->prop_dac.rate = 8000;
		s->prop_dac.clkdiv = 96 | 0x80;
		s->conversion = 0;
		s->ena &= ~FMODE_WRITE;
		s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags =
		    s->dma_dac.subdivision = 0;
		mutex_unlock(&s->open_sem_dac);

		if (prog_dmabuf_dac(s)) {
			CS_DBGOUT(CS_OPEN | CS_ERROR, 2, printk(KERN_ERR
				"cs4281: dac Program dmabufs failed.\n"));
			cs4281_release(inode, file);
			return -ENOMEM;
		}
		prog_codec(s, CS_TYPE_DAC);
	}
	CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2,
		  printk(KERN_INFO "cs4281: cs4281_open()- 0\n"));
	return nonseekable_open(inode, file);
}
3728
3729
3730// ******************************************************************************************
3731// Wave (audio) file operations struct.
3732// ******************************************************************************************
// File operations for the OSS audio (/dev/dsp) device node.  Not declared
// const (see the commented-out qualifier) — presumably something writes to
// it at runtime; TODO confirm before constifying.
static /*const */ struct file_operations cs4281_audio_fops = {
	.owner	 = THIS_MODULE,
	.llseek	 = no_llseek,
	.read	 = cs4281_read,
	.write	 = cs4281_write,
	.poll	 = cs4281_poll,
	.ioctl	 = cs4281_ioctl,
	.mmap	 = cs4281_mmap,
	.open	 = cs4281_open,
	.release = cs4281_release,
};
3744
3745// ---------------------------------------------------------------------
3746
3747// hold spinlock for the following!
/*
 * cs4281_handle_midi - service the MIDI UART FIFOs.
 *
 * Caller must hold s->lock (see the "hold spinlock" note above).  Drains
 * the receive FIFO into the software input ring (dropping bytes when the
 * ring is full, but still waking readers), then feeds the transmit FIFO
 * from the software output ring, waking writers once the ring has drained
 * below MIDIOUTBUF-16 bytes.
 */
static void cs4281_handle_midi(struct cs4281_state *s)
{
	unsigned char ch;
	int wake;
	unsigned temp1;

	wake = 0;
	// MIDSR bit 0x80 set means the receive FIFO is empty
	while (!(readl(s->pBA0 + BA0_MIDSR) & 0x80)) {
		ch = readl(s->pBA0 + BA0_MIDRP);
		if (s->midi.icnt < MIDIINBUF) {
			s->midi.ibuf[s->midi.iwr] = ch;
			s->midi.iwr = (s->midi.iwr + 1) % MIDIINBUF;
			s->midi.icnt++;
		}
		wake = 1;
	}
	if (wake)
		wake_up(&s->midi.iwait);
	wake = 0;
	// MIDSR bit 0x40 set means the transmit FIFO is full
	while (!(readl(s->pBA0 + BA0_MIDSR) & 0x40) && s->midi.ocnt > 0) {
		temp1 = (s->midi.obuf[s->midi.ord]) & 0x000000ff;
		writel(temp1, s->pBA0 + BA0_MIDWP);
		s->midi.ord = (s->midi.ord + 1) % MIDIOUTBUF;
		s->midi.ocnt--;
		if (s->midi.ocnt < MIDIOUTBUF - 16)
			wake = 1;
	}
	if (wake)
		wake_up(&s->midi.owait);
}
3778
3779
3780
/*
 * cs4281_interrupt - shared IRQ handler.
 *
 * Reads the host interrupt status register; if none of the DMA0/DMA1/MIDI
 * bits are set the interrupt belongs to another device sharing the line
 * (IRQ_NONE).  Otherwise the DMA status registers are read to clear the
 * sources, an end-of-interrupt is issued, and the DMA pointers and MIDI
 * FIFOs are serviced under the state lock.
 */
static irqreturn_t cs4281_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct cs4281_state *s = (struct cs4281_state *) dev_id;
	unsigned int temp1;

	// fastpath out, to ease interrupt sharing
	temp1 = readl(s->pBA0 + BA0_HISR);	// Get Int Status reg.

	CS_DBGOUT(CS_INTERRUPT, 6, printk(KERN_INFO
		  "cs4281: cs4281_interrupt() BA0_HISR=0x%.8x\n", temp1));
/*
* If not DMA or MIDI interrupt, then just return.
*/
	if (!(temp1 & (HISR_DMA0 | HISR_DMA1 | HISR_MIDI))) {
		writel(HICR_IEV | HICR_CHGM, s->pBA0 + BA0_HICR);
		CS_DBGOUT(CS_INTERRUPT, 9, printk(KERN_INFO
			"cs4281: cs4281_interrupt(): returning not cs4281 interrupt.\n"));
		return IRQ_NONE;
	}

	// reading the DMA status registers acknowledges the sources
	if (temp1 & HISR_DMA0)	// If play interrupt,
		readl(s->pBA0 + BA0_HDSR0);	// clear the source.

	if (temp1 & HISR_DMA1)	// Same for play.
		readl(s->pBA0 + BA0_HDSR1);
	writel(HICR_IEV | HICR_CHGM, s->pBA0 + BA0_HICR);	// Local EOI

	spin_lock(&s->lock);
	cs4281_update_ptr(s,CS_TRUE);
	cs4281_handle_midi(s);
	spin_unlock(&s->lock);
	return IRQ_HANDLED;
}
3814
3815// **************************************************************************
3816
3817static void cs4281_midi_timer(unsigned long data)
3818{
3819 struct cs4281_state *s = (struct cs4281_state *) data;
3820 unsigned long flags;
3821
3822 spin_lock_irqsave(&s->lock, flags);
3823 cs4281_handle_midi(s);
3824 spin_unlock_irqrestore(&s->lock, flags);
3825 s->midi.timer.expires = jiffies + 1;
3826 add_timer(&s->midi.timer);
3827}
3828
3829
3830// ---------------------------------------------------------------------
3831
/*
 * cs4281_midi_read - read bytes from the MIDI input ring buffer.
 *
 * Copies up to "count" bytes to user space, sleeping on midi.iwait until
 * the interrupt/timer path queues data (unless O_NONBLOCK).  Returns the
 * number of bytes read or a negative errno; once any bytes have been
 * copied, a partial count is returned instead of an error.
 */
static ssize_t cs4281_midi_read(struct file *file, char __user *buffer,
				size_t count, loff_t * ppos)
{
	struct cs4281_state *s =
	    (struct cs4281_state *) file->private_data;
	ssize_t ret;
	unsigned long flags;
	unsigned ptr;
	int cnt;

	VALIDATE_STATE(s);
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	ret = 0;
	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		ptr = s->midi.ird;
		// contiguous readable span: from ird to the end of the ring,
		// clipped to the number of bytes actually buffered
		cnt = MIDIINBUF - ptr;
		if (s->midi.icnt < cnt)
			cnt = s->midi.icnt;
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		// nothing buffered: wait for the FIFO service path to wake us
		if (cnt <= 0) {
			if (file->f_flags & O_NONBLOCK)
				return ret ? ret : -EAGAIN;
			interruptible_sleep_on(&s->midi.iwait);
			if (signal_pending(current))
				return ret ? ret : -ERESTARTSYS;
			continue;
		}
		if (copy_to_user(buffer, s->midi.ibuf + ptr, cnt))
			return ret ? ret : -EFAULT;
		ptr = (ptr + cnt) % MIDIINBUF;
		spin_lock_irqsave(&s->lock, flags);
		s->midi.ird = ptr;
		s->midi.icnt -= cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
	}
	return ret;
}
3876
3877
/*
 * cs4281_midi_write - queue bytes into the MIDI output ring buffer.
 *
 * Copies up to "count" bytes from user space into the output ring and
 * kicks cs4281_handle_midi() to push them to the transmit FIFO, sleeping
 * on midi.owait when the ring is full (unless O_NONBLOCK).  Returns the
 * number of bytes accepted or a negative errno; partial counts are
 * returned in preference to an error once any bytes are queued.
 */
static ssize_t cs4281_midi_write(struct file *file, const char __user *buffer,
				 size_t count, loff_t * ppos)
{
	struct cs4281_state *s =
	    (struct cs4281_state *) file->private_data;
	ssize_t ret;
	unsigned long flags;
	unsigned ptr;
	int cnt;

	VALIDATE_STATE(s);
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;
	ret = 0;
	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		ptr = s->midi.owr;
		// contiguous free span: from owr to the end of the ring,
		// clipped so ocnt never exceeds MIDIOUTBUF
		cnt = MIDIOUTBUF - ptr;
		if (s->midi.ocnt + cnt > MIDIOUTBUF)
			cnt = MIDIOUTBUF - s->midi.ocnt;
		// ring full: try to drain some bytes into the FIFO now,
		// while we still hold the lock
		if (cnt <= 0)
			cs4281_handle_midi(s);
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			if (file->f_flags & O_NONBLOCK)
				return ret ? ret : -EAGAIN;
			interruptible_sleep_on(&s->midi.owait);
			if (signal_pending(current))
				return ret ? ret : -ERESTARTSYS;
			continue;
		}
		if (copy_from_user(s->midi.obuf + ptr, buffer, cnt))
			return ret ? ret : -EFAULT;
		ptr = (ptr + cnt) % MIDIOUTBUF;
		spin_lock_irqsave(&s->lock, flags);
		s->midi.owr = ptr;
		s->midi.ocnt += cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		// push the newly queued bytes toward the hardware FIFO
		spin_lock_irqsave(&s->lock, flags);
		cs4281_handle_midi(s);
		spin_unlock_irqrestore(&s->lock, flags);
	}
	return ret;
}
3927
3928
3929static unsigned int cs4281_midi_poll(struct file *file,
3930 struct poll_table_struct *wait)
3931{
3932 struct cs4281_state *s =
3933 (struct cs4281_state *) file->private_data;
3934 unsigned long flags;
3935 unsigned int mask = 0;
3936
3937 VALIDATE_STATE(s);
3938 if (file->f_flags & FMODE_WRITE)
3939 poll_wait(file, &s->midi.owait, wait);
3940 if (file->f_flags & FMODE_READ)
3941 poll_wait(file, &s->midi.iwait, wait);
3942 spin_lock_irqsave(&s->lock, flags);
3943 if (file->f_flags & FMODE_READ) {
3944 if (s->midi.icnt > 0)
3945 mask |= POLLIN | POLLRDNORM;
3946 }
3947 if (file->f_flags & FMODE_WRITE) {
3948 if (s->midi.ocnt < MIDIOUTBUF)
3949 mask |= POLLOUT | POLLWRNORM;
3950 }
3951 spin_unlock_irqrestore(&s->lock, flags);
3952 return mask;
3953}
3954
3955
/*
 * cs4281_midi_open - open the MIDI device.
 *
 * Finds the state whose MIDI minor matches, waits (under open_sem) until
 * no other opener holds the requested direction, and on the first MIDI
 * open resets the UART, enables its interrupts and starts the 1-jiffy
 * polling timer.  Returns 0 or a negative errno (-ENODEV, -EBUSY,
 * -ERESTARTSYS).
 */
static int cs4281_midi_open(struct inode *inode, struct file *file)
{
	unsigned long flags, temp1;
	unsigned int minor = iminor(inode);
	struct cs4281_state *s=NULL;
	struct list_head *entry;
	list_for_each(entry, &cs4281_devs)
	{
		s = list_entry(entry, struct cs4281_state, list);

		if (s->dev_midi == minor)
			break;
	}

	if (entry == &cs4281_devs)
		return -ENODEV;
	if (!s)
	{
		CS_DBGOUT(CS_FUNCTION | CS_OPEN, 2, printk(KERN_INFO
			"cs4281: cs4281_open(): Error - unable to find audio state struct\n"));
		return -ENODEV;
	}
	VALIDATE_STATE(s);
	file->private_data = s;
	// wait for device to become free; the semaphore is dropped around
	// the sleep so release() can clear open_mode
	mutex_lock(&s->open_sem);
	while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) {
		if (file->f_flags & O_NONBLOCK) {
			mutex_unlock(&s->open_sem);
			return -EBUSY;
		}
		mutex_unlock(&s->open_sem);
		interruptible_sleep_on(&s->open_wait);
		if (signal_pending(current))
			return -ERESTARTSYS;
		mutex_lock(&s->open_sem);
	}
	spin_lock_irqsave(&s->lock, flags);
	// first MIDI opener: bring up the UART and the polling timer
	if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
		s->midi.ird = s->midi.iwr = s->midi.icnt = 0;
		s->midi.ord = s->midi.owr = s->midi.ocnt = 0;
		writel(1, s->pBA0 + BA0_MIDCR);	// Reset the interface.
		writel(0, s->pBA0 + BA0_MIDCR);	// Return to normal mode.
		s->midi.ird = s->midi.iwr = s->midi.icnt = 0;
		writel(0x0000000f, s->pBA0 + BA0_MIDCR);	// Enable transmit, record, ints.
		temp1 = readl(s->pBA0 + BA0_HIMR);
		writel(temp1 & 0xffbfffff, s->pBA0 + BA0_HIMR);	// Enable midi int. recognition.
		writel(HICR_IEV | HICR_CHGM, s->pBA0 + BA0_HICR);	// Enable interrupts
		init_timer(&s->midi.timer);
		s->midi.timer.expires = jiffies + 1;
		s->midi.timer.data = (unsigned long) s;
		s->midi.timer.function = cs4281_midi_timer;
		add_timer(&s->midi.timer);
	}
	// reset the ring for each direction being (re)opened
	if (file->f_mode & FMODE_READ) {
		s->midi.ird = s->midi.iwr = s->midi.icnt = 0;
	}
	if (file->f_mode & FMODE_WRITE) {
		s->midi.ord = s->midi.owr = s->midi.ocnt = 0;
	}
	spin_unlock_irqrestore(&s->lock, flags);
	// record ownership as FMODE_MIDI_* bits so the audio open bits
	// in open_mode stay independent
	s->open_mode |=
	    (file->
	     f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ |
					    FMODE_MIDI_WRITE);
	mutex_unlock(&s->open_sem);
	return nonseekable_open(inode, file);
}
4024
4025
4026static int cs4281_midi_release(struct inode *inode, struct file *file)
4027{
4028 struct cs4281_state *s =
4029 (struct cs4281_state *) file->private_data;
4030 DECLARE_WAITQUEUE(wait, current);
4031 unsigned long flags;
4032 unsigned count, tmo;
4033
4034 VALIDATE_STATE(s);
4035
4036 if (file->f_mode & FMODE_WRITE) {
4037 add_wait_queue(&s->midi.owait, &wait);
4038 for (;;) {
4039 set_current_state(TASK_INTERRUPTIBLE);
4040 spin_lock_irqsave(&s->lock, flags);
4041 count = s->midi.ocnt;
4042 spin_unlock_irqrestore(&s->lock, flags);
4043 if (count <= 0)
4044 break;
4045 if (signal_pending(current))
4046 break;
4047 if (file->f_flags & O_NONBLOCK) {
4048 remove_wait_queue(&s->midi.owait, &wait);
4049 current->state = TASK_RUNNING;
4050 return -EBUSY;
4051 }
4052 tmo = (count * HZ) / 3100;
4053 if (!schedule_timeout(tmo ? : 1) && tmo)
4054 printk(KERN_DEBUG
4055 "cs4281: midi timed out??\n");
4056 }
4057 remove_wait_queue(&s->midi.owait, &wait);
4058 current->state = TASK_RUNNING;
4059 }
4060 mutex_lock(&s->open_sem);
4061 s->open_mode &=
4062 (~(file->f_mode << FMODE_MIDI_SHIFT)) & (FMODE_MIDI_READ |
4063 FMODE_MIDI_WRITE);
4064 spin_lock_irqsave(&s->lock, flags);
4065 if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
4066 writel(0, s->pBA0 + BA0_MIDCR); // Disable Midi interrupts.
4067 del_timer(&s->midi.timer);
4068 }
4069 spin_unlock_irqrestore(&s->lock, flags);
4070 mutex_unlock(&s->open_sem);
4071 wake_up(&s->open_wait);
4072 return 0;
4073}
4074
4075// ******************************************************************************************
4076// Midi file operations struct.
4077// ******************************************************************************************
4078static /*const */ struct file_operations cs4281_midi_fops = {
4079 .owner = THIS_MODULE,
4080 .llseek = no_llseek,
4081 .read = cs4281_midi_read,
4082 .write = cs4281_midi_write,
4083 .poll = cs4281_midi_poll,
4084 .open = cs4281_midi_open,
4085 .release = cs4281_midi_release,
4086};
4087
4088
4089// ---------------------------------------------------------------------
4090
// maximum number of card instances this driver will handle
#define NR_DEVICE 8		// Only eight devices supported currently.
4093
4094// ---------------------------------------------------------------------
4095
/*
 * Mixer channels and the initial volume each is programmed to at probe
 * time.  Outputs start at 0x4040; the record level and mic start at 0.
 */
static struct initvol {
	int mixch;		/* SOUND_MIXER_WRITE_* ioctl selector */
	int vol;		/* initial volume value for that channel */
} initvol[] __devinitdata = {
	{ SOUND_MIXER_WRITE_VOLUME,  0x4040 },
	{ SOUND_MIXER_WRITE_PCM,     0x4040 },
	{ SOUND_MIXER_WRITE_SYNTH,   0x4040 },
	{ SOUND_MIXER_WRITE_CD,      0x4040 },
	{ SOUND_MIXER_WRITE_LINE,    0x4040 },
	{ SOUND_MIXER_WRITE_LINE1,   0x4040 },
	{ SOUND_MIXER_WRITE_RECLEV,  0x0000 },
	{ SOUND_MIXER_WRITE_SPEAKER, 0x4040 },
	{ SOUND_MIXER_WRITE_MIC,     0x0000 }
};
4112
4113
4114#ifndef NOT_CS4281_PM
4115static void __devinit cs4281_BuildFIFO(
4116 struct cs4281_pipeline *p,
4117 struct cs4281_state *s)
4118{
4119 switch(p->number)
4120 {
4121 case 0: /* playback */
4122 {
4123 p->u32FCRnAddress = BA0_FCR0;
4124 p->u32FSICnAddress = BA0_FSIC0;
4125 p->u32FPDRnAddress = BA0_FPDR0;
4126 break;
4127 }
4128 case 1: /* capture */
4129 {
4130 p->u32FCRnAddress = BA0_FCR1;
4131 p->u32FSICnAddress = BA0_FSIC1;
4132 p->u32FPDRnAddress = BA0_FPDR1;
4133 break;
4134 }
4135
4136 case 2:
4137 {
4138 p->u32FCRnAddress = BA0_FCR2;
4139 p->u32FSICnAddress = BA0_FSIC2;
4140 p->u32FPDRnAddress = BA0_FPDR2;
4141 break;
4142 }
4143 case 3:
4144 {
4145 p->u32FCRnAddress = BA0_FCR3;
4146 p->u32FSICnAddress = BA0_FSIC3;
4147 p->u32FPDRnAddress = BA0_FPDR3;
4148 break;
4149 }
4150 default:
4151 break;
4152 }
4153 //
4154 // first read the hardware to initialize the member variables
4155 //
4156 p->u32FCRnValue = readl(s->pBA0 + p->u32FCRnAddress);
4157 p->u32FSICnValue = readl(s->pBA0 + p->u32FSICnAddress);
4158 p->u32FPDRnValue = readl(s->pBA0 + p->u32FPDRnAddress);
4159
4160}
4161
4162static void __devinit cs4281_BuildDMAengine(
4163 struct cs4281_pipeline *p,
4164 struct cs4281_state *s)
4165{
4166/*
4167* initialize all the addresses of this pipeline dma info.
4168*/
4169 switch(p->number)
4170 {
4171 case 0: /* playback */
4172 {
4173 p->u32DBAnAddress = BA0_DBA0;
4174 p->u32DCAnAddress = BA0_DCA0;
4175 p->u32DBCnAddress = BA0_DBC0;
4176 p->u32DCCnAddress = BA0_DCC0;
4177 p->u32DMRnAddress = BA0_DMR0;
4178 p->u32DCRnAddress = BA0_DCR0;
4179 p->u32HDSRnAddress = BA0_HDSR0;
4180 break;
4181 }
4182
4183 case 1: /* capture */
4184 {
4185 p->u32DBAnAddress = BA0_DBA1;
4186 p->u32DCAnAddress = BA0_DCA1;
4187 p->u32DBCnAddress = BA0_DBC1;
4188 p->u32DCCnAddress = BA0_DCC1;
4189 p->u32DMRnAddress = BA0_DMR1;
4190 p->u32DCRnAddress = BA0_DCR1;
4191 p->u32HDSRnAddress = BA0_HDSR1;
4192 break;
4193 }
4194
4195 case 2:
4196 {
4197 p->u32DBAnAddress = BA0_DBA2;
4198 p->u32DCAnAddress = BA0_DCA2;
4199 p->u32DBCnAddress = BA0_DBC2;
4200 p->u32DCCnAddress = BA0_DCC2;
4201 p->u32DMRnAddress = BA0_DMR2;
4202 p->u32DCRnAddress = BA0_DCR2;
4203 p->u32HDSRnAddress = BA0_HDSR2;
4204 break;
4205 }
4206
4207 case 3:
4208 {
4209 p->u32DBAnAddress = BA0_DBA3;
4210 p->u32DCAnAddress = BA0_DCA3;
4211 p->u32DBCnAddress = BA0_DBC3;
4212 p->u32DCCnAddress = BA0_DCC3;
4213 p->u32DMRnAddress = BA0_DMR3;
4214 p->u32DCRnAddress = BA0_DCR3;
4215 p->u32HDSRnAddress = BA0_HDSR3;
4216 break;
4217 }
4218 default:
4219 break;
4220 }
4221
4222//
4223// Initialize the dma values for this pipeline
4224//
4225 p->u32DBAnValue = readl(s->pBA0 + p->u32DBAnAddress);
4226 p->u32DBCnValue = readl(s->pBA0 + p->u32DBCnAddress);
4227 p->u32DMRnValue = readl(s->pBA0 + p->u32DMRnAddress);
4228 p->u32DCRnValue = readl(s->pBA0 + p->u32DCRnAddress);
4229
4230}
4231
4232static void __devinit cs4281_InitPM(struct cs4281_state *s)
4233{
4234 int i;
4235 struct cs4281_pipeline *p;
4236
4237 for(i=0;i<CS4281_NUMBER_OF_PIPELINES;i++)
4238 {
4239 p = &s->pl[i];
4240 p->number = i;
4241 cs4281_BuildDMAengine(p,s);
4242 cs4281_BuildFIFO(p,s);
4243 /*
4244 * currently only 2 pipelines are used
4245 * so, only set the valid bit on the playback and capture.
4246 */
4247 if( (i == CS4281_PLAYBACK_PIPELINE_NUMBER) ||
4248 (i == CS4281_CAPTURE_PIPELINE_NUMBER))
4249 p->flags |= CS4281_PIPELINE_VALID;
4250 }
4251 s->pm.u32SSPM_BITS = 0x7e; /* rev c, use 0x7c for rev a or b */
4252}
4253#endif
4254
4255static int __devinit cs4281_probe(struct pci_dev *pcidev,
4256 const struct pci_device_id *pciid)
4257{
4258 struct cs4281_state *s;
4259 dma_addr_t dma_mask;
4260 mm_segment_t fs;
4261 int i, val;
4262 unsigned int temp1, temp2;
4263
4264 CS_DBGOUT(CS_FUNCTION | CS_INIT, 2,
4265 printk(KERN_INFO "cs4281: probe()+\n"));
4266
4267 if (pci_enable_device(pcidev)) {
4268 CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_ERR
4269 "cs4281: pci_enable_device() failed\n"));
4270 return -1;
4271 }
4272 if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_MEM) ||
4273 !(pci_resource_flags(pcidev, 1) & IORESOURCE_MEM)) {
4274 CS_DBGOUT(CS_ERROR, 1, printk(KERN_ERR
4275 "cs4281: probe()- Memory region not assigned\n"));
4276 return -ENODEV;
4277 }
4278 if (pcidev->irq == 0) {
4279 CS_DBGOUT(CS_ERROR, 1, printk(KERN_ERR
4280 "cs4281: probe() IRQ not assigned\n"));
4281 return -ENODEV;
4282 }
4283 dma_mask = 0xffffffff; /* this enables playback and recording */
4284 i = pci_set_dma_mask(pcidev, dma_mask);
4285 if (i) {
4286 CS_DBGOUT(CS_ERROR, 1, printk(KERN_ERR
4287 "cs4281: probe() architecture does not support 32bit PCI busmaster DMA\n"));
4288 return i;
4289 }
4290 if (!(s = kmalloc(sizeof(struct cs4281_state), GFP_KERNEL))) {
4291 CS_DBGOUT(CS_ERROR, 1, printk(KERN_ERR
4292 "cs4281: probe() no memory for state struct.\n"));
4293 return -1;
4294 }
4295 memset(s, 0, sizeof(struct cs4281_state));
4296 init_waitqueue_head(&s->dma_adc.wait);
4297 init_waitqueue_head(&s->dma_dac.wait);
4298 init_waitqueue_head(&s->open_wait);
4299 init_waitqueue_head(&s->open_wait_adc);
4300 init_waitqueue_head(&s->open_wait_dac);
4301 init_waitqueue_head(&s->midi.iwait);
4302 init_waitqueue_head(&s->midi.owait);
4303 mutex_init(&s->open_sem);
4304 mutex_init(&s->open_sem_adc);
4305 mutex_init(&s->open_sem_dac);
4306 spin_lock_init(&s->lock);
4307 s->pBA0phys = pci_resource_start(pcidev, 0);
4308 s->pBA1phys = pci_resource_start(pcidev, 1);
4309
4310 /* Convert phys to linear. */
4311 s->pBA0 = ioremap_nocache(s->pBA0phys, 4096);
4312 if (!s->pBA0) {
4313 CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_ERR
4314 "cs4281: BA0 I/O mapping failed. Skipping part.\n"));
4315 goto err_free;
4316 }
4317 s->pBA1 = ioremap_nocache(s->pBA1phys, 65536);
4318 if (!s->pBA1) {
4319 CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_ERR
4320 "cs4281: BA1 I/O mapping failed. Skipping part.\n"));
4321 goto err_unmap;
4322 }
4323
4324 temp1 = readl(s->pBA0 + BA0_PCICFG00);
4325 temp2 = readl(s->pBA0 + BA0_PCICFG04);
4326
4327 CS_DBGOUT(CS_INIT, 2,
4328 printk(KERN_INFO
4329 "cs4281: probe() BA0=0x%.8x BA1=0x%.8x pBA0=%p pBA1=%p \n",
4330 (unsigned) temp1, (unsigned) temp2, s->pBA0, s->pBA1));
4331 CS_DBGOUT(CS_INIT, 2,
4332 printk(KERN_INFO
4333 "cs4281: probe() pBA0phys=0x%.8x pBA1phys=0x%.8x\n",
4334 (unsigned) s->pBA0phys, (unsigned) s->pBA1phys));
4335
4336#ifndef NOT_CS4281_PM
4337 s->pm.flags = CS4281_PM_IDLE;
4338#endif
4339 temp1 = cs4281_hw_init(s);
4340 if (temp1) {
4341 CS_DBGOUT(CS_ERROR | CS_INIT, 1, printk(KERN_ERR
4342 "cs4281: cs4281_hw_init() failed. Skipping part.\n"));
4343 goto err_irq;
4344 }
4345 s->magic = CS4281_MAGIC;
4346 s->pcidev = pcidev;
4347 s->irq = pcidev->irq;
4348 if (request_irq
4349 (s->irq, cs4281_interrupt, IRQF_SHARED, "Crystal CS4281", s)) {
4350 CS_DBGOUT(CS_INIT | CS_ERROR, 1,
4351 printk(KERN_ERR "cs4281: irq %u in use\n", s->irq));
4352 goto err_irq;
4353 }
4354 if ((s->dev_audio = register_sound_dsp(&cs4281_audio_fops, -1)) <
4355 0) {
4356 CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_ERR
4357 "cs4281: probe() register_sound_dsp() failed.\n"));
4358 goto err_dev1;
4359 }
4360 if ((s->dev_mixer = register_sound_mixer(&cs4281_mixer_fops, -1)) <
4361 0) {
4362 CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_ERR
4363 "cs4281: probe() register_sound_mixer() failed.\n"));
4364 goto err_dev2;
4365 }
4366 if ((s->dev_midi = register_sound_midi(&cs4281_midi_fops, -1)) < 0) {
4367 CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_ERR
4368 "cs4281: probe() register_sound_midi() failed.\n"));
4369 goto err_dev3;
4370 }
4371#ifndef NOT_CS4281_PM
4372 cs4281_InitPM(s);
4373 s->pm.flags |= CS4281_PM_NOT_REGISTERED;
4374#endif
4375
4376 pci_set_master(pcidev); // enable bus mastering
4377
4378 fs = get_fs();
4379 set_fs(KERNEL_DS);
4380 val = SOUND_MASK_LINE;
4381 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
4382 for (i = 0; i < sizeof(initvol) / sizeof(initvol[0]); i++) {
4383 val = initvol[i].vol;
4384 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
4385 }
4386 val = 1; // enable mic preamp
4387 mixer_ioctl(s, SOUND_MIXER_PRIVATE1, (unsigned long) &val);
4388 set_fs(fs);
4389
4390 pci_set_drvdata(pcidev, s);
4391 list_add(&s->list, &cs4281_devs);
4392 CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO
4393 "cs4281: probe()- device allocated successfully\n"));
4394 return 0;
4395
4396 err_dev3:
4397 unregister_sound_mixer(s->dev_mixer);
4398 err_dev2:
4399 unregister_sound_dsp(s->dev_audio);
4400 err_dev1:
4401 free_irq(s->irq, s);
4402 err_irq:
4403 iounmap(s->pBA1);
4404 err_unmap:
4405 iounmap(s->pBA0);
4406 err_free:
4407 kfree(s);
4408
4409 CS_DBGOUT(CS_INIT | CS_ERROR, 1, printk(KERN_INFO
4410 "cs4281: probe()- no device allocated\n"));
4411 return -ENODEV;
4412} // probe_cs4281
4413
4414
4415// ---------------------------------------------------------------------
4416
/*
 * PCI remove: tear down one CS4281 card, releasing resources in the
 * reverse of the order probe acquired them.
 */
static void __devexit cs4281_remove(struct pci_dev *pci_dev)
{
	struct cs4281_state *s = pci_get_drvdata(pci_dev);
	// stop DMA controller
	synchronize_irq(s->irq);
	free_irq(s->irq, s);
	unregister_sound_dsp(s->dev_audio);
	unregister_sound_mixer(s->dev_mixer);
	unregister_sound_midi(s->dev_midi);
	iounmap(s->pBA1);
	iounmap(s->pBA0);
	pci_set_drvdata(pci_dev,NULL);	/* clear stale driver-data pointer */
	list_del(&s->list);	/* drop from the global device list */
	kfree(s);
	CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO
		 "cs4281: cs4281_remove()-: remove successful\n"));
}
4434
4435static struct pci_device_id cs4281_pci_tbl[] = {
4436 {
4437 .vendor = PCI_VENDOR_ID_CIRRUS,
4438 .device = PCI_DEVICE_ID_CRYSTAL_CS4281,
4439 .subvendor = PCI_ANY_ID,
4440 .subdevice = PCI_ANY_ID,
4441 },
4442 { 0, },
4443};
4444
4445MODULE_DEVICE_TABLE(pci, cs4281_pci_tbl);
4446
/* PCI driver glue tying the probe/remove/suspend/resume hooks together. */
static struct pci_driver cs4281_pci_driver = {
	.name = "cs4281",
	.id_table = cs4281_pci_tbl,
	.probe = cs4281_probe,
	.remove = __devexit_p(cs4281_remove),
	.suspend = CS4281_SUSPEND_TBL,
	.resume = CS4281_RESUME_TBL,
};
4455
4456static int __init cs4281_init_module(void)
4457{
4458 int rtn = 0;
4459 CS_DBGOUT(CS_INIT | CS_FUNCTION, 2, printk(KERN_INFO
4460 "cs4281: cs4281_init_module()+ \n"));
4461 printk(KERN_INFO "cs4281: version v%d.%02d.%d time " __TIME__ " "
4462 __DATE__ "\n", CS4281_MAJOR_VERSION, CS4281_MINOR_VERSION,
4463 CS4281_ARCH);
4464 rtn = pci_register_driver(&cs4281_pci_driver);
4465
4466 CS_DBGOUT(CS_INIT | CS_FUNCTION, 2,
4467 printk(KERN_INFO "cs4281: cs4281_init_module()- (%d)\n",rtn));
4468 return rtn;
4469}
4470
/* Module exit: unregister the PCI driver (detaches all bound cards). */
static void __exit cs4281_cleanup_module(void)
{
	pci_unregister_driver(&cs4281_pci_driver);
	CS_DBGOUT(CS_INIT | CS_FUNCTION, 2,
		  printk(KERN_INFO "cs4281: cleanup_cs4281() finished\n"));
}
4477// ---------------------------------------------------------------------
4478
4479MODULE_AUTHOR("gw boynton, audio@crystal.cirrus.com");
4480MODULE_DESCRIPTION("Cirrus Logic CS4281 Driver");
4481MODULE_LICENSE("GPL");
4482
4483// ---------------------------------------------------------------------
4484
4485module_init(cs4281_init_module);
4486module_exit(cs4281_cleanup_module);
4487
diff --git a/sound/oss/cs4281/cs4281pm-24.c b/sound/oss/cs4281/cs4281pm-24.c
deleted file mode 100644
index 90cbd7679534..000000000000
--- a/sound/oss/cs4281/cs4281pm-24.c
+++ /dev/null
@@ -1,45 +0,0 @@
1/*******************************************************************************
2*
3* "cs4281pm.c" -- Cirrus Logic-Crystal CS4281 linux audio driver.
4*
5* Copyright (C) 2000,2001 Cirrus Logic Corp.
6* -- tom woller (twoller@crystal.cirrus.com) or
7* (audio@crystal.cirrus.com).
8*
9* This program is free software; you can redistribute it and/or modify
10* it under the terms of the GNU General Public License as published by
11* the Free Software Foundation; either version 2 of the License, or
12* (at your option) any later version.
13*
14* This program is distributed in the hope that it will be useful,
15* but WITHOUT ANY WARRANTY; without even the implied warranty of
16* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17* GNU General Public License for more details.
18*
19* You should have received a copy of the GNU General Public License
20* along with this program; if not, write to the Free Software
21* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22*
23* 12/22/00 trw - new file.
24*
25*******************************************************************************/
26
27#ifndef NOT_CS4281_PM
28#include <linux/pm.h>
29
30static int cs4281_suspend(struct cs4281_state *s);
31static int cs4281_resume(struct cs4281_state *s);
32/*
33* for now (12/22/00) only enable the pm_register PM support.
34* allow these table entries to be null.
35#define CS4281_SUSPEND_TBL cs4281_suspend_tbl
36#define CS4281_RESUME_TBL cs4281_resume_tbl
37*/
38#define CS4281_SUSPEND_TBL cs4281_suspend_null
39#define CS4281_RESUME_TBL cs4281_resume_null
40
41#else /* CS4281_PM */
42#define CS4281_SUSPEND_TBL cs4281_suspend_null
43#define CS4281_RESUME_TBL cs4281_resume_null
44#endif /* CS4281_PM */
45
diff --git a/sound/oss/cs4281/cs4281pm.h b/sound/oss/cs4281/cs4281pm.h
deleted file mode 100644
index b44fdc9ce002..000000000000
--- a/sound/oss/cs4281/cs4281pm.h
+++ /dev/null
@@ -1,74 +0,0 @@
1#ifndef NOT_CS4281_PM
2/*******************************************************************************
3*
4* "cs4281pm.h" -- Cirrus Logic-Crystal CS4281 linux audio driver.
5*
6* Copyright (C) 2000,2001 Cirrus Logic Corp.
7* -- tom woller (twoller@crystal.cirrus.com) or
8* (audio@crystal.cirrus.com).
9*
10* This program is free software; you can redistribute it and/or modify
11* it under the terms of the GNU General Public License as published by
12* the Free Software Foundation; either version 2 of the License, or
13* (at your option) any later version.
14*
15* This program is distributed in the hope that it will be useful,
16* but WITHOUT ANY WARRANTY; without even the implied warranty of
17* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18* GNU General Public License for more details.
19*
20* You should have received a copy of the GNU General Public License
21* along with this program; if not, write to the Free Software
22* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23*
24* 12/22/00 trw - new file.
25*
26*******************************************************************************/
27/* general pm definitions */
28#define CS4281_AC97_HIGHESTREGTORESTORE 0x26
29#define CS4281_AC97_NUMBER_RESTORE_REGS (CS4281_AC97_HIGHESTREGTORESTORE/2-1)
30
31/* pipeline definitions */
32#define CS4281_NUMBER_OF_PIPELINES 4
33#define CS4281_PIPELINE_VALID 0x0001
34#define CS4281_PLAYBACK_PIPELINE_NUMBER 0x0000
35#define CS4281_CAPTURE_PIPELINE_NUMBER 0x0001
36
37/* PM state defintions */
38#define CS4281_PM_NOT_REGISTERED 0x1000
39#define CS4281_PM_IDLE 0x0001
40#define CS4281_PM_SUSPENDING 0x0002
41#define CS4281_PM_SUSPENDED 0x0004
42#define CS4281_PM_RESUMING 0x0008
43#define CS4281_PM_RESUMED 0x0010
44
45struct cs4281_pm {
46 unsigned long flags;
47 u32 u32CLKCR1_SAVE,u32SSPMValue,u32PPLVCvalue,u32PPRVCvalue;
48 u32 u32FMLVCvalue,u32FMRVCvalue,u32GPIORvalue,u32JSCTLvalue,u32SSCR;
49 u32 u32SRCSA,u32DacASR,u32AdcASR,u32DacSR,u32AdcSR,u32MIDCR_Save;
50 u32 u32SSPM_BITS;
51 u32 ac97[CS4281_AC97_NUMBER_RESTORE_REGS];
52 u32 u32AC97_master_volume, u32AC97_headphone_volume, u32AC97_master_volume_mono;
53 u32 u32AC97_pcm_out_volume, u32AC97_powerdown, u32AC97_general_purpose;
54 u32 u32hwptr_playback,u32hwptr_capture;
55};
56
57struct cs4281_pipeline {
58 unsigned flags;
59 unsigned number;
60 u32 u32DBAnValue,u32DBCnValue,u32DMRnValue,u32DCRnValue;
61 u32 u32DBAnAddress,u32DCAnAddress,u32DBCnAddress,u32DCCnAddress;
62 u32 u32DMRnAddress,u32DCRnAddress,u32HDSRnAddress;
63 u32 u32DBAn_Save,u32DBCn_Save,u32DMRn_Save,u32DCRn_Save;
64 u32 u32DCCn_Save,u32DCAn_Save;
65/*
66* technically, these are fifo variables, but just map the
67* first fifo with the first pipeline and then use the fifo
68* variables inside of the pipeline struct.
69*/
70 u32 u32FCRn_Save,u32FSICn_Save;
71 u32 u32FCRnValue,u32FCRnAddress,u32FSICnValue,u32FSICnAddress;
72 u32 u32FPDRnValue,u32FPDRnAddress;
73};
74#endif
diff --git a/sound/oss/dev_table.c b/sound/oss/dev_table.c
index fb64279f3935..08274c995d06 100644
--- a/sound/oss/dev_table.c
+++ b/sound/oss/dev_table.c
@@ -13,9 +13,39 @@
13 13
14#include <linux/init.h> 14#include <linux/init.h>
15 15
16#define _DEV_TABLE_C_
17#include "sound_config.h" 16#include "sound_config.h"
18 17
18struct audio_operations *audio_devs[MAX_AUDIO_DEV];
19EXPORT_SYMBOL(audio_devs);
20
21int num_audiodevs;
22EXPORT_SYMBOL(num_audiodevs);
23
24struct mixer_operations *mixer_devs[MAX_MIXER_DEV];
25EXPORT_SYMBOL(mixer_devs);
26
27int num_mixers;
28EXPORT_SYMBOL(num_mixers);
29
30struct synth_operations *synth_devs[MAX_SYNTH_DEV+MAX_MIDI_DEV];
31EXPORT_SYMBOL(synth_devs);
32
33int num_synths;
34
35struct midi_operations *midi_devs[MAX_MIDI_DEV];
36EXPORT_SYMBOL(midi_devs);
37
38int num_midis;
39EXPORT_SYMBOL(num_midis);
40
41struct sound_timer_operations *sound_timer_devs[MAX_TIMER_DEV] = {
42 &default_sound_timer, NULL
43};
44EXPORT_SYMBOL(sound_timer_devs);
45
46int num_sound_timers = 1;
47
48
19static int sound_alloc_audiodev(void); 49static int sound_alloc_audiodev(void);
20 50
21int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver, 51int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver,
@@ -75,6 +105,7 @@ int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver,
75 audio_init_devices(); 105 audio_init_devices();
76 return num; 106 return num;
77} 107}
108EXPORT_SYMBOL(sound_install_audiodrv);
78 109
79int sound_install_mixer(int vers, char *name, struct mixer_operations *driver, 110int sound_install_mixer(int vers, char *name, struct mixer_operations *driver,
80 int driver_size, void *devc) 111 int driver_size, void *devc)
@@ -113,6 +144,7 @@ int sound_install_mixer(int vers, char *name, struct mixer_operations *driver,
113 mixer_devs[n] = op; 144 mixer_devs[n] = op;
114 return n; 145 return n;
115} 146}
147EXPORT_SYMBOL(sound_install_mixer);
116 148
117void sound_unload_audiodev(int dev) 149void sound_unload_audiodev(int dev)
118{ 150{
@@ -122,6 +154,7 @@ void sound_unload_audiodev(int dev)
122 unregister_sound_dsp((dev<<4)+3); 154 unregister_sound_dsp((dev<<4)+3);
123 } 155 }
124} 156}
157EXPORT_SYMBOL(sound_unload_audiodev);
125 158
126static int sound_alloc_audiodev(void) 159static int sound_alloc_audiodev(void)
127{ 160{
@@ -144,6 +177,7 @@ int sound_alloc_mididev(void)
144 num_midis = i + 1; 177 num_midis = i + 1;
145 return i; 178 return i;
146} 179}
180EXPORT_SYMBOL(sound_alloc_mididev);
147 181
148int sound_alloc_synthdev(void) 182int sound_alloc_synthdev(void)
149{ 183{
@@ -158,6 +192,7 @@ int sound_alloc_synthdev(void)
158 } 192 }
159 return -1; 193 return -1;
160} 194}
195EXPORT_SYMBOL(sound_alloc_synthdev);
161 196
162int sound_alloc_mixerdev(void) 197int sound_alloc_mixerdev(void)
163{ 198{
@@ -169,6 +204,7 @@ int sound_alloc_mixerdev(void)
169 num_mixers = i + 1; 204 num_mixers = i + 1;
170 return i; 205 return i;
171} 206}
207EXPORT_SYMBOL(sound_alloc_mixerdev);
172 208
173int sound_alloc_timerdev(void) 209int sound_alloc_timerdev(void)
174{ 210{
@@ -183,6 +219,7 @@ int sound_alloc_timerdev(void)
183 } 219 }
184 return -1; 220 return -1;
185} 221}
222EXPORT_SYMBOL(sound_alloc_timerdev);
186 223
187void sound_unload_mixerdev(int dev) 224void sound_unload_mixerdev(int dev)
188{ 225{
@@ -192,6 +229,7 @@ void sound_unload_mixerdev(int dev)
192 num_mixers--; 229 num_mixers--;
193 } 230 }
194} 231}
232EXPORT_SYMBOL(sound_unload_mixerdev);
195 233
196void sound_unload_mididev(int dev) 234void sound_unload_mididev(int dev)
197{ 235{
@@ -200,15 +238,19 @@ void sound_unload_mididev(int dev)
200 unregister_sound_midi((dev<<4)+2); 238 unregister_sound_midi((dev<<4)+2);
201 } 239 }
202} 240}
241EXPORT_SYMBOL(sound_unload_mididev);
203 242
204void sound_unload_synthdev(int dev) 243void sound_unload_synthdev(int dev)
205{ 244{
206 if (dev != -1) 245 if (dev != -1)
207 synth_devs[dev] = NULL; 246 synth_devs[dev] = NULL;
208} 247}
248EXPORT_SYMBOL(sound_unload_synthdev);
209 249
210void sound_unload_timerdev(int dev) 250void sound_unload_timerdev(int dev)
211{ 251{
212 if (dev != -1) 252 if (dev != -1)
213 sound_timer_devs[dev] = NULL; 253 sound_timer_devs[dev] = NULL;
214} 254}
255EXPORT_SYMBOL(sound_unload_timerdev);
256
diff --git a/sound/oss/dev_table.h b/sound/oss/dev_table.h
index adf1d625b576..b7617bee6388 100644
--- a/sound/oss/dev_table.h
+++ b/sound/oss/dev_table.h
@@ -352,22 +352,8 @@ struct sound_timer_operations
352 void (*arm_timer)(int dev, long time); 352 void (*arm_timer)(int dev, long time);
353}; 353};
354 354
355#ifdef _DEV_TABLE_C_
356struct audio_operations *audio_devs[MAX_AUDIO_DEV];
357int num_audiodevs;
358struct mixer_operations *mixer_devs[MAX_MIXER_DEV];
359int num_mixers;
360struct synth_operations *synth_devs[MAX_SYNTH_DEV+MAX_MIDI_DEV];
361int num_synths;
362struct midi_operations *midi_devs[MAX_MIDI_DEV];
363int num_midis;
364
365extern struct sound_timer_operations default_sound_timer; 355extern struct sound_timer_operations default_sound_timer;
366struct sound_timer_operations *sound_timer_devs[MAX_TIMER_DEV] = { 356
367 &default_sound_timer, NULL
368};
369int num_sound_timers = 1;
370#else
371extern struct audio_operations *audio_devs[MAX_AUDIO_DEV]; 357extern struct audio_operations *audio_devs[MAX_AUDIO_DEV];
372extern int num_audiodevs; 358extern int num_audiodevs;
373extern struct mixer_operations *mixer_devs[MAX_MIXER_DEV]; 359extern struct mixer_operations *mixer_devs[MAX_MIXER_DEV];
@@ -378,7 +364,6 @@ extern struct midi_operations *midi_devs[MAX_MIDI_DEV];
378extern int num_midis; 364extern int num_midis;
379extern struct sound_timer_operations * sound_timer_devs[MAX_TIMER_DEV]; 365extern struct sound_timer_operations * sound_timer_devs[MAX_TIMER_DEV];
380extern int num_sound_timers; 366extern int num_sound_timers;
381#endif /* _DEV_TABLE_C_ */
382 367
383extern int sound_map_buffer (int dev, struct dma_buffparms *dmap, buffmem_desc *info); 368extern int sound_map_buffer (int dev, struct dma_buffparms *dmap, buffmem_desc *info);
384void sound_timer_init (struct sound_lowlev_timer *t, char *name); 369void sound_timer_init (struct sound_lowlev_timer *t, char *name);
diff --git a/sound/oss/dm.h b/sound/oss/dm.h
deleted file mode 100644
index 14a90593c44f..000000000000
--- a/sound/oss/dm.h
+++ /dev/null
@@ -1,79 +0,0 @@
1#ifndef _DRIVERS_SOUND_DM_H
2#define _DRIVERS_SOUND_DM_H
3
4/*
5 * Definitions of the 'direct midi sound' interface used
6 * by the newer commercial OSS package. We should export
7 * this to userland somewhere in glibc later.
8 */
9
10/*
11 * Data structure composing an FM "note" or sound event.
12 */
13
14struct dm_fm_voice
15{
16 u8 op;
17 u8 voice;
18 u8 am;
19 u8 vibrato;
20 u8 do_sustain;
21 u8 kbd_scale;
22 u8 harmonic;
23 u8 scale_level;
24 u8 volume;
25 u8 attack;
26 u8 decay;
27 u8 sustain;
28 u8 release;
29 u8 feedback;
30 u8 connection;
31 u8 left;
32 u8 right;
33 u8 waveform;
34};
35
36/*
37 * This describes an FM note by its voice, octave, frequency number (10bit)
38 * and key on/off.
39 */
40
41struct dm_fm_note
42{
43 u8 voice;
44 u8 octave;
45 u32 fnum;
46 u8 key_on;
47};
48
49/*
50 * FM parameters that apply globally to all voices, and thus are not "notes"
51 */
52
53struct dm_fm_params
54{
55 u8 am_depth;
56 u8 vib_depth;
57 u8 kbd_split;
58 u8 rhythm;
59
60 /* This block is the percussion instrument data */
61 u8 bass;
62 u8 snare;
63 u8 tomtom;
64 u8 cymbal;
65 u8 hihat;
66};
67
68/*
69 * FM mode ioctl settings
70 */
71
72#define FM_IOCTL_RESET 0x20
73#define FM_IOCTL_PLAY_NOTE 0x21
74#define FM_IOCTL_SET_VOICE 0x22
75#define FM_IOCTL_SET_PARAMS 0x23
76#define FM_IOCTL_SET_MODE 0x24
77#define FM_IOCTL_SET_OPL 0x25
78
79#endif
diff --git a/sound/oss/dmabuf.c b/sound/oss/dmabuf.c
index 6c1cf74b78c5..b256c0401161 100644
--- a/sound/oss/dmabuf.c
+++ b/sound/oss/dmabuf.c
@@ -926,6 +926,7 @@ int DMAbuf_start_dma(int dev, unsigned long physaddr, int count, int dma_mode)
926 sound_start_dma(dmap, physaddr, count, dma_mode); 926 sound_start_dma(dmap, physaddr, count, dma_mode);
927 return count; 927 return count;
928} 928}
929EXPORT_SYMBOL(DMAbuf_start_dma);
929 930
930static int local_start_dma(struct audio_operations *adev, unsigned long physaddr, int count, int dma_mode) 931static int local_start_dma(struct audio_operations *adev, unsigned long physaddr, int count, int dma_mode)
931{ 932{
@@ -1055,6 +1056,8 @@ void DMAbuf_outputintr(int dev, int notify_only)
1055 do_outputintr(dev, notify_only); 1056 do_outputintr(dev, notify_only);
1056 spin_unlock_irqrestore(&dmap->lock,flags); 1057 spin_unlock_irqrestore(&dmap->lock,flags);
1057} 1058}
1059EXPORT_SYMBOL(DMAbuf_outputintr);
1060
1058/* called with dmap->lock held in irq context */ 1061/* called with dmap->lock held in irq context */
1059static void do_inputintr(int dev) 1062static void do_inputintr(int dev)
1060{ 1063{
@@ -1154,36 +1157,7 @@ void DMAbuf_inputintr(int dev)
1154 do_inputintr(dev); 1157 do_inputintr(dev);
1155 spin_unlock_irqrestore(&dmap->lock,flags); 1158 spin_unlock_irqrestore(&dmap->lock,flags);
1156} 1159}
1157 1160EXPORT_SYMBOL(DMAbuf_inputintr);
1158int DMAbuf_open_dma(int dev)
1159{
1160 /*
1161 * NOTE! This routine opens only the primary DMA channel (output).
1162 */
1163 struct audio_operations *adev = audio_devs[dev];
1164 int err;
1165
1166 if ((err = open_dmap(adev, OPEN_READWRITE, adev->dmap_out)) < 0)
1167 return -EBUSY;
1168 dma_init_buffers(adev->dmap_out);
1169 adev->dmap_out->flags |= DMA_ALLOC_DONE;
1170 adev->dmap_out->fragment_size = adev->dmap_out->buffsize;
1171
1172 if (adev->dmap_out->dma >= 0) {
1173 unsigned long flags;
1174
1175 flags=claim_dma_lock();
1176 clear_dma_ff(adev->dmap_out->dma);
1177 disable_dma(adev->dmap_out->dma);
1178 release_dma_lock(flags);
1179 }
1180 return 0;
1181}
1182
1183void DMAbuf_close_dma(int dev)
1184{
1185 close_dmap(audio_devs[dev], audio_devs[dev]->dmap_out);
1186}
1187 1161
1188void DMAbuf_init(int dev, int dma1, int dma2) 1162void DMAbuf_init(int dev, int dma1, int dma2)
1189{ 1163{
@@ -1192,12 +1166,6 @@ void DMAbuf_init(int dev, int dma1, int dma2)
1192 * NOTE! This routine could be called several times. 1166 * NOTE! This routine could be called several times.
1193 */ 1167 */
1194 1168
1195 /* drag in audio_syms.o */
1196 {
1197 extern char audio_syms_symbol;
1198 audio_syms_symbol = 0;
1199 }
1200
1201 if (adev && adev->dmap_out == NULL) { 1169 if (adev && adev->dmap_out == NULL) {
1202 if (adev->d == NULL) 1170 if (adev->d == NULL)
1203 panic("OSS: audio_devs[%d]->d == NULL\n", dev); 1171 panic("OSS: audio_devs[%d]->d == NULL\n", dev);
diff --git a/sound/oss/es1370.c b/sound/oss/es1370.c
deleted file mode 100644
index 13f483149737..000000000000
--- a/sound/oss/es1370.c
+++ /dev/null
@@ -1,2819 +0,0 @@
1/*****************************************************************************/
2
3/*
4 * es1370.c -- Ensoniq ES1370/Asahi Kasei AK4531 audio driver.
5 *
6 * Copyright (C) 1998-2001, 2003 Thomas Sailer (t.sailer@alumni.ethz.ch)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 * Special thanks to David C. Niemi
23 *
24 *
25 * Module command line parameters:
26 * lineout if 1 the LINE jack is used as an output instead of an input.
27 * LINE then contains the unmixed dsp output. This can be used
28 * to make the card a four channel one: use dsp to output two
29 * channels to LINE and dac to output the other two channels to
30 * SPKR. Set the mixer to only output synth to SPKR.
31 * micbias sets the +5V bias to the mic if using an electretmic.
32 *
33 *
34 * Note: sync mode is not yet supported (i.e. running dsp and dac from the same
35 * clock source)
36 *
37 * Supported devices:
38 * /dev/dsp standard /dev/dsp device, (mostly) OSS compatible
39 * /dev/mixer standard /dev/mixer device, (mostly) OSS compatible
40 * /dev/dsp1 additional DAC, like /dev/dsp, but output only,
41 * only 5512, 11025, 22050 and 44100 samples/s,
42 * outputs to mixer "SYNTH" setting
43 * /dev/midi simple MIDI UART interface, no ioctl
44 *
45 * NOTE: the card does not have any FM/Wavetable synthesizer, it is supposed
46 * to be done in software. That is what /dev/dac is for. By now (Q2 1998)
47 * there are several MIDI to PCM (WAV) packages, one of them is timidity.
48 *
49 * Revision history
50 * 26.03.1998 0.1 Initial release
51 * 31.03.1998 0.2 Fix bug in GETOSPACE
52 * 04.04.1998 0.3 Make it work (again) under 2.0.33
53 * Fix mixer write operation not returning the actual
54 * settings
55 * 05.04.1998 0.4 First attempt at using the new PCI stuff
56 * 29.04.1998 0.5 Fix hang when ^C is pressed on amp
57 * 07.05.1998 0.6 Don't double lock around stop_*() in *_release()
58 * 10.05.1998 0.7 First stab at a simple midi interface (no bells&whistles)
59 * 14.05.1998 0.8 Don't allow excessive interrupt rates
60 * 08.06.1998 0.9 First release using Alan Cox' soundcore instead of
61 * miscdevice
62 * 05.07.1998 0.10 Fixed the driver to correctly maintin OSS style volume
63 * settings (not sure if this should be standard)
64 * Fixed many references: f_flags should be f_mode
65 * -- Gerald Britton <gbritton@mit.edu>
66 * 03.08.1998 0.11 Now mixer behaviour can basically be selected between
67 * "OSS documented" and "OSS actual" behaviour
68 * Fixed mixer table thanks to Hakan.Lennestal@lu.erisoft.se
69 * On module startup, set DAC2 to 11kSPS instead of 5.5kSPS,
70 * as it produces an annoying ssssh in the lower sampling rate
71 * Do not include modversions.h
72 * 22.08.1998 0.12 Mixer registers actually have 5 instead of 4 bits
73 * pointed out by Itai Nahshon
74 * 31.08.1998 0.13 Fix realplayer problems - dac.count issues
75 * 08.10.1998 0.14 Joystick support fixed
76 * -- Oliver Neukum <c188@org.chemie.uni-muenchen.de>
77 * 10.12.1998 0.15 Fix drain_dac trying to wait on not yet initialized DMA
78 * 16.12.1998 0.16 Don't wake up app until there are fragsize bytes to read/write
79 * 06.01.1999 0.17 remove the silly SA_INTERRUPT flag.
80 * hopefully killed the egcs section type conflict
81 * 12.03.1999 0.18 cinfo.blocks should be reset after GETxPTR ioctl.
82 * reported by Johan Maes <joma@telindus.be>
83 * 22.03.1999 0.19 return EAGAIN instead of EBUSY when O_NONBLOCK
84 * read/write cannot be executed
85 * 07.04.1999 0.20 implemented the following ioctl's: SOUND_PCM_READ_RATE,
86 * SOUND_PCM_READ_CHANNELS, SOUND_PCM_READ_BITS;
87 * Alpha fixes reported by Peter Jones <pjones@redhat.com>
88 * Note: joystick address handling might still be wrong on archs
89 * other than i386
90 * 10.05.1999 0.21 Added support for an electret mic for SB PCI64
91 * to the Linux kernel sound driver. This mod also straighten
92 * out the question marks around the mic impedance setting
93 * (micz). From Kim.Berts@fisub.mail.abb.com
94 * 11.05.1999 0.22 Implemented the IMIX call to mute recording monitor.
95 * Guenter Geiger <geiger@epy.co.at>
96 * 15.06.1999 0.23 Fix bad allocation bug.
97 * Thanks to Deti Fliegl <fliegl@in.tum.de>
98 * 28.06.1999 0.24 Add pci_set_master
99 * 02.08.1999 0.25 Added workaround for the "phantom write" bug first
100 * documented by Dave Sharpless from Anchor Games
101 * 03.08.1999 0.26 adapt to Linus' new __setup/__initcall
102 * added kernel command line option "es1370=joystick[,lineout[,micbias]]"
103 * removed CONFIG_SOUND_ES1370_JOYPORT_BOOT kludge
104 * 12.08.1999 0.27 module_init/__setup fixes
105 * 19.08.1999 0.28 SOUND_MIXER_IMIX fixes, reported by Gianluca <gialluca@mail.tiscalinet.it>
106 * 31.08.1999 0.29 add spin_lock_init
107 * replaced current->state = x with set_current_state(x)
108 * 03.09.1999 0.30 change read semantics for MIDI to match
109 * OSS more closely; remove possible wakeup race
110 * 28.10.1999 0.31 More waitqueue races fixed
111 * 08.01.2000 0.32 Prevent some ioctl's from returning bad count values on underrun/overrun;
112 * Tim Janik's BSE (Bedevilled Sound Engine) found this
113 * 07.02.2000 0.33 Use pci_alloc_consistent and pci_register_driver
114 * 21.11.2000 0.34 Initialize dma buffers in poll, otherwise poll may return a bogus mask
115 * 12.12.2000 0.35 More dma buffer initializations, patch from
116 * Tjeerd Mulder <tjeerd.mulder@fujitsu-siemens.com>
117 * 07.01.2001 0.36 Timeout change in wrcodec as requested by Frank Klemm <pfk@fuchs.offl.uni-jena.de>
118 * 31.01.2001 0.37 Register/Unregister gameport
119 * Fix SETTRIGGER non OSS API conformity
120 * 03.01.2003 0.38 open_mode fixes from Georg Acher <acher@in.tum.de>
121 *
122 * some important things missing in Ensoniq documentation:
123 *
124 * Experimental PCLKDIV results: play the same waveforms on both DAC1 and DAC2
125 * and vary PCLKDIV to obtain zero beat.
126 * 5512sps: 254
127 * 44100sps: 30
128 * seems to be fs = 1411200/(PCLKDIV+2)
129 *
130 * should find out when curr_sample_ct is cleared and
131 * where exactly the CCB fetches data
132 *
133 * The card uses a 22.5792 MHz crystal.
134 * The LINEIN jack may be converted to an AOUT jack by
135 * setting pin 47 (XCTL0) of the ES1370 to high.
136 * Pin 48 (XCTL1) of the ES1370 sets the +5V bias for an electretmic
137 *
138 *
139 */
140
141/*****************************************************************************/
142
143#include <linux/interrupt.h>
144#include <linux/module.h>
145#include <linux/string.h>
146#include <linux/ioport.h>
147#include <linux/sched.h>
148#include <linux/delay.h>
149#include <linux/sound.h>
150#include <linux/slab.h>
151#include <linux/soundcard.h>
152#include <linux/pci.h>
153#include <linux/smp_lock.h>
154#include <linux/init.h>
155#include <linux/poll.h>
156#include <linux/spinlock.h>
157#include <linux/gameport.h>
158#include <linux/wait.h>
159#include <linux/dma-mapping.h>
160#include <linux/mutex.h>
161
162#include <asm/io.h>
163#include <asm/page.h>
164#include <asm/uaccess.h>
165
166#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
167#define SUPPORT_JOYSTICK
168#endif
169
170/* --------------------------------------------------------------------- */
171
172#undef OSS_DOCUMENTED_MIXER_SEMANTICS
173#define DBG(x) {}
174/*#define DBG(x) {x}*/
175
176/* --------------------------------------------------------------------- */
177
178#ifndef PCI_VENDOR_ID_ENSONIQ
179#define PCI_VENDOR_ID_ENSONIQ 0x1274
180#endif
181
182#ifndef PCI_DEVICE_ID_ENSONIQ_ES1370
183#define PCI_DEVICE_ID_ENSONIQ_ES1370 0x5000
184#endif
185
186#define ES1370_MAGIC ((PCI_VENDOR_ID_ENSONIQ<<16)|PCI_DEVICE_ID_ENSONIQ_ES1370)
187
188#define ES1370_EXTENT 0x40
189#define JOY_EXTENT 8
190
191#define ES1370_REG_CONTROL 0x00
192#define ES1370_REG_STATUS 0x04
193#define ES1370_REG_UART_DATA 0x08
194#define ES1370_REG_UART_STATUS 0x09
195#define ES1370_REG_UART_CONTROL 0x09
196#define ES1370_REG_UART_TEST 0x0a
197#define ES1370_REG_MEMPAGE 0x0c
198#define ES1370_REG_CODEC 0x10
199#define ES1370_REG_SERIAL_CONTROL 0x20
200#define ES1370_REG_DAC1_SCOUNT 0x24
201#define ES1370_REG_DAC2_SCOUNT 0x28
202#define ES1370_REG_ADC_SCOUNT 0x2c
203
204#define ES1370_REG_DAC1_FRAMEADR 0xc30
205#define ES1370_REG_DAC1_FRAMECNT 0xc34
206#define ES1370_REG_DAC2_FRAMEADR 0xc38
207#define ES1370_REG_DAC2_FRAMECNT 0xc3c
208#define ES1370_REG_ADC_FRAMEADR 0xd30
209#define ES1370_REG_ADC_FRAMECNT 0xd34
210#define ES1370_REG_PHANTOM_FRAMEADR 0xd38
211#define ES1370_REG_PHANTOM_FRAMECNT 0xd3c
212
213#define ES1370_FMT_U8_MONO 0
214#define ES1370_FMT_U8_STEREO 1
215#define ES1370_FMT_S16_MONO 2
216#define ES1370_FMT_S16_STEREO 3
217#define ES1370_FMT_STEREO 1
218#define ES1370_FMT_S16 2
219#define ES1370_FMT_MASK 3
220
221static const unsigned sample_size[] = { 1, 2, 2, 4 };
222static const unsigned sample_shift[] = { 0, 1, 1, 2 };
223
224static const unsigned dac1_samplerate[] = { 5512, 11025, 22050, 44100 };
225
226#define DAC2_SRTODIV(x) (((1411200+(x)/2)/(x))-2)
227#define DAC2_DIVTOSR(x) (1411200/((x)+2))
228
229#define CTRL_ADC_STOP 0x80000000 /* 1 = ADC stopped */
230#define CTRL_XCTL1 0x40000000 /* electret mic bias */
231#define CTRL_OPEN 0x20000000 /* no function, can be read and written */
232#define CTRL_PCLKDIV 0x1fff0000 /* ADC/DAC2 clock divider */
233#define CTRL_SH_PCLKDIV 16
234#define CTRL_MSFMTSEL 0x00008000 /* MPEG serial data fmt: 0 = Sony, 1 = I2S */
235#define CTRL_M_SBB 0x00004000 /* DAC2 clock: 0 = PCLKDIV, 1 = MPEG */
236#define CTRL_WTSRSEL 0x00003000 /* DAC1 clock freq: 0=5512, 1=11025, 2=22050, 3=44100 */
237#define CTRL_SH_WTSRSEL 12
238#define CTRL_DAC_SYNC 0x00000800 /* 1 = DAC2 runs off DAC1 clock */
239#define CTRL_CCB_INTRM 0x00000400 /* 1 = CCB "voice" ints enabled */
240#define CTRL_M_CB 0x00000200 /* recording source: 0 = ADC, 1 = MPEG */
241#define CTRL_XCTL0 0x00000100 /* 0 = Line in, 1 = Line out */
242#define CTRL_BREQ 0x00000080 /* 1 = test mode (internal mem test) */
243#define CTRL_DAC1_EN 0x00000040 /* enable DAC1 */
244#define CTRL_DAC2_EN 0x00000020 /* enable DAC2 */
245#define CTRL_ADC_EN 0x00000010 /* enable ADC */
246#define CTRL_UART_EN 0x00000008 /* enable MIDI uart */
247#define CTRL_JYSTK_EN 0x00000004 /* enable Joystick port (presumably at address 0x200) */
248#define CTRL_CDC_EN 0x00000002 /* enable serial (CODEC) interface */
249#define CTRL_SERR_DIS 0x00000001 /* 1 = disable PCI SERR signal */
250
251#define STAT_INTR 0x80000000 /* wired or of all interrupt bits */
252#define STAT_CSTAT 0x00000400 /* 1 = codec busy or codec write in progress */
253#define STAT_CBUSY 0x00000200 /* 1 = codec busy */
254#define STAT_CWRIP 0x00000100 /* 1 = codec write in progress */
255#define STAT_VC 0x00000060 /* CCB int source, 0=DAC1, 1=DAC2, 2=ADC, 3=undef */
256#define STAT_SH_VC 5
257#define STAT_MCCB 0x00000010 /* CCB int pending */
258#define STAT_UART 0x00000008 /* UART int pending */
259#define STAT_DAC1 0x00000004 /* DAC1 int pending */
260#define STAT_DAC2 0x00000002 /* DAC2 int pending */
261#define STAT_ADC 0x00000001 /* ADC int pending */
262
263#define USTAT_RXINT 0x80 /* UART rx int pending */
264#define USTAT_TXINT 0x04 /* UART tx int pending */
265#define USTAT_TXRDY 0x02 /* UART tx ready */
266#define USTAT_RXRDY 0x01 /* UART rx ready */
267
268#define UCTRL_RXINTEN 0x80 /* 1 = enable RX ints */
269#define UCTRL_TXINTEN 0x60 /* TX int enable field mask */
270#define UCTRL_ENA_TXINT 0x20 /* enable TX int */
271#define UCTRL_CNTRL 0x03 /* control field */
272#define UCTRL_CNTRL_SWR 0x03 /* software reset command */
273
274#define SCTRL_P2ENDINC 0x00380000 /* */
275#define SCTRL_SH_P2ENDINC 19
276#define SCTRL_P2STINC 0x00070000 /* */
277#define SCTRL_SH_P2STINC 16
278#define SCTRL_R1LOOPSEL 0x00008000 /* 0 = loop mode */
279#define SCTRL_P2LOOPSEL 0x00004000 /* 0 = loop mode */
280#define SCTRL_P1LOOPSEL 0x00002000 /* 0 = loop mode */
281#define SCTRL_P2PAUSE 0x00001000 /* 1 = pause mode */
282#define SCTRL_P1PAUSE 0x00000800 /* 1 = pause mode */
283#define SCTRL_R1INTEN 0x00000400 /* enable interrupt */
284#define SCTRL_P2INTEN 0x00000200 /* enable interrupt */
285#define SCTRL_P1INTEN 0x00000100 /* enable interrupt */
286#define SCTRL_P1SCTRLD 0x00000080 /* reload sample count register for DAC1 */
287#define SCTRL_P2DACSEN 0x00000040 /* 1 = DAC2 play back last sample when disabled */
288#define SCTRL_R1SEB 0x00000020 /* 1 = 16bit */
289#define SCTRL_R1SMB 0x00000010 /* 1 = stereo */
290#define SCTRL_R1FMT 0x00000030 /* format mask */
291#define SCTRL_SH_R1FMT 4
292#define SCTRL_P2SEB 0x00000008 /* 1 = 16bit */
293#define SCTRL_P2SMB 0x00000004 /* 1 = stereo */
294#define SCTRL_P2FMT 0x0000000c /* format mask */
295#define SCTRL_SH_P2FMT 2
296#define SCTRL_P1SEB 0x00000002 /* 1 = 16bit */
297#define SCTRL_P1SMB 0x00000001 /* 1 = stereo */
298#define SCTRL_P1FMT 0x00000003 /* format mask */
299#define SCTRL_SH_P1FMT 0
300
301/* misc stuff */
302
303#define FMODE_DAC 4 /* slight misuse of mode_t */
304
305/* MIDI buffer sizes */
306
307#define MIDIINBUF 256
308#define MIDIOUTBUF 256
309
310#define FMODE_MIDI_SHIFT 3
311#define FMODE_MIDI_READ (FMODE_READ << FMODE_MIDI_SHIFT)
312#define FMODE_MIDI_WRITE (FMODE_WRITE << FMODE_MIDI_SHIFT)
313
314/* --------------------------------------------------------------------- */
315
316struct es1370_state {
317 /* magic */
318 unsigned int magic;
319
320 /* list of es1370 devices */
321 struct list_head devs;
322
323 /* the corresponding pci_dev structure */
324 struct pci_dev *dev;
325
326 /* soundcore stuff */
327 int dev_audio;
328 int dev_mixer;
329 int dev_dac;
330 int dev_midi;
331
332 /* hardware resources */
333 unsigned long io; /* long for SPARC */
334 unsigned int irq;
335
336 /* mixer registers; there is no HW readback */
337 struct {
338 unsigned short vol[10];
339 unsigned int recsrc;
340 unsigned int modcnt;
341 unsigned short micpreamp;
342 unsigned int imix;
343 } mix;
344
345 /* wave stuff */
346 unsigned ctrl;
347 unsigned sctrl;
348
349 spinlock_t lock;
350 struct mutex open_mutex;
351 mode_t open_mode;
352 wait_queue_head_t open_wait;
353
354 struct dmabuf {
355 void *rawbuf;
356 dma_addr_t dmaaddr;
357 unsigned buforder;
358 unsigned numfrag;
359 unsigned fragshift;
360 unsigned hwptr, swptr;
361 unsigned total_bytes;
362 int count;
363 unsigned error; /* over/underrun */
364 wait_queue_head_t wait;
365 /* redundant, but makes calculations easier */
366 unsigned fragsize;
367 unsigned dmasize;
368 unsigned fragsamples;
369 /* OSS stuff */
370 unsigned mapped:1;
371 unsigned ready:1;
372 unsigned endcleared:1;
373 unsigned enabled:1;
374 unsigned ossfragshift;
375 int ossmaxfrags;
376 unsigned subdivision;
377 } dma_dac1, dma_dac2, dma_adc;
378
379 /* The following buffer is used to point the phantom write channel to. */
380 unsigned char *bugbuf_cpu;
381 dma_addr_t bugbuf_dma;
382
383 /* midi stuff */
384 struct {
385 unsigned ird, iwr, icnt;
386 unsigned ord, owr, ocnt;
387 wait_queue_head_t iwait;
388 wait_queue_head_t owait;
389 unsigned char ibuf[MIDIINBUF];
390 unsigned char obuf[MIDIOUTBUF];
391 } midi;
392
393#ifdef SUPPORT_JOYSTICK
394 struct gameport *gameport;
395#endif
396
397 struct mutex mutex;
398};
399
400/* --------------------------------------------------------------------- */
401
402static LIST_HEAD(devs);
403
404/* --------------------------------------------------------------------- */
405
406static inline unsigned ld2(unsigned int x)
407{
408 unsigned r = 0;
409
410 if (x >= 0x10000) {
411 x >>= 16;
412 r += 16;
413 }
414 if (x >= 0x100) {
415 x >>= 8;
416 r += 8;
417 }
418 if (x >= 0x10) {
419 x >>= 4;
420 r += 4;
421 }
422 if (x >= 4) {
423 x >>= 2;
424 r += 2;
425 }
426 if (x >= 2)
427 r++;
428 return r;
429}
430
431/* --------------------------------------------------------------------- */
432
433static void wrcodec(struct es1370_state *s, unsigned char idx, unsigned char data)
434{
435 unsigned long tmo = jiffies + HZ/10, j;
436
437 do {
438 j = jiffies;
439 if (!(inl(s->io+ES1370_REG_STATUS) & STAT_CSTAT)) {
440 outw((((unsigned short)idx)<<8)|data, s->io+ES1370_REG_CODEC);
441 return;
442 }
443 schedule();
444 } while ((signed)(tmo-j) > 0);
445 printk(KERN_ERR "es1370: write to codec register timeout\n");
446}
447
448/* --------------------------------------------------------------------- */
449
450static inline void stop_adc(struct es1370_state *s)
451{
452 unsigned long flags;
453
454 spin_lock_irqsave(&s->lock, flags);
455 s->ctrl &= ~CTRL_ADC_EN;
456 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
457 spin_unlock_irqrestore(&s->lock, flags);
458}
459
460static inline void stop_dac1(struct es1370_state *s)
461{
462 unsigned long flags;
463
464 spin_lock_irqsave(&s->lock, flags);
465 s->ctrl &= ~CTRL_DAC1_EN;
466 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
467 spin_unlock_irqrestore(&s->lock, flags);
468}
469
470static inline void stop_dac2(struct es1370_state *s)
471{
472 unsigned long flags;
473
474 spin_lock_irqsave(&s->lock, flags);
475 s->ctrl &= ~CTRL_DAC2_EN;
476 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
477 spin_unlock_irqrestore(&s->lock, flags);
478}
479
480static void start_dac1(struct es1370_state *s)
481{
482 unsigned long flags;
483 unsigned fragremain, fshift;
484
485 spin_lock_irqsave(&s->lock, flags);
486 if (!(s->ctrl & CTRL_DAC1_EN) && (s->dma_dac1.mapped || s->dma_dac1.count > 0)
487 && s->dma_dac1.ready) {
488 s->ctrl |= CTRL_DAC1_EN;
489 s->sctrl = (s->sctrl & ~(SCTRL_P1LOOPSEL | SCTRL_P1PAUSE | SCTRL_P1SCTRLD)) | SCTRL_P1INTEN;
490 outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
491 fragremain = ((- s->dma_dac1.hwptr) & (s->dma_dac1.fragsize-1));
492 fshift = sample_shift[(s->sctrl & SCTRL_P1FMT) >> SCTRL_SH_P1FMT];
493 if (fragremain < 2*fshift)
494 fragremain = s->dma_dac1.fragsize;
495 outl((fragremain >> fshift) - 1, s->io+ES1370_REG_DAC1_SCOUNT);
496 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
497 outl((s->dma_dac1.fragsize >> fshift) - 1, s->io+ES1370_REG_DAC1_SCOUNT);
498 }
499 spin_unlock_irqrestore(&s->lock, flags);
500}
501
502static void start_dac2(struct es1370_state *s)
503{
504 unsigned long flags;
505 unsigned fragremain, fshift;
506
507 spin_lock_irqsave(&s->lock, flags);
508 if (!(s->ctrl & CTRL_DAC2_EN) && (s->dma_dac2.mapped || s->dma_dac2.count > 0)
509 && s->dma_dac2.ready) {
510 s->ctrl |= CTRL_DAC2_EN;
511 s->sctrl = (s->sctrl & ~(SCTRL_P2LOOPSEL | SCTRL_P2PAUSE | SCTRL_P2DACSEN |
512 SCTRL_P2ENDINC | SCTRL_P2STINC)) | SCTRL_P2INTEN |
513 (((s->sctrl & SCTRL_P2FMT) ? 2 : 1) << SCTRL_SH_P2ENDINC) |
514 (0 << SCTRL_SH_P2STINC);
515 outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
516 fragremain = ((- s->dma_dac2.hwptr) & (s->dma_dac2.fragsize-1));
517 fshift = sample_shift[(s->sctrl & SCTRL_P2FMT) >> SCTRL_SH_P2FMT];
518 if (fragremain < 2*fshift)
519 fragremain = s->dma_dac2.fragsize;
520 outl((fragremain >> fshift) - 1, s->io+ES1370_REG_DAC2_SCOUNT);
521 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
522 outl((s->dma_dac2.fragsize >> fshift) - 1, s->io+ES1370_REG_DAC2_SCOUNT);
523 }
524 spin_unlock_irqrestore(&s->lock, flags);
525}
526
527static void start_adc(struct es1370_state *s)
528{
529 unsigned long flags;
530 unsigned fragremain, fshift;
531
532 spin_lock_irqsave(&s->lock, flags);
533 if (!(s->ctrl & CTRL_ADC_EN) && (s->dma_adc.mapped || s->dma_adc.count < (signed)(s->dma_adc.dmasize - 2*s->dma_adc.fragsize))
534 && s->dma_adc.ready) {
535 s->ctrl |= CTRL_ADC_EN;
536 s->sctrl = (s->sctrl & ~SCTRL_R1LOOPSEL) | SCTRL_R1INTEN;
537 outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
538 fragremain = ((- s->dma_adc.hwptr) & (s->dma_adc.fragsize-1));
539 fshift = sample_shift[(s->sctrl & SCTRL_R1FMT) >> SCTRL_SH_R1FMT];
540 if (fragremain < 2*fshift)
541 fragremain = s->dma_adc.fragsize;
542 outl((fragremain >> fshift) - 1, s->io+ES1370_REG_ADC_SCOUNT);
543 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
544 outl((s->dma_adc.fragsize >> fshift) - 1, s->io+ES1370_REG_ADC_SCOUNT);
545 }
546 spin_unlock_irqrestore(&s->lock, flags);
547}
548
549/* --------------------------------------------------------------------- */
550
551#define DMABUF_DEFAULTORDER (17-PAGE_SHIFT)
552#define DMABUF_MINORDER 1
553
554static inline void dealloc_dmabuf(struct es1370_state *s, struct dmabuf *db)
555{
556 struct page *page, *pend;
557
558 if (db->rawbuf) {
559 /* undo marking the pages as reserved */
560 pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
561 for (page = virt_to_page(db->rawbuf); page <= pend; page++)
562 ClearPageReserved(page);
563 pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
564 }
565 db->rawbuf = NULL;
566 db->mapped = db->ready = 0;
567}
568
569static int prog_dmabuf(struct es1370_state *s, struct dmabuf *db, unsigned rate, unsigned fmt, unsigned reg)
570{
571 int order;
572 unsigned bytepersec;
573 unsigned bufs;
574 struct page *page, *pend;
575
576 db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
577 if (!db->rawbuf) {
578 db->ready = db->mapped = 0;
579 for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
580 if ((db->rawbuf = pci_alloc_consistent(s->dev, PAGE_SIZE << order, &db->dmaaddr)))
581 break;
582 if (!db->rawbuf)
583 return -ENOMEM;
584 db->buforder = order;
585 /* now mark the pages as reserved; otherwise remap_pfn_range doesn't do what we want */
586 pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
587 for (page = virt_to_page(db->rawbuf); page <= pend; page++)
588 SetPageReserved(page);
589 }
590 fmt &= ES1370_FMT_MASK;
591 bytepersec = rate << sample_shift[fmt];
592 bufs = PAGE_SIZE << db->buforder;
593 if (db->ossfragshift) {
594 if ((1000 << db->ossfragshift) < bytepersec)
595 db->fragshift = ld2(bytepersec/1000);
596 else
597 db->fragshift = db->ossfragshift;
598 } else {
599 db->fragshift = ld2(bytepersec/100/(db->subdivision ? db->subdivision : 1));
600 if (db->fragshift < 3)
601 db->fragshift = 3;
602 }
603 db->numfrag = bufs >> db->fragshift;
604 while (db->numfrag < 4 && db->fragshift > 3) {
605 db->fragshift--;
606 db->numfrag = bufs >> db->fragshift;
607 }
608 db->fragsize = 1 << db->fragshift;
609 if (db->ossmaxfrags >= 4 && db->ossmaxfrags < db->numfrag)
610 db->numfrag = db->ossmaxfrags;
611 db->fragsamples = db->fragsize >> sample_shift[fmt];
612 db->dmasize = db->numfrag << db->fragshift;
613 memset(db->rawbuf, (fmt & ES1370_FMT_S16) ? 0 : 0x80, db->dmasize);
614 outl((reg >> 8) & 15, s->io+ES1370_REG_MEMPAGE);
615 outl(db->dmaaddr, s->io+(reg & 0xff));
616 outl((db->dmasize >> 2)-1, s->io+((reg + 4) & 0xff));
617 db->enabled = 1;
618 db->ready = 1;
619 return 0;
620}
621
622static inline int prog_dmabuf_adc(struct es1370_state *s)
623{
624 stop_adc(s);
625 return prog_dmabuf(s, &s->dma_adc, DAC2_DIVTOSR((s->ctrl & CTRL_PCLKDIV) >> CTRL_SH_PCLKDIV),
626 (s->sctrl >> SCTRL_SH_R1FMT) & ES1370_FMT_MASK, ES1370_REG_ADC_FRAMEADR);
627}
628
629static inline int prog_dmabuf_dac2(struct es1370_state *s)
630{
631 stop_dac2(s);
632 return prog_dmabuf(s, &s->dma_dac2, DAC2_DIVTOSR((s->ctrl & CTRL_PCLKDIV) >> CTRL_SH_PCLKDIV),
633 (s->sctrl >> SCTRL_SH_P2FMT) & ES1370_FMT_MASK, ES1370_REG_DAC2_FRAMEADR);
634}
635
636static inline int prog_dmabuf_dac1(struct es1370_state *s)
637{
638 stop_dac1(s);
639 return prog_dmabuf(s, &s->dma_dac1, dac1_samplerate[(s->ctrl & CTRL_WTSRSEL) >> CTRL_SH_WTSRSEL],
640 (s->sctrl >> SCTRL_SH_P1FMT) & ES1370_FMT_MASK, ES1370_REG_DAC1_FRAMEADR);
641}
642
643static inline unsigned get_hwptr(struct es1370_state *s, struct dmabuf *db, unsigned reg)
644{
645 unsigned hwptr, diff;
646
647 outl((reg >> 8) & 15, s->io+ES1370_REG_MEMPAGE);
648 hwptr = (inl(s->io+(reg & 0xff)) >> 14) & 0x3fffc;
649 diff = (db->dmasize + hwptr - db->hwptr) % db->dmasize;
650 db->hwptr = hwptr;
651 return diff;
652}
653
654static inline void clear_advance(void *buf, unsigned bsize, unsigned bptr, unsigned len, unsigned char c)
655{
656 if (bptr + len > bsize) {
657 unsigned x = bsize - bptr;
658 memset(((char *)buf) + bptr, c, x);
659 bptr = 0;
660 len -= x;
661 }
662 memset(((char *)buf) + bptr, c, len);
663}
664
665/* call with spinlock held! */
666static void es1370_update_ptr(struct es1370_state *s)
667{
668 int diff;
669
670 /* update ADC pointer */
671 if (s->ctrl & CTRL_ADC_EN) {
672 diff = get_hwptr(s, &s->dma_adc, ES1370_REG_ADC_FRAMECNT);
673 s->dma_adc.total_bytes += diff;
674 s->dma_adc.count += diff;
675 if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
676 wake_up(&s->dma_adc.wait);
677 if (!s->dma_adc.mapped) {
678 if (s->dma_adc.count > (signed)(s->dma_adc.dmasize - ((3 * s->dma_adc.fragsize) >> 1))) {
679 s->ctrl &= ~CTRL_ADC_EN;
680 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
681 s->dma_adc.error++;
682 }
683 }
684 }
685 /* update DAC1 pointer */
686 if (s->ctrl & CTRL_DAC1_EN) {
687 diff = get_hwptr(s, &s->dma_dac1, ES1370_REG_DAC1_FRAMECNT);
688 s->dma_dac1.total_bytes += diff;
689 if (s->dma_dac1.mapped) {
690 s->dma_dac1.count += diff;
691 if (s->dma_dac1.count >= (signed)s->dma_dac1.fragsize)
692 wake_up(&s->dma_dac1.wait);
693 } else {
694 s->dma_dac1.count -= diff;
695 if (s->dma_dac1.count <= 0) {
696 s->ctrl &= ~CTRL_DAC1_EN;
697 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
698 s->dma_dac1.error++;
699 } else if (s->dma_dac1.count <= (signed)s->dma_dac1.fragsize && !s->dma_dac1.endcleared) {
700 clear_advance(s->dma_dac1.rawbuf, s->dma_dac1.dmasize, s->dma_dac1.swptr,
701 s->dma_dac1.fragsize, (s->sctrl & SCTRL_P1SEB) ? 0 : 0x80);
702 s->dma_dac1.endcleared = 1;
703 }
704 if (s->dma_dac1.count + (signed)s->dma_dac1.fragsize <= (signed)s->dma_dac1.dmasize)
705 wake_up(&s->dma_dac1.wait);
706 }
707 }
708 /* update DAC2 pointer */
709 if (s->ctrl & CTRL_DAC2_EN) {
710 diff = get_hwptr(s, &s->dma_dac2, ES1370_REG_DAC2_FRAMECNT);
711 s->dma_dac2.total_bytes += diff;
712 if (s->dma_dac2.mapped) {
713 s->dma_dac2.count += diff;
714 if (s->dma_dac2.count >= (signed)s->dma_dac2.fragsize)
715 wake_up(&s->dma_dac2.wait);
716 } else {
717 s->dma_dac2.count -= diff;
718 if (s->dma_dac2.count <= 0) {
719 s->ctrl &= ~CTRL_DAC2_EN;
720 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
721 s->dma_dac2.error++;
722 } else if (s->dma_dac2.count <= (signed)s->dma_dac2.fragsize && !s->dma_dac2.endcleared) {
723 clear_advance(s->dma_dac2.rawbuf, s->dma_dac2.dmasize, s->dma_dac2.swptr,
724 s->dma_dac2.fragsize, (s->sctrl & SCTRL_P2SEB) ? 0 : 0x80);
725 s->dma_dac2.endcleared = 1;
726 }
727 if (s->dma_dac2.count + (signed)s->dma_dac2.fragsize <= (signed)s->dma_dac2.dmasize)
728 wake_up(&s->dma_dac2.wait);
729 }
730 }
731}
732
733/* hold spinlock for the following! */
734static void es1370_handle_midi(struct es1370_state *s)
735{
736 unsigned char ch;
737 int wake;
738
739 if (!(s->ctrl & CTRL_UART_EN))
740 return;
741 wake = 0;
742 while (inb(s->io+ES1370_REG_UART_STATUS) & USTAT_RXRDY) {
743 ch = inb(s->io+ES1370_REG_UART_DATA);
744 if (s->midi.icnt < MIDIINBUF) {
745 s->midi.ibuf[s->midi.iwr] = ch;
746 s->midi.iwr = (s->midi.iwr + 1) % MIDIINBUF;
747 s->midi.icnt++;
748 }
749 wake = 1;
750 }
751 if (wake)
752 wake_up(&s->midi.iwait);
753 wake = 0;
754 while ((inb(s->io+ES1370_REG_UART_STATUS) & USTAT_TXRDY) && s->midi.ocnt > 0) {
755 outb(s->midi.obuf[s->midi.ord], s->io+ES1370_REG_UART_DATA);
756 s->midi.ord = (s->midi.ord + 1) % MIDIOUTBUF;
757 s->midi.ocnt--;
758 if (s->midi.ocnt < MIDIOUTBUF-16)
759 wake = 1;
760 }
761 if (wake)
762 wake_up(&s->midi.owait);
763 outb((s->midi.ocnt > 0) ? UCTRL_RXINTEN | UCTRL_ENA_TXINT : UCTRL_RXINTEN, s->io+ES1370_REG_UART_CONTROL);
764}
765
766static irqreturn_t es1370_interrupt(int irq, void *dev_id, struct pt_regs *regs)
767{
768 struct es1370_state *s = (struct es1370_state *)dev_id;
769 unsigned int intsrc, sctl;
770
771 /* fastpath out, to ease interrupt sharing */
772 intsrc = inl(s->io+ES1370_REG_STATUS);
773 if (!(intsrc & 0x80000000))
774 return IRQ_NONE;
775 spin_lock(&s->lock);
776 /* clear audio interrupts first */
777 sctl = s->sctrl;
778 if (intsrc & STAT_ADC)
779 sctl &= ~SCTRL_R1INTEN;
780 if (intsrc & STAT_DAC1)
781 sctl &= ~SCTRL_P1INTEN;
782 if (intsrc & STAT_DAC2)
783 sctl &= ~SCTRL_P2INTEN;
784 outl(sctl, s->io+ES1370_REG_SERIAL_CONTROL);
785 outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
786 es1370_update_ptr(s);
787 es1370_handle_midi(s);
788 spin_unlock(&s->lock);
789 return IRQ_HANDLED;
790}
791
792/* --------------------------------------------------------------------- */
793
794static const char invalid_magic[] = KERN_CRIT "es1370: invalid magic value\n";
795
796#define VALIDATE_STATE(s) \
797({ \
798 if (!(s) || (s)->magic != ES1370_MAGIC) { \
799 printk(invalid_magic); \
800 return -ENXIO; \
801 } \
802})
803
804/* --------------------------------------------------------------------- */
805
806static const struct {
807 unsigned volidx:4;
808 unsigned left:4;
809 unsigned right:4;
810 unsigned stereo:1;
811 unsigned recmask:13;
812 unsigned avail:1;
813} mixtable[SOUND_MIXER_NRDEVICES] = {
814 [SOUND_MIXER_VOLUME] = { 0, 0x0, 0x1, 1, 0x0000, 1 }, /* master */
815 [SOUND_MIXER_PCM] = { 1, 0x2, 0x3, 1, 0x0400, 1 }, /* voice */
816 [SOUND_MIXER_SYNTH] = { 2, 0x4, 0x5, 1, 0x0060, 1 }, /* FM */
817 [SOUND_MIXER_CD] = { 3, 0x6, 0x7, 1, 0x0006, 1 }, /* CD */
818 [SOUND_MIXER_LINE] = { 4, 0x8, 0x9, 1, 0x0018, 1 }, /* Line */
819 [SOUND_MIXER_LINE1] = { 5, 0xa, 0xb, 1, 0x1800, 1 }, /* AUX */
820 [SOUND_MIXER_LINE2] = { 6, 0xc, 0x0, 0, 0x0100, 1 }, /* Mono1 */
821 [SOUND_MIXER_LINE3] = { 7, 0xd, 0x0, 0, 0x0200, 1 }, /* Mono2 */
822 [SOUND_MIXER_MIC] = { 8, 0xe, 0x0, 0, 0x0001, 1 }, /* Mic */
823 [SOUND_MIXER_OGAIN] = { 9, 0xf, 0x0, 0, 0x0000, 1 } /* mono out */
824};
825
826static void set_recsrc(struct es1370_state *s, unsigned int val)
827{
828 unsigned int i, j;
829
830 for (j = i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
831 if (!(val & (1 << i)))
832 continue;
833 if (!mixtable[i].recmask) {
834 val &= ~(1 << i);
835 continue;
836 }
837 j |= mixtable[i].recmask;
838 }
839 s->mix.recsrc = val;
840 wrcodec(s, 0x12, j & 0xd5);
841 wrcodec(s, 0x13, j & 0xaa);
842 wrcodec(s, 0x14, (j >> 8) & 0x17);
843 wrcodec(s, 0x15, (j >> 8) & 0x0f);
844 i = (j & 0x37f) | ((j << 1) & 0x3000) | 0xc60;
845 if (!s->mix.imix) {
846 i &= 0xff60; /* mute record and line monitor */
847 }
848 wrcodec(s, 0x10, i);
849 wrcodec(s, 0x11, i >> 8);
850}
851
852static int mixer_ioctl(struct es1370_state *s, unsigned int cmd, unsigned long arg)
853{
854 unsigned long flags;
855 int i, val;
856 unsigned char l, r, rl, rr;
857 int __user *p = (int __user *)arg;
858
859 VALIDATE_STATE(s);
860 if (cmd == SOUND_MIXER_PRIVATE1) {
861 /* enable/disable/query mixer preamp */
862 if (get_user(val, p))
863 return -EFAULT;
864 if (val != -1) {
865 s->mix.micpreamp = !!val;
866 wrcodec(s, 0x19, s->mix.micpreamp);
867 }
868 return put_user(s->mix.micpreamp, p);
869 }
870 if (cmd == SOUND_MIXER_PRIVATE2) {
871 /* enable/disable/query use of linein as second lineout */
872 if (get_user(val, p))
873 return -EFAULT;
874 if (val != -1) {
875 spin_lock_irqsave(&s->lock, flags);
876 if (val)
877 s->ctrl |= CTRL_XCTL0;
878 else
879 s->ctrl &= ~CTRL_XCTL0;
880 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
881 spin_unlock_irqrestore(&s->lock, flags);
882 }
883 return put_user((s->ctrl & CTRL_XCTL0) ? 1 : 0, p);
884 }
885 if (cmd == SOUND_MIXER_PRIVATE3) {
886 /* enable/disable/query microphone impedance setting */
887 if (get_user(val, p))
888 return -EFAULT;
889 if (val != -1) {
890 spin_lock_irqsave(&s->lock, flags);
891 if (val)
892 s->ctrl |= CTRL_XCTL1;
893 else
894 s->ctrl &= ~CTRL_XCTL1;
895 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
896 spin_unlock_irqrestore(&s->lock, flags);
897 }
898 return put_user((s->ctrl & CTRL_XCTL1) ? 1 : 0, p);
899 }
900 if (cmd == SOUND_MIXER_INFO) {
901 mixer_info info;
902 strncpy(info.id, "ES1370", sizeof(info.id));
903 strncpy(info.name, "Ensoniq ES1370", sizeof(info.name));
904 info.modify_counter = s->mix.modcnt;
905 if (copy_to_user((void __user *)arg, &info, sizeof(info)))
906 return -EFAULT;
907 return 0;
908 }
909 if (cmd == SOUND_OLD_MIXER_INFO) {
910 _old_mixer_info info;
911 strncpy(info.id, "ES1370", sizeof(info.id));
912 strncpy(info.name, "Ensoniq ES1370", sizeof(info.name));
913 if (copy_to_user((void __user *)arg, &info, sizeof(info)))
914 return -EFAULT;
915 return 0;
916 }
917 if (cmd == OSS_GETVERSION)
918 return put_user(SOUND_VERSION, p);
919 if (_IOC_TYPE(cmd) != 'M' || _SIOC_SIZE(cmd) != sizeof(int))
920 return -EINVAL;
921 if (_SIOC_DIR(cmd) == _SIOC_READ) {
922 switch (_IOC_NR(cmd)) {
923 case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */
924 return put_user(s->mix.recsrc, p);
925
926 case SOUND_MIXER_DEVMASK: /* Arg contains a bit for each supported device */
927 val = SOUND_MASK_IMIX;
928 for (i = 0; i < SOUND_MIXER_NRDEVICES; i++)
929 if (mixtable[i].avail)
930 val |= 1 << i;
931 return put_user(val, p);
932
933 case SOUND_MIXER_RECMASK: /* Arg contains a bit for each supported recording source */
934 for (val = i = 0; i < SOUND_MIXER_NRDEVICES; i++)
935 if (mixtable[i].recmask)
936 val |= 1 << i;
937 return put_user(val, p);
938
939 case SOUND_MIXER_STEREODEVS: /* Mixer channels supporting stereo */
940 for (val = i = 0; i < SOUND_MIXER_NRDEVICES; i++)
941 if (mixtable[i].stereo)
942 val |= 1 << i;
943 return put_user(val, p);
944
945 case SOUND_MIXER_CAPS:
946 return put_user(0, p);
947
948 case SOUND_MIXER_IMIX:
949 return put_user(s->mix.imix, p);
950
951 default:
952 i = _IOC_NR(cmd);
953 if (i >= SOUND_MIXER_NRDEVICES || !mixtable[i].avail)
954 return -EINVAL;
955 return put_user(s->mix.vol[mixtable[i].volidx], p);
956 }
957 }
958 if (_SIOC_DIR(cmd) != (_SIOC_READ|_SIOC_WRITE))
959 return -EINVAL;
960 s->mix.modcnt++;
961 switch (_IOC_NR(cmd)) {
962
963 case SOUND_MIXER_IMIX:
964 if (get_user(s->mix.imix, p))
965 return -EFAULT;
966 set_recsrc(s, s->mix.recsrc);
967 return 0;
968
969 case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */
970 if (get_user(val, p))
971 return -EFAULT;
972 set_recsrc(s, val);
973 return 0;
974
975 default:
976 i = _IOC_NR(cmd);
977 if (i >= SOUND_MIXER_NRDEVICES || !mixtable[i].avail)
978 return -EINVAL;
979 if (get_user(val, p))
980 return -EFAULT;
981 l = val & 0xff;
982 if (l > 100)
983 l = 100;
984 if (mixtable[i].stereo) {
985 r = (val >> 8) & 0xff;
986 if (r > 100)
987 r = 100;
988 if (l < 7) {
989 rl = 0x80;
990 l = 0;
991 } else {
992 rl = 31 - ((l - 7) / 3);
993 l = (31 - rl) * 3 + 7;
994 }
995 if (r < 7) {
996 rr = 0x80;
997 r = 0;
998 } else {
999 rr = 31 - ((r - 7) / 3);
1000 r = (31 - rr) * 3 + 7;
1001 }
1002 wrcodec(s, mixtable[i].right, rr);
1003 } else {
1004 if (mixtable[i].left == 15) {
1005 if (l < 2) {
1006 rr = rl = 0x80;
1007 r = l = 0;
1008 } else {
1009 rl = 7 - ((l - 2) / 14);
1010 r = l = (7 - rl) * 14 + 2;
1011 }
1012 } else {
1013 if (l < 7) {
1014 rl = 0x80;
1015 r = l = 0;
1016 } else {
1017 rl = 31 - ((l - 7) / 3);
1018 r = l = (31 - rl) * 3 + 7;
1019 }
1020 }
1021 }
1022 wrcodec(s, mixtable[i].left, rl);
1023#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
1024 s->mix.vol[mixtable[i].volidx] = ((unsigned int)r << 8) | l;
1025#else
1026 s->mix.vol[mixtable[i].volidx] = val;
1027#endif
1028 return put_user(s->mix.vol[mixtable[i].volidx], p);
1029 }
1030}
1031
1032/* --------------------------------------------------------------------- */
1033
1034static int es1370_open_mixdev(struct inode *inode, struct file *file)
1035{
1036 unsigned int minor = iminor(inode);
1037 struct list_head *list;
1038 struct es1370_state *s;
1039
1040 for (list = devs.next; ; list = list->next) {
1041 if (list == &devs)
1042 return -ENODEV;
1043 s = list_entry(list, struct es1370_state, devs);
1044 if (s->dev_mixer == minor)
1045 break;
1046 }
1047 VALIDATE_STATE(s);
1048 file->private_data = s;
1049 return nonseekable_open(inode, file);
1050}
1051
1052static int es1370_release_mixdev(struct inode *inode, struct file *file)
1053{
1054 struct es1370_state *s = (struct es1370_state *)file->private_data;
1055
1056 VALIDATE_STATE(s);
1057 return 0;
1058}
1059
1060static int es1370_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1061{
1062 return mixer_ioctl((struct es1370_state *)file->private_data, cmd, arg);
1063}
1064
/* File operations for the OSS mixer device node. */
static /*const*/ struct file_operations es1370_mixer_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.ioctl		= es1370_ioctl_mixdev,
	.open		= es1370_open_mixdev,
	.release	= es1370_release_mixdev,
};
1072
1073/* --------------------------------------------------------------------- */
1074
/*
 * Wait until the DAC1 (synth) DMA buffer has fully drained.
 * Returns 0 when drained or not applicable, -EBUSY for nonblocking
 * callers with data still queued, -ERESTARTSYS on signal.
 */
static int drain_dac1(struct es1370_state *s, int nonblock)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int count, tmo;

	/* mmap()ed buffers belong to userspace; nothing to drain. */
	if (s->dma_dac1.mapped || !s->dma_dac1.ready)
		return 0;
	add_wait_queue(&s->dma_dac1.wait, &wait);
	for (;;) {
		/* Mark interruptible before sampling the count so a wakeup
		 * between the check and the sleep is not lost. */
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&s->lock, flags);
		count = s->dma_dac1.count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (count <= 0)
			break;
		if (signal_pending(current))
			break;
		if (nonblock) {
			remove_wait_queue(&s->dma_dac1.wait, &wait);
			set_current_state(TASK_RUNNING);
			return -EBUSY;
		}
		/* Timeout: ~1.5x the playing time of the remaining bytes at
		 * the current DAC1 sample rate, scaled by the sample format. */
		tmo = 3 * HZ * (count + s->dma_dac1.fragsize) / 2
			/ dac1_samplerate[(s->ctrl & CTRL_WTSRSEL) >> CTRL_SH_WTSRSEL];
		tmo >>= sample_shift[(s->sctrl & SCTRL_P1FMT) >> SCTRL_SH_P1FMT];
		if (!schedule_timeout(tmo + 1))
			DBG(printk(KERN_DEBUG "es1370: dma timed out??\n");)
	}
	remove_wait_queue(&s->dma_dac1.wait, &wait);
	set_current_state(TASK_RUNNING);
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
1110
/*
 * Wait until the DAC2 (main playback) DMA buffer has fully drained.
 * Same contract as drain_dac1(): 0, -EBUSY (nonblocking with data
 * still queued) or -ERESTARTSYS (signal).
 */
static int drain_dac2(struct es1370_state *s, int nonblock)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int count, tmo;

	/* mmap()ed buffers belong to userspace; nothing to drain. */
	if (s->dma_dac2.mapped || !s->dma_dac2.ready)
		return 0;
	add_wait_queue(&s->dma_dac2.wait, &wait);
	for (;;) {
		/* Mark interruptible before sampling the count so a wakeup
		 * between the check and the sleep is not lost. */
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&s->lock, flags);
		count = s->dma_dac2.count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (count <= 0)
			break;
		if (signal_pending(current))
			break;
		if (nonblock) {
			remove_wait_queue(&s->dma_dac2.wait, &wait);
			set_current_state(TASK_RUNNING);
			return -EBUSY;
		}
		/* Timeout: ~1.5x the playing time of the remaining bytes at
		 * the current DAC2 sample rate, scaled by the sample format. */
		tmo = 3 * HZ * (count + s->dma_dac2.fragsize) / 2
			/ DAC2_DIVTOSR((s->ctrl & CTRL_PCLKDIV) >> CTRL_SH_PCLKDIV);
		tmo >>= sample_shift[(s->sctrl & SCTRL_P2FMT) >> SCTRL_SH_P2FMT];
		if (!schedule_timeout(tmo + 1))
			DBG(printk(KERN_DEBUG "es1370: dma timed out??\n");)
	}
	remove_wait_queue(&s->dma_dac2.wait, &wait);
	set_current_state(TASK_RUNNING);
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
1146
1147/* --------------------------------------------------------------------- */
1148
1149static ssize_t es1370_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
1150{
1151 struct es1370_state *s = (struct es1370_state *)file->private_data;
1152 DECLARE_WAITQUEUE(wait, current);
1153 ssize_t ret = 0;
1154 unsigned long flags;
1155 unsigned swptr;
1156 int cnt;
1157
1158 VALIDATE_STATE(s);
1159 if (s->dma_adc.mapped)
1160 return -ENXIO;
1161 if (!access_ok(VERIFY_WRITE, buffer, count))
1162 return -EFAULT;
1163 mutex_lock(&s->mutex);
1164 if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
1165 goto out;
1166
1167 add_wait_queue(&s->dma_adc.wait, &wait);
1168 while (count > 0) {
1169 spin_lock_irqsave(&s->lock, flags);
1170 swptr = s->dma_adc.swptr;
1171 cnt = s->dma_adc.dmasize-swptr;
1172 if (s->dma_adc.count < cnt)
1173 cnt = s->dma_adc.count;
1174 if (cnt <= 0)
1175 __set_current_state(TASK_INTERRUPTIBLE);
1176 spin_unlock_irqrestore(&s->lock, flags);
1177 if (cnt > count)
1178 cnt = count;
1179 if (cnt <= 0) {
1180 if (s->dma_adc.enabled)
1181 start_adc(s);
1182 if (file->f_flags & O_NONBLOCK) {
1183 if (!ret)
1184 ret = -EAGAIN;
1185 goto out;
1186 }
1187 mutex_unlock(&s->mutex);
1188 schedule();
1189 if (signal_pending(current)) {
1190 if (!ret)
1191 ret = -ERESTARTSYS;
1192 goto out;
1193 }
1194 mutex_lock(&s->mutex);
1195 if (s->dma_adc.mapped)
1196 {
1197 ret = -ENXIO;
1198 goto out;
1199 }
1200 continue;
1201 }
1202 if (copy_to_user(buffer, s->dma_adc.rawbuf + swptr, cnt)) {
1203 if (!ret)
1204 ret = -EFAULT;
1205 goto out;
1206 }
1207 swptr = (swptr + cnt) % s->dma_adc.dmasize;
1208 spin_lock_irqsave(&s->lock, flags);
1209 s->dma_adc.swptr = swptr;
1210 s->dma_adc.count -= cnt;
1211 spin_unlock_irqrestore(&s->lock, flags);
1212 count -= cnt;
1213 buffer += cnt;
1214 ret += cnt;
1215 if (s->dma_adc.enabled)
1216 start_adc(s);
1217 }
1218out:
1219 mutex_unlock(&s->mutex);
1220 remove_wait_queue(&s->dma_adc.wait, &wait);
1221 set_current_state(TASK_RUNNING);
1222 return ret;
1223}
1224
1225static ssize_t es1370_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
1226{
1227 struct es1370_state *s = (struct es1370_state *)file->private_data;
1228 DECLARE_WAITQUEUE(wait, current);
1229 ssize_t ret = 0;
1230 unsigned long flags;
1231 unsigned swptr;
1232 int cnt;
1233
1234 VALIDATE_STATE(s);
1235 if (s->dma_dac2.mapped)
1236 return -ENXIO;
1237 if (!access_ok(VERIFY_READ, buffer, count))
1238 return -EFAULT;
1239 mutex_lock(&s->mutex);
1240 if (!s->dma_dac2.ready && (ret = prog_dmabuf_dac2(s)))
1241 goto out;
1242 ret = 0;
1243 add_wait_queue(&s->dma_dac2.wait, &wait);
1244 while (count > 0) {
1245 spin_lock_irqsave(&s->lock, flags);
1246 if (s->dma_dac2.count < 0) {
1247 s->dma_dac2.count = 0;
1248 s->dma_dac2.swptr = s->dma_dac2.hwptr;
1249 }
1250 swptr = s->dma_dac2.swptr;
1251 cnt = s->dma_dac2.dmasize-swptr;
1252 if (s->dma_dac2.count + cnt > s->dma_dac2.dmasize)
1253 cnt = s->dma_dac2.dmasize - s->dma_dac2.count;
1254 if (cnt <= 0)
1255 __set_current_state(TASK_INTERRUPTIBLE);
1256 spin_unlock_irqrestore(&s->lock, flags);
1257 if (cnt > count)
1258 cnt = count;
1259 if (cnt <= 0) {
1260 if (s->dma_dac2.enabled)
1261 start_dac2(s);
1262 if (file->f_flags & O_NONBLOCK) {
1263 if (!ret)
1264 ret = -EAGAIN;
1265 goto out;
1266 }
1267 mutex_unlock(&s->mutex);
1268 schedule();
1269 if (signal_pending(current)) {
1270 if (!ret)
1271 ret = -ERESTARTSYS;
1272 goto out;
1273 }
1274 mutex_lock(&s->mutex);
1275 if (s->dma_dac2.mapped)
1276 {
1277 ret = -ENXIO;
1278 goto out;
1279 }
1280 continue;
1281 }
1282 if (copy_from_user(s->dma_dac2.rawbuf + swptr, buffer, cnt)) {
1283 if (!ret)
1284 ret = -EFAULT;
1285 goto out;
1286 }
1287 swptr = (swptr + cnt) % s->dma_dac2.dmasize;
1288 spin_lock_irqsave(&s->lock, flags);
1289 s->dma_dac2.swptr = swptr;
1290 s->dma_dac2.count += cnt;
1291 s->dma_dac2.endcleared = 0;
1292 spin_unlock_irqrestore(&s->lock, flags);
1293 count -= cnt;
1294 buffer += cnt;
1295 ret += cnt;
1296 if (s->dma_dac2.enabled)
1297 start_dac2(s);
1298 }
1299out:
1300 mutex_unlock(&s->mutex);
1301 remove_wait_queue(&s->dma_dac2.wait, &wait);
1302 set_current_state(TASK_RUNNING);
1303 return ret;
1304}
1305
/* No kernel lock - we have our own spinlock */
/*
 * poll() for the full-duplex audio device: reports readiness of the
 * capture (ADC) and playback (DAC2) DMA rings.
 */
static unsigned int es1370_poll(struct file *file, struct poll_table_struct *wait)
{
	struct es1370_state *s = (struct es1370_state *)file->private_data;
	unsigned long flags;
	unsigned int mask = 0;

	VALIDATE_STATE(s);
	if (file->f_mode & FMODE_WRITE) {
		if (!s->dma_dac2.ready && prog_dmabuf_dac2(s))
			return 0;
		/* Register on the wait queue before testing the condition so
		 * a wakeup between the two cannot be missed. */
		poll_wait(file, &s->dma_dac2.wait, wait);
	}
	if (file->f_mode & FMODE_READ) {
		if (!s->dma_adc.ready && prog_dmabuf_adc(s))
			return 0;
		poll_wait(file, &s->dma_adc.wait, wait);
	}
	spin_lock_irqsave(&s->lock, flags);
	es1370_update_ptr(s);
	if (file->f_mode & FMODE_READ) {
		/* readable once at least a fragment has been captured */
		if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
			mask |= POLLIN | POLLRDNORM;
	}
	if (file->f_mode & FMODE_WRITE) {
		if (s->dma_dac2.mapped) {
			if (s->dma_dac2.count >= (signed)s->dma_dac2.fragsize)
				mask |= POLLOUT | POLLWRNORM;
		} else {
			/* writable when at least a fragment of space is free */
			if ((signed)s->dma_dac2.dmasize >= s->dma_dac2.count + (signed)s->dma_dac2.fragsize)
				mask |= POLLOUT | POLLWRNORM;
		}
	}
	spin_unlock_irqrestore(&s->lock, flags);
	return mask;
}
1342
1343static int es1370_mmap(struct file *file, struct vm_area_struct *vma)
1344{
1345 struct es1370_state *s = (struct es1370_state *)file->private_data;
1346 struct dmabuf *db;
1347 int ret = 0;
1348 unsigned long size;
1349
1350 VALIDATE_STATE(s);
1351 lock_kernel();
1352 mutex_lock(&s->mutex);
1353 if (vma->vm_flags & VM_WRITE) {
1354 if ((ret = prog_dmabuf_dac2(s)) != 0) {
1355 goto out;
1356 }
1357 db = &s->dma_dac2;
1358 } else if (vma->vm_flags & VM_READ) {
1359 if ((ret = prog_dmabuf_adc(s)) != 0) {
1360 goto out;
1361 }
1362 db = &s->dma_adc;
1363 } else {
1364 ret = -EINVAL;
1365 goto out;
1366 }
1367 if (vma->vm_pgoff != 0) {
1368 ret = -EINVAL;
1369 goto out;
1370 }
1371 size = vma->vm_end - vma->vm_start;
1372 if (size > (PAGE_SIZE << db->buforder)) {
1373 ret = -EINVAL;
1374 goto out;
1375 }
1376 if (remap_pfn_range(vma, vma->vm_start,
1377 virt_to_phys(db->rawbuf) >> PAGE_SHIFT,
1378 size, vma->vm_page_prot)) {
1379 ret = -EAGAIN;
1380 goto out;
1381 }
1382 db->mapped = 1;
1383out:
1384 mutex_unlock(&s->mutex);
1385 unlock_kernel();
1386 return ret;
1387}
1388
/*
 * ioctl() for the full-duplex audio device.  Implements the OSS DSP
 * ioctl set; any command not handled here falls through to
 * mixer_ioctl() at the bottom.
 */
static int es1370_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	struct es1370_state *s = (struct es1370_state *)file->private_data;
	unsigned long flags;
	audio_buf_info abinfo;
	count_info cinfo;
	int count;
	int val, mapped, ret;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	VALIDATE_STATE(s);
	/* NOTE(review): 'mapped' is computed but not referenced below in
	 * this function — appears vestigial; confirm before removing. */
	mapped = ((file->f_mode & FMODE_WRITE) && s->dma_dac2.mapped) ||
		((file->f_mode & FMODE_READ) && s->dma_adc.mapped);
	switch (cmd) {
	case OSS_GETVERSION:
		return put_user(SOUND_VERSION, p);

	case SNDCTL_DSP_SYNC:
		if (file->f_mode & FMODE_WRITE)
			return drain_dac2(s, 0/*file->f_flags & O_NONBLOCK*/);
		return 0;

	case SNDCTL_DSP_SETDUPLEX:
		return 0;

	case SNDCTL_DSP_GETCAPS:
		return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME | DSP_CAP_TRIGGER | DSP_CAP_MMAP, p);

	case SNDCTL_DSP_RESET:
		/* Stop the engines and zero all ring-buffer bookkeeping. */
		if (file->f_mode & FMODE_WRITE) {
			stop_dac2(s);
			synchronize_irq(s->irq);
			s->dma_dac2.swptr = s->dma_dac2.hwptr = s->dma_dac2.count = s->dma_dac2.total_bytes = 0;
		}
		if (file->f_mode & FMODE_READ) {
			stop_adc(s);
			synchronize_irq(s->irq);
			s->dma_adc.swptr = s->dma_adc.hwptr = s->dma_adc.count = s->dma_adc.total_bytes = 0;
		}
		return 0;

	case SNDCTL_DSP_SPEED:
		/* ADC and DAC2 share one clock divider, so the rate cannot
		 * be changed while the other direction is open elsewhere. */
		if (get_user(val, p))
			return -EFAULT;
		if (val >= 0) {
			if (s->open_mode & (~file->f_mode) & (FMODE_READ|FMODE_WRITE))
				return -EINVAL;
			if (val < 4000)
				val = 4000;
			if (val > 50000)
				val = 50000;
			stop_adc(s);
			stop_dac2(s);
			s->dma_adc.ready = s->dma_dac2.ready = 0;
			spin_lock_irqsave(&s->lock, flags);
			s->ctrl = (s->ctrl & ~CTRL_PCLKDIV) | (DAC2_SRTODIV(val) << CTRL_SH_PCLKDIV);
			outl(s->ctrl, s->io+ES1370_REG_CONTROL);
			spin_unlock_irqrestore(&s->lock, flags);
		}
		return put_user(DAC2_DIVTOSR((s->ctrl & CTRL_PCLKDIV) >> CTRL_SH_PCLKDIV), p);

	case SNDCTL_DSP_STEREO:
		if (get_user(val, p))
			return -EFAULT;
		if (file->f_mode & FMODE_READ) {
			stop_adc(s);
			s->dma_adc.ready = 0;
			spin_lock_irqsave(&s->lock, flags);
			if (val)
				s->sctrl |= SCTRL_R1SMB;
			else
				s->sctrl &= ~SCTRL_R1SMB;
			outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
			spin_unlock_irqrestore(&s->lock, flags);
		}
		if (file->f_mode & FMODE_WRITE) {
			stop_dac2(s);
			s->dma_dac2.ready = 0;
			spin_lock_irqsave(&s->lock, flags);
			if (val)
				s->sctrl |= SCTRL_P2SMB;
			else
				s->sctrl &= ~SCTRL_P2SMB;
			outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
			spin_unlock_irqrestore(&s->lock, flags);
		}
		return 0;

	case SNDCTL_DSP_CHANNELS:
		/* val==0 queries; >=2 selects stereo, 1 selects mono. */
		if (get_user(val, p))
			return -EFAULT;
		if (val != 0) {
			if (file->f_mode & FMODE_READ) {
				stop_adc(s);
				s->dma_adc.ready = 0;
				spin_lock_irqsave(&s->lock, flags);
				if (val >= 2)
					s->sctrl |= SCTRL_R1SMB;
				else
					s->sctrl &= ~SCTRL_R1SMB;
				outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
				spin_unlock_irqrestore(&s->lock, flags);
			}
			if (file->f_mode & FMODE_WRITE) {
				stop_dac2(s);
				s->dma_dac2.ready = 0;
				spin_lock_irqsave(&s->lock, flags);
				if (val >= 2)
					s->sctrl |= SCTRL_P2SMB;
				else
					s->sctrl &= ~SCTRL_P2SMB;
				outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
				spin_unlock_irqrestore(&s->lock, flags);
			}
		}
		return put_user((s->sctrl & ((file->f_mode & FMODE_READ) ? SCTRL_R1SMB : SCTRL_P2SMB)) ? 2 : 1, p);

	case SNDCTL_DSP_GETFMTS: /* Returns a mask */
		return put_user(AFMT_S16_LE|AFMT_U8, p);

	case SNDCTL_DSP_SETFMT: /* Selects ONE fmt*/
		if (get_user(val, p))
			return -EFAULT;
		if (val != AFMT_QUERY) {
			if (file->f_mode & FMODE_READ) {
				stop_adc(s);
				s->dma_adc.ready = 0;
				spin_lock_irqsave(&s->lock, flags);
				if (val == AFMT_S16_LE)
					s->sctrl |= SCTRL_R1SEB;
				else
					s->sctrl &= ~SCTRL_R1SEB;
				outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
				spin_unlock_irqrestore(&s->lock, flags);
			}
			if (file->f_mode & FMODE_WRITE) {
				stop_dac2(s);
				s->dma_dac2.ready = 0;
				spin_lock_irqsave(&s->lock, flags);
				if (val == AFMT_S16_LE)
					s->sctrl |= SCTRL_P2SEB;
				else
					s->sctrl &= ~SCTRL_P2SEB;
				outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
				spin_unlock_irqrestore(&s->lock, flags);
			}
		}
		return put_user((s->sctrl & ((file->f_mode & FMODE_READ) ? SCTRL_R1SEB : SCTRL_P2SEB)) ?
			AFMT_S16_LE : AFMT_U8, p);

	case SNDCTL_DSP_POST:
		return 0;

	case SNDCTL_DSP_GETTRIGGER:
		val = 0;
		if (file->f_mode & FMODE_READ && s->ctrl & CTRL_ADC_EN)
			val |= PCM_ENABLE_INPUT;
		if (file->f_mode & FMODE_WRITE && s->ctrl & CTRL_DAC2_EN)
			val |= PCM_ENABLE_OUTPUT;
		return put_user(val, p);

	case SNDCTL_DSP_SETTRIGGER:
		if (get_user(val, p))
			return -EFAULT;
		if (file->f_mode & FMODE_READ) {
			if (val & PCM_ENABLE_INPUT) {
				if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
					return ret;
				s->dma_adc.enabled = 1;
				start_adc(s);
			} else {
				s->dma_adc.enabled = 0;
				stop_adc(s);
			}
		}
		if (file->f_mode & FMODE_WRITE) {
			if (val & PCM_ENABLE_OUTPUT) {
				if (!s->dma_dac2.ready && (ret = prog_dmabuf_dac2(s)))
					return ret;
				s->dma_dac2.enabled = 1;
				start_dac2(s);
			} else {
				s->dma_dac2.enabled = 0;
				stop_dac2(s);
			}
		}
		return 0;

	case SNDCTL_DSP_GETOSPACE:
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		if (!s->dma_dac2.ready && (val = prog_dmabuf_dac2(s)) != 0)
			return val;
		spin_lock_irqsave(&s->lock, flags);
		es1370_update_ptr(s);
		abinfo.fragsize = s->dma_dac2.fragsize;
		count = s->dma_dac2.count;
		if (count < 0)
			count = 0;	/* clamp after underrun */
		abinfo.bytes = s->dma_dac2.dmasize - count;
		abinfo.fragstotal = s->dma_dac2.numfrag;
		abinfo.fragments = abinfo.bytes >> s->dma_dac2.fragshift;
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_GETISPACE:
		if (!(file->f_mode & FMODE_READ))
			return -EINVAL;
		if (!s->dma_adc.ready && (val = prog_dmabuf_adc(s)) != 0)
			return val;
		spin_lock_irqsave(&s->lock, flags);
		es1370_update_ptr(s);
		abinfo.fragsize = s->dma_adc.fragsize;
		count = s->dma_adc.count;
		if (count < 0)
			count = 0;
		abinfo.bytes = count;
		abinfo.fragstotal = s->dma_adc.numfrag;
		abinfo.fragments = abinfo.bytes >> s->dma_adc.fragshift;
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_NONBLOCK:
		file->f_flags |= O_NONBLOCK;
		return 0;

	case SNDCTL_DSP_GETODELAY:
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		if (!s->dma_dac2.ready && (val = prog_dmabuf_dac2(s)) != 0)
			return val;
		spin_lock_irqsave(&s->lock, flags);
		es1370_update_ptr(s);
		count = s->dma_dac2.count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (count < 0)
			count = 0;
		return put_user(count, p);

	case SNDCTL_DSP_GETIPTR:
		if (!(file->f_mode & FMODE_READ))
			return -EINVAL;
		if (!s->dma_adc.ready && (val = prog_dmabuf_adc(s)) != 0)
			return val;
		spin_lock_irqsave(&s->lock, flags);
		es1370_update_ptr(s);
		cinfo.bytes = s->dma_adc.total_bytes;
		count = s->dma_adc.count;
		if (count < 0)
			count = 0;
		cinfo.blocks = count >> s->dma_adc.fragshift;
		cinfo.ptr = s->dma_adc.hwptr;
		if (s->dma_adc.mapped)
			s->dma_adc.count &= s->dma_adc.fragsize-1;
		spin_unlock_irqrestore(&s->lock, flags);
		if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
			return -EFAULT;
		return 0;

	case SNDCTL_DSP_GETOPTR:
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		if (!s->dma_dac2.ready && (val = prog_dmabuf_dac2(s)) != 0)
			return val;
		spin_lock_irqsave(&s->lock, flags);
		es1370_update_ptr(s);
		cinfo.bytes = s->dma_dac2.total_bytes;
		count = s->dma_dac2.count;
		if (count < 0)
			count = 0;
		cinfo.blocks = count >> s->dma_dac2.fragshift;
		cinfo.ptr = s->dma_dac2.hwptr;
		if (s->dma_dac2.mapped)
			s->dma_dac2.count &= s->dma_dac2.fragsize-1;
		spin_unlock_irqrestore(&s->lock, flags);
		if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
			return -EFAULT;
		return 0;

	case SNDCTL_DSP_GETBLKSIZE:
		if (file->f_mode & FMODE_WRITE) {
			if ((val = prog_dmabuf_dac2(s)))
				return val;
			return put_user(s->dma_dac2.fragsize, p);
		}
		if ((val = prog_dmabuf_adc(s)))
			return val;
		return put_user(s->dma_adc.fragsize, p);

	case SNDCTL_DSP_SETFRAGMENT:
		/* low 16 bits: log2 fragment size; high 16 bits: max frags */
		if (get_user(val, p))
			return -EFAULT;
		if (file->f_mode & FMODE_READ) {
			s->dma_adc.ossfragshift = val & 0xffff;
			s->dma_adc.ossmaxfrags = (val >> 16) & 0xffff;
			if (s->dma_adc.ossfragshift < 4)
				s->dma_adc.ossfragshift = 4;
			if (s->dma_adc.ossfragshift > 15)
				s->dma_adc.ossfragshift = 15;
			if (s->dma_adc.ossmaxfrags < 4)
				s->dma_adc.ossmaxfrags = 4;
		}
		if (file->f_mode & FMODE_WRITE) {
			s->dma_dac2.ossfragshift = val & 0xffff;
			s->dma_dac2.ossmaxfrags = (val >> 16) & 0xffff;
			if (s->dma_dac2.ossfragshift < 4)
				s->dma_dac2.ossfragshift = 4;
			if (s->dma_dac2.ossfragshift > 15)
				s->dma_dac2.ossfragshift = 15;
			if (s->dma_dac2.ossmaxfrags < 4)
				s->dma_dac2.ossmaxfrags = 4;
		}
		return 0;

	case SNDCTL_DSP_SUBDIVIDE:
		if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision) ||
			(file->f_mode & FMODE_WRITE && s->dma_dac2.subdivision))
			return -EINVAL;
		if (get_user(val, p))
			return -EFAULT;
		if (val != 1 && val != 2 && val != 4)
			return -EINVAL;
		if (file->f_mode & FMODE_READ)
			s->dma_adc.subdivision = val;
		if (file->f_mode & FMODE_WRITE)
			s->dma_dac2.subdivision = val;
		return 0;

	case SOUND_PCM_READ_RATE:
		return put_user(DAC2_DIVTOSR((s->ctrl & CTRL_PCLKDIV) >> CTRL_SH_PCLKDIV), p);

	case SOUND_PCM_READ_CHANNELS:
		return put_user((s->sctrl & ((file->f_mode & FMODE_READ) ? SCTRL_R1SMB : SCTRL_P2SMB)) ?
			2 : 1, p);

	case SOUND_PCM_READ_BITS:
		return put_user((s->sctrl & ((file->f_mode & FMODE_READ) ? SCTRL_R1SEB : SCTRL_P2SEB)) ?
			16 : 8, p);

	case SOUND_PCM_WRITE_FILTER:
	case SNDCTL_DSP_SETSYNCRO:
	case SOUND_PCM_READ_FILTER:
		return -EINVAL;

	}
	/* Not a DSP ioctl: try the mixer. */
	return mixer_ioctl(s, cmd, arg);
}
1737
/*
 * open() for the full-duplex audio device.  Finds the card owning the
 * minor, waits (interruptibly) until the requested access mode is
 * free, then programs default format/rate for the opened directions.
 */
static int es1370_open(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	struct list_head *list;
	struct es1370_state *s;

	/* The audio device spans 16 minors per card; match ignoring the
	 * low nibble. */
	for (list = devs.next; ; list = list->next) {
		if (list == &devs)
			return -ENODEV;
		s = list_entry(list, struct es1370_state, devs);
		if (!((s->dev_audio ^ minor) & ~0xf))
			break;
	}
	VALIDATE_STATE(s);
	file->private_data = s;
	/* wait for device to become free */
	mutex_lock(&s->open_mutex);
	while (s->open_mode & file->f_mode) {
		if (file->f_flags & O_NONBLOCK) {
			mutex_unlock(&s->open_mutex);
			return -EBUSY;
		}
		/* Drop the mutex while sleeping; retake it before
		 * re-checking the open mode. */
		add_wait_queue(&s->open_wait, &wait);
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&s->open_mutex);
		schedule();
		remove_wait_queue(&s->open_wait, &wait);
		set_current_state(TASK_RUNNING);
		if (signal_pending(current))
			return -ERESTARTSYS;
		mutex_lock(&s->open_mutex);
	}
	spin_lock_irqsave(&s->lock, flags);
	/* First opener of either direction: default the shared clock to 8 kHz. */
	if (!(s->open_mode & (FMODE_READ|FMODE_WRITE)))
		s->ctrl = (s->ctrl & ~CTRL_PCLKDIV) | (DAC2_SRTODIV(8000) << CTRL_SH_PCLKDIV);
	if (file->f_mode & FMODE_READ) {
		s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0;
		s->dma_adc.enabled = 1;
		/* S16 mono for /dev/dspW-style minors, U8 mono otherwise */
		s->sctrl &= ~SCTRL_R1FMT;
		if ((minor & 0xf) == SND_DEV_DSP16)
			s->sctrl |= ES1370_FMT_S16_MONO << SCTRL_SH_R1FMT;
		else
			s->sctrl |= ES1370_FMT_U8_MONO << SCTRL_SH_R1FMT;
	}
	if (file->f_mode & FMODE_WRITE) {
		s->dma_dac2.ossfragshift = s->dma_dac2.ossmaxfrags = s->dma_dac2.subdivision = 0;
		s->dma_dac2.enabled = 1;
		s->sctrl &= ~SCTRL_P2FMT;
		if ((minor & 0xf) == SND_DEV_DSP16)
			s->sctrl |= ES1370_FMT_S16_MONO << SCTRL_SH_P2FMT;
		else
			s->sctrl |= ES1370_FMT_U8_MONO << SCTRL_SH_P2FMT;
	}
	outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
	outl(s->ctrl, s->io+ES1370_REG_CONTROL);
	spin_unlock_irqrestore(&s->lock, flags);
	s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
	mutex_unlock(&s->open_mutex);
	mutex_init(&s->mutex);
	return nonseekable_open(inode, file);
}
1801
/*
 * release() for the full-duplex audio device: drain pending playback,
 * stop the engines, free the DMA buffers and wake any opener waiting
 * for this access mode.
 */
static int es1370_release(struct inode *inode, struct file *file)
{
	struct es1370_state *s = (struct es1370_state *)file->private_data;

	VALIDATE_STATE(s);
	lock_kernel();
	/* Drain outside open_mutex — it may sleep for a long time. */
	if (file->f_mode & FMODE_WRITE)
		drain_dac2(s, file->f_flags & O_NONBLOCK);
	mutex_lock(&s->open_mutex);
	if (file->f_mode & FMODE_WRITE) {
		stop_dac2(s);
		synchronize_irq(s->irq);
		dealloc_dmabuf(s, &s->dma_dac2);
	}
	if (file->f_mode & FMODE_READ) {
		stop_adc(s);
		dealloc_dmabuf(s, &s->dma_adc);
	}
	s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE));
	wake_up(&s->open_wait);
	mutex_unlock(&s->open_mutex);
	unlock_kernel();
	return 0;
}
1826
/* File operations for the full-duplex audio device node. */
static /*const*/ struct file_operations es1370_audio_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= es1370_read,
	.write		= es1370_write,
	.poll		= es1370_poll,
	.ioctl		= es1370_ioctl,
	.mmap		= es1370_mmap,
	.open		= es1370_open,
	.release	= es1370_release,
};
1838
1839/* --------------------------------------------------------------------- */
1840
/*
 * write() for the DAC1 (synth) playback device.  Same ring-buffer
 * logic as es1370_write() but this device has no per-open mutex;
 * the spinlock alone guards the ring state.
 */
static ssize_t es1370_write_dac(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct es1370_state *s = (struct es1370_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret = 0;
	unsigned long flags;
	unsigned swptr;
	int cnt;

	VALIDATE_STATE(s);
	if (s->dma_dac1.mapped)
		return -ENXIO;
	if (!s->dma_dac1.ready && (ret = prog_dmabuf_dac1(s)))
		return ret;
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;
	add_wait_queue(&s->dma_dac1.wait, &wait);
	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		if (s->dma_dac1.count < 0) {
			/* underrun: resynchronize software to hardware pointer */
			s->dma_dac1.count = 0;
			s->dma_dac1.swptr = s->dma_dac1.hwptr;
		}
		swptr = s->dma_dac1.swptr;
		/* free space contiguously up to the end of the ring */
		cnt = s->dma_dac1.dmasize-swptr;
		if (s->dma_dac1.count + cnt > s->dma_dac1.dmasize)
			cnt = s->dma_dac1.dmasize - s->dma_dac1.count;
		if (cnt <= 0)
			__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			if (s->dma_dac1.enabled)
				start_dac1(s);
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			schedule();
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		if (copy_from_user(s->dma_dac1.rawbuf + swptr, buffer, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
		swptr = (swptr + cnt) % s->dma_dac1.dmasize;
		spin_lock_irqsave(&s->lock, flags);
		s->dma_dac1.swptr = swptr;
		s->dma_dac1.count += cnt;
		s->dma_dac1.endcleared = 0;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		if (s->dma_dac1.enabled)
			start_dac1(s);
	}
	remove_wait_queue(&s->dma_dac1.wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;
}
1910
/* No kernel lock - we have our own spinlock */
/* poll() for the DAC1 (synth) device: playback readiness only. */
static unsigned int es1370_poll_dac(struct file *file, struct poll_table_struct *wait)
{
	struct es1370_state *s = (struct es1370_state *)file->private_data;
	unsigned long flags;
	unsigned int mask = 0;

	VALIDATE_STATE(s);
	if (!s->dma_dac1.ready && prog_dmabuf_dac1(s))
		return 0;
	/* Register on the wait queue before testing the condition so a
	 * wakeup between the two cannot be missed. */
	poll_wait(file, &s->dma_dac1.wait, wait);
	spin_lock_irqsave(&s->lock, flags);
	es1370_update_ptr(s);
	if (s->dma_dac1.mapped) {
		if (s->dma_dac1.count >= (signed)s->dma_dac1.fragsize)
			mask |= POLLOUT | POLLWRNORM;
	} else {
		/* writable when at least a fragment of space is free */
		if ((signed)s->dma_dac1.dmasize >= s->dma_dac1.count + (signed)s->dma_dac1.fragsize)
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_irqrestore(&s->lock, flags);
	return mask;
}
1934
1935static int es1370_mmap_dac(struct file *file, struct vm_area_struct *vma)
1936{
1937 struct es1370_state *s = (struct es1370_state *)file->private_data;
1938 int ret;
1939 unsigned long size;
1940
1941 VALIDATE_STATE(s);
1942 if (!(vma->vm_flags & VM_WRITE))
1943 return -EINVAL;
1944 lock_kernel();
1945 if ((ret = prog_dmabuf_dac1(s)) != 0)
1946 goto out;
1947 ret = -EINVAL;
1948 if (vma->vm_pgoff != 0)
1949 goto out;
1950 size = vma->vm_end - vma->vm_start;
1951 if (size > (PAGE_SIZE << s->dma_dac1.buforder))
1952 goto out;
1953 ret = -EAGAIN;
1954 if (remap_pfn_range(vma, vma->vm_start,
1955 virt_to_phys(s->dma_dac1.rawbuf) >> PAGE_SHIFT,
1956 size, vma->vm_page_prot))
1957 goto out;
1958 s->dma_dac1.mapped = 1;
1959 ret = 0;
1960out:
1961 unlock_kernel();
1962 return ret;
1963}
1964
/*
 * ioctl() handler for the DAC1 ("synth") device.  Implements the usual OSS
 * /dev/dsp control set (rate, channels, format, triggers, buffer queries).
 * Rate/format changes stop DAC1 first and mark the DMA buffer not-ready so
 * it is reprogrammed on next use; all hardware register updates happen
 * under the state spinlock.  Unknown commands fall through to mixer_ioctl().
 */
static int es1370_ioctl_dac(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	struct es1370_state *s = (struct es1370_state *)file->private_data;
	unsigned long flags;
	audio_buf_info abinfo;
	count_info cinfo;
	int count;
	unsigned ctrl;
	int val, ret;
	int __user *p = (int __user *)arg;

	VALIDATE_STATE(s);
	switch (cmd) {
	case OSS_GETVERSION:
		return put_user(SOUND_VERSION, p);

	case SNDCTL_DSP_SYNC:
		/* NOTE(review): O_NONBLOCK deliberately ignored here (commented out) */
		return drain_dac1(s, 0/*file->f_flags & O_NONBLOCK*/);

	case SNDCTL_DSP_SETDUPLEX:
		/* DAC1 is playback-only */
		return -EINVAL;

	case SNDCTL_DSP_GETCAPS:
		return put_user(DSP_CAP_REALTIME | DSP_CAP_TRIGGER | DSP_CAP_MMAP, p);

	case SNDCTL_DSP_RESET:
		stop_dac1(s);
		synchronize_irq(s->irq);
		s->dma_dac1.swptr = s->dma_dac1.hwptr = s->dma_dac1.count = s->dma_dac1.total_bytes = 0;
		return 0;

	case SNDCTL_DSP_SPEED:
		if (get_user(val, p))
			return -EFAULT;
		if (val >= 0) {
			stop_dac1(s);
			s->dma_dac1.ready = 0;
			/* DAC1 only supports the four fixed rates in dac1_samplerate[];
			 * pick the index whose rate is nearest the request */
			for (ctrl = 0; ctrl <= 2; ctrl++)
				if (val < (dac1_samplerate[ctrl] + dac1_samplerate[ctrl+1]) / 2)
					break;
			spin_lock_irqsave(&s->lock, flags);
			s->ctrl = (s->ctrl & ~CTRL_WTSRSEL) | (ctrl << CTRL_SH_WTSRSEL);
			outl(s->ctrl, s->io+ES1370_REG_CONTROL);
			spin_unlock_irqrestore(&s->lock, flags);
		}
		/* report the rate actually programmed */
		return put_user(dac1_samplerate[(s->ctrl & CTRL_WTSRSEL) >> CTRL_SH_WTSRSEL], p);

	case SNDCTL_DSP_STEREO:
		if (get_user(val, p))
			return -EFAULT;
		stop_dac1(s);
		s->dma_dac1.ready = 0;
		spin_lock_irqsave(&s->lock, flags);
		if (val)
			s->sctrl |= SCTRL_P1SMB;
		else
			s->sctrl &= ~SCTRL_P1SMB;
		outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
		spin_unlock_irqrestore(&s->lock, flags);
		return 0;

	case SNDCTL_DSP_CHANNELS:
		if (get_user(val, p))
			return -EFAULT;
		if (val != 0) {
			/* channel count cannot change while the buffer is mmap'ed */
			if (s->dma_dac1.mapped)
				return -EINVAL;
			stop_dac1(s);
			s->dma_dac1.ready = 0;
			spin_lock_irqsave(&s->lock, flags);
			if (val >= 2)
				s->sctrl |= SCTRL_P1SMB;
			else
				s->sctrl &= ~SCTRL_P1SMB;
			outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
			spin_unlock_irqrestore(&s->lock, flags);
		}
		return put_user((s->sctrl & SCTRL_P1SMB) ? 2 : 1, p);

	case SNDCTL_DSP_GETFMTS: /* Returns a mask */
		return put_user(AFMT_S16_LE|AFMT_U8, p);

	case SNDCTL_DSP_SETFMT: /* Selects ONE fmt*/
		if (get_user(val, p))
			return -EFAULT;
		if (val != AFMT_QUERY) {
			stop_dac1(s);
			s->dma_dac1.ready = 0;
			spin_lock_irqsave(&s->lock, flags);
			/* anything other than S16_LE selects U8 */
			if (val == AFMT_S16_LE)
				s->sctrl |= SCTRL_P1SEB;
			else
				s->sctrl &= ~SCTRL_P1SEB;
			outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
			spin_unlock_irqrestore(&s->lock, flags);
		}
		return put_user((s->sctrl & SCTRL_P1SEB) ? AFMT_S16_LE : AFMT_U8, p);

	case SNDCTL_DSP_POST:
		return 0;

	case SNDCTL_DSP_GETTRIGGER:
		return put_user((s->ctrl & CTRL_DAC1_EN) ? PCM_ENABLE_OUTPUT : 0, p);

	case SNDCTL_DSP_SETTRIGGER:
		if (get_user(val, p))
			return -EFAULT;
		if (val & PCM_ENABLE_OUTPUT) {
			if (!s->dma_dac1.ready && (ret = prog_dmabuf_dac1(s)))
				return ret;
			s->dma_dac1.enabled = 1;
			start_dac1(s);
		} else {
			s->dma_dac1.enabled = 0;
			stop_dac1(s);
		}
		return 0;

	case SNDCTL_DSP_GETOSPACE:
		if (!s->dma_dac1.ready && (val = prog_dmabuf_dac1(s)) != 0)
			return val;
		spin_lock_irqsave(&s->lock, flags);
		es1370_update_ptr(s);
		abinfo.fragsize = s->dma_dac1.fragsize;
		count = s->dma_dac1.count;
		if (count < 0)
			count = 0;
		abinfo.bytes = s->dma_dac1.dmasize - count;
		abinfo.fragstotal = s->dma_dac1.numfrag;
		abinfo.fragments = abinfo.bytes >> s->dma_dac1.fragshift;
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user((void __user *)arg, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_NONBLOCK:
		file->f_flags |= O_NONBLOCK;
		return 0;

	case SNDCTL_DSP_GETODELAY:
		if (!s->dma_dac1.ready && (val = prog_dmabuf_dac1(s)) != 0)
			return val;
		spin_lock_irqsave(&s->lock, flags);
		es1370_update_ptr(s);
		count = s->dma_dac1.count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (count < 0)
			count = 0;
		return put_user(count, p);

	case SNDCTL_DSP_GETOPTR:
		if (!s->dma_dac1.ready && (val = prog_dmabuf_dac1(s)) != 0)
			return val;
		spin_lock_irqsave(&s->lock, flags);
		es1370_update_ptr(s);
		cinfo.bytes = s->dma_dac1.total_bytes;
		count = s->dma_dac1.count;
		if (count < 0)
			count = 0;
		cinfo.blocks = count >> s->dma_dac1.fragshift;
		cinfo.ptr = s->dma_dac1.hwptr;
		/* in mmap mode the count is wrapped to within one fragment */
		if (s->dma_dac1.mapped)
			s->dma_dac1.count &= s->dma_dac1.fragsize-1;
		spin_unlock_irqrestore(&s->lock, flags);
		if (copy_to_user((void __user *)arg, &cinfo, sizeof(cinfo)))
			return -EFAULT;
		return 0;

	case SNDCTL_DSP_GETBLKSIZE:
		if ((val = prog_dmabuf_dac1(s)))
			return val;
		return put_user(s->dma_dac1.fragsize, p);

	case SNDCTL_DSP_SETFRAGMENT:
		/* low 16 bits: log2 fragment size, high 16 bits: max fragments;
		 * values are clamped, not rejected */
		if (get_user(val, p))
			return -EFAULT;
		s->dma_dac1.ossfragshift = val & 0xffff;
		s->dma_dac1.ossmaxfrags = (val >> 16) & 0xffff;
		if (s->dma_dac1.ossfragshift < 4)
			s->dma_dac1.ossfragshift = 4;
		if (s->dma_dac1.ossfragshift > 15)
			s->dma_dac1.ossfragshift = 15;
		if (s->dma_dac1.ossmaxfrags < 4)
			s->dma_dac1.ossmaxfrags = 4;
		return 0;

	case SNDCTL_DSP_SUBDIVIDE:
		/* may only be set once per open */
		if (s->dma_dac1.subdivision)
			return -EINVAL;
		if (get_user(val, p))
			return -EFAULT;
		if (val != 1 && val != 2 && val != 4)
			return -EINVAL;
		s->dma_dac1.subdivision = val;
		return 0;

	case SOUND_PCM_READ_RATE:
		return put_user(dac1_samplerate[(s->ctrl & CTRL_WTSRSEL) >> CTRL_SH_WTSRSEL], p);

	case SOUND_PCM_READ_CHANNELS:
		return put_user((s->sctrl & SCTRL_P1SMB) ? 2 : 1, p);

	case SOUND_PCM_READ_BITS:
		return put_user((s->sctrl & SCTRL_P1SEB) ? 16 : 8, p);

	case SOUND_PCM_WRITE_FILTER:
	case SNDCTL_DSP_SETSYNCRO:
	case SOUND_PCM_READ_FILTER:
		return -EINVAL;

	}
	/* not a dsp ioctl: let the mixer have a go */
	return mixer_ioctl(s, cmd, arg);
}
2176
/*
 * open() handler for the DAC1 device.  Locates the matching card by minor
 * number, waits (interruptibly) until no other opener holds the DAC, then
 * initializes DMA bookkeeping and programs a default rate and mono format
 * derived from the minor number.
 */
static int es1370_open_dac(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	struct list_head *list;
	struct es1370_state *s;

	/* scan the global device list for the card owning this minor;
	 * the low 4 bits encode the device flavour and are ignored */
	for (list = devs.next; ; list = list->next) {
		if (list == &devs)
			return -ENODEV;
		s = list_entry(list, struct es1370_state, devs);
		if (!((s->dev_dac ^ minor) & ~0xf))
			break;
	}
	VALIDATE_STATE(s);
	/* we allow opening with O_RDWR, most programs do it although they will only write */
#if 0
	if (file->f_mode & FMODE_READ)
		return -EPERM;
#endif
	if (!(file->f_mode & FMODE_WRITE))
		return -EINVAL;
	file->private_data = s;
	/* wait for device to become free */
	mutex_lock(&s->open_mutex);
	while (s->open_mode & FMODE_DAC) {
		if (file->f_flags & O_NONBLOCK) {
			mutex_unlock(&s->open_mutex);
			return -EBUSY;
		}
		/* classic sleep/wakeup dance: queue ourselves, drop the mutex,
		 * sleep, then retake the mutex and re-test the condition */
		add_wait_queue(&s->open_wait, &wait);
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&s->open_mutex);
		schedule();
		remove_wait_queue(&s->open_wait, &wait);
		set_current_state(TASK_RUNNING);
		if (signal_pending(current))
			return -ERESTARTSYS;
		mutex_lock(&s->open_mutex);
	}
	/* fresh open: reset OSS fragment tuning and enable the channel */
	s->dma_dac1.ossfragshift = s->dma_dac1.ossmaxfrags = s->dma_dac1.subdivision = 0;
	s->dma_dac1.enabled = 1;
	spin_lock_irqsave(&s->lock, flags);
	/* default sample rate: dac1_samplerate table index 1 */
	s->ctrl = (s->ctrl & ~CTRL_WTSRSEL) | (1 << CTRL_SH_WTSRSEL);
	s->sctrl &= ~SCTRL_P1FMT;
	/* 16-bit vs 8-bit default format chosen by the minor's flavour bits */
	if ((minor & 0xf) == SND_DEV_DSP16)
		s->sctrl |= ES1370_FMT_S16_MONO << SCTRL_SH_P1FMT;
	else
		s->sctrl |= ES1370_FMT_U8_MONO << SCTRL_SH_P1FMT;
	outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
	outl(s->ctrl, s->io+ES1370_REG_CONTROL);
	spin_unlock_irqrestore(&s->lock, flags);
	s->open_mode |= FMODE_DAC;
	mutex_unlock(&s->open_mutex);
	return nonseekable_open(inode, file);
}
2234
/*
 * release() handler for the DAC1 device: drain pending audio (honouring
 * O_NONBLOCK), stop the channel, free its DMA buffer and wake anyone
 * blocked in open() waiting for the DAC to become free.
 */
static int es1370_release_dac(struct inode *inode, struct file *file)
{
	struct es1370_state *s = (struct es1370_state *)file->private_data;

	VALIDATE_STATE(s);
	lock_kernel();
	/* let queued samples play out before tearing the channel down */
	drain_dac1(s, file->f_flags & O_NONBLOCK);
	mutex_lock(&s->open_mutex);
	stop_dac1(s);
	dealloc_dmabuf(s, &s->dma_dac1);
	s->open_mode &= ~FMODE_DAC;
	wake_up(&s->open_wait);
	mutex_unlock(&s->open_mutex);
	unlock_kernel();
	return 0;
}
2251
/* File operations for the DAC1 ("synth") /dev/dsp-style device node. */
static /*const*/ struct file_operations es1370_dac_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= es1370_write_dac,
	.poll		= es1370_poll_dac,
	.ioctl		= es1370_ioctl_dac,
	.mmap		= es1370_mmap_dac,
	.open		= es1370_open_dac,
	.release	= es1370_release_dac,
};
2262
2263/* --------------------------------------------------------------------- */
2264
/*
 * read() handler for the MIDI device.  Copies bytes out of the interrupt-
 * filled input ring buffer.  Blocks (interruptibly) until at least one byte
 * is available unless O_NONBLOCK is set; returns after the first contiguous
 * chunk is delivered (short reads are normal for MIDI streams).
 */
static ssize_t es1370_midi_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	struct es1370_state *s = (struct es1370_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	unsigned ptr;
	int cnt;

	VALIDATE_STATE(s);
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	if (count == 0)
		return 0;
	ret = 0;
	add_wait_queue(&s->midi.iwait, &wait);
	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		ptr = s->midi.ird;
		/* largest contiguous run: to end of ring, capped by fill level */
		cnt = MIDIINBUF - ptr;
		if (s->midi.icnt < cnt)
			cnt = s->midi.icnt;
		/* mark ourselves sleeping BEFORE dropping the lock so a wakeup
		 * from the interrupt handler cannot be lost */
		if (cnt <= 0)
			__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			schedule();
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		if (copy_to_user(buffer, s->midi.ibuf + ptr, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
		/* consume the bytes: advance read pointer and shrink fill count */
		ptr = (ptr + cnt) % MIDIINBUF;
		spin_lock_irqsave(&s->lock, flags);
		s->midi.ird = ptr;
		s->midi.icnt -= cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		/* deliberate: return after the first successful chunk */
		break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&s->midi.iwait, &wait);
	return ret;
}
2325
/*
 * write() handler for the MIDI device.  Copies user bytes into the output
 * ring buffer in contiguous chunks and kicks the UART transmitter via
 * es1370_handle_midi().  Blocks (interruptibly) while the ring is full
 * unless O_NONBLOCK is set; unlike the read path it loops until the whole
 * request is queued or an error/signal occurs.
 */
static ssize_t es1370_midi_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct es1370_state *s = (struct es1370_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	unsigned ptr;
	int cnt;

	VALIDATE_STATE(s);
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;
	if (count == 0)
		return 0;
	ret = 0;
	add_wait_queue(&s->midi.owait, &wait);
	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		ptr = s->midi.owr;
		/* largest contiguous free run: to end of ring, capped by free space */
		cnt = MIDIOUTBUF - ptr;
		if (s->midi.ocnt + cnt > MIDIOUTBUF)
			cnt = MIDIOUTBUF - s->midi.ocnt;
		if (cnt <= 0) {
			/* sleep state set before unlock to avoid losing the wakeup;
			 * also poke the UART so it drains the full buffer */
			__set_current_state(TASK_INTERRUPTIBLE);
			es1370_handle_midi(s);
		}
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			schedule();
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		if (copy_from_user(s->midi.obuf + ptr, buffer, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
		/* publish the bytes: advance write pointer and grow fill count */
		ptr = (ptr + cnt) % MIDIOUTBUF;
		spin_lock_irqsave(&s->lock, flags);
		s->midi.owr = ptr;
		s->midi.ocnt += cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		/* start/continue transmission of what we just queued */
		spin_lock_irqsave(&s->lock, flags);
		es1370_handle_midi(s);
		spin_unlock_irqrestore(&s->lock, flags);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&s->midi.owait, &wait);
	return ret;
}
2390
2391/* No kernel lock - we have our own spinlock */
2392static unsigned int es1370_midi_poll(struct file *file, struct poll_table_struct *wait)
2393{
2394 struct es1370_state *s = (struct es1370_state *)file->private_data;
2395 unsigned long flags;
2396 unsigned int mask = 0;
2397
2398 VALIDATE_STATE(s);
2399 if (file->f_mode & FMODE_WRITE)
2400 poll_wait(file, &s->midi.owait, wait);
2401 if (file->f_mode & FMODE_READ)
2402 poll_wait(file, &s->midi.iwait, wait);
2403 spin_lock_irqsave(&s->lock, flags);
2404 if (file->f_mode & FMODE_READ) {
2405 if (s->midi.icnt > 0)
2406 mask |= POLLIN | POLLRDNORM;
2407 }
2408 if (file->f_mode & FMODE_WRITE) {
2409 if (s->midi.ocnt < MIDIOUTBUF)
2410 mask |= POLLOUT | POLLWRNORM;
2411 }
2412 spin_unlock_irqrestore(&s->lock, flags);
2413 return mask;
2414}
2415
/*
 * open() handler for the MIDI device.  Finds the card by exact minor match,
 * waits until the requested access modes are free, resets the UART on first
 * open and enables it.  Read and write openers are tracked independently via
 * FMODE_MIDI_* bits in open_mode.
 */
static int es1370_midi_open(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	struct list_head *list;
	struct es1370_state *s;

	for (list = devs.next; ; list = list->next) {
		if (list == &devs)
			return -ENODEV;
		s = list_entry(list, struct es1370_state, devs);
		if (s->dev_midi == minor)
			break;
	}
	VALIDATE_STATE(s);
	file->private_data = s;
	/* wait for device to become free */
	mutex_lock(&s->open_mutex);
	while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) {
		if (file->f_flags & O_NONBLOCK) {
			mutex_unlock(&s->open_mutex);
			return -EBUSY;
		}
		/* sleep with the mutex dropped; recheck the condition on wakeup */
		add_wait_queue(&s->open_wait, &wait);
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&s->open_mutex);
		schedule();
		remove_wait_queue(&s->open_wait, &wait);
		set_current_state(TASK_RUNNING);
		if (signal_pending(current))
			return -ERESTARTSYS;
		mutex_lock(&s->open_mutex);
	}
	spin_lock_irqsave(&s->lock, flags);
	/* first opener in either direction: software-reset the UART */
	if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
		s->midi.ird = s->midi.iwr = s->midi.icnt = 0;
		s->midi.ord = s->midi.owr = s->midi.ocnt = 0;
		outb(UCTRL_CNTRL_SWR, s->io+ES1370_REG_UART_CONTROL);
		outb(0, s->io+ES1370_REG_UART_CONTROL);
		outb(0, s->io+ES1370_REG_UART_TEST);
	}
	/* clear the ring for each newly-opened direction */
	if (file->f_mode & FMODE_READ) {
		s->midi.ird = s->midi.iwr = s->midi.icnt = 0;
	}
	if (file->f_mode & FMODE_WRITE) {
		s->midi.ord = s->midi.owr = s->midi.ocnt = 0;
	}
	s->ctrl |= CTRL_UART_EN;
	outl(s->ctrl, s->io+ES1370_REG_CONTROL);
	es1370_handle_midi(s);
	spin_unlock_irqrestore(&s->lock, flags);
	s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
	mutex_unlock(&s->open_mutex);
	return nonseekable_open(inode, file);
}
2472
/*
 * release() handler for the MIDI device.  For writers, waits (with a
 * timeout scaled to the UART transmit rate) for the output ring to drain,
 * then clears this opener's mode bits and disables the UART once no
 * opener of either direction remains.
 */
static int es1370_midi_release(struct inode *inode, struct file *file)
{
	struct es1370_state *s = (struct es1370_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned count, tmo;

	VALIDATE_STATE(s);

	lock_kernel();
	if (file->f_mode & FMODE_WRITE) {
		add_wait_queue(&s->midi.owait, &wait);
		for (;;) {
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_lock_irqsave(&s->lock, flags);
			count = s->midi.ocnt;
			spin_unlock_irqrestore(&s->lock, flags);
			/* count is unsigned, so this is really count == 0 */
			if (count <= 0)
				break;
			if (signal_pending(current))
				break;
			if (file->f_flags & O_NONBLOCK)
				break;
			/* ~3100 bytes/s matches MIDI's 31250 baud wire rate */
			tmo = (count * HZ) / 3100;
			if (!schedule_timeout(tmo ? : 1) && tmo)
				DBG(printk(KERN_DEBUG "es1370: midi timed out??\n");)
		}
		remove_wait_queue(&s->midi.owait, &wait);
		set_current_state(TASK_RUNNING);
	}
	mutex_lock(&s->open_mutex);
	s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE));
	spin_lock_irqsave(&s->lock, flags);
	/* last MIDI opener gone: switch the UART off */
	if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
		s->ctrl &= ~CTRL_UART_EN;
		outl(s->ctrl, s->io+ES1370_REG_CONTROL);
	}
	spin_unlock_irqrestore(&s->lock, flags);
	wake_up(&s->open_wait);
	mutex_unlock(&s->open_mutex);
	unlock_kernel();
	return 0;
}
2516
/* File operations for the raw MIDI UART device node. */
static /*const*/ struct file_operations es1370_midi_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= es1370_midi_read,
	.write		= es1370_midi_write,
	.poll		= es1370_midi_poll,
	.open		= es1370_midi_open,
	.release	= es1370_midi_release,
};
2526
2527/* --------------------------------------------------------------------- */
2528
/* maximum number of devices; only used for command line params */
#define NR_DEVICE 5

/* per-card switches, indexed by probe order (devindex) */
static int lineout[NR_DEVICE];
static int micbias[NR_DEVICE];

/* index of the next card to be probed; clamped at NR_DEVICE-1 */
static unsigned int devindex;

/* NOTE(review): param type "bool" with int storage — later kernels enforce
 * matching types; verify against the target kernel version */
module_param_array(lineout, bool, NULL, 0);
MODULE_PARM_DESC(lineout, "if 1 the LINE input is converted to LINE out");
module_param_array(micbias, bool, NULL, 0);
MODULE_PARM_DESC(micbias, "sets the +5V bias for an electret microphone");

MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu");
MODULE_DESCRIPTION("ES1370 AudioPCI Driver");
MODULE_LICENSE("GPL");
2545
2546
2547/* --------------------------------------------------------------------- */
2548
/* Initial mixer settings applied at probe time: every channel is set to
 * 0x4040, i.e. left/right volume 0x40 each (moderate default level). */
static struct initvol {
	int mixch;	/* SOUND_MIXER_WRITE_* ioctl selector */
	int vol;	/* packed left/right volume */
} initvol[] __devinitdata = {
	{ SOUND_MIXER_WRITE_VOLUME, 0x4040 },
	{ SOUND_MIXER_WRITE_PCM, 0x4040 },
	{ SOUND_MIXER_WRITE_SYNTH, 0x4040 },
	{ SOUND_MIXER_WRITE_CD, 0x4040 },
	{ SOUND_MIXER_WRITE_LINE, 0x4040 },
	{ SOUND_MIXER_WRITE_LINE1, 0x4040 },
	{ SOUND_MIXER_WRITE_LINE2, 0x4040 },
	{ SOUND_MIXER_WRITE_LINE3, 0x4040 },
	{ SOUND_MIXER_WRITE_MIC, 0x4040 },
	{ SOUND_MIXER_WRITE_OGAIN, 0x4040 }
};
2564
2565#ifdef SUPPORT_JOYSTICK
2566
/*
 * Reserve the legacy joystick port at 0x200, allocate and register a
 * gameport for it, and enable the chip's joystick decoder.  Returns 0 on
 * success or a negative errno (port busy / out of memory).
 */
static int __devinit es1370_register_gameport(struct es1370_state *s)
{
	struct gameport *gp;

	if (!request_region(0x200, JOY_EXTENT, "es1370")) {
		printk(KERN_ERR "es1370: joystick io port 0x200 in use\n");
		return -EBUSY;
	}

	s->gameport = gp = gameport_allocate_port();
	if (!gp) {
		printk(KERN_ERR "es1370: can not allocate memory for gameport\n");
		release_region(0x200, JOY_EXTENT);
		return -ENOMEM;
	}

	gameport_set_name(gp, "ESS1370");
	gameport_set_phys(gp, "pci%s/gameport0", pci_name(s->dev));
	gp->dev.parent = &s->dev->dev;
	gp->io = 0x200;

	/* turn on the joystick decoder before publishing the port */
	s->ctrl |= CTRL_JYSTK_EN;
	outl(s->ctrl, s->io + ES1370_REG_CONTROL);

	gameport_register_port(gp);

	return 0;
}
2595
2596static inline void es1370_unregister_gameport(struct es1370_state *s)
2597{
2598 if (s->gameport) {
2599 int gpio = s->gameport->io;
2600 gameport_unregister_port(s->gameport);
2601 release_region(gpio, JOY_EXTENT);
2602
2603 }
2604}
2605
#else
/* Joystick support compiled out: stubs that fail/no-op. */
static inline int es1370_register_gameport(struct es1370_state *s) { return -ENOSYS; }
static inline void es1370_unregister_gameport(struct es1370_state *s) { }
#endif /* SUPPORT_JOYSTICK */
2610
2611static int __devinit es1370_probe(struct pci_dev *pcidev, const struct pci_device_id *pciid)
2612{
2613 struct es1370_state *s;
2614 mm_segment_t fs;
2615 int i, val, ret;
2616
2617 if ((ret=pci_enable_device(pcidev)))
2618 return ret;
2619
2620 if ( !(pci_resource_flags(pcidev, 0) & IORESOURCE_IO) ||
2621 !pci_resource_start(pcidev, 0)
2622 )
2623 return -ENODEV;
2624 if (pcidev->irq == 0)
2625 return -ENODEV;
2626 i = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
2627 if (i) {
2628 printk(KERN_WARNING "es1370: architecture does not support 32bit PCI busmaster DMA\n");
2629 return i;
2630 }
2631 if (!(s = kmalloc(sizeof(struct es1370_state), GFP_KERNEL))) {
2632 printk(KERN_WARNING "es1370: out of memory\n");
2633 return -ENOMEM;
2634 }
2635 memset(s, 0, sizeof(struct es1370_state));
2636 init_waitqueue_head(&s->dma_adc.wait);
2637 init_waitqueue_head(&s->dma_dac1.wait);
2638 init_waitqueue_head(&s->dma_dac2.wait);
2639 init_waitqueue_head(&s->open_wait);
2640 init_waitqueue_head(&s->midi.iwait);
2641 init_waitqueue_head(&s->midi.owait);
2642 mutex_init(&s->open_mutex);
2643 spin_lock_init(&s->lock);
2644 s->magic = ES1370_MAGIC;
2645 s->dev = pcidev;
2646 s->io = pci_resource_start(pcidev, 0);
2647 s->irq = pcidev->irq;
2648 if (!request_region(s->io, ES1370_EXTENT, "es1370")) {
2649 printk(KERN_ERR "es1370: io ports %#lx-%#lx in use\n", s->io, s->io+ES1370_EXTENT-1);
2650 ret = -EBUSY;
2651 goto err_region;
2652 }
2653 if ((ret=request_irq(s->irq, es1370_interrupt, IRQF_SHARED, "es1370",s))) {
2654 printk(KERN_ERR "es1370: irq %u in use\n", s->irq);
2655 goto err_irq;
2656 }
2657
2658 /* initialize codec registers */
2659 /* note: setting CTRL_SERR_DIS is reported to break
2660 * mic bias setting (by Kim.Berts@fisub.mail.abb.com) */
2661 s->ctrl = CTRL_CDC_EN | (DAC2_SRTODIV(8000) << CTRL_SH_PCLKDIV) | (1 << CTRL_SH_WTSRSEL);
2662 if (lineout[devindex])
2663 s->ctrl |= CTRL_XCTL0;
2664 if (micbias[devindex])
2665 s->ctrl |= CTRL_XCTL1;
2666 s->sctrl = 0;
2667 printk(KERN_INFO "es1370: adapter at io %#lx irq %u, line %s, mic impedance %s\n",
2668 s->io, s->irq, (s->ctrl & CTRL_XCTL0) ? "out" : "in",
2669 (s->ctrl & CTRL_XCTL1) ? "1" : "0");
2670 /* register devices */
2671 if ((s->dev_audio = register_sound_dsp(&es1370_audio_fops, -1)) < 0) {
2672 ret = s->dev_audio;
2673 goto err_dev1;
2674 }
2675 if ((s->dev_mixer = register_sound_mixer(&es1370_mixer_fops, -1)) < 0) {
2676 ret = s->dev_mixer;
2677 goto err_dev2;
2678 }
2679 if ((s->dev_dac = register_sound_dsp(&es1370_dac_fops, -1)) < 0) {
2680 ret = s->dev_dac;
2681 goto err_dev3;
2682 }
2683 if ((s->dev_midi = register_sound_midi(&es1370_midi_fops, -1)) < 0) {
2684 ret = s->dev_midi;
2685 goto err_dev4;
2686 }
2687 /* initialize the chips */
2688 outl(s->ctrl, s->io+ES1370_REG_CONTROL);
2689 outl(s->sctrl, s->io+ES1370_REG_SERIAL_CONTROL);
2690 /* point phantom write channel to "bugbuf" */
2691 s->bugbuf_cpu = pci_alloc_consistent(pcidev,16,&s->bugbuf_dma);
2692 if (!s->bugbuf_cpu) {
2693 ret = -ENOMEM;
2694 goto err_dev5;
2695 }
2696 outl((ES1370_REG_PHANTOM_FRAMEADR >> 8) & 15, s->io+ES1370_REG_MEMPAGE);
2697 outl(s->bugbuf_dma, s->io+(ES1370_REG_PHANTOM_FRAMEADR & 0xff));
2698 outl(0, s->io+(ES1370_REG_PHANTOM_FRAMECNT & 0xff));
2699 pci_set_master(pcidev); /* enable bus mastering */
2700 wrcodec(s, 0x16, 3); /* no RST, PD */
2701 wrcodec(s, 0x17, 0); /* CODEC ADC and CODEC DAC use {LR,B}CLK2 and run off the LRCLK2 PLL; program DAC_SYNC=0!! */
2702 wrcodec(s, 0x18, 0); /* recording source is mixer */
2703 wrcodec(s, 0x19, s->mix.micpreamp = 1); /* turn on MIC preamp */
2704 s->mix.imix = 1;
2705 fs = get_fs();
2706 set_fs(KERNEL_DS);
2707 val = SOUND_MASK_LINE|SOUND_MASK_SYNTH|SOUND_MASK_CD;
2708 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long)&val);
2709 for (i = 0; i < sizeof(initvol)/sizeof(initvol[0]); i++) {
2710 val = initvol[i].vol;
2711 mixer_ioctl(s, initvol[i].mixch, (unsigned long)&val);
2712 }
2713 set_fs(fs);
2714
2715 es1370_register_gameport(s);
2716
2717 /* store it in the driver field */
2718 pci_set_drvdata(pcidev, s);
2719 /* put it into driver list */
2720 list_add_tail(&s->devs, &devs);
2721 /* increment devindex */
2722 if (devindex < NR_DEVICE-1)
2723 devindex++;
2724 return 0;
2725
2726 err_dev5:
2727 unregister_sound_midi(s->dev_midi);
2728 err_dev4:
2729 unregister_sound_dsp(s->dev_dac);
2730 err_dev3:
2731 unregister_sound_mixer(s->dev_mixer);
2732 err_dev2:
2733 unregister_sound_dsp(s->dev_audio);
2734 err_dev1:
2735 printk(KERN_ERR "es1370: cannot register misc device\n");
2736 free_irq(s->irq, s);
2737 err_irq:
2738 release_region(s->io, ES1370_EXTENT);
2739 err_region:
2740 kfree(s);
2741 return ret;
2742}
2743
/*
 * PCI remove callback: quiesce the hardware, then release resources in
 * reverse probe order (IRQ, gameport, I/O region, sound devices, DMA
 * scratch buffer, state struct).
 */
static void __devexit es1370_remove(struct pci_dev *dev)
{
	struct es1370_state *s = pci_get_drvdata(dev);

	if (!s)
		return;
	list_del(&s->devs);
	outl(CTRL_SERR_DIS | (1 << CTRL_SH_WTSRSEL), s->io+ES1370_REG_CONTROL); /* switch everything off */
	outl(0, s->io+ES1370_REG_SERIAL_CONTROL); /* clear serial interrupts */
	/* wait for any in-flight interrupt handler before freeing the IRQ */
	synchronize_irq(s->irq);
	free_irq(s->irq, s);
	es1370_unregister_gameport(s);
	release_region(s->io, ES1370_EXTENT);
	unregister_sound_dsp(s->dev_audio);
	unregister_sound_mixer(s->dev_mixer);
	unregister_sound_dsp(s->dev_dac);
	unregister_sound_midi(s->dev_midi);
	pci_free_consistent(dev, 16, s->bugbuf_cpu, s->bugbuf_dma);
	kfree(s);
	pci_set_drvdata(dev, NULL);
}
2765
/* PCI IDs this driver binds to: the Ensoniq ES1370 AudioPCI. */
static struct pci_device_id id_table[] = {
	{ PCI_VENDOR_ID_ENSONIQ, PCI_DEVICE_ID_ENSONIQ_ES1370, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
	{ 0, }
};
2770
2771MODULE_DEVICE_TABLE(pci, id_table);
2772
/* PCI driver glue binding probe/remove to the ES1370 device IDs. */
static struct pci_driver es1370_driver = {
	.name		= "es1370",
	.id_table	= id_table,
	.probe		= es1370_probe,
	.remove		= __devexit_p(es1370_remove),
};
2779
/* Module entry point: announce the driver and register with the PCI core. */
static int __init init_es1370(void)
{
	printk(KERN_INFO "es1370: version v0.38 time " __TIME__ " " __DATE__ "\n");
	return pci_register_driver(&es1370_driver);
}
2785
/* Module exit point: unregister from the PCI core (remove() runs per card). */
static void __exit cleanup_es1370(void)
{
	printk(KERN_INFO "es1370: unloading\n");
	pci_unregister_driver(&es1370_driver);
}
2791
/* Wire the init/exit functions into the module load/unload sequence. */
module_init(init_es1370);
module_exit(cleanup_es1370);
2794
2795/* --------------------------------------------------------------------- */
2796
2797#ifndef MODULE
2798
2799/* format is: es1370=lineout[,micbias]] */
2800
2801static int __init es1370_setup(char *str)
2802{
2803 static unsigned __initdata nr_dev = 0;
2804
2805 if (nr_dev >= NR_DEVICE)
2806 return 0;
2807
2808 (void)
2809 ((get_option(&str,&lineout [nr_dev]) == 2)
2810 && get_option(&str,&micbias [nr_dev])
2811 );
2812
2813 nr_dev++;
2814 return 1;
2815}
2816
2817__setup("es1370=", es1370_setup);
2818
2819#endif /* MODULE */
diff --git a/sound/oss/esssolo1.c b/sound/oss/esssolo1.c
deleted file mode 100644
index 82f40a0a5c9c..000000000000
--- a/sound/oss/esssolo1.c
+++ /dev/null
@@ -1,2516 +0,0 @@
1/****************************************************************************/
2
3/*
4 * esssolo1.c -- ESS Technology Solo1 (ES1946) audio driver.
5 *
6 * Copyright (C) 1998-2001, 2003 Thomas Sailer (t.sailer@alumni.ethz.ch)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 * Module command line parameters:
23 * none so far
24 *
25 * Supported devices:
26 * /dev/dsp standard /dev/dsp device, (mostly) OSS compatible
27 * /dev/mixer standard /dev/mixer device, (mostly) OSS compatible
28 * /dev/midi simple MIDI UART interface, no ioctl
29 *
30 * Revision history
31 * 10.11.1998 0.1 Initial release (without any hardware)
32 * 22.03.1999 0.2 cinfo.blocks should be reset after GETxPTR ioctl.
33 * reported by Johan Maes <joma@telindus.be>
34 * return EAGAIN instead of EBUSY when O_NONBLOCK
35 * read/write cannot be executed
36 * 07.04.1999 0.3 implemented the following ioctl's: SOUND_PCM_READ_RATE,
37 * SOUND_PCM_READ_CHANNELS, SOUND_PCM_READ_BITS;
38 * Alpha fixes reported by Peter Jones <pjones@redhat.com>
39 * 15.06.1999 0.4 Fix bad allocation bug.
40 * Thanks to Deti Fliegl <fliegl@in.tum.de>
41 * 28.06.1999 0.5 Add pci_set_master
42 * 12.08.1999 0.6 Fix MIDI UART crashing the driver
43 * Changed mixer semantics from OSS documented
44 * behaviour to OSS "code behaviour".
45 * Recording might actually work now.
46 * The real DDMA controller address register is at PCI config
47 * 0x60, while the register at 0x18 is used as a placeholder
48 * register for BIOS address allocation. This register
49 * is supposed to be copied into 0x60, according
50 * to the Solo1 datasheet. When I do that, I can access
51 * the DDMA registers except the mask bit, which
52 * is stuck at 1. When I copy the contents of 0x18 +0x10
53 * to the DDMA base register, everything seems to work.
54 * The fun part is that the Windows Solo1 driver doesn't
55 * seem to do these tricks.
56 * Bugs remaining: plops and clicks when starting/stopping playback
57 * 31.08.1999 0.7 add spin_lock_init
58 * replaced current->state = x with set_current_state(x)
59 * 03.09.1999 0.8 change read semantics for MIDI to match
60 * OSS more closely; remove possible wakeup race
61 * 07.10.1999 0.9 Fix initialization; complain if sequencer writes time out
62 * Revised resource grabbing for the FM synthesizer
63 * 28.10.1999 0.10 More waitqueue races fixed
64 * 09.12.1999 0.11 Work around stupid Alpha port issue (virt_to_bus(kmalloc(GFP_DMA)) > 16M)
65 * Disabling recording on Alpha
66 * 12.01.2000 0.12 Prevent some ioctl's from returning bad count values on underrun/overrun;
67 * Tim Janik's BSE (Bedevilled Sound Engine) found this
68 * Integrated (aka redid 8-)) APM support patch by Zach Brown
69 * 07.02.2000 0.13 Use pci_alloc_consistent and pci_register_driver
70 * 19.02.2000 0.14 Use pci_dma_supported to determine if recording should be disabled
71 * 13.03.2000 0.15 Reintroduce initialization of a couple of PCI config space registers
72 * 21.11.2000 0.16 Initialize dma buffers in poll, otherwise poll may return a bogus mask
73 * 12.12.2000 0.17 More dma buffer initializations, patch from
74 * Tjeerd Mulder <tjeerd.mulder@fujitsu-siemens.com>
75 * 31.01.2001 0.18 Register/Unregister gameport, original patch from
76 * Nathaniel Daw <daw@cs.cmu.edu>
77 * Fix SETTRIGGER non OSS API conformity
78 * 10.03.2001 provide abs function, prevent picking up a bogus kernel macro
79 * for abs. Bug report by Andrew Morton <andrewm@uow.edu.au>
80 * 15.05.2001 pci_enable_device moved, return values in probe cleaned
81 * up. Marcus Meissner <mm@caldera.de>
82 * 22.05.2001 0.19 more cleanups, changed PM to PCI 2.4 style, got rid
83 * of global list of devices, using pci device data.
84 * Marcus Meissner <mm@caldera.de>
85 * 03.01.2003 0.20 open_mode fixes from Georg Acher <acher@in.tum.de>
86 */
87
88/*****************************************************************************/
89
90#include <linux/interrupt.h>
91#include <linux/module.h>
92#include <linux/string.h>
93#include <linux/ioport.h>
94#include <linux/sched.h>
95#include <linux/delay.h>
96#include <linux/sound.h>
97#include <linux/slab.h>
98#include <linux/soundcard.h>
99#include <linux/pci.h>
100#include <linux/bitops.h>
101#include <linux/init.h>
102#include <linux/poll.h>
103#include <linux/spinlock.h>
104#include <linux/smp_lock.h>
105#include <linux/gameport.h>
106#include <linux/wait.h>
107#include <linux/dma-mapping.h>
108#include <linux/mutex.h>
109
110
111#include <asm/io.h>
112#include <asm/page.h>
113#include <asm/uaccess.h>
114
115#include "dm.h"
116
117/* --------------------------------------------------------------------- */
118
119#undef OSS_DOCUMENTED_MIXER_SEMANTICS
120
121/* --------------------------------------------------------------------- */
122
123#ifndef PCI_VENDOR_ID_ESS
124#define PCI_VENDOR_ID_ESS 0x125d
125#endif
126#ifndef PCI_DEVICE_ID_ESS_SOLO1
127#define PCI_DEVICE_ID_ESS_SOLO1 0x1969
128#endif
129
130#define SOLO1_MAGIC ((PCI_VENDOR_ID_ESS<<16)|PCI_DEVICE_ID_ESS_SOLO1)
131
132#define DDMABASE_OFFSET 0 /* chip bug workaround kludge */
133#define DDMABASE_EXTENT 16
134
135#define IOBASE_EXTENT 16
136#define SBBASE_EXTENT 16
137#define VCBASE_EXTENT (DDMABASE_EXTENT+DDMABASE_OFFSET)
138#define MPUBASE_EXTENT 4
139#define GPBASE_EXTENT 4
140#define GAMEPORT_EXTENT 4
141
142#define FMSYNTH_EXTENT 4
143
144/* MIDI buffer sizes */
145
146#define MIDIINBUF 256
147#define MIDIOUTBUF 256
148
149#define FMODE_MIDI_SHIFT 3
150#define FMODE_MIDI_READ (FMODE_READ << FMODE_MIDI_SHIFT)
151#define FMODE_MIDI_WRITE (FMODE_WRITE << FMODE_MIDI_SHIFT)
152
153#define FMODE_DMFM 0x10
154
155#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
156#define SUPPORT_JOYSTICK 1
157#endif
158
159static struct pci_driver solo1_driver;
160
161/* --------------------------------------------------------------------- */
162
/*
 * Per-card driver state; one instance per ESS Solo1, hung off the
 * pci_dev via pci_set_drvdata().
 */
struct solo1_state {
	/* magic: must equal SOLO1_MAGIC; checked by VALIDATE_STATE() */
	unsigned int magic;

	/* the corresponding pci_dev structure */
	struct pci_dev *dev;

	/* soundcore stuff: minor numbers returned by register_sound_*() */
	int dev_audio;
	int dev_mixer;
	int dev_midi;
	int dev_dmfm;

	/* hardware resources */
	unsigned long iobase, sbbase, vcbase, ddmabase, mpubase; /* long for SPARC */
	unsigned int irq;

	/* mixer registers (software shadow of the chip state) */
	struct {
		unsigned short vol[10];
		unsigned int recsrc;
		unsigned int modcnt;	/* bumped on every mixer write; reported via SOUND_MIXER_INFO */
		unsigned short micpreamp;
	} mix;

	/* wave stuff: current sample format/channels/rate */
	unsigned fmt;
	unsigned channels;
	unsigned rate;
	unsigned char clkdiv;
	unsigned ena;		/* FMODE_READ/FMODE_WRITE bits: directions with DMA running */

	spinlock_t lock;	/* protects ena, DMA pointers and register sequences */
	struct mutex open_mutex;
	mode_t open_mode;
	wait_queue_head_t open_wait;

	struct dmabuf {
		void *rawbuf;
		dma_addr_t dmaaddr;
		unsigned buforder;	/* log2 pages of rawbuf */
		unsigned numfrag;
		unsigned fragshift;
		unsigned hwptr, swptr;	/* hardware and software ring positions */
		unsigned total_bytes;
		int count;		/* bytes queued (DAC) / available (ADC) */
		unsigned error; /* over/underrun */
		wait_queue_head_t wait;
		/* redundant, but makes calculations easier */
		unsigned fragsize;
		unsigned dmasize;
		unsigned fragsamples;
		/* OSS stuff */
		unsigned mapped:1;
		unsigned ready:1;
		unsigned endcleared:1;
		unsigned enabled:1;
		unsigned ossfragshift;
		int ossmaxfrags;
		unsigned subdivision;
	} dma_dac, dma_adc;

	/* midi stuff: ring buffers serviced from the interrupt/timer path */
	struct {
		unsigned ird, iwr, icnt;
		unsigned ord, owr, ocnt;
		wait_queue_head_t iwait;
		wait_queue_head_t owait;
		struct timer_list timer;
		unsigned char ibuf[MIDIINBUF];
		unsigned char obuf[MIDIOUTBUF];
	} midi;

#if SUPPORT_JOYSTICK
	struct gameport *gameport;
#endif
};
240
241/* --------------------------------------------------------------------- */
242
/*
 * Write one byte to the Solo1 sequencer write port (SB base + 0xc).
 * Polls the busy bit (0x80) up to 0xffff times; on timeout the byte is
 * written anyway after logging an error.
 */
static inline void write_seq(struct solo1_state *s, unsigned char data)
{
	int i;
	unsigned long flags;

	/* the local_irq_save stunt is to send the data within the command window */
	for (i = 0; i < 0xffff; i++) {
		local_irq_save(flags);
		if (!(inb(s->sbbase+0xc) & 0x80)) {
			outb(data, s->sbbase+0xc);
			local_irq_restore(flags);
			return;
		}
		local_irq_restore(flags);
	}
	printk(KERN_ERR "esssolo1: write_seq timeout\n");
	outb(data, s->sbbase+0xc);
}
261
/*
 * Read one byte from the sequencer data port (SB base + 0xa) once the
 * data-ready bit (0x80 at SB base + 0xe) is set.
 * Returns 1 on success, 0 on timeout or NULL @data.
 */
static inline int read_seq(struct solo1_state *s, unsigned char *data)
{
	int i;

	if (!data)
		return 0;
	for (i = 0; i < 0xffff; i++)
		if (inb(s->sbbase+0xe) & 0x80) {
			*data = inb(s->sbbase+0xa);
			return 1;
		}
	printk(KERN_ERR "esssolo1: read_seq timeout\n");
	return 0;
}
276
/*
 * Reset the controller: pulse the reset bit, wait for the chip to
 * acknowledge with 0xaa on the read port, then switch to ESS enhanced
 * mode (command 0xc6).  Returns 1 on success, 0 on timeout.
 */
static inline int reset_ctrl(struct solo1_state *s)
{
	int i;

	outb(3, s->sbbase+6); /* clear sequencer and FIFO */
	udelay(10);
	outb(0, s->sbbase+6);
	for (i = 0; i < 0xffff; i++)
		if (inb(s->sbbase+0xe) & 0x80)
			if (inb(s->sbbase+0xa) == 0xaa) {
				write_seq(s, 0xc6); /* enter enhanced mode */
				return 1;
			}
	return 0;
}
292
/* Write controller register @reg: index byte then data byte via the sequencer. */
static void write_ctrl(struct solo1_state *s, unsigned char reg, unsigned char data)
{
	write_seq(s, reg);
	write_seq(s, data);
}
298
299#if 0 /* unused */
300static unsigned char read_ctrl(struct solo1_state *s, unsigned char reg)
301{
302 unsigned char r;
303
304 write_seq(s, 0xc0);
305 write_seq(s, reg);
306 read_seq(s, &r);
307 return r;
308}
309#endif /* unused */
310
/* Write mixer register @reg (index port sbbase+4, data port sbbase+5). */
static void write_mixer(struct solo1_state *s, unsigned char reg, unsigned char data)
{
	outb(reg, s->sbbase+4);
	outb(data, s->sbbase+5);
}
316
/* Read mixer register @reg (index port sbbase+4, data port sbbase+5). */
static unsigned char read_mixer(struct solo1_state *s, unsigned char reg)
{
	outb(reg, s->sbbase+4);
	return inb(s->sbbase+5);
}
322
323/* --------------------------------------------------------------------- */
324
/*
 * ld2() - integer base-2 logarithm: position of the highest set bit.
 * Returns 0 for inputs 0 and 1 (matching the historical behaviour).
 */
static inline unsigned ld2(unsigned int x)
{
	unsigned pos = 0;
	unsigned shift;

	/* binary-search the top set bit, halving the window: 16, 8, 4, 2 */
	for (shift = 16; shift >= 2; shift >>= 1) {
		if (x >> shift) {
			x >>= shift;
			pos += shift;
		}
	}
	if (x >= 2)
		pos++;
	return pos;
}
349
350/* --------------------------------------------------------------------- */
351
/*
 * Stop playback: clear FMODE_WRITE in the enable mask and write the
 * DAC control register (0x78) with the stop value.  Lock held briefly.
 */
static inline void stop_dac(struct solo1_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->ena &= ~FMODE_WRITE;
	write_mixer(s, 0x78, 0x10);
	spin_unlock_irqrestore(&s->lock, flags);
}
361
/*
 * Start playback if not already running, the buffer is programmed
 * (ready) and there is data queued (or the buffer is mmap()ed).
 * Uses the original two-step 0x12 -> udelay -> 0x13 start sequence on
 * mixer register 0x78.
 */
static void start_dac(struct solo1_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (!(s->ena & FMODE_WRITE) && (s->dma_dac.mapped || s->dma_dac.count > 0) && s->dma_dac.ready) {
		s->ena |= FMODE_WRITE;
		write_mixer(s, 0x78, 0x12);
		udelay(10);
		write_mixer(s, 0x78, 0x13);
	}
	spin_unlock_irqrestore(&s->lock, flags);
}
375
/*
 * Stop capture: clear FMODE_READ in the enable mask and write the
 * ADC control register (0xb8) with the stop value.  Lock held briefly.
 */
static inline void stop_adc(struct solo1_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->ena &= ~FMODE_READ;
	write_ctrl(s, 0xb8, 0xe);
	spin_unlock_irqrestore(&s->lock, flags);
}
385
/*
 * Start capture if not already running, the buffer is programmed and
 * there is room for at least two fragments (or the buffer is mmap()ed).
 * Reprograms the DDMA block each time: reset, mask, mode, base address,
 * count, then unmask.
 */
static void start_adc(struct solo1_state *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (!(s->ena & FMODE_READ) && (s->dma_adc.mapped || s->dma_adc.count < (signed)(s->dma_adc.dmasize - 2*s->dma_adc.fragsize))
	    && s->dma_adc.ready) {
		s->ena |= FMODE_READ;
		write_ctrl(s, 0xb8, 0xf);
#if 0
		printk(KERN_DEBUG "solo1: DMAbuffer: 0x%08lx\n", (long)s->dma_adc.rawbuf);
		printk(KERN_DEBUG "solo1: DMA: mask: 0x%02x cnt: 0x%04x addr: 0x%08x stat: 0x%02x\n",
		       inb(s->ddmabase+0xf), inw(s->ddmabase+4), inl(s->ddmabase), inb(s->ddmabase+8));
#endif
		outb(0, s->ddmabase+0xd);  /* master reset */
		outb(1, s->ddmabase+0xf);  /* mask */
		outb(0x54/*0x14*/, s->ddmabase+0xb);  /* DMA_MODE_READ | DMA_MODE_AUTOINIT */
		outl(virt_to_bus(s->dma_adc.rawbuf), s->ddmabase);
		outw(s->dma_adc.dmasize-1, s->ddmabase+4);
		outb(0, s->ddmabase+0xf);	/* unmask: DMA may run now */
	}
	spin_unlock_irqrestore(&s->lock, flags);
#if 0
	printk(KERN_DEBUG "solo1: start DMA: reg B8: 0x%02x SBstat: 0x%02x\n"
	       KERN_DEBUG "solo1: DMA: stat: 0x%02x cnt: 0x%04x mask: 0x%02x\n",
	       read_ctrl(s, 0xb8), inb(s->sbbase+0xc),
	       inb(s->ddmabase+8), inw(s->ddmabase+4), inb(s->ddmabase+0xf));
	printk(KERN_DEBUG "solo1: A1: 0x%02x A2: 0x%02x A4: 0x%02x A5: 0x%02x A8: 0x%02x\n"
	       KERN_DEBUG "solo1: B1: 0x%02x B2: 0x%02x B4: 0x%02x B7: 0x%02x B8: 0x%02x B9: 0x%02x\n",
	       read_ctrl(s, 0xa1), read_ctrl(s, 0xa2), read_ctrl(s, 0xa4), read_ctrl(s, 0xa5), read_ctrl(s, 0xa8),
	       read_ctrl(s, 0xb1), read_ctrl(s, 0xb2), read_ctrl(s, 0xb4), read_ctrl(s, 0xb7), read_ctrl(s, 0xb8),
	       read_ctrl(s, 0xb9));
#endif
}
420
421/* --------------------------------------------------------------------- */
422
423#define DMABUF_DEFAULTORDER (15-PAGE_SHIFT)
424#define DMABUF_MINORDER 1
425
/*
 * Free a DMA buffer set up by prog_dmabuf(): clear the PageReserved
 * marking (set for remap_pfn_range) on every page, release the
 * consistent allocation, and mark the buffer not ready/mapped.
 */
static inline void dealloc_dmabuf(struct solo1_state *s, struct dmabuf *db)
{
	struct page *page, *pend;

	if (db->rawbuf) {
		/* undo marking the pages as reserved */
		pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
		for (page = virt_to_page(db->rawbuf); page <= pend; page++)
			ClearPageReserved(page);
		pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
	}
	db->rawbuf = NULL;
	db->mapped = db->ready = 0;
}
440
/*
 * (Re)initialize a DMA ring buffer: allocate it on first use (largest
 * order first, falling back to smaller), then derive fragment size and
 * count from the current rate/format and the OSS SETFRAGMENT /
 * SUBDIVIDE settings.  Returns 0 or -ENOMEM.
 */
static int prog_dmabuf(struct solo1_state *s, struct dmabuf *db)
{
	int order;
	unsigned bytespersec;
	unsigned bufs, sample_shift = 0;
	struct page *page, *pend;

	db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
	if (!db->rawbuf) {
		db->ready = db->mapped = 0;
		for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
			if ((db->rawbuf = pci_alloc_consistent(s->dev, PAGE_SIZE << order, &db->dmaaddr)))
				break;
		if (!db->rawbuf)
			return -ENOMEM;
		db->buforder = order;
		/* now mark the pages as reserved; otherwise remap_pfn_range doesn't do what we want */
		pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
		for (page = virt_to_page(db->rawbuf); page <= pend; page++)
			SetPageReserved(page);
	}
	/* sample_shift: bytes per sample frame = 1 << sample_shift */
	if (s->fmt & (AFMT_S16_LE | AFMT_U16_LE))
		sample_shift++;
	if (s->channels > 1)
		sample_shift++;
	bytespersec = s->rate << sample_shift;
	bufs = PAGE_SIZE << db->buforder;
	if (db->ossfragshift) {
		/* user-requested fragment size, clamped to >= 1 ms of data */
		if ((1000 << db->ossfragshift) < bytespersec)
			db->fragshift = ld2(bytespersec/1000);
		else
			db->fragshift = db->ossfragshift;
	} else {
		/* default: ~10 ms fragments (bytespersec/100), minimum 8 bytes */
		db->fragshift = ld2(bytespersec/100/(db->subdivision ? db->subdivision : 1));
		if (db->fragshift < 3)
			db->fragshift = 3;
	}
	db->numfrag = bufs >> db->fragshift;
	/* shrink fragments until at least 4 fit in the buffer */
	while (db->numfrag < 4 && db->fragshift > 3) {
		db->fragshift--;
		db->numfrag = bufs >> db->fragshift;
	}
	db->fragsize = 1 << db->fragshift;
	if (db->ossmaxfrags >= 4 && db->ossmaxfrags < db->numfrag)
		db->numfrag = db->ossmaxfrags;
	db->fragsamples = db->fragsize >> sample_shift;
	db->dmasize = db->numfrag << db->fragshift;
	db->enabled = 1;
	return 0;
}
491
/*
 * Program the capture (ADC) side: set up the ring buffer and load the
 * DDMA register block.  The Solo1 DDMA engine can only address 24 bits,
 * hence the dma_mask check and the panic if the buffer lands above 16M.
 * Returns 0, -EIO (unsuitable DMA mask) or a prog_dmabuf() error.
 */
static inline int prog_dmabuf_adc(struct solo1_state *s)
{
	unsigned long va;
	int c;

	stop_adc(s);
	/* check if PCI implementation supports 24bit busmaster DMA */
	if (s->dev->dma_mask > 0xffffff)
		return -EIO;
	if ((c = prog_dmabuf(s, &s->dma_adc)))
		return c;
	va = s->dma_adc.dmaaddr;
	if ((va & ~((1<<24)-1)))
		panic("solo1: buffer above 16M boundary");
	outb(0, s->ddmabase+0xd); /* clear */
	outb(1, s->ddmabase+0xf); /* mask */
	/*outb(0, s->ddmabase+8);*/ /* enable (enable is active low!) */
	outb(0x54, s->ddmabase+0xb); /* DMA_MODE_READ | DMA_MODE_AUTOINIT */
	outl(va, s->ddmabase);
	outw(s->dma_adc.dmasize-1, s->ddmabase+4);
	/* per-fragment interrupt: load negated sample count into A4/A5 */
	c = - s->dma_adc.fragsamples;
	write_ctrl(s, 0xa4, c);
	write_ctrl(s, 0xa5, c >> 8);
	outb(0, s->ddmabase+0xf);
	s->dma_adc.ready = 1;
	return 0;
}
519
/*
 * Program the playback (DAC/Audio2) side: set up the ring buffer,
 * prefill it with silence for the current format, and load the Audio2
 * DMA registers.  The buffer must not cross a 1M boundary; the 16-bit
 * count register limits the usable size (see comment below).
 */
static int prog_dmabuf_dac(struct solo1_state *s)
{
	unsigned long va;
	int c;

	stop_dac(s);
	if ((c = prog_dmabuf(s, &s->dma_dac)))
		return c;
	memset(s->dma_dac.rawbuf, (s->fmt & (AFMT_U8 | AFMT_U16_LE)) ? 0 : 0x80, s->dma_dac.dmasize); /* almost correct for U16 */
	va = s->dma_dac.dmaaddr;
	if ((va ^ (va + s->dma_dac.dmasize - 1)) & ~((1<<20)-1))
		panic("solo1: buffer crosses 1M boundary");
	outl(va, s->iobase);
	/* warning: s->dma_dac.dmasize & 0xffff must not be zero! i.e. this limits us to a 32k buffer */
	outw(s->dma_dac.dmasize, s->iobase+4);
	/* per-fragment interrupt: load negated sample count into mixer 0x74/0x76 */
	c = - s->dma_dac.fragsamples;
	write_mixer(s, 0x74, c);
	write_mixer(s, 0x76, c >> 8);
	outb(0xa, s->iobase+6);
	s->dma_dac.ready = 1;
	return 0;
}
542
/*
 * clear_advance() - fill @len bytes of a circular buffer with @c,
 * starting at offset @bptr and wrapping at @bsize.  Used to pad the
 * playback ring with silence past the current write position.
 */
static inline void clear_advance(void *buf, unsigned bsize, unsigned bptr, unsigned len, unsigned char c)
{
	char *p = buf;

	if (bptr + len > bsize) {
		/* fill to the end of the buffer, then wrap to the start */
		unsigned tail = bsize - bptr;

		memset(p + bptr, c, tail);
		memset(p, c, len - tail);
	} else {
		memset(p + bptr, c, len);
	}
}
553
/*
 * Advance the software view of the hardware DMA pointers and wake any
 * sleeping readers/writers.  Stops a direction on over/underrun
 * (unless mmap()ed) and counts the error.
 *
 * Call with s->lock held!
 */
static void solo1_update_ptr(struct solo1_state *s)
{
	int diff;
	unsigned hwptr;

	/* update ADC pointer */
	if (s->ena & FMODE_READ) {
		/* DDMA count register counts down from dmasize-1 */
		hwptr = (s->dma_adc.dmasize - 1 - inw(s->ddmabase+4)) % s->dma_adc.dmasize;
		diff = (s->dma_adc.dmasize + hwptr - s->dma_adc.hwptr) % s->dma_adc.dmasize;
		s->dma_adc.hwptr = hwptr;
		s->dma_adc.total_bytes += diff;
		s->dma_adc.count += diff;
#if 0
		printk(KERN_DEBUG "solo1: rd: hwptr %u swptr %u dmasize %u count %u\n",
		       s->dma_adc.hwptr, s->dma_adc.swptr, s->dma_adc.dmasize, s->dma_adc.count);
#endif
		if (s->dma_adc.mapped) {
			if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
				wake_up(&s->dma_adc.wait);
		} else {
			/* overrun if less than half a fragment of headroom remains */
			if (s->dma_adc.count > (signed)(s->dma_adc.dmasize - ((3 * s->dma_adc.fragsize) >> 1))) {
				s->ena &= ~FMODE_READ;
				write_ctrl(s, 0xb8, 0xe);
				s->dma_adc.error++;
			}
			if (s->dma_adc.count > 0)
				wake_up(&s->dma_adc.wait);
		}
	}
	/* update DAC pointer */
	if (s->ena & FMODE_WRITE) {
		hwptr = (s->dma_dac.dmasize - inw(s->iobase+4)) % s->dma_dac.dmasize;
		diff = (s->dma_dac.dmasize + hwptr - s->dma_dac.hwptr) % s->dma_dac.dmasize;
		s->dma_dac.hwptr = hwptr;
		s->dma_dac.total_bytes += diff;
#if 0
		printk(KERN_DEBUG "solo1: wr: hwptr %u swptr %u dmasize %u count %u\n",
		       s->dma_dac.hwptr, s->dma_dac.swptr, s->dma_dac.dmasize, s->dma_dac.count);
#endif
		if (s->dma_dac.mapped) {
			s->dma_dac.count += diff;
			if (s->dma_dac.count >= (signed)s->dma_dac.fragsize)
				wake_up(&s->dma_dac.wait);
		} else {
			s->dma_dac.count -= diff;
			if (s->dma_dac.count <= 0) {
				/* underrun: stop playback */
				s->ena &= ~FMODE_WRITE;
				write_mixer(s, 0x78, 0x12);
				s->dma_dac.error++;
			} else if (s->dma_dac.count <= (signed)s->dma_dac.fragsize && !s->dma_dac.endcleared) {
				/* pad the tail with silence so an imminent underrun is inaudible */
				clear_advance(s->dma_dac.rawbuf, s->dma_dac.dmasize, s->dma_dac.swptr,
					      s->dma_dac.fragsize, (s->fmt & (AFMT_U8 | AFMT_U16_LE)) ? 0 : 0x80);
				s->dma_dac.endcleared = 1;
			}
			if (s->dma_dac.count < (signed)s->dma_dac.dmasize)
				wake_up(&s->dma_dac.wait);
		}
	}
}
615
616/* --------------------------------------------------------------------- */
617
/*
 * Program the codec for the current rate/format/channel settings:
 * reset into enhanced mode, set the clock divider and filter roll-off
 * for both directions, then configure ADC and DAC parameter registers.
 * Leaves both converters stopped (s->ena = 0).
 */
static void prog_codec(struct solo1_state *s)
{
	unsigned long flags;
	int fdiv, filter;
	unsigned char c;

	reset_ctrl(s);
	write_seq(s, 0xd3);
	/* program sampling rates */
	filter = s->rate * 9 / 20; /* Set filter roll-off to 90% of rate/2 */
	fdiv = 256 - 7160000 / (filter * 82);
	spin_lock_irqsave(&s->lock, flags);
	write_ctrl(s, 0xa1, s->clkdiv);
	write_ctrl(s, 0xa2, fdiv);
	write_mixer(s, 0x70, s->clkdiv);
	write_mixer(s, 0x72, fdiv);
	/* program ADC parameters */
	write_ctrl(s, 0xb8, 0xe);
	write_ctrl(s, 0xb9, /*0x1*/0);
	write_ctrl(s, 0xa8, (s->channels > 1) ? 0x11 : 0x12);
	/* build the B7 mode byte from format flags: 16-bit, signed, stereo */
	c = 0xd0;
	if (s->fmt & (AFMT_S16_LE | AFMT_U16_LE))
		c |= 0x04;
	if (s->fmt & (AFMT_S16_LE | AFMT_S8))
		c |= 0x20;
	if (s->channels > 1)
		c ^= 0x48;
	write_ctrl(s, 0xb7, (c & 0x70) | 1);
	write_ctrl(s, 0xb7, c);
	write_ctrl(s, 0xb1, 0x50);
	write_ctrl(s, 0xb2, 0x50);
	/* program DAC parameters */
	c = 0x40;
	if (s->fmt & (AFMT_S16_LE | AFMT_U16_LE))
		c |= 1;
	if (s->fmt & (AFMT_S16_LE | AFMT_S8))
		c |= 4;
	if (s->channels > 1)
		c |= 2;
	write_mixer(s, 0x7a, c);
	write_mixer(s, 0x78, 0x10);
	s->ena = 0;
	spin_unlock_irqrestore(&s->lock, flags);
}
662
663/* --------------------------------------------------------------------- */
664
static const char invalid_magic[] = KERN_CRIT "solo1: invalid magic value\n";

/*
 * Sanity-check a state pointer.  Beware: on failure this macro does a
 * hidden "return -ENXIO" from the *enclosing* function.
 */
#define VALIDATE_STATE(s) \
({ \
	if (!(s) || (s)->magic != SOLO1_MAGIC) { \
		printk(invalid_magic); \
		return -ENXIO; \
	} \
})
674
675/* --------------------------------------------------------------------- */
676
/*
 * Handle OSS mixer ioctls: the two private controls (mic preamp,
 * spatializer), info queries, record-source selection and per-channel
 * volume get/set.  Volume scaling maps the OSS 0-100 range onto the
 * chip's attenuator steps and caches the result in s->mix.vol[].
 */
static int mixer_ioctl(struct solo1_state *s, unsigned int cmd, unsigned long arg)
{
	/* record-source mask per hardware selector value (mixer reg 0x1c, low 3 bits) */
	static const unsigned int mixer_src[8] = {
		SOUND_MASK_MIC, SOUND_MASK_MIC, SOUND_MASK_CD, SOUND_MASK_VOLUME,
		SOUND_MASK_MIC, 0, SOUND_MASK_LINE, 0
	};
	/* OSS mixer channel -> 1-based index into mix.vol[] / mixreg[] (0 = unsupported) */
	static const unsigned char mixtable1[SOUND_MIXER_NRDEVICES] = {
		[SOUND_MIXER_PCM]     = 1,   /* voice */
		[SOUND_MIXER_SYNTH]   = 2,   /* FM */
		[SOUND_MIXER_CD]      = 3,   /* CD */
		[SOUND_MIXER_LINE]    = 4,   /* Line */
		[SOUND_MIXER_LINE1]   = 5,   /* AUX */
		[SOUND_MIXER_MIC]     = 6,   /* Mic */
		[SOUND_MIXER_LINE2]   = 7,   /* Mono in */
		[SOUND_MIXER_SPEAKER] = 8,   /* Speaker */
		[SOUND_MIXER_RECLEV]  = 9,   /* Recording level */
		[SOUND_MIXER_VOLUME]  = 10   /* Master Volume */
	};
	/* hardware register for each of the first seven mixtable1 entries */
	static const unsigned char mixreg[] = {
		0x7c,   /* voice */
		0x36,   /* FM */
		0x38,   /* CD */
		0x3e,   /* Line */
		0x3a,   /* AUX */
		0x1a,   /* Mic */
		0x6d    /* Mono in */
	};
	unsigned char l, r, rl, rr, vidx;
	int i, val;
	int __user *p = (int __user *)arg;

	VALIDATE_STATE(s);

	if (cmd == SOUND_MIXER_PRIVATE1) {
		/* enable/disable/query mixer preamp */
		if (get_user(val, p))
			return -EFAULT;
		if (val != -1) {
			val = val ? 0xff : 0xf7;
			write_mixer(s, 0x7d, (read_mixer(s, 0x7d) | 0x08) & val);
		}
		val = (read_mixer(s, 0x7d) & 0x08) ? 1 : 0;
		return put_user(val, p);
	}
	if (cmd == SOUND_MIXER_PRIVATE2) {
		/* enable/disable/query spatializer */
		if (get_user(val, p))
			return -EFAULT;
		if (val != -1) {
			val &= 0x3f;
			write_mixer(s, 0x52, val);
			write_mixer(s, 0x50, val ? 0x08 : 0);
		}
		return put_user(read_mixer(s, 0x52), p);
	}
	if (cmd == SOUND_MIXER_INFO) {
		mixer_info info;
		strncpy(info.id, "Solo1", sizeof(info.id));
		strncpy(info.name, "ESS Solo1", sizeof(info.name));
		info.modify_counter = s->mix.modcnt;
		if (copy_to_user((void __user *)arg, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	if (cmd == SOUND_OLD_MIXER_INFO) {
		_old_mixer_info info;
		strncpy(info.id, "Solo1", sizeof(info.id));
		strncpy(info.name, "ESS Solo1", sizeof(info.name));
		if (copy_to_user((void __user *)arg, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	if (cmd == OSS_GETVERSION)
		return put_user(SOUND_VERSION, p);
	if (_IOC_TYPE(cmd) != 'M' || _SIOC_SIZE(cmd) != sizeof(int))
		return -EINVAL;
	if (_SIOC_DIR(cmd) == _SIOC_READ) {
		/* read-only queries */
		switch (_IOC_NR(cmd)) {
		case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */
			return put_user(mixer_src[read_mixer(s, 0x1c) & 7], p);

		case SOUND_MIXER_DEVMASK: /* Arg contains a bit for each supported device */
			return put_user(SOUND_MASK_PCM | SOUND_MASK_SYNTH | SOUND_MASK_CD |
					SOUND_MASK_LINE | SOUND_MASK_LINE1 | SOUND_MASK_MIC |
					SOUND_MASK_VOLUME | SOUND_MASK_LINE2 | SOUND_MASK_RECLEV |
					SOUND_MASK_SPEAKER, p);

		case SOUND_MIXER_RECMASK: /* Arg contains a bit for each supported recording source */
			return put_user(SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD | SOUND_MASK_VOLUME, p);

		case SOUND_MIXER_STEREODEVS: /* Mixer channels supporting stereo */
			return put_user(SOUND_MASK_PCM | SOUND_MASK_SYNTH | SOUND_MASK_CD |
					SOUND_MASK_LINE | SOUND_MASK_LINE1 | SOUND_MASK_MIC |
					SOUND_MASK_VOLUME | SOUND_MASK_LINE2 | SOUND_MASK_RECLEV, p);

		case SOUND_MIXER_CAPS:
			return put_user(SOUND_CAP_EXCL_INPUT, p);

		default:
			i = _IOC_NR(cmd);
			if (i >= SOUND_MIXER_NRDEVICES || !(vidx = mixtable1[i]))
				return -EINVAL;
			return put_user(s->mix.vol[vidx-1], p);
		}
	}
	if (_SIOC_DIR(cmd) != (_SIOC_READ|_SIOC_WRITE))
		return -EINVAL;
	s->mix.modcnt++;
	switch (_IOC_NR(cmd)) {
	case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */
#if 0
	{
		static const unsigned char regs[] = {
			0x1c, 0x1a, 0x36, 0x38, 0x3a, 0x3c, 0x3e, 0x60, 0x62, 0x6d, 0x7c
		};
		int i;

		for (i = 0; i < sizeof(regs); i++)
			printk(KERN_DEBUG "solo1: mixer reg 0x%02x: 0x%02x\n",
			       regs[i], read_mixer(s, regs[i]));
		printk(KERN_DEBUG "solo1: ctrl reg 0x%02x: 0x%02x\n",
		       0xb4, read_ctrl(s, 0xb4));
	}
#endif
		if (get_user(val, p))
			return -EFAULT;
		i = hweight32(val);
		if (i == 0)
			return 0;
		else if (i > 1)
			/* several sources requested: drop the currently selected one */
			val &= ~mixer_src[read_mixer(s, 0x1c) & 7];
		for (i = 0; i < 8; i++) {
			if (mixer_src[i] & val)
				break;
		}
		if (i > 7)
			return 0;
		write_mixer(s, 0x1c, i);
		return 0;

	case SOUND_MIXER_VOLUME:
		if (get_user(val, p))
			return -EFAULT;
		l = val & 0xff;
		if (l > 100)
			l = 100;
		r = (val >> 8) & 0xff;
		if (r > 100)
			r = 100;
		/* below ~6% use the hardware mute bit (0x40) */
		if (l < 6) {
			rl = 0x40;
			l = 0;
		} else {
			rl = (l * 2 - 11) / 3;
			l = (rl * 3 + 11) / 2;
		}
		if (r < 6) {
			rr = 0x40;
			r = 0;
		} else {
			rr = (r * 2 - 11) / 3;
			r = (rr * 3 + 11) / 2;
		}
		write_mixer(s, 0x60, rl);
		write_mixer(s, 0x62, rr);
#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
		s->mix.vol[9] = ((unsigned int)r << 8) | l;
#else
		s->mix.vol[9] = val;
#endif
		return put_user(s->mix.vol[9], p);

	case SOUND_MIXER_SPEAKER:
		if (get_user(val, p))
			return -EFAULT;
		l = val & 0xff;
		if (l > 100)
			l = 100;
		else if (l < 2)
			l = 2;
		rl = (l - 2) / 14;
		l = rl * 14 + 2;
		write_mixer(s, 0x3c, rl);
#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
		s->mix.vol[7] = l * 0x101;
#else
		s->mix.vol[7] = val;
#endif
		return put_user(s->mix.vol[7], p);

	case SOUND_MIXER_RECLEV:
		if (get_user(val, p))
			return -EFAULT;
		l = (val << 1) & 0x1fe;
		if (l > 200)
			l = 200;
		else if (l < 5)
			l = 5;
		r = (val >> 7) & 0x1fe;
		if (r > 200)
			r = 200;
		else if (r < 5)
			r = 5;
		rl = (l - 5) / 13;
		rr = (r - 5) / 13;
		/*
		 * NOTE(review): r is rebuilt from rl (left) and l from rr
		 * (right) — this looks transposed relative to the scaling
		 * just above.  The values are only consumed when
		 * OSS_DOCUMENTED_MIXER_SEMANTICS is defined (it is
		 * #undef'd at the top of this file), so the swap is
		 * latent; confirm before relying on mix.vol contents.
		 */
		r = (rl * 13 + 5) / 2;
		l = (rr * 13 + 5) / 2;
		write_ctrl(s, 0xb4, (rl << 4) | rr);
#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
		s->mix.vol[8] = ((unsigned int)r << 8) | l;
#else
		s->mix.vol[8] = val;
#endif
		return put_user(s->mix.vol[8], p);

	default:
		i = _IOC_NR(cmd);
		if (i >= SOUND_MIXER_NRDEVICES || !(vidx = mixtable1[i]))
			return -EINVAL;
		if (get_user(val, p))
			return -EFAULT;
		l = (val << 1) & 0x1fe;
		if (l > 200)
			l = 200;
		else if (l < 5)
			l = 5;
		r = (val >> 7) & 0x1fe;
		if (r > 200)
			r = 200;
		else if (r < 5)
			r = 5;
		rl = (l - 5) / 13;
		rr = (r - 5) / 13;
		/* NOTE(review): same apparent l/r transposition as in RECLEV above */
		r = (rl * 13 + 5) / 2;
		l = (rr * 13 + 5) / 2;
		write_mixer(s, mixreg[vidx-1], (rl << 4) | rr);
#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
		s->mix.vol[vidx-1] = ((unsigned int)r << 8) | l;
#else
		s->mix.vol[vidx-1] = val;
#endif
		return put_user(s->mix.vol[vidx-1], p);
	}
}
921
922/* --------------------------------------------------------------------- */
923
/*
 * Open /dev/mixer: walk all PCI devices bound to this driver (legacy
 * pci_find_device iteration) and pick the card whose mixer minor
 * matches the inode.  Returns -ENODEV if no such card exists.
 */
static int solo1_open_mixdev(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	struct solo1_state *s = NULL;
	struct pci_dev *pci_dev = NULL;

	while ((pci_dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
		struct pci_driver *drvr;
		drvr = pci_dev_driver (pci_dev);
		if (drvr != &solo1_driver)
			continue;
		s = (struct solo1_state*)pci_get_drvdata(pci_dev);
		if (!s)
			continue;
		if (s->dev_mixer == minor)
			break;
	}
	if (!s)
		return -ENODEV;
	VALIDATE_STATE(s);
	file->private_data = s;
	return nonseekable_open(inode, file);
}
947
/* Release /dev/mixer: nothing to tear down beyond a sanity check. */
static int solo1_release_mixdev(struct inode *inode, struct file *file)
{
	struct solo1_state *s = (struct solo1_state *)file->private_data;

	VALIDATE_STATE(s);
	return 0;
}
955
/* ioctl entry point for /dev/mixer; all work is done in mixer_ioctl(). */
static int solo1_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	return mixer_ioctl((struct solo1_state *)file->private_data, cmd, arg);
}
960
/* file_operations for the mixer device node. */
static /*const*/ struct file_operations solo1_mixer_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.ioctl		= solo1_ioctl_mixdev,
	.open		= solo1_open_mixdev,
	.release	= solo1_release_mixdev,
};
968
969/* --------------------------------------------------------------------- */
970
/*
 * Wait until the playback ring has drained (count <= 0).  Returns 0
 * when drained, -EBUSY immediately for nonblocking callers, and
 * -ERESTARTSYS if interrupted by a signal.  mmap()ed buffers are never
 * drained.  The per-iteration timeout is sized to roughly 1.5x the
 * remaining buffer duration at the current rate/format.
 */
static int drain_dac(struct solo1_state *s, int nonblock)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int count;
	unsigned tmo;

	if (s->dma_dac.mapped)
		return 0;
	add_wait_queue(&s->dma_dac.wait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&s->lock, flags);
		count = s->dma_dac.count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (count <= 0)
			break;
		if (signal_pending(current))
			break;
		if (nonblock) {
			remove_wait_queue(&s->dma_dac.wait, &wait);
			set_current_state(TASK_RUNNING);
			return -EBUSY;
		}
		/* timeout in jiffies; halved once per 16-bit samples, once for stereo */
		tmo = 3 * HZ * (count + s->dma_dac.fragsize) / 2 / s->rate;
		if (s->fmt & (AFMT_S16_LE | AFMT_U16_LE))
			tmo >>= 1;
		if (s->channels > 1)
			tmo >>= 1;
		if (!schedule_timeout(tmo + 1))
			printk(KERN_DEBUG "solo1: dma timed out??\n");
	}
	remove_wait_queue(&s->dma_dac.wait, &wait);
	set_current_state(TASK_RUNNING);
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
1009
1010/* --------------------------------------------------------------------- */
1011
/*
 * /dev/dsp read: copy captured audio out of the ADC DMA ring buffer.
 * Blocks (unless O_NONBLOCK) until data is available; returns the number
 * of bytes copied to userspace or a negative errno.  Refused with -ENXIO
 * while the capture buffer is mmap()ed, since the ring pointers are then
 * owned by userspace.
 */
static ssize_t solo1_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	struct solo1_state *s = (struct solo1_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	unsigned swptr;
	int cnt;

	VALIDATE_STATE(s);
	if (s->dma_adc.mapped)
		return -ENXIO;
	if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
		return ret;
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	ret = 0;
	add_wait_queue(&s->dma_adc.wait, &wait);
	while (count > 0) {
		/* Snapshot ring state under the lock; cnt becomes the largest
		 * contiguous readable run ending at the ring boundary. */
		spin_lock_irqsave(&s->lock, flags);
		swptr = s->dma_adc.swptr;
		cnt = s->dma_adc.dmasize-swptr;
		if (s->dma_adc.count < cnt)
			cnt = s->dma_adc.count;
		if (cnt <= 0)
			/* arm the sleep BEFORE dropping the lock so a wakeup
			 * between unlock and schedule() is not lost */
			__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
#ifdef DEBUGREC
		printk(KERN_DEBUG "solo1_read: reg B8: 0x%02x DMAstat: 0x%02x DMAcnt: 0x%04x SBstat: 0x%02x cnt: %u\n",
		       read_ctrl(s, 0xb8), inb(s->ddmabase+8), inw(s->ddmabase+4), inb(s->sbbase+0xc), cnt);
#endif
		if (cnt <= 0) {
			/* ring empty: make sure capture is running, then sleep */
			if (s->dma_adc.enabled)
				start_adc(s);
#ifdef DEBUGREC
			printk(KERN_DEBUG "solo1_read: regs: A1: 0x%02x A2: 0x%02x A4: 0x%02x A5: 0x%02x A8: 0x%02x\n"
			       KERN_DEBUG "solo1_read: regs: B1: 0x%02x B2: 0x%02x B7: 0x%02x B8: 0x%02x B9: 0x%02x\n"
			       KERN_DEBUG "solo1_read: DMA: addr: 0x%08x cnt: 0x%04x stat: 0x%02x mask: 0x%02x\n"
			       KERN_DEBUG "solo1_read: SBstat: 0x%02x cnt: %u\n",
			       read_ctrl(s, 0xa1), read_ctrl(s, 0xa2), read_ctrl(s, 0xa4), read_ctrl(s, 0xa5), read_ctrl(s, 0xa8),
			       read_ctrl(s, 0xb1), read_ctrl(s, 0xb2), read_ctrl(s, 0xb7), read_ctrl(s, 0xb8), read_ctrl(s, 0xb9),
			       inl(s->ddmabase), inw(s->ddmabase+4), inb(s->ddmabase+8), inb(s->ddmabase+15), inb(s->sbbase+0xc), cnt);
#endif
			/* DDMA mask register bit 0 stuck means the channel
			 * never got unmasked - capture cannot progress */
			if (inb(s->ddmabase+15) & 1)
				printk(KERN_ERR "solo1: cannot start recording, DDMA mask bit stuck at 1\n");
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			schedule();
#ifdef DEBUGREC
			printk(KERN_DEBUG "solo1_read: regs: A1: 0x%02x A2: 0x%02x A4: 0x%02x A5: 0x%02x A8: 0x%02x\n"
			       KERN_DEBUG "solo1_read: regs: B1: 0x%02x B2: 0x%02x B7: 0x%02x B8: 0x%02x B9: 0x%02x\n"
			       KERN_DEBUG "solo1_read: DMA: addr: 0x%08x cnt: 0x%04x stat: 0x%02x mask: 0x%02x\n"
			       KERN_DEBUG "solo1_read: SBstat: 0x%02x cnt: %u\n",
			       read_ctrl(s, 0xa1), read_ctrl(s, 0xa2), read_ctrl(s, 0xa4), read_ctrl(s, 0xa5), read_ctrl(s, 0xa8),
			       read_ctrl(s, 0xb1), read_ctrl(s, 0xb2), read_ctrl(s, 0xb7), read_ctrl(s, 0xb8), read_ctrl(s, 0xb9),
			       inl(s->ddmabase), inw(s->ddmabase+4), inb(s->ddmabase+8), inb(s->ddmabase+15), inb(s->sbbase+0xc), cnt);
#endif
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		/* copy out with the lock dropped (copy_to_user may sleep) */
		if (copy_to_user(buffer, s->dma_adc.rawbuf + swptr, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
		swptr = (swptr + cnt) % s->dma_adc.dmasize;
		spin_lock_irqsave(&s->lock, flags);
		s->dma_adc.swptr = swptr;
		s->dma_adc.count -= cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		if (s->dma_adc.enabled)
			start_adc(s);
#ifdef DEBUGREC
		printk(KERN_DEBUG "solo1_read: reg B8: 0x%02x DMAstat: 0x%02x DMAcnt: 0x%04x SBstat: 0x%02x\n",
		       read_ctrl(s, 0xb8), inb(s->ddmabase+8), inw(s->ddmabase+4), inb(s->sbbase+0xc));
#endif
	}
	remove_wait_queue(&s->dma_adc.wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;
}
1105
/*
 * /dev/dsp write: copy playback audio from userspace into the DAC DMA
 * ring buffer.  Blocks (unless O_NONBLOCK) until space is available;
 * returns bytes accepted or a negative errno.  -ENXIO while mmap()ed.
 */
static ssize_t solo1_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct solo1_state *s = (struct solo1_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	unsigned swptr;
	int cnt;

	VALIDATE_STATE(s);
	if (s->dma_dac.mapped)
		return -ENXIO;
	if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
		return ret;
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;
#if 0
	printk(KERN_DEBUG "solo1_write: reg 70: 0x%02x 71: 0x%02x 72: 0x%02x 74: 0x%02x 76: 0x%02x 78: 0x%02x 7A: 0x%02x\n"
	       KERN_DEBUG "solo1_write: DMA: addr: 0x%08x cnt: 0x%04x stat: 0x%02x SBstat: 0x%02x\n",
	       read_mixer(s, 0x70), read_mixer(s, 0x71), read_mixer(s, 0x72), read_mixer(s, 0x74), read_mixer(s, 0x76),
	       read_mixer(s, 0x78), read_mixer(s, 0x7a), inl(s->iobase), inw(s->iobase+4), inb(s->iobase+6), inb(s->sbbase+0xc));
	printk(KERN_DEBUG "solo1_write: reg 78: 0x%02x reg 7A: 0x%02x DMAcnt: 0x%04x DMAstat: 0x%02x SBstat: 0x%02x\n",
	       read_mixer(s, 0x78), read_mixer(s, 0x7a), inw(s->iobase+4), inb(s->iobase+6), inb(s->sbbase+0xc));
#endif
	ret = 0;
	add_wait_queue(&s->dma_dac.wait, &wait);
	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		/* negative count means the hardware underran; resynchronise
		 * the software pointer with the hardware pointer */
		if (s->dma_dac.count < 0) {
			s->dma_dac.count = 0;
			s->dma_dac.swptr = s->dma_dac.hwptr;
		}
		/* cnt = contiguous free space up to the end of the ring,
		 * clipped so we never overfill past dmasize */
		swptr = s->dma_dac.swptr;
		cnt = s->dma_dac.dmasize-swptr;
		if (s->dma_dac.count + cnt > s->dma_dac.dmasize)
			cnt = s->dma_dac.dmasize - s->dma_dac.count;
		if (cnt <= 0)
			/* arm the sleep before dropping the lock (lost-wakeup safety) */
			__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			/* ring full: make sure playback is draining it, then sleep */
			if (s->dma_dac.enabled)
				start_dac(s);
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			schedule();
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		if (copy_from_user(s->dma_dac.rawbuf + swptr, buffer, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
		swptr = (swptr + cnt) % s->dma_dac.dmasize;
		spin_lock_irqsave(&s->lock, flags);
		s->dma_dac.swptr = swptr;
		s->dma_dac.count += cnt;
		s->dma_dac.endcleared = 0;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		if (s->dma_dac.enabled)
			start_dac(s);
	}
	remove_wait_queue(&s->dma_dac.wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;
}
1184
/* No kernel lock - we have our own spinlock */
/*
 * poll/select on /dev/dsp.  Registers on the DAC/ADC wait queues first
 * (poll_wait must run before the readiness test to avoid missed wakeups),
 * then reports POLLIN when capture data is available and POLLOUT when
 * there is playback space.  mmap()ed directions use fragment granularity.
 */
static unsigned int solo1_poll(struct file *file, struct poll_table_struct *wait)
{
	struct solo1_state *s = (struct solo1_state *)file->private_data;
	unsigned long flags;
	unsigned int mask = 0;

	VALIDATE_STATE(s);
	if (file->f_mode & FMODE_WRITE) {
		if (!s->dma_dac.ready && prog_dmabuf_dac(s))
			return 0;
		poll_wait(file, &s->dma_dac.wait, wait);
	}
	if (file->f_mode & FMODE_READ) {
		if (!s->dma_adc.ready && prog_dmabuf_adc(s))
			return 0;
		poll_wait(file, &s->dma_adc.wait, wait);
	}
	spin_lock_irqsave(&s->lock, flags);
	solo1_update_ptr(s);	/* refresh hw pointers/counters before testing */
	if (file->f_mode & FMODE_READ) {
		if (s->dma_adc.mapped) {
			if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
				mask |= POLLIN | POLLRDNORM;
		} else {
			if (s->dma_adc.count > 0)
				mask |= POLLIN | POLLRDNORM;
		}
	}
	if (file->f_mode & FMODE_WRITE) {
		if (s->dma_dac.mapped) {
			if (s->dma_dac.count >= (signed)s->dma_dac.fragsize)
				mask |= POLLOUT | POLLWRNORM;
		} else {
			if ((signed)s->dma_dac.dmasize > s->dma_dac.count)
				mask |= POLLOUT | POLLWRNORM;
		}
	}
	spin_unlock_irqrestore(&s->lock, flags);
	return mask;
}
1226
1227
/*
 * mmap the playback (VM_WRITE) or capture (VM_READ) DMA buffer into
 * userspace.  Only a mapping starting at offset 0 and no larger than the
 * allocated buffer is allowed.  Marks the buffer mapped, which switches
 * read/write/poll into mmap semantics.  Runs under the BKL.
 */
static int solo1_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct solo1_state *s = (struct solo1_state *)file->private_data;
	struct dmabuf *db;
	int ret = -EINVAL;
	unsigned long size;

	VALIDATE_STATE(s);
	lock_kernel();
	if (vma->vm_flags & VM_WRITE) {
		if ((ret = prog_dmabuf_dac(s)) != 0)
			goto out;
		db = &s->dma_dac;
	} else if (vma->vm_flags & VM_READ) {
		if ((ret = prog_dmabuf_adc(s)) != 0)
			goto out;
		db = &s->dma_adc;
	} else
		goto out;
	ret = -EINVAL;
	if (vma->vm_pgoff != 0)
		goto out;
	size = vma->vm_end - vma->vm_start;
	if (size > (PAGE_SIZE << db->buforder))
		goto out;
	ret = -EAGAIN;
	if (remap_pfn_range(vma, vma->vm_start,
				virt_to_phys(db->rawbuf) >> PAGE_SHIFT,
				size, vma->vm_page_prot))
		goto out;
	db->mapped = 1;
	ret = 0;
out:
	unlock_kernel();
	return ret;
}
1264
1265static int solo1_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1266{
1267 struct solo1_state *s = (struct solo1_state *)file->private_data;
1268 unsigned long flags;
1269 audio_buf_info abinfo;
1270 count_info cinfo;
1271 int val, mapped, ret, count;
1272 int div1, div2;
1273 unsigned rate1, rate2;
1274 void __user *argp = (void __user *)arg;
1275 int __user *p = argp;
1276
1277 VALIDATE_STATE(s);
1278 mapped = ((file->f_mode & FMODE_WRITE) && s->dma_dac.mapped) ||
1279 ((file->f_mode & FMODE_READ) && s->dma_adc.mapped);
1280 switch (cmd) {
1281 case OSS_GETVERSION:
1282 return put_user(SOUND_VERSION, p);
1283
1284 case SNDCTL_DSP_SYNC:
1285 if (file->f_mode & FMODE_WRITE)
1286 return drain_dac(s, 0/*file->f_flags & O_NONBLOCK*/);
1287 return 0;
1288
1289 case SNDCTL_DSP_SETDUPLEX:
1290 return 0;
1291
1292 case SNDCTL_DSP_GETCAPS:
1293 return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME | DSP_CAP_TRIGGER | DSP_CAP_MMAP, p);
1294
1295 case SNDCTL_DSP_RESET:
1296 if (file->f_mode & FMODE_WRITE) {
1297 stop_dac(s);
1298 synchronize_irq(s->irq);
1299 s->dma_dac.swptr = s->dma_dac.hwptr = s->dma_dac.count = s->dma_dac.total_bytes = 0;
1300 }
1301 if (file->f_mode & FMODE_READ) {
1302 stop_adc(s);
1303 synchronize_irq(s->irq);
1304 s->dma_adc.swptr = s->dma_adc.hwptr = s->dma_adc.count = s->dma_adc.total_bytes = 0;
1305 }
1306 prog_codec(s);
1307 return 0;
1308
1309 case SNDCTL_DSP_SPEED:
1310 if (get_user(val, p))
1311 return -EFAULT;
1312 if (val >= 0) {
1313 stop_adc(s);
1314 stop_dac(s);
1315 s->dma_adc.ready = s->dma_dac.ready = 0;
1316 /* program sampling rates */
1317 if (val > 48000)
1318 val = 48000;
1319 if (val < 6300)
1320 val = 6300;
1321 div1 = (768000 + val / 2) / val;
1322 rate1 = (768000 + div1 / 2) / div1;
1323 div1 = -div1;
1324 div2 = (793800 + val / 2) / val;
1325 rate2 = (793800 + div2 / 2) / div2;
1326 div2 = (-div2) & 0x7f;
1327 if (abs(val - rate2) < abs(val - rate1)) {
1328 rate1 = rate2;
1329 div1 = div2;
1330 }
1331 s->rate = rate1;
1332 s->clkdiv = div1;
1333 prog_codec(s);
1334 }
1335 return put_user(s->rate, p);
1336
1337 case SNDCTL_DSP_STEREO:
1338 if (get_user(val, p))
1339 return -EFAULT;
1340 stop_adc(s);
1341 stop_dac(s);
1342 s->dma_adc.ready = s->dma_dac.ready = 0;
1343 /* program channels */
1344 s->channels = val ? 2 : 1;
1345 prog_codec(s);
1346 return 0;
1347
1348 case SNDCTL_DSP_CHANNELS:
1349 if (get_user(val, p))
1350 return -EFAULT;
1351 if (val != 0) {
1352 stop_adc(s);
1353 stop_dac(s);
1354 s->dma_adc.ready = s->dma_dac.ready = 0;
1355 /* program channels */
1356 s->channels = (val >= 2) ? 2 : 1;
1357 prog_codec(s);
1358 }
1359 return put_user(s->channels, p);
1360
1361 case SNDCTL_DSP_GETFMTS: /* Returns a mask */
1362 return put_user(AFMT_S16_LE|AFMT_U16_LE|AFMT_S8|AFMT_U8, p);
1363
1364 case SNDCTL_DSP_SETFMT: /* Selects ONE fmt*/
1365 if (get_user(val, p))
1366 return -EFAULT;
1367 if (val != AFMT_QUERY) {
1368 stop_adc(s);
1369 stop_dac(s);
1370 s->dma_adc.ready = s->dma_dac.ready = 0;
1371 /* program format */
1372 if (val != AFMT_S16_LE && val != AFMT_U16_LE &&
1373 val != AFMT_S8 && val != AFMT_U8)
1374 val = AFMT_U8;
1375 s->fmt = val;
1376 prog_codec(s);
1377 }
1378 return put_user(s->fmt, p);
1379
1380 case SNDCTL_DSP_POST:
1381 return 0;
1382
1383 case SNDCTL_DSP_GETTRIGGER:
1384 val = 0;
1385 if (file->f_mode & s->ena & FMODE_READ)
1386 val |= PCM_ENABLE_INPUT;
1387 if (file->f_mode & s->ena & FMODE_WRITE)
1388 val |= PCM_ENABLE_OUTPUT;
1389 return put_user(val, p);
1390
1391 case SNDCTL_DSP_SETTRIGGER:
1392 if (get_user(val, p))
1393 return -EFAULT;
1394 if (file->f_mode & FMODE_READ) {
1395 if (val & PCM_ENABLE_INPUT) {
1396 if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
1397 return ret;
1398 s->dma_dac.enabled = 1;
1399 start_adc(s);
1400 if (inb(s->ddmabase+15) & 1)
1401 printk(KERN_ERR "solo1: cannot start recording, DDMA mask bit stuck at 1\n");
1402 } else {
1403 s->dma_dac.enabled = 0;
1404 stop_adc(s);
1405 }
1406 }
1407 if (file->f_mode & FMODE_WRITE) {
1408 if (val & PCM_ENABLE_OUTPUT) {
1409 if (!s->dma_dac.ready && (ret = prog_dmabuf_dac(s)))
1410 return ret;
1411 s->dma_dac.enabled = 1;
1412 start_dac(s);
1413 } else {
1414 s->dma_dac.enabled = 0;
1415 stop_dac(s);
1416 }
1417 }
1418 return 0;
1419
1420 case SNDCTL_DSP_GETOSPACE:
1421 if (!(file->f_mode & FMODE_WRITE))
1422 return -EINVAL;
1423 if (!s->dma_dac.ready && (val = prog_dmabuf_dac(s)) != 0)
1424 return val;
1425 spin_lock_irqsave(&s->lock, flags);
1426 solo1_update_ptr(s);
1427 abinfo.fragsize = s->dma_dac.fragsize;
1428 count = s->dma_dac.count;
1429 if (count < 0)
1430 count = 0;
1431 abinfo.bytes = s->dma_dac.dmasize - count;
1432 abinfo.fragstotal = s->dma_dac.numfrag;
1433 abinfo.fragments = abinfo.bytes >> s->dma_dac.fragshift;
1434 spin_unlock_irqrestore(&s->lock, flags);
1435 return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
1436
1437 case SNDCTL_DSP_GETISPACE:
1438 if (!(file->f_mode & FMODE_READ))
1439 return -EINVAL;
1440 if (!s->dma_adc.ready && (val = prog_dmabuf_adc(s)) != 0)
1441 return val;
1442 spin_lock_irqsave(&s->lock, flags);
1443 solo1_update_ptr(s);
1444 abinfo.fragsize = s->dma_adc.fragsize;
1445 abinfo.bytes = s->dma_adc.count;
1446 abinfo.fragstotal = s->dma_adc.numfrag;
1447 abinfo.fragments = abinfo.bytes >> s->dma_adc.fragshift;
1448 spin_unlock_irqrestore(&s->lock, flags);
1449 return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
1450
1451 case SNDCTL_DSP_NONBLOCK:
1452 file->f_flags |= O_NONBLOCK;
1453 return 0;
1454
1455 case SNDCTL_DSP_GETODELAY:
1456 if (!(file->f_mode & FMODE_WRITE))
1457 return -EINVAL;
1458 if (!s->dma_dac.ready && (val = prog_dmabuf_dac(s)) != 0)
1459 return val;
1460 spin_lock_irqsave(&s->lock, flags);
1461 solo1_update_ptr(s);
1462 count = s->dma_dac.count;
1463 spin_unlock_irqrestore(&s->lock, flags);
1464 if (count < 0)
1465 count = 0;
1466 return put_user(count, p);
1467
1468 case SNDCTL_DSP_GETIPTR:
1469 if (!(file->f_mode & FMODE_READ))
1470 return -EINVAL;
1471 if (!s->dma_adc.ready && (val = prog_dmabuf_adc(s)) != 0)
1472 return val;
1473 spin_lock_irqsave(&s->lock, flags);
1474 solo1_update_ptr(s);
1475 cinfo.bytes = s->dma_adc.total_bytes;
1476 cinfo.blocks = s->dma_adc.count >> s->dma_adc.fragshift;
1477 cinfo.ptr = s->dma_adc.hwptr;
1478 if (s->dma_adc.mapped)
1479 s->dma_adc.count &= s->dma_adc.fragsize-1;
1480 spin_unlock_irqrestore(&s->lock, flags);
1481 if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
1482 return -EFAULT;
1483 return 0;
1484
1485 case SNDCTL_DSP_GETOPTR:
1486 if (!(file->f_mode & FMODE_WRITE))
1487 return -EINVAL;
1488 if (!s->dma_dac.ready && (val = prog_dmabuf_dac(s)) != 0)
1489 return val;
1490 spin_lock_irqsave(&s->lock, flags);
1491 solo1_update_ptr(s);
1492 cinfo.bytes = s->dma_dac.total_bytes;
1493 count = s->dma_dac.count;
1494 if (count < 0)
1495 count = 0;
1496 cinfo.blocks = count >> s->dma_dac.fragshift;
1497 cinfo.ptr = s->dma_dac.hwptr;
1498 if (s->dma_dac.mapped)
1499 s->dma_dac.count &= s->dma_dac.fragsize-1;
1500 spin_unlock_irqrestore(&s->lock, flags);
1501#if 0
1502 printk(KERN_DEBUG "esssolo1: GETOPTR: bytes %u blocks %u ptr %u, buforder %u numfrag %u fragshift %u\n"
1503 KERN_DEBUG "esssolo1: swptr %u count %u fragsize %u dmasize %u fragsamples %u\n",
1504 cinfo.bytes, cinfo.blocks, cinfo.ptr, s->dma_dac.buforder, s->dma_dac.numfrag, s->dma_dac.fragshift,
1505 s->dma_dac.swptr, s->dma_dac.count, s->dma_dac.fragsize, s->dma_dac.dmasize, s->dma_dac.fragsamples);
1506#endif
1507 if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
1508 return -EFAULT;
1509 return 0;
1510
1511 case SNDCTL_DSP_GETBLKSIZE:
1512 if (file->f_mode & FMODE_WRITE) {
1513 if ((val = prog_dmabuf_dac(s)))
1514 return val;
1515 return put_user(s->dma_dac.fragsize, p);
1516 }
1517 if ((val = prog_dmabuf_adc(s)))
1518 return val;
1519 return put_user(s->dma_adc.fragsize, p);
1520
1521 case SNDCTL_DSP_SETFRAGMENT:
1522 if (get_user(val, p))
1523 return -EFAULT;
1524 if (file->f_mode & FMODE_READ) {
1525 s->dma_adc.ossfragshift = val & 0xffff;
1526 s->dma_adc.ossmaxfrags = (val >> 16) & 0xffff;
1527 if (s->dma_adc.ossfragshift < 4)
1528 s->dma_adc.ossfragshift = 4;
1529 if (s->dma_adc.ossfragshift > 15)
1530 s->dma_adc.ossfragshift = 15;
1531 if (s->dma_adc.ossmaxfrags < 4)
1532 s->dma_adc.ossmaxfrags = 4;
1533 }
1534 if (file->f_mode & FMODE_WRITE) {
1535 s->dma_dac.ossfragshift = val & 0xffff;
1536 s->dma_dac.ossmaxfrags = (val >> 16) & 0xffff;
1537 if (s->dma_dac.ossfragshift < 4)
1538 s->dma_dac.ossfragshift = 4;
1539 if (s->dma_dac.ossfragshift > 15)
1540 s->dma_dac.ossfragshift = 15;
1541 if (s->dma_dac.ossmaxfrags < 4)
1542 s->dma_dac.ossmaxfrags = 4;
1543 }
1544 return 0;
1545
1546 case SNDCTL_DSP_SUBDIVIDE:
1547 if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision) ||
1548 (file->f_mode & FMODE_WRITE && s->dma_dac.subdivision))
1549 return -EINVAL;
1550 if (get_user(val, p))
1551 return -EFAULT;
1552 if (val != 1 && val != 2 && val != 4)
1553 return -EINVAL;
1554 if (file->f_mode & FMODE_READ)
1555 s->dma_adc.subdivision = val;
1556 if (file->f_mode & FMODE_WRITE)
1557 s->dma_dac.subdivision = val;
1558 return 0;
1559
1560 case SOUND_PCM_READ_RATE:
1561 return put_user(s->rate, p);
1562
1563 case SOUND_PCM_READ_CHANNELS:
1564 return put_user(s->channels, p);
1565
1566 case SOUND_PCM_READ_BITS:
1567 return put_user((s->fmt & (AFMT_S8|AFMT_U8)) ? 8 : 16, p);
1568
1569 case SOUND_PCM_WRITE_FILTER:
1570 case SNDCTL_DSP_SETSYNCRO:
1571 case SOUND_PCM_READ_FILTER:
1572 return -EINVAL;
1573
1574 }
1575 return mixer_ioctl(s, cmd, arg);
1576}
1577
/*
 * /dev/dsp release: drain pending playback, stop the engines, mask/clear
 * the DMA channels, free the DMA buffers and wake anyone waiting in open.
 */
static int solo1_release(struct inode *inode, struct file *file)
{
	struct solo1_state *s = (struct solo1_state *)file->private_data;

	VALIDATE_STATE(s);
	lock_kernel();
	/* let queued playback data finish before tearing down (may sleep) */
	if (file->f_mode & FMODE_WRITE)
		drain_dac(s, file->f_flags & O_NONBLOCK);
	mutex_lock(&s->open_mutex);
	if (file->f_mode & FMODE_WRITE) {
		stop_dac(s);
		outb(0, s->iobase+6); /* disable DMA */
		dealloc_dmabuf(s, &s->dma_dac);
	}
	if (file->f_mode & FMODE_READ) {
		stop_adc(s);
		outb(1, s->ddmabase+0xf); /* mask DMA channel */
		outb(0, s->ddmabase+0xd); /* DMA master clear */
		dealloc_dmabuf(s, &s->dma_adc);
	}
	s->open_mode &= ~(FMODE_READ | FMODE_WRITE);
	wake_up(&s->open_wait);	/* device is free again */
	mutex_unlock(&s->open_mutex);
	unlock_kernel();
	return 0;
}
1604
/*
 * /dev/dsp open: locate the solo1 device whose audio minor matches this
 * inode, wait (interruptibly) for it to become free, then initialise the
 * default codec settings (8 kHz, mono, unsigned 8-bit).
 */
static int solo1_open(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	DECLARE_WAITQUEUE(wait, current);
	struct solo1_state *s = NULL;
	struct pci_dev *pci_dev = NULL;

	/* walk all PCI devices bound to our driver and match the minor;
	 * the low 4 bits of the minor select the /dev/dsp variant */
	while ((pci_dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
		struct pci_driver *drvr;

		drvr = pci_dev_driver(pci_dev);
		if (drvr != &solo1_driver)
			continue;
		s = (struct solo1_state*)pci_get_drvdata(pci_dev);
		if (!s)
			continue;
		if (!((s->dev_audio ^ minor) & ~0xf))
			break;
	}
	if (!s)
		return -ENODEV;
	VALIDATE_STATE(s);
	file->private_data = s;
	/* wait for device to become free */
	mutex_lock(&s->open_mutex);
	while (s->open_mode & (FMODE_READ | FMODE_WRITE)) {
		if (file->f_flags & O_NONBLOCK) {
			mutex_unlock(&s->open_mutex);
			return -EBUSY;
		}
		/* sleep without the mutex; release() wakes open_wait */
		add_wait_queue(&s->open_wait, &wait);
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&s->open_mutex);
		schedule();
		remove_wait_queue(&s->open_wait, &wait);
		set_current_state(TASK_RUNNING);
		if (signal_pending(current))
			return -ERESTARTSYS;
		mutex_lock(&s->open_mutex);
	}
	/* OSS default format: 8 kHz mono unsigned 8-bit */
	s->fmt = AFMT_U8;
	s->channels = 1;
	s->rate = 8000;
	s->clkdiv = 96 | 0x80;	/* divider for 8 kHz - see SNDCTL_DSP_SPEED */
	s->ena = 0;
	s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0;
	s->dma_adc.enabled = 1;
	s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0;
	s->dma_dac.enabled = 1;
	s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
	mutex_unlock(&s->open_mutex);
	prog_codec(s);
	return nonseekable_open(inode, file);
}
1659
/* file operations for the /dev/dsp (PCM audio) device node */
static /*const*/ struct file_operations solo1_audio_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= solo1_read,
	.write		= solo1_write,
	.poll		= solo1_poll,
	.ioctl		= solo1_ioctl,
	.mmap		= solo1_mmap,
	.open		= solo1_open,
	.release	= solo1_release,
};
1671
1672/* --------------------------------------------------------------------- */
1673
/* hold spinlock for the following! */
/*
 * Pump the MPU-401 UART: drain every pending receive byte into the
 * software input ring, then feed queued output bytes while the UART can
 * accept them.  Wakes readers when input arrived and writers once the
 * output ring has drained enough.  Caller must hold s->lock.
 */
static void solo1_handle_midi(struct solo1_state *s)
{
	unsigned char ch;
	int wake;

	if (!(s->mpubase))	/* no MPU-401 port configured */
		return;
	wake = 0;
	/* status bit 0x80 clear => a received byte is waiting
	 * (MPU-401 UART convention - see datasheet) */
	while (!(inb(s->mpubase+1) & 0x80)) {
		ch = inb(s->mpubase);
		if (s->midi.icnt < MIDIINBUF) {
			s->midi.ibuf[s->midi.iwr] = ch;
			s->midi.iwr = (s->midi.iwr + 1) % MIDIINBUF;
			s->midi.icnt++;
		}
		/* note: byte is dropped if the input ring is full */
		wake = 1;
	}
	if (wake)
		wake_up(&s->midi.iwait);
	wake = 0;
	/* status bit 0x40 clear => transmitter ready for another byte */
	while (!(inb(s->mpubase+1) & 0x40) && s->midi.ocnt > 0) {
		outb(s->midi.obuf[s->midi.ord], s->mpubase);
		s->midi.ord = (s->midi.ord + 1) % MIDIOUTBUF;
		s->midi.ocnt--;
		if (s->midi.ocnt < MIDIOUTBUF-16)
			wake = 1;
	}
	if (wake)
		wake_up(&s->midi.owait);
}
1705
/*
 * Shared interrupt handler.  Reads the interrupt-source register first so
 * we can bail out immediately when the IRQ was not ours, acknowledges the
 * audio interrupt, then updates the DMA pointers and pumps the MIDI UART.
 */
static irqreturn_t solo1_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct solo1_state *s = (struct solo1_state *)dev_id;
	unsigned int intsrc;

	/* fastpath out, to ease interrupt sharing */
	intsrc = inb(s->iobase+7); /* get interrupt source(s) */
	if (!intsrc)
		return IRQ_NONE;
	(void)inb(s->sbbase+0xe); /* clear interrupt */
	spin_lock(&s->lock);
	/* clear audio interrupts first */
	if (intsrc & 0x20)
		write_mixer(s, 0x7a, read_mixer(s, 0x7a) & 0x7f);
	solo1_update_ptr(s);
	solo1_handle_midi(s);
	spin_unlock(&s->lock);
	return IRQ_HANDLED;
}
1725
1726static void solo1_midi_timer(unsigned long data)
1727{
1728 struct solo1_state *s = (struct solo1_state *)data;
1729 unsigned long flags;
1730
1731 spin_lock_irqsave(&s->lock, flags);
1732 solo1_handle_midi(s);
1733 spin_unlock_irqrestore(&s->lock, flags);
1734 s->midi.timer.expires = jiffies+1;
1735 add_timer(&s->midi.timer);
1736}
1737
1738/* --------------------------------------------------------------------- */
1739
/*
 * MIDI read: copy received bytes out of the software input ring.
 * Blocks (unless O_NONBLOCK) until at least one byte is available.
 * Note: returns after copying a single contiguous chunk (the final
 * 'break'), so a call may return fewer bytes than available past the
 * ring wrap point - historical OSS behavior.
 */
static ssize_t solo1_midi_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	struct solo1_state *s = (struct solo1_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	unsigned ptr;
	int cnt;

	VALIDATE_STATE(s);
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	if (count == 0)
		return 0;
	ret = 0;
	add_wait_queue(&s->midi.iwait, &wait);
	while (count > 0) {
		/* contiguous readable run up to the ring boundary */
		spin_lock_irqsave(&s->lock, flags);
		ptr = s->midi.ird;
		cnt = MIDIINBUF - ptr;
		if (s->midi.icnt < cnt)
			cnt = s->midi.icnt;
		if (cnt <= 0)
			/* arm sleep before unlocking to avoid lost wakeups */
			__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			schedule();
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		if (copy_to_user(buffer, s->midi.ibuf + ptr, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
		ptr = (ptr + cnt) % MIDIINBUF;
		spin_lock_irqsave(&s->lock, flags);
		s->midi.ird = ptr;
		s->midi.icnt -= cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		break;	/* deliberate: one chunk per call */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&s->midi.iwait, &wait);
	return ret;
}
1800
/*
 * MIDI write: queue bytes into the software output ring; the data is
 * pushed to the UART by solo1_handle_midi() (called here, from the IRQ
 * handler and from the polling timer).  Blocks (unless O_NONBLOCK) when
 * the ring is full.
 */
static ssize_t solo1_midi_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct solo1_state *s = (struct solo1_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	unsigned ptr;
	int cnt;

	VALIDATE_STATE(s);
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;
	if (count == 0)
		return 0;
	ret = 0;
	add_wait_queue(&s->midi.owait, &wait);
	while (count > 0) {
		/* contiguous free run up to the ring boundary */
		spin_lock_irqsave(&s->lock, flags);
		ptr = s->midi.owr;
		cnt = MIDIOUTBUF - ptr;
		if (s->midi.ocnt + cnt > MIDIOUTBUF)
			cnt = MIDIOUTBUF - s->midi.ocnt;
		if (cnt <= 0) {
			/* ring full: arm sleep and try to drain it first */
			__set_current_state(TASK_INTERRUPTIBLE);
			solo1_handle_midi(s);
		}
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			schedule();
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		if (copy_from_user(s->midi.obuf + ptr, buffer, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
		ptr = (ptr + cnt) % MIDIOUTBUF;
		spin_lock_irqsave(&s->lock, flags);
		s->midi.owr = ptr;
		s->midi.ocnt += cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		/* kick the UART so the new data starts flowing immediately */
		spin_lock_irqsave(&s->lock, flags);
		solo1_handle_midi(s);
		spin_unlock_irqrestore(&s->lock, flags);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&s->midi.owait, &wait);
	return ret;
}
1865
1866/* No kernel lock - we have our own spinlock */
1867static unsigned int solo1_midi_poll(struct file *file, struct poll_table_struct *wait)
1868{
1869 struct solo1_state *s = (struct solo1_state *)file->private_data;
1870 unsigned long flags;
1871 unsigned int mask = 0;
1872
1873 VALIDATE_STATE(s);
1874 if (file->f_flags & FMODE_WRITE)
1875 poll_wait(file, &s->midi.owait, wait);
1876 if (file->f_flags & FMODE_READ)
1877 poll_wait(file, &s->midi.iwait, wait);
1878 spin_lock_irqsave(&s->lock, flags);
1879 if (file->f_flags & FMODE_READ) {
1880 if (s->midi.icnt > 0)
1881 mask |= POLLIN | POLLRDNORM;
1882 }
1883 if (file->f_flags & FMODE_WRITE) {
1884 if (s->midi.ocnt < MIDIOUTBUF)
1885 mask |= POLLOUT | POLLWRNORM;
1886 }
1887 spin_unlock_irqrestore(&s->lock, flags);
1888 return mask;
1889}
1890
/*
 * MIDI device open: find the solo1 instance owning this minor, wait for
 * the requested access mode(s) to become free, and on first open reset
 * the MPU-401 into UART mode, enable its interrupt and start the 1-jiffy
 * polling timer.
 */
static int solo1_midi_open(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	struct solo1_state *s = NULL;
	struct pci_dev *pci_dev = NULL;

	/* locate the device bound to our driver with a matching MIDI minor */
	while ((pci_dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
		struct pci_driver *drvr;

		drvr = pci_dev_driver(pci_dev);
		if (drvr != &solo1_driver)
			continue;
		s = (struct solo1_state*)pci_get_drvdata(pci_dev);
		if (!s)
			continue;
		if (s->dev_midi == minor)
			break;
	}
	if (!s)
		return -ENODEV;
	VALIDATE_STATE(s);
	file->private_data = s;
	/* wait for device to become free */
	mutex_lock(&s->open_mutex);
	while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) {
		if (file->f_flags & O_NONBLOCK) {
			mutex_unlock(&s->open_mutex);
			return -EBUSY;
		}
		add_wait_queue(&s->open_wait, &wait);
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&s->open_mutex);
		schedule();
		remove_wait_queue(&s->open_wait, &wait);
		set_current_state(TASK_RUNNING);
		if (signal_pending(current))
			return -ERESTARTSYS;
		mutex_lock(&s->open_mutex);
	}
	spin_lock_irqsave(&s->lock, flags);
	/* first opener in either direction initialises the UART */
	if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
		s->midi.ird = s->midi.iwr = s->midi.icnt = 0;
		s->midi.ord = s->midi.owr = s->midi.ocnt = 0;
		outb(0xff, s->mpubase+1); /* reset command */
		outb(0x3f, s->mpubase+1); /* uart command */
		/* flush a pending rx byte (e.g. the reset ACK), if any */
		if (!(inb(s->mpubase+1) & 0x80))
			inb(s->mpubase);
		s->midi.ird = s->midi.iwr = s->midi.icnt = 0;
		outb(0xb0, s->iobase + 7); /* enable A1, A2, MPU irq's */
		init_timer(&s->midi.timer);
		s->midi.timer.expires = jiffies+1;
		s->midi.timer.data = (unsigned long)s;
		s->midi.timer.function = solo1_midi_timer;
		add_timer(&s->midi.timer);
	}
	if (file->f_mode & FMODE_READ) {
		s->midi.ird = s->midi.iwr = s->midi.icnt = 0;
	}
	if (file->f_mode & FMODE_WRITE) {
		s->midi.ord = s->midi.owr = s->midi.ocnt = 0;
	}
	spin_unlock_irqrestore(&s->lock, flags);
	s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
	mutex_unlock(&s->open_mutex);
	return nonseekable_open(inode, file);
}
1959
/*
 * MIDI device release: for writers, wait (with timeout) for the output
 * ring to drain through the UART; when the last opener leaves, disable
 * the MPU interrupt and stop the polling timer.
 */
static int solo1_midi_release(struct inode *inode, struct file *file)
{
	struct solo1_state *s = (struct solo1_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned count, tmo;

	VALIDATE_STATE(s);

	lock_kernel();
	if (file->f_mode & FMODE_WRITE) {
		add_wait_queue(&s->midi.owait, &wait);
		for (;;) {
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_lock_irqsave(&s->lock, flags);
			count = s->midi.ocnt;
			spin_unlock_irqrestore(&s->lock, flags);
			if (count <= 0)
				break;
			if (signal_pending(current))
				break;
			if (file->f_flags & O_NONBLOCK)
				break;
			/* MIDI wire rate is 31250 baud ~= 3125 bytes/s;
			 * 3100 gives a slightly generous drain estimate */
			tmo = (count * HZ) / 3100;
			if (!schedule_timeout(tmo ? : 1) && tmo)
				printk(KERN_DEBUG "solo1: midi timed out??\n");
		}
		remove_wait_queue(&s->midi.owait, &wait);
		set_current_state(TASK_RUNNING);
	}
	mutex_lock(&s->open_mutex);
	s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE));
	spin_lock_irqsave(&s->lock, flags);
	/* last MIDI opener gone: stop MPU irq and the polling timer */
	if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
		outb(0x30, s->iobase + 7); /* enable A1, A2 irq's */
		del_timer(&s->midi.timer);
	}
	spin_unlock_irqrestore(&s->lock, flags);
	wake_up(&s->open_wait);
	mutex_unlock(&s->open_mutex);
	unlock_kernel();
	return 0;
}
2003
/* file operations for the MIDI (MPU-401 UART) device node */
static /*const*/ struct file_operations solo1_midi_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= solo1_midi_read,
	.write		= solo1_midi_write,
	.poll		= solo1_midi_poll,
	.open		= solo1_midi_open,
	.release	= solo1_midi_release,
};
2013
2014/* --------------------------------------------------------------------- */
2015
2016static int solo1_dmfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
2017{
2018 static const unsigned char op_offset[18] = {
2019 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
2020 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
2021 0x10, 0x11, 0x12, 0x13, 0x14, 0x15
2022 };
2023 struct solo1_state *s = (struct solo1_state *)file->private_data;
2024 struct dm_fm_voice v;
2025 struct dm_fm_note n;
2026 struct dm_fm_params p;
2027 unsigned int io;
2028 unsigned int regb;
2029
2030 switch (cmd) {
2031 case FM_IOCTL_RESET:
2032 for (regb = 0xb0; regb < 0xb9; regb++) {
2033 outb(regb, s->sbbase);
2034 outb(0, s->sbbase+1);
2035 outb(regb, s->sbbase+2);
2036 outb(0, s->sbbase+3);
2037 }
2038 return 0;
2039
2040 case FM_IOCTL_PLAY_NOTE:
2041 if (copy_from_user(&n, (void __user *)arg, sizeof(n)))
2042 return -EFAULT;
2043 if (n.voice >= 18)
2044 return -EINVAL;
2045 if (n.voice >= 9) {
2046 regb = n.voice - 9;
2047 io = s->sbbase+2;
2048 } else {
2049 regb = n.voice;
2050 io = s->sbbase;
2051 }
2052 outb(0xa0 + regb, io);
2053 outb(n.fnum & 0xff, io+1);
2054 outb(0xb0 + regb, io);
2055 outb(((n.fnum >> 8) & 3) | ((n.octave & 7) << 2) | ((n.key_on & 1) << 5), io+1);
2056 return 0;
2057
2058 case FM_IOCTL_SET_VOICE:
2059 if (copy_from_user(&v, (void __user *)arg, sizeof(v)))
2060 return -EFAULT;
2061 if (v.voice >= 18)
2062 return -EINVAL;
2063 regb = op_offset[v.voice];
2064 io = s->sbbase + ((v.op & 1) << 1);
2065 outb(0x20 + regb, io);
2066 outb(((v.am & 1) << 7) | ((v.vibrato & 1) << 6) | ((v.do_sustain & 1) << 5) |
2067 ((v.kbd_scale & 1) << 4) | (v.harmonic & 0xf), io+1);
2068 outb(0x40 + regb, io);
2069 outb(((v.scale_level & 0x3) << 6) | (v.volume & 0x3f), io+1);
2070 outb(0x60 + regb, io);
2071 outb(((v.attack & 0xf) << 4) | (v.decay & 0xf), io+1);
2072 outb(0x80 + regb, io);
2073 outb(((v.sustain & 0xf) << 4) | (v.release & 0xf), io+1);
2074 outb(0xe0 + regb, io);
2075 outb(v.waveform & 0x7, io+1);
2076 if (n.voice >= 9) {
2077 regb = n.voice - 9;
2078 io = s->sbbase+2;
2079 } else {
2080 regb = n.voice;
2081 io = s->sbbase;
2082 }
2083 outb(0xc0 + regb, io);
2084 outb(((v.right & 1) << 5) | ((v.left & 1) << 4) | ((v.feedback & 7) << 1) |
2085 (v.connection & 1), io+1);
2086 return 0;
2087
2088 case FM_IOCTL_SET_PARAMS:
2089 if (copy_from_user(&p, (void __user *)arg, sizeof(p)))
2090 return -EFAULT;
2091 outb(0x08, s->sbbase);
2092 outb((p.kbd_split & 1) << 6, s->sbbase+1);
2093 outb(0xbd, s->sbbase);
2094 outb(((p.am_depth & 1) << 7) | ((p.vib_depth & 1) << 6) | ((p.rhythm & 1) << 5) | ((p.bass & 1) << 4) |
2095 ((p.snare & 1) << 3) | ((p.tomtom & 1) << 2) | ((p.cymbal & 1) << 1) | (p.hihat & 1), s->sbbase+1);
2096 return 0;
2097
2098 case FM_IOCTL_SET_OPL:
2099 outb(4, s->sbbase+2);
2100 outb(arg, s->sbbase+3);
2101 return 0;
2102
2103 case FM_IOCTL_SET_MODE:
2104 outb(5, s->sbbase+2);
2105 outb(arg & 1, s->sbbase+3);
2106 return 0;
2107
2108 default:
2109 return -EINVAL;
2110 }
2111}
2112
2113static int solo1_dmfm_open(struct inode *inode, struct file *file)
2114{
2115 unsigned int minor = iminor(inode);
2116 DECLARE_WAITQUEUE(wait, current);
2117 struct solo1_state *s = NULL;
2118 struct pci_dev *pci_dev = NULL;
2119
2120 while ((pci_dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
2121 struct pci_driver *drvr;
2122
2123 drvr = pci_dev_driver(pci_dev);
2124 if (drvr != &solo1_driver)
2125 continue;
2126 s = (struct solo1_state*)pci_get_drvdata(pci_dev);
2127 if (!s)
2128 continue;
2129 if (s->dev_dmfm == minor)
2130 break;
2131 }
2132 if (!s)
2133 return -ENODEV;
2134 VALIDATE_STATE(s);
2135 file->private_data = s;
2136 /* wait for device to become free */
2137 mutex_lock(&s->open_mutex);
2138 while (s->open_mode & FMODE_DMFM) {
2139 if (file->f_flags & O_NONBLOCK) {
2140 mutex_unlock(&s->open_mutex);
2141 return -EBUSY;
2142 }
2143 add_wait_queue(&s->open_wait, &wait);
2144 __set_current_state(TASK_INTERRUPTIBLE);
2145 mutex_unlock(&s->open_mutex);
2146 schedule();
2147 remove_wait_queue(&s->open_wait, &wait);
2148 set_current_state(TASK_RUNNING);
2149 if (signal_pending(current))
2150 return -ERESTARTSYS;
2151 mutex_lock(&s->open_mutex);
2152 }
2153 if (!request_region(s->sbbase, FMSYNTH_EXTENT, "ESS Solo1")) {
2154 mutex_unlock(&s->open_mutex);
2155 printk(KERN_ERR "solo1: FM synth io ports in use, opl3 loaded?\n");
2156 return -EBUSY;
2157 }
2158 /* init the stuff */
2159 outb(1, s->sbbase);
2160 outb(0x20, s->sbbase+1); /* enable waveforms */
2161 outb(4, s->sbbase+2);
2162 outb(0, s->sbbase+3); /* no 4op enabled */
2163 outb(5, s->sbbase+2);
2164 outb(1, s->sbbase+3); /* enable OPL3 */
2165 s->open_mode |= FMODE_DMFM;
2166 mutex_unlock(&s->open_mutex);
2167 return nonseekable_open(inode, file);
2168}
2169
/*
 * Release the FM synthesizer device: key off all OPL3 voices, give
 * back the FM I/O region and wake anyone blocked in solo1_dmfm_open().
 */
static int solo1_dmfm_release(struct inode *inode, struct file *file)
{
	struct solo1_state *s = (struct solo1_state *)file->private_data;
	unsigned int regb;

	VALIDATE_STATE(s);
	lock_kernel();	/* legacy BKL held around the whole teardown */
	mutex_lock(&s->open_mutex);
	s->open_mode &= ~FMODE_DMFM;
	/* silence both register banks (0xb0-0xb8 are the key-on registers) */
	for (regb = 0xb0; regb < 0xb9; regb++) {
		outb(regb, s->sbbase);
		outb(0, s->sbbase+1);
		outb(regb, s->sbbase+2);
		outb(0, s->sbbase+3);
	}
	release_region(s->sbbase, FMSYNTH_EXTENT);
	wake_up(&s->open_wait);	/* let a waiting opener claim the synth */
	mutex_unlock(&s->open_mutex);
	unlock_kernel();
	return 0;
}
2191
/* file_operations for the FM synthesizer device node; the struct was
 * deliberately left non-const by the original author (see comment) */
static /*const*/ struct file_operations solo1_dmfm_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.ioctl		= solo1_dmfm_ioctl,
	.open		= solo1_dmfm_open,
	.release	= solo1_dmfm_release,
};
2199
2200/* --------------------------------------------------------------------- */
2201
/* Default mixer settings applied once at probe time.  0x4040 packs the
 * right/left volume bytes (0x40 each); presumably mid-scale — confirm
 * against the OSS mixer value range. */
static struct initvol {
	int mixch;	/* SOUND_MIXER_WRITE_* ioctl selector */
	int vol;	/* packed right/left volume */
} initvol[] __devinitdata = {
	{ SOUND_MIXER_WRITE_VOLUME, 0x4040 },
	{ SOUND_MIXER_WRITE_PCM, 0x4040 },
	{ SOUND_MIXER_WRITE_SYNTH, 0x4040 },
	{ SOUND_MIXER_WRITE_CD, 0x4040 },
	{ SOUND_MIXER_WRITE_LINE, 0x4040 },
	{ SOUND_MIXER_WRITE_LINE1, 0x4040 },
	{ SOUND_MIXER_WRITE_LINE2, 0x4040 },
	{ SOUND_MIXER_WRITE_RECLEV, 0x4040 },
	{ SOUND_MIXER_WRITE_SPEAKER, 0x4040 },
	{ SOUND_MIXER_WRITE_MIC, 0x4040 }
};
2217
/*
 * Program the chip from a cold (or resumed) state: DDMA window, DMA
 * policy, controller reset, interrupt enables and mixer defaults.
 * Returns 0 on success, -1 if the controller does not come out of reset.
 */
static int setup_solo1(struct solo1_state *s)
{
	struct pci_dev *pcidev = s->dev;
	mm_segment_t fs;
	int i, val;

	/* initialize DDMA base address (low bit enables the decode) */
	printk(KERN_DEBUG "solo1: ddma base address: 0x%lx\n", s->ddmabase);
	pci_write_config_word(pcidev, 0x60, (s->ddmabase & (~0xf)) | 1);
	/* set DMA policy to DDMA, IRQ emulation off (CLKRUN disabled for now) */
	pci_write_config_dword(pcidev, 0x50, 0);
	/* disable legacy audio address decode */
	pci_write_config_word(pcidev, 0x40, 0x907f);

	/* initialize the chips */
	if (!reset_ctrl(s)) {
		printk(KERN_ERR "esssolo1: cannot reset controller\n");
		return -1;
	}
	outb(0xb0, s->iobase+7); /* enable A1, A2, MPU irq's */

	/* initialize mixer regs */
	write_mixer(s, 0x7f, 0); /* disable music digital recording */
	write_mixer(s, 0x7d, 0x0c); /* enable mic preamp, MONO_OUT is 2nd DAC right channel */
	write_mixer(s, 0x64, 0x45); /* volume control */
	write_mixer(s, 0x48, 0x10); /* enable music DAC/ES6xx interface */
	write_mixer(s, 0x50, 0); /* disable spatializer */
	write_mixer(s, 0x52, 0);
	write_mixer(s, 0x14, 0); /* DAC1 minimum volume */
	write_mixer(s, 0x71, 0x20); /* enable new 0xA1 reg format */
	outb(0, s->ddmabase+0xd); /* DMA master clear */
	outb(1, s->ddmabase+0xf); /* mask channel */
	/*outb(0, s->ddmabase+0x8);*/ /* enable controller (enable is low active!!) */

	pci_set_master(pcidev);  /* enable bus mastering */

	/* mixer_ioctl() expects user-space pointers; widen the address
	 * limit temporarily so we can pass kernel buffers below */
	fs = get_fs();
	set_fs(KERNEL_DS);
	val = SOUND_MASK_LINE;
	mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long)&val);
	for (i = 0; i < sizeof(initvol)/sizeof(initvol[0]); i++) {
		val = initvol[i].vol;
		mixer_ioctl(s, initvol[i].mixch, (unsigned long)&val);
	}
	val = 1; /* enable mic preamp */
	mixer_ioctl(s, SOUND_MIXER_PRIVATE1, (unsigned long)&val);
	set_fs(fs);
	return 0;
}
2267
/*
 * PM suspend: stop the DMA engine and sequencer, then disable the DDMA
 * address decode so the dormant chip claims no I/O space.
 * Returns 1 if no driver data is attached to this PCI device.
 */
static int
solo1_suspend(struct pci_dev *pci_dev, pm_message_t state) {
	struct solo1_state *s = (struct solo1_state*)pci_get_drvdata(pci_dev);
	if (!s)
		return 1;
	outb(0, s->iobase+6);	/* stop DMA controller */
	/* DMA master clear */
	outb(0, s->ddmabase+0xd);
	/* reset sequencer and FIFO */
	outb(3, s->sbbase+6);
	/* turn off DDMA controller address space */
	pci_write_config_word(s->dev, 0x60, 0);
	return 0;
}
2282
2283static int
2284solo1_resume(struct pci_dev *pci_dev) {
2285 struct solo1_state *s = (struct solo1_state*)pci_get_drvdata(pci_dev);
2286 if (!s)
2287 return 1;
2288 setup_solo1(s);
2289 return 0;
2290}
2291
#ifdef SUPPORT_JOYSTICK
/*
 * Claim the gameport I/O range and register it with the gameport core.
 * Failure here is non-fatal to the probe; returns 0, -EBUSY or -ENOMEM.
 */
static int __devinit solo1_register_gameport(struct solo1_state *s, int io_port)
{
	struct gameport *gp;

	if (!request_region(io_port, GAMEPORT_EXTENT, "ESS Solo1")) {
		printk(KERN_ERR "solo1: gameport io ports are in use\n");
		return -EBUSY;
	}

	s->gameport = gp = gameport_allocate_port();
	if (!gp) {
		printk(KERN_ERR "solo1: can not allocate memory for gameport\n");
		release_region(io_port, GAMEPORT_EXTENT);
		return -ENOMEM;
	}

	gameport_set_name(gp, "ESS Solo1 Gameport");
	gameport_set_phys(gp, "isa%04x/gameport0", io_port);
	gp->dev.parent = &s->dev->dev;
	gp->io = io_port;

	gameport_register_port(gp);

	return 0;
}

/* Undo solo1_register_gameport(), if a port was registered. */
static inline void solo1_unregister_gameport(struct solo1_state *s)
{
	if (s->gameport) {
		/* copy the io base out before unregister releases the port */
		int gpio = s->gameport->io;
		gameport_unregister_port(s->gameport);
		release_region(gpio, GAMEPORT_EXTENT);
	}
}
#else
/* Joystick support compiled out: keep the call sites linkable. */
static inline int solo1_register_gameport(struct solo1_state *s, int io_port) { return -ENOSYS; }
static inline void solo1_unregister_gameport(struct solo1_state *s) { }
#endif /* SUPPORT_JOYSTICK */
2331
2332static int __devinit solo1_probe(struct pci_dev *pcidev, const struct pci_device_id *pciid)
2333{
2334 struct solo1_state *s;
2335 int gpio;
2336 int ret;
2337
2338 if ((ret=pci_enable_device(pcidev)))
2339 return ret;
2340 if (!(pci_resource_flags(pcidev, 0) & IORESOURCE_IO) ||
2341 !(pci_resource_flags(pcidev, 1) & IORESOURCE_IO) ||
2342 !(pci_resource_flags(pcidev, 2) & IORESOURCE_IO) ||
2343 !(pci_resource_flags(pcidev, 3) & IORESOURCE_IO))
2344 return -ENODEV;
2345 if (pcidev->irq == 0)
2346 return -ENODEV;
2347
2348 /* Recording requires 24-bit DMA, so attempt to set dma mask
2349 * to 24 bits first, then 32 bits (playback only) if that fails.
2350 */
2351 if (pci_set_dma_mask(pcidev, DMA_24BIT_MASK) &&
2352 pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
2353 printk(KERN_WARNING "solo1: architecture does not support 24bit or 32bit PCI busmaster DMA\n");
2354 return -ENODEV;
2355 }
2356
2357 if (!(s = kmalloc(sizeof(struct solo1_state), GFP_KERNEL))) {
2358 printk(KERN_WARNING "solo1: out of memory\n");
2359 return -ENOMEM;
2360 }
2361 memset(s, 0, sizeof(struct solo1_state));
2362 init_waitqueue_head(&s->dma_adc.wait);
2363 init_waitqueue_head(&s->dma_dac.wait);
2364 init_waitqueue_head(&s->open_wait);
2365 init_waitqueue_head(&s->midi.iwait);
2366 init_waitqueue_head(&s->midi.owait);
2367 mutex_init(&s->open_mutex);
2368 spin_lock_init(&s->lock);
2369 s->magic = SOLO1_MAGIC;
2370 s->dev = pcidev;
2371 s->iobase = pci_resource_start(pcidev, 0);
2372 s->sbbase = pci_resource_start(pcidev, 1);
2373 s->vcbase = pci_resource_start(pcidev, 2);
2374 s->ddmabase = s->vcbase + DDMABASE_OFFSET;
2375 s->mpubase = pci_resource_start(pcidev, 3);
2376 gpio = pci_resource_start(pcidev, 4);
2377 s->irq = pcidev->irq;
2378 ret = -EBUSY;
2379 if (!request_region(s->iobase, IOBASE_EXTENT, "ESS Solo1")) {
2380 printk(KERN_ERR "solo1: io ports in use\n");
2381 goto err_region1;
2382 }
2383 if (!request_region(s->sbbase+FMSYNTH_EXTENT, SBBASE_EXTENT-FMSYNTH_EXTENT, "ESS Solo1")) {
2384 printk(KERN_ERR "solo1: io ports in use\n");
2385 goto err_region2;
2386 }
2387 if (!request_region(s->ddmabase, DDMABASE_EXTENT, "ESS Solo1")) {
2388 printk(KERN_ERR "solo1: io ports in use\n");
2389 goto err_region3;
2390 }
2391 if (!request_region(s->mpubase, MPUBASE_EXTENT, "ESS Solo1")) {
2392 printk(KERN_ERR "solo1: io ports in use\n");
2393 goto err_region4;
2394 }
2395 if ((ret=request_irq(s->irq,solo1_interrupt,IRQF_SHARED,"ESS Solo1",s))) {
2396 printk(KERN_ERR "solo1: irq %u in use\n", s->irq);
2397 goto err_irq;
2398 }
2399 /* register devices */
2400 if ((s->dev_audio = register_sound_dsp(&solo1_audio_fops, -1)) < 0) {
2401 ret = s->dev_audio;
2402 goto err_dev1;
2403 }
2404 if ((s->dev_mixer = register_sound_mixer(&solo1_mixer_fops, -1)) < 0) {
2405 ret = s->dev_mixer;
2406 goto err_dev2;
2407 }
2408 if ((s->dev_midi = register_sound_midi(&solo1_midi_fops, -1)) < 0) {
2409 ret = s->dev_midi;
2410 goto err_dev3;
2411 }
2412 if ((s->dev_dmfm = register_sound_special(&solo1_dmfm_fops, 15 /* ?? */)) < 0) {
2413 ret = s->dev_dmfm;
2414 goto err_dev4;
2415 }
2416 if (setup_solo1(s)) {
2417 ret = -EIO;
2418 goto err;
2419 }
2420 /* register gameport */
2421 solo1_register_gameport(s, gpio);
2422 /* store it in the driver field */
2423 pci_set_drvdata(pcidev, s);
2424 return 0;
2425
2426 err:
2427 unregister_sound_special(s->dev_dmfm);
2428 err_dev4:
2429 unregister_sound_midi(s->dev_midi);
2430 err_dev3:
2431 unregister_sound_mixer(s->dev_mixer);
2432 err_dev2:
2433 unregister_sound_dsp(s->dev_audio);
2434 err_dev1:
2435 printk(KERN_ERR "solo1: initialisation error\n");
2436 free_irq(s->irq, s);
2437 err_irq:
2438 release_region(s->mpubase, MPUBASE_EXTENT);
2439 err_region4:
2440 release_region(s->ddmabase, DDMABASE_EXTENT);
2441 err_region3:
2442 release_region(s->sbbase+FMSYNTH_EXTENT, SBBASE_EXTENT-FMSYNTH_EXTENT);
2443 err_region2:
2444 release_region(s->iobase, IOBASE_EXTENT);
2445 err_region1:
2446 kfree(s);
2447 return ret;
2448}
2449
/*
 * PCI remove: quiesce the hardware first, then release the IRQ, I/O
 * regions and OSS registrations in the reverse order of solo1_probe().
 */
static void __devexit solo1_remove(struct pci_dev *dev)
{
	struct solo1_state *s = pci_get_drvdata(dev);

	if (!s)
		return;
	/* stop DMA controller */
	outb(0, s->iobase+6);
	outb(0, s->ddmabase+0xd); /* DMA master clear */
	outb(3, s->sbbase+6); /* reset sequencer and FIFO */
	synchronize_irq(s->irq);	/* let an in-flight handler finish */
	pci_write_config_word(s->dev, 0x60, 0); /* turn off DDMA controller address space */
	free_irq(s->irq, s);
	solo1_unregister_gameport(s);
	release_region(s->iobase, IOBASE_EXTENT);
	release_region(s->sbbase+FMSYNTH_EXTENT, SBBASE_EXTENT-FMSYNTH_EXTENT);
	release_region(s->ddmabase, DDMABASE_EXTENT);
	release_region(s->mpubase, MPUBASE_EXTENT);
	unregister_sound_dsp(s->dev_audio);
	unregister_sound_mixer(s->dev_mixer);
	unregister_sound_midi(s->dev_midi);
	unregister_sound_special(s->dev_dmfm);
	kfree(s);
	pci_set_drvdata(dev, NULL);
}
2475
/* PCI IDs this driver binds to: the ESS Solo-1 audio controller */
static struct pci_device_id id_table[] = {
	{ PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_SOLO1, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, id_table);
2482
/* PCI driver glue.  NOTE(review): the driver name contains a space;
 * later kernels prefer space-free names for sysfs paths — confirm
 * before changing, since the name is also used for resource labels. */
static struct pci_driver solo1_driver = {
	.name		= "ESS Solo1",
	.id_table	= id_table,
	.probe		= solo1_probe,
	.remove		= __devexit_p(solo1_remove),
	.suspend	= solo1_suspend,
	.resume		= solo1_resume,
};
2491
2492
/* Module entry point: announce ourselves and register the PCI driver.
 * NOTE(review): __TIME__/__DATE__ in the banner defeats reproducible
 * builds — consider dropping when convenient. */
static int __init init_solo1(void)
{
	printk(KERN_INFO "solo1: version v0.20 time " __TIME__ " " __DATE__ "\n");
	return pci_register_driver(&solo1_driver);
}
2498
2499/* --------------------------------------------------------------------- */
2500
2501MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu");
2502MODULE_DESCRIPTION("ESS Solo1 Driver");
2503MODULE_LICENSE("GPL");
2504
2505
/* Module exit point: per-card teardown happens in solo1_remove(),
 * invoked by pci_unregister_driver() for each bound device. */
static void __exit cleanup_solo1(void)
{
	printk(KERN_INFO "solo1: unloading\n");
	pci_unregister_driver(&solo1_driver);
}
2511
2512/* --------------------------------------------------------------------- */
2513
2514module_init(init_solo1);
2515module_exit(cleanup_solo1);
2516
diff --git a/sound/oss/forte.c b/sound/oss/forte.c
deleted file mode 100644
index 6c910498924a..000000000000
--- a/sound/oss/forte.c
+++ /dev/null
@@ -1,2138 +0,0 @@
1/*
2 * forte.c - ForteMedia FM801 OSS Driver
3 *
4 * Written by Martin K. Petersen <mkp@mkp.net>
5 * Copyright (C) 2002 Hewlett-Packard Company
6 * Portions Copyright (C) 2003 Martin K. Petersen
7 *
8 * Latest version: http://mkp.net/forte/
9 *
10 * Based upon the ALSA FM801 driver by Jaroslav Kysela and OSS drivers
11 * by Thomas Sailer, Alan Cox, Zach Brown, and Jeff Garzik. Thanks
12 * guys!
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License version
16 * 2 as published by the Free Software Foundation.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
26 * USA
27 *
28 */
29
30#include <linux/module.h>
31#include <linux/kernel.h>
32
33#include <linux/init.h>
34#include <linux/spinlock.h>
35#include <linux/pci.h>
36
37#include <linux/delay.h>
38#include <linux/poll.h>
39
40#include <linux/sound.h>
41#include <linux/ac97_codec.h>
42#include <linux/interrupt.h>
43
44#include <linux/proc_fs.h>
45#include <linux/mutex.h>
46
47#include <asm/uaccess.h>
48#include <asm/io.h>
49
50#define DRIVER_NAME "forte"
51#define DRIVER_VERSION "$Id: forte.c,v 1.63 2003/03/01 05:32:42 mkp Exp $"
52#define PFX DRIVER_NAME ": "
53
54#undef M_DEBUG
55
56#ifdef M_DEBUG
57#define DPRINTK(args...) printk(KERN_WARNING args)
58#else
59#define DPRINTK(args...)
60#endif
61
62/* Card capabilities */
63#define FORTE_CAPS (DSP_CAP_MMAP | DSP_CAP_TRIGGER)
64
65/* Supported audio formats */
66#define FORTE_FMTS (AFMT_U8 | AFMT_S16_LE)
67
68/* Buffers */
69#define FORTE_MIN_FRAG_SIZE 256
70#define FORTE_MAX_FRAG_SIZE PAGE_SIZE
71#define FORTE_DEF_FRAG_SIZE 256
72#define FORTE_MIN_FRAGMENTS 2
73#define FORTE_MAX_FRAGMENTS 256
74#define FORTE_DEF_FRAGMENTS 2
75#define FORTE_MIN_BUF_MSECS 500
76#define FORTE_MAX_BUF_MSECS 1000
77
78/* PCI BARs */
79#define FORTE_PCM_VOL 0x00 /* PCM Output Volume */
80#define FORTE_FM_VOL 0x02 /* FM Output Volume */
81#define FORTE_I2S_VOL 0x04 /* I2S Volume */
82#define FORTE_REC_SRC 0x06 /* Record Source */
83#define FORTE_PLY_CTRL 0x08 /* Playback Control */
84#define FORTE_PLY_COUNT 0x0a /* Playback Count */
85#define FORTE_PLY_BUF1 0x0c /* Playback Buffer I */
86#define FORTE_PLY_BUF2 0x10 /* Playback Buffer II */
87#define FORTE_CAP_CTRL 0x14 /* Capture Control */
88#define FORTE_CAP_COUNT 0x16 /* Capture Count */
89#define FORTE_CAP_BUF1 0x18 /* Capture Buffer I */
90#define FORTE_CAP_BUF2 0x1c /* Capture Buffer II */
91#define FORTE_CODEC_CTRL 0x22 /* Codec Control */
92#define FORTE_I2S_MODE 0x24 /* I2S Mode Control */
93#define FORTE_VOLUME 0x26 /* Volume Up/Down/Mute Status */
94#define FORTE_I2C_CTRL 0x29 /* I2C Control */
95#define FORTE_AC97_CMD 0x2a /* AC'97 Command */
96#define FORTE_AC97_DATA 0x2c /* AC'97 Data */
97#define FORTE_MPU401_DATA 0x30 /* MPU401 Data */
98#define FORTE_MPU401_CMD 0x31 /* MPU401 Command */
99#define FORTE_GPIO_CTRL 0x52 /* General Purpose I/O Control */
100#define FORTE_GEN_CTRL 0x54 /* General Control */
101#define FORTE_IRQ_MASK 0x56 /* Interrupt Mask */
102#define FORTE_IRQ_STATUS 0x5a /* Interrupt Status */
103#define FORTE_OPL3_BANK0 0x68 /* OPL3 Status Read / Bank 0 Write */
104#define FORTE_OPL3_DATA0 0x69 /* OPL3 Data 0 Write */
105#define FORTE_OPL3_BANK1 0x6a /* OPL3 Bank 1 Write */
106#define FORTE_OPL3_DATA1 0x6b /* OPL3 Bank 1 Write */
107#define FORTE_POWERDOWN 0x70 /* Blocks Power Down Control */
108
109#define FORTE_CAP_OFFSET FORTE_CAP_CTRL - FORTE_PLY_CTRL
110
111#define FORTE_AC97_ADDR_SHIFT 10
112
113/* Playback and record control register bits */
114#define FORTE_BUF1_LAST (1<<1)
115#define FORTE_BUF2_LAST (1<<2)
116#define FORTE_START (1<<5)
117#define FORTE_PAUSE (1<<6)
118#define FORTE_IMMED_STOP (1<<7)
119#define FORTE_RATE_SHIFT 8
120#define FORTE_RATE_MASK (15 << FORTE_RATE_SHIFT)
121#define FORTE_CHANNELS_4 (1<<12) /* Playback only */
122#define FORTE_CHANNELS_6 (2<<12) /* Playback only */
123#define FORTE_CHANNELS_6MS (3<<12) /* Playback only */
124#define FORTE_CHANNELS_MASK (3<<12)
125#define FORTE_16BIT (1<<14)
126#define FORTE_STEREO (1<<15)
127
128/* IRQ status bits */
129#define FORTE_IRQ_PLAYBACK (1<<8)
130#define FORTE_IRQ_CAPTURE (1<<9)
131#define FORTE_IRQ_VOLUME (1<<14)
132#define FORTE_IRQ_MPU (1<<15)
133
134/* CODEC control */
135#define FORTE_CC_CODEC_RESET (1<<5)
136#define FORTE_CC_AC97_RESET (1<<6)
137
138/* AC97 cmd */
139#define FORTE_AC97_WRITE (0<<7)
140#define FORTE_AC97_READ (1<<7)
141#define FORTE_AC97_DP_INVALID (0<<8)
142#define FORTE_AC97_DP_VALID (1<<8)
143#define FORTE_AC97_PORT_RDY (0<<9)
144#define FORTE_AC97_PORT_BSY (1<<9)
145
146
/* Per-direction (playback or capture) DMA engine state */
struct forte_channel {
        const char *name;		/* "play"/"record"-style label for logging */

        unsigned short ctrl;            /* Ctrl BAR contents */
        unsigned long iobase;           /* Ctrl BAR address */

        wait_queue_head_t wait;		/* sleepers waiting for a free fragment */

        void *buf;                      /* Buffer */
        dma_addr_t buf_handle;          /* Buffer handle */

        unsigned record;		/* nonzero for the capture channel */
        unsigned format;		/* current OSS AFMT_* format */
        unsigned rate;			/* current sample rate in Hz */
        unsigned stereo;		/* 0 = mono, 1 = stereo */

        unsigned frag_sz;               /* Current fragment size */
        unsigned frag_num;              /* Current # of fragments */
        unsigned frag_msecs;            /* Milliseconds per frag */
        unsigned buf_sz;                /* Current buffer size */

        unsigned hwptr;                 /* Tail */
        unsigned swptr;                 /* Head */
        unsigned filled_frags;          /* Fragments currently full */
        unsigned next_buf;              /* Index of next buffer */

        unsigned active;                /* Channel currently in use */
        unsigned mapped;                /* mmap */

        unsigned buf_pages;             /* Real size of buffer */
        unsigned nr_irqs;               /* Number of interrupts */
        unsigned bytes;                 /* Total bytes */
        unsigned residue;               /* Partial fragment */
};
181
182
/* Per-card state: one FM801 chip with a playback and a capture channel */
struct forte_chip {
	struct pci_dev *pci_dev;	/* underlying PCI device */
	unsigned long iobase;		/* base of the chip's I/O window */
	int irq;			/* assigned interrupt line */

	struct mutex open_mutex;	/* Device access */
	spinlock_t lock;		/* State */

	spinlock_t ac97_lock;		/* serializes AC'97 port transactions */
	struct ac97_codec *ac97;	/* attached AC'97 mixer codec */

	int multichannel;		/* chip supports 4/6-channel output */
	int dsp;			/* OSS handle */
	int trigger;			/* mmap I/O trigger */

	struct forte_channel play;	/* playback engine state */
	struct forte_channel rec;	/* capture engine state */
};
201
202
/* Channel counts and sample rates the FM801 supports; the rate table
 * parallels the hardware rate codes used in forte_channel_rate() */
static int channels[] = { 2, 4, 6, };
static int rates[]    = { 5500,  8000,  9600, 11025, 16000, 19200,
                          22050, 32000, 38400, 44100, 48000, };

/* Single-instance driver: state of the one card handled */
static struct forte_chip *forte;
static int found;		/* number of cards detected so far */
209
210
211/* AC97 Codec -------------------------------------------------------------- */
212
213
214/**
215 * forte_ac97_wait:
216 * @chip: fm801 instance whose AC97 codec to wait on
217 *
218 * FIXME:
219 * Stop busy-waiting
220 */
221
222static inline int
223forte_ac97_wait (struct forte_chip *chip)
224{
225 int i = 10000;
226
227 while ( (inw (chip->iobase + FORTE_AC97_CMD) & FORTE_AC97_PORT_BSY)
228 && i-- )
229 cpu_relax();
230
231 return i == 0;
232}
233
234
235/**
236 * forte_ac97_read:
237 * @codec: AC97 codec to read from
238 * @reg: register to read
239 */
240
241static u16
242forte_ac97_read (struct ac97_codec *codec, u8 reg)
243{
244 u16 ret = 0;
245 struct forte_chip *chip = codec->private_data;
246
247 spin_lock (&chip->ac97_lock);
248
249 /* Knock, knock */
250 if (forte_ac97_wait (chip)) {
251 printk (KERN_ERR PFX "ac97_read: Serial bus busy\n");
252 goto out;
253 }
254
255 /* Send read command */
256 outw (reg | (1<<7), chip->iobase + FORTE_AC97_CMD);
257
258 if (forte_ac97_wait (chip)) {
259 printk (KERN_ERR PFX "ac97_read: Bus busy reading reg 0x%x\n",
260 reg);
261 goto out;
262 }
263
264 /* Sanity checking */
265 if (inw (chip->iobase + FORTE_AC97_CMD) & FORTE_AC97_DP_INVALID) {
266 printk (KERN_ERR PFX "ac97_read: Invalid data port");
267 goto out;
268 }
269
270 /* Fetch result */
271 ret = inw (chip->iobase + FORTE_AC97_DATA);
272
273 out:
274 spin_unlock (&chip->ac97_lock);
275 return ret;
276}
277
278
279/**
280 * forte_ac97_write:
281 * @codec: AC97 codec to send command to
282 * @reg: register to write
283 * @val: value to write
284 */
285
static void
forte_ac97_write (struct ac97_codec *codec, u8 reg, u16 val)
{
	struct forte_chip *chip = codec->private_data;

	spin_lock (&chip->ac97_lock);

	/* Wait for the serial port to go idle */
	if (forte_ac97_wait (chip)) {
		printk (KERN_ERR PFX "ac97_write: Serial bus busy\n");
		goto out;
	}

	/* Data first; writing the command register starts the transfer.
	 * NOTE(review): the command register is written with outb here
	 * but read with inw/outw elsewhere — confirm against the FM801
	 * datasheet whether a byte write is intended. */
	outw (val, chip->iobase + FORTE_AC97_DATA);
	outb (reg | FORTE_AC97_WRITE, chip->iobase + FORTE_AC97_CMD);

	/* Wait for completion */
	if (forte_ac97_wait (chip)) {
		printk (KERN_ERR PFX "ac97_write: Bus busy after write\n");
		goto out;
	}

 out:
	spin_unlock (&chip->ac97_lock);
}
311
312
313/* Mixer ------------------------------------------------------------------- */
314
315
316/**
317 * forte_mixer_open:
318 * @inode:
319 * @file:
320 */
321
322static int
323forte_mixer_open (struct inode *inode, struct file *file)
324{
325 struct forte_chip *chip = forte;
326 file->private_data = chip->ac97;
327 return 0;
328}
329
330
331/**
332 * forte_mixer_release:
333 * @inode:
334 * @file:
335 */
336
/* Nothing to undo here: open() only stashed a codec pointer. */
static int
forte_mixer_release (struct inode *inode, struct file *file)
{
	/* We will welease Wodewick */
	return 0;
}
343
344
345/**
346 * forte_mixer_ioctl:
347 * @inode:
348 * @file:
349 */
350
351static int
352forte_mixer_ioctl (struct inode *inode, struct file *file,
353 unsigned int cmd, unsigned long arg)
354{
355 struct ac97_codec *codec = (struct ac97_codec *) file->private_data;
356
357 return codec->mixer_ioctl (codec, cmd, arg);
358}
359
360
/* file_operations for the OSS mixer node; everything interesting is
 * delegated to the AC'97 codec via forte_mixer_ioctl() */
static struct file_operations forte_mixer_fops = {
	.owner			= THIS_MODULE,
	.llseek         	= no_llseek,
	.ioctl          	= forte_mixer_ioctl,
	.open           	= forte_mixer_open,
	.release        	= forte_mixer_release,
};
368
369
370/* Channel ----------------------------------------------------------------- */
371
372/**
373 * forte_channel_reset:
374 * @channel: Channel to reset
375 *
376 * Locking: Must be called with lock held.
377 */
378
static void
forte_channel_reset (struct forte_channel *channel)
{
	if (!channel || !channel->iobase)
		return;

	DPRINTK ("%s: channel = %s\n", __FUNCTION__, channel->name);

	/* stop the engine.  NOTE(review): iobase appears to be offset per
	 * channel so that +FORTE_PLY_CTRL also reaches the capture ctrl
	 * register — confirm against the probe/setup code. */
	channel->ctrl &= ~FORTE_START;
	outw (channel->ctrl, channel->iobase + FORTE_PLY_CTRL);

	/* We always play at least two fragments, hence these defaults */
	channel->hwptr = channel->frag_sz;
	channel->next_buf = 1;
	channel->swptr = 0;
	channel->filled_frags = 0;
	channel->active = 0;
	channel->bytes = 0;
	channel->nr_irqs = 0;
	channel->mapped = 0;
	channel->residue = 0;
}
401
402
403/**
404 * forte_channel_start:
405 * @channel: Channel to start (record/playback)
406 *
407 * Locking: Must be called with lock held.
408 */
409
410static void inline
411forte_channel_start (struct forte_channel *channel)
412{
413 if (!channel || !channel->iobase || channel->active)
414 return;
415
416 channel->ctrl &= ~(FORTE_PAUSE | FORTE_BUF1_LAST | FORTE_BUF2_LAST
417 | FORTE_IMMED_STOP);
418 channel->ctrl |= FORTE_START;
419 channel->active = 1;
420 outw (channel->ctrl, channel->iobase + FORTE_PLY_CTRL);
421}
422
423
424/**
425 * forte_channel_stop:
426 * @channel: Channel to stop
427 *
428 * Locking: Must be called with lock held.
429 */
430
431static void inline
432forte_channel_stop (struct forte_channel *channel)
433{
434 if (!channel || !channel->iobase)
435 return;
436
437 channel->ctrl &= ~(FORTE_START | FORTE_PAUSE);
438 channel->ctrl |= FORTE_IMMED_STOP;
439
440 channel->active = 0;
441 outw (channel->ctrl, channel->iobase + FORTE_PLY_CTRL);
442}
443
444
445/**
446 * forte_channel_pause:
447 * @channel: Channel to pause
448 *
449 * Locking: Must be called with lock held.
450 */
451
452static void inline
453forte_channel_pause (struct forte_channel *channel)
454{
455 if (!channel || !channel->iobase)
456 return;
457
458 channel->ctrl |= FORTE_PAUSE;
459
460 channel->active = 0;
461 outw (channel->ctrl, channel->iobase + FORTE_PLY_CTRL);
462}
463
464
465/**
466 * forte_channel_rate:
467 * @channel: Channel whose rate to set. Playback and record are
468 * independent.
469 * @rate: Channel rate in Hz
470 *
471 * Locking: Must be called with lock held.
472 */
473
474static int
475forte_channel_rate (struct forte_channel *channel, unsigned int rate)
476{
477 int new_rate;
478
479 if (!channel || !channel->iobase)
480 return -EINVAL;
481
482 /* The FM801 only supports a handful of fixed frequencies.
483 * We find the value closest to what userland requested.
484 */
485 if (rate <= 6250) { rate = 5500; new_rate = 0; }
486 else if (rate <= 8800) { rate = 8000; new_rate = 1; }
487 else if (rate <= 10312) { rate = 9600; new_rate = 2; }
488 else if (rate <= 13512) { rate = 11025; new_rate = 3; }
489 else if (rate <= 17600) { rate = 16000; new_rate = 4; }
490 else if (rate <= 20625) { rate = 19200; new_rate = 5; }
491 else if (rate <= 27025) { rate = 22050; new_rate = 6; }
492 else if (rate <= 35200) { rate = 32000; new_rate = 7; }
493 else if (rate <= 41250) { rate = 38400; new_rate = 8; }
494 else if (rate <= 46050) { rate = 44100; new_rate = 9; }
495 else { rate = 48000; new_rate = 10; }
496
497 channel->ctrl &= ~FORTE_RATE_MASK;
498 channel->ctrl |= new_rate << FORTE_RATE_SHIFT;
499 channel->rate = rate;
500
501 DPRINTK ("%s: %s rate = %d\n", __FUNCTION__, channel->name, rate);
502
503 return rate;
504}
505
506
507/**
508 * forte_channel_format:
509 * @channel: Channel whose audio format to set
510 * @format: OSS format ID
511 *
512 * Locking: Must be called with lock held.
513 */
514
515static int
516forte_channel_format (struct forte_channel *channel, int format)
517{
518 if (!channel || !channel->iobase)
519 return -EINVAL;
520
521 switch (format) {
522
523 case AFMT_QUERY:
524 break;
525
526 case AFMT_U8:
527 channel->ctrl &= ~FORTE_16BIT;
528 channel->format = AFMT_U8;
529 break;
530
531 case AFMT_S16_LE:
532 default:
533 channel->ctrl |= FORTE_16BIT;
534 channel->format = AFMT_S16_LE;
535 break;
536 }
537
538 DPRINTK ("%s: %s want %d format, got %d\n", __FUNCTION__, channel->name,
539 format, channel->format);
540
541 return channel->format;
542}
543
544
545/**
546 * forte_channel_stereo:
547 * @channel: Channel to toggle
548 * @stereo: 0 for Mono, 1 for Stereo
549 *
550 * Locking: Must be called with lock held.
551 */
552
553static int
554forte_channel_stereo (struct forte_channel *channel, unsigned int stereo)
555{
556 int ret;
557
558 if (!channel || !channel->iobase)
559 return -EINVAL;
560
561 DPRINTK ("%s: %s stereo = %d\n", __FUNCTION__, channel->name, stereo);
562
563 switch (stereo) {
564
565 case 0:
566 channel->ctrl &= ~(FORTE_STEREO | FORTE_CHANNELS_MASK);
567 channel-> stereo = stereo;
568 ret = stereo;
569 break;
570
571 case 1:
572 channel->ctrl &= ~FORTE_CHANNELS_MASK;
573 channel->ctrl |= FORTE_STEREO;
574 channel-> stereo = stereo;
575 ret = stereo;
576 break;
577
578 default:
579 DPRINTK ("Unsupported channel format");
580 ret = -EINVAL;
581 break;
582 }
583
584 return ret;
585}
586
587
588/**
589 * forte_channel_buffer:
590 * @channel: Channel whose buffer to set up
591 *
592 * Locking: Must be called with lock held.
593 */
594
static void
forte_channel_buffer (struct forte_channel *channel, int sz, int num)
{
	unsigned int msecs, shift;

	/* Go away, I'm busy: never resize a buffer with data in flight */
	if (channel->filled_frags || channel->bytes)
		return;

	/* Fragment size must be a power of 2: frag_sz becomes the largest
	 * power of two not exceeding sz+1 (the ++ makes an exact power of
	 * two map to itself) */
	shift = 0; sz++;
	while (sz >>= 1)
		shift++;
	channel->frag_sz = 1 << shift;

	/* Round fragment size to something reasonable */
	if (channel->frag_sz < FORTE_MIN_FRAG_SIZE)
		channel->frag_sz = FORTE_MIN_FRAG_SIZE;

	if (channel->frag_sz > FORTE_MAX_FRAG_SIZE)
		channel->frag_sz = FORTE_MAX_FRAG_SIZE;

	/* Find fragment length in milliseconds (bytes -> samples -> ms) */
	msecs = channel->frag_sz /
		(channel->format == AFMT_S16_LE ? 2 : 1) /
		(channel->stereo ? 2 : 1) /
		(channel->rate / 1000);

	channel->frag_msecs = msecs;

	/* Pick a suitable number of fragments so total buffered audio
	 * stays within the [FORTE_MIN_BUF_MSECS, FORTE_MAX_BUF_MSECS]
	 * window */
	if (msecs * num < FORTE_MIN_BUF_MSECS)
		num = FORTE_MIN_BUF_MSECS / msecs;

	if (msecs * num > FORTE_MAX_BUF_MSECS)
		num = FORTE_MAX_BUF_MSECS / msecs;

	/* Fragment number must be a power of 2 (rounded up here, unlike
	 * the size rounding above which rounds down) */
	shift = 0;
	while (num >>= 1)
		shift++;
	channel->frag_num = 1 << (shift + 1);

	/* Round fragment number to something reasonable */
	if (channel->frag_num < FORTE_MIN_FRAGMENTS)
		channel->frag_num = FORTE_MIN_FRAGMENTS;

	if (channel->frag_num > FORTE_MAX_FRAGMENTS)
		channel->frag_num = FORTE_MAX_FRAGMENTS;

	channel->buf_sz = channel->frag_sz * channel->frag_num;

	DPRINTK ("%s: %s frag_sz = %d, frag_num = %d, buf_sz = %d\n",
		 __FUNCTION__, channel->name, channel->frag_sz,
		 channel->frag_num, channel->buf_sz);
}
651
652
653/**
654 * forte_channel_prep:
655 * @channel: Channel whose buffer to prepare
656 *
657 * Locking: Lock held.
658 */
659
static void
forte_channel_prep (struct forte_channel *channel)
{
	struct page *page;
	int i;

	/* already allocated: keep the existing DMA buffer */
	if (channel->buf)
		return;

	forte_channel_buffer (channel, channel->frag_sz, channel->frag_num);
	/* round the byte size up to whole pages */
	channel->buf_pages = channel->buf_sz >> PAGE_SHIFT;

	if (channel->buf_sz % PAGE_SIZE)
		channel->buf_pages++;

	DPRINTK ("%s: %s frag_sz = %d, frag_num = %d, buf_sz = %d, pg = %d\n",
		 __FUNCTION__, channel->name, channel->frag_sz,
		 channel->frag_num, channel->buf_sz, channel->buf_pages);

	/* DMA buffer */
	channel->buf = pci_alloc_consistent (forte->pci_dev,
					     channel->buf_pages * PAGE_SIZE,
					     &channel->buf_handle);

	/* NOTE(review): crashing on allocation failure is harsh; a
	 * graceful error path would be preferable, but the function
	 * returns void so callers cannot currently be told */
	if (!channel->buf || !channel->buf_handle)
		BUG();

	page = virt_to_page (channel->buf);

	/* FIXME: can this go away ? (legacy mmap-safety reservation) */
	for (i = 0 ; i < channel->buf_pages ; i++)
		SetPageReserved(page++);

	/* Prep buffer registers: count is programmed as size-1, and the
	 * two hardware buffer pointers cover the first two fragments */
	outw (channel->frag_sz - 1, channel->iobase + FORTE_PLY_COUNT);
	outl (channel->buf_handle, channel->iobase + FORTE_PLY_BUF1);
	outl (channel->buf_handle + channel->frag_sz,
	      channel->iobase + FORTE_PLY_BUF2);

	/* Reset hwptr */
	channel->hwptr = channel->frag_sz;
	channel->next_buf = 1;

	DPRINTK ("%s: %s buffer @ %p (%p)\n", __FUNCTION__, channel->name,
		 channel->buf, channel->buf_handle);
}
706
707
708/**
709 * forte_channel_drain:
710 * @chip:
711 * @channel:
712 *
713 * Locking: Don't hold the lock.
714 */
715
716static inline int
717forte_channel_drain (struct forte_channel *channel)
718{
719 DECLARE_WAITQUEUE (wait, current);
720 unsigned long flags;
721
722 DPRINTK ("%s\n", __FUNCTION__);
723
724 if (channel->mapped) {
725 spin_lock_irqsave (&forte->lock, flags);
726 forte_channel_stop (channel);
727 spin_unlock_irqrestore (&forte->lock, flags);
728 return 0;
729 }
730
731 spin_lock_irqsave (&forte->lock, flags);
732 add_wait_queue (&channel->wait, &wait);
733
734 for (;;) {
735 if (channel->active == 0 || channel->filled_frags == 1)
736 break;
737
738 spin_unlock_irqrestore (&forte->lock, flags);
739
740 __set_current_state (TASK_INTERRUPTIBLE);
741 schedule();
742
743 spin_lock_irqsave (&forte->lock, flags);
744 }
745
746 forte_channel_stop (channel);
747 forte_channel_reset (channel);
748 set_current_state (TASK_RUNNING);
749 remove_wait_queue (&channel->wait, &wait);
750 spin_unlock_irqrestore (&forte->lock, flags);
751
752 return 0;
753}
754
755
756/**
757 * forte_channel_init:
758 * @chip: Forte chip instance the channel hangs off
759 * @channel: Channel to initialize
760 *
761 * Description:
762 * Initializes a channel, sets defaults, and allocates
763 * buffers.
764 *
765 * Locking: No lock held.
766 */
767
768static int
769forte_channel_init (struct forte_chip *chip, struct forte_channel *channel)
770{
771 DPRINTK ("%s: chip iobase @ %p\n", __FUNCTION__, (void *)chip->iobase);
772
773 spin_lock_irq (&chip->lock);
774 memset (channel, 0x0, sizeof (*channel));
775
776 if (channel == &chip->play) {
777 channel->name = "PCM_OUT";
778 channel->iobase = chip->iobase;
779 DPRINTK ("%s: PCM-OUT iobase @ %p\n", __FUNCTION__,
780 (void *) channel->iobase);
781 }
782 else if (channel == &chip->rec) {
783 channel->name = "PCM_IN";
784 channel->iobase = chip->iobase + FORTE_CAP_OFFSET;
785 channel->record = 1;
786 DPRINTK ("%s: PCM-IN iobase @ %p\n", __FUNCTION__,
787 (void *) channel->iobase);
788 }
789 else
790 BUG();
791
792 init_waitqueue_head (&channel->wait);
793
794 /* Defaults: 48kHz, 16-bit, stereo */
795 channel->ctrl = inw (channel->iobase + FORTE_PLY_CTRL);
796 forte_channel_reset (channel);
797 forte_channel_stereo (channel, 1);
798 forte_channel_format (channel, AFMT_S16_LE);
799 forte_channel_rate (channel, 48000);
800 channel->frag_sz = FORTE_DEF_FRAG_SIZE;
801 channel->frag_num = FORTE_DEF_FRAGMENTS;
802
803 chip->trigger = 0;
804 spin_unlock_irq (&chip->lock);
805
806 return 0;
807}
808
809
810/**
811 * forte_channel_free:
812 * @chip: Chip this channel hangs off
813 * @channel: Channel to nuke
814 *
815 * Description:
816 * Resets channel and frees buffers.
817 *
818 * Locking: Hold your horses.
819 */
820
821static void
822forte_channel_free (struct forte_chip *chip, struct forte_channel *channel)
823{
824 DPRINTK ("%s: %s\n", __FUNCTION__, channel->name);
825
826 if (!channel->buf_handle)
827 return;
828
829 pci_free_consistent (chip->pci_dev, channel->buf_pages * PAGE_SIZE,
830 channel->buf, channel->buf_handle);
831
832 memset (channel, 0x0, sizeof (*channel));
833}
834
835
836/* DSP --------------------------------------------------------------------- */
837
838
839/**
840 * forte_dsp_ioctl:
841 */
842
843static int
844forte_dsp_ioctl (struct inode *inode, struct file *file, unsigned int cmd,
845 unsigned long arg)
846{
847 int ival=0, ret, rval=0, rd, wr, count;
848 struct forte_chip *chip;
849 struct audio_buf_info abi;
850 struct count_info cinfo;
851 void __user *argp = (void __user *)arg;
852 int __user *p = argp;
853
854 chip = file->private_data;
855
856 if (file->f_mode & FMODE_WRITE)
857 wr = 1;
858 else
859 wr = 0;
860
861 if (file->f_mode & FMODE_READ)
862 rd = 1;
863 else
864 rd = 0;
865
866 switch (cmd) {
867
868 case OSS_GETVERSION:
869 return put_user (SOUND_VERSION, p);
870
871 case SNDCTL_DSP_GETCAPS:
872 DPRINTK ("%s: GETCAPS\n", __FUNCTION__);
873
874 ival = FORTE_CAPS; /* DUPLEX */
875 return put_user (ival, p);
876
877 case SNDCTL_DSP_GETFMTS:
878 DPRINTK ("%s: GETFMTS\n", __FUNCTION__);
879
880 ival = FORTE_FMTS; /* U8, 16LE */
881 return put_user (ival, p);
882
883 case SNDCTL_DSP_SETFMT: /* U8, 16LE */
884 DPRINTK ("%s: SETFMT\n", __FUNCTION__);
885
886 if (get_user (ival, p))
887 return -EFAULT;
888
889 spin_lock_irq (&chip->lock);
890
891 if (rd) {
892 forte_channel_stop (&chip->rec);
893 rval = forte_channel_format (&chip->rec, ival);
894 }
895
896 if (wr) {
897 forte_channel_stop (&chip->rec);
898 rval = forte_channel_format (&chip->play, ival);
899 }
900
901 spin_unlock_irq (&chip->lock);
902
903 return put_user (rval, p);
904
905 case SNDCTL_DSP_STEREO: /* 0 - mono, 1 - stereo */
906 DPRINTK ("%s: STEREO\n", __FUNCTION__);
907
908 if (get_user (ival, p))
909 return -EFAULT;
910
911 spin_lock_irq (&chip->lock);
912
913 if (rd) {
914 forte_channel_stop (&chip->rec);
915 rval = forte_channel_stereo (&chip->rec, ival);
916 }
917
918 if (wr) {
919 forte_channel_stop (&chip->rec);
920 rval = forte_channel_stereo (&chip->play, ival);
921 }
922
923 spin_unlock_irq (&chip->lock);
924
925 return put_user (rval, p);
926
927 case SNDCTL_DSP_CHANNELS: /* 1 - mono, 2 - stereo */
928 DPRINTK ("%s: CHANNELS\n", __FUNCTION__);
929
930 if (get_user (ival, p))
931 return -EFAULT;
932
933 spin_lock_irq (&chip->lock);
934
935 if (rd) {
936 forte_channel_stop (&chip->rec);
937 rval = forte_channel_stereo (&chip->rec, ival-1) + 1;
938 }
939
940 if (wr) {
941 forte_channel_stop (&chip->play);
942 rval = forte_channel_stereo (&chip->play, ival-1) + 1;
943 }
944
945 spin_unlock_irq (&chip->lock);
946
947 return put_user (rval, p);
948
949 case SNDCTL_DSP_SPEED:
950 DPRINTK ("%s: SPEED\n", __FUNCTION__);
951
952 if (get_user (ival, p))
953 return -EFAULT;
954
955 spin_lock_irq (&chip->lock);
956
957 if (rd) {
958 forte_channel_stop (&chip->rec);
959 rval = forte_channel_rate (&chip->rec, ival);
960 }
961
962 if (wr) {
963 forte_channel_stop (&chip->play);
964 rval = forte_channel_rate (&chip->play, ival);
965 }
966
967 spin_unlock_irq (&chip->lock);
968
969 return put_user(rval, p);
970
971 case SNDCTL_DSP_GETBLKSIZE:
972 DPRINTK ("%s: GETBLKSIZE\n", __FUNCTION__);
973
974 spin_lock_irq (&chip->lock);
975
976 if (rd)
977 ival = chip->rec.frag_sz;
978
979 if (wr)
980 ival = chip->play.frag_sz;
981
982 spin_unlock_irq (&chip->lock);
983
984 return put_user (ival, p);
985
986 case SNDCTL_DSP_RESET:
987 DPRINTK ("%s: RESET\n", __FUNCTION__);
988
989 spin_lock_irq (&chip->lock);
990
991 if (rd)
992 forte_channel_reset (&chip->rec);
993
994 if (wr)
995 forte_channel_reset (&chip->play);
996
997 spin_unlock_irq (&chip->lock);
998
999 return 0;
1000
1001 case SNDCTL_DSP_SYNC:
1002 DPRINTK ("%s: SYNC\n", __FUNCTION__);
1003
1004 if (wr)
1005 ret = forte_channel_drain (&chip->play);
1006
1007 return 0;
1008
1009 case SNDCTL_DSP_POST:
1010 DPRINTK ("%s: POST\n", __FUNCTION__);
1011
1012 if (wr) {
1013 spin_lock_irq (&chip->lock);
1014
1015 if (chip->play.filled_frags)
1016 forte_channel_start (&chip->play);
1017
1018 spin_unlock_irq (&chip->lock);
1019 }
1020
1021 return 0;
1022
1023 case SNDCTL_DSP_SETFRAGMENT:
1024 DPRINTK ("%s: SETFRAGMENT\n", __FUNCTION__);
1025
1026 if (get_user (ival, p))
1027 return -EFAULT;
1028
1029 spin_lock_irq (&chip->lock);
1030
1031 if (rd) {
1032 forte_channel_buffer (&chip->rec, ival & 0xffff,
1033 (ival >> 16) & 0xffff);
1034 ival = (chip->rec.frag_num << 16) + chip->rec.frag_sz;
1035 }
1036
1037 if (wr) {
1038 forte_channel_buffer (&chip->play, ival & 0xffff,
1039 (ival >> 16) & 0xffff);
1040 ival = (chip->play.frag_num << 16) +chip->play.frag_sz;
1041 }
1042
1043 spin_unlock_irq (&chip->lock);
1044
1045 return put_user (ival, p);
1046
1047 case SNDCTL_DSP_GETISPACE:
1048 DPRINTK ("%s: GETISPACE\n", __FUNCTION__);
1049
1050 if (!rd)
1051 return -EINVAL;
1052
1053 spin_lock_irq (&chip->lock);
1054
1055 abi.fragstotal = chip->rec.frag_num;
1056 abi.fragsize = chip->rec.frag_sz;
1057
1058 if (chip->rec.mapped) {
1059 abi.fragments = chip->rec.frag_num - 2;
1060 abi.bytes = abi.fragments * abi.fragsize;
1061 }
1062 else {
1063 abi.fragments = chip->rec.filled_frags;
1064 abi.bytes = abi.fragments * abi.fragsize;
1065 }
1066
1067 spin_unlock_irq (&chip->lock);
1068
1069 return copy_to_user (argp, &abi, sizeof (abi)) ? -EFAULT : 0;
1070
1071 case SNDCTL_DSP_GETIPTR:
1072 DPRINTK ("%s: GETIPTR\n", __FUNCTION__);
1073
1074 if (!rd)
1075 return -EINVAL;
1076
1077 spin_lock_irq (&chip->lock);
1078
1079 if (chip->rec.active)
1080 cinfo.ptr = chip->rec.hwptr;
1081 else
1082 cinfo.ptr = 0;
1083
1084 cinfo.bytes = chip->rec.bytes;
1085 cinfo.blocks = chip->rec.nr_irqs;
1086 chip->rec.nr_irqs = 0;
1087
1088 spin_unlock_irq (&chip->lock);
1089
1090 return copy_to_user (argp, &cinfo, sizeof (cinfo)) ? -EFAULT : 0;
1091
1092 case SNDCTL_DSP_GETOSPACE:
1093 if (!wr)
1094 return -EINVAL;
1095
1096 spin_lock_irq (&chip->lock);
1097
1098 abi.fragstotal = chip->play.frag_num;
1099 abi.fragsize = chip->play.frag_sz;
1100
1101 if (chip->play.mapped) {
1102 abi.fragments = chip->play.frag_num - 2;
1103 abi.bytes = chip->play.buf_sz;
1104 }
1105 else {
1106 abi.fragments = chip->play.frag_num -
1107 chip->play.filled_frags;
1108
1109 if (chip->play.residue)
1110 abi.fragments--;
1111
1112 abi.bytes = abi.fragments * abi.fragsize +
1113 chip->play.residue;
1114 }
1115
1116 spin_unlock_irq (&chip->lock);
1117
1118 return copy_to_user (argp, &abi, sizeof (abi)) ? -EFAULT : 0;
1119
1120 case SNDCTL_DSP_GETOPTR:
1121 if (!wr)
1122 return -EINVAL;
1123
1124 spin_lock_irq (&chip->lock);
1125
1126 if (chip->play.active)
1127 cinfo.ptr = chip->play.hwptr;
1128 else
1129 cinfo.ptr = 0;
1130
1131 cinfo.bytes = chip->play.bytes;
1132 cinfo.blocks = chip->play.nr_irqs;
1133 chip->play.nr_irqs = 0;
1134
1135 spin_unlock_irq (&chip->lock);
1136
1137 return copy_to_user (argp, &cinfo, sizeof (cinfo)) ? -EFAULT : 0;
1138
1139 case SNDCTL_DSP_GETODELAY:
1140 if (!wr)
1141 return -EINVAL;
1142
1143 spin_lock_irq (&chip->lock);
1144
1145 if (!chip->play.active) {
1146 ival = 0;
1147 }
1148 else if (chip->play.mapped) {
1149 count = inw (chip->play.iobase + FORTE_PLY_COUNT) + 1;
1150 ival = chip->play.frag_sz - count;
1151 }
1152 else {
1153 ival = chip->play.filled_frags * chip->play.frag_sz;
1154
1155 if (chip->play.residue)
1156 ival += chip->play.frag_sz - chip->play.residue;
1157 }
1158
1159 spin_unlock_irq (&chip->lock);
1160
1161 return put_user (ival, p);
1162
1163 case SNDCTL_DSP_SETDUPLEX:
1164 DPRINTK ("%s: SETDUPLEX\n", __FUNCTION__);
1165
1166 return -EINVAL;
1167
1168 case SNDCTL_DSP_GETTRIGGER:
1169 DPRINTK ("%s: GETTRIGGER\n", __FUNCTION__);
1170
1171 return put_user (chip->trigger, p);
1172
1173 case SNDCTL_DSP_SETTRIGGER:
1174
1175 if (get_user (ival, p))
1176 return -EFAULT;
1177
1178 DPRINTK ("%s: SETTRIGGER %d\n", __FUNCTION__, ival);
1179
1180 if (wr) {
1181 spin_lock_irq (&chip->lock);
1182
1183 if (ival & PCM_ENABLE_OUTPUT)
1184 forte_channel_start (&chip->play);
1185 else {
1186 chip->trigger = 1;
1187 forte_channel_prep (&chip->play);
1188 forte_channel_stop (&chip->play);
1189 }
1190
1191 spin_unlock_irq (&chip->lock);
1192 }
1193 else if (rd) {
1194 spin_lock_irq (&chip->lock);
1195
1196 if (ival & PCM_ENABLE_INPUT)
1197 forte_channel_start (&chip->rec);
1198 else {
1199 chip->trigger = 1;
1200 forte_channel_prep (&chip->rec);
1201 forte_channel_stop (&chip->rec);
1202 }
1203
1204 spin_unlock_irq (&chip->lock);
1205 }
1206
1207 return 0;
1208
1209 case SOUND_PCM_READ_RATE:
1210 DPRINTK ("%s: PCM_READ_RATE\n", __FUNCTION__);
1211 return put_user (chip->play.rate, p);
1212
1213 case SOUND_PCM_READ_CHANNELS:
1214 DPRINTK ("%s: PCM_READ_CHANNELS\n", __FUNCTION__);
1215 return put_user (chip->play.stereo, p);
1216
1217 case SOUND_PCM_READ_BITS:
1218 DPRINTK ("%s: PCM_READ_BITS\n", __FUNCTION__);
1219 return put_user (chip->play.format, p);
1220
1221 case SNDCTL_DSP_NONBLOCK:
1222 DPRINTK ("%s: DSP_NONBLOCK\n", __FUNCTION__);
1223 file->f_flags |= O_NONBLOCK;
1224 return 0;
1225
1226 default:
1227 DPRINTK ("Unsupported ioctl: %x (%p)\n", cmd, argp);
1228 break;
1229 }
1230
1231 return -EINVAL;
1232}
1233
1234
1235/**
1236 * forte_dsp_open:
1237 */
1238
1239static int
1240forte_dsp_open (struct inode *inode, struct file *file)
1241{
1242 struct forte_chip *chip = forte; /* FIXME: HACK FROM HELL! */
1243
1244 if (file->f_flags & O_NONBLOCK) {
1245 if (!mutex_trylock(&chip->open_mutex)) {
1246 DPRINTK ("%s: returning -EAGAIN\n", __FUNCTION__);
1247 return -EAGAIN;
1248 }
1249 }
1250 else {
1251 if (mutex_lock_interruptible(&chip->open_mutex)) {
1252 DPRINTK ("%s: returning -ERESTARTSYS\n", __FUNCTION__);
1253 return -ERESTARTSYS;
1254 }
1255 }
1256
1257 file->private_data = forte;
1258
1259 DPRINTK ("%s: dsp opened by %d\n", __FUNCTION__, current->pid);
1260
1261 if (file->f_mode & FMODE_WRITE)
1262 forte_channel_init (forte, &forte->play);
1263
1264 if (file->f_mode & FMODE_READ)
1265 forte_channel_init (forte, &forte->rec);
1266
1267 return nonseekable_open(inode, file);
1268}
1269
1270
1271/**
1272 * forte_dsp_release:
1273 */
1274
1275static int
1276forte_dsp_release (struct inode *inode, struct file *file)
1277{
1278 struct forte_chip *chip = file->private_data;
1279 int ret = 0;
1280
1281 DPRINTK ("%s: chip @ %p\n", __FUNCTION__, chip);
1282
1283 if (file->f_mode & FMODE_WRITE) {
1284 forte_channel_drain (&chip->play);
1285
1286 spin_lock_irq (&chip->lock);
1287
1288 forte_channel_free (chip, &chip->play);
1289
1290 spin_unlock_irq (&chip->lock);
1291 }
1292
1293 if (file->f_mode & FMODE_READ) {
1294 while (chip->rec.filled_frags > 0)
1295 interruptible_sleep_on (&chip->rec.wait);
1296
1297 spin_lock_irq (&chip->lock);
1298
1299 forte_channel_stop (&chip->rec);
1300 forte_channel_free (chip, &chip->rec);
1301
1302 spin_unlock_irq (&chip->lock);
1303 }
1304
1305 mutex_unlock(&chip->open_mutex);
1306
1307 return ret;
1308}
1309
1310
1311/**
1312 * forte_dsp_poll:
1313 *
1314 */
1315
1316static unsigned int
1317forte_dsp_poll (struct file *file, struct poll_table_struct *wait)
1318{
1319 struct forte_chip *chip;
1320 struct forte_channel *channel;
1321 unsigned int mask = 0;
1322
1323 chip = file->private_data;
1324
1325 if (file->f_mode & FMODE_WRITE) {
1326 channel = &chip->play;
1327
1328 if (channel->active)
1329 poll_wait (file, &channel->wait, wait);
1330
1331 spin_lock_irq (&chip->lock);
1332
1333 if (channel->frag_num - channel->filled_frags > 0)
1334 mask |= POLLOUT | POLLWRNORM;
1335
1336 spin_unlock_irq (&chip->lock);
1337 }
1338
1339 if (file->f_mode & FMODE_READ) {
1340 channel = &chip->rec;
1341
1342 if (channel->active)
1343 poll_wait (file, &channel->wait, wait);
1344
1345 spin_lock_irq (&chip->lock);
1346
1347 if (channel->filled_frags > 0)
1348 mask |= POLLIN | POLLRDNORM;
1349
1350 spin_unlock_irq (&chip->lock);
1351 }
1352
1353 return mask;
1354}
1355
1356
1357/**
1358 * forte_dsp_mmap:
1359 */
1360
1361static int
1362forte_dsp_mmap (struct file *file, struct vm_area_struct *vma)
1363{
1364 struct forte_chip *chip;
1365 struct forte_channel *channel;
1366 unsigned long size;
1367 int ret;
1368
1369 chip = file->private_data;
1370
1371 DPRINTK ("%s: start %lXh, size %ld, pgoff %ld\n", __FUNCTION__,
1372 vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_pgoff);
1373
1374 spin_lock_irq (&chip->lock);
1375
1376 if (vma->vm_flags & VM_WRITE && chip->play.active) {
1377 ret = -EBUSY;
1378 goto out;
1379 }
1380
1381 if (vma->vm_flags & VM_READ && chip->rec.active) {
1382 ret = -EBUSY;
1383 goto out;
1384 }
1385
1386 if (file->f_mode & FMODE_WRITE)
1387 channel = &chip->play;
1388 else if (file->f_mode & FMODE_READ)
1389 channel = &chip->rec;
1390 else {
1391 ret = -EINVAL;
1392 goto out;
1393 }
1394
1395 forte_channel_prep (channel);
1396 channel->mapped = 1;
1397
1398 if (vma->vm_pgoff != 0) {
1399 ret = -EINVAL;
1400 goto out;
1401 }
1402
1403 size = vma->vm_end - vma->vm_start;
1404
1405 if (size > channel->buf_pages * PAGE_SIZE) {
1406 DPRINTK ("%s: size (%ld) > buf_sz (%d) \n", __FUNCTION__,
1407 size, channel->buf_sz);
1408 ret = -EINVAL;
1409 goto out;
1410 }
1411
1412 if (remap_pfn_range(vma, vma->vm_start,
1413 virt_to_phys(channel->buf) >> PAGE_SHIFT,
1414 size, vma->vm_page_prot)) {
1415 DPRINTK ("%s: remap el a no worko\n", __FUNCTION__);
1416 ret = -EAGAIN;
1417 goto out;
1418 }
1419
1420 ret = 0;
1421
1422 out:
1423 spin_unlock_irq (&chip->lock);
1424 return ret;
1425}
1426
1427
1428/**
1429 * forte_dsp_write:
1430 */
1431
1432static ssize_t
1433forte_dsp_write (struct file *file, const char __user *buffer, size_t bytes,
1434 loff_t *ppos)
1435{
1436 struct forte_chip *chip;
1437 struct forte_channel *channel;
1438 unsigned int i = bytes, sz = 0;
1439 unsigned long flags;
1440
1441 if (!access_ok (VERIFY_READ, buffer, bytes))
1442 return -EFAULT;
1443
1444 chip = (struct forte_chip *) file->private_data;
1445
1446 if (!chip)
1447 BUG();
1448
1449 channel = &chip->play;
1450
1451 if (!channel)
1452 BUG();
1453
1454 spin_lock_irqsave (&chip->lock, flags);
1455
1456 /* Set up buffers with the right fragment size */
1457 forte_channel_prep (channel);
1458
1459 while (i) {
1460 /* All fragment buffers in use -> wait */
1461 if (channel->frag_num - channel->filled_frags == 0) {
1462 DECLARE_WAITQUEUE (wait, current);
1463
1464 /* For trigger or non-blocking operation, get out */
1465 if (chip->trigger || file->f_flags & O_NONBLOCK) {
1466 spin_unlock_irqrestore (&chip->lock, flags);
1467 return -EAGAIN;
1468 }
1469
1470 /* Otherwise wait for buffers */
1471 add_wait_queue (&channel->wait, &wait);
1472
1473 for (;;) {
1474 spin_unlock_irqrestore (&chip->lock, flags);
1475
1476 set_current_state (TASK_INTERRUPTIBLE);
1477 schedule();
1478
1479 spin_lock_irqsave (&chip->lock, flags);
1480
1481 if (channel->frag_num - channel->filled_frags)
1482 break;
1483 }
1484
1485 remove_wait_queue (&channel->wait, &wait);
1486 set_current_state (TASK_RUNNING);
1487
1488 if (signal_pending (current)) {
1489 spin_unlock_irqrestore (&chip->lock, flags);
1490 return -ERESTARTSYS;
1491 }
1492 }
1493
1494 if (channel->residue)
1495 sz = channel->residue;
1496 else if (i > channel->frag_sz)
1497 sz = channel->frag_sz;
1498 else
1499 sz = i;
1500
1501 spin_unlock_irqrestore (&chip->lock, flags);
1502
1503 if (copy_from_user ((void *) channel->buf + channel->swptr, buffer, sz))
1504 return -EFAULT;
1505
1506 spin_lock_irqsave (&chip->lock, flags);
1507
1508 /* Advance software pointer */
1509 buffer += sz;
1510 channel->swptr += sz;
1511 channel->swptr %= channel->buf_sz;
1512 i -= sz;
1513
1514 /* Only bump filled_frags if a full fragment has been written */
1515 if (channel->swptr % channel->frag_sz == 0) {
1516 channel->filled_frags++;
1517 channel->residue = 0;
1518 }
1519 else
1520 channel->residue = channel->frag_sz - sz;
1521
1522 /* If playback isn't active, start it */
1523 if (channel->active == 0 && chip->trigger == 0)
1524 forte_channel_start (channel);
1525 }
1526
1527 spin_unlock_irqrestore (&chip->lock, flags);
1528
1529 return bytes - i;
1530}
1531
1532
1533/**
1534 * forte_dsp_read:
1535 */
1536
1537static ssize_t
1538forte_dsp_read (struct file *file, char __user *buffer, size_t bytes,
1539 loff_t *ppos)
1540{
1541 struct forte_chip *chip;
1542 struct forte_channel *channel;
1543 unsigned int i = bytes, sz;
1544 unsigned long flags;
1545
1546 if (!access_ok (VERIFY_WRITE, buffer, bytes))
1547 return -EFAULT;
1548
1549 chip = (struct forte_chip *) file->private_data;
1550
1551 if (!chip)
1552 BUG();
1553
1554 channel = &chip->rec;
1555
1556 if (!channel)
1557 BUG();
1558
1559 spin_lock_irqsave (&chip->lock, flags);
1560
1561 /* Set up buffers with the right fragment size */
1562 forte_channel_prep (channel);
1563
1564 /* Start recording */
1565 if (!chip->trigger)
1566 forte_channel_start (channel);
1567
1568 while (i) {
1569 /* No fragment buffers in use -> wait */
1570 if (channel->filled_frags == 0) {
1571 DECLARE_WAITQUEUE (wait, current);
1572
1573 /* For trigger mode operation, get out */
1574 if (chip->trigger) {
1575 spin_unlock_irqrestore (&chip->lock, flags);
1576 return -EAGAIN;
1577 }
1578
1579 add_wait_queue (&channel->wait, &wait);
1580
1581 for (;;) {
1582 if (channel->active == 0)
1583 break;
1584
1585 if (channel->filled_frags)
1586 break;
1587
1588 spin_unlock_irqrestore (&chip->lock, flags);
1589
1590 set_current_state (TASK_INTERRUPTIBLE);
1591 schedule();
1592
1593 spin_lock_irqsave (&chip->lock, flags);
1594 }
1595
1596 set_current_state (TASK_RUNNING);
1597 remove_wait_queue (&channel->wait, &wait);
1598 }
1599
1600 if (i > channel->frag_sz)
1601 sz = channel->frag_sz;
1602 else
1603 sz = i;
1604
1605 spin_unlock_irqrestore (&chip->lock, flags);
1606
1607 if (copy_to_user (buffer, (void *)channel->buf+channel->swptr, sz)) {
1608 DPRINTK ("%s: copy_to_user failed\n", __FUNCTION__);
1609 return -EFAULT;
1610 }
1611
1612 spin_lock_irqsave (&chip->lock, flags);
1613
1614 /* Advance software pointer */
1615 buffer += sz;
1616 if (channel->filled_frags > 0)
1617 channel->filled_frags--;
1618 channel->swptr += channel->frag_sz;
1619 channel->swptr %= channel->buf_sz;
1620 i -= sz;
1621 }
1622
1623 spin_unlock_irqrestore (&chip->lock, flags);
1624
1625 return bytes - i;
1626}
1627
1628
1629static struct file_operations forte_dsp_fops = {
1630 .owner = THIS_MODULE,
1631 .llseek = &no_llseek,
1632 .read = &forte_dsp_read,
1633 .write = &forte_dsp_write,
1634 .poll = &forte_dsp_poll,
1635 .ioctl = &forte_dsp_ioctl,
1636 .open = &forte_dsp_open,
1637 .release = &forte_dsp_release,
1638 .mmap = &forte_dsp_mmap,
1639};
1640
1641
1642/* Common ------------------------------------------------------------------ */
1643
1644
1645/**
1646 * forte_interrupt:
1647 */
1648
1649static irqreturn_t
1650forte_interrupt (int irq, void *dev_id, struct pt_regs *regs)
1651{
1652 struct forte_chip *chip = dev_id;
1653 struct forte_channel *channel = NULL;
1654 u16 status, count;
1655
1656 status = inw (chip->iobase + FORTE_IRQ_STATUS);
1657
1658 /* If this is not for us, get outta here ASAP */
1659 if ((status & (FORTE_IRQ_PLAYBACK | FORTE_IRQ_CAPTURE)) == 0)
1660 return IRQ_NONE;
1661
1662 if (status & FORTE_IRQ_PLAYBACK) {
1663 channel = &chip->play;
1664
1665 spin_lock (&chip->lock);
1666
1667 if (channel->frag_sz == 0)
1668 goto pack;
1669
1670 /* Declare a fragment done */
1671 if (channel->filled_frags > 0)
1672 channel->filled_frags--;
1673 channel->bytes += channel->frag_sz;
1674 channel->nr_irqs++;
1675
1676 /* Flip-flop between buffer I and II */
1677 channel->next_buf ^= 1;
1678
1679 /* Advance hardware pointer by fragment size and wrap around */
1680 channel->hwptr += channel->frag_sz;
1681 channel->hwptr %= channel->buf_sz;
1682
1683 /* Buffer I or buffer II BAR */
1684 outl (channel->buf_handle + channel->hwptr,
1685 channel->next_buf == 0 ?
1686 channel->iobase + FORTE_PLY_BUF1 :
1687 channel->iobase + FORTE_PLY_BUF2);
1688
1689 /* If the currently playing fragment is last, schedule pause */
1690 if (channel->filled_frags == 1)
1691 forte_channel_pause (channel);
1692
1693 pack:
1694 /* Acknowledge interrupt */
1695 outw (FORTE_IRQ_PLAYBACK, chip->iobase + FORTE_IRQ_STATUS);
1696
1697 if (waitqueue_active (&channel->wait))
1698 wake_up_all (&channel->wait);
1699
1700 spin_unlock (&chip->lock);
1701 }
1702
1703 if (status & FORTE_IRQ_CAPTURE) {
1704 channel = &chip->rec;
1705 spin_lock (&chip->lock);
1706
1707 /* One fragment filled */
1708 channel->filled_frags++;
1709
1710 /* Get # of completed bytes */
1711 count = inw (channel->iobase + FORTE_PLY_COUNT) + 1;
1712
1713 if (count == 0) {
1714 DPRINTK ("%s: last, filled_frags = %d\n", __FUNCTION__,
1715 channel->filled_frags);
1716 channel->filled_frags = 0;
1717 goto rack;
1718 }
1719
1720 /* Buffer I or buffer II BAR */
1721 outl (channel->buf_handle + channel->hwptr,
1722 channel->next_buf == 0 ?
1723 channel->iobase + FORTE_PLY_BUF1 :
1724 channel->iobase + FORTE_PLY_BUF2);
1725
1726 /* Flip-flop between buffer I and II */
1727 channel->next_buf ^= 1;
1728
1729 /* Advance hardware pointer by fragment size and wrap around */
1730 channel->hwptr += channel->frag_sz;
1731 channel->hwptr %= channel->buf_sz;
1732
1733 /* Out of buffers */
1734 if (channel->filled_frags == channel->frag_num - 1)
1735 forte_channel_stop (channel);
1736 rack:
1737 /* Acknowledge interrupt */
1738 outw (FORTE_IRQ_CAPTURE, chip->iobase + FORTE_IRQ_STATUS);
1739
1740 spin_unlock (&chip->lock);
1741
1742 if (waitqueue_active (&channel->wait))
1743 wake_up_all (&channel->wait);
1744 }
1745
1746 return IRQ_HANDLED;
1747}
1748
1749
1750/**
1751 * forte_proc_read:
1752 */
1753
1754static int
1755forte_proc_read (char *page, char **start, off_t off, int count,
1756 int *eof, void *data)
1757{
1758 int i = 0, p_rate, p_chan, r_rate;
1759 unsigned short p_reg, r_reg;
1760
1761 i += sprintf (page, "ForteMedia FM801 OSS Lite driver\n%s\n \n",
1762 DRIVER_VERSION);
1763
1764 if (!forte->iobase)
1765 return i;
1766
1767 p_rate = p_chan = -1;
1768 p_reg = inw (forte->iobase + FORTE_PLY_CTRL);
1769 p_rate = (p_reg >> 8) & 15;
1770 p_chan = (p_reg >> 12) & 3;
1771
1772 if (p_rate >= 0 || p_rate <= 10)
1773 p_rate = rates[p_rate];
1774
1775 if (p_chan >= 0 || p_chan <= 2)
1776 p_chan = channels[p_chan];
1777
1778 r_rate = -1;
1779 r_reg = inw (forte->iobase + FORTE_CAP_CTRL);
1780 r_rate = (r_reg >> 8) & 15;
1781
1782 if (r_rate >= 0 || r_rate <= 10)
1783 r_rate = rates[r_rate];
1784
1785 i += sprintf (page + i,
1786 " Playback Capture\n"
1787 "FIFO empty : %-3s %-3s\n"
1788 "Buf1 Last : %-3s %-3s\n"
1789 "Buf2 Last : %-3s %-3s\n"
1790 "Started : %-3s %-3s\n"
1791 "Paused : %-3s %-3s\n"
1792 "Immed Stop : %-3s %-3s\n"
1793 "Rate : %-5d %-5d\n"
1794 "Channels : %-5d -\n"
1795 "16-bit : %-3s %-3s\n"
1796 "Stereo : %-3s %-3s\n"
1797 " \n"
1798 "Buffer Sz : %-6d %-6d\n"
1799 "Frag Sz : %-6d %-6d\n"
1800 "Frag Num : %-6d %-6d\n"
1801 "Frag msecs : %-6d %-6d\n"
1802 "Used Frags : %-6d %-6d\n"
1803 "Mapped : %-3s %-3s\n",
1804 p_reg & 1<<0 ? "yes" : "no",
1805 r_reg & 1<<0 ? "yes" : "no",
1806 p_reg & 1<<1 ? "yes" : "no",
1807 r_reg & 1<<1 ? "yes" : "no",
1808 p_reg & 1<<2 ? "yes" : "no",
1809 r_reg & 1<<2 ? "yes" : "no",
1810 p_reg & 1<<5 ? "yes" : "no",
1811 r_reg & 1<<5 ? "yes" : "no",
1812 p_reg & 1<<6 ? "yes" : "no",
1813 r_reg & 1<<6 ? "yes" : "no",
1814 p_reg & 1<<7 ? "yes" : "no",
1815 r_reg & 1<<7 ? "yes" : "no",
1816 p_rate, r_rate,
1817 p_chan,
1818 p_reg & 1<<14 ? "yes" : "no",
1819 r_reg & 1<<14 ? "yes" : "no",
1820 p_reg & 1<<15 ? "yes" : "no",
1821 r_reg & 1<<15 ? "yes" : "no",
1822 forte->play.buf_sz, forte->rec.buf_sz,
1823 forte->play.frag_sz, forte->rec.frag_sz,
1824 forte->play.frag_num, forte->rec.frag_num,
1825 forte->play.frag_msecs, forte->rec.frag_msecs,
1826 forte->play.filled_frags, forte->rec.filled_frags,
1827 forte->play.mapped ? "yes" : "no",
1828 forte->rec.mapped ? "yes" : "no"
1829 );
1830
1831 return i;
1832}
1833
1834
1835/**
1836 * forte_proc_init:
1837 *
1838 * Creates driver info entries in /proc
1839 */
1840
1841static int __init
1842forte_proc_init (void)
1843{
1844 if (!proc_mkdir ("driver/forte", NULL))
1845 return -EIO;
1846
1847 if (!create_proc_read_entry ("driver/forte/chip", 0, NULL, forte_proc_read, forte)) {
1848 remove_proc_entry ("driver/forte", NULL);
1849 return -EIO;
1850 }
1851
1852 if (!create_proc_read_entry("driver/forte/ac97", 0, NULL, ac97_read_proc, forte->ac97)) {
1853 remove_proc_entry ("driver/forte/chip", NULL);
1854 remove_proc_entry ("driver/forte", NULL);
1855 return -EIO;
1856 }
1857
1858 return 0;
1859}
1860
1861
1862/**
1863 * forte_proc_remove:
1864 *
1865 * Removes driver info entries in /proc
1866 */
1867
1868static void
1869forte_proc_remove (void)
1870{
1871 remove_proc_entry ("driver/forte/ac97", NULL);
1872 remove_proc_entry ("driver/forte/chip", NULL);
1873 remove_proc_entry ("driver/forte", NULL);
1874}
1875
1876
1877/**
1878 * forte_chip_init:
1879 * @chip: Chip instance to initialize
1880 *
1881 * Description:
1882 * Resets chip, configures codec and registers the driver with
1883 * the sound subsystem.
1884 *
1885 * Press and hold Start for 8 secs, then switch on Run
1886 * and hold for 4 seconds. Let go of Start. Numbers
1887 * assume a properly oiled TWG.
1888 */
1889
1890static int __devinit
1891forte_chip_init (struct forte_chip *chip)
1892{
1893 u8 revision;
1894 u16 cmdw;
1895 struct ac97_codec *codec;
1896
1897 pci_read_config_byte (chip->pci_dev, PCI_REVISION_ID, &revision);
1898
1899 if (revision >= 0xB1) {
1900 chip->multichannel = 1;
1901 printk (KERN_INFO PFX "Multi-channel device detected.\n");
1902 }
1903
1904 /* Reset chip */
1905 outw (FORTE_CC_CODEC_RESET | FORTE_CC_AC97_RESET,
1906 chip->iobase + FORTE_CODEC_CTRL);
1907 udelay(100);
1908 outw (0, chip->iobase + FORTE_CODEC_CTRL);
1909
1910 /* Request read from AC97 */
1911 outw (FORTE_AC97_READ | (0 << FORTE_AC97_ADDR_SHIFT),
1912 chip->iobase + FORTE_AC97_CMD);
1913 mdelay(750);
1914
1915 if ((inw (chip->iobase + FORTE_AC97_CMD) & (3<<8)) != (1<<8)) {
1916 printk (KERN_INFO PFX "AC97 codec not responding");
1917 return -EIO;
1918 }
1919
1920 /* Init volume */
1921 outw (0x0808, chip->iobase + FORTE_PCM_VOL);
1922 outw (0x9f1f, chip->iobase + FORTE_FM_VOL);
1923 outw (0x8808, chip->iobase + FORTE_I2S_VOL);
1924
1925 /* I2S control - I2S mode */
1926 outw (0x0003, chip->iobase + FORTE_I2S_MODE);
1927
1928 /* Interrupt setup - unmask PLAYBACK & CAPTURE */
1929 cmdw = inw (chip->iobase + FORTE_IRQ_MASK);
1930 cmdw &= ~0x0003;
1931 outw (cmdw, chip->iobase + FORTE_IRQ_MASK);
1932
1933 /* Interrupt clear */
1934 outw (FORTE_IRQ_PLAYBACK|FORTE_IRQ_CAPTURE,
1935 chip->iobase + FORTE_IRQ_STATUS);
1936
1937 /* Set up the AC97 codec */
1938 if ((codec = ac97_alloc_codec()) == NULL)
1939 return -ENOMEM;
1940 codec->private_data = chip;
1941 codec->codec_read = forte_ac97_read;
1942 codec->codec_write = forte_ac97_write;
1943 codec->id = 0;
1944
1945 if (ac97_probe_codec (codec) == 0) {
1946 printk (KERN_ERR PFX "codec probe failed\n");
1947 ac97_release_codec(codec);
1948 return -1;
1949 }
1950
1951 /* Register mixer */
1952 if ((codec->dev_mixer =
1953 register_sound_mixer (&forte_mixer_fops, -1)) < 0) {
1954 printk (KERN_ERR PFX "couldn't register mixer!\n");
1955 ac97_release_codec(codec);
1956 return -1;
1957 }
1958
1959 chip->ac97 = codec;
1960
1961 /* Register DSP */
1962 if ((chip->dsp = register_sound_dsp (&forte_dsp_fops, -1) ) < 0) {
1963 printk (KERN_ERR PFX "couldn't register dsp!\n");
1964 return -1;
1965 }
1966
1967 /* Register with /proc */
1968 if (forte_proc_init()) {
1969 printk (KERN_ERR PFX "couldn't add entries to /proc!\n");
1970 return -1;
1971 }
1972
1973 return 0;
1974}
1975
1976
1977/**
1978 * forte_probe:
1979 * @pci_dev: PCI struct for probed device
1980 * @pci_id:
1981 *
1982 * Description:
1983 * Allocates chip instance, I/O region, and IRQ
1984 */
1985static int __init
1986forte_probe (struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
1987{
1988 struct forte_chip *chip;
1989 int ret = 0;
1990
1991 /* FIXME: Support more than one chip */
1992 if (found++)
1993 return -EIO;
1994
1995 /* Ignition */
1996 if (pci_enable_device (pci_dev))
1997 return -EIO;
1998
1999 pci_set_master (pci_dev);
2000
2001 /* Allocate chip instance and configure */
2002 forte = (struct forte_chip *)
2003 kmalloc (sizeof (struct forte_chip), GFP_KERNEL);
2004 chip = forte;
2005
2006 if (chip == NULL) {
2007 printk (KERN_WARNING PFX "Out of memory");
2008 return -ENOMEM;
2009 }
2010
2011 memset (chip, 0, sizeof (struct forte_chip));
2012 chip->pci_dev = pci_dev;
2013
2014 mutex_init(&chip->open_mutex);
2015 spin_lock_init (&chip->lock);
2016 spin_lock_init (&chip->ac97_lock);
2017
2018 if (! request_region (pci_resource_start (pci_dev, 0),
2019 pci_resource_len (pci_dev, 0), DRIVER_NAME)) {
2020 printk (KERN_WARNING PFX "Unable to reserve I/O space");
2021 ret = -ENOMEM;
2022 goto error;
2023 }
2024
2025 chip->iobase = pci_resource_start (pci_dev, 0);
2026 chip->irq = pci_dev->irq;
2027
2028 if (request_irq (chip->irq, forte_interrupt, IRQF_SHARED, DRIVER_NAME,
2029 chip)) {
2030 printk (KERN_WARNING PFX "Unable to reserve IRQ");
2031 ret = -EIO;
2032 goto error;
2033 }
2034
2035 pci_set_drvdata (pci_dev, chip);
2036
2037 printk (KERN_INFO PFX "FM801 chip found at 0x%04lX-0x%16llX IRQ %u\n",
2038 chip->iobase, (unsigned long long)pci_resource_end (pci_dev, 0),
2039 chip->irq);
2040
2041 /* Power it up */
2042 if ((ret = forte_chip_init (chip)) == 0)
2043 return 0;
2044
2045 error:
2046 if (chip->irq)
2047 free_irq (chip->irq, chip);
2048
2049 if (chip->iobase)
2050 release_region (pci_resource_start (pci_dev, 0),
2051 pci_resource_len (pci_dev, 0));
2052
2053 kfree (chip);
2054
2055 return ret;
2056}
2057
2058
2059/**
2060 * forte_remove:
2061 * @pci_dev: PCI device to unclaim
2062 *
2063 */
2064
2065static void
2066forte_remove (struct pci_dev *pci_dev)
2067{
2068 struct forte_chip *chip = pci_get_drvdata (pci_dev);
2069
2070 if (chip == NULL)
2071 return;
2072
2073 /* Turn volume down to avoid popping */
2074 outw (0x1f1f, chip->iobase + FORTE_PCM_VOL);
2075 outw (0x1f1f, chip->iobase + FORTE_FM_VOL);
2076 outw (0x1f1f, chip->iobase + FORTE_I2S_VOL);
2077
2078 forte_proc_remove();
2079 free_irq (chip->irq, chip);
2080 release_region (chip->iobase, pci_resource_len (pci_dev, 0));
2081
2082 unregister_sound_dsp (chip->dsp);
2083 unregister_sound_mixer (chip->ac97->dev_mixer);
2084 ac97_release_codec(chip->ac97);
2085 kfree (chip);
2086
2087 printk (KERN_INFO PFX "driver released\n");
2088}
2089
2090
static struct pci_device_id forte_pci_ids[] = {
	/* ForteMedia (0x1319) FM801 (0x0801), any subsystem IDs */
	{ 0x1319, 0x0801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0, },
	{ 0, }	/* terminator */
};
2095
2096
/* PCI driver glue: binds forte_probe/forte_remove to the ID table */
static struct pci_driver forte_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = forte_pci_ids,
	.probe = forte_probe,
	.remove = forte_remove,

};
2104
2105
2106/**
2107 * forte_init_module:
2108 *
2109 */
2110
2111static int __init
2112forte_init_module (void)
2113{
2114 printk (KERN_INFO PFX DRIVER_VERSION "\n");
2115
2116 return pci_register_driver (&forte_pci_driver);
2117}
2118
2119
2120/**
2121 * forte_cleanup_module:
2122 *
2123 */
2124
2125static void __exit
2126forte_cleanup_module (void)
2127{
2128 pci_unregister_driver (&forte_pci_driver);
2129}
2130
2131
/* Module entry/exit hooks and metadata */
module_init(forte_init_module);
module_exit(forte_cleanup_module);

MODULE_AUTHOR("Martin K. Petersen <mkp@mkp.net>");
MODULE_DESCRIPTION("ForteMedia FM801 OSS Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE (pci, forte_pci_ids);
diff --git a/sound/oss/gus.h b/sound/oss/gus.h
deleted file mode 100644
index 3d5271baf042..000000000000
--- a/sound/oss/gus.h
+++ /dev/null
@@ -1,24 +0,0 @@
/* Cross-file prototypes shared between the GUS driver's source files. */

#include "ad1848.h"

/* From gus_card.c */
int gus_set_midi_irq(int num);
irqreturn_t gusintr(int irq, void *dev_id, struct pt_regs * dummy);

/* From gus_wave.c */
int gus_wave_detect(int baseaddr);
void gus_wave_init(struct address_info *hw_config);
void gus_wave_unload (struct address_info *hw_config);
void gus_voice_irq(void);
void gus_write8(int reg, unsigned int data);
void guswave_dma_irq(void);
void gus_delay(void);
int gus_default_mixer_ioctl (int dev, unsigned int cmd, void __user *arg);
void gus_timer_command (unsigned int addr, unsigned int val);

/* From gus_midi.c */
void gus_midi_init(struct address_info *hw_config);
void gus_midi_interrupt(int dummy);

/* From ics2101.c */
int ics2101_mixer_init(void);
diff --git a/sound/oss/gus_card.c b/sound/oss/gus_card.c
deleted file mode 100644
index 4539269b3d95..000000000000
--- a/sound/oss/gus_card.c
+++ /dev/null
@@ -1,292 +0,0 @@
1/*
2 * sound/oss/gus_card.c
3 *
4 * Detection routine for the Gravis Ultrasound.
5 *
6 * Copyright (C) by Hannu Savolainen 1993-1997
7 *
8 *
9 * Frank van de Pol : Fixed GUS MAX interrupt handling, enabled simultanious
10 * usage of CS4231A codec, GUS wave and MIDI for GUS MAX.
11 * Christoph Hellwig: Adapted to module_init/module_exit, simple cleanups.
12 *
13 * Status:
14 * Tested...
15 */
16
17
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/module.h>
21
22#include "sound_config.h"
23
24#include "gus.h"
25#include "gus_hw.h"
26
/* Forward declaration of the shared interrupt handler defined below. */
irqreturn_t gusintr(int irq, void *dev_id, struct pt_regs *dummy);

/* Resources of the detected card; also read by gus_wave.c/gus_midi.c. */
int gus_base = 0, gus_irq = 0, gus_dma = 0;
/* Nonzero to avoid DMA for wave table transfers (GUS MAX option). */
int gus_no_wave_dma = 0;
extern int gus_wave_volume;
extern int gus_pcm_volume;
extern int have_gus_max;
int gus_pnp_flag = 0;		/* Set when the card is the PnP variant */
#ifdef CONFIG_SOUND_GUS16
static int db16;	/* Has a Gus16 AD1848 on it */
#endif
38
39static void __init attach_gus(struct address_info *hw_config)
40{
41 gus_wave_init(hw_config);
42
43 if (sound_alloc_dma(hw_config->dma, "GUS"))
44 printk(KERN_ERR "gus_card.c: Can't allocate DMA channel %d\n", hw_config->dma);
45 if (hw_config->dma2 != -1 && hw_config->dma2 != hw_config->dma)
46 if (sound_alloc_dma(hw_config->dma2, "GUS(2)"))
47 printk(KERN_ERR "gus_card.c: Can't allocate DMA channel %d\n", hw_config->dma2);
48 gus_midi_init(hw_config);
49 if(request_irq(hw_config->irq, gusintr, 0, "Gravis Ultrasound", hw_config)<0)
50 printk(KERN_ERR "gus_card.c: Unable to allocate IRQ %d\n", hw_config->irq);
51
52 return;
53}
54
55static int __init probe_gus(struct address_info *hw_config)
56{
57 int irq;
58 int io_addr;
59
60 if (hw_config->card_subtype == 1)
61 gus_pnp_flag = 1;
62
63 irq = hw_config->irq;
64
65 if (hw_config->card_subtype == 0) /* GUS/MAX/ACE */
66 if (irq != 3 && irq != 5 && irq != 7 && irq != 9 &&
67 irq != 11 && irq != 12 && irq != 15)
68 {
69 printk(KERN_ERR "GUS: Unsupported IRQ %d\n", irq);
70 return 0;
71 }
72 if (gus_wave_detect(hw_config->io_base))
73 return 1;
74
75#ifndef EXCLUDE_GUS_IODETECT
76
77 /*
78 * Look at the possible base addresses (0x2X0, X=1, 2, 3, 4, 5, 6)
79 */
80
81 for (io_addr = 0x210; io_addr <= 0x260; io_addr += 0x10) {
82 if (io_addr == hw_config->io_base) /* Already tested */
83 continue;
84 if (gus_wave_detect(io_addr)) {
85 hw_config->io_base = io_addr;
86 return 1;
87 }
88 }
89#endif
90
91 printk("NO GUS card found !\n");
92 return 0;
93}
94
/*
 * unload_gus - undo attach_gus()/gus_wave_init(): shut down the synth,
 * then release the I/O regions, the IRQ and the DMA channel(s).
 */
static void __exit unload_gus(struct address_info *hw_config)
{
	DDB(printk("unload_gus(%x)\n", hw_config->io_base));

	gus_wave_unload(hw_config);

	release_region(hw_config->io_base, 16);
	release_region(hw_config->io_base + 0x100, 12);	/* 0x10c-> is MAX */
	free_irq(hw_config->irq, hw_config);

	sound_free_dma(hw_config->dma);

	/* The second channel is freed only if it was separately allocated. */
	if (hw_config->dma2 != -1 && hw_config->dma2 != hw_config->dma)
		sound_free_dma(hw_config->dma2);
}
110
/*
 * gusintr - shared interrupt handler.  First forwards the interrupt to
 * any codec sharing the line (GUS MAX / GUS16 AD1848), then services
 * GF1 sources until the IRQ status register reads zero.
 */
irqreturn_t gusintr(int irq, void *dev_id, struct pt_regs *dummy)
{
	unsigned char src;
	extern int gus_timer_enabled;
	int handled = 0;

#ifdef CONFIG_SOUND_GUSMAX
	if (have_gus_max) {
		struct address_info *hw_config = dev_id;
		adintr(irq, (void *)hw_config->slots[1], NULL);
	}
#endif
#ifdef CONFIG_SOUND_GUS16
	if (db16) {
		struct address_info *hw_config = dev_id;
		adintr(irq, (void *)hw_config->slots[3], NULL);
	}
#endif

	while (1)
	{
		if (!(src = inb(u_IrqStatus)))
			break;
		handled = 1;
		if (src & DMA_TC_IRQ)
		{
			guswave_dma_irq();
		}
		if (src & (MIDI_TX_IRQ | MIDI_RX_IRQ))
		{
			gus_midi_interrupt(0);
		}
		if (src & (GF1_TIMER1_IRQ | GF1_TIMER2_IRQ))
		{
			if (gus_timer_enabled)
				sound_timer_interrupt();
			gus_write8(0x45, 0);	/* Ack IRQ */
			gus_timer_command(4, 0x80);	/* Reset IRQ flags */
		}
		if (src & (WAVETABLE_IRQ | ENVELOPE_IRQ))
			gus_voice_irq();
	}
	return IRQ_RETVAL(handled);
}
155
156/*
157 * Some extra code for the 16 bit sampling option
158 */
159
160#ifdef CONFIG_SOUND_GUS16
161
/*
 * init_gus_db16 - probe and initialize the 16 bit sampling daughterboard
 * (an AD1848 codec at hw_config->io_base).  On success the new audio
 * device number is stored in hw_config->slots[3] and 1 is returned;
 * on failure all claimed resources are released and 0 is returned.
 */
static int __init init_gus_db16(struct address_info *hw_config)
{
	struct resource *ports;

	ports = request_region(hw_config->io_base, 4, "ad1848");
	if (!ports)
		return 0;

	if (!ad1848_detect(ports, NULL, hw_config->osp)) {
		release_region(hw_config->io_base, 4);
		return 0;
	}

	/* Rebalance the default volumes for the codec path. */
	gus_pcm_volume = 100;
	gus_wave_volume = 90;

	hw_config->slots[3] = ad1848_init("GUS 16 bit sampling", ports,
					  hw_config->irq,
					  hw_config->dma,
					  hw_config->dma, 0,
					  hw_config->osp,
					  THIS_MODULE);
	return 1;
}
186
/* unload_gus_db16 - tear down the AD1848 set up by init_gus_db16(). */
static void __exit unload_gus_db16(struct address_info *hw_config)
{

	ad1848_unload(hw_config->io_base,
		      hw_config->irq,
		      hw_config->dma,
		      hw_config->dma, 0);
	sound_unload_audiodev(hw_config->slots[3]);
}
196#endif
197
#ifdef CONFIG_SOUND_GUS16
static int gus16;		/* Set to probe the 16 bit daughterboard */
#endif
#ifdef CONFIG_SOUND_GUSMAX
static int no_wave_dma;	/* Set if no dma is to be used for the
                           wave table (GF1 chip) */
#endif


/*
 * Note DMA2 of -1 has the right meaning in the GUS driver as well
 * as here.
 */

static struct address_info cfg;		/* Card configuration built at init */

/* Module/command-line parameters; -1 means "not supplied". */
static int __initdata io = -1;
static int __initdata irq = -1;
static int __initdata dma = -1;
static int __initdata dma16 = -1;	/* Set this for modules that need it */
static int __initdata type = 0;		/* 1 for PnP */

module_param(io, int, 0);
module_param(irq, int, 0);
module_param(dma, int, 0);
module_param(dma16, int, 0);
module_param(type, int, 0);
#ifdef CONFIG_SOUND_GUSMAX
module_param(no_wave_dma, int, 0);
#endif
#ifdef CONFIG_SOUND_GUS16
module_param(db16, int, 0);
module_param(gus16, int, 0);
#endif
MODULE_LICENSE("GPL");
233
234static int __init init_gus(void)
235{
236 printk(KERN_INFO "Gravis Ultrasound audio driver Copyright (C) by Hannu Savolainen 1993-1996\n");
237
238 cfg.io_base = io;
239 cfg.irq = irq;
240 cfg.dma = dma;
241 cfg.dma2 = dma16;
242 cfg.card_subtype = type;
243#ifdef CONFIG_SOUND_GUSMAX
244 gus_no_wave_dma = no_wave_dma;
245#endif
246
247 if (cfg.io_base == -1 || cfg.dma == -1 || cfg.irq == -1) {
248 printk(KERN_ERR "I/O, IRQ, and DMA are mandatory\n");
249 return -EINVAL;
250 }
251
252#ifdef CONFIG_SOUND_GUS16
253 if (gus16 && init_gus_db16(&cfg))
254 db16 = 1;
255#endif
256 if (!probe_gus(&cfg))
257 return -ENODEV;
258 attach_gus(&cfg);
259
260 return 0;
261}
262
/* Module exit: unload the daughterboard (if any), then the GUS itself. */
static void __exit cleanup_gus(void)
{
#ifdef CONFIG_SOUND_GUS16
	if (db16)
		unload_gus_db16(&cfg);
#endif
	unload_gus(&cfg);
}

module_init(init_gus);
module_exit(cleanup_gus);
274
275#ifndef MODULE
276static int __init setup_gus(char *str)
277{
278 /* io, irq, dma, dma2 */
279 int ints[5];
280
281 str = get_options(str, ARRAY_SIZE(ints), ints);
282
283 io = ints[1];
284 irq = ints[2];
285 dma = ints[3];
286 dma16 = ints[4];
287
288 return 1;
289}
290
291__setup("gus=", setup_gus);
292#endif
diff --git a/sound/oss/gus_hw.h b/sound/oss/gus_hw.h
deleted file mode 100644
index f97a0b8670e3..000000000000
--- a/sound/oss/gus_hw.h
+++ /dev/null
@@ -1,50 +0,0 @@
/*
 * gus_hw.h - GF1 register addresses and bit definitions for the Gravis
 * Ultrasound family.  All port addresses are relative to gus_base.
 */

/*
 * I/O addresses
 */

#define u_Base (gus_base + 0x000)
#define u_Mixer u_Base
#define u_Status (gus_base + 0x006)
#define u_TimerControl (gus_base + 0x008)
#define u_TimerData (gus_base + 0x009)
#define u_IRQDMAControl (gus_base + 0x00b)
#define u_MidiControl (gus_base + 0x100)
#define MIDI_RESET 0x03
#define MIDI_ENABLE_XMIT 0x20
#define MIDI_ENABLE_RCV 0x80
#define u_MidiStatus u_MidiControl
#define MIDI_RCV_FULL 0x01
#define MIDI_XMIT_EMPTY 0x02
#define MIDI_FRAME_ERR 0x10
#define MIDI_OVERRUN 0x20
#define MIDI_IRQ_PEND 0x80
#define u_MidiData (gus_base + 0x101)
#define u_Voice (gus_base + 0x102)
#define u_Command (gus_base + 0x103)
#define u_DataLo (gus_base + 0x104)
#define u_DataHi (gus_base + 0x105)
#define u_MixData (gus_base + 0x106)	/* Rev. 3.7+ mixing */
#define u_MixSelect (gus_base + 0x506)	/* registers. */
#define u_IrqStatus u_Status
/* Bits of the IRQ status register: */
# define MIDI_TX_IRQ 0x01	/* pending MIDI xmit IRQ */
# define MIDI_RX_IRQ 0x02	/* pending MIDI recv IRQ */
# define GF1_TIMER1_IRQ 0x04	/* general purpose timer */
# define GF1_TIMER2_IRQ 0x08	/* general purpose timer */
# define WAVETABLE_IRQ 0x20	/* pending wavetable IRQ */
# define ENVELOPE_IRQ 0x40	/* pending volume envelope IRQ */
# define DMA_TC_IRQ 0x80	/* pending dma tc IRQ */

/* Mixer chip identifiers and ICS2101 channel numbers: */
#define ICS2101 1
# define ICS_MIXDEVS 6
# define DEV_MIC 0
# define DEV_LINE 1
# define DEV_CD 2
# define DEV_GF1 3
# define DEV_UNUSED 4
# define DEV_VOL 5

# define CHN_LEFT 0
# define CHN_RIGHT 1
#define CS4231 2
#define u_DRAMIO (gus_base + 0x107)
diff --git a/sound/oss/gus_linearvol.h b/sound/oss/gus_linearvol.h
deleted file mode 100644
index 7ad0c30d4fd9..000000000000
--- a/sound/oss/gus_linearvol.h
+++ /dev/null
@@ -1,18 +0,0 @@
/*
 * 128-entry lookup table used by gus_linear_vol() to translate a linear
 * 0..127 volume into the card's volume word (presumably the GF1's
 * logarithmic exponent/mantissa encoding -- see gus_adagio_vol()).
 */
static unsigned short gus_linearvol[128] = {
	0x0000, 0x08ff, 0x09ff, 0x0a80, 0x0aff, 0x0b40, 0x0b80, 0x0bc0,
	0x0bff, 0x0c20, 0x0c40, 0x0c60, 0x0c80, 0x0ca0, 0x0cc0, 0x0ce0,
	0x0cff, 0x0d10, 0x0d20, 0x0d30, 0x0d40, 0x0d50, 0x0d60, 0x0d70,
	0x0d80, 0x0d90, 0x0da0, 0x0db0, 0x0dc0, 0x0dd0, 0x0de0, 0x0df0,
	0x0dff, 0x0e08, 0x0e10, 0x0e18, 0x0e20, 0x0e28, 0x0e30, 0x0e38,
	0x0e40, 0x0e48, 0x0e50, 0x0e58, 0x0e60, 0x0e68, 0x0e70, 0x0e78,
	0x0e80, 0x0e88, 0x0e90, 0x0e98, 0x0ea0, 0x0ea8, 0x0eb0, 0x0eb8,
	0x0ec0, 0x0ec8, 0x0ed0, 0x0ed8, 0x0ee0, 0x0ee8, 0x0ef0, 0x0ef8,
	0x0eff, 0x0f04, 0x0f08, 0x0f0c, 0x0f10, 0x0f14, 0x0f18, 0x0f1c,
	0x0f20, 0x0f24, 0x0f28, 0x0f2c, 0x0f30, 0x0f34, 0x0f38, 0x0f3c,
	0x0f40, 0x0f44, 0x0f48, 0x0f4c, 0x0f50, 0x0f54, 0x0f58, 0x0f5c,
	0x0f60, 0x0f64, 0x0f68, 0x0f6c, 0x0f70, 0x0f74, 0x0f78, 0x0f7c,
	0x0f80, 0x0f84, 0x0f88, 0x0f8c, 0x0f90, 0x0f94, 0x0f98, 0x0f9c,
	0x0fa0, 0x0fa4, 0x0fa8, 0x0fac, 0x0fb0, 0x0fb4, 0x0fb8, 0x0fbc,
	0x0fc0, 0x0fc4, 0x0fc8, 0x0fcc, 0x0fd0, 0x0fd4, 0x0fd8, 0x0fdc,
	0x0fe0, 0x0fe4, 0x0fe8, 0x0fec, 0x0ff0, 0x0ff4, 0x0ff8, 0x0ffc
};
diff --git a/sound/oss/gus_midi.c b/sound/oss/gus_midi.c
deleted file mode 100644
index d1997a417ad0..000000000000
--- a/sound/oss/gus_midi.c
+++ /dev/null
@@ -1,256 +0,0 @@
1/*
2 * sound/oss/gus_midi.c
3 *
4 * The low level driver for the GUS Midi Interface.
5 *
6 *
7 * Copyright (C) by Hannu Savolainen 1993-1997
8 *
9 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
10 * Version 2 (June 1991). See the "COPYING" file distributed with this software
11 * for more info.
12 *
13 * Changes:
14 * 11-10-2000 Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
15 * Added __init to gus_midi_init()
16 */
17
18#include <linux/init.h>
19#include <linux/spinlock.h>
20#include "sound_config.h"
21
22#include "gus.h"
23#include "gus_hw.h"
24
/* Driver-local MIDI state. */
static int midi_busy, input_opened;
static int my_dev;			/* Our OSS midi device number */
static int output_used;			/* Set once anything was transmitted */
static volatile unsigned char gus_midi_control;	/* Shadow of the control reg */
static void (*midi_input_intr) (int dev, unsigned char data);

/* Software transmit queue; qhead/qtail wrap at 256 (unsigned char). */
static unsigned char tmp_queue[256];
extern int gus_pnp_flag;
static volatile int qlen;
static volatile unsigned char qhead, qtail;
extern int gus_base, gus_irq, gus_dma;
extern int *gus_osp;
extern spinlock_t gus_lock;
38
/* Read the MIDI status port (MIDI_RCV_FULL / MIDI_XMIT_EMPTY etc.). */
static int GUS_MIDI_STATUS(void)
{
	return inb(u_MidiStatus);
}
43
44static int gus_midi_open(int dev, int mode, void (*input) (int dev, unsigned char data), void (*output) (int dev))
45{
46 if (midi_busy)
47 {
48/* printk("GUS: Midi busy\n");*/
49 return -EBUSY;
50 }
51 outb((MIDI_RESET), u_MidiControl);
52 gus_delay();
53
54 gus_midi_control = 0;
55 input_opened = 0;
56
57 if (mode == OPEN_READ || mode == OPEN_READWRITE)
58 if (!gus_pnp_flag)
59 {
60 gus_midi_control |= MIDI_ENABLE_RCV;
61 input_opened = 1;
62 }
63 outb((gus_midi_control), u_MidiControl); /* Enable */
64
65 midi_busy = 1;
66 qlen = qhead = qtail = output_used = 0;
67 midi_input_intr = input;
68
69 return 0;
70}
71
/*
 * dump_to_midi - try to hand one byte to the UART.  Returns 1 when the
 * byte was written, 0 when the transmitter was busy, in which case the
 * TX interrupt is (re)enabled so the queue gets drained from the ISR.
 *
 * NOTE(review): this function acquires gus_lock itself, yet several
 * callers in this file invoke it with gus_lock already held -- see the
 * notes at gus_midi_out()/gus_midi_interrupt()/gus_midi_buffer_status().
 */
static int dump_to_midi(unsigned char midi_byte)
{
	unsigned long flags;
	int ok = 0;

	output_used = 1;

	spin_lock_irqsave(&gus_lock, flags);

	if (GUS_MIDI_STATUS() & MIDI_XMIT_EMPTY)
	{
		ok = 1;
		outb((midi_byte), u_MidiData);
	}
	else
	{
		/*
		 * Enable Midi xmit interrupts (again)
		 */
		gus_midi_control |= MIDI_ENABLE_XMIT;
		outb((gus_midi_control), u_MidiControl);
	}

	spin_unlock_irqrestore(&gus_lock,flags);
	return ok;
}
98
99static void gus_midi_close(int dev)
100{
101 /*
102 * Reset FIFO pointers, disable intrs
103 */
104
105 outb((MIDI_RESET), u_MidiControl);
106 midi_busy = 0;
107}
108
/*
 * gus_midi_out - transmit or queue one byte.  Returns 1 when the byte
 * was accepted (sent directly or queued), 0 when the 256-byte software
 * queue is full.
 *
 * NOTE(review): dump_to_midi() acquires gus_lock itself, but the drain
 * loop below calls it with gus_lock already held -- recursive use of a
 * non-recursive spinlock, which deadlocks on SMP/debug builds (harmless
 * only where spinlocks compile away).  The qlen>=256 test is also made
 * outside the lock.  Preserved as-is; a safe fix needs a lock-free
 * transmit helper shared with the ISR.
 */
static int gus_midi_out(int dev, unsigned char midi_byte)
{
	unsigned long flags;

	/*
	 * Drain the local queue first
	 */
	spin_lock_irqsave(&gus_lock, flags);

	while (qlen && dump_to_midi(tmp_queue[qhead]))
	{
		qlen--;
		qhead++;
	}
	spin_unlock_irqrestore(&gus_lock,flags);

	/*
	 * Output the byte if the local queue is empty.
	 */

	if (!qlen)
		if (dump_to_midi(midi_byte))
			return 1;	/*
					 * OK
					 */

	/*
	 * Put to the local queue
	 */

	if (qlen >= 256)
		return 0;	/*
				 * Local queue full
				 */
	spin_lock_irqsave(&gus_lock, flags);

	tmp_queue[qtail] = midi_byte;
	qlen++;
	qtail++;

	spin_unlock_irqrestore(&gus_lock,flags);
	return 1;
}
152
/* Nothing to do: reception is enabled at open time (gus_midi_open). */
static int gus_midi_start_read(int dev)
{
	return 0;
}

static int gus_midi_end_read(int dev)
{
	return 0;
}

/* Transmission is interrupt driven; nothing to kick. */
static void gus_midi_kick(int dev)
{
}
166
/*
 * gus_midi_buffer_status - nonzero while untransmitted output remains,
 * either in the software queue or still in the UART transmitter.
 *
 * NOTE(review): dump_to_midi() re-acquires gus_lock, which is already
 * held here -- deadlock risk on SMP/debug builds; kept as in the
 * original (see gus_midi_out()).
 */
static int gus_midi_buffer_status(int dev)
{
	unsigned long flags;

	if (!output_used)
		return 0;

	spin_lock_irqsave(&gus_lock, flags);

	if (qlen && dump_to_midi(tmp_queue[qhead]))
	{
		qlen--;
		qhead++;
	}
	spin_unlock_irqrestore(&gus_lock,flags);
	return (qlen > 0) || !(GUS_MIDI_STATUS() & MIDI_XMIT_EMPTY);
}
184
#define MIDI_SYNTH_NAME "Gravis Ultrasound Midi"
#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
#include "midi_synth.h"

/* OSS midi device operations for the GUS UART. */
static struct midi_operations gus_midi_operations =
{
	.owner = THIS_MODULE,
	.info = {"Gravis UltraSound Midi", 0, 0, SNDCARD_GUS},
	.converter = &std_midi_synth,
	.in_info = {0},
	.open = gus_midi_open,
	.close = gus_midi_close,
	.outputc = gus_midi_out,
	.start_read = gus_midi_start_read,
	.end_read = gus_midi_end_read,
	.kick = gus_midi_kick,
	.buffer_status = gus_midi_buffer_status,
};
203
/*
 * gus_midi_init - allocate an OSS midi device number, reset the UART
 * and register our operations; bails out with a log message when no
 * device slots are left.  The device number is recorded in slots[2].
 */
void __init gus_midi_init(struct address_info *hw_config)
{
	int dev = sound_alloc_mididev();

	if (dev == -1)
	{
		printk(KERN_INFO "gus_midi: Too many midi devices detected\n");
		return;
	}
	outb((MIDI_RESET), u_MidiControl);

	std_midi_synth.midi_dev = my_dev = dev;
	hw_config->slots[2] = dev;
	midi_devs[dev] = &gus_midi_operations;
	sequencer_init();
	return;
}
221
/*
 * gus_midi_interrupt - MIDI half of the shared GUS interrupt: pass
 * received bytes to the input callback and refill the transmitter from
 * the software queue, disabling TX interrupts once the queue is empty.
 *
 * NOTE(review): dump_to_midi() acquires gus_lock, which is already held
 * here -- recursive use of a non-recursive spinlock (see gus_midi_out()).
 * Kept as in the original.
 */
void gus_midi_interrupt(int dummy)
{
	volatile unsigned char stat, data;
	int timeout = 10;	/* Bound the work done in interrupt context */

	spin_lock(&gus_lock);

	while (timeout-- > 0 && (stat = GUS_MIDI_STATUS()) & (MIDI_RCV_FULL | MIDI_XMIT_EMPTY))
	{
		if (stat & MIDI_RCV_FULL)
		{
			data = inb(u_MidiData);
			if (input_opened)
				midi_input_intr(my_dev, data);
		}
		if (stat & MIDI_XMIT_EMPTY)
		{
			while (qlen && dump_to_midi(tmp_queue[qhead]))
			{
				qlen--;
				qhead++;
			}
			if (!qlen)
			{
				/*
				 * Disable Midi output interrupts, since no data in the buffer
				 */
				gus_midi_control &= ~MIDI_ENABLE_XMIT;
				outb((gus_midi_control), u_MidiControl);
				/* Written twice in the original -- possibly a
				 * hardware settle quirk; preserved. */
				outb((gus_midi_control), u_MidiControl);
			}
		}
	}
	spin_unlock(&gus_lock);
}
diff --git a/sound/oss/gus_vol.c b/sound/oss/gus_vol.c
deleted file mode 100644
index 6ae6924e1647..000000000000
--- a/sound/oss/gus_vol.c
+++ /dev/null
@@ -1,153 +0,0 @@
1
2/*
3 * gus_vol.c - Compute volume for GUS.
4 *
5 *
6 * Copyright (C) by Hannu Savolainen 1993-1997
7 *
8 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
9 * Version 2 (June 1991). See the "COPYING" file distributed with this software
10 * for more info.
11 */
12#include "sound_config.h"
13
14#include "gus.h"
15#include "gus_linearvol.h"
16
17#define GUS_VOLUME gus_wave_volume
18
19
20extern int gus_wave_volume;
21
22/*
23 * Calculate gus volume from note velocity, main volume, expression, and
24 * intrinsic patch volume given in patch library. Expression is multiplied
25 * in, so it emphasizes differences in note velocity, while main volume is
26 * added in -- I don't know whether this is right, but it seems reasonable to
27 * me. (In the previous stage, main volume controller messages were changed
28 * to expression controller messages, if they were found to be used for
29 * dynamic volume adjustments, so here, main volume can be assumed to be
30 * constant throughout a song.)
31 *
32 * Intrinsic patch volume is added in, but if over 64 is also multiplied in, so
33 * we can give a big boost to very weak voices like nylon guitar and the
34 * basses. The normal value is 64. Strings are assigned lower values.
35 */
36
/*
 * gus_adagio_vol - compute a GF1 volume word (4 bit exponent in the
 * high byte, 8 bit mantissa in the low byte) from:
 * @vel:    note velocity
 * @mainv:  channel main volume (only used with GUS_USE_CHN_MAIN_VOLUME)
 * @xpn:    expression controller value
 * @voicev: intrinsic patch volume from the patch library (64 = neutral)
 */
unsigned short gus_adagio_vol(int vel, int mainv, int xpn, int voicev)
{
	int i, m, n, x;


	/*
	 * A voice volume of 64 is considered neutral, so adjust the main volume if
	 * something other than this neutral value was assigned in the patch
	 * library.
	 */
	x = 256 + 6 * (voicev - 64);

	/*
	 * Boost expression by voice volume above neutral.
	 */

	if (voicev > 65)
		xpn += voicev - 64;
	xpn += (voicev - 64) / 2;

	/*
	 * Combine multiplicative and level components.
	 */
	x = vel * xpn * 6 + (voicev / 4) * x;

#ifdef GUS_VOLUME
	/*
	 * Further adjustment by installation-specific master volume control
	 * (default 60).
	 */
	x = (x * GUS_VOLUME * GUS_VOLUME) / 10000;
#endif

#ifdef GUS_USE_CHN_MAIN_VOLUME
	/*
	 * Experimental support for the channel main volume
	 */

	mainv = (mainv / 2) + 64;	/* Scale to 64 to 127 */
	x = (x * mainv * mainv) / 16384;
#endif

	/* Clamp to silence / full scale before converting. */
	if (x < 2)
		return (0);
	else if (x >= 65535)
		return ((15 << 8) | 255);

	/*
	 * Convert to GUS's logarithmic form with 4 bit exponent i and 8 bit
	 * mantissa m.
	 */

	n = x;
	i = 7;
	if (n < 128)
	{
		/* Find the highest set bit below 2^7. */
		while (i > 0 && n < (1 << i))
			i--;
	}
	else
	{
		/* Shift down until the mantissa fits in 8 bits. */
		while (n > 255)
		{
			n >>= 1;
			i++;
		}
	}
	/*
	 * Mantissa is part of linear volume not expressed in exponent. (This is
	 * not quite like real logs -- I wonder if it's right.)
	 */
	m = x - (1 << i);

	/*
	 * Adjust mantissa to 8 bits.
	 */
	if (m > 0)
	{
		if (i > 8)
			m >>= i - 8;
		else if (i < 8)
			m <<= 8 - i;
	}
	return ((i << 8) + m);
}
122
123/*
124 * Volume-values are interpreted as linear values. Volume is based on the
125 * value supplied with SEQ_START_NOTE(), channel main volume (if compiled in)
126 * and the volume set by the mixer-device (default 60%).
127 */
128
129unsigned short gus_linear_vol(int vol, int mainvol)
130{
131 int mixer_mainvol;
132
133 if (vol <= 0)
134 vol = 0;
135 else if (vol >= 127)
136 vol = 127;
137
138#ifdef GUS_VOLUME
139 mixer_mainvol = GUS_VOLUME;
140#else
141 mixer_mainvol = 100;
142#endif
143
144#ifdef GUS_USE_CHN_MAIN_VOLUME
145 if (mainvol <= 0)
146 mainvol = 0;
147 else if (mainvol >= 127)
148 mainvol = 127;
149#else
150 mainvol = 127;
151#endif
152 return gus_linearvol[(((vol * mainvol) / 127) * mixer_mainvol) / 100];
153}
diff --git a/sound/oss/gus_wave.c b/sound/oss/gus_wave.c
deleted file mode 100644
index de10cedee1c7..000000000000
--- a/sound/oss/gus_wave.c
+++ /dev/null
@@ -1,3463 +0,0 @@
1/*
2 * sound/oss/gus_wave.c
3 *
4 * Driver for the Gravis UltraSound wave table synth.
5 *
6 *
7 * Copyright (C) by Hannu Savolainen 1993-1997
8 *
9 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
10 * Version 2 (June 1991). See the "COPYING" file distributed with this software
11 * for more info.
12 *
13 *
14 * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
15 * Frank van de Pol : Fixed GUS MAX interrupt handling. Enabled simultanious
16 * usage of CS4231A codec, GUS wave and MIDI for GUS MAX.
17 * Bartlomiej Zolnierkiewicz : added some __init/__exit
18 */
19
20#include <linux/init.h>
21#include <linux/spinlock.h>
22
23#define GUSPNP_AUTODETECT
24
25#include "sound_config.h"
26#include <linux/ultrasound.h>
27
28#include "gus.h"
29#include "gus_hw.h"
30
31#define GUS_BANK_SIZE (((iw_mode) ? 256*1024*1024 : 256*1024))
32
33#define MAX_SAMPLE 150
34#define MAX_PATCH 256
35
36#define NOT_SAMPLE 0xffff
37
/* Per-voice software state for the GF1's 32 hardware voices. */
struct voice_info
{
	unsigned long orig_freq;
	unsigned long current_freq;
	unsigned long mode;
	int fixed_pitch;
	int bender;
	int bender_range;
	int panning;
	int midi_volume;
	unsigned int initial_volume;
	unsigned int current_volume;
	/* Action to take when the loop-end IRQ fires: */
	int loop_irq_mode, loop_irq_parm;
#define LMODE_FINISH 1
#define LMODE_PCM 2
#define LMODE_PCM_STOP 3
	/* Action to take when the volume-ramp IRQ fires: */
	int volume_irq_mode, volume_irq_parm;
#define VMODE_HALT 1
#define VMODE_ENVELOPE 2
#define VMODE_START_NOTE 3

	int env_phase;
	unsigned char env_rate[6];
	unsigned char env_offset[6];

	/*
	 * Volume computation parameters for gus_adagio_vol()
	 */
	int main_vol, expression_vol, patch_vol;

	/* Variables for "Ultraclick" removal */
	int dev_pending, note_pending, volume_pending,
	    sample_pending;
	char kill_pending;
	long offset_pending;

};
75
/* Synth-wide state; several flags are shared with gus_card.c. */
static struct voice_alloc_info *voice_alloc;
static struct address_info *gus_hw_config;
extern int gus_base;
extern int gus_irq, gus_dma;
extern int gus_pnp_flag;
extern int gus_no_wave_dma;
static int gus_dma2 = -1;
static int dual_dma_mode;
static long gus_mem_size;		/* On-board DRAM size */
static long free_mem_ptr;		/* Next free DRAM address */
static int gus_busy;
static int gus_no_dma;
static int nr_voices;			/* Currently active voice count */
static int gus_devnum;
static int volume_base, volume_scale, volume_method;
static int gus_recmask = SOUND_MASK_MIC;
static int recording_active;
static int only_read_access;
static int only_8_bits;

static int iw_mode = 0;			/* Nonzero on an AMD InterWave */
int gus_wave_volume = 60;
int gus_pcm_volume = 80;
int have_gus_max = 0;
static int gus_line_vol = 100, gus_mic_vol;
static unsigned char mix_image = 0x00;	/* Shadow of the mixer control reg */

int gus_timer_enabled = 0;
104
/*
 * Current version of this driver doesn't allow synth and PCM functions
 * at the same time. The active_device specifies the active driver
 */

static int active_device;

#define GUS_DEV_WAVE 1			/* Wave table synth */
#define GUS_DEV_PCM_DONE 2		/* PCM device, transfer done */
#define GUS_DEV_PCM_CONTINUE 3		/* PCM device, transfer done ch. 1/2 */

/* Current PCM format. */
static int gus_audio_speed;
static int gus_audio_channels;
static int gus_audio_bits;
static int gus_audio_bsize;
static char bounce_buf[8 * 1024];	/* Must match value set to max_fragment */

/* Waiters for free on-board DRAM during patch loading. */
static DECLARE_WAIT_QUEUE_HEAD(dram_sleeper);

/*
 * Variables and buffers for PCM output
 */

#define MAX_PCM_BUFFERS (128*MAX_REALTIME_FACTOR)	/* Don't change */

static int pcm_bsize, pcm_nblk, pcm_banksize;
static int pcm_datasize[MAX_PCM_BUFFERS];
static volatile int pcm_head, pcm_tail, pcm_qlen;	/* Ring of PCM blocks */
static volatile int pcm_active;
static volatile int dma_active;
static int pcm_opened;
static int pcm_current_dev;
static int pcm_current_block;
static unsigned long pcm_current_buf;
static int pcm_current_count;
static int pcm_current_intrflag;
DEFINE_SPINLOCK(gus_lock);	/* Protects hardware access; shared with gus_midi.c */

extern int *gus_osp;

static struct voice_info voices[32];
146
/*
 * GF1 per-voice playback rate for each active-voice count (14..32):
 * the chip's output rate drops as more voices are enabled.  Indexed by
 * nr_voices - 14 (see gus_voice_freq()).
 */
static int freq_div_table[] =
{
	44100,			/* 14 */
	41160,			/* 15 */
	38587,			/* 16 */
	36317,			/* 17 */
	34300,			/* 18 */
	32494,			/* 19 */
	30870,			/* 20 */
	29400,			/* 21 */
	28063,			/* 22 */
	26843,			/* 23 */
	25725,			/* 24 */
	24696,			/* 25 */
	23746,			/* 26 */
	22866,			/* 27 */
	22050,			/* 28 */
	21289,			/* 29 */
	20580,			/* 30 */
	19916,			/* 31 */
	19293			/* 32 */
};
169
/* Patch/sample bookkeeping for on-board DRAM. */
static struct patch_info *samples;
static long sample_ptrs[MAX_SAMPLE + 1];	/* DRAM address per sample, -1 = unused */
static int sample_map[32];		/* Sample currently bound to each voice */
static int free_sample;
static int mixer_type;


static int patch_table[MAX_PATCH];	/* Sample number per patch, NOT_SAMPLE if none */
static int patch_map[32];		/* Patch currently bound to each voice */

static struct synth_info gus_info = {
	"Gravis UltraSound", 0, SYNTH_TYPE_SAMPLE, SAMPLE_TYPE_GUS,
	0, 16, 0, MAX_PATCH
};

/* Forward declarations. */
static void gus_poke(long addr, unsigned char data);
static void compute_and_set_volume(int voice, int volume, int ramp_time);
extern unsigned short gus_adagio_vol(int vel, int mainv, int xpn, int voicev);
extern unsigned short gus_linear_vol(int vol, int mainvol);
static void compute_volume(int voice, int volume);
static void do_volume_irq(int voice);
static void set_input_volumes(void);
static void gus_tmr_install(int io_base);

#define INSTANT_RAMP -1		/* Instant change. No ramping */
#define FAST_RAMP 0		/* Fastest possible ramp */
196
197static void reset_sample_memory(void)
198{
199 int i;
200
201 for (i = 0; i <= MAX_SAMPLE; i++)
202 sample_ptrs[i] = -1;
203 for (i = 0; i < 32; i++)
204 sample_map[i] = -1;
205 for (i = 0; i < 32; i++)
206 patch_map[i] = -1;
207
208 gus_poke(0, 0); /* Put a silent sample to the beginning */
209 gus_poke(1, 0);
210 free_mem_ptr = 2;
211
212 free_sample = 0;
213
214 for (i = 0; i < MAX_PATCH; i++)
215 patch_table[i] = NOT_SAMPLE;
216}
217
/* Short settle delay between register writes: seven dummy port reads. */
void gus_delay(void)
{
	int i;

	for (i = 0; i < 7; i++)
		inb(u_DRAMIO);
}
225
/*
 * gus_poke - write one byte into on-board DRAM at @addr, programming
 * the 24 bit address through indirect registers 0x43 (low 16 bits)
 * and 0x44 (high bits) first.
 */
static void gus_poke(long addr, unsigned char data)
{				/* Writes a byte to the DRAM */
	outb((0x43), u_Command);
	outb((addr & 0xff), u_DataLo);
	outb(((addr >> 8) & 0xff), u_DataHi);

	outb((0x44), u_Command);
	outb(((addr >> 16) & 0xff), u_DataHi);
	outb((data), u_DRAMIO);
}

/* gus_peek - read one byte of on-board DRAM (same addressing scheme). */
static unsigned char gus_peek(long addr)
{				/* Reads a byte from the DRAM */
	unsigned char tmp;

	outb((0x43), u_Command);
	outb((addr & 0xff), u_DataLo);
	outb(((addr >> 8) & 0xff), u_DataHi);

	outb((0x44), u_Command);
	outb(((addr >> 16) & 0xff), u_DataHi);
	tmp = inb(u_DRAMIO);

	return tmp;
}
251
/*
 * GF1 indirect register accessors: select a register via u_Command,
 * then transfer data through u_DataLo/u_DataHi.  "read" variants add
 * the 0x80 read offset to the register number; "look" variants read
 * back using the unmodified register number.
 */
void gus_write8(int reg, unsigned int data)
{				/* Writes to an indirect register (8 bit) */
	outb((reg), u_Command);
	outb(((unsigned char) (data & 0xff)), u_DataHi);
}

static unsigned char gus_read8(int reg)
{
	/* Reads from an indirect register (8 bit). Offset 0x80. */
	unsigned char val;

	outb((reg | 0x80), u_Command);
	val = inb(u_DataHi);

	return val;
}

static unsigned char gus_look8(int reg)
{
	/* Reads from an indirect register (8 bit). No additional offset. */
	unsigned char val;

	outb((reg), u_Command);
	val = inb(u_DataHi);

	return val;
}

static void gus_write16(int reg, unsigned int data)
{
	/* Writes to an indirect register (16 bit) */
	outb((reg), u_Command);

	outb(((unsigned char) (data & 0xff)), u_DataLo);
	outb(((unsigned char) ((data >> 8) & 0xff)), u_DataHi);
}

static unsigned short gus_read16(int reg)
{
	/* Reads from an indirect register (16 bit). Offset 0x80. */
	unsigned char hi, lo;

	outb((reg | 0x80), u_Command);

	lo = inb(u_DataLo);
	hi = inb(u_DataHi);

	return ((hi << 8) & 0xff00) | lo;
}

static unsigned short gus_look16(int reg)
{
	/* Reads from an indirect register (16 bit). No additional offset. */
	unsigned char hi, lo;

	outb((reg), u_Command);

	lo = inb(u_DataLo);
	hi = inb(u_DataHi);

	return ((hi << 8) & 0xff00) | lo;
}
314
/*
 * gus_write_addr - program a 24 bit wavetable address (plus 4 bit
 * fraction) into the register pair @reg/@reg+1.  16 bit samples need
 * the GF1's special address translation; on the InterwWave a plain
 * shift suffices.  The write is repeated after a delay (see comment).
 */
static void gus_write_addr(int reg, unsigned long address, int frac, int is16bit)
{
	/* Writes an 24 bit memory address */
	unsigned long hold_address;

	if (is16bit)
	{
		if (iw_mode)
		{
			/* Interwave spesific address translations */
			address >>= 1;
		}
		else
		{
			/*
			 * Special processing required for 16 bit patches
			 */

			hold_address = address;
			address = address >> 1;
			address &= 0x0001ffffL;
			address |= (hold_address & 0x000c0000L);
		}
	}
	gus_write16(reg, (unsigned short) ((address >> 7) & 0xffff));
	gus_write16(reg + 1, (unsigned short) ((address << 9) & 0xffff)
		    + (frac << 5));
	/* Could writing twice fix problems with GUS_VOICE_POS()? Let's try. */
	gus_delay();
	gus_write16(reg, (unsigned short) ((address >> 7) & 0xffff));
	gus_write16(reg + 1, (unsigned short) ((address << 9) & 0xffff)
		    + (frac << 5));
}
348
/* Select which of the 32 voices subsequent register accesses address. */
static void gus_select_voice(int voice)
{
	if (voice < 0 || voice > 31)
		return;
	outb((voice), u_Voice);
}

/*
 * gus_select_max_voices - set the number of active voices, clamped to
 * 14..32 (the InterWave always runs all 32).  Updates the allocator
 * and nr_voices, which selects the rate in freq_div_table[].
 */
static void gus_select_max_voices(int nvoices)
{
	if (iw_mode)
		nvoices = 32;
	if (nvoices < 14)
		nvoices = 14;
	if (nvoices > 32)
		nvoices = 32;

	voice_alloc->max_voice = nr_voices = nvoices;
	gus_write8(0x0e, (nvoices - 1) | 0xc0);
}
368
/* Start the selected voice.  Register 0x00 is written twice with a
 * settle delay, a pattern used throughout this driver. */
static void gus_voice_on(unsigned int mode)
{
	gus_write8(0x00, (unsigned char) (mode & 0xfc));
	gus_delay();
	gus_write8(0x00, (unsigned char) (mode & 0xfc));
}

/* Stop the selected voice by setting the two stop bits. */
static void gus_voice_off(void)
{
	gus_write8(0x00, gus_read8(0x00) | 0x03);
}

/* Update the mode bits of the selected voice, preserving the stop bits. */
static void gus_voice_mode(unsigned int m)
{
	unsigned char mode = (unsigned char) (m & 0xff);

	gus_write8(0x00, (gus_read8(0x00) & 0x03) |
		   (mode & 0xfc));	/* Don't touch last two bits */
	gus_delay();
	gus_write8(0x00, (gus_read8(0x00) & 0x03) | (mode & 0xfc));
}
390
/*
 * gus_voice_freq - program the frequency control register (0x01) of
 * the selected voice.  The divisor depends on the number of active
 * voices (freq_div_table); the InterWave always runs at 44100 Hz.
 */
static void gus_voice_freq(unsigned long freq)
{
	unsigned long divisor = freq_div_table[nr_voices - 14];
	unsigned short fc;

	/* Interwave plays at 44100 Hz with any number of voices */
	if (iw_mode)
		fc = (unsigned short) (((freq << 9) + (44100 >> 1)) / 44100);
	else
		fc = (unsigned short) (((freq << 9) + (divisor >> 1)) / divisor);
	fc = fc << 1;

	gus_write16(0x01, fc);
}
405
406static void gus_voice_volume(unsigned int vol)
407{
408 gus_write8(0x0d, 0x03); /* Stop ramp before setting volume */
409 gus_write16(0x09, (unsigned short) (vol << 4));
410}
411
412static void gus_voice_balance(unsigned int balance)
413{
414 gus_write8(0x0c, (unsigned char) (balance & 0xff));
415}
416
417static void gus_ramp_range(unsigned int low, unsigned int high)
418{
419 gus_write8(0x07, (unsigned char) ((low >> 4) & 0xff));
420 gus_write8(0x08, (unsigned char) ((high >> 4) & 0xff));
421}
422
423static void gus_ramp_rate(unsigned int scale, unsigned int rate)
424{
425 gus_write8(0x06, (unsigned char) (((scale & 0x03) << 6) | (rate & 0x3f)));
426}
427
428static void gus_rampon(unsigned int m)
429{
430 unsigned char mode = (unsigned char) (m & 0xff);
431
432 gus_write8(0x0d, mode & 0xfc);
433 gus_delay();
434 gus_write8(0x0d, mode & 0xfc);
435}
436
/*
 * Change the ramp mode bits of the currently selected voice while
 * preserving the low two (stop) bits of the ramp control register
 * (0x0d), so a stopped ramp stays stopped.
 */
static void gus_ramp_mode(unsigned int m)
{
	unsigned char mode = (unsigned char) (m & 0xff);

	gus_write8(0x0d, (gus_read8(0x0d) & 0x03) |
		   (mode & 0xfc));	/* Leave the last 2 bits alone */
	gus_delay();
	gus_write8(0x0d, (gus_read8(0x0d) & 0x03) | (mode & 0xfc));
}
446
/*
 * Stop any volume ramp on the currently selected voice by setting both
 * stop bits of the ramp control register (0x0d).
 */
static void gus_rampoff(void)
{
	gus_write8(0x0d, 0x03);
}
451
/*
 * Move the playback position of a voice within its mapped sample.
 * If a note start is still pending on the voice (VMODE_START_NOTE),
 * the offset is only recorded and applied when the note actually
 * starts; otherwise the current-address register (0x0a) is written
 * directly.  Positions at or beyond the sample length, or voices with
 * no sample mapped, are silently ignored.
 */
static void gus_set_voice_pos(int voice, long position)
{
	int sample_no;

	if ((sample_no = sample_map[voice]) != -1) {
		if (position < samples[sample_no].len) {
			if (voices[voice].volume_irq_mode == VMODE_START_NOTE)
				voices[voice].offset_pending = position;
			else
				gus_write_addr(0x0a, sample_ptrs[sample_no] + position, 0,
					samples[sample_no].mode & WAVE_16_BITS);
		}
	}
}
466
/*
 * Silence a voice and release its allocation: volume to zero, voice
 * and ramp stopped, playback position rewound, and the allocator's
 * bookkeeping for the voice cleared.  Takes gus_lock itself, so it
 * must NOT be called with the lock already held.
 */
static void gus_voice_init(int voice)
{
	unsigned long flags;

	spin_lock_irqsave(&gus_lock,flags);
	gus_select_voice(voice);
	gus_voice_volume(0);
	gus_voice_off();
	gus_write_addr(0x0a, 0, 0, 0);	/* Set current position to 0 */
	gus_write8(0x00, 0x03);		/* Voice off */
	gus_write8(0x0d, 0x03);		/* Ramping off */
	voice_alloc->map[voice] = 0;
	voice_alloc->alloc_times[voice] = 0;
	spin_unlock_irqrestore(&gus_lock,flags);

}
483
/*
 * Reset the software-side (bookkeeping) state of a voice to defaults.
 * Unlike gus_voice_init() this touches no hardware, so no locking is
 * needed.
 */
static void gus_voice_init2(int voice)
{
	voices[voice].panning = 0;		/* Centre */
	voices[voice].mode = 0;
	voices[voice].orig_freq = 20000;
	voices[voice].current_freq = 20000;
	voices[voice].bender = 0;
	voices[voice].bender_range = 200;	/* presumably cents: 2 semitones */
	voices[voice].initial_volume = 0;
	voices[voice].current_volume = 0;
	voices[voice].loop_irq_mode = 0;
	voices[voice].loop_irq_parm = 0;
	voices[voice].volume_irq_mode = 0;
	voices[voice].volume_irq_parm = 0;
	voices[voice].env_phase = 0;
	voices[voice].main_vol = 127;
	voices[voice].patch_vol = 127;
	voices[voice].expression_vol = 127;
	voices[voice].sample_pending = -1;	/* No instrument change queued */
	voices[voice].fixed_pitch = 0;
}
505
/*
 * Advance the six-phase volume envelope of a voice by one step.
 *
 * Phase 2 with WAVE_SUSTAIN_ON is the sustain hold: ramping stops and
 * the envelope resumes only after note-off (see start_release()).
 * Past phase 5 the envelope is done and the voice is shut down.
 * Otherwise the next target volume and rate are fetched and a volume
 * ramp with IRQ is started towards the target; the volume IRQ handler
 * (volume_irq_mode == VMODE_ENVELOPE) calls back in here, so the
 * envelope advances from interrupt context until complete.  The direct
 * recursion below is bounded by the six phases.
 */
static void step_envelope(int voice)
{
	unsigned vol, prev_vol, phase;
	unsigned char rate;
	unsigned long flags;

	if (voices[voice].mode & WAVE_SUSTAIN_ON && voices[voice].env_phase == 2)
	{
		spin_lock_irqsave(&gus_lock,flags);
		gus_select_voice(voice);
		gus_rampoff();
		spin_unlock_irqrestore(&gus_lock,flags);
		return;
		/*
		 * Sustain phase begins. Continue envelope after receiving note off.
		 */
	}
	if (voices[voice].env_phase >= 5)
	{
		/* Envelope finished. Shoot the voice down */
		gus_voice_init(voice);
		return;
	}
	prev_vol = voices[voice].current_volume;
	phase = ++voices[voice].env_phase;
	compute_volume(voice, voices[voice].midi_volume);
	/* Envelope offsets scale the computed volume (255 = full) */
	vol = voices[voice].initial_volume * voices[voice].env_offset[phase] / 255;
	rate = voices[voice].env_rate[phase];

	spin_lock_irqsave(&gus_lock,flags);
	gus_select_voice(voice);

	gus_voice_volume(prev_vol);


	gus_write8(0x06, rate);	/* Ramping rate */

	voices[voice].volume_irq_mode = VMODE_ENVELOPE;

	if (((vol - prev_vol) / 64) == 0)	/* No significant volume change */
	{
		spin_unlock_irqrestore(&gus_lock,flags);
		step_envelope(voice);	/* Continue the envelope on the next step */
		return;
	}
	if (vol > prev_vol)
	{
		if (vol >= (4096 - 64))
			vol = 4096 - 65;	/* Keep target inside ramp range */
		gus_ramp_range(0, vol);
		gus_rampon(0x20);	/* Increasing volume, with IRQ */
	}
	else
	{
		if (vol <= 64)
			vol = 65;		/* Keep target inside ramp range */
		gus_ramp_range(vol, 4030);
		gus_rampon(0x60);	/* Decreasing volume, with IRQ */
	}
	voices[voice].current_volume = vol;
	spin_unlock_irqrestore(&gus_lock,flags);
}
568
/*
 * Start the volume envelope of a voice from the beginning.  env_phase
 * is set to -1 so that the first step_envelope() call increments it to
 * phase 0; the current volume is seeded with a small nonzero value.
 */
static void init_envelope(int voice)
{
	voices[voice].env_phase = -1;
	voices[voice].current_volume = 64;

	step_envelope(voice);
}
576
/*
 * Begin the release portion of the envelope after a note-off.  Reads
 * the voice control (0x00) and current volume (0x09) registers
 * directly, so the visible caller (gus_voice_fade) invokes this with
 * gus_lock held and the voice selected.  env_phase is pre-set to 2 so
 * step_envelope() continues from phase 3, starting from the current
 * hardware volume.
 */
static void start_release(int voice)
{
	if (gus_read8(0x00) & 0x03)
		return;		/* Voice already stopped */

	voices[voice].env_phase = 2;	/* Will be incremented by step_envelope */

	voices[voice].current_volume = voices[voice].initial_volume =
		gus_read16(0x09) >> 4;	/* Get current volume */

	voices[voice].mode &= ~WAVE_SUSTAIN_ON;
	gus_rampoff();
	step_envelope(voice);
}
591
592static void gus_voice_fade(int voice)
593{
594 int instr_no = sample_map[voice], is16bits;
595 unsigned long flags;
596
597 spin_lock_irqsave(&gus_lock,flags);
598 gus_select_voice(voice);
599
600 if (instr_no < 0 || instr_no > MAX_SAMPLE)
601 {
602 gus_write8(0x00, 0x03); /* Hard stop */
603 voice_alloc->map[voice] = 0;
604 spin_unlock_irqrestore(&gus_lock,flags);
605 return;
606 }
607 is16bits = (samples[instr_no].mode & WAVE_16_BITS) ? 1 : 0; /* 8 or 16 bits */
608
609 if (voices[voice].mode & WAVE_ENVELOPES)
610 {
611 start_release(voice);
612 spin_unlock_irqrestore(&gus_lock,flags);
613 return;
614 }
615 /*
616 * Ramp the volume down but not too quickly.
617 */
618 if ((int) (gus_read16(0x09) >> 4) < 100) /* Get current volume */
619 {
620 gus_voice_off();
621 gus_rampoff();
622 gus_voice_init(voice);
623 spin_unlock_irqrestore(&gus_lock,flags);
624 return;
625 }
626 gus_ramp_range(65, 4030);
627 gus_ramp_rate(2, 4);
628 gus_rampon(0x40 | 0x20); /* Down, once, with IRQ */
629 voices[voice].volume_irq_mode = VMODE_HALT;
630 spin_unlock_irqrestore(&gus_lock,flags);
631}
632
633static void gus_reset(void)
634{
635 int i;
636
637 gus_select_max_voices(24);
638 volume_base = 3071;
639 volume_scale = 4;
640 volume_method = VOL_METHOD_ADAGIO;
641
642 for (i = 0; i < 32; i++)
643 {
644 gus_voice_init(i); /* Turn voice off */
645 gus_voice_init2(i);
646 }
647}
648
/*
 * Full hardware initialisation of the GF1/Interwave: reset the chip,
 * clear every pending interrupt source, reset all voices, then program
 * the IRQ and DMA selection latches through the mixer control port.
 *
 * gus_irq_map / gus_dma_map translate ISA IRQ/DMA numbers into the
 * 3-bit codes the GUS latch registers expect; 0 means "not a valid
 * selection" for that table.  Runs entirely under gus_lock.
 */
static void gus_initialize(void)
{
	unsigned long flags;
	unsigned char dma_image, irq_image, tmp;

	static unsigned char gus_irq_map[16] = {
		0, 0, 0, 3, 0, 2, 0, 4, 0, 1, 0, 5, 6, 0, 0, 7
	};

	static unsigned char gus_dma_map[8] = {
		0, 1, 0, 2, 0, 3, 4, 5
	};

	spin_lock_irqsave(&gus_lock,flags);
	gus_write8(0x4c, 0);	/* Reset GF1 */
	gus_delay();
	gus_delay();

	gus_write8(0x4c, 1);	/* Release Reset */
	gus_delay();
	gus_delay();

	/*
	 * Clear all interrupts
	 */

	gus_write8(0x41, 0);	/* DMA control */
	gus_write8(0x45, 0);	/* Timer control */
	gus_write8(0x49, 0);	/* Sample control */

	gus_select_max_voices(24);

	inb(u_Status);		/* Touch the status register */

	gus_look8(0x41);	/* Clear any pending DMA IRQs */
	gus_look8(0x49);	/* Clear any pending sample IRQs */
	gus_read8(0x0f);	/* Clear pending IRQs */

	gus_reset();		/* Resets all voices */

	gus_look8(0x41);	/* Clear any pending DMA IRQs */
	gus_look8(0x49);	/* Clear any pending sample IRQs */
	gus_read8(0x0f);	/* Clear pending IRQs */

	gus_write8(0x4c, 7);	/* Master reset | DAC enable | IRQ enable */

	/*
	 * Set up for Digital ASIC
	 */

	outb((0x05), gus_base + 0x0f);

	mix_image |= 0x02;	/* Disable line out (for a moment) */
	outb((mix_image), u_Mixer);

	outb((0x00), u_IRQDMAControl);

	outb((0x00), gus_base + 0x0f);

	/*
	 * Now set up the DMA and IRQ interface
	 *
	 * The GUS supports two IRQs and two DMAs.
	 *
	 * Just one DMA channel is used. This prevents simultaneous ADC and DAC.
	 * Adding this support requires significant changes to the dmabuf.c, dsp.c
	 * and audio.c also.
	 */

	irq_image = 0;
	tmp = gus_irq_map[gus_irq];
	if (!gus_pnp_flag && !tmp)
		printk(KERN_WARNING "Warning! GUS IRQ not selected\n");
	irq_image |= tmp;
	irq_image |= 0x40;	/* Combine IRQ1 (GF1) and IRQ2 (Midi) */

	dual_dma_mode = 1;
	if (gus_dma2 == gus_dma || gus_dma2 == -1)
	{
		dual_dma_mode = 0;
		dma_image = 0x40;	/* Combine DMA1 (DRAM) and IRQ2 (ADC) */

		tmp = gus_dma_map[gus_dma];
		if (!tmp)
			printk(KERN_WARNING "Warning! GUS DMA not selected\n");

		dma_image |= tmp;
	}
	else
	{
		/* Setup dual DMA channel mode for GUS MAX */

		dma_image = gus_dma_map[gus_dma];
		if (!dma_image)
			printk(KERN_WARNING "Warning! GUS DMA not selected\n");

		/* Second channel's code lives in bits 5..3 of the latch */
		tmp = gus_dma_map[gus_dma2] << 3;
		if (!tmp)
		{
			printk(KERN_WARNING "Warning! Invalid GUS MAX DMA\n");
			tmp = 0x40;	/* Combine DMA channels */
			dual_dma_mode = 0;
		}
		dma_image |= tmp;
	}

	/*
	 * For some reason the IRQ and DMA addresses must be written twice
	 */

	/*
	 * Doing it first time
	 */

	outb((mix_image), u_Mixer);	/* Select DMA control */
	outb((dma_image | 0x80), u_IRQDMAControl);	/* Set DMA address */

	outb((mix_image | 0x40), u_Mixer);	/* Select IRQ control */
	outb((irq_image), u_IRQDMAControl);	/* Set IRQ address */

	/*
	 * Doing it second time
	 */

	outb((mix_image), u_Mixer);	/* Select DMA control */
	outb((dma_image), u_IRQDMAControl);	/* Set DMA address */

	outb((mix_image | 0x40), u_Mixer);	/* Select IRQ control */
	outb((irq_image), u_IRQDMAControl);	/* Set IRQ address */

	gus_select_voice(0);	/* This disables writes to IRQ/DMA reg */

	mix_image &= ~0x02;	/* Enable line out */
	mix_image |= 0x08;	/* Enable IRQ */
	outb((mix_image), u_Mixer);	/*
					 * Turn mixer channels on
					 * Note! Mic in is left off.
					 */

	gus_select_voice(0);	/* This disables writes to IRQ/DMA reg */

	gusintr(gus_irq, (void *)gus_hw_config, NULL);	/* Serve pending interrupts */

	inb(u_Status);		/* Touch the status register */

	gus_look8(0x41);	/* Clear any pending DMA IRQs */
	gus_look8(0x49);	/* Clear any pending sample IRQs */

	gus_read8(0x0f);	/* Clear pending IRQs */

	if (iw_mode)		/* Interwave: enable enhanced mode */
		gus_write8(0x19, gus_read8(0x19) | 0x01);
	spin_unlock_irqrestore(&gus_lock,flags);
}
803
804
805static void __init pnp_mem_init(void)
806{
807#include "iwmem.h"
808#define CHUNK_SIZE (256*1024)
809#define BANK_SIZE (4*1024*1024)
810#define CHUNKS_PER_BANK (BANK_SIZE/CHUNK_SIZE)
811
812 int bank, chunk, addr, total = 0;
813 int bank_sizes[4];
814 int i, j, bits = -1, testbits = -1, nbanks = 0;
815
816 /*
817 * This routine determines what kind of RAM is installed in each of the four
818 * SIMM banks and configures the DRAM address decode logic accordingly.
819 */
820
821 /*
822 * Place the chip into enhanced mode
823 */
824 gus_write8(0x19, gus_read8(0x19) | 0x01);
825 gus_write8(0x53, gus_look8(0x53) & ~0x02); /* Select DRAM I/O access */
826
827 /*
828 * Set memory configuration to 4 DRAM banks of 4M in each (16M total).
829 */
830
831 gus_write16(0x52, (gus_look16(0x52) & 0xfff0) | 0x000c);
832
833 /*
834 * Perform the DRAM size detection for each bank individually.
835 */
836 for (bank = 0; bank < 4; bank++)
837 {
838 int size = 0;
839
840 addr = bank * BANK_SIZE;
841
842 /* Clean check points of each chunk */
843 for (chunk = 0; chunk < CHUNKS_PER_BANK; chunk++)
844 {
845 gus_poke(addr + chunk * CHUNK_SIZE + 0L, 0x00);
846 gus_poke(addr + chunk * CHUNK_SIZE + 1L, 0x00);
847 }
848
849 /* Write a value to each chunk point and verify the result */
850 for (chunk = 0; chunk < CHUNKS_PER_BANK; chunk++)
851 {
852 gus_poke(addr + chunk * CHUNK_SIZE + 0L, 0x55);
853 gus_poke(addr + chunk * CHUNK_SIZE + 1L, 0xAA);
854
855 if (gus_peek(addr + chunk * CHUNK_SIZE + 0L) == 0x55 &&
856 gus_peek(addr + chunk * CHUNK_SIZE + 1L) == 0xAA)
857 {
858 /* OK. There is RAM. Now check for possible shadows */
859 int ok = 1, chunk2;
860
861 for (chunk2 = 0; ok && chunk2 < chunk; chunk2++)
862 if (gus_peek(addr + chunk2 * CHUNK_SIZE + 0L) ||
863 gus_peek(addr + chunk2 * CHUNK_SIZE + 1L))
864 ok = 0; /* Addressing wraps */
865
866 if (ok)
867 size = (chunk + 1) * CHUNK_SIZE;
868 }
869 gus_poke(addr + chunk * CHUNK_SIZE + 0L, 0x00);
870 gus_poke(addr + chunk * CHUNK_SIZE + 1L, 0x00);
871 }
872 bank_sizes[bank] = size;
873 if (size)
874 nbanks = bank + 1;
875 DDB(printk("Interwave: Bank %d, size=%dk\n", bank, size / 1024));
876 }
877
878 if (nbanks == 0) /* No RAM - Give up */
879 {
880 printk(KERN_ERR "Sound: An Interwave audio chip detected but no DRAM\n");
881 printk(KERN_ERR "Sound: Unable to work with this card.\n");
882 gus_write8(0x19, gus_read8(0x19) & ~0x01);
883 gus_mem_size = 0;
884 return;
885 }
886
887 /*
888 * Now we know how much DRAM there is in each bank. The next step is
889 * to find a DRAM size encoding (0 to 12) which is best for the combination
890 * we have.
891 *
892 * First try if any of the possible alternatives matches exactly the amount
893 * of memory we have.
894 */
895
896 for (i = 0; bits == -1 && i < 13; i++)
897 {
898 bits = i;
899
900 for (j = 0; bits != -1 && j < 4; j++)
901 if (mem_decode[i][j] != bank_sizes[j])
902 bits = -1; /* No hit */
903 }
904
905 /*
906 * If necessary, try to find a combination where other than the last
907 * bank matches our configuration and the last bank is left oversized.
908 * In this way we don't leave holes in the middle of memory.
909 */
910
911 if (bits == -1) /* No luck yet */
912 {
913 for (i = 0; bits == -1 && i < 13; i++)
914 {
915 bits = i;
916
917 for (j = 0; bits != -1 && j < nbanks - 1; j++)
918 if (mem_decode[i][j] != bank_sizes[j])
919 bits = -1; /* No hit */
920 if (mem_decode[i][nbanks - 1] < bank_sizes[nbanks - 1])
921 bits = -1; /* The last bank is too small */
922 }
923 }
924 /*
925 * The last resort is to search for a combination where the banks are
926 * smaller than the actual SIMMs. This leaves some memory in the banks
927 * unused but doesn't leave holes in the DRAM address space.
928 */
929 if (bits == -1) /* No luck yet */
930 {
931 for (i = 0; i < 13; i++)
932 {
933 testbits = i;
934 for (j = 0; testbits != -1 && j < nbanks - 1; j++)
935 if (mem_decode[i][j] > bank_sizes[j]) {
936 testbits = -1;
937 }
938 if(testbits > bits) bits = testbits;
939 }
940 if (bits != -1)
941 {
942 printk(KERN_INFO "Interwave: Can't use all installed RAM.\n");
943 printk(KERN_INFO "Interwave: Try reordering SIMMS.\n");
944 }
945 printk(KERN_INFO "Interwave: Can't find working DRAM encoding.\n");
946 printk(KERN_INFO "Interwave: Defaulting to 256k. Try reordering SIMMS.\n");
947 bits = 0;
948 }
949 DDB(printk("Interwave: Selecting DRAM addressing mode %d\n", bits));
950
951 for (bank = 0; bank < 4; bank++)
952 {
953 DDB(printk(" Bank %d, mem=%dk (limit %dk)\n", bank, bank_sizes[bank] / 1024, mem_decode[bits][bank] / 1024));
954
955 if (bank_sizes[bank] > mem_decode[bits][bank])
956 total += mem_decode[bits][bank];
957 else
958 total += bank_sizes[bank];
959 }
960
961 DDB(printk("Total %dk of DRAM (enhanced mode)\n", total / 1024));
962
963 /*
964 * Set the memory addressing mode.
965 */
966 gus_write16(0x52, (gus_look16(0x52) & 0xfff0) | bits);
967
968/* Leave the chip into enhanced mode. Disable LFO */
969 gus_mem_size = total;
970 iw_mode = 1;
971 gus_write8(0x19, (gus_read8(0x19) | 0x01) & ~0x02);
972}
973
/*
 * Probe for a GUS at baseaddr.  Reserves the card's I/O regions,
 * resets the GF1, and (optionally) detects an Interwave by testing
 * which bits of the version register 0x5b are writable: the high
 * nibble is read-only and the low nibble writable on a genuine
 * Interwave.  Interwave cards are sized via pnp_mem_init(); classic
 * cards are sized by poke/peek in 1 KB steps with mirror (address
 * wrap) detection.
 *
 * Returns 1 on success with gus_base and gus_mem_size set, 0 when no
 * card is found (the I/O regions are released again).
 */
int __init gus_wave_detect(int baseaddr)
{
	unsigned long i, max_mem = 1024L;
	unsigned long loc;
	unsigned char val;

	if (!request_region(baseaddr, 16, "GUS"))
		return 0;
	if (!request_region(baseaddr + 0x100, 12, "GUS")) {	/* 0x10c-> is MAX */
		release_region(baseaddr, 16);
		return 0;
	}

	gus_base = baseaddr;

	gus_write8(0x4c, 0);	/* Reset GF1 */
	gus_delay();
	gus_delay();

	gus_write8(0x4c, 1);	/* Release Reset */
	gus_delay();
	gus_delay();

#ifdef GUSPNP_AUTODETECT
	val = gus_look8(0x5b);	/* Version number register */
	gus_write8(0x5b, ~val);	/* Invert all bits */

	if ((gus_look8(0x5b) & 0xf0) == (val & 0xf0))	/* No change */
	{
		if ((gus_look8(0x5b) & 0x0f) == ((~val) & 0x0f))	/* Change */
		{
			DDB(printk("Interwave chip version %d detected\n", (val & 0xf0) >> 4));
			gus_pnp_flag = 1;
		}
		else
		{
			DDB(printk("Not an Interwave chip (%x)\n", gus_look8(0x5b)));
			gus_pnp_flag = 0;
		}
	}
	gus_write8(0x5b, val);	/* Restore all bits */
#endif

	if (gus_pnp_flag)
		pnp_mem_init();
	if (iw_mode)
		return 1;

	/* See if there is first block there.... */
	gus_poke(0L, 0xaa);
	if (gus_peek(0L) != 0xaa) {
		release_region(baseaddr + 0x100, 12);
		release_region(baseaddr, 16);
		return 0;
	}

	/* Now zero it out so that I can check for mirroring .. */
	gus_poke(0L, 0x00);
	for (i = 1L; i < max_mem; i++)
	{
		int n, failed;

		/* check for mirroring ... */
		if (gus_peek(0L) != 0)
			break;
		loc = i << 10;	/* Test point at i KB */

		for (n = loc - 1, failed = 0; n <= loc; n++)
		{
			gus_poke(loc, 0xaa);
			if (gus_peek(loc) != 0xaa)
				failed = 1;
			gus_poke(loc, 0x55);
			if (gus_peek(loc) != 0x55)
				failed = 1;
		}
		if (failed)
			break;
	}
	/* i is the first KB index that failed (or max_mem) */
	gus_mem_size = i << 10;
	return 1;
}
1056
1057static int guswave_ioctl(int dev, unsigned int cmd, void __user *arg)
1058{
1059
1060 switch (cmd)
1061 {
1062 case SNDCTL_SYNTH_INFO:
1063 gus_info.nr_voices = nr_voices;
1064 if (copy_to_user(arg, &gus_info, sizeof(gus_info)))
1065 return -EFAULT;
1066 return 0;
1067
1068 case SNDCTL_SEQ_RESETSAMPLES:
1069 reset_sample_memory();
1070 return 0;
1071
1072 case SNDCTL_SEQ_PERCMODE:
1073 return 0;
1074
1075 case SNDCTL_SYNTH_MEMAVL:
1076 return (gus_mem_size == 0) ? 0 : gus_mem_size - free_mem_ptr - 32;
1077
1078 default:
1079 return -EINVAL;
1080 }
1081}
1082
1083static int guswave_set_instr(int dev, int voice, int instr_no)
1084{
1085 int sample_no;
1086
1087 if (instr_no < 0 || instr_no > MAX_PATCH)
1088 instr_no = 0; /* Default to acoustic piano */
1089
1090 if (voice < 0 || voice > 31)
1091 return -EINVAL;
1092
1093 if (voices[voice].volume_irq_mode == VMODE_START_NOTE)
1094 {
1095 voices[voice].sample_pending = instr_no;
1096 return 0;
1097 }
1098 sample_no = patch_table[instr_no];
1099 patch_map[voice] = -1;
1100
1101 if (sample_no == NOT_SAMPLE)
1102 {
1103/* printk("GUS: Undefined patch %d for voice %d\n", instr_no, voice);*/
1104 return -EINVAL; /* Patch not defined */
1105 }
1106 if (sample_ptrs[sample_no] == -1) /* Sample not loaded */
1107 {
1108/* printk("GUS: Sample #%d not loaded for patch %d (voice %d)\n", sample_no, instr_no, voice);*/
1109 return -EINVAL;
1110 }
1111 sample_map[voice] = sample_no;
1112 patch_map[voice] = instr_no;
1113 return 0;
1114}
1115
1116static int guswave_kill_note(int dev, int voice, int note, int velocity)
1117{
1118 unsigned long flags;
1119
1120 spin_lock_irqsave(&gus_lock,flags);
1121 /* voice_alloc->map[voice] = 0xffff; */
1122 if (voices[voice].volume_irq_mode == VMODE_START_NOTE)
1123 {
1124 voices[voice].kill_pending = 1;
1125 spin_unlock_irqrestore(&gus_lock,flags);
1126 }
1127 else
1128 {
1129 spin_unlock_irqrestore(&gus_lock,flags);
1130 gus_voice_fade(voice);
1131 }
1132
1133 return 0;
1134}
1135
/* Aftertouch is not supported by this driver; deliberately a no-op. */
static void guswave_aftertouch(int dev, int voice, int pressure)
{
}
1139
1140static void guswave_panning(int dev, int voice, int value)
1141{
1142 if (voice >= 0 || voice < 32)
1143 voices[voice].panning = value;
1144}
1145
1146static void guswave_volume_method(int dev, int mode)
1147{
1148 if (mode == VOL_METHOD_LINEAR || mode == VOL_METHOD_ADAGIO)
1149 volume_method = mode;
1150}
1151
1152static void compute_volume(int voice, int volume)
1153{
1154 if (volume < 128)
1155 voices[voice].midi_volume = volume;
1156
1157 switch (volume_method)
1158 {
1159 case VOL_METHOD_ADAGIO:
1160 voices[voice].initial_volume =
1161 gus_adagio_vol(voices[voice].midi_volume, voices[voice].main_vol,
1162 voices[voice].expression_vol,
1163 voices[voice].patch_vol);
1164 break;
1165
1166 case VOL_METHOD_LINEAR: /* Totally ignores patch-volume and expression */
1167 voices[voice].initial_volume = gus_linear_vol(volume, voices[voice].main_vol);
1168 break;
1169
1170 default:
1171 voices[voice].initial_volume = volume_base +
1172 (voices[voice].midi_volume * volume_scale);
1173 }
1174
1175 if (voices[voice].initial_volume > 4030)
1176 voices[voice].initial_volume = 4030;
1177}
1178
/*
 * Compute the target volume for a voice (compute_volume()) and move
 * the hardware towards it.  INSTANT_RAMP writes the volume register
 * directly; FAST_RAMP ramps at the maximum rate (63), any other value
 * at rate 16.  If the current hardware volume is already within 64
 * steps of the target, the value is just written.  Ramp targets are
 * clamped away from the extremes of the volume range.
 */
static void compute_and_set_volume(int voice, int volume, int ramp_time)
{
	int curr, target, rate;
	unsigned long flags;

	compute_volume(voice, volume);
	voices[voice].current_volume = voices[voice].initial_volume;

	spin_lock_irqsave(&gus_lock,flags);
	/*
	 * CAUTION! Interrupts disabled. Enable them before returning
	 */

	gus_select_voice(voice);

	curr = gus_read16(0x09) >> 4;
	target = voices[voice].initial_volume;

	if (ramp_time == INSTANT_RAMP)
	{
		gus_rampoff();
		gus_voice_volume(target);
		spin_unlock_irqrestore(&gus_lock,flags);
		return;
	}
	if (ramp_time == FAST_RAMP)
		rate = 63;
	else
		rate = 16;
	gus_ramp_rate(0, rate);

	if ((target - curr) / 64 == 0)	/* Close enough to target. */
	{
		gus_rampoff();
		gus_voice_volume(target);
		spin_unlock_irqrestore(&gus_lock,flags);
		return;
	}
	if (target > curr)
	{
		if (target > (4095 - 65))
			target = 4095 - 65;	/* Keep inside ramp range */
		gus_ramp_range(curr, target);
		gus_rampon(0x00);	/* Ramp up, once, no IRQ */
	}
	else
	{
		if (target < 65)
			target = 65;		/* Keep inside ramp range */

		gus_ramp_range(target, curr);
		gus_rampon(0x40);	/* Ramp down, once, no irq */
	}
	spin_unlock_irqrestore(&gus_lock,flags);
}
1234
/*
 * Re-apply the volume of a currently running voice after a controller
 * change.  Voices without envelopes are simply ramped to the new
 * volume.  For enveloped voices: if the envelope ramp is stopped
 * (sustain), ramp to the new volume; if an envelope ramp is in flight,
 * only recompute the target so the next envelope step picks it up.
 */
static void dynamic_volume_change(int voice)
{
	unsigned char status;
	unsigned long flags;

	spin_lock_irqsave(&gus_lock,flags);
	gus_select_voice(voice);
	status = gus_read8(0x00);	/* Get voice status */
	spin_unlock_irqrestore(&gus_lock,flags);

	if (status & 0x03)
		return;		/* Voice was not running */

	if (!(voices[voice].mode & WAVE_ENVELOPES))
	{
		compute_and_set_volume(voice, voices[voice].midi_volume, 1);
		return;
	}

	/*
	 * Voice is running and has envelopes.
	 */

	spin_lock_irqsave(&gus_lock,flags);
	gus_select_voice(voice);
	status = gus_read8(0x0d);	/* Ramping status */
	spin_unlock_irqrestore(&gus_lock,flags);

	if (status & 0x03)	/* Sustain phase? */
	{
		compute_and_set_volume(voice, voices[voice].midi_volume, 1);
		return;
	}
	if (voices[voice].env_phase < 0)
		return;		/* Envelope not started yet */

	compute_volume(voice, voices[voice].midi_volume);

}
1274
/*
 * Handle a MIDI controller change for a voice: pitch bender (and its
 * range), expression, pan and main volume.  The 14-bit CTL_ variants
 * are rescaled and then deliberately fall through to the matching
 * CTRL_ handling.  Changes are deferred for voices with a pending
 * note start; the IRQ handler applies them when the note begins.
 */
static void guswave_controller(int dev, int voice, int ctrl_num, int value)
{
	unsigned long flags;
	unsigned long freq;

	if (voice < 0 || voice > 31)
		return;

	switch (ctrl_num)
	{
		case CTRL_PITCH_BENDER:
			voices[voice].bender = value;

			if (voices[voice].volume_irq_mode != VMODE_START_NOTE)
			{
				freq = compute_finetune(voices[voice].orig_freq, value, voices[voice].bender_range, 0);
				voices[voice].current_freq = freq;

				spin_lock_irqsave(&gus_lock,flags);
				gus_select_voice(voice);
				gus_voice_freq(freq);
				spin_unlock_irqrestore(&gus_lock,flags);
			}
			break;

		case CTRL_PITCH_BENDER_RANGE:
			voices[voice].bender_range = value;
			break;
		case CTL_EXPRESSION:
			value /= 128;	/* 14-bit -> 7-bit */
			/* fall through */
		case CTRL_EXPRESSION:
			if (volume_method == VOL_METHOD_ADAGIO)
			{
				voices[voice].expression_vol = value;
				if (voices[voice].volume_irq_mode != VMODE_START_NOTE)
					dynamic_volume_change(voice);
			}
			break;

		case CTL_PAN:
			voices[voice].panning = (value * 2) - 128;
			break;

		case CTL_MAIN_VOLUME:
			value = (value * 100) / 16383;	/* 14-bit -> 0..100 */
			/* fall through */
		case CTRL_MAIN_VOLUME:
			voices[voice].main_vol = value;
			if (voices[voice].volume_irq_mode != VMODE_START_NOTE)
				dynamic_volume_change(voice);
			break;

		default:
			break;
	}
}
1331
/*
 * Actually start (or re-volume) a note on a voice.
 *
 * note_num == 255 means "change the volume of the already playing
 * note".  Otherwise the sample of the voice's patch whose note range
 * contains the note is chosen (falling back to the sample with the
 * nearest base_note), the playback frequency is derived (honouring
 * fixed pitch and any pitch bend already set), pan and volume are
 * computed, the address/loop registers are programmed according to
 * the sample's loop mode, and the voice is started.
 *
 * Called with interrupts enabled; takes gus_lock itself around the
 * hardware phases.
 */
static int guswave_start_note2(int dev, int voice, int note_num, int volume)
{
	int sample, best_sample, best_delta, delta_freq;
	int is16bits, samplep, patch, pan;
	unsigned long note_freq, base_note, freq, flags;
	unsigned char mode = 0;

	if (voice < 0 || voice > 31)
	{
/*		printk("GUS: Invalid voice\n");*/
		return -EINVAL;
	}
	if (note_num == 255)	/* Volume change only */
	{
		if (voices[voice].mode & WAVE_ENVELOPES)
		{
			voices[voice].midi_volume = volume;
			dynamic_volume_change(voice);
			return 0;
		}
		compute_and_set_volume(voice, volume, 1);
		return 0;
	}
	if ((patch = patch_map[voice]) == -1)
		return -EINVAL;
	if ((samplep = patch_table[patch]) == NOT_SAMPLE)
	{
		return -EINVAL;
	}
	note_freq = note_to_freq(note_num);

	/*
	 * Find a sample within a patch so that the note_freq is between low_note
	 * and high_note.
	 */
	sample = -1;

	best_sample = samplep;
	best_delta = 1000000;
	while (samplep != 0 && samplep != NOT_SAMPLE && sample == -1)
	{
		delta_freq = note_freq - samples[samplep].base_note;
		if (delta_freq < 0)
			delta_freq = -delta_freq;
		if (delta_freq < best_delta)
		{
			best_sample = samplep;
			best_delta = delta_freq;
		}
		if (samples[samplep].low_note <= note_freq &&
			note_freq <= samples[samplep].high_note)
		{
			sample = samplep;
		}
		else
			samplep = samples[samplep].key;	/* Link to next sample */
	}
	if (sample == -1)
		sample = best_sample;	/* Nearest base note as fallback */

	if (sample == -1)
	{
/*		printk("GUS: Patch %d not defined for note %d\n", patch, note_num);*/
		return 0;	/* Should play default patch ??? */
	}
	is16bits = (samples[sample].mode & WAVE_16_BITS) ? 1 : 0;
	voices[voice].mode = samples[sample].mode;
	voices[voice].patch_vol = samples[sample].volume;

	if (iw_mode)
		gus_write8(0x15, 0x00);	/* RAM, Reset voice deactivate bit of SMSI */

	if (voices[voice].mode & WAVE_ENVELOPES)
	{
		int i;

		/* Copy the envelope definition from the sample */
		for (i = 0; i < 6; i++)
		{
			voices[voice].env_rate[i] = samples[sample].env_rate[i];
			voices[voice].env_offset[i] = samples[sample].env_offset[i];
		}
	}
	sample_map[voice] = sample;

	if (voices[voice].fixed_pitch)	/* Fixed pitch */
	{
		freq = samples[sample].base_freq;
	}
	else
	{
		/* Scale base_freq by the ratio of note to base note */
		base_note = samples[sample].base_note / 100;
		note_freq /= 100;

		freq = samples[sample].base_freq * note_freq / base_note;
	}

	voices[voice].orig_freq = freq;

	/*
	 * Since the pitch bender may have been set before playing the note, we
	 * have to calculate the bending now.
	 */

	freq = compute_finetune(voices[voice].orig_freq, voices[voice].bender,
				voices[voice].bender_range, 0);
	voices[voice].current_freq = freq;

	/* Combine sample and controller panning, clamp to 0..15 */
	pan = (samples[sample].panning + voices[voice].panning) / 32;
	pan += 7;
	if (pan < 0)
		pan = 0;
	if (pan > 15)
		pan = 15;

	if (samples[sample].mode & WAVE_16_BITS)
	{
		mode |= 0x04;	/* 16 bits */
		/* 16-bit samples must not straddle a DRAM bank boundary */
		if ((sample_ptrs[sample] / GUS_BANK_SIZE) !=
			((sample_ptrs[sample] + samples[sample].len) / GUS_BANK_SIZE))
			printk(KERN_ERR "GUS: Sample address error\n");
	}
	spin_lock_irqsave(&gus_lock,flags);
	gus_select_voice(voice);
	gus_voice_off();
	gus_rampoff();

	spin_unlock_irqrestore(&gus_lock,flags);

	if (voices[voice].mode & WAVE_ENVELOPES)
	{
		compute_volume(voice, volume);
		init_envelope(voice);
	}
	else
	{
		compute_and_set_volume(voice, volume, 0);
	}

	spin_lock_irqsave(&gus_lock,flags);
	gus_select_voice(voice);

	if (samples[sample].mode & WAVE_LOOP_BACK)
		gus_write_addr(0x0a, sample_ptrs[sample] + samples[sample].len -
			voices[voice].offset_pending, 0, is16bits);	/* start=end */
	else
		gus_write_addr(0x0a, sample_ptrs[sample] + voices[voice].offset_pending, 0, is16bits);	/* Sample start=begin */

	if (samples[sample].mode & WAVE_LOOPING)
	{
		mode |= 0x08;

		if (samples[sample].mode & WAVE_BIDIR_LOOP)
			mode |= 0x10;

		if (samples[sample].mode & WAVE_LOOP_BACK)
		{
			gus_write_addr(0x0a, sample_ptrs[sample] + samples[sample].loop_end -
				voices[voice].offset_pending,
				(samples[sample].fractions >> 4) & 0x0f, is16bits);
			mode |= 0x40;	/* Play backwards */
		}
		gus_write_addr(0x02, sample_ptrs[sample] + samples[sample].loop_start,
			samples[sample].fractions & 0x0f, is16bits);	/* Loop start location */
		gus_write_addr(0x04, sample_ptrs[sample] + samples[sample].loop_end,
			(samples[sample].fractions >> 4) & 0x0f, is16bits);	/* Loop end location */
	}
	else
	{
		mode |= 0x20;	/* Loop IRQ at the end */
		voices[voice].loop_irq_mode = LMODE_FINISH;	/* Ramp down at the end */
		voices[voice].loop_irq_parm = 1;
		gus_write_addr(0x02, sample_ptrs[sample], 0, is16bits);	/* Loop start location */
		gus_write_addr(0x04, sample_ptrs[sample] + samples[sample].len - 1,
			(samples[sample].fractions >> 4) & 0x0f, is16bits);	/* Loop end location */
	}
	gus_voice_freq(freq);
	gus_voice_balance(pan);
	gus_voice_on(mode);
	spin_unlock_irqrestore(&gus_lock,flags);

	return 0;
}
1514
1515/*
1516 * New guswave_start_note by Andrew J. Robinson attempts to minimize clicking
1517 * when the note playing on the voice is changed. It uses volume
1518 * ramping.
1519 */
1520
/*
 * Start a note with click minimisation: if the voice is still audibly
 * playing (current volume >= 2065 and not stopped), it is first ramped
 * down at the fastest rate with an IRQ (VMODE_START_NOTE) and the note
 * parameters are parked in the *_pending fields; the volume IRQ
 * handler then starts the note.  Quiet or stopped voices are started
 * immediately via guswave_start_note2().
 *
 * NOTE(review): guswave_start_note2() re-acquires gus_lock while it is
 * held here — harmless on UP where the spinlock compiles away, but a
 * self-deadlock hazard on SMP; confirm before enabling on SMP.
 */
static int guswave_start_note(int dev, int voice, int note_num, int volume)
{
	unsigned long flags;
	int mode;
	int ret_val = 0;

	spin_lock_irqsave(&gus_lock,flags);
	if (note_num == 255)	/* Volume change only */
	{
		if (voices[voice].volume_irq_mode == VMODE_START_NOTE)
		{
			voices[voice].volume_pending = volume;
		}
		else
		{
			ret_val = guswave_start_note2(dev, voice, note_num, volume);
		}
	}
	else
	{
		gus_select_voice(voice);
		mode = gus_read8(0x00);
		if (mode & 0x20)
			gus_write8(0x00, mode & 0xdf);	/* No interrupt! */

		voices[voice].offset_pending = 0;
		voices[voice].kill_pending = 0;
		voices[voice].volume_irq_mode = 0;
		voices[voice].loop_irq_mode = 0;

		if (voices[voice].sample_pending >= 0)
		{
			spin_unlock_irqrestore(&gus_lock,flags);	/* Run temporarily with interrupts enabled */
			guswave_set_instr(voices[voice].dev_pending, voice, voices[voice].sample_pending);
			voices[voice].sample_pending = -1;
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);	/* Reselect the voice (just to be sure) */
		}
		/* Stopped (bit 0) or already quiet: start right away */
		if ((mode & 0x01) || (int) ((gus_read16(0x09) >> 4) < (unsigned) 2065))
		{
			ret_val = guswave_start_note2(dev, voice, note_num, volume);
		}
		else
		{
			/* Park the note and ramp the old one down first */
			voices[voice].dev_pending = dev;
			voices[voice].note_pending = note_num;
			voices[voice].volume_pending = volume;
			voices[voice].volume_irq_mode = VMODE_START_NOTE;

			gus_rampoff();
			gus_ramp_range(2000, 4065);
			gus_ramp_rate(0, 63);	/* Fastest possible rate */
			gus_rampon(0x20 | 0x40);	/* Ramp down, once, irq */
		}
	}
	spin_unlock_irqrestore(&gus_lock,flags);
	return ret_val;
}
1579
/*
 * Synth-device reset hook: silence every hardware voice and restore
 * each voice's software state to defaults.
 */
static void guswave_reset(int dev)
{
	int voice;

	for (voice = 0; voice < 32; voice++) {
		gus_voice_init(voice);
		gus_voice_init2(voice);
	}
}
1590
/*
 * Open the wavetable device.  Fails with -EBUSY when already open.
 * Unless wave DMA is disabled (gus_no_wave_dma), tries to grab the
 * DMA buffer for sample uploads, falling back to PIO (gus_no_dma) on
 * failure.  Fully reinitialises the hardware and serves interrupts
 * that were pending around the reset.
 */
static int guswave_open(int dev, int mode)
{
	int err;

	if (gus_busy)
		return -EBUSY;

	voice_alloc->timestamp = 0;

	if (gus_no_wave_dma) {
		gus_no_dma = 1;
	} else {
		if ((err = DMAbuf_open_dma(gus_devnum)) < 0)
		{
			/* printk( "GUS: Loading samples without DMA\n"); */
			gus_no_dma = 1;	/* Upload samples using PIO */
		}
		else
			gus_no_dma = 0;
	}

	init_waitqueue_head(&dram_sleeper);
	gus_busy = 1;
	active_device = GUS_DEV_WAVE;

	gusintr(gus_irq, (void *)gus_hw_config, NULL);	/* Serve pending interrupts */
	gus_initialize();
	gus_reset();
	gusintr(gus_irq, (void *)gus_hw_config, NULL);	/* Serve pending interrupts */

	return 0;
}
1623
/*
 * Close the wavetable device: mark it free, quiesce all voices, and
 * release the DMA channel if one was acquired in guswave_open().
 */
static void guswave_close(int dev)
{
	gus_busy = 0;
	active_device = 0;
	gus_reset();

	if (!gus_no_dma)
		DMAbuf_close_dma(gus_devnum);
}
1633
/*
 * Load one GUS format patch (sample) into the card's on-board DRAM.
 *
 * 'addr' points to a struct patch_info in user space; the first 'offs'
 * bytes of the header were already consumed by the caller and 'count'
 * is the total number of bytes supplied. The wave data following the
 * header is transferred bank-safely (DMA cannot cross a 256k bank),
 * either via DMA or byte-by-byte PIO when gus_no_dma is set.
 *
 * Returns 0 on success or -EINVAL/-ENOSPC/-EFAULT on error.
 */
static int guswave_load_patch(int dev, int format, const char __user *addr,
		int offs, int count, int pmgr_flag)
{
	struct patch_info patch;
	int instr;
	long sizeof_patch;

	unsigned long blk_sz, blk_end, left, src_offs, target;

	sizeof_patch = (long) &patch.data[0] - (long) &patch;	/* Header size */

	if (format != GUS_PATCH)
	{
/*		printk("GUS Error: Invalid patch format (key) 0x%x\n", format);*/
		return -EINVAL;
	}
	if (count < sizeof_patch)
	{
/*		printk("GUS Error: Patch header too short\n");*/
		return -EINVAL;
	}
	count -= sizeof_patch;

	if (free_sample >= MAX_SAMPLE)
	{
/*		printk("GUS: Sample table full\n");*/
		return -ENOSPC;
	}
	/*
	 * Copy the header from user space but ignore the first bytes which have
	 * been transferred already.
	 */

	if (copy_from_user(&((char *) &patch)[offs], &(addr)[offs],
			sizeof_patch - offs))
		return -EFAULT;

	if (patch.mode & WAVE_ROM)
		return -EINVAL;		/* Cannot write into sample ROM */
	if (gus_mem_size == 0)
		return -ENOSPC;

	instr = patch.instr_no;

	/*
	 * NOTE(review): this bound accepts instr == MAX_PATCH; verify that
	 * patch_table[] really has MAX_PATCH + 1 entries.
	 */
	if (instr < 0 || instr > MAX_PATCH)
	{
/*		printk(KERN_ERR "GUS: Invalid patch number %d\n", instr);*/
		return -EINVAL;
	}
	if (count < patch.len)
	{
/*		printk(KERN_ERR "GUS Warning: Patch record too short (%d<%d)\n", count, (int) patch.len);*/
		patch.len = count;	/* Truncate to the data actually supplied */
	}
	if (patch.len <= 0 || patch.len > gus_mem_size)
	{
/*		printk(KERN_ERR "GUS: Invalid sample length %d\n", (int) patch.len);*/
		return -EINVAL;
	}
	if (patch.mode & WAVE_LOOPING)
	{
		if (patch.loop_start < 0 || patch.loop_start >= patch.len)
		{
/*			printk(KERN_ERR "GUS: Invalid loop start\n");*/
			return -EINVAL;
		}
		if (patch.loop_end < patch.loop_start || patch.loop_end > patch.len)
		{
/*			printk(KERN_ERR "GUS: Invalid loop end\n");*/
			return -EINVAL;
		}
	}
	free_mem_ptr = (free_mem_ptr + 31) & ~31;	/* 32 byte alignment */

	if (patch.mode & WAVE_16_BITS)
	{
		/*
		 * 16 bit samples must fit one 256k bank.
		 */
		if (patch.len >= GUS_BANK_SIZE)
		{
/*			printk("GUS: Sample (16 bit) too long %d\n", (int) patch.len);*/
			return -ENOSPC;
		}
		if ((free_mem_ptr / GUS_BANK_SIZE) !=
			((free_mem_ptr + patch.len) / GUS_BANK_SIZE))
		{
			unsigned long tmp_mem =
			/* Align to 256K */
				((free_mem_ptr / GUS_BANK_SIZE) + 1) * GUS_BANK_SIZE;

			if ((tmp_mem + patch.len) > gus_mem_size)
				return -ENOSPC;

			free_mem_ptr = tmp_mem;		/* This leaves unusable memory */
		}
	}
	if ((free_mem_ptr + patch.len) > gus_mem_size)
		return -ENOSPC;

	sample_ptrs[free_sample] = free_mem_ptr;

	/*
	 * Tremolo is not possible with envelopes
	 */

	if (patch.mode & WAVE_ENVELOPES)
		patch.mode &= ~WAVE_TREMOLO;

	if (!(patch.mode & WAVE_FRACTIONS))
	{
		patch.fractions = 0;
	}
	memcpy((char *) &samples[free_sample], &patch, sizeof_patch);

	/*
	 * Link this_one sample to the list of samples for patch 'instr'.
	 */

	samples[free_sample].key = patch_table[instr];
	patch_table[instr] = free_sample;

	/*
	 * Use DMA to transfer the wave data to the DRAM
	 */

	left = patch.len;
	src_offs = 0;
	target = free_mem_ptr;

	while (left)		/* Not completely transferred yet */
	{
		/* One DMA buffer at a time, clipped to what remains */
		blk_sz = audio_devs[gus_devnum]->dmap_out->bytes_in_use;
		if (blk_sz > left)
			blk_sz = left;

		/*
		 * DMA cannot cross bank (256k) boundaries. Check for that.
		 */

		blk_end = target + blk_sz;

		if ((target / GUS_BANK_SIZE) != (blk_end / GUS_BANK_SIZE))
		{
			/* Split the block */
			blk_end &= ~(GUS_BANK_SIZE - 1);
			blk_sz = blk_end - target;
		}
		if (gus_no_dma)
		{
			/*
			 * For some reason the DMA is not possible. We have to use PIO.
			 */
			long i;
			unsigned char data;

			for (i = 0; i < blk_sz; i++)
			{
				get_user(*(unsigned char *) &data, (unsigned char __user *) &((addr)[sizeof_patch + i]));
				if (patch.mode & WAVE_UNSIGNED)
					if (!(patch.mode & WAVE_16_BITS) || (i & 0x01))
						data ^= 0x80;	/* Convert to signed */
				gus_poke(target + i, data);
			}
		}
		else
		{
			unsigned long address, hold_address;
			unsigned char dma_command;
			unsigned long flags;

			if (audio_devs[gus_devnum]->dmap_out->raw_buf == NULL)
			{
				printk(KERN_ERR "GUS: DMA buffer == NULL\n");
				return -ENOSPC;
			}
			/*
			 * OK, move now. First in and then out.
			 */

			if (copy_from_user(audio_devs[gus_devnum]->dmap_out->raw_buf,
					&(addr)[sizeof_patch + src_offs],
					blk_sz))
				return -EFAULT;

			spin_lock_irqsave(&gus_lock,flags);
			gus_write8(0x41, 0);	/* Disable GF1 DMA */
			DMAbuf_start_dma(gus_devnum, audio_devs[gus_devnum]->dmap_out->raw_buf_phys,
				blk_sz, DMA_MODE_WRITE);

			/*
			 * Set the DRAM address for the wave data
			 */

			if (iw_mode)
			{
				/* Different address translation in enhanced mode */

				unsigned char hi;

				if (gus_dma > 4)
					address = target >> 1;	/* Convert to 16 bit word address */
				else
					address = target;

				hi = (unsigned char) ((address >> 16) & 0xf0);
				hi += (unsigned char) (address & 0x0f);

				gus_write16(0x42, (address >> 4) & 0xffff);	/* DMA address (low) */
				gus_write8(0x50, hi);
			}
			else
			{
				address = target;
				if (audio_devs[gus_devnum]->dmap_out->dma > 3)
				{
					/* 16 bit DMA channel: fold the address as the GF1 expects */
					hold_address = address;
					address = address >> 1;
					address &= 0x0001ffffL;
					address |= (hold_address & 0x000c0000L);
				}
				gus_write16(0x42, (address >> 4) & 0xffff);	/* DRAM DMA address */
			}

			/*
			 * Start the DMA transfer
			 */

			dma_command = 0x21;	/* IRQ enable, DMA start */
			if (patch.mode & WAVE_UNSIGNED)
				dma_command |= 0x80;	/* Invert MSB */
			if (patch.mode & WAVE_16_BITS)
				dma_command |= 0x40;	/* 16 bit _DATA_ */
			if (audio_devs[gus_devnum]->dmap_out->dma > 3)
				dma_command |= 0x04;	/* 16 bit DMA _channel_ */

			/*
			 * Sleep here until the DRAM DMA done interrupt is served
			 */
			active_device = GUS_DEV_WAVE;
			gus_write8(0x41, dma_command);	/* Lets go luteet (=bugs) */

			spin_unlock_irqrestore(&gus_lock,flags);	/* opens a race */
			if (!interruptible_sleep_on_timeout(&dram_sleeper, HZ))
				printk("GUS: DMA Transfer timed out\n");
		}

		/*
		 * Now the next part
		 */

		left -= blk_sz;
		src_offs += blk_sz;
		target += blk_sz;

		gus_write8(0x41, 0);	/* Stop DMA */
	}

	free_mem_ptr += patch.len;
	free_sample++;
	return 0;
}
1896
/*
 * Dispatch a /dev/sequencer hardware-specific event (_GUS_*) to the
 * corresponding GF1 register operation. event_rec[2] holds the command
 * and event_rec[3] the voice number; bytes 4..7 carry either two
 * 16-bit parameters (p1, p2) or one 32-bit parameter (plong).
 */
static void guswave_hw_control(int dev, unsigned char *event_rec)
{
	int voice, cmd;
	unsigned short p1, p2;
	unsigned int plong;
	unsigned long flags;

	cmd = event_rec[2];
	voice = event_rec[3];
	p1 = *(unsigned short *) &event_rec[4];
	p2 = *(unsigned short *) &event_rec[6];
	plong = *(unsigned int *) &event_rec[4];

	/*
	 * If a note start is still pending on this voice (volume ramp IRQ
	 * not yet served), complete it first - except for commands that
	 * reprogram the sample or position themselves.
	 */
	if ((voices[voice].volume_irq_mode == VMODE_START_NOTE) &&
		(cmd != _GUS_VOICESAMPLE) && (cmd != _GUS_VOICE_POS))
		do_volume_irq(voice);

	switch (cmd)
	{
		case _GUS_NUMVOICES:
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			gus_select_max_voices(p1);
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_VOICESAMPLE:
			guswave_set_instr(dev, voice, p1);
			break;

		case _GUS_VOICEON:
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			p1 &= ~0x20;	/* Don't allow interrupts */
			gus_voice_on(p1);
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_VOICEOFF:
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			gus_voice_off();
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_VOICEFADE:
			gus_voice_fade(voice);
			break;

		case _GUS_VOICEMODE:
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			p1 &= ~0x20;	/* Don't allow interrupts */
			gus_voice_mode(p1);
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_VOICEBALA:
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			gus_voice_balance(p1);
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_VOICEFREQ:
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			gus_voice_freq(plong);
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_VOICEVOL:
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			gus_voice_volume(p1);
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_VOICEVOL2:	/* Just update the software voice level */
			voices[voice].initial_volume = voices[voice].current_volume = p1;
			break;

		/*
		 * Manual ramp control is refused while the voice uses
		 * envelopes - the envelope engine owns the ramp hardware.
		 */
		case _GUS_RAMPRANGE:
			if (voices[voice].mode & WAVE_ENVELOPES)
				break;	/* NO-NO */
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			gus_ramp_range(p1, p2);
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_RAMPRATE:
			if (voices[voice].mode & WAVE_ENVELOPES)
				break;	/* NJET-NJET */
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			gus_ramp_rate(p1, p2);
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_RAMPMODE:
			if (voices[voice].mode & WAVE_ENVELOPES)
				break;	/* NO-NO */
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			p1 &= ~0x20;	/* Don't allow interrupts */
			gus_ramp_mode(p1);
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_RAMPON:
			if (voices[voice].mode & WAVE_ENVELOPES)
				break;	/* EI-EI */
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			p1 &= ~0x20;	/* Don't allow interrupts */
			gus_rampon(p1);
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_RAMPOFF:
			if (voices[voice].mode & WAVE_ENVELOPES)
				break;	/* NEJ-NEJ */
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			gus_rampoff();
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		case _GUS_VOLUME_SCALE:
			volume_base = p1;
			volume_scale = p2;
			break;

		case _GUS_VOICE_POS:
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			gus_set_voice_pos(voice, plong);
			spin_unlock_irqrestore(&gus_lock,flags);
			break;

		default:
			break;
	}
}
2042
2043static int gus_audio_set_speed(int speed)
2044{
2045 if (speed <= 0)
2046 speed = gus_audio_speed;
2047
2048 if (speed < 4000)
2049 speed = 4000;
2050
2051 if (speed > 44100)
2052 speed = 44100;
2053
2054 gus_audio_speed = speed;
2055
2056 if (only_read_access)
2057 {
2058 /* Compute nearest valid recording speed and return it */
2059
2060 /* speed = (9878400 / (gus_audio_speed + 2)) / 16; */
2061 speed = (((9878400 + gus_audio_speed / 2) / (gus_audio_speed + 2)) + 8) / 16;
2062 speed = (9878400 / (speed * 16)) - 2;
2063 }
2064 return speed;
2065}
2066
2067static int gus_audio_set_channels(int channels)
2068{
2069 if (!channels)
2070 return gus_audio_channels;
2071 if (channels > 2)
2072 channels = 2;
2073 if (channels < 1)
2074 channels = 1;
2075 gus_audio_channels = channels;
2076 return channels;
2077}
2078
2079static int gus_audio_set_bits(int bits)
2080{
2081 if (!bits)
2082 return gus_audio_bits;
2083
2084 if (bits != 8 && bits != 16)
2085 bits = 8;
2086
2087 if (only_8_bits)
2088 bits = 8;
2089
2090 gus_audio_bits = bits;
2091 return bits;
2092}
2093
2094static int gus_audio_ioctl(int dev, unsigned int cmd, void __user *arg)
2095{
2096 int val;
2097
2098 switch (cmd)
2099 {
2100 case SOUND_PCM_WRITE_RATE:
2101 if (get_user(val, (int __user*)arg))
2102 return -EFAULT;
2103 val = gus_audio_set_speed(val);
2104 break;
2105
2106 case SOUND_PCM_READ_RATE:
2107 val = gus_audio_speed;
2108 break;
2109
2110 case SNDCTL_DSP_STEREO:
2111 if (get_user(val, (int __user *)arg))
2112 return -EFAULT;
2113 val = gus_audio_set_channels(val + 1) - 1;
2114 break;
2115
2116 case SOUND_PCM_WRITE_CHANNELS:
2117 if (get_user(val, (int __user *)arg))
2118 return -EFAULT;
2119 val = gus_audio_set_channels(val);
2120 break;
2121
2122 case SOUND_PCM_READ_CHANNELS:
2123 val = gus_audio_channels;
2124 break;
2125
2126 case SNDCTL_DSP_SETFMT:
2127 if (get_user(val, (int __user *)arg))
2128 return -EFAULT;
2129 val = gus_audio_set_bits(val);
2130 break;
2131
2132 case SOUND_PCM_READ_BITS:
2133 val = gus_audio_bits;
2134 break;
2135
2136 case SOUND_PCM_WRITE_FILTER: /* NOT POSSIBLE */
2137 case SOUND_PCM_READ_FILTER:
2138 val = -EINVAL;
2139 break;
2140 default:
2141 return -EINVAL;
2142 }
2143 return put_user(val, (int __user *)arg);
2144}
2145
2146static void gus_audio_reset(int dev)
2147{
2148 if (recording_active)
2149 {
2150 gus_write8(0x49, 0x00); /* Halt recording */
2151 set_input_volumes();
2152 }
2153}
2154
static int saved_iw_mode;	/* iw_mode saved across audio open/close; enhanced mode is disabled while PCM is open */
2156
/*
 * Open the PCM audio device. Fails with -EBUSY when the card is busy
 * and -EIO when recording is requested on playback-only PnP hardware.
 * Temporarily leaves InterWave enhanced mode (restored in
 * gus_audio_close()).
 */
static int gus_audio_open(int dev, int mode)
{
	if (gus_busy)
		return -EBUSY;

	if (gus_pnp_flag && mode & OPEN_READ)
	{
/*		printk(KERN_ERR "GUS: Audio device #%d is playback only.\n", dev);*/
		return -EIO;
	}
	gus_initialize();

	gus_busy = 1;
	active_device = 0;

	saved_iw_mode = iw_mode;
	if (iw_mode)
	{
		/* There are some problems with audio in enhanced mode so disable it */
		gus_write8(0x19, gus_read8(0x19) & ~0x01);	/* Disable enhanced mode */
		iw_mode = 0;
	}

	gus_reset();
	reset_sample_memory();
	gus_select_max_voices(14);

	pcm_active = 0;
	dma_active = 0;
	pcm_opened = 1;
	if (mode & OPEN_READ)
	{
		recording_active = 1;
		set_input_volumes();
	}
	only_read_access = !(mode & OPEN_WRITE);
	only_8_bits = mode & OPEN_READ;		/* Recording forces 8-bit format */
	if (only_8_bits)
		audio_devs[dev]->format_mask = AFMT_U8;
	else
		audio_devs[dev]->format_mask = AFMT_U8 | AFMT_S16_LE;

	return 0;
}
2201
/*
 * Close the PCM audio device: restore InterWave enhanced mode, reset
 * the GF1 and halt any active recording.
 */
static void gus_audio_close(int dev)
{
	iw_mode = saved_iw_mode;
	gus_reset();
	gus_busy = 0;
	pcm_opened = 0;
	active_device = 0;

	if (recording_active)
	{
		gus_write8(0x49, 0x00);	/* Halt recording */
		set_input_volumes();
	}
	recording_active = 0;
}
2217
/*
 * Re-program the volume of every active PCM voice after the mixer PCM
 * level (gus_pcm_volume) has changed. Only acts while PCM playback is
 * open and running.
 */
static void gus_audio_update_volume(void)
{
	unsigned long flags;
	int voice;

	if (pcm_active && pcm_opened)
		for (voice = 0; voice < gus_audio_channels; voice++)
		{
			spin_lock_irqsave(&gus_lock,flags);
			gus_select_voice(voice);
			gus_rampoff();
			/* Map the 0..100 mixer level into GF1 volume units */
			gus_voice_volume(1530 + (25 * gus_pcm_volume));
			gus_ramp_range(65, 1530 + (25 * gus_pcm_volume));
			spin_unlock_irqrestore(&gus_lock,flags);
		}
}
2234
/*
 * Queue the next DRAM fragment (pcm_head) for playback by programming
 * one GF1 voice per audio channel: voice 0 carries the left/mono data
 * and generates the loop/rollover IRQs that drive the fragment queue.
 */
static void play_next_pcm_block(void)
{
	unsigned long flags;
	int speed = gus_audio_speed;
	int this_one, is16bits, chn;
	unsigned long dram_loc;
	unsigned char mode[2], ramp_mode[2];

	if (!pcm_qlen)
		return;

	this_one = pcm_head;

	for (chn = 0; chn < gus_audio_channels; chn++)
	{
		mode[chn] = 0x00;
		ramp_mode[chn] = 0x03;	/* Ramping and rollover off */

		if (chn == 0)
		{
			mode[chn] |= 0x20;	/* Loop IRQ */
			voices[chn].loop_irq_mode = LMODE_PCM;
		}
		if (gus_audio_bits != 8)
		{
			is16bits = 1;
			mode[chn] |= 0x04;	/* 16 bit data */
		}
		else
			is16bits = 0;

		/* DRAM location of this fragment for this channel */
		dram_loc = this_one * pcm_bsize;
		dram_loc += chn * pcm_banksize;

		if (this_one == (pcm_nblk - 1))	/* Last fragment of the DRAM buffer */
		{
			mode[chn] |= 0x08;	/* Enable loop */
			ramp_mode[chn] = 0x03;	/* Disable rollover bit */
		}
		else
		{
			if (chn == 0)
				ramp_mode[chn] = 0x04;	/* Enable rollover bit */
		}
		spin_lock_irqsave(&gus_lock,flags);
		gus_select_voice(chn);
		gus_voice_freq(speed);

		if (gus_audio_channels == 1)
			gus_voice_balance(7);	/* mono */
		else if (chn == 0)
			gus_voice_balance(0);	/* left */
		else
			gus_voice_balance(15);	/* right */

		if (!pcm_active)	/* Playback not already active */
		{
			/*
			 * The playback was not started yet (or there has been a pause).
			 * Start the voice (again) and ask for a rollover irq at the end of
			 * this_one block. If this_one one is last of the buffers, use just
			 * the normal loop with irq.
			 */

			gus_voice_off();
			gus_rampoff();
			gus_voice_volume(1530 + (25 * gus_pcm_volume));
			gus_ramp_range(65, 1530 + (25 * gus_pcm_volume));

			gus_write_addr(0x0a, chn * pcm_banksize, 0, is16bits);	/* Starting position */
			gus_write_addr(0x02, chn * pcm_banksize, 0, is16bits);	/* Loop start */

			if (chn != 0)
				gus_write_addr(0x04, pcm_banksize + (pcm_bsize * pcm_nblk) - 1,
					0, is16bits);	/* Loop end location */
		}
		if (chn == 0)
			gus_write_addr(0x04, dram_loc + pcm_bsize - 1,
				0, is16bits);	/* Loop end location */
		else
			mode[chn] |= 0x08;	/* Enable looping */
		spin_unlock_irqrestore(&gus_lock,flags);
	}
	/* Second pass: start all channel voices as close together as possible */
	for (chn = 0; chn < gus_audio_channels; chn++)
	{
		spin_lock_irqsave(&gus_lock,flags);
		gus_select_voice(chn);
		gus_write8(0x0d, ramp_mode[chn]);
		if (iw_mode)
			gus_write8(0x15, 0x00);	/* Reset voice deactivate bit of SMSI */
		gus_voice_on(mode[chn]);
		spin_unlock_irqrestore(&gus_lock,flags);
	}
	pcm_active = 1;
}
2330
/*
 * Transfer one channel of one audio block into on-board DRAM via DMA
 * and, once the last channel has been queued, kick off (or continue)
 * playback.
 */
static void gus_transfer_output_block(int dev, unsigned long buf,
		int total_count, int intrflag, int chn)
{
	/*
	 * This routine transfers one block of audio data to the DRAM. In mono mode
	 * it's called just once. When in stereo mode, this_one routine is called
	 * once for both channels.
	 *
	 * The left/mono channel data is transferred to the beginning of dram and the
	 * right data to the area pointed by gus_page_size.
	 */

	int this_one, count;
	unsigned long flags;
	unsigned char dma_command;
	unsigned long address, hold_address;

	spin_lock_irqsave(&gus_lock,flags);

	count = total_count / gus_audio_channels;

	if (chn == 0)
	{
		if (pcm_qlen >= pcm_nblk)
			printk(KERN_WARNING "GUS Warning: PCM buffers out of sync\n");

		/* Claim the next free fragment in the ring */
		this_one = pcm_current_block = pcm_tail;
		pcm_qlen++;
		pcm_tail = (pcm_tail + 1) % pcm_nblk;
		pcm_datasize[this_one] = count;
	}
	else
		this_one = pcm_current_block;

	gus_write8(0x41, 0);	/* Disable GF1 DMA */
	DMAbuf_start_dma(dev, buf + (chn * count), count, DMA_MODE_WRITE);

	address = this_one * pcm_bsize;
	address += chn * pcm_banksize;

	if (audio_devs[dev]->dmap_out->dma > 3)
	{
		/* 16 bit DMA channel: fold the DRAM address as the GF1 expects */
		hold_address = address;
		address = address >> 1;
		address &= 0x0001ffffL;
		address |= (hold_address & 0x000c0000L);
	}
	gus_write16(0x42, (address >> 4) & 0xffff);	/* DRAM DMA address */

	dma_command = 0x21;	/* IRQ enable, DMA start */

	if (gus_audio_bits != 8)
		dma_command |= 0x40;	/* 16 bit _DATA_ */
	else
		dma_command |= 0x80;	/* Invert MSB */

	if (audio_devs[dev]->dmap_out->dma > 3)
		dma_command |= 0x04;	/* 16 bit DMA channel */

	gus_write8(0x41, dma_command);	/* Kick start */

	if (chn == (gus_audio_channels - 1))	/* Last channel */
	{
		/*
		 * Last (right or mono) channel data
		 */
		dma_active = 1;	/* DMA started. There is a unacknowledged buffer */
		active_device = GUS_DEV_PCM_DONE;
		if (!pcm_active && (pcm_qlen > 1 || count < pcm_bsize))
		{
			play_next_pcm_block();
		}
	}
	else
	{
		/*
		 * Left channel data. The right channel
		 * is transferred after DMA interrupt
		 */
		active_device = GUS_DEV_PCM_CONTINUE;
	}

	spin_unlock_irqrestore(&gus_lock,flags);
}
2415
2416static void gus_uninterleave8(char *buf, int l)
2417{
2418/* This routine uninterleaves 8 bit stereo output (LRLRLR->LLLRRR) */
2419 int i, p = 0, halfsize = l / 2;
2420 char *buf2 = buf + halfsize, *src = bounce_buf;
2421
2422 memcpy(bounce_buf, buf, l);
2423
2424 for (i = 0; i < halfsize; i++)
2425 {
2426 buf[i] = src[p++]; /* Left channel */
2427 buf2[i] = src[p++]; /* Right channel */
2428 }
2429}
2430
2431static void gus_uninterleave16(short *buf, int l)
2432{
2433/* This routine uninterleaves 16 bit stereo output (LRLRLR->LLLRRR) */
2434 int i, p = 0, halfsize = l / 2;
2435 short *buf2 = buf + halfsize, *src = (short *) bounce_buf;
2436
2437 memcpy(bounce_buf, (char *) buf, l * 2);
2438
2439 for (i = 0; i < halfsize; i++)
2440 {
2441 buf[i] = src[p++]; /* Left channel */
2442 buf2[i] = src[p++]; /* Right channel */
2443 }
2444}
2445
2446static void gus_audio_output_block(int dev, unsigned long buf, int total_count,
2447 int intrflag)
2448{
2449 struct dma_buffparms *dmap = audio_devs[dev]->dmap_out;
2450
2451 dmap->flags |= DMA_NODMA | DMA_NOTIMEOUT;
2452
2453 pcm_current_buf = buf;
2454 pcm_current_count = total_count;
2455 pcm_current_intrflag = intrflag;
2456 pcm_current_dev = dev;
2457 if (gus_audio_channels == 2)
2458 {
2459 char *b = dmap->raw_buf + (buf - dmap->raw_buf_phys);
2460
2461 if (gus_audio_bits == 8)
2462 gus_uninterleave8(b, total_count);
2463 else
2464 gus_uninterleave16((short *) b, total_count / 2);
2465 }
2466 gus_transfer_output_block(dev, buf, total_count, intrflag, 0);
2467}
2468
/*
 * Start a recording DMA transfer of 'count' bytes into 'buf' and
 * program the GF1 sampling control register (0x49) accordingly.
 */
static void gus_audio_start_input(int dev, unsigned long buf, int count,
		int intrflag)
{
	unsigned long flags;
	unsigned char mode;

	spin_lock_irqsave(&gus_lock,flags);

	DMAbuf_start_dma(dev, buf, count, DMA_MODE_READ);
	mode = 0xa0;		/* DMA IRQ enabled, invert MSB */

	if (audio_devs[dev]->dmap_in->dma > 3)
		mode |= 0x04;	/* 16 bit DMA channel */
	if (gus_audio_channels > 1)
		mode |= 0x02;	/* Stereo */
	mode |= 0x01;		/* DMA enable */

	gus_write8(0x49, mode);
	spin_unlock_irqrestore(&gus_lock,flags);
}
2489
/*
 * Prepare the card for recording: program the sampling-rate divider
 * and reject formats the hardware cannot record (only 8-bit works).
 * Returns 0 or -EINVAL.
 */
static int gus_audio_prepare_for_input(int dev, int bsize, int bcount)
{
	unsigned int rate;

	gus_audio_bsize = bsize;
	audio_devs[dev]->dmap_in->flags |= DMA_NODMA;
	/* Recording-rate divider for the 9.8784 MHz clock, rounded to nearest */
	rate = (((9878400 + gus_audio_speed / 2) / (gus_audio_speed + 2)) + 8) / 16;

	gus_write8(0x48, rate & 0xff);	/* Set sampling rate */

	if (gus_audio_bits != 8)
	{
/*		printk("GUS Error: 16 bit recording not supported\n");*/
		return -EINVAL;
	}
	return 0;
}
2507
/*
 * Prepare playback: carve the on-board DRAM into pcm_nblk fragments of
 * pcm_bsize bytes per channel and reset the fragment queue.
 * Always returns 0.
 */
static int gus_audio_prepare_for_output(int dev, int bsize, int bcount)
{
	int i;

	long mem_ptr, mem_size;

	audio_devs[dev]->dmap_out->flags |= DMA_NODMA | DMA_NOTIMEOUT;
	mem_ptr = 0;
	mem_size = gus_mem_size / gus_audio_channels;

	/* A voice can address at most one 256k bank */
	if (mem_size > (256 * 1024))
		mem_size = 256 * 1024;

	pcm_bsize = bsize / gus_audio_channels;
	pcm_head = pcm_tail = pcm_qlen = 0;

	pcm_nblk = 2;	/* MAX_PCM_BUFFERS; */
	if ((pcm_bsize * pcm_nblk) > mem_size)
		pcm_nblk = mem_size / pcm_bsize;

	for (i = 0; i < pcm_nblk; i++)
		pcm_datasize[i] = 0;

	pcm_banksize = pcm_nblk * pcm_bsize;

	/* 16-bit data must not fill a 256k bank completely */
	if (gus_audio_bits != 8 && pcm_banksize == (256 * 1024))
		pcm_nblk--;
	gus_write8(0x41, 0);	/* Disable GF1 DMA */
	return 0;
}
2538
/* Number of PCM fragments queued in on-board DRAM but not yet played. */
static int gus_local_qlen(int dev)
{
	return pcm_qlen;
}
2543
2544
/* OSS audio driver entry points for GUS PCM playback and recording */
static struct audio_driver gus_audio_driver =
{
	.owner = THIS_MODULE,
	.open = gus_audio_open,
	.close = gus_audio_close,
	.output_block = gus_audio_output_block,
	.start_input = gus_audio_start_input,
	.ioctl = gus_audio_ioctl,
	.prepare_for_input = gus_audio_prepare_for_input,
	.prepare_for_output = gus_audio_prepare_for_output,
	.halt_io = gus_audio_reset,
	.local_qlen = gus_local_qlen,
};
2558
/*
 * Initialise a newly allocated voice from the MIDI channel state:
 * program, expression/main volume, panning and bender range.
 */
static void guswave_setup_voice(int dev, int voice, int chn)
{
	struct channel_info *info = &synth_devs[dev]->chn_info[chn];

	guswave_set_instr(dev, voice, info->pgm_num);
	voices[voice].expression_vol = info->controllers[CTL_EXPRESSION];	/* Just MSB */
	voices[voice].main_vol = (info->controllers[CTL_MAIN_VOLUME] * 100) / (unsigned) 128;
	voices[voice].panning = (info->controllers[CTL_PAN] * 2) - 128;
	voices[voice].bender = 0;
	voices[voice].bender_range = info->bender_range;

	if (chn == 9)
		voices[voice].fixed_pitch = 1;	/* MIDI channel 10 (percussion): no pitch tracking */
}
2573
2574static void guswave_bender(int dev, int voice, int value)
2575{
2576 int freq;
2577 unsigned long flags;
2578
2579 voices[voice].bender = value - 8192;
2580 freq = compute_finetune(voices[voice].orig_freq, value - 8192, voices[voice].bender_range, 0);
2581 voices[voice].current_freq = freq;
2582
2583 spin_lock_irqsave(&gus_lock,flags);
2584 gus_select_voice(voice);
2585 gus_voice_freq(freq);
2586 spin_unlock_irqrestore(&gus_lock,flags);
2587}
2588
2589static int guswave_alloc(int dev, int chn, int note, struct voice_alloc_info *alloc)
2590{
2591 int i, p, best = -1, best_time = 0x7fffffff;
2592
2593 p = alloc->ptr;
2594 /*
2595 * First look for a completely stopped voice
2596 */
2597
2598 for (i = 0; i < alloc->max_voice; i++)
2599 {
2600 if (alloc->map[p] == 0)
2601 {
2602 alloc->ptr = p;
2603 return p;
2604 }
2605 if (alloc->alloc_times[p] < best_time)
2606 {
2607 best = p;
2608 best_time = alloc->alloc_times[p];
2609 }
2610 p = (p + 1) % alloc->max_voice;
2611 }
2612
2613 /*
2614 * Then look for a releasing voice
2615 */
2616
2617 for (i = 0; i < alloc->max_voice; i++)
2618 {
2619 if (alloc->map[p] == 0xffff)
2620 {
2621 alloc->ptr = p;
2622 return p;
2623 }
2624 p = (p + 1) % alloc->max_voice;
2625 }
2626 if (best >= 0)
2627 p = best;
2628
2629 alloc->ptr = p;
2630 return p;
2631}
2632
/* OSS synth driver entry points for the GF1 wavetable engine */
static struct synth_operations guswave_operations =
{
	.owner = THIS_MODULE,
	.id = "GUS",
	.info = &gus_info,
	.midi_dev = 0,
	.synth_type = SYNTH_TYPE_SAMPLE,
	.synth_subtype = SAMPLE_TYPE_GUS,
	.open = guswave_open,
	.close = guswave_close,
	.ioctl = guswave_ioctl,
	.kill_note = guswave_kill_note,
	.start_note = guswave_start_note,
	.set_instr = guswave_set_instr,
	.reset = guswave_reset,
	.hw_control = guswave_hw_control,
	.load_patch = guswave_load_patch,
	.aftertouch = guswave_aftertouch,
	.controller = guswave_controller,
	.panning = guswave_panning,
	.volume_method = guswave_volume_method,
	.bender = guswave_bender,
	.alloc_voice = guswave_alloc,
	.setup_voice = guswave_setup_voice
};
2658
/*
 * Program the GF1 mixer control register from the current line/mic
 * volumes and recording source selection. No-op on GUS MAX, where the
 * CS4231 codec mixer is used instead.
 */
static void set_input_volumes(void)
{
	unsigned long flags;
	unsigned char mask = 0xff & ~0x06;	/* Just line out enabled */

	if (have_gus_max)	/* Don't disturb GUS MAX */
		return;

	spin_lock_irqsave(&gus_lock,flags);

	/*
	 * Enable channels having vol > 10%
	 * Note! bit 0x01 means the line in DISABLED while 0x04 means
	 * the mic in ENABLED.
	 */
	if (gus_line_vol > 10)
		mask &= ~0x01;
	if (gus_mic_vol > 10)
		mask |= 0x04;

	if (recording_active)
	{
		/*
		 * Disable channel, if not selected for recording
		 */
		if (!(gus_recmask & SOUND_MASK_LINE))
			mask |= 0x01;
		if (!(gus_recmask & SOUND_MASK_MIC))
			mask &= ~0x04;
	}
	/* Merge the new enable bits into the cached mixer image */
	mix_image &= ~0x07;
	mix_image |= mask & 0x07;
	outb((mix_image), u_Mixer);

	spin_unlock_irqrestore(&gus_lock,flags);
}
2695
/* Mixer channels the plain GF1 mixer can control */
#define MIX_DEVS	(SOUND_MASK_MIC|SOUND_MASK_LINE| \
			 SOUND_MASK_SYNTH|SOUND_MASK_PCM)
2698
/*
 * OSS mixer ioctl handler for the plain GF1 mixer (pre-MAX cards).
 * Handles the 'M' ioctl family only. Volumes are kept as 0..100 and
 * returned packed as left | (right << 8); the GF1 mixer is mono so
 * both halves carry the same value.
 * Returns 0 or a negative errno.
 */
int gus_default_mixer_ioctl(int dev, unsigned int cmd, void __user *arg)
{
	int vol, val;

	if (((cmd >> 8) & 0xff) != 'M')
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, arg, sizeof(int)))
		return -EFAULT;

	if (_SIOC_DIR(cmd) & _SIOC_WRITE)
	{
		if (__get_user(val, (int __user *) arg))
			return -EFAULT;

		switch (cmd & 0xff)
		{
			case SOUND_MIXER_RECSRC:
				gus_recmask = val & MIX_DEVS;
				if (!(gus_recmask & (SOUND_MASK_MIC | SOUND_MASK_LINE)))
					gus_recmask = SOUND_MASK_MIC;
				/* Note! Input volumes are updated during next open for recording */
				val = gus_recmask;
				break;

			case SOUND_MIXER_MIC:
				vol = val & 0xff;	/* Left-channel byte only; mixer is mono */
				if (vol < 0)
					vol = 0;
				if (vol > 100)
					vol = 100;
				gus_mic_vol = vol;
				set_input_volumes();
				val = vol | (vol << 8);
				break;

			case SOUND_MIXER_LINE:
				vol = val & 0xff;
				if (vol < 0)
					vol = 0;
				if (vol > 100)
					vol = 100;
				gus_line_vol = vol;
				set_input_volumes();
				val = vol | (vol << 8);
				break;

			case SOUND_MIXER_PCM:
				gus_pcm_volume = val & 0xff;
				if (gus_pcm_volume < 0)
					gus_pcm_volume = 0;
				if (gus_pcm_volume > 100)
					gus_pcm_volume = 100;
				gus_audio_update_volume();	/* Reprogram active PCM voices */
				val = gus_pcm_volume | (gus_pcm_volume << 8);
				break;

			case SOUND_MIXER_SYNTH:
				gus_wave_volume = val & 0xff;
				if (gus_wave_volume < 0)
					gus_wave_volume = 0;
				if (gus_wave_volume > 100)
					gus_wave_volume = 100;
				if (active_device == GUS_DEV_WAVE)
				{
					int voice;
					for (voice = 0; voice < nr_voices; voice++)
						dynamic_volume_change(voice);	/* Apply the new vol */
				}
				val = gus_wave_volume | (gus_wave_volume << 8);
				break;

			default:
				return -EINVAL;
		}
	}
	else
	{
		switch (cmd & 0xff)
		{
			/*
			 * Return parameters
			 */
			case SOUND_MIXER_RECSRC:
				val = gus_recmask;
				break;

			case SOUND_MIXER_DEVMASK:
				val = MIX_DEVS;
				break;

			case SOUND_MIXER_STEREODEVS:
				val = 0;	/* All channels are mono */
				break;

			case SOUND_MIXER_RECMASK:
				val = SOUND_MASK_MIC | SOUND_MASK_LINE;
				break;

			case SOUND_MIXER_CAPS:
				val = 0;
				break;

			case SOUND_MIXER_MIC:
				val = gus_mic_vol | (gus_mic_vol << 8);
				break;

			case SOUND_MIXER_LINE:
				val = gus_line_vol | (gus_line_vol << 8);
				break;

			case SOUND_MIXER_PCM:
				val = gus_pcm_volume | (gus_pcm_volume << 8);
				break;

			case SOUND_MIXER_SYNTH:
				val = gus_wave_volume | (gus_wave_volume << 8);
				break;

			default:
				return -EINVAL;
		}
	}
	return __put_user(val, (int __user *)arg);
}
2824
/* OSS mixer driver entry points for the plain GF1 mixer */
static struct mixer_operations gus_mixer_operations =
{
	.owner = THIS_MODULE,
	.id = "GUS",
	.name = "Gravis Ultrasound",
	.ioctl = gus_default_mixer_ioctl
};
2832
/*
 * Allocate a mixer device slot for the GF1 mixer and, on GUS MAX,
 * enable all GF1 mixer channels so recording through the CS4231 works.
 * Returns the allocated mixer device number, or -1 if none was free.
 */
static int __init gus_default_mixer_init(void)
{
	int n;

	if ((n = sound_alloc_mixerdev()) != -1)
	{
		/*
		 * Install our mixer in the allocated slot.
		 * NOTE(review): an earlier comment claimed "don't install if
		 * there is another mixer", but no such check exists here -
		 * verify against the callers.
		 */
		mixer_devs[n] = &gus_mixer_operations;
	}
	if (have_gus_max)
	{
		/*
		 * Enable all mixer channels on the GF1 side. Otherwise recording will
		 * not be possible using GUS MAX.
		 */
		mix_image &= ~0x07;
		mix_image |= 0x04;	/* All channels enabled */
		outb((mix_image), u_Mixer);
	}
	return n;
}
2857
/*
 * Probe and initialize a GUS/InterWave card: validate IRQ/DMA settings,
 * identify the model (2.x, 3.4, 3.7 with ICS2101 mixer, or GUS MAX with a
 * CS4231 codec), allocate the instrument table, and register the synth,
 * PCM and mixer devices, recording them in hw_config->slots[].
 * Returns early (without registering anything) on invalid configuration
 * or allocation failure.
 */
void __init gus_wave_init(struct address_info *hw_config)
{
	unsigned long flags;
	unsigned char val;
	char *model_num = "2.4";
	char tmp[64];
	int gus_type = 0x24;	/* 2.4 */

	int irq = hw_config->irq, dma = hw_config->dma, dma2 = hw_config->dma2;
	int sdev;

	hw_config->slots[0] = -1;	/* No wave */
	hw_config->slots[1] = -1;	/* No ad1848 */
	hw_config->slots[4] = -1;	/* No audio */
	hw_config->slots[5] = -1;	/* No mixer */

	/* PnP cards get their IRQ from the PnP layer, so skip the check. */
	if (!gus_pnp_flag)
	{
		if (irq < 0 || irq > 15)
		{
			printk(KERN_ERR "ERROR! Invalid IRQ#%d. GUS Disabled", irq);
			return;
		}
	}

	if (dma < 0 || dma > 7 || dma == 4)
	{
		printk(KERN_ERR "ERROR! Invalid DMA#%d. GUS Disabled", dma);
		return;
	}
	gus_irq = irq;
	gus_dma = dma;
	gus_dma2 = dma2;
	gus_hw_config = hw_config;

	if (gus_dma2 == -1)
		gus_dma2 = dma;

	/*
	 * Try to identify the GUS model.
	 *
	 * Versions < 3.6 don't have the digital ASIC. Try to probe it first.
	 */

	spin_lock_irqsave(&gus_lock,flags);
	outb((0x20), gus_base + 0x0f);
	val = inb(gus_base + 0x0f);
	spin_unlock_irqrestore(&gus_lock,flags);

	if (gus_pnp_flag || (val != 0xff && (val & 0x06)))	/* Should be 0x02?? */
	{
		int ad_flags = 0;

		if (gus_pnp_flag)
			ad_flags = 0x12345678;	/* Interwave "magic" */
		/*
		 * It has the digital ASIC so the card is at least v3.4.
		 * Next try to detect the true model.
		 */

		if (gus_pnp_flag)	/* Hack hack hack */
			val = 10;
		else
			val = inb(u_MixSelect);

		/*
		 * Value 255 means pre-3.7 which don't have mixer.
		 * Values 5 thru 9 mean v3.7 which has a ICS2101 mixer.
		 * 10 and above is GUS MAX which has the CS4231 codec/mixer.
		 *
		 */

		if (val == 255 || val < 5)
		{
			model_num = "3.4";
			gus_type = 0x34;
		}
		else if (val < 10)
		{
			model_num = "3.7";
			gus_type = 0x37;
			mixer_type = ICS2101;
			/* NOTE(review): return value ignored; u_MixSelect is
			 * requested AGAIN in the mixer switch at the bottom of
			 * this function -- the second request will fail.
			 * Confirm and drop one of the two. */
			request_region(u_MixSelect, 1, "GUS mixer");
		}
		else
		{
			struct resource *ports;
			ports = request_region(gus_base + 0x10c, 4, "ad1848");
			model_num = "MAX";
			gus_type = 0x40;
			mixer_type = CS4231;
#ifdef CONFIG_SOUND_GUSMAX
			{
				unsigned char max_config = 0x40;	/* Codec enable */

				if (gus_dma2 == -1)
					gus_dma2 = gus_dma;

				if (gus_dma > 3)
					max_config |= 0x10;	/* 16 bit capture DMA */

				if (gus_dma2 > 3)
					max_config |= 0x20;	/* 16 bit playback DMA */

				max_config |= (gus_base >> 4) & 0x0f;	/* Extract the X from 2X0 */

				outb((max_config), gus_base + 0x106);	/* UltraMax control */
			}

			if (!ports)
				goto no_cs4231;

			if (ad1848_detect(ports, &ad_flags, hw_config->osp))
			{
				char *name = "GUS MAX";
				int old_num_mixers = num_mixers;

				if (gus_pnp_flag)
					name = "GUS PnP";

				gus_mic_vol = gus_line_vol = gus_pcm_volume = 100;
				gus_wave_volume = 90;
				have_gus_max = 1;
				if (hw_config->name)
					name = hw_config->name;

				/* Negative IRQ tells ad1848 the line is shared
				 * with the GF1. */
				hw_config->slots[1] = ad1848_init(name, ports,
						-irq, gus_dma2,	/* Playback DMA */
						gus_dma,	/* Capture DMA */
						1,	/* Share DMA channels with GF1 */
						hw_config->osp,
						THIS_MODULE);

				if (num_mixers > old_num_mixers)
				{
					/* GUS has it's own mixer map */
					AD1848_REROUTE(SOUND_MIXER_LINE1, SOUND_MIXER_SYNTH);
					AD1848_REROUTE(SOUND_MIXER_LINE2, SOUND_MIXER_CD);
					AD1848_REROUTE(SOUND_MIXER_LINE3, SOUND_MIXER_LINE);
				}
			}
			else {
				release_region(gus_base + 0x10c, 4);
			  no_cs4231:
				printk(KERN_WARNING "GUS: No CS4231 ??");
			}
#else
			printk(KERN_ERR "GUS MAX found, but not compiled in\n");
#endif
		}
	}
	else
	{
		/*
		 * ASIC not detected so the card must be 2.2 or 2.4.
		 * There could still be the 16-bit/mixer daughter card.
		 */
	}

	if (hw_config->name)
		snprintf(tmp, sizeof(tmp), "%s (%dk)", hw_config->name,
			 (int) gus_mem_size / 1024);
	else if (gus_pnp_flag)
		snprintf(tmp, sizeof(tmp), "Gravis UltraSound PnP (%dk)",
			 (int) gus_mem_size / 1024);
	else
		snprintf(tmp, sizeof(tmp), "Gravis UltraSound %s (%dk)", model_num,
			 (int) gus_mem_size / 1024);


	samples = (struct patch_info *)vmalloc((MAX_SAMPLE + 1) * sizeof(*samples));
	if (samples == NULL)
	{
		printk(KERN_WARNING "gus_init: Cant allocate memory for instrument tables\n");
		return;
	}
	conf_printf(tmp, hw_config);
	strlcpy(gus_info.name, tmp, sizeof(gus_info.name));

	if ((sdev = sound_alloc_synthdev()) == -1)
		printk(KERN_WARNING "gus_init: Too many synthesizers\n");
	else
	{
		voice_alloc = &guswave_operations.alloc;
		if (iw_mode)
			guswave_operations.id = "IWAVE";
		hw_config->slots[0] = sdev;
		synth_devs[sdev] = &guswave_operations;
		sequencer_init();
		gus_tmr_install(gus_base + 8);
	}

	reset_sample_memory();

	gus_initialize();

	if ((gus_mem_size > 0) && !gus_no_wave_dma)
	{
		hw_config->slots[4] = -1;
		if ((gus_devnum = sound_install_audiodrv(AUDIO_DRIVER_VERSION,
				"Ultrasound",
				&gus_audio_driver,
				sizeof(struct audio_driver),
				NEEDS_RESTART |
				((!iw_mode && dma2 != dma && dma2 != -1) ?
					DMA_DUPLEX : 0),
				AFMT_U8 | AFMT_S16_LE,
				NULL, dma, dma2)) < 0)
		{
			return;
		}

		hw_config->slots[4] = gus_devnum;
		audio_devs[gus_devnum]->min_fragment = 9;	/* 2^9 = 512 bytes (original comment said "512k") */
		audio_devs[gus_devnum]->max_fragment = 11;	/* 8k (must match size of bounce_buf */
		audio_devs[gus_devnum]->mixer_dev = -1;	/* Next mixer# */
		audio_devs[gus_devnum]->flags |= DMA_HARDSTOP;
	}

	/*
	 * Mixer dependent initialization.
	 *
	 * NOTE(review): both branches dereference audio_devs[gus_devnum]
	 * even when the audio driver was NOT installed above (gus_mem_size
	 * == 0 or gus_no_wave_dma), in which case gus_devnum is stale --
	 * verify.
	 */

	switch (mixer_type)
	{
	case ICS2101:
		gus_mic_vol = gus_line_vol = gus_pcm_volume = 100;
		gus_wave_volume = 90;
		request_region(u_MixSelect, 1, "GUS mixer");
		hw_config->slots[5] = ics2101_mixer_init();
		audio_devs[gus_devnum]->mixer_dev = hw_config->slots[5];	/* Next mixer# */
		return;

	case CS4231:
		/* Initialized elsewhere (ad1848.c) */
	default:
		hw_config->slots[5] = gus_default_mixer_init();
		audio_devs[gus_devnum]->mixer_dev = hw_config->slots[5];	/* Next mixer# */
		return;
	}
}
3099
/*
 * Undo gus_wave_init(): detach the GUS MAX codec, release the ICS2101
 * mixer-select port, and unload every device slot that was actually
 * registered (slot value -1 means "never installed").  Finally frees the
 * instrument table allocated with vmalloc().
 */
void __exit gus_wave_unload(struct address_info *hw_config)
{
#ifdef CONFIG_SOUND_GUSMAX
	if (have_gus_max)
	{
		ad1848_unload(gus_base + 0x10c,
				-gus_irq,	/* negative: IRQ shared with GF1 */
				gus_dma2,	/* Playback DMA */
				gus_dma,	/* Capture DMA */
				1);	/* Share DMA channels with GF1 */
	}
#endif

	if (mixer_type == ICS2101)
	{
		release_region(u_MixSelect, 1);
	}
	if (hw_config->slots[0] != -1)
		sound_unload_synthdev(hw_config->slots[0]);
	if (hw_config->slots[1] != -1)
		sound_unload_audiodev(hw_config->slots[1]);
	if (hw_config->slots[2] != -1)
		sound_unload_mididev(hw_config->slots[2]);
	if (hw_config->slots[4] != -1)
		sound_unload_audiodev(hw_config->slots[4]);
	if (hw_config->slots[5] != -1)
		sound_unload_mixerdev(hw_config->slots[5]);

	/* vfree(NULL) is a no-op, so this is safe even if init bailed early. */
	vfree(samples);
	samples=NULL;
}
/*
 * Handle a wave (loop-end) interrupt for one voice.  Called in interrupt
 * context with interrupts already off; takes gus_lock for the duration.
 * The action depends on the loop_irq_mode the voice was armed with:
 * note release ramping (LMODE_FINISH) or PCM double-buffer advance
 * (LMODE_PCM / LMODE_PCM_STOP).
 */
static void do_loop_irq(int voice)
{
	unsigned char tmp;
	int mode, parm;

	spin_lock(&gus_lock);
	gus_select_voice(voice);

	tmp = gus_read8(0x00);
	tmp &= ~0x20;		/*
				 * Disable wave IRQ for this_one voice
				 */
	gus_write8(0x00, tmp);

	if (tmp & 0x03)		/* Voice stopped */
		voice_alloc->map[voice] = 0;

	/* Consume the pending mode so a re-entry sees it cleared. */
	mode = voices[voice].loop_irq_mode;
	voices[voice].loop_irq_mode = 0;
	parm = voices[voice].loop_irq_parm;

	switch (mode)
	{
		case LMODE_FINISH:	/*
					 * Final loop finished, shoot volume down
					 */

			if ((int) (gus_read16(0x09) >> 4) < 100)	/*
									 * Get current volume
									 */
			{
				/* Already quiet: stop immediately. */
				gus_voice_off();
				gus_rampoff();
				gus_voice_init(voice);
				break;
			}
			gus_ramp_range(65, 4065);
			gus_ramp_rate(0, 63);	/*
						 * Fastest possible rate
						 */
			gus_rampon(0x20 | 0x40);	/*
							 * Ramp down, once, irq
							 */
			voices[voice].volume_irq_mode = VMODE_HALT;
			break;

		case LMODE_PCM_STOP:
			pcm_active = 0;	/* Signal to the play_next_pcm_block routine */
			/* fall through -- a stopping block still advances the queue */
		case LMODE_PCM:
			{
				pcm_qlen--;
				pcm_head = (pcm_head + 1) % pcm_nblk;
				if (pcm_qlen && pcm_active)
				{
					play_next_pcm_block();
				}
				else
				{
					/* Underrun. Just stop the voice */
					gus_select_voice(0);	/* Left channel */
					gus_voice_off();
					gus_rampoff();
					gus_select_voice(1);	/* Right channel */
					gus_voice_off();
					gus_rampoff();
					pcm_active = 0;
				}

				/*
				 * If the queue was full before this interrupt, the DMA transfer was
				 * suspended. Let it continue now.
				 */

				if (audio_devs[gus_devnum]->dmap_out->qlen > 0)
					DMAbuf_outputintr(gus_devnum, 0);
			}
			break;

		default:
			break;
	}
	spin_unlock(&gus_lock);
}
3215
/*
 * Handle a volume-ramp interrupt for one voice.  Every branch of the
 * switch must drop gus_lock itself (note: BEFORE calling back into
 * synth code such as gus_voice_init/step_envelope/guswave_*), which is
 * why there is no common unlock at the end.
 */
static void do_volume_irq(int voice)
{
	unsigned char tmp;
	int mode, parm;
	unsigned long flags;

	spin_lock_irqsave(&gus_lock,flags);

	gus_select_voice(voice);
	tmp = gus_read8(0x0d);
	tmp &= ~0x20;		/*
				 * Disable volume ramp IRQ
				 */
	gus_write8(0x0d, tmp);

	/* Consume the pending mode before dispatching. */
	mode = voices[voice].volume_irq_mode;
	voices[voice].volume_irq_mode = 0;
	parm = voices[voice].volume_irq_parm;

	switch (mode)
	{
		case VMODE_HALT:	/* Decay phase finished */
			if (iw_mode)
				gus_write8(0x15, 0x02);	/* Set voice deactivate bit of SMSI */
			spin_unlock_irqrestore(&gus_lock,flags);
			gus_voice_init(voice);
			break;

		case VMODE_ENVELOPE:
			gus_rampoff();
			spin_unlock_irqrestore(&gus_lock,flags);
			step_envelope(voice);
			break;

		case VMODE_START_NOTE:
			spin_unlock_irqrestore(&gus_lock,flags);
			/* A note was queued while the voice was ramping down:
			 * start it now, then apply any deferred kill/instrument
			 * change. */
			guswave_start_note2(voices[voice].dev_pending, voice,
				     voices[voice].note_pending, voices[voice].volume_pending);
			if (voices[voice].kill_pending)
				guswave_kill_note(voices[voice].dev_pending, voice,
					  voices[voice].note_pending, 0);

			if (voices[voice].sample_pending >= 0)
			{
				guswave_set_instr(voices[voice].dev_pending, voice,
					  voices[voice].sample_pending);
				voices[voice].sample_pending = -1;
			}
			break;

		default:
			spin_unlock_irqrestore(&gus_lock,flags);
	}
}
/*
 * Top-level GF1 voice interrupt dispatcher -- called in irq context.
 * Repeatedly reads the IRQ source register (0x0f) and dispatches wave and
 * volume interrupts until the register reports no interrupt pending.
 * The wave_ignore/volume_ignore bitmasks guarantee each voice is serviced
 * at most once per kind per invocation, preventing livelock on a voice
 * that immediately re-raises its interrupt.
 */
void gus_voice_irq(void)
{
	unsigned long wave_ignore = 0, volume_ignore = 0;
	unsigned long voice_bit;

	unsigned char src, voice;

	while (1)
	{
		src = gus_read8(0x0f);	/*
					 * Get source info
					 */
		voice = src & 0x1f;
		src &= 0xc0;

		if (src == (0x80 | 0x40))
			return;	/*
				 * No interrupt
				 */

		voice_bit = 1 << voice;

		if (!(src & 0x80))	/*
					 * Wave IRQ pending
					 */
			if (!(wave_ignore & voice_bit) && (int) voice < nr_voices)	/*
										 * Not done
										 * yet
										 */
			{
				wave_ignore |= voice_bit;
				do_loop_irq(voice);
			}
		if (!(src & 0x40))	/*
					 * Volume IRQ pending
					 */
			if (!(volume_ignore & voice_bit) && (int) voice < nr_voices)	/*
										 * Not done
										 * yet
										 */
			{
				volume_ignore |= voice_bit;
				do_volume_irq(voice);
			}
	}
}
3317
/*
 * GF1 DMA completion handler: services both DRAM upload/PCM playback DMA
 * (status register 0x41) and sampling/record DMA (status register 0x49).
 * Which playback action to take is decided by the active_device tag set
 * by the code that started the transfer.
 */
void guswave_dma_irq(void)
{
	unsigned char status;

	status = gus_look8(0x41);	/* Get DMA IRQ Status */
	if (status & 0x40)	/* DMA interrupt pending */
		switch (active_device)
		{
		case GUS_DEV_WAVE:
			/* Sample upload to on-board DRAM finished. */
			wake_up(&dram_sleeper);
			break;

		case GUS_DEV_PCM_CONTINUE:	/* Left channel data transferred */
			gus_write8(0x41, 0);	/* Disable GF1 DMA */
			/* Kick off the second (right-channel) half of the block. */
			gus_transfer_output_block(pcm_current_dev, pcm_current_buf,
					pcm_current_count,
					pcm_current_intrflag, 1);
			break;

		case GUS_DEV_PCM_DONE:	/* Right or mono channel data transferred */
			gus_write8(0x41, 0);	/* Disable GF1 DMA */
			if (pcm_qlen < pcm_nblk)
			{
				dma_active = 0;
				if (gus_busy)
				{
					if (audio_devs[gus_devnum]->dmap_out->qlen > 0)
						DMAbuf_outputintr(gus_devnum, 0);
				}
			}
			break;

		default:
			break;
		}
	status = gus_look8(0x49);	/*
					 * Get Sampling IRQ Status
					 */
	if (status & 0x40)	/*
				 * Sampling Irq pending
				 */
	{
		DMAbuf_inputintr(gus_devnum);
	}
}
3363
3364/*
3365 * Timer stuff
3366 */
3367
/* I/O port addresses of the GF1 timer select/data registers; filled in by
 * gus_tmr_install(). */
static volatile int select_addr, data_addr;
/* Which timer (1 or 2) was last armed -- used by gus_tmr_restart(). */
static volatile int curr_timer;
3370
3371void gus_timer_command(unsigned int addr, unsigned int val)
3372{
3373 int i;
3374
3375 outb(((unsigned char) (addr & 0xff)), select_addr);
3376
3377 for (i = 0; i < 2; i++)
3378 inb(select_addr);
3379
3380 outb(((unsigned char) (val & 0xff)), data_addr);
3381
3382 for (i = 0; i < 2; i++)
3383 inb(select_addr);
3384}
3385
/*
 * Program one of the two GF1 timers to fire every 'interval' counts and
 * enable its IRQ.  The hardware counts UP from the written value to 256,
 * hence the 256 - interval.  Records the choice in curr_timer so
 * gus_tmr_restart() can re-enable the same timer later.
 */
static void arm_timer(int timer, unsigned int interval)
{
	curr_timer = timer;

	if (timer == 1)
	{
		gus_write8(0x46, 256 - interval);	/* Set counter for timer 1 */
		gus_write8(0x45, 0x04);	/* Enable timer 1 IRQ */
		gus_timer_command(0x04, 0x01);	/* Start timer 1 */
	}
	else
	{
		gus_write8(0x47, 256 - interval);	/* Set counter for timer 2 */
		gus_write8(0x45, 0x08);	/* Enable timer 2 IRQ */
		gus_timer_command(0x04, 0x02);	/* Start timer 2 */
	}

	gus_timer_enabled = 1;
}
3405
/*
 * Arm a GF1 timer for approximately usecs_per_tick microseconds.
 * Timer 1 ticks every 80 us, timer 2 every 320 us; the slow timer is
 * chosen when the request exceeds timer 1's 256-count range.  Returns
 * the tick length actually obtained, in microseconds.
 */
static unsigned int gus_tmr_start(int dev, unsigned int usecs_per_tick)
{
	int use_slow = (usecs_per_tick > (256 * 80));
	int resolution = use_slow ? 320 : 80;	/* usec per hardware count */
	int count = (usecs_per_tick + (resolution / 2)) / resolution;

	arm_timer(use_slow ? 2 : 1, count);

	return count * resolution;
}
3426
/* Mask both GF1 timer IRQs and remember that the timer is off. */
static void gus_tmr_disable(int dev)
{
	gus_write8(0x45, 0);	/* Disable both timers */
	gus_timer_enabled = 0;
}
3432
3433static void gus_tmr_restart(int dev)
3434{
3435 if (curr_timer == 1)
3436 gus_write8(0x45, 0x04); /* Start timer 1 again */
3437 else
3438 gus_write8(0x45, 0x08); /* Start timer 2 again */
3439 gus_timer_enabled = 1;
3440}
3441
/*
 * Low-level timer descriptor handed to the sound timer core.
 * NOTE(review): positional initializers; the field names of struct
 * sound_lowlev_timer are not visible in this file, so the meaning of the
 * two leading integers (device number and priority, presumably) should
 * be confirmed against sound_config.h before touching this.
 */
static struct sound_lowlev_timer gus_tmr =
{
	0,
	1,
	gus_tmr_start,
	gus_tmr_disable,
	gus_tmr_restart
};
3450
/*
 * Record the GF1 timer register addresses used by gus_timer_command().
 * The actual registration with the sound timer core is compiled out
 * (THIS_GETS_FIXED is apparently never defined), so the 'tmr' local is
 * currently dead beyond its assignment.
 */
static void gus_tmr_install(int io_base)
{
	struct sound_lowlev_timer *tmr;

	select_addr = io_base;
	data_addr = io_base + 1;

	tmr = &gus_tmr;

#ifdef THIS_GETS_FIXED
	sound_timer_init(&gus_tmr, "GUS");
#endif
}
diff --git a/sound/oss/harmony.c b/sound/oss/harmony.c
deleted file mode 100644
index 6601b284f03a..000000000000
--- a/sound/oss/harmony.c
+++ /dev/null
@@ -1,1330 +0,0 @@
1/*
2 sound/oss/harmony.c
3
4 This is a sound driver for ASP's and Lasi's Harmony sound chip
5 and is unlikely to be used for anything other than on a HP PA-RISC.
6
7 Harmony is found in HP 712s, 715/new and many other GSC based machines.
8 On older 715 machines you'll find the technically identical chip
 called 'Vivace'. Both Harmony and Vivace are supported by this driver.
10
11 Copyright 2000 (c) Linuxcare Canada, Alex deVries <alex@onefishtwo.ca>
12 Copyright 2000-2003 (c) Helge Deller <deller@gmx.de>
13 Copyright 2001 (c) Matthieu Delahaye <delahaym@esiee.fr>
14 Copyright 2001 (c) Jean-Christophe Vaugeois <vaugeoij@esiee.fr>
15 Copyright 2004 (c) Stuart Brady <sdbrady@ntlworld.com>
16
17
18TODO:
19 - fix SNDCTL_DSP_GETOSPACE and SNDCTL_DSP_GETISPACE ioctls to
20 return the real values
21 - add private ioctl for selecting line- or microphone input
22 (only one of them is available at the same time)
23 - add module parameters
24 - implement mmap functionality
25 - implement gain meter ?
26 - ...
27*/
28
29#include <linux/delay.h>
30#include <linux/errno.h>
31#include <linux/init.h>
32#include <linux/interrupt.h>
33#include <linux/ioport.h>
34#include <linux/types.h>
35#include <linux/mm.h>
36#include <linux/pci.h>
37
38#include <asm/parisc-device.h>
39#include <asm/io.h>
40
41#include "sound_config.h"
42
43
44#define PFX "harmony: "
45#define HARMONY_VERSION "V0.9a"
46
47#undef DEBUG
48#ifdef DEBUG
49# define DPRINTK printk
50#else
51# define DPRINTK(x,...)
52#endif
53
54
55#define MAX_BUFS 10 /* maximum number of rotating buffers */
56#define HARMONY_BUF_SIZE 4096 /* needs to be a multiple of PAGE_SIZE (4096)! */
57
58#define CNTL_C 0x80000000
59#define CNTL_ST 0x00000020
60#define CNTL_44100 0x00000015 /* HARMONY_SR_44KHZ */
61#define CNTL_8000 0x00000008 /* HARMONY_SR_8KHZ */
62
63#define GAINCTL_HE 0x08000000
64#define GAINCTL_LE 0x04000000
65#define GAINCTL_SE 0x02000000
66
67#define DSTATUS_PN 0x00000200
68#define DSTATUS_RN 0x00000002
69
70#define DSTATUS_IE 0x80000000
71
72#define HARMONY_DF_16BIT_LINEAR 0
73#define HARMONY_DF_8BIT_ULAW 1
74#define HARMONY_DF_8BIT_ALAW 2
75
76#define HARMONY_SS_MONO 0
77#define HARMONY_SS_STEREO 1
78
79#define HARMONY_SR_8KHZ 0x08
80#define HARMONY_SR_16KHZ 0x09
81#define HARMONY_SR_27KHZ 0x0A
82#define HARMONY_SR_32KHZ 0x0B
83#define HARMONY_SR_48KHZ 0x0E
84#define HARMONY_SR_9KHZ 0x0F
85#define HARMONY_SR_5KHZ 0x10
86#define HARMONY_SR_11KHZ 0x11
87#define HARMONY_SR_18KHZ 0x12
88#define HARMONY_SR_22KHZ 0x13
89#define HARMONY_SR_37KHZ 0x14
90#define HARMONY_SR_44KHZ 0x15
91#define HARMONY_SR_33KHZ 0x16
92#define HARMONY_SR_6KHZ 0x17
93
94/*
95 * Some magics numbers used to auto-detect file formats
96 */
97
98#define HARMONY_MAGIC_8B_ULAW 1
99#define HARMONY_MAGIC_8B_ALAW 27
100#define HARMONY_MAGIC_16B_LINEAR 3
101#define HARMONY_MAGIC_MONO 1
102#define HARMONY_MAGIC_STEREO 2
103
104/*
105 * Channels Positions in mixer register
106 */
107
108#define GAIN_HE_SHIFT 27
109#define GAIN_HE_MASK ( 1 << GAIN_HE_SHIFT)
110#define GAIN_LE_SHIFT 26
111#define GAIN_LE_MASK ( 1 << GAIN_LE_SHIFT)
112#define GAIN_SE_SHIFT 25
113#define GAIN_SE_MASK ( 1 << GAIN_SE_SHIFT)
114#define GAIN_IS_SHIFT 24
115#define GAIN_IS_MASK ( 1 << GAIN_IS_SHIFT)
116#define GAIN_MA_SHIFT 20
117#define GAIN_MA_MASK ( 0x0f << GAIN_MA_SHIFT)
118#define GAIN_LI_SHIFT 16
119#define GAIN_LI_MASK ( 0x0f << GAIN_LI_SHIFT)
120#define GAIN_RI_SHIFT 12
121#define GAIN_RI_MASK ( 0x0f << GAIN_RI_SHIFT)
122#define GAIN_LO_SHIFT 6
123#define GAIN_LO_MASK ( 0x3f << GAIN_LO_SHIFT)
124#define GAIN_RO_SHIFT 0
125#define GAIN_RO_MASK ( 0x3f << GAIN_RO_SHIFT)
126
127
128#define MAX_OUTPUT_LEVEL (GAIN_RO_MASK >> GAIN_RO_SHIFT)
129#define MAX_INPUT_LEVEL (GAIN_RI_MASK >> GAIN_RI_SHIFT)
130#define MAX_MONITOR_LEVEL (GAIN_MA_MASK >> GAIN_MA_SHIFT)
131
132#define MIXER_INTERNAL SOUND_MIXER_LINE1
133#define MIXER_LINEOUT SOUND_MIXER_LINE2
134#define MIXER_HEADPHONES SOUND_MIXER_LINE3
135
136#define MASK_INTERNAL SOUND_MASK_LINE1
137#define MASK_LINEOUT SOUND_MASK_LINE2
138#define MASK_HEADPHONES SOUND_MASK_LINE3
139
140/*
141 * Channels Mask in mixer register
142 */
143
144#define GAIN_TOTAL_SILENCE 0x00F00FFF
145#define GAIN_DEFAULT 0x0FF00000
146
147
/*
 * Memory-mapped register layout of the Harmony/Vivace chip.  The field
 * order mirrors the hardware register offsets, so members must not be
 * reordered or resized.
 */
struct harmony_hpa {
	u8 unused000;
	u8 id;
	u8 teleshare_id;
	u8 unused003;
	u32 reset;
	u32 cntl;	/* control: format/stereo/rate, CNTL_C handshake bit */
	u32 gainctl;
	u32 pnxtadd;	/* next playback buffer address */
	u32 pcuradd;	/* current playback address */
	u32 rnxtadd;	/* next record buffer address */
	u32 rcuradd;	/* current record address */
	u32 dstatus;	/* interrupt status/enable (DSTATUS_*) */
	u32 ov;
	u32 pio;
	u32 unused02c;
	u32 unused030[3];
	u32 diag;
};
167
/*
 * Complete driver state.  A single static instance ('harmony' below) is
 * used, matching the one-chip-per-machine hardware.
 */
struct harmony_dev {
	struct harmony_hpa *hpa;	/* mapped chip registers */
	struct parisc_device *dev;
	u32 current_gain;
	u32 dac_rate;		/* 8000 ... 48000 (Hz) */
	u8 data_format;		/* HARMONY_DF_xx_BIT_xxx */
	u8 sample_rate;		/* HARMONY_SR_xx_KHZ */
	u8 stereo_select;	/* HARMONY_SS_MONO or HARMONY_SS_STEREO */
	int format_initialized :1;	/* cleared on open; set by update_control */
	int suspended_playing :1;
	int suspended_recording :1;

	int blocked_playing :1;		/* a writer is sleeping on wq_play */
	int blocked_recording :1;	/* a reader is sleeping on wq_record */
	int audio_open :1;		/* /dev/dsp exclusivity flag */
	int mixer_open :1;

	wait_queue_head_t wq_play, wq_record;
	int first_filled_play;	/* first buffer containing data (next to play) */
	int nb_filled_play;
	int play_offset;	/* write offset within the buffer being filled */
	int first_filled_record;
	int nb_filled_record;

	int dsp_unit, mixer_unit;
};
194
195
196static struct harmony_dev harmony;
197
198
199/*
200 * Dynamic sound buffer allocation and DMA memory
201 */
202
/* One contiguous DMA area holding several HARMONY_BUF_SIZE pages. */
struct harmony_buffer {
	unsigned char *addr;	/* CPU-visible address */
	dma_addr_t dma_handle;	/* bus address handed to the chip */
	int dma_coherent;	/* Zero if dma_alloc_coherent() fails */
	unsigned int len;	/* total size in bytes */
};
209
210/*
211 * Harmony memory buffers
212 */
213
214static struct harmony_buffer played_buf, recorded_buf, silent, graveyard;
215
216
/*
 * Write back + invalidate the CPU cache for a sub-range of buffer 'b',
 * but only when the memory is not DMA-coherent.  Note that 'b' is a
 * struct harmony_buffer passed BY NAME (not a pointer), so this is only
 * usable with the static buffers above.
 */
#define CHECK_WBACK_INV_OFFSET(b,offset,len) \
	do { if (!b.dma_coherent) \
		dma_cache_wback_inv((unsigned long)b.addr+offset,len); \
	} while (0)
221
222
/*
 * Allocate a DMA area of buffer_count * HARMONY_BUF_SIZE bytes.
 * DMA-coherent memory is preferred; if unavailable, fall back to
 * kmalloc() and mark the buffer non-coherent so users must flush caches
 * explicitly (see CHECK_WBACK_INV_OFFSET).  Returns 0 on success or
 * -EBUSY when no memory could be obtained.
 */
static int __init harmony_alloc_buffer(struct harmony_buffer *b,
		unsigned int buffer_count)
{
	b->len = buffer_count * HARMONY_BUF_SIZE;
	b->addr = dma_alloc_coherent(&harmony.dev->dev,
			b->len, &b->dma_handle, GFP_KERNEL|GFP_DMA);
	if (b->addr && b->dma_handle) {
		b->dma_coherent = 1;
		DPRINTK(KERN_INFO PFX "coherent memory: 0x%lx, played_buf: 0x%lx\n",
				(unsigned long)b->dma_handle, (unsigned long)b->addr);
	} else {
		b->dma_coherent = 0;
		/* kmalloc()ed memory will HPMC on ccio machines ! */
		b->addr = kmalloc(b->len, GFP_KERNEL);
		if (!b->addr) {
			printk(KERN_ERR PFX "couldn't allocate memory\n");
			return -EBUSY;
		}
		b->dma_handle = __pa(b->addr);
	}
	return 0;
}
245
246static void __exit harmony_free_buffer(struct harmony_buffer *b)
247{
248 if (!b->addr)
249 return;
250
251 if (b->dma_coherent)
252 dma_free_coherent(&harmony.dev->dev,
253 b->len, b->addr, b->dma_handle);
254 else
255 kfree(b->addr);
256
257 memset(b, 0, sizeof(*b));
258}
259
260
261
262/*
263 * Low-Level sound-chip programming
264 */
265
/* Busy-wait until the chip leaves control mode (CNTL_C bit clears),
 * i.e. until the last configuration written to cntl has been accepted. */
static void __inline__ harmony_wait_CNTL(void)
{
	/* Wait until we're out of control mode */
	while (gsc_readl(&harmony.hpa->cntl) & CNTL_C)
		/* wait */ ;
}
272
273
/*
 * Push the cached data_format/stereo_select/sample_rate settings into
 * the chip's CNTL register.  Writing with CNTL_C set starts the
 * reconfiguration; the chip clears the bit when done (polled by
 * harmony_wait_CNTL()).  Also marks the format as initialized.
 */
static void harmony_update_control(void)
{
	u32 default_cntl;

	/* Set CNTL */
	default_cntl = (CNTL_C |  		/* The C bit */
		(harmony.data_format << 6) |	/* Set the data format */
		(harmony.stereo_select << 5) |	/* Stereo select */
		(harmony.sample_rate));		/* Set sample rate */
	harmony.format_initialized = 1;

	/* initialize CNTL */
	gsc_writel(default_cntl, &harmony.hpa->cntl);
}
288
289static void harmony_set_control(u8 data_format, u8 sample_rate, u8 stereo_select)
290{
291 harmony.sample_rate = sample_rate;
292 harmony.data_format = data_format;
293 harmony.stereo_select = stereo_select;
294 harmony_update_control();
295}
296
/* Change only the sample rate (a HARMONY_SR_* code) and reprogram. */
static void harmony_set_rate(u8 data_rate)
{
	harmony.sample_rate = data_rate;
	harmony_update_control();
}
302
303static int harmony_detect_rate(int *freq)
304{
305 int newrate;
306 switch (*freq) {
307 case 8000: newrate = HARMONY_SR_8KHZ; break;
308 case 16000: newrate = HARMONY_SR_16KHZ; break;
309 case 27428: newrate = HARMONY_SR_27KHZ; break;
310 case 32000: newrate = HARMONY_SR_32KHZ; break;
311 case 48000: newrate = HARMONY_SR_48KHZ; break;
312 case 9600: newrate = HARMONY_SR_9KHZ; break;
313 case 5512: newrate = HARMONY_SR_5KHZ; break;
314 case 11025: newrate = HARMONY_SR_11KHZ; break;
315 case 18900: newrate = HARMONY_SR_18KHZ; break;
316 case 22050: newrate = HARMONY_SR_22KHZ; break;
317 case 37800: newrate = HARMONY_SR_37KHZ; break;
318 case 44100: newrate = HARMONY_SR_44KHZ; break;
319 case 33075: newrate = HARMONY_SR_33KHZ; break;
320 case 6615: newrate = HARMONY_SR_6KHZ; break;
321 default: newrate = HARMONY_SR_8KHZ;
322 *freq = 8000; break;
323 }
324 return newrate;
325}
326
/* Change only the data format (a HARMONY_DF_* code) and reprogram. */
static void harmony_set_format(u8 data_format)
{
	harmony.data_format = data_format;
	harmony_update_control();
}
332
/* Change only mono/stereo selection (a HARMONY_SS_* code) and reprogram. */
static void harmony_set_stereo(u8 stereo_select)
{
	harmony.stereo_select = stereo_select;
	harmony_update_control();
}
338
/* Mask all chip interrupts (clears DSTATUS_IE) once the chip is idle. */
static void harmony_disable_interrupts(void)
{
	harmony_wait_CNTL();
	gsc_writel(0, &harmony.hpa->dstatus);
}
344
/* Enable chip interrupts (sets DSTATUS_IE) once the chip is idle. */
static void harmony_enable_interrupts(void)
{
	harmony_wait_CNTL();
	gsc_writel(DSTATUS_IE, &harmony.hpa->dstatus);
}
350
351/*
352 * harmony_silence()
353 *
354 * This subroutine fills in a buffer starting at location start and
355 * silences for length bytes. This references the current
356 * configuration of the audio format.
357 *
358 */
359
360static void harmony_silence(struct harmony_buffer *buffer, int start, int length)
361{
362 u8 silence_char;
363
364 /* Despite what you hear, silence is different in
365 different audio formats. */
366 switch (harmony.data_format) {
367 case HARMONY_DF_8BIT_ULAW: silence_char = 0x55; break;
368 case HARMONY_DF_8BIT_ALAW: silence_char = 0xff; break;
369 case HARMONY_DF_16BIT_LINEAR: /* fall through */
370 default: silence_char = 0;
371 }
372
373 memset(buffer->addr+start, silence_char, length);
374}
375
376
/*
 * Open the dsp device.  Exclusive: only one opener at a time.  Resets
 * all ring-buffer bookkeeping, programs a default 8 kHz u-law mono
 * format, then clears format_initialized so the first write() may
 * auto-detect a better format, and pre-silences the playback buffers.
 */
static int harmony_audio_open(struct inode *inode, struct file *file)
{
	if (harmony.audio_open)
		return -EBUSY;

	harmony.audio_open = 1;
	harmony.suspended_playing = harmony.suspended_recording = 1;
	harmony.blocked_playing = harmony.blocked_recording = 0;
	harmony.first_filled_play = harmony.first_filled_record = 0;
	harmony.nb_filled_play = harmony.nb_filled_record = 0;
	harmony.play_offset = 0;
	init_waitqueue_head(&harmony.wq_play);
	init_waitqueue_head(&harmony.wq_record);

	/* Start off in a balanced mode. */
	harmony_set_control(HARMONY_DF_8BIT_ULAW, HARMONY_SR_8KHZ, HARMONY_SS_MONO);
	harmony_update_control();
	harmony.format_initialized = 0;

	/* Clear out all the buffers and flush to cache */
	harmony_silence(&played_buf, 0, HARMONY_BUF_SIZE*MAX_BUFS);
	CHECK_WBACK_INV_OFFSET(played_buf, 0, HARMONY_BUF_SIZE*MAX_BUFS);

	return 0;
}
402
403/*
404 * Release (close) the audio device.
405 */
406
/*
 * Release (close) the audio device.
 * NOTE(review): returning -EBUSY when the device is not open is unusual
 * for a release handler (-EINVAL or 0 would be more conventional) --
 * confirm no caller depends on it before changing.
 */
static int harmony_audio_release(struct inode *inode, struct file *file)
{
	if (!harmony.audio_open)
		return -EBUSY;

	harmony.audio_open = 0;

	return 0;
}
416
417/*
418 * Read recorded data off the audio device.
419 */
420
/*
 * Read recorded data off the audio device.  Blocks on wq_record until at
 * least two capture buffers are filled (starting the recording engine on
 * first use), then returns whole HARMONY_BUF_SIZE chunks to userspace.
 *
 * NOTE(review): each iteration copies a full HARMONY_BUF_SIZE, so a
 * size_count that is not a multiple of the buffer size overruns the
 * caller's buffer -- confirm callers and fix.
 * NOTE(review): interruptible_sleep_on() is racy (a wakeup between the
 * test and the sleep is lost); wait_event_interruptible() is the
 * non-racy replacement.
 */
static ssize_t harmony_audio_read(struct file *file,
                                  char *buffer,
                                  size_t size_count,
                                  loff_t *ppos)
{
	int total_count = (int) size_count;
	int count = 0;
	int buf_to_read;

	while (count<total_count) {
		/* Wait until we're out of control mode */
		harmony_wait_CNTL();

		/* Figure out which buffer to fill in */
		if (harmony.nb_filled_record <= 2) {
			harmony.blocked_recording = 1;
			/* Lazily start the capture engine on first demand. */
			if (harmony.suspended_recording) {
				harmony.suspended_recording = 0;
				harmony_enable_interrupts();
			}

			interruptible_sleep_on(&harmony.wq_record);
			harmony.blocked_recording = 0;
		}

		if (harmony.nb_filled_record < 2)
			return -EBUSY;

		buf_to_read = harmony.first_filled_record;

		/* Copy the page to an aligned buffer */
		if (copy_to_user(buffer+count, recorded_buf.addr +
				 (HARMONY_BUF_SIZE*buf_to_read),
				 HARMONY_BUF_SIZE)) {
			count = -EFAULT;
			break;
		}

		/* Consume the buffer and advance the ring head. */
		harmony.nb_filled_record--;
		harmony.first_filled_record++;
		harmony.first_filled_record %= MAX_BUFS;

		count += HARMONY_BUF_SIZE;
	}
	return count;
}
467
468
469
470
471/*
472 * Here is the place where we try to recognize file format.
473 * Sun/NeXT .au files begin with the string .snd
474 * At offset 12 is specified the encoding.
475 * At offset 16 is specified speed rate
476 * At Offset 20 is specified the numbers of voices
477 */
478
/*
 * Assemble a big-endian u32 from file_header[start..start+3] (the local
 * header buffer of harmony_format_auto_detect).  The whole expansion is
 * parenthesized and carries no trailing semicolon, so it is safe in any
 * expression context -- the original form only worked on the right-hand
 * side of an assignment.
 */
#define four_bytes_to_u32(start) \
	(((u32)file_header[(start)] << 24) | \
	 ((u32)file_header[(start)+1] << 16) | \
	 ((u32)file_header[(start)+2] << 8) | \
	 ((u32)file_header[(start)+3]))
483
/*
 * NOTE(review): test_rate is never used and is syntactically incomplete
 * (the trailing backslash splices the following blank line into the
 * macro body and the 'if' has no statement) -- candidate for removal.
 */
#define test_rate(tested,real_value,harmony_value) if ((tested)<=(real_value))\

486
487static int harmony_format_auto_detect(const char *buffer, int block_size)
488{
489 u8 file_header[24];
490 u32 start_string;
491 int ret = 0;
492
493 if (block_size>24) {
494 if (copy_from_user(file_header, buffer, sizeof(file_header)))
495 ret = -EFAULT;
496
497 start_string = four_bytes_to_u32(0);
498
499 if ((file_header[4]==0) && (start_string==0x2E736E64)) {
500 u32 format;
501 u32 nb_voices;
502 u32 speed;
503
504 format = four_bytes_to_u32(12);
505 nb_voices = four_bytes_to_u32(20);
506 speed = four_bytes_to_u32(16);
507
508 switch (format) {
509 case HARMONY_MAGIC_8B_ULAW:
510 harmony.data_format = HARMONY_DF_8BIT_ULAW;
511 break;
512 case HARMONY_MAGIC_8B_ALAW:
513 harmony.data_format = HARMONY_DF_8BIT_ALAW;
514 break;
515 case HARMONY_MAGIC_16B_LINEAR:
516 harmony.data_format = HARMONY_DF_16BIT_LINEAR;
517 break;
518 default:
519 harmony_set_control(HARMONY_DF_16BIT_LINEAR,
520 HARMONY_SR_44KHZ, HARMONY_SS_STEREO);
521 goto out;
522 }
523 switch (nb_voices) {
524 case HARMONY_MAGIC_MONO:
525 harmony.stereo_select = HARMONY_SS_MONO;
526 break;
527 case HARMONY_MAGIC_STEREO:
528 harmony.stereo_select = HARMONY_SS_STEREO;
529 break;
530 default:
531 harmony.stereo_select = HARMONY_SS_MONO;
532 break;
533 }
534 harmony_set_rate(harmony_detect_rate(&speed));
535 harmony.dac_rate = speed;
536 goto out;
537 }
538 }
539 harmony_set_control(HARMONY_DF_8BIT_ULAW, HARMONY_SR_8KHZ, HARMONY_SS_MONO);
540out:
541 return ret;
542}
543#undef four_bytes_to_u32
544
545
/*
 * Write playback data to the audio device.  On the very first write of
 * an open, the format is auto-detected from the data.  Data is staged
 * into the rotating played_buf pages (flushing caches when the memory is
 * not DMA-coherent); playback interrupts are started once at least four
 * buffers are queued.
 *
 * NOTE(review): interruptible_sleep_on() here has the same lost-wakeup
 * race as in harmony_audio_read() -- wait_event_interruptible() would be
 * the non-racy replacement.
 */
static ssize_t harmony_audio_write(struct file *file,
                                 const char *buffer,
                                 size_t size_count,
                                 loff_t *ppos)
{
	int total_count = (int) size_count;
	int count = 0;
	int frame_size;
	int buf_to_fill;
	int fresh_buffer;

	if (!harmony.format_initialized) {
		if (harmony_format_auto_detect(buffer, total_count))
			return -EFAULT;
	}

	while (count<total_count) {
		/* Wait until we're out of control mode */
		harmony_wait_CNTL();

		/* Figure out which buffer to fill in */
		if (harmony.nb_filled_play+2 >= MAX_BUFS && !harmony.play_offset) {
			/* Ring nearly full: wait for the IRQ handler to drain it. */
			harmony.blocked_playing = 1;
			interruptible_sleep_on(&harmony.wq_play);
			harmony.blocked_playing = 0;
		}
		if (harmony.nb_filled_play+2 >= MAX_BUFS && !harmony.play_offset)
			return -EBUSY;


		/* Continue filling the tail buffer when play_offset is
		 * non-zero; otherwise start a fresh one. */
		buf_to_fill = (harmony.first_filled_play+harmony.nb_filled_play);
		if (harmony.play_offset) {
			buf_to_fill--;
			buf_to_fill += MAX_BUFS;
		}
		buf_to_fill %= MAX_BUFS;

		fresh_buffer = (harmony.play_offset == 0);

		/* Figure out the size of the frame */
		if ((total_count-count) >= HARMONY_BUF_SIZE - harmony.play_offset) {
			frame_size = HARMONY_BUF_SIZE - harmony.play_offset;
		} else {
			frame_size = total_count - count;
			/* Clear out the rest of the buffer, since we'll only be
			   overlaying part of the old buffer with the new data */
			harmony_silence(&played_buf,
				HARMONY_BUF_SIZE*buf_to_fill+frame_size+harmony.play_offset,
				HARMONY_BUF_SIZE-frame_size-harmony.play_offset);
		}

		/* Copy the page to an aligned buffer */
		if (copy_from_user(played_buf.addr +(HARMONY_BUF_SIZE*buf_to_fill) + harmony.play_offset,
				   buffer+count, frame_size))
			return -EFAULT;
		CHECK_WBACK_INV_OFFSET(played_buf, (HARMONY_BUF_SIZE*buf_to_fill + harmony.play_offset),
				       frame_size);

		if (fresh_buffer)
			harmony.nb_filled_play++;

		count += frame_size;
		harmony.play_offset += frame_size;
		harmony.play_offset %= HARMONY_BUF_SIZE;
		/* Start the playback engine once enough data is queued. */
		if (harmony.suspended_playing && (harmony.nb_filled_play>=4))
			harmony_enable_interrupts();
	}

	return count;
}
616
/*
 * poll() support: readable when recorded buffers are queued, writable
 * when playback buffers are filled.
 *
 * NOTE(review): reporting POLLOUT when nb_filled_play != 0 looks
 * inverted -- "writable" conventionally means free space is available,
 * not that data is queued.  Confirm against OSS semantics before
 * changing.
 */
static unsigned int harmony_audio_poll(struct file *file,
                                       struct poll_table_struct *wait)
{
	unsigned int mask = 0;

	if (file->f_mode & FMODE_READ) {
		if (!harmony.suspended_recording)
			poll_wait(file, &harmony.wq_record, wait);
		if (harmony.nb_filled_record)
			mask |= POLLIN | POLLRDNORM;
	}

	if (file->f_mode & FMODE_WRITE) {
		if (!harmony.suspended_playing)
			poll_wait(file, &harmony.wq_play, wait);
		if (harmony.nb_filled_play)
			mask |= POLLOUT | POLLWRNORM;
	}

	return mask;
}
638
/*
 * harmony_audio_ioctl()
 *
 * OSS ioctl handler for /dev/audio.  Supports format, rate and channel
 * selection plus the fragment/space queries; unknown requests return
 * -EINVAL.  Fragment geometry is fixed in this driver (MAX_BUFS buffers
 * of HARMONY_BUF_SIZE bytes), so SNDCTL_DSP_SETFRAGMENT only reports
 * the hardcoded layout back to the caller.
 */
639static int harmony_audio_ioctl(struct inode *inode,
640                                struct file *file,
641                                unsigned int cmd,
642                                unsigned long arg)
643{
644 int ival, new_format;
645 int frag_size, frag_buf;
646 struct audio_buf_info info;
647
648 switch (cmd) {
649 case OSS_GETVERSION:
650 return put_user(SOUND_VERSION, (int *) arg);
651
652 case SNDCTL_DSP_GETCAPS:
653 ival = DSP_CAP_DUPLEX;
654 return put_user(ival, (int *) arg);
655
656 case SNDCTL_DSP_GETFMTS:
657 ival = (AFMT_S16_BE | AFMT_MU_LAW | AFMT_A_LAW );
658 return put_user(ival, (int *) arg);
659
660 case SNDCTL_DSP_SETFMT:
661 if (get_user(ival, (int *) arg))
662 return -EFAULT;
663 if (ival != AFMT_QUERY) {
664 switch (ival) {
665 case AFMT_MU_LAW: new_format = HARMONY_DF_8BIT_ULAW; break;
666 case AFMT_A_LAW: new_format = HARMONY_DF_8BIT_ALAW; break;
667 case AFMT_S16_BE: new_format = HARMONY_DF_16BIT_LINEAR; break;
668 default: {
669 DPRINTK(KERN_WARNING PFX
670 "unsupported sound format 0x%04x requested.\n",
671 ival);
672 ival = AFMT_S16_BE;
673 return put_user(ival, (int *) arg);
674 }
675 }
676 harmony_set_format(new_format);
677 return 0;
678 } else {
679 switch (harmony.data_format) {
680 case HARMONY_DF_8BIT_ULAW: ival = AFMT_MU_LAW; break;
681 case HARMONY_DF_8BIT_ALAW: ival = AFMT_A_LAW; break;
	/* Fixed: was AFMT_U16_BE.  The driver only accepts and
	 * advertises AFMT_S16_BE (see GETFMTS and the switch above),
	 * so AFMT_QUERY must report the same signed format back. */
682 case HARMONY_DF_16BIT_LINEAR: ival = AFMT_S16_BE; break;
683 default: ival = 0;
684 }
685 return put_user(ival, (int *) arg);
686 }
687
688 case SOUND_PCM_READ_RATE:
689 ival = harmony.dac_rate;
690 return put_user(ival, (int *) arg);
691
692 case SNDCTL_DSP_SPEED:
693 if (get_user(ival, (int *) arg))
694 return -EFAULT;
	/* harmony_detect_rate() clamps ival to the nearest supported rate */
695 harmony_set_rate(harmony_detect_rate(&ival));
696 harmony.dac_rate = ival;
697 return put_user(ival, (int*) arg);
698
699 case SNDCTL_DSP_STEREO:
700 if (get_user(ival, (int *) arg))
701 return -EFAULT;
702 if (ival != 0 && ival != 1)
703 return -EINVAL;
704 harmony_set_stereo(ival);
705 return 0;
706
707 case SNDCTL_DSP_CHANNELS:
708 if (get_user(ival, (int *) arg))
709 return -EFAULT;
710 if (ival != 1 && ival != 2) {
	/* unsupported channel count: report the current setting instead */
711 ival = harmony.stereo_select == HARMONY_SS_MONO ? 1 : 2;
712 return put_user(ival, (int *) arg);
713 }
714 harmony_set_stereo(ival-1);
715 return 0;
716
717 case SNDCTL_DSP_GETBLKSIZE:
718 ival = HARMONY_BUF_SIZE;
719 return put_user(ival, (int *) arg);
720
721 case SNDCTL_DSP_NONBLOCK:
722 file->f_flags |= O_NONBLOCK;
723 return 0;
724
725 case SNDCTL_DSP_RESET:
726 if (!harmony.suspended_recording) {
727 /* TODO: stop_recording() */
728 }
729 return 0;
730
731 case SNDCTL_DSP_SETFRAGMENT:
732 if (get_user(ival, (int *)arg))
733 return -EFAULT;
734 frag_size = ival & 0xffff;
735 frag_buf = (ival>>16) & 0xffff;
736 /* TODO: We use hardcoded fragment sizes and numbers for now */
737 frag_size = 12; /* 4096 == 2^12 */
738 frag_buf = MAX_BUFS;
739 ival = (frag_buf << 16) + frag_size;
740 return put_user(ival, (int *) arg);
741
742 case SNDCTL_DSP_GETOSPACE:
743 if (!(file->f_mode & FMODE_WRITE))
744 return -EINVAL;
745 info.fragstotal = MAX_BUFS;
746 info.fragments = MAX_BUFS - harmony.nb_filled_play;
747 info.fragsize = HARMONY_BUF_SIZE;
748 info.bytes = info.fragments * info.fragsize;
749 return copy_to_user((void *)arg, &info, sizeof(info)) ? -EFAULT : 0;
750
751 case SNDCTL_DSP_GETISPACE:
752 if (!(file->f_mode & FMODE_READ))
753 return -EINVAL;
754 info.fragstotal = MAX_BUFS;
755 info.fragments = /*MAX_BUFS-*/ harmony.nb_filled_record;
756 info.fragsize = HARMONY_BUF_SIZE;
757 info.bytes = info.fragments * info.fragsize;
758 return copy_to_user((void *)arg, &info, sizeof(info)) ? -EFAULT : 0;
759
760 case SNDCTL_DSP_SYNC:
761 return 0;
762 }
763
764 return -EINVAL;
765}
766
767
768/*
769 * harmony_interrupt()
770 *
771 * harmony interruption service routine
772 *
773 */
774
/*
 * IRQ handler.  'dev' is the struct harmony_dev registered with
 * request_irq().  DSTATUS_PN means the hardware wants the next playback
 * DMA address; DSTATUS_RN means it wants the next record DMA address.
 * Interrupts are masked on entry and selectively re-enabled depending
 * on whether either direction is still active.
 */
775static irqreturn_t harmony_interrupt(int irq, void *dev, struct pt_regs *regs)
776{
777 u32 dstatus;
778 struct harmony_hpa *hpa;
779
780 /* Setup the hpa */
781 hpa = ((struct harmony_dev *)dev)->hpa;
782 harmony_wait_CNTL();
783
784 /* Read dstatus and pcuradd (the current address) */
785 dstatus = gsc_readl(&hpa->dstatus);
786
787 /* Turn off interrupts */
788 harmony_disable_interrupts();
789
790 /* Check if this is a request to get the next play buffer */
791 if (dstatus & DSTATUS_PN) {
	/* Nothing queued: feed the pre-zeroed 'silent' buffer so the DMA
	 * engine keeps running, and mark playback suspended. */
792 if (!harmony.nb_filled_play) {
793 harmony.suspended_playing = 1;
794 gsc_writel((unsigned long)silent.dma_handle, &hpa->pnxtadd);
795
796 if (!harmony.suspended_recording)
797 harmony_enable_interrupts();
798 } else {
	/* Hand the oldest filled buffer to the hardware and advance the
	 * ring (first_filled_play/nb_filled_play). */
799 harmony.suspended_playing = 0;
800 gsc_writel((unsigned long)played_buf.dma_handle +
801 (HARMONY_BUF_SIZE*harmony.first_filled_play),
802 &hpa->pnxtadd);
803 harmony.first_filled_play++;
804 harmony.first_filled_play %= MAX_BUFS;
805 harmony.nb_filled_play--;
806
807 harmony_enable_interrupts();
808 }
809
810 if (harmony.blocked_playing)
811 wake_up_interruptible(&harmony.wq_play);
812 }
813
814 /* Check if we're being asked to fill in a recording buffer */
815 if (dstatus & DSTATUS_RN) {
	/* Ring nearly full or recording suspended: dump incoming samples
	 * into the 'graveyard' buffer and reset the record ring. */
816 if((harmony.nb_filled_record+2>=MAX_BUFS) || harmony.suspended_recording)
817 {
818 harmony.nb_filled_record = 0;
819 harmony.first_filled_record = 0;
820 harmony.suspended_recording = 1;
821 gsc_writel((unsigned long)graveyard.dma_handle, &hpa->rnxtadd);
822 if (!harmony.suspended_playing)
823 harmony_enable_interrupts();
824 } else {
825 int buf_to_fill;
826 buf_to_fill = (harmony.first_filled_record+harmony.nb_filled_record) % MAX_BUFS;
	/* flush/invalidate cache for the buffer before DMA writes it */
827 CHECK_WBACK_INV_OFFSET(recorded_buf, HARMONY_BUF_SIZE*buf_to_fill, HARMONY_BUF_SIZE);
828 gsc_writel((unsigned long)recorded_buf.dma_handle +
829 HARMONY_BUF_SIZE*buf_to_fill,
830 &hpa->rnxtadd);
831 harmony.nb_filled_record++;
832 harmony_enable_interrupts();
833 }
834
	/* readers sleep until >3 buffers are available; see audio_read */
835 if (harmony.blocked_recording && harmony.nb_filled_record>3)
836 wake_up_interruptible(&harmony.wq_record);
837 }
838 return IRQ_HANDLED;
839}
840
841/*
842 * Sound playing functions
843 */
844
/* File operations for the /dev/dsp (audio) device node. */
845static struct file_operations harmony_audio_fops = {
846 .owner = THIS_MODULE,
847 .llseek = no_llseek,
848 .read = harmony_audio_read,
849 .write = harmony_audio_write,
850 .poll = harmony_audio_poll,
851 .ioctl = harmony_audio_ioctl,
852 .open = harmony_audio_open,
853 .release = harmony_audio_release,
854};
855
/*
 * Hook up the IRQ, register the OSS dsp device and zero all DMA
 * buffers.  Returns 0 on success or a negative errno.
 */
856static int harmony_audio_init(void)
857{
858 /* Request that IRQ */
859 if (request_irq(harmony.dev->irq, harmony_interrupt, 0 ,"harmony", &harmony)) {
860 printk(KERN_ERR PFX "Error requesting irq %d.\n", harmony.dev->irq);
	/* NOTE(review): -EFAULT is an odd errno for a failed request_irq;
	 * propagating its return value (or -EIO) would be more accurate. */
861 return -EFAULT;
862 }
863
864 harmony.dsp_unit = register_sound_dsp(&harmony_audio_fops, -1);
865 if (harmony.dsp_unit < 0) {
866 printk(KERN_ERR PFX "Error registering dsp\n");
867 free_irq(harmony.dev->irq, &harmony);
868 return -EFAULT;
869 }
870
871 /* Clear the buffers so you don't end up with crap in the buffers. */
872 harmony_silence(&played_buf, 0, HARMONY_BUF_SIZE*MAX_BUFS);
873
874 /* Make sure this makes it to cache */
875 CHECK_WBACK_INV_OFFSET(played_buf, 0, HARMONY_BUF_SIZE*MAX_BUFS);
876
877 /* Clear out the silent buffer and flush to cache */
878 harmony_silence(&silent, 0, HARMONY_BUF_SIZE);
879 CHECK_WBACK_INV_OFFSET(silent, 0, HARMONY_BUF_SIZE);
880
881 harmony.audio_open = 0;
882
883 return 0;
884}
885
886
887/*
888 * mixer functions
889 */
890
/* Write the cached gain word (harmony.current_gain) to the hardware
 * gain-control register after waiting for the control bit to clear. */
891static void harmony_mixer_set_gain(void)
892{
893 harmony_wait_CNTL();
894 gsc_writel(harmony.current_gain, &harmony.hpa->gainctl);
895}
896
897/*
898 * Read gain of selected channel.
899 * The OSS rate is from 0 (silent) to 100 -> need some conversions
900 *
901 * The harmony gain are attenuation for output and monitor gain.
902 * is amplifaction for input gain
903 */
904#define to_harmony_level(level,max) ((level)*max/100)
905#define to_oss_level(level,max) ((level)*100/max)
906
/*
 * Return the OSS level (0-100 per channel, right in bits 8-15, left in
 * bits 0-7) for the requested mixer channel, decoded from the cached
 * gain word.  Output and monitor levels are stored as attenuation, so
 * they are inverted (MAX - level) before scaling; input gain is stored
 * as amplification and scales directly.  Returns -EINVAL for channels
 * this mixer does not implement.
 */
907static int harmony_mixer_get_level(int channel)
908{
909 int left_level;
910 int right_level;
911
912 switch (channel) {
913 case SOUND_MIXER_VOLUME:
914 left_level = (harmony.current_gain & GAIN_LO_MASK) >> GAIN_LO_SHIFT;
915 right_level = (harmony.current_gain & GAIN_RO_MASK) >> GAIN_RO_SHIFT;
916 left_level = to_oss_level(MAX_OUTPUT_LEVEL - left_level, MAX_OUTPUT_LEVEL);
917 right_level = to_oss_level(MAX_OUTPUT_LEVEL - right_level, MAX_OUTPUT_LEVEL);
918 return (right_level << 8)+left_level;
919
920 case SOUND_MIXER_IGAIN:
921 left_level = (harmony.current_gain & GAIN_LI_MASK) >> GAIN_LI_SHIFT;
922 right_level= (harmony.current_gain & GAIN_RI_MASK) >> GAIN_RI_SHIFT;
923 left_level = to_oss_level(left_level, MAX_INPUT_LEVEL);
924 right_level= to_oss_level(right_level, MAX_INPUT_LEVEL);
925 return (right_level << 8)+left_level;
926
	/* monitor is mono: the same value is reported on both channels */
927 case SOUND_MIXER_MONITOR:
928 left_level = (harmony.current_gain & GAIN_MA_MASK) >> GAIN_MA_SHIFT;
929 left_level = to_oss_level(MAX_MONITOR_LEVEL-left_level, MAX_MONITOR_LEVEL);
930 return (left_level << 8)+left_level;
931 }
932 return -EINVAL;
933}
934
935
936
937/*
938 * Some conversions for the same reasons.
939 * We give back the new real value(s) due to
940 * the rescale.
941 */
942
/*
 * Set the level for one mixer channel.  'value' packs OSS levels as
 * right<<8 | left (each clamped to 0-100).  The value is rescaled to
 * the hardware range, merged into the cached gain word, written to the
 * chip, and the *effective* OSS level (after round-trip through the
 * hardware scale) is returned so callers see the real setting.
 * Returns -EINVAL for unsupported channels.
 */
943static int harmony_mixer_set_level(int channel, int value)
944{
945 int left_level;
946 int right_level;
947 int new_left_level;
948 int new_right_level;
949
950 right_level = (value & 0x0000ff00) >> 8;
951 left_level = value & 0x000000ff;
952 if (right_level > 100) right_level = 100;
953 if (left_level > 100) left_level = 100;
954
955 switch (channel) {
	/* output volume is stored as attenuation, hence the 100-x inversion */
956 case SOUND_MIXER_VOLUME:
957 right_level = to_harmony_level(100-right_level, MAX_OUTPUT_LEVEL);
958 left_level = to_harmony_level(100-left_level, MAX_OUTPUT_LEVEL);
959 new_right_level = to_oss_level(MAX_OUTPUT_LEVEL - right_level, MAX_OUTPUT_LEVEL);
960 new_left_level = to_oss_level(MAX_OUTPUT_LEVEL - left_level, MAX_OUTPUT_LEVEL);
961 harmony.current_gain = (harmony.current_gain & ~(GAIN_LO_MASK | GAIN_RO_MASK))
962 | (left_level << GAIN_LO_SHIFT) | (right_level << GAIN_RO_SHIFT);
963 harmony_mixer_set_gain();
964 return (new_right_level << 8) + new_left_level;
965
966 case SOUND_MIXER_IGAIN:
967 right_level = to_harmony_level(right_level, MAX_INPUT_LEVEL);
968 left_level = to_harmony_level(left_level, MAX_INPUT_LEVEL);
969 new_right_level = to_oss_level(right_level, MAX_INPUT_LEVEL);
970 new_left_level = to_oss_level(left_level, MAX_INPUT_LEVEL);
971 harmony.current_gain = (harmony.current_gain & ~(GAIN_LI_MASK | GAIN_RI_MASK))
972 | (left_level << GAIN_LI_SHIFT) | (right_level << GAIN_RI_SHIFT);
973 harmony_mixer_set_gain();
974 return (new_right_level << 8) + new_left_level;
975
	/* monitor is mono: only the left byte of 'value' is used */
976 case SOUND_MIXER_MONITOR:
977 left_level = to_harmony_level(100-left_level, MAX_MONITOR_LEVEL);
978 new_left_level = to_oss_level(MAX_MONITOR_LEVEL-left_level, MAX_MONITOR_LEVEL);
979 harmony.current_gain = (harmony.current_gain & ~GAIN_MA_MASK) | (left_level << GAIN_MA_SHIFT);
980 harmony_mixer_set_gain();
981 return (new_left_level << 8) + new_left_level;
982 }
983
984 return -EINVAL;
985}
986
987#undef to_harmony_level
988#undef to_oss_level
989
990/*
991 * Return the selected input device (mic or line)
992 */
993
/* Report the currently selected record source: the input-select bit in
 * the gain word chooses mic (set) or line-in (clear). */
994static int harmony_mixer_get_recmask(void)
995{
996 int current_input_line;
997
998 current_input_line = (harmony.current_gain & GAIN_IS_MASK)
999 >> GAIN_IS_SHIFT;
1000 if (current_input_line)
1001 return SOUND_MASK_MIC;
1002
1003 return SOUND_MASK_LINE;
1004}
1005
1006/*
1007 * Set the input (only one at time, arbitrary priority to line in)
1008 */
1009
/*
 * Select the record source.  The hardware records from exactly one
 * input, so when the caller requests both (or neither), line-in wins
 * unless mic was already selected and line was not explicitly asked
 * for.  Writes the choice to the chip and returns the mask that was
 * actually selected.
 */
1010static int harmony_mixer_set_recmask(int recmask)
1011{
1012 int new_input_line;
1013 int new_input_mask;
1014 int current_input_line;
1015
1016 current_input_line = (harmony.current_gain & GAIN_IS_MASK)
1017 >> GAIN_IS_SHIFT;
	/* switch to line-in if it is requested, or if nothing usable is
	 * requested while mic is current (arbitrary line-in priority) */
1018 if ((current_input_line && ((recmask & SOUND_MASK_LINE) || !(recmask & SOUND_MASK_MIC))) ||
1019 (!current_input_line && ((recmask & SOUND_MASK_LINE) && !(recmask & SOUND_MASK_MIC)))) {
1020 new_input_line = 0;
1021 new_input_mask = SOUND_MASK_LINE;
1022 } else {
1023 new_input_line = 1;
1024 new_input_mask = SOUND_MASK_MIC;
1025 }
1026 harmony.current_gain = ((harmony.current_gain & ~GAIN_IS_MASK) |
1027 (new_input_line << GAIN_IS_SHIFT ));
1028 harmony_mixer_set_gain();
1029 return new_input_mask;
1030}
1031
1032
1033/*
1034 * give the active outlines
1035 */
1036
/* Report which output paths (internal speaker, line-out, headphones)
 * are currently enabled in the cached gain word. */
1037static int harmony_mixer_get_outmask(void)
1038{
1039 int outmask = 0;
1040
1041 if (harmony.current_gain & GAIN_SE_MASK) outmask |= MASK_INTERNAL;
1042 if (harmony.current_gain & GAIN_LE_MASK) outmask |= MASK_LINEOUT;
1043 if (harmony.current_gain & GAIN_HE_MASK) outmask |= MASK_HEADPHONES;
1044
1045 return outmask;
1046}
1047
1048
/* Enable/disable the three output paths according to 'outmask', push
 * the new gain word to the chip, and return the accepted mask bits. */
1049static int harmony_mixer_set_outmask(int outmask)
1050{
1051 if (outmask & MASK_INTERNAL)
1052 harmony.current_gain |= GAIN_SE_MASK;
1053 else
1054 harmony.current_gain &= ~GAIN_SE_MASK;
1055
1056 if (outmask & MASK_LINEOUT)
1057 harmony.current_gain |= GAIN_LE_MASK;
1058 else
1059 harmony.current_gain &= ~GAIN_LE_MASK;
1060
1061 if (outmask & MASK_HEADPHONES)
1062 harmony.current_gain |= GAIN_HE_MASK;
1063 else
1064 harmony.current_gain &= ~GAIN_HE_MASK;
1065
1066 harmony_mixer_set_gain();
1067
1068 return (outmask & (MASK_INTERNAL | MASK_LINEOUT | MASK_HEADPHONES));
1069}
1070
1071/*
1072 * This code is inspired from sb_mixer.c
1073 */
1074
/*
 * OSS mixer ioctl handler (structure inspired by sb_mixer.c).
 * SOUND_MIXER_INFO and OSS_GETVERSION are handled up front; all other
 * requests funnel through the switch, which computes 'ret' and copies
 * it back to userspace at the end.  Write requests read their argument
 * into 'val' first.  Returns 0 on success, -EFAULT on bad user
 * pointers, -EINVAL for unknown requests.
 */
1075static int harmony_mixer_ioctl(struct inode * inode, struct file * file,
1076 unsigned int cmd, unsigned long arg)
1077{
1078 int val;
1079 int ret;
1080
1081 if (cmd == SOUND_MIXER_INFO) {
1082 mixer_info info;
1083 memset(&info, 0, sizeof(info));
1084 strncpy(info.id, "harmony", sizeof(info.id)-1);
1085 strncpy(info.name, "Harmony audio", sizeof(info.name)-1);
1086 info.modify_counter = 1; /* ? */
1087 if (copy_to_user((void *)arg, &info, sizeof(info)))
1088 return -EFAULT;
1089 return 0;
1090 }
1091
1092 if (cmd == OSS_GETVERSION)
1093 return put_user(SOUND_VERSION, (int *)arg);
1094
1095 /* read */
1096 val = 0;
1097 if (_SIOC_DIR(cmd) & _SIOC_WRITE)
1098 if (get_user(val, (int *)arg))
1099 return -EFAULT;
1100
1101 switch (cmd) {
1102 case MIXER_READ(SOUND_MIXER_CAPS):
1103 ret = SOUND_CAP_EXCL_INPUT;
1104 break;
1105 case MIXER_READ(SOUND_MIXER_STEREODEVS):
1106 ret = SOUND_MASK_VOLUME | SOUND_MASK_IGAIN;
1107 break;
1108
1109 case MIXER_READ(SOUND_MIXER_RECMASK):
1110 ret = SOUND_MASK_MIC | SOUND_MASK_LINE;
1111 break;
1112 case MIXER_READ(SOUND_MIXER_DEVMASK):
1113 ret = SOUND_MASK_VOLUME | SOUND_MASK_IGAIN |
1114 SOUND_MASK_MONITOR;
1115 break;
1116 case MIXER_READ(SOUND_MIXER_OUTMASK):
1117 ret = MASK_INTERNAL | MASK_LINEOUT |
1118 MASK_HEADPHONES;
1119 break;
1120
1121 case MIXER_WRITE(SOUND_MIXER_RECSRC):
1122 ret = harmony_mixer_set_recmask(val);
1123 break;
1124 case MIXER_READ(SOUND_MIXER_RECSRC):
1125 ret = harmony_mixer_get_recmask();
1126 break;
1127
1128 case MIXER_WRITE(SOUND_MIXER_OUTSRC):
1129 ret = harmony_mixer_set_outmask(val);
1130 break;
1131 case MIXER_READ(SOUND_MIXER_OUTSRC):
1132 ret = harmony_mixer_get_outmask();
1133 break;
1134
	/* cmd & 0xff is the OSS channel number encoded in the request */
1135 case MIXER_WRITE(SOUND_MIXER_VOLUME):
1136 case MIXER_WRITE(SOUND_MIXER_IGAIN):
1137 case MIXER_WRITE(SOUND_MIXER_MONITOR):
1138 ret = harmony_mixer_set_level(cmd & 0xff, val);
1139 break;
1140
1141 case MIXER_READ(SOUND_MIXER_VOLUME):
1142 case MIXER_READ(SOUND_MIXER_IGAIN):
1143 case MIXER_READ(SOUND_MIXER_MONITOR):
1144 ret = harmony_mixer_get_level(cmd & 0xff);
1145 break;
1146
1147 default:
1148 return -EINVAL;
1149 }
1150
1151 if (put_user(ret, (int *)arg))
1152 return -EFAULT;
1153 return 0;
1154}
1155
1156
/* Single-open policy for /dev/mixer: refuse a second concurrent open.
 * NOTE(review): the open/close flag is a plain int with no locking —
 * racy on SMP, but consistent with the rest of this driver. */
1157static int harmony_mixer_open(struct inode *inode, struct file *file)
1158{
1159 if (harmony.mixer_open)
1160 return -EBUSY;
1161 harmony.mixer_open = 1;
1162 return 0;
1163}
1164
/* Release /dev/mixer; -EBUSY on an unbalanced release guards against
 * the flag getting out of sync. */
1165static int harmony_mixer_release(struct inode *inode, struct file *file)
1166{
1167 if (!harmony.mixer_open)
1168 return -EBUSY;
1169 harmony.mixer_open = 0;
1170 return 0;
1171}
1172
/* File operations for the /dev/mixer device node. */
1173static struct file_operations harmony_mixer_fops = {
1174 .owner = THIS_MODULE,
1175 .llseek = no_llseek,
1176 .open = harmony_mixer_open,
1177 .release = harmony_mixer_release,
1178 .ioctl = harmony_mixer_ioctl,
1179};
1180
1181
1182/*
1183 * Mute all the output and reset Harmony.
1184 */
1185
/*
 * Mute all outputs, pulse the chip's reset line (50 ms), then program
 * the default gain.  Muting first avoids an audible pop during reset.
 */
1186static void __init harmony_mixer_reset(void)
1187{
1188 harmony.current_gain = GAIN_TOTAL_SILENCE;
1189 harmony_mixer_set_gain();
1190 harmony_wait_CNTL();
1191 gsc_writel(1, &harmony.hpa->reset);
1192 mdelay(50); /* wait 50 ms */
1193 gsc_writel(0, &harmony.hpa->reset);
1194 harmony.current_gain = GAIN_DEFAULT;
1195 harmony_mixer_set_gain();
1196}
1197
/* Register the OSS mixer device and reset the chip to default gains.
 * Returns 0 on success, -EFAULT if registration fails. */
1198static int __init harmony_mixer_init(void)
1199{
1200 /* Register the device file operations */
1201 harmony.mixer_unit = register_sound_mixer(&harmony_mixer_fops, -1);
1202 if (harmony.mixer_unit < 0) {
1203 printk(KERN_WARNING PFX "Error Registering Mixer Driver\n");
1204 return -EFAULT;
1205 }
1206
1207 harmony_mixer_reset();
1208 harmony.mixer_open = 0;
1209
1210 return 0;
1211}
1212
1213
1214
1215/*
1216 * This is the callback that's called by the inventory hardware code
1217 * if it finds a match to the registered driver.
1218 */
/*
 * This is the callback that's called by the inventory hardware code
 * if it finds a match to the registered driver.
 *
 * Validates the device (single instance, IRQ present, ID byte, CNTL
 * not busy), allocates the four DMA buffer sets, and registers the
 * mixer and audio devices.  On any failure after buffer allocation,
 * everything allocated so far is torn down via out_err.
 */
1219static int __devinit
1220harmony_driver_probe(struct parisc_device *dev)
1221{
1222 u8 id;
1223 u8 rev;
1224 u32 cntl;
1225 int ret;
1226
1227 if (harmony.hpa) {
1228 /* We only support one Harmony at this time */
1229 printk(KERN_ERR PFX "driver already registered\n");
1230 return -EBUSY;
1231 }
1232
1233 if (!dev->irq) {
1234 printk(KERN_ERR PFX "no irq found\n");
1235 return -ENODEV;
1236 }
1237
1238 /* Set the HPA of harmony */
1239 harmony.hpa = (struct harmony_hpa *)dev->hpa.start;
1240 harmony.dev = dev;
1241
	/* the two accepted IDs differ only in bit 0, hence the (id | 1) test */
1242 /* Grab the ID and revision from the device */
1243 id = gsc_readb(&harmony.hpa->id);
1244 if ((id | 1) != 0x15) {
1245 printk(KERN_WARNING PFX "wrong harmony id 0x%02x\n", id);
1246 return -EBUSY;
1247 }
1248 cntl = gsc_readl(&harmony.hpa->cntl);
1249 rev = (cntl>>20) & 0xff;
1250
1251 printk(KERN_INFO "Lasi Harmony Audio driver " HARMONY_VERSION ", "
1252 "h/w id %i, rev. %i at 0x%lx, IRQ %i\n",
1253 id, rev, dev->hpa.start, harmony.dev->irq);
1254
1255 /* Make sure the control bit isn't set, although I don't think it
1256 ever is. */
1257 if (cntl & CNTL_C) {
1258 printk(KERN_WARNING PFX "CNTL busy\n");
	/* NOTE(review): assigning 0 to a pointer; NULL would be clearer */
1259 harmony.hpa = 0;
1260 return -EBUSY;
1261 }
1262
1263 /* Initialize the memory buffers */
1264 if (harmony_alloc_buffer(&played_buf, MAX_BUFS) ||
1265 harmony_alloc_buffer(&recorded_buf, MAX_BUFS) ||
1266 harmony_alloc_buffer(&graveyard, 1) ||
1267 harmony_alloc_buffer(&silent, 1)) {
1268 ret = -EBUSY;
1269 goto out_err;
1270 }
1271
1272 /* Initialize /dev/mixer and /dev/audio */
1273 if ((ret=harmony_mixer_init()))
1274 goto out_err;
1275 if ((ret=harmony_audio_init()))
1276 goto out_err;
1277
1278 return 0;
1279
1280out_err:
1281 harmony.hpa = 0;
1282 harmony_free_buffer(&played_buf);
1283 harmony_free_buffer(&recorded_buf);
1284 harmony_free_buffer(&graveyard);
1285 harmony_free_buffer(&silent);
1286 return ret;
1287}
1288
1289
1290static struct parisc_device_id harmony_tbl[] = {
1291 /* { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0007A }, Bushmaster/Flounder */
1292 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0007B }, /* 712/715 Audio */
1293 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0007E }, /* Pace Audio */
1294 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0007F }, /* Outfield / Coral II */
1295 { 0, }
1296};
1297
1298MODULE_DEVICE_TABLE(parisc, harmony_tbl);
1299
1300static struct parisc_driver harmony_driver = {
1301 .name = "Lasi Harmony",
1302 .id_table = harmony_tbl,
1303 .probe = harmony_driver_probe,
1304};
1305
/* Module entry point: register with the PA-RISC bus driver core;
 * probing happens via harmony_driver_probe() on a device match. */
1306static int __init init_harmony(void)
1307{
1308 return register_parisc_driver(&harmony_driver);
1309}
1310
/*
 * Module exit: release IRQ, unregister the sound devices, free DMA
 * buffers, then detach from the bus driver core.
 * NOTE(review): unconditionally dereferences harmony.dev — if the
 * module loaded without a probed device this would oops; confirm
 * module unload is only possible after a successful probe.
 */
1311static void __exit cleanup_harmony(void)
1312{
1313 free_irq(harmony.dev->irq, &harmony);
1314 unregister_sound_mixer(harmony.mixer_unit);
1315 unregister_sound_dsp(harmony.dsp_unit);
1316 harmony_free_buffer(&played_buf);
1317 harmony_free_buffer(&recorded_buf);
1318 harmony_free_buffer(&graveyard);
1319 harmony_free_buffer(&silent);
1320 unregister_parisc_driver(&harmony_driver);
1321}
1322
1323
1324MODULE_AUTHOR("Alex DeVries <alex@onefishtwo.ca>");
1325MODULE_DESCRIPTION("Harmony sound driver");
1326MODULE_LICENSE("GPL");
1327
1328module_init(init_harmony);
1329module_exit(cleanup_harmony);
1330
diff --git a/sound/oss/ics2101.c b/sound/oss/ics2101.c
deleted file mode 100644
index 45918df150b3..000000000000
--- a/sound/oss/ics2101.c
+++ /dev/null
@@ -1,247 +0,0 @@
1/*
2 * sound/oss/ics2101.c
3 *
4 * Driver for the ICS2101 mixer of GUS v3.7.
5 *
6 *
7 * Copyright (C) by Hannu Savolainen 1993-1997
8 *
9 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
10 * Version 2 (June 1991). See the "COPYING" file distributed with this software
11 * for more info.
12 *
13 *
14 * Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
15 * Bartlomiej Zolnierkiewicz : added __init to ics2101_mixer_init()
16 */
17#include <linux/init.h>
18#include <linux/spinlock.h>
19#include "sound_config.h"
20
21#include <linux/ultrasound.h>
22
23#include "gus.h"
24#include "gus_hw.h"
25
26#define MIX_DEVS (SOUND_MASK_MIC|SOUND_MASK_LINE| \
27 SOUND_MASK_SYNTH| \
28 SOUND_MASK_CD | SOUND_MASK_VOLUME)
29
30extern int *gus_osp;
31extern int gus_base;
32extern spinlock_t gus_lock;
33static int volumes[ICS_MIXDEVS];
34static int left_fix[ICS_MIXDEVS] =
35{1, 1, 1, 2, 1, 2};
36static int right_fix[ICS_MIXDEVS] =
37{2, 2, 2, 1, 2, 1};
38
/*
 * Map an OSS volume (0-100) to the ICS2101 attenuator encoding: a
 * 3-bit exponent in bits 4-6 and a 4-bit mantissa in bits 0-3,
 * giving a roughly logarithmic (smoother-sounding) response.
 */
39static int scale_vol(int vol)
40{
41 /*
42 * Experimental volume scaling by Risto Kankkunen.
43 * This should give smoother volume response than just
44 * a plain multiplication.
45 */
46
47 int e;
48
	/* clamp to 0-100, then scale linearly into 0-31 with rounding */
49 if (vol < 0)
50 vol = 0;
51 if (vol > 100)
52 vol = 100;
53 vol = (31 * vol + 50) / 100;
54 e = 0;
55 if (vol)
56 {
	/* normalize the mantissa into [16,31], adjusting the exponent */
57 while (vol < 16)
58 {
59 vol <<= 1;
60 e--;
61 }
62 vol -= 16;
63 e += 7;
64 }
65 return ((e << 4) + vol);
66}
67
/*
 * Program one channel (CHN_LEFT/CHN_RIGHT) of mixer device 'dev' with
 * OSS volume 'vol'.  Each device occupies an 8-register window
 * (dev << 3); offset 0/1 selects the left/right input routing control
 * and offset 2/3 the corresponding attenuator.  The two outb() pairs
 * are done under gus_lock because the index/data port pair is shared.
 */
68static void write_mix(int dev, int chn, int vol)
69{
70 int *selector;
71 unsigned long flags;
72 int ctrl_addr = dev << 3;
73 int attn_addr = dev << 3;
74
75 vol = scale_vol(vol);
76
77 if (chn == CHN_LEFT)
78 {
79 selector = left_fix;
80 ctrl_addr |= 0x00;
81 attn_addr |= 0x02;
82 }
83 else
84 {
85 selector = right_fix;
86 ctrl_addr |= 0x01;
87 attn_addr |= 0x03;
88 }
89
90 spin_lock_irqsave(&gus_lock, flags);
91 outb((ctrl_addr), u_MixSelect);
92 outb((selector[dev]), u_MixData);
93 outb((attn_addr), u_MixSelect);
94 outb(((unsigned char) vol), u_MixData);
95 spin_unlock_irqrestore(&gus_lock,flags);
96}
97
/*
 * Set both channels of mixer device 'dev' from an OSS-packed value
 * (right<<8 | left).  Each channel is clamped to 0-100, written to the
 * hardware, and the clamped pair is cached in volumes[] and returned.
 */
98static int set_volumes(int dev, int vol)
99{
100 int left = vol & 0x00ff;
101 int right = (vol >> 8) & 0x00ff;
102
103 if (left < 0)
104 left = 0;
105 if (left > 100)
106 left = 100;
107 if (right < 0)
108 right = 0;
109 if (right > 100)
110 right = 100;
111
112 write_mix(dev, CHN_LEFT, left);
113 write_mix(dev, CHN_RIGHT, right);
114
115 vol = left + (right << 8);
116 volumes[dev] = vol;
117 return vol;
118}
119
/*
 * OSS mixer ioctl handler for the ICS2101.  Only 'M' (mixer) requests
 * are handled; RECSRC is delegated to the GUS default mixer in both
 * directions.  Writes update the hardware and echo the clamped value
 * back to userspace; reads return either static capability masks or
 * the cached per-device volumes.
 */
120static int ics2101_mixer_ioctl(int dev, unsigned int cmd, void __user *arg)
121{
122 int val;
123
124 if (((cmd >> 8) & 0xff) == 'M') {
125 if (_SIOC_DIR(cmd) & _SIOC_WRITE) {
126
127 if (get_user(val, (int __user *)arg))
128 return -EFAULT;
129 switch (cmd & 0xff) {
130 case SOUND_MIXER_RECSRC:
131 return gus_default_mixer_ioctl(dev, cmd, arg);
132
133 case SOUND_MIXER_MIC:
134 val = set_volumes(DEV_MIC, val);
135 break;
136
137 case SOUND_MIXER_CD:
138 val = set_volumes(DEV_CD, val);
139 break;
140
141 case SOUND_MIXER_LINE:
142 val = set_volumes(DEV_LINE, val);
143 break;
144
145 case SOUND_MIXER_SYNTH:
146 val = set_volumes(DEV_GF1, val);
147 break;
148
149 case SOUND_MIXER_VOLUME:
150 val = set_volumes(DEV_VOL, val);
151 break;
152
153 default:
154 return -EINVAL;
155 }
156 return put_user(val, (int __user *)arg);
157 } else {
158 switch (cmd & 0xff) {
159 /*
160 * Return parameters
161 */
162 case SOUND_MIXER_RECSRC:
163 return gus_default_mixer_ioctl(dev, cmd, arg);
164
165 case SOUND_MIXER_DEVMASK:
166 val = MIX_DEVS;
167 break;
168
169 case SOUND_MIXER_STEREODEVS:
170 val = SOUND_MASK_LINE | SOUND_MASK_CD | SOUND_MASK_SYNTH | SOUND_MASK_VOLUME | SOUND_MASK_MIC;
171 break;
172
173 case SOUND_MIXER_RECMASK:
174 val = SOUND_MASK_MIC | SOUND_MASK_LINE;
175 break;
176
177 case SOUND_MIXER_CAPS:
178 val = 0;
179 break;
180
181 case SOUND_MIXER_MIC:
182 val = volumes[DEV_MIC];
183 break;
184
185 case SOUND_MIXER_LINE:
186 val = volumes[DEV_LINE];
187 break;
188
189 case SOUND_MIXER_CD:
190 val = volumes[DEV_CD];
191 break;
192
193 case SOUND_MIXER_VOLUME:
194 val = volumes[DEV_VOL];
195 break;
196
197 case SOUND_MIXER_SYNTH:
198 val = volumes[DEV_GF1];
199 break;
200
201 default:
202 return -EINVAL;
203 }
204 return put_user(val, (int __user *)arg);
205 }
206 }
207 return -EINVAL;
208}
209
210static struct mixer_operations ics2101_mixer_operations =
211{
212 .owner = THIS_MODULE,
213 .id = "ICS2101",
214 .name = "ICS2101 Multimedia Mixer",
215 .ioctl = ics2101_mixer_ioctl
216};
217
/*
 * Allocate an OSS mixer slot, install the ICS2101 operations, undo the
 * channel-flip workaround on non-model-5 GUS v3.7 boards, and program
 * sane default volumes.  Returns the mixer device number, or -1 if no
 * slot was available (set_volumes() is then never called).
 */
218int __init ics2101_mixer_init(void)
219{
220 int i;
221 int n;
222
223 if ((n = sound_alloc_mixerdev()) != -1)
224 {
225 mixer_devs[n] = &ics2101_mixer_operations;
226
227 /*
228 * Some GUS v3.7 cards had some channels flipped. Disable
229 * the flipping feature if the model id is other than 5.
230 */
231
232 if (inb(u_MixSelect) != 5)
233 {
234 for (i = 0; i < ICS_MIXDEVS; i++)
235 left_fix[i] = 1;
236 for (i = 0; i < ICS_MIXDEVS; i++)
237 right_fix[i] = 2;
238 }
	/* defaults: mid volume (0x5a == 90) everywhere, mic muted */
239 set_volumes(DEV_GF1, 0x5a5a);
240 set_volumes(DEV_CD, 0x5a5a);
241 set_volumes(DEV_MIC, 0x0000);
242 set_volumes(DEV_LINE, 0x5a5a);
243 set_volumes(DEV_VOL, 0x5a5a);
244 set_volumes(DEV_UNUSED, 0x0000);
245 }
246 return n;
247}
diff --git a/sound/oss/iwmem.h b/sound/oss/iwmem.h
deleted file mode 100644
index 48d333c7302b..000000000000
--- a/sound/oss/iwmem.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * sound/oss/iwmem.h
3 *
4 * DRAM size encoding table for AMD Interwave chip.
5 */
6/*
7 * Copyright (C) by Hannu Savolainen 1993-1997
8 *
9 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
10 * Version 2 (June 1991). See the "COPYING" file distributed with this software
11 * for more info.
12 *
13 * Changes:
14 * Bartlomiej Zolnierkiewicz : added __initdata to mem_decode
15 */
16
17
18#define K 1024
19#define M (1024*K)
20static int mem_decode[][4] __initdata =
21{
22/* Bank0 Bank1 Bank2 Bank3 Encoding bits */
23 {256*K, 0, 0, 0}, /* 0 */
24 {256*K, 256*K, 0, 0}, /* 1 */
25 {256*K, 256*K, 256*K, 256*K}, /* 2 */
26 {256*K, 1*M, 0, 0}, /* 3 */
27 {256*K, 1*M, 1*M, 1*M}, /* 4 */
28 {256*K, 256*K, 1*M, 0}, /* 5 */
29 {256*K, 256*K, 1*M, 1*M}, /* 6 */
30 {1*M, 0, 0, 0}, /* 7 */
31 {1*M, 1*M, 0, 0}, /* 8 */
32 {1*M, 1*M, 1*M, 1*M}, /* 9 */
33 {4*M, 0, 0, 0}, /* 10 */
34 {4*M, 4*M, 0, 0}, /* 11 */
35 {4*M, 4*M, 4*M, 4*M} /* 12 */
36};
diff --git a/sound/oss/mad16.c b/sound/oss/mad16.c
deleted file mode 100644
index 954647f41dff..000000000000
--- a/sound/oss/mad16.c
+++ /dev/null
@@ -1,1112 +0,0 @@
1/*
2 * Copyright (C) by Hannu Savolainen 1993-1997
3 *
4 * mad16.c
5 *
6 * Initialization code for OPTi MAD16 compatible audio chips. Including
7 *
8 * OPTi 82C928 MAD16 (replaced by C929)
9 * OAK OTI-601D Mozart
10 * OAK OTI-605 Mozart (later version with MPU401 Midi)
11 * OPTi 82C929 MAD16 Pro
12 * OPTi 82C930
13 * OPTi 82C924
14 *
15 * These audio interface chips don't produce sound themselves. They just
16 * connect some other components (OPL-[234] and a WSS compatible codec)
17 * to the PC bus and perform I/O, DMA and IRQ address decoding. There is
18 * also a UART for the MPU-401 mode (not 82C928/Mozart).
19 * The Mozart chip appears to be compatible with the 82C928, although later
20 * issues of the card, using the OTI-605 chip, have an MPU-401 compatible Midi
21 * port. This port is configured differently to that of the OPTi audio chips.
22 *
23 * Changes
24 *
25 * Alan Cox Clean up, added module selections.
26 *
27 * A. Wik Added support for Opti924 PnP.
28 * Improved debugging support. 16-May-1998
29 * Fixed bug. 16-Jun-1998
30 *
31 * Torsten Duwe Made Opti924 PnP support non-destructive
32 * 23-Dec-1998
33 *
34 * Paul Grayson Added support for Midi on later Mozart cards.
35 * 25-Nov-1999
36 * Christoph Hellwig Adapted to module_init/module_exit.
37 * Arnaldo C. de Melo got rid of attach_uart401 21-Sep-2000
38 *
39 * Pavel Rabel Clean up Nov-2000
40 */
41
42#include <linux/init.h>
43#include <linux/module.h>
44#include <linux/gameport.h>
45#include <linux/spinlock.h>
46#include "sound_config.h"
47
48#include "ad1848.h"
49#include "sb.h"
50#include "mpu401.h"
51
#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
#define SUPPORT_JOYSTICK 1
#endif

static int      mad16_conf;	/* CD-ROM type + joystick bits written to MC1 */
static int      mad16_cdsel;	/* CD-ROM DMA/IRQ/port select bits written to MC2 */
static DEFINE_SPINLOCK(lock);	/* serializes the password-then-access register sequence */

/* Chip families detected by chip_detect(); values of board_type. */
#define C928	1
#define MOZART	2
#define C929	3
#define C930	4
#define C924	5

/*
 * Registers
 *
 * The MAD16 occupies I/O ports 0xf8d to 0xf93 (fixed locations).
 * All ports are inactive by default. They can be activated by
 * writing 0xE2 or 0xE3 to the password register. The password is valid
 * only until the next I/O read or write.
 *
 * 82C930 uses 0xE4 as the password and indirect addressing to access
 * the config registers.
 */

#define MC0_PORT	0xf8c	/* Dummy port */
#define MC1_PORT	0xf8d	/* SB address, CD-ROM interface type, joystick */
#define MC2_PORT	0xf8e	/* CD-ROM address, IRQ, DMA, plus OPL4 bit */
#define MC3_PORT	0xf8f
#define PASSWD_REG	0xf8f
#define MC4_PORT	0xf90
#define MC5_PORT	0xf91
#define MC6_PORT	0xf92
#define MC7_PORT	0xf93
#define MC8_PORT	0xf94
#define MC9_PORT	0xf95
#define MC10_PORT	0xf96
#define MC11_PORT	0xf97
#define MC12_PORT	0xf98

static int      board_type = C928;	/* updated by chip_detect() */

static int     *mad16_osp;
static int      c931_detected;	/* minor differences from C930 */
static char     c924pnp;	/* " " " C924: ports relocated by -0x80 */
static int      debug;		/* debugging output */

/* Debug print helper: emits only when the 'debug' module parameter is set. */
#ifdef DDB
#undef DDB
#endif
#define DDB(x) do {if (debug) x;} while (0)
104
105static unsigned char mad_read(int port)
106{
107 unsigned long flags;
108 unsigned char tmp;
109
110 spin_lock_irqsave(&lock,flags);
111
112 switch (board_type) /* Output password */
113 {
114 case C928:
115 case MOZART:
116 outb((0xE2), PASSWD_REG);
117 break;
118
119 case C929:
120 outb((0xE3), PASSWD_REG);
121 break;
122
123 case C930:
124 /* outb(( 0xE4), PASSWD_REG); */
125 break;
126
127 case C924:
128 /* the c924 has its ports relocated by -128 if
129 PnP is enabled -aw */
130 if (!c924pnp)
131 outb((0xE5), PASSWD_REG); else
132 outb((0xE5), PASSWD_REG - 0x80);
133 break;
134 }
135
136 if (board_type == C930)
137 {
138 outb((port - MC0_PORT), 0xe0e); /* Write to index reg */
139 tmp = inb(0xe0f); /* Read from data reg */
140 }
141 else
142 if (!c924pnp)
143 tmp = inb(port); else
144 tmp = inb(port-0x80);
145 spin_unlock_irqrestore(&lock,flags);
146
147 return tmp;
148}
149
150static void mad_write(int port, int value)
151{
152 unsigned long flags;
153
154 spin_lock_irqsave(&lock,flags);
155
156 switch (board_type) /* Output password */
157 {
158 case C928:
159 case MOZART:
160 outb((0xE2), PASSWD_REG);
161 break;
162
163 case C929:
164 outb((0xE3), PASSWD_REG);
165 break;
166
167 case C930:
168 /* outb(( 0xE4), PASSWD_REG); */
169 break;
170
171 case C924:
172 if (!c924pnp)
173 outb((0xE5), PASSWD_REG); else
174 outb((0xE5), PASSWD_REG - 0x80);
175 break;
176 }
177
178 if (board_type == C930)
179 {
180 outb((port - MC0_PORT), 0xe0e); /* Write to index reg */
181 outb(((unsigned char) (value & 0xff)), 0xe0f);
182 }
183 else
184 if (!c924pnp)
185 outb(((unsigned char) (value & 0xff)), port); else
186 outb(((unsigned char) (value & 0xff)), port-0x80);
187 spin_unlock_irqrestore(&lock,flags);
188}
189
/*
 * Extra validation for the 82C930/C931 after indirect register access
 * has been set up by chip_detect().  Sets c931_detected when the newer
 * C931 answers.  Returns 1 when a C930/C931 is present, 0 otherwise.
 */
static int __init detect_c930(void)
{
	unsigned char tmp = mad_read(MC1_PORT);

	if ((tmp & 0x06) != 0x06)
	{
		DDB(printk("Wrong C930 signature (%x)\n", tmp));
		/* return 0; */
	}
	mad_write(MC1_PORT, 0);

	if (mad_read(MC1_PORT) != 0x06)
	{
		DDB(printk("Wrong C930 signature2 (%x)\n", tmp));
		/* return 0; */
	}
	mad_write(MC1_PORT, tmp);	/* Restore bits */

	/* MC7 must be fully writable on a real C930/C931. */
	mad_write(MC7_PORT, 0);
	if ((tmp = mad_read(MC7_PORT)) != 0)
	{
		DDB(printk("MC7 not writable (%x)\n", tmp));
		return 0;
	}
	mad_write(MC7_PORT, 0xcb);
	if ((tmp = mad_read(MC7_PORT)) != 0xcb)
	{
		DDB(printk("MC7 not writable2 (%x)\n", tmp));
		return 0;
	}

	tmp = mad_read(MC0_PORT+18);
	if (tmp == 0xff || tmp == 0x00)
		return 1;
	/* We probably have a C931 */
	DDB(printk("Detected C931 config=0x%02x\n", tmp));
	c931_detected = 1;

	/*
	 * We cannot configure the chip while it is in PnP mode, i.e. while
	 * a CSN is assigned (bit 8 in MC13).  We first try a software
	 * reset, then a software power off.  Forcing PnP mode off would be
	 * the last option, but it is not recommended - see the #if 0 block
	 * below.  (NOTE(review): this comment was truncated in the
	 * original; reconstructed from the code that follows.)
	 */
	if ((mad_read(MC0_PORT+13) & 0x80) == 0)
		return 1;

	/* Software reset */
	mad_write(MC9_PORT, 0x02);
	mad_write(MC9_PORT, 0x00);

	if ((mad_read(MC0_PORT+13) & 0x80) == 0)
		return 1;

	/* Power off, and on again */
	mad_write(MC9_PORT, 0xc2);
	mad_write(MC9_PORT, 0xc0);

	if ((mad_read(MC0_PORT+13) & 0x80) == 0)
		return 1;

#if 0
	/* Force off PnP mode. This is not recommended because
	 * the PnP bios will not recognize the chip on the next
	 * warm boot and may assign different resources to other
	 * PnP/PCI cards.
	 */
	mad_write(MC0_PORT+17, 0x04);
#endif
	return 1;
}
262
/*
 * Probe for a MAD16-compatible chip using the access method currently
 * implied by board_type/c924pnp.  Returns 1 if a chip answers the
 * password protocol correctly, 0 otherwise.
 */
static int __init detect_mad16(void)
{
	unsigned char tmp, tmp2, bit;
	int i, port;

	/*
	 * Check that reading a register doesn't return bus float (0xff)
	 * when the card is accessed using password. This may fail in case
	 * the card is in low power mode. Normally at least the power saving
	 * mode bit should be 0.
	 */
	if ((tmp = mad_read(MC1_PORT)) == 0xff)
	{
		DDB(printk("MC1_PORT returned 0xff\n"));
		return 0;
	}
	/* Dump the initial register contents when debugging. */
	for (i = 0xf8d; i <= 0xf98; i++)
		if (!c924pnp)
			DDB(printk("Port %0x (init value) = %0x\n", i, mad_read(i)));
		else
			DDB(printk("Port %0x (init value) = %0x\n", i-0x80, mad_read(i)));

	if (board_type == C930)
		return detect_c930();

	/*
	 * Now check that the gate is closed on first I/O after writing
	 * the password. (This is how a MAD16 compatible card works).
	 */
	if ((tmp2 = inb(MC1_PORT)) == tmp)	/* It didn't close */
	{
		DDB(printk("MC1_PORT didn't close after read (0x%02x)\n", tmp2));
		return 0;
	}

	/* Finally toggle a config bit and verify that the change sticks. */
	bit = (c924pnp) ? 0x20 : 0x80;
	port = (c924pnp) ? MC2_PORT : MC1_PORT;

	tmp = mad_read(port);
	mad_write(port, tmp ^ bit);		/* Toggle a bit */
	if ((tmp2 = mad_read(port)) != (tmp ^ bit))	/* Compare the bit */
	{
		mad_write(port, tmp);	/* Restore */
		DDB(printk("Bit revert test failed (0x%02x, 0x%02x)\n", tmp, tmp2));
		return 0;
	}
	mad_write(port, tmp);	/* Restore */
	return 1;		/* Bingo */
}
314
315static int __init wss_init(struct address_info *hw_config)
316{
317 /*
318 * Check if the IO port returns valid signature. The original MS Sound
319 * system returns 0x04 while some cards (AudioTrix Pro for example)
320 * return 0x00.
321 */
322
323 if ((inb(hw_config->io_base + 3) & 0x3f) != 0x04 &&
324 (inb(hw_config->io_base + 3) & 0x3f) != 0x00)
325 {
326 DDB(printk("No MSS signature detected on port 0x%x (0x%x)\n", hw_config->io_base, inb(hw_config->io_base + 3)));
327 return 0;
328 }
329 /*
330 * Check that DMA0 is not in use with a 8 bit board.
331 */
332 if (hw_config->dma == 0 && inb(hw_config->io_base + 3) & 0x80)
333 {
334 printk("MSS: Can't use DMA0 with a 8 bit card/slot\n");
335 return 0;
336 }
337 if (hw_config->irq > 9 && inb(hw_config->io_base + 3) & 0x80)
338 printk(KERN_ERR "MSS: Can't use IRQ%d with a 8 bit card/slot\n", hw_config->irq);
339 return 1;
340}
341
/*
 * Program the C930/C931 configuration registers for WSS operation.
 * 'base' is the WSS I/O base select code (0-3) computed in probe_mad16().
 */
static void __init init_c930(struct address_info *hw_config, int base)
{
	unsigned char cfg = 0;

	cfg |= (0x0f & mad16_conf);

	if(c931_detected)
	{
		/* Bit 0 has reversed meaning and bits 1 and 2 are sensed
		   reversed on write (NOTE(review): wording reconstructed
		   from a typo-ridden original comment - confirm against the
		   C931 datasheet).  Support only IDE cdrom.  IDE port
		   programmed somewhere else. */
		cfg = (cfg & 0x09) ^ 0x07;
	}
	cfg |= base << 4;		/* WSS base select bits */
	mad_write(MC1_PORT, cfg);

	/* MC2 is CD configuration. Don't touch it. */

	mad_write(MC3_PORT, 0);		/* Disable SB mode IRQ and DMA */

	/* bit 2 of MC4 reverses its meaning between the C930
	   and the C931. */
	cfg = c931_detected ? 0x04 : 0x00;

	if(mad16_cdsel & 0x20)
		mad_write(MC4_PORT, 0x62|cfg);	/* opl4 */
	else
		mad_write(MC4_PORT, 0x52|cfg);	/* opl3 */

	mad_write(MC5_PORT, 0x3C);	/* Init it into mode2 */
	mad_write(MC6_PORT, 0x02);	/* Enable WSS, Disable MPU and SB */
	mad_write(MC7_PORT, 0xCB);
	mad_write(MC10_PORT, 0x11);
}
377
/*
 * Identify which OPTi/OAK chip is present by trying each known password
 * and access method in turn.  Sets board_type (and c924pnp for the
 * relocated-port PnP C924) as a side effect.  Returns 1 on success,
 * 0 if no chip answered.
 */
static int __init chip_detect(void)
{
	int i;

	/*
	 * Then try to detect with the old password
	 */
	board_type = C924;

	DDB(printk("Detect using password = 0xE5\n"));

	if (detect_mad16()) {
		return 1;
	}

	board_type = C928;

	DDB(printk("Detect using password = 0xE2\n"));

	if (detect_mad16())
	{
		unsigned char model;

		/* Both low bits of MC3 set distinguishes a Mozart from a C928. */
		if (((model = mad_read(MC3_PORT)) & 0x03) == 0x03) {
			DDB(printk("mad16.c: Mozart detected\n"));
			board_type = MOZART;
		} else {
			DDB(printk("mad16.c: 82C928 detected???\n"));
			board_type = C928;
		}
		return 1;
	}

	board_type = C929;

	DDB(printk("Detect using password = 0xE3\n"));

	if (detect_mad16())
	{
		DDB(printk("mad16.c: 82C929 detected\n"));
		return 1;
	}

	/* The password port must float (0xff) before trying C930 access. */
	if (inb(PASSWD_REG) != 0xff)
		return 0;

	/*
	 * First relocate MC# registers to 0xe0e/0xe0f, disable password
	 */
	outb((0xE4), PASSWD_REG);
	outb((0x80), PASSWD_REG);

	board_type = C930;

	DDB(printk("Detect using password = 0xE4\n"));

	for (i = 0xf8d; i <= 0xf93; i++)
		DDB(printk("port %03x = %02x\n", i, mad_read(i)));

	if(detect_mad16()) {
		DDB(printk("mad16.c: 82C930 detected\n"));
		return 1;
	}

	/* The C931 has the password reg at F8D */
	outb((0xE4), 0xF8D);
	outb((0x80), 0xF8D);
	DDB(printk("Detect using password = 0xE4 for C931\n"));

	if (detect_mad16()) {
		return 1;
	}

	/* Last resort: C924 in PnP mode, all ports relocated by -0x80. */
	board_type = C924;
	c924pnp++;
	DDB(printk("Detect using password = 0xE5 (again), port offset -0x80\n"));
	if (detect_mad16()) {
		DDB(printk("mad16.c: 82C924 PnP detected\n"));
		return 1;
	}

	c924pnp=0;

	return 0;
}
464
/*
 * Detect and configure the MAD16 chip and its AD1848-compatible codec.
 * Validates the requested I/O base, IRQ and DMA settings, claims the
 * I/O regions, programs the config registers, and registers the audio
 * device via ad1848_init().  Returns 1 on success, 0 on failure (with
 * the claimed regions released).
 */
static int __init probe_mad16(struct address_info *hw_config)
{
	int i;
	unsigned char tmp;
	unsigned char cs4231_mode = 0;

	int ad_flags = 0;

	signed char bits;	/* IRQ select bits for the config register */

	/* DMA channel (0,1,2,3) -> hardware DMA select code. */
	static char dma_bits[4] = {
		1, 2, 0, 3
	};

	int config_port = hw_config->io_base + 0, version_port = hw_config->io_base + 3;
	int dma = hw_config->dma, dma2 = hw_config->dma2;
	unsigned char dma2_bit = 0;
	int base;
	struct resource *ports;

	mad16_osp = hw_config->osp;

	/* Map the WSS I/O base to its 2-bit hardware select code. */
	switch (hw_config->io_base) {
	case 0x530:
		base = 0;
		break;
	case 0xe80:
		base = 1;
		break;
	case 0xf40:
		base = 2;
		break;
	case 0x604:
		base = 3;
		break;
	default:
		printk(KERN_ERR "MAD16/Mozart: Bad WSS base address 0x%x\n", hw_config->io_base);
		return 0;
	}

	if (dma != 0 && dma != 1 && dma != 3) {
		printk(KERN_ERR "MSS: Bad DMA %d\n", dma);
		return 0;
	}

	/*
	 * Check that all ports return 0xff (bus float) when no password
	 * is written to the password register.
	 */
	DDB(printk("--- Detecting MAD16 / Mozart ---\n"));
	if (!chip_detect())
		return 0;

	switch (hw_config->irq) {
	case 7:
		bits = 8;
		break;
	case 9:
		bits = 0x10;
		break;
	case 10:
		bits = 0x18;
		break;
	case 12:
		bits = 0x20;
		break;
	case 5: /* Also IRQ5 is possible on C930 */
		if (board_type == C930 || c924pnp) {
			bits = 0x28;
			break;
		}
		/* fall through: IRQ5 is invalid on the other chips */
	default:
		printk(KERN_ERR "MAD16/Mozart: Bad IRQ %d\n", hw_config->irq);
		return 0;
	}

	ports = request_region(hw_config->io_base + 4, 4, "ad1848");
	if (!ports) {
		printk(KERN_ERR "MSS: I/O port conflict\n");
		return 0;
	}
	if (!request_region(hw_config->io_base, 4, "mad16 WSS config")) {
		release_region(hw_config->io_base + 4, 4);
		printk(KERN_ERR "MSS: I/O port conflict\n");
		return 0;
	}

	/* C930/C931 has its own register programming sequence. */
	if (board_type == C930) {
		init_c930(hw_config, base);
		goto got_it;
	}

	for (i = 0xf8d; i <= 0xf93; i++) {
		if (!c924pnp)
			DDB(printk("port %03x = %02x\n", i, mad_read(i)));
		else
			DDB(printk("port %03x = %02x\n", i-0x80, mad_read(i)));
	}

/*
 * Set the WSS address
 */

	tmp = (mad_read(MC1_PORT) & 0x0f) | 0x80;	/* Enable WSS, Disable SB */
	tmp |= base << 4;	/* WSS port select bits */

	/*
	 * Set optional CD-ROM and joystick settings.
	 */

	tmp &= ~0x0f;
	tmp |= (mad16_conf & 0x0f);	/* CD-ROM and joystick bits */
	mad_write(MC1_PORT, tmp);

	tmp = mad16_cdsel;
	mad_write(MC2_PORT, tmp);
	mad_write(MC3_PORT, 0xf0);	/* Disable SB */

	if (board_type == C924)	/* Specific C924 init values */
	{
		mad_write(MC4_PORT, 0xA0);
		mad_write(MC5_PORT, 0x05);
		mad_write(MC6_PORT, 0x03);
	}
	if (!ad1848_detect(ports, &ad_flags, mad16_osp))
		goto fail;

	if (ad_flags & (AD_F_CS4231 | AD_F_CS4248))
		cs4231_mode = 0x02;	/* CS4248/CS4231 sync delay switch */

	if (board_type == C929)
	{
		mad_write(MC4_PORT, 0xa2);
		mad_write(MC5_PORT, 0xA5 | cs4231_mode);
		mad_write(MC6_PORT, 0x03);	/* Disable MPU401 */
	}
	else
	{
		mad_write(MC4_PORT, 0x02);
		mad_write(MC5_PORT, 0x30 | cs4231_mode);
	}

	for (i = 0xf8d; i <= 0xf93; i++) {
		if (!c924pnp)
			DDB(printk("port %03x after init = %02x\n", i, mad_read(i)));
		else
			DDB(printk("port %03x after init = %02x\n", i-0x80, mad_read(i)));
	}

got_it:
	/* Re-detect the codec now that the chip is configured. */
	ad_flags = 0;
	if (!ad1848_detect(ports, &ad_flags, mad16_osp))
		goto fail;

	if (!wss_init(hw_config))
		goto fail;

	/*
	 * Set the IRQ and DMA addresses.
	 */

	outb((bits | 0x40), config_port);
	if ((inb(version_port) & 0x40) == 0)
		printk(KERN_ERR "[IRQ Conflict?]\n");

	/*
	 * Handle the capture DMA channel
	 */

	if (ad_flags & AD_F_CS4231 && dma2 != -1 && dma2 != dma)
	{
		if (!((dma == 0 && dma2 == 1) ||
			(dma == 1 && dma2 == 0) ||
			(dma == 3 && dma2 == 0)))
		{	/* Unsupported combination. Try to swap channels */
			int tmp = dma;

			dma = dma2;
			dma2 = tmp;
		}
		if ((dma == 0 && dma2 == 1) || (dma == 1 && dma2 == 0) ||
			(dma == 3 && dma2 == 0))
		{
			dma2_bit = 0x04;	/* Enable capture DMA */
		}
		else
		{
			printk("MAD16: Invalid capture DMA\n");
			dma2 = dma;
		}
	}
	else dma2 = dma;	/* single-DMA (shared playback/capture) mode */

	outb((bits | dma_bits[dma] | dma2_bit), config_port);	/* Write IRQ+DMA setup */

	hw_config->slots[0] = ad1848_init("mad16 WSS", ports,
					  hw_config->irq,
					  dma,
					  dma2, 0,
					  hw_config->osp,
					  THIS_MODULE);
	return 1;

fail:
	release_region(hw_config->io_base + 4, 4);
	release_region(hw_config->io_base, 4);
	return 0;
}
674
/*
 * Probe and configure the MIDI interface.  Chips older than the C929
 * have no MPU-401 UART: with CONFIG_MAD16_OLDCARD we drive the SB MIDI
 * port instead, otherwise we program the Mozart MPU register and fall
 * through to the common uart401 probe.  Returns non-zero on success.
 */
static int __init probe_mad16_mpu(struct address_info *hw_config)
{
	unsigned char tmp;

	if (board_type < C929)	/* Early chip. No MPU support. Just SB MIDI */
	{

#ifdef CONFIG_MAD16_OLDCARD

		tmp = mad_read(MC3_PORT);

		/*
		 * MAD16 SB base is defined by the WSS base. It cannot be changed
		 * alone.
		 * Ignore configured I/O base. Use the active setting.
		 */

		if (mad_read(MC1_PORT) & 0x20)
			hw_config->io_base = 0x240;
		else
			hw_config->io_base = 0x220;

		/* Fold the SB MIDI IRQ into the upper bits of MC3. */
		switch (hw_config->irq)
		{
			case 5:
				tmp = (tmp & 0x3f) | 0x80;
				break;
			case 7:
				tmp = (tmp & 0x3f);
				break;
			case 11:
				tmp = (tmp & 0x3f) | 0x40;
				break;
			default:
				printk(KERN_ERR "mad16/Mozart: Invalid MIDI IRQ\n");
				return 0;
		}

		mad_write(MC3_PORT, tmp | 0x04);
		hw_config->driver_use_1 = SB_MIDI_ONLY;
		if (!request_region(hw_config->io_base, 16, "soundblaster"))
			return 0;
		if (!sb_dsp_detect(hw_config, 0, 0, NULL)) {
			release_region(hw_config->io_base, 16);
			return 0;
		}

		/* Re-derive the active SB base (it may have been clobbered). */
		if (mad_read(MC1_PORT) & 0x20)
			hw_config->io_base = 0x240;
		else
			hw_config->io_base = 0x220;

		hw_config->name = "Mad16/Mozart";
		sb_dsp_init(hw_config, THIS_MODULE);
		return 1;
#else
		/* assuming all later Mozart cards are identified as
		 * either 82C928 or Mozart. If so, following code attempts
		 * to set MPU register. TODO - add probing
		 */

		tmp = mad_read(MC8_PORT);

		switch (hw_config->irq)
		{
			case 5:
				tmp |= 0x08;
				break;
			case 7:
				tmp |= 0x10;
				break;
			case 9:
				tmp |= 0x18;
				break;
			case 10:
				tmp |= 0x20;
				break;
			case 11:
				tmp |= 0x28;
				break;
			default:
				printk(KERN_ERR "mad16/MOZART: invalid mpu_irq\n");
				return 0;
		}

		switch (hw_config->io_base)
		{
			case 0x300:
				tmp |= 0x01;
				break;
			case 0x310:
				tmp |= 0x03;
				break;
			case 0x320:
				tmp |= 0x05;
				break;
			case 0x330:
				tmp |= 0x07;
				break;
			default:
				printk(KERN_ERR "mad16/MOZART: invalid mpu_io\n");
				return 0;
		}

		mad_write(MC8_PORT, tmp);	/* write MPU port parameters */
		goto probe_401;
#endif
	}

	/* C929/C930/C924: real MPU-401 UART configured through MC6. */
	tmp = mad_read(MC6_PORT) & 0x83;
	tmp |= 0x80;		/* MPU-401 enable */

	/* Set the MPU base bits */

	switch (hw_config->io_base)
	{
		case 0x300:
			tmp |= 0x60;
			break;
		case 0x310:
			tmp |= 0x40;
			break;
		case 0x320:
			tmp |= 0x20;
			break;
		case 0x330:
			tmp |= 0x00;
			break;
		default:
			printk(KERN_ERR "MAD16: Invalid MIDI port 0x%x\n", hw_config->io_base);
			return 0;
	}

	/* Set the MPU IRQ bits */

	switch (hw_config->irq)
	{
		case 5:
			tmp |= 0x10;
			break;
		case 7:
			tmp |= 0x18;
			break;
		case 9:
			tmp |= 0x00;
			break;
		case 10:
			tmp |= 0x08;
			break;
		default:
			/* NOTE(review): unlike the port check above, an
			   invalid IRQ is only reported here; probing
			   continues with the IRQ bits left at zero. */
			printk(KERN_ERR "MAD16: Invalid MIDI IRQ %d\n", hw_config->irq);
			break;
	}

	mad_write(MC6_PORT, tmp);	/* Write MPU401 config */

#ifndef CONFIG_MAD16_OLDCARD
probe_401:
#endif
	hw_config->driver_use_1 = SB_MIDI_ONLY;
	hw_config->name = "Mad16/Mozart";
	return probe_uart401(hw_config, THIS_MODULE);
}
837
/*
 * Tear down the audio device set up by probe_mad16(): unload the codec,
 * release the WSS config region and unregister the audio device slot.
 */
static void __exit unload_mad16(struct address_info *hw_config)
{
	ad1848_unload(hw_config->io_base + 4,
			hw_config->irq,
			hw_config->dma,
			hw_config->dma2, 0);
	release_region(hw_config->io_base, 4);
	sound_unload_audiodev(hw_config->slots[0]);
}
847
/*
 * Tear down whatever MIDI interface probe_mad16_mpu() attached:
 * SB MIDI for old pre-C929 cards (CONFIG_MAD16_OLDCARD), the
 * uart401 MPU-401 driver otherwise.
 */
static void __exit unload_mad16_mpu(struct address_info *hw_config)
{
#ifdef CONFIG_MAD16_OLDCARD
	if (board_type < C929)	/* Early chip. No MPU support. Just SB MIDI */
	{
		sb_dsp_unload(hw_config, 0);
		return;
	}
#endif

	unload_uart401(hw_config);
}
860
static struct address_info cfg;		/* WSS/codec resources */
static struct address_info cfg_mpu;	/* MPU-401/MIDI resources */

static int found_mpu;			/* non-zero when probe_mad16_mpu() succeeded */

/* Module/boot parameters.  -1 means "not set" for io/irq/dma values. */
static int __initdata mpu_io = 0;
static int __initdata mpu_irq = 0;
static int __initdata io = -1;
static int __initdata dma = -1;
static int __initdata dma16 = -1;	/* Set this for modules that need it */
static int __initdata irq = -1;
static int __initdata cdtype = 0;
static int __initdata cdirq = 0;
static int __initdata cdport = 0x340;
static int __initdata cddma = -1;
static int __initdata opl4 = 0;
static int __initdata joystick = 0;

module_param(mpu_io, int, 0);
module_param(mpu_irq, int, 0);
module_param(io, int, 0);
module_param(dma, int, 0);
module_param(dma16, int, 0);
module_param(irq, int, 0);
module_param(cdtype, int, 0);
module_param(cdirq, int, 0);
module_param(cdport, int, 0);
module_param(cddma, int, 0);
module_param(opl4, int, 0);
module_param(joystick, bool, 0);	/* NOTE(review): variable is int but param type is bool - confirm against module_param rules */
module_param(debug, bool, 0644);	/* NOTE(review): same int/bool mismatch as above */

/* CD-ROM DMA channel -> MC2 select code; row 0 for 8-bit-DMA interfaces,
 * row 1 for the Sony/Panasonic types.  -1 marks an invalid channel. */
static int __initdata dma_map[2][8] =
{
	{0x03, -1, -1, -1, -1, 0x00, 0x01, 0x02},
	{0x03, -1, 0x01, 0x00, -1, -1, -1, -1}
};

/* CD-ROM IRQ number -> MC2 IRQ select bits; -1 marks an invalid IRQ. */
static int __initdata irq_map[16] =
{
	0x00, -1, -1, 0x0A,
	-1, 0x04, -1, 0x08,
	-1, 0x10, 0x14, 0x18,
	-1, -1, -1, -1
};
906
907#ifdef SUPPORT_JOYSTICK
908
909static struct gameport *gameport;
910
911static int __devinit mad16_register_gameport(int io_port)
912{
913 if (!request_region(io_port, 1, "mad16 gameport")) {
914 printk(KERN_ERR "mad16: gameport address 0x%#x already in use\n", io_port);
915 return -EBUSY;
916 }
917
918 gameport = gameport_allocate_port();
919 if (!gameport) {
920 printk(KERN_ERR "mad16: can not allocate memory for gameport\n");
921 release_region(io_port, 1);
922 return -ENOMEM;
923 }
924
925 gameport_set_name(gameport, "MAD16 Gameport");
926 gameport_set_phys(gameport, "isa%04x/gameport0", io_port);
927 gameport->io = io_port;
928
929 gameport_register_port(gameport);
930
931 return 0;
932}
933
934static inline void mad16_unregister_gameport(void)
935{
936 if (gameport) {
937 /* the gameport was initialized so we must free it up */
938 gameport_unregister_port(gameport);
939 gameport = NULL;
940 release_region(0x201, 1);
941 }
942}
943#else
944static inline int mad16_register_gameport(int io_port) { return -ENOSYS; }
945static inline void mad16_unregister_gameport(void) { }
946#endif
947
/*
 * Module init: validate the CD-ROM/joystick options, build the MC1/MC2
 * config words (mad16_conf/mad16_cdsel), then probe and attach the
 * codec, the MPU and optionally the gameport.  Returns 0 on success or
 * a negative errno.
 */
static int __devinit init_mad16(void)
{
	int dmatype = 0;	/* selects the row of dma_map[] for this CD type */

	printk(KERN_INFO "MAD16 audio driver Copyright (C) by Hannu Savolainen 1993-1996\n");

	printk(KERN_INFO "CDROM ");
	switch (cdtype)
	{
		case 0x00:
			printk("Disabled");
			cdirq = 0;
			break;
		case 0x02:
			printk("Sony CDU31A");
			dmatype = 1;
			if(cddma == -1) cddma = 3;	/* default DMA channel */
			break;
		case 0x04:
			printk("Mitsumi");
			dmatype = 0;
			if(cddma == -1) cddma = 5;
			break;
		case 0x06:
			printk("Panasonic Lasermate");
			dmatype = 1;
			if(cddma == -1) cddma = 3;
			break;
		case 0x08:
			printk("Secondary IDE");
			dmatype = 0;
			if(cddma == -1) cddma = 5;
			break;
		case 0x0A:
			printk("Primary IDE");
			dmatype = 0;
			if(cddma == -1) cddma = 5;
			break;
		default:
			printk("\n");
			printk(KERN_ERR "Invalid CDROM type\n");
			return -EINVAL;
	}

	/*
	 * Build the config words
	 */

	mad16_conf = (joystick ^ 1) | cdtype;	/* bit 0 clear = joystick enabled */
	mad16_cdsel = 0;
	if (opl4)
		mad16_cdsel |= 0x20;

	if(cdtype){
		if (cddma > 7 || cddma < 0 || dma_map[dmatype][cddma] == -1)
		{
			printk("\n");
			printk(KERN_ERR "Invalid CDROM DMA\n");
			return -EINVAL;
		}
		if (cddma)
			printk(", DMA %d", cddma);
		else
			printk(", no DMA");

		if (!cdirq)
			printk(", no IRQ");
		else if (cdirq < 0 || cdirq > 15 || irq_map[cdirq] == -1)
		{
			printk(", invalid IRQ (disabling)");
			cdirq = 0;
		}
		else printk(", IRQ %d", cdirq);

		mad16_cdsel |= dma_map[dmatype][cddma];

		if (cdtype < 0x08)	/* non-IDE interfaces also need port select bits */
		{
			switch (cdport)
			{
				case 0x340:
					mad16_cdsel |= 0x00;
					break;
				case 0x330:
					mad16_cdsel |= 0x40;
					break;
				case 0x360:
					mad16_cdsel |= 0x80;
					break;
				case 0x320:
					mad16_cdsel |= 0xC0;
					break;
				default:
					printk(KERN_ERR "Unknown CDROM I/O base %d\n", cdport);
					return -EINVAL;
			}
		}
		mad16_cdsel |= irq_map[cdirq];
	}

	printk(".\n");

	cfg.io_base = io;
	cfg.irq = irq;
	cfg.dma = dma;
	cfg.dma2 = dma16;

	if (cfg.io_base == -1 || cfg.dma == -1 || cfg.irq == -1) {
		printk(KERN_ERR "I/O, DMA and irq are mandatory\n");
		return -EINVAL;
	}

	if (!request_region(MC0_PORT, 12, "mad16"))
		return -EBUSY;

	if (!probe_mad16(&cfg)) {
		release_region(MC0_PORT, 12);
		return -ENODEV;
	}

	cfg_mpu.io_base = mpu_io;
	cfg_mpu.irq = mpu_irq;

	/* MIDI is optional; remember the result for cleanup. */
	found_mpu = probe_mad16_mpu(&cfg_mpu);

	if (joystick)
		mad16_register_gameport(0x201);

	return 0;
}
1078
/* Module exit: undo init_mad16() in reverse order. */
static void __exit cleanup_mad16(void)
{
	if (found_mpu)
		unload_mad16_mpu(&cfg_mpu);
	mad16_unregister_gameport();
	unload_mad16(&cfg);
	release_region(MC0_PORT, 12);
}
1087
1088module_init(init_mad16);
1089module_exit(cleanup_mad16);
1090
1091#ifndef MODULE
1092static int __init setup_mad16(char *str)
1093{
1094 /* io, irq */
1095 int ints[8];
1096
1097 str = get_options(str, ARRAY_SIZE(ints), ints);
1098
1099 io = ints[1];
1100 irq = ints[2];
1101 dma = ints[3];
1102 dma16 = ints[4];
1103 mpu_io = ints[5];
1104 mpu_irq = ints[6];
1105 joystick = ints[7];
1106
1107 return 1;
1108}
1109
1110__setup("mad16=", setup_mad16);
1111#endif
1112MODULE_LICENSE("GPL");
diff --git a/sound/oss/maestro.c b/sound/oss/maestro.c
deleted file mode 100644
index 1d98d100d739..000000000000
--- a/sound/oss/maestro.c
+++ /dev/null
@@ -1,3686 +0,0 @@
1/*****************************************************************************
2 *
3 * ESS Maestro/Maestro-2/Maestro-2E driver for Linux 2.[23].x
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * (c) Copyright 1999 Alan Cox <alan.cox@linux.org>
20 *
21 * Based heavily on SonicVibes.c:
22 * Copyright (C) 1998-1999 Thomas Sailer (sailer@ife.ee.ethz.ch)
23 *
24 * Heavily modified by Zach Brown <zab@zabbo.net> based on lunch
25 * with ESS engineers. Many thanks to Howard Kim for providing
26 * contacts and hardware. Honorable mention goes to Eric
27 * Brombaugh for all sorts of things. Best regards to the
28 * proprietors of Hack Central for fine lodging.
29 *
30 * Supported devices:
31 * /dev/dsp0-3 standard /dev/dsp device, (mostly) OSS compatible
32 * /dev/mixer standard /dev/mixer device, (mostly) OSS compatible
33 *
34 * Hardware Description
35 *
36 * A working Maestro setup contains the Maestro chip wired to a
37 * codec or 2. In the Maestro we have the APUs, the ASSP, and the
38 * Wavecache. The APUs can be thought of as virtual audio routing
39 * channels. They can take data from a number of sources and perform
40 * basic encodings of the data. The wavecache is a storehouse for
41 * PCM data. Typically it deals with PCI and interacts with the
42 * APUs. The ASSP is a wacky DSP like device that ESS is loth
43 * to release docs on. Thankfully it isn't required on the Maestro
44 * until you start doing insane things like FM emulation and surround
45 * encoding. The codecs are almost always AC-97 compliant codecs,
46 * but it appears that early Maestros may have had PT101 (an ESS
47 * part?) wired to them. The only real difference in the Maestro
48 * families is external goop like docking capability, memory for
49 * the ASSP, and initialization differences.
50 *
51 * Driver Operation
52 *
53 * We only drive the APU/Wavecache as typical DACs and drive the
54 * mixers in the codecs. There are 64 APUs. We assign 6 to each
55 * /dev/dsp? device. 2 channels for output, and 4 channels for
56 * input.
57 *
58 * Each APU can do a number of things, but we only really use
59 * 3 basic functions. For playback we use them to convert PCM
60 * data fetched over PCI by the wavecache into analog data that
61 * is handed to the codec. One APU for mono, and a pair for stereo.
62 * When in stereo, the combination of smarts in the APU and Wavecache
63 * decide which wavecache gets the left or right channel.
64 *
65 * For record we still use the old overly mono system. For each in
66 * coming channel the data comes in from the codec, through a 'input'
67 * APU, through another rate converter APU, and then into memory via
68 * the wavecache and PCI. If its stereo, we mash it back into LRLR in
69 * software. The pass between the 2 APUs is supposedly what requires us
70 * to have a 512 byte buffer sitting around in wavecache/memory.
71 *
72 * The wavecache makes our life even more fun. First off, it can
73 * only address the first 28 bits of PCI address space, making it
74 * useless on quite a few architectures. Secondly, its insane.
75 * It claims to fetch from 4 regions of PCI space, each 4 meg in length.
76 * But that doesn't really work. You can only use 1 region. So all our
77 * allocations have to be in 4meg of each other. Booo. Hiss.
78 * So we have a module parameter, dsps_order, that is the order of
79 * the number of dsps to provide. All their buffer space is allocated
80 * on open time. The sonicvibes OSS routines we inherited really want
81 * power of 2 buffers, so we have all those next to each other, then
82 * 512 byte regions for the recording wavecaches. This ends up
83 * wasting quite a bit of memory. The only fixes I can see would be
84 * getting a kernel allocator that could work in zones, or figuring out
85 * just how to coerce the WP into doing what we want.
86 *
87 * The indirection of the various registers means we have to spinlock
88 * nearly all register accesses. We have the main register indirection
89 * like the wave cache, maestro registers, etc. Then we have beasts
90 * like the APU interface that is indirect registers gotten at through
91 * the main maestro indirection. Ouch. We spinlock around the actual
92 * ports on a per card basis. This means spinlock activity at each IO
93 * operation, but the only IO operation clusters are in non critical
94 * paths and it makes the code far easier to follow. Interrupts are
95 * blocked while holding the locks because the int handler has to
96 * get at some of them :(. The mixer interface doesn't, however.
97 * We also have an OSS state lock that is thrown around in a few
98 * places.
99 *
100 * This driver has brute force APM suspend support. We catch suspend
101 * notifications and stop all work being done on the chip. Any people
102 * that try between this shutdown and the real suspend operation will
103 * be put to sleep. When we resume we restore our software state on
104 * the chip and wake up the people that were using it. The code thats
105 * being used now is quite dirty and assumes we're on a uni-processor
106 * machine. Much of it will need to be cleaned up for SMP ACPI or
107 * similar.
108 *
109 * We also pay attention to PCI power management now. The driver
110 * will power down units of the chip that it knows aren't needed.
111 * The WaveProcessor and company are only powered on when people
112 * have /dev/dsp*s open. On removal the driver will
113 * power down the maestro entirely. There could still be
114 * trouble with BIOSen that magically change power states
115 * themselves, but we'll see.
116 *
117 * History
118 * v0.15 - May 21 2001 - Marcus Meissner <mm@caldera.de>
119 * Ported to Linux 2.4 PCI API. Some clean ups, global devs list
120 * removed (now using pci device driver data).
121 * PM needs to be polished still. Bumped version.
122 * (still kind of v0.14) May 13 2001 - Ben Pfaff <pfaffben@msu.edu>
123 * Add support for 978 docking and basic hardware volume control
124 * (still kind of v0.14) Nov 23 - Alan Cox <alan@redhat.com>
125 * Add clocking= for people with seriously warped hardware
126 * (still v0.14) Nov 10 2000 - Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
127 * add __init to maestro_ac97_init() and maestro_install()
128 * (still based on v0.14) Mar 29 2000 - Zach Brown <zab@redhat.com>
129 * move to 2.3 power management interface, which
130 * required hacking some suspend/resume/check paths
131 * make static compilation work
132 * v0.14 - Jan 28 2000 - Zach Brown <zab@redhat.com>
133 * add PCI power management through ACPI regs.
134 * we now shut down on machine reboot/halt
135 * leave scary PCI config items alone (isa stuff, mostly)
136 * enable 1921s, it seems only mine was broke.
137 * fix swapped left/right pcm dac. har har.
138 * up bob freq, increase buffers, fix pointers at underflow
139 * silly compilation problems
140 * v0.13 - Nov 18 1999 - Zach Brown <zab@redhat.com>
141 * fix nec Versas? man would that be cool.
142 * v0.12 - Nov 12 1999 - Zach Brown <zab@redhat.com>
143 * brown bag volume max fix..
144 * v0.11 - Nov 11 1999 - Zach Brown <zab@redhat.com>
145 * use proper stereo apu decoding, mmap/write should work.
146 * make volume sliders more useful, tweak rate calculation.
147 * fix lame 8bit format reporting bug. duh. apm apu saving buglet also
148 * fix maestro 1 clock freq "bug", remove pt101 support
149 * v0.10 - Oct 28 1999 - Zach Brown <zab@redhat.com>
150 * aha, so, sometimes the WP writes a status word to offset 0
151 * from one of the PCMBARs. rearrange allocation accordingly..
152 * cheers again to Eric for being a good hacker in investigating this.
153 * Jeroen Hoogervorst submits 7500 fix out of nowhere. yay. :)
154 * v0.09 - Oct 23 1999 - Zach Brown <zab@redhat.com>
155 * added APM support.
156 * re-order something such that some 2Es now work. Magic!
157 * new codec reset routine. made some codecs come to life.
158 * fix clear_advance, sync some control with ESS.
159 * now write to all base regs to be paranoid.
160 * v0.08 - Oct 20 1999 - Zach Brown <zab@redhat.com>
161 * Fix initial buflen bug. I am so smart. also smp compiling..
162 * I owe Eric yet another beer: fixed recmask, igain,
163 * muting, and adc sync consistency. Go Team.
164 * v0.07 - Oct 4 1999 - Zach Brown <zab@redhat.com>
165 * tweak adc/dac, formating, and stuff to allow full duplex
166 * allocate dsps memory at open() so we can fit in the wavecache window
167 * fix wavecache braindamage. again. no more scribbling?
168 * fix ess 1921 codec bug on some laptops.
169 * fix dumb pci scanning bug
170 * started 2.3 cleanup, redid spinlocks, little cleanups
171 * v0.06 - Sep 20 1999 - Zach Brown <zab@redhat.com>
172 * fix wavecache thinkos. limit to 1 /dev/dsp.
173 * eric is wearing his thinking toque this week.
174 * spotted apu mode bugs and gain ramping problem
175 * don't touch weird mixer regs, make recmask optional
176 * fixed igain inversion, defaults for mixers, clean up rec_start
177 * make mono recording work.
178 * report subsystem stuff, please send reports.
179 * littles: parallel out, amp now
180 * v0.05 - Sep 17 1999 - Zach Brown <zab@redhat.com>
181 * merged and fixed up Eric's initial recording code
182 * munged format handling to catch misuse, needs rewrite.
183 * revert ring bus init, fixup shared int, add pci busmaster setting
184 * fix mixer oss interface, fix mic mute and recmask
185 * mask off unsupported mixers, reset with all 1s, modularize defaults
186 * make sure bob is running while we need it
187 * got rid of device limit, initial minimal apm hooks
188 * pull out dead code/includes, only allow multimedia/audio maestros
189 * v0.04 - Sep 01 1999 - Zach Brown <zab@redhat.com>
190 * copied memory leak fix from sonicvibes driver
191 * different ac97 reset, play with 2.0 ac97, simplify ring bus setup
192 * bob freq code, region sanity, jitter sync fix; all from Eric
193 *
194 * TODO
195 * fix bob frequency
196 * endianness
197 * do smart things with ac97 2.0 bits.
198 * dual codecs
199 * leave 54->61 open
200 *
201 * it also would be fun to have a mode that would not use pci dma at all
202 * but would copy into the wavecache on board memory and use that
203 * on architectures that don't like the maestro's pci dma ickiness.
204 */
205
206/*****************************************************************************/
207
208#include <linux/module.h>
209#include <linux/sched.h>
210#include <linux/smp_lock.h>
211#include <linux/string.h>
212#include <linux/ctype.h>
213#include <linux/ioport.h>
214#include <linux/delay.h>
215#include <linux/sound.h>
216#include <linux/slab.h>
217#include <linux/soundcard.h>
218#include <linux/pci.h>
219#include <linux/spinlock.h>
220#include <linux/init.h>
221#include <linux/interrupt.h>
222#include <linux/poll.h>
223#include <linux/reboot.h>
224#include <linux/bitops.h>
225#include <linux/wait.h>
226#include <linux/mutex.h>
227
228
229#include <asm/current.h>
230#include <asm/dma.h>
231#include <asm/io.h>
232#include <asm/page.h>
233#include <asm/uaccess.h>
234
235#include "maestro.h"
236
237static struct pci_driver maestro_pci_driver;
238
239/* --------------------------------------------------------------------- */
240
241#define M_DEBUG 1
242
243#ifdef M_DEBUG
244static int debug;
245#define M_printk(args...) {if (debug) printk(args);}
246#else
247#define M_printk(x)
248#endif
249
250/* we try to setup 2^(dsps_order) /dev/dsp devices */
251static int dsps_order;
252/* whether or not we mess around with power management */
253static int use_pm=2; /* set to 1 for force */
254/* clocking for broken hardware - a few laptops seem to use a 50Khz clock
255 ie insmod with clocking=50000 or so */
256
257static int clocking=48000;
258
259MODULE_AUTHOR("Zach Brown <zab@zabbo.net>, Alan Cox <alan@redhat.com>");
260MODULE_DESCRIPTION("ESS Maestro Driver");
261MODULE_LICENSE("GPL");
262
263#ifdef M_DEBUG
264module_param(debug, bool, 0644);
265#endif
266module_param(dsps_order, int, 0);
267module_param(use_pm, int, 0);
268module_param(clocking, int, 0);
269
270/* --------------------------------------------------------------------- */
271#define DRIVER_VERSION "0.15"
272
273#ifndef PCI_VENDOR_ESS
274#define PCI_VENDOR_ESS 0x125D
275#define PCI_DEVICE_ID_ESS_ESS1968 0x1968 /* Maestro 2 */
276#define PCI_DEVICE_ID_ESS_ESS1978 0x1978 /* Maestro 2E */
277
278#define PCI_VENDOR_ESS_OLD 0x1285 /* Platform Tech,
279 the people the maestro
280 was bought from */
281#define PCI_DEVICE_ID_ESS_ESS0100 0x0100 /* maestro 1 */
282#endif /* PCI_VENDOR_ESS */
283
284#define ESS_CHAN_HARD 0x100
285
286/* NEC Versas ? */
287#define NEC_VERSA_SUBID1 0x80581033
288#define NEC_VERSA_SUBID2 0x803c1033
289
290
291/* changed so that I could actually find all the
292 references and fix them up. it's a little more readable now. */
293#define ESS_FMT_STEREO 0x01
294#define ESS_FMT_16BIT 0x02
295#define ESS_FMT_MASK 0x03
296#define ESS_DAC_SHIFT 0
297#define ESS_ADC_SHIFT 4
298
299#define ESS_STATE_MAGIC 0x125D1968
300#define ESS_CARD_MAGIC 0x19283746
301
302#define DAC_RUNNING 1
303#define ADC_RUNNING 2
304
305#define MAX_DSP_ORDER 2
306#define MAX_DSPS (1<<MAX_DSP_ORDER)
307#define NR_DSPS (1<<dsps_order)
308#define NR_IDRS 32
309
310#define NR_APUS 64
311#define NR_APU_REGS 16
312
313/* acpi states */
314enum {
315 ACPI_D0=0,
316 ACPI_D1,
317 ACPI_D2,
318 ACPI_D3
319};
320
321/* bits in the acpi masks */
322#define ACPI_12MHZ ( 1 << 15)
323#define ACPI_24MHZ ( 1 << 14)
324#define ACPI_978 ( 1 << 13)
325#define ACPI_SPDIF ( 1 << 12)
326#define ACPI_GLUE ( 1 << 11)
327#define ACPI__10 ( 1 << 10) /* reserved */
328#define ACPI_PCIINT ( 1 << 9)
329#define ACPI_HV ( 1 << 8) /* hardware volume */
330#define ACPI_GPIO ( 1 << 7)
331#define ACPI_ASSP ( 1 << 6)
332#define ACPI_SB ( 1 << 5) /* sb emul */
333#define ACPI_FM ( 1 << 4) /* fm emul */
334#define ACPI_RB ( 1 << 3) /* ringbus / aclink */
335#define ACPI_MIDI ( 1 << 2)
336#define ACPI_GP ( 1 << 1) /* game port */
337#define ACPI_WP ( 1 << 0) /* wave processor */
338
339#define ACPI_ALL (0xffff)
340#define ACPI_SLEEP (~(ACPI_SPDIF|ACPI_ASSP|ACPI_SB|ACPI_FM| \
341 ACPI_MIDI|ACPI_GP|ACPI_WP))
342#define ACPI_NONE (ACPI__10)
343
344/* these masks indicate which units we care about at
345 which states */
346static u16 acpi_state_mask[] = {
347 [ACPI_D0] = ACPI_ALL,
348 [ACPI_D1] = ACPI_SLEEP,
349 [ACPI_D2] = ACPI_SLEEP,
350 [ACPI_D3] = ACPI_NONE
351};
352
353static char version[] __devinitdata =
354KERN_INFO "maestro: version " DRIVER_VERSION " time " __TIME__ " " __DATE__ "\n";
355
356
357
358static const unsigned sample_size[] = { 1, 2, 2, 4 };
359static const unsigned sample_shift[] = { 0, 1, 1, 2 };
360
361enum card_types_t {
362 TYPE_MAESTRO,
363 TYPE_MAESTRO2,
364 TYPE_MAESTRO2E
365};
366
367static const char *card_names[]={
368 [TYPE_MAESTRO] = "ESS Maestro",
369 [TYPE_MAESTRO2] = "ESS Maestro 2",
370 [TYPE_MAESTRO2E] = "ESS Maestro 2E"
371};
372
373static int clock_freq[]={
374 [TYPE_MAESTRO] = (49152000L / 1024L),
375 [TYPE_MAESTRO2] = (50000000L / 1024L),
376 [TYPE_MAESTRO2E] = (50000000L / 1024L)
377};
378
379static int maestro_notifier(struct notifier_block *nb, unsigned long event, void *buf);
380
381static struct notifier_block maestro_nb = {maestro_notifier, NULL, 0};
382
383/* --------------------------------------------------------------------- */
384
385struct ess_state {
386 unsigned int magic;
387 /* FIXME: we probably want submixers in here, but only one record pair */
388 u8 apu[6]; /* l/r output, l/r intput converters, l/r input apus */
389 u8 apu_mode[6]; /* Running mode for this APU */
390 u8 apu_pan[6]; /* Panning setup for this APU */
391 u32 apu_base[6]; /* base address for this apu */
392 struct ess_card *card; /* Card info */
393 /* wave stuff */
394 unsigned int rateadc, ratedac;
395 unsigned char fmt, enable;
396
397 int index;
398
399 /* this locks around the oss state in the driver */
400 spinlock_t lock;
401 /* only let 1 be opening at a time */
402 struct mutex open_mutex;
403 wait_queue_head_t open_wait;
404 mode_t open_mode;
405
406 /* soundcore stuff */
407 int dev_audio;
408
409 struct dmabuf {
410 void *rawbuf;
411 unsigned buforder;
412 unsigned numfrag;
413 unsigned fragshift;
414 /* XXX zab - swptr only in here so that it can be referenced by
415 clear_advance, as far as I can tell :( */
416 unsigned hwptr, swptr;
417 unsigned total_bytes;
418 int count;
419 unsigned error; /* over/underrun */
420 wait_queue_head_t wait;
421 /* redundant, but makes calculations easier */
422 unsigned fragsize;
423 unsigned dmasize;
424 unsigned fragsamples;
425 /* OSS stuff */
426 unsigned mapped:1;
427 unsigned ready:1; /* our oss buffers are ready to go */
428 unsigned endcleared:1;
429 unsigned ossfragshift;
430 int ossmaxfrags;
431 unsigned subdivision;
432 u16 base; /* Offset for ptr */
433 } dma_dac, dma_adc;
434
435 /* pointer to each dsp?s piece of the apu->src buffer page */
436 void *mixbuf;
437
438};
439
440struct ess_card {
441 unsigned int magic;
442
443 /* We keep maestro cards in a linked list */
444 struct ess_card *next;
445
446 int dev_mixer;
447
448 int card_type;
449
450 /* as most of this is static,
451 perhaps it should be a pointer to a global struct */
452 struct mixer_goo {
453 int modcnt;
454 int supported_mixers;
455 int stereo_mixers;
456 int record_sources;
457 /* the caller must guarantee arg sanity before calling these */
458/* int (*read_mixer)(struct ess_card *card, int index);*/
459 void (*write_mixer)(struct ess_card *card,int mixer, unsigned int left,unsigned int right);
460 int (*recmask_io)(struct ess_card *card,int rw,int mask);
461 unsigned int mixer_state[SOUND_MIXER_NRDEVICES];
462 } mix;
463
464 int power_regs;
465
466 int in_suspend;
467 wait_queue_head_t suspend_queue;
468
469 struct ess_state channels[MAX_DSPS];
470 u16 maestro_map[NR_IDRS]; /* Register map */
471 /* we have to store this junk so that we can come back from a
472 suspend */
473 u16 apu_map[NR_APUS][NR_APU_REGS]; /* contents of apu regs */
474
475 /* this locks around the physical registers on the card */
476 spinlock_t lock;
477
478 /* memory for this card.. wavecache limited :(*/
479 void *dmapages;
480 int dmaorder;
481
482 /* hardware resources */
483 struct pci_dev *pcidev;
484 u32 iobase;
485 u32 irq;
486
487 int bob_freq;
488 char dsps_open;
489
490 int dock_mute_vol;
491};
492
493static void set_mixer(struct ess_card *card,unsigned int mixer, unsigned int val );
494
/*
 * ld2() - integer floor(log2(x)).
 *
 * Returns the position of the highest set bit of x (0-based), i.e.
 * floor(log2(x)) for x >= 1.  Returns 0 for both x == 0 and x == 1,
 * exactly like the original unrolled version.
 */
static unsigned
ld2(unsigned int x)
{
	unsigned bits = 0;

	/* shift the argument down until only the top set bit is gone;
	   the number of shifts is the index of that bit */
	while (x >>= 1)
		bits++;

	return bits;
}
520
521
522/* --------------------------------------------------------------------- */
523
524static void check_suspend(struct ess_card *card);
525
526/* --------------------------------------------------------------------- */
527
528
529/*
530 * ESS Maestro AC97 codec programming interface.
531 */
532
/*
 * Write the 16-bit value 'val' to AC97 codec register 'cmd' through
 * the Maestro's indexed AC97 port pair (ESS_AC97_INDEX/ESS_AC97_DATA
 * relative to card->iobase).
 *
 * Busy-waits (bounded at 10000 polls of bit 0 of the index port) for
 * the codec bus to go idle; if the bus never frees, the write is
 * issued anyway.  NOTE(review): per the driver header comment the
 * caller is expected to hold the card spinlock around codec access --
 * not enforced here; confirm at call sites.
 */
533static void maestro_ac97_set(struct ess_card *card, u8 cmd, u16 val)
534{
535 int io = card->iobase;
536 int i;
537 /*
538 * Wait for the codec bus to be free
539 */
540
	/* presumably blocks while a suspend is in progress -- see check_suspend() */
541 check_suspend(card);
542
543 for(i=0;i<10000;i++)
544 {
545 if(!(inb(io+ESS_AC97_INDEX)&1))
546 break;
547 }
548 /*
549 * Write the bus
550 */
	/* data first, then the index/command latches the transfer */
551 outw(val, io+ESS_AC97_DATA);
552 mdelay(1);
553 outb(cmd, io+ESS_AC97_INDEX);
554 mdelay(1);
555}
556
/*
 * Read AC97 codec register 'cmd' through the Maestro's indexed AC97
 * interface and return its 16-bit value.
 *
 * Waits (bounded) for the bus to go idle, issues the read by writing
 * cmd with bit 7 set to the index port, then polls for completion.
 * Returns 0 if the codec never completes the read (after logging a
 * timeout) -- callers cannot distinguish that from a genuine 0 value.
 */
557static u16 maestro_ac97_get(struct ess_card *card, u8 cmd)
558{
559 int io = card->iobase;
560 int sanity=10000;
561 u16 data;
562 int i;
563
564 check_suspend(card);
565 /*
566 * Wait for the codec bus to be free
567 */
568
569 for(i=0;i<10000;i++)
570 {
571 if(!(inb(io+ESS_AC97_INDEX)&1))
572 break;
573 }
574
	/* bit 7 of the index marks this as a read request */
575 outb(cmd|0x80, io+ESS_AC97_INDEX);
576 mdelay(1);
577
	/* bounded poll for the read to complete */
578 while(inb(io+ESS_AC97_INDEX)&1)
579 {
580 sanity--;
581 if(!sanity)
582 {
583 printk(KERN_ERR "maestro: ac97 codec timeout reading 0x%x.\n",cmd);
584 return 0;
585 }
586 }
587 data=inw(io+ESS_AC97_DATA);
588 mdelay(1);
589 return data;
590}
591
592/* OSS interface to the ac97s.. */
593
594#define AC97_STEREO_MASK (SOUND_MASK_VOLUME|\
595 SOUND_MASK_PCM|SOUND_MASK_LINE|SOUND_MASK_CD|\
596 SOUND_MASK_VIDEO|SOUND_MASK_LINE1|SOUND_MASK_IGAIN)
597
598#define AC97_SUPPORTED_MASK (AC97_STEREO_MASK | \
599 SOUND_MASK_BASS|SOUND_MASK_TREBLE|SOUND_MASK_MIC|\
600 SOUND_MASK_SPEAKER)
601
602#define AC97_RECORD_MASK (SOUND_MASK_MIC|\
603 SOUND_MASK_CD| SOUND_MASK_VIDEO| SOUND_MASK_LINE1| SOUND_MASK_LINE|\
604 SOUND_MASK_PHONEIN)
605
606#define supported_mixer(CARD,FOO) ( CARD->mix.supported_mixers & (1<<FOO) )
607
608/* this table has default mixer values for all OSS mixers.
609 be sure to fill it in if you add oss mixers
610 to anyone's supported mixer defines */
611
612static unsigned int mixer_defaults[SOUND_MIXER_NRDEVICES] = {
613 [SOUND_MIXER_VOLUME] = 0x3232,
614 [SOUND_MIXER_BASS] = 0x3232,
615 [SOUND_MIXER_TREBLE] = 0x3232,
616 [SOUND_MIXER_SPEAKER] = 0x3232,
617 [SOUND_MIXER_MIC] = 0x8000, /* annoying */
618 [SOUND_MIXER_LINE] = 0x3232,
619 [SOUND_MIXER_CD] = 0x3232,
620 [SOUND_MIXER_VIDEO] = 0x3232,
621 [SOUND_MIXER_LINE1] = 0x3232,
622 [SOUND_MIXER_PCM] = 0x3232,
623 [SOUND_MIXER_IGAIN] = 0x3232
624};
625
626static struct ac97_mixer_hw {
627 unsigned char offset;
628 int scale;
629} ac97_hw[SOUND_MIXER_NRDEVICES]= {
630 [SOUND_MIXER_VOLUME] = {0x02,63},
631 [SOUND_MIXER_BASS] = {0x08,15},
632 [SOUND_MIXER_TREBLE] = {0x08,15},
633 [SOUND_MIXER_SPEAKER] = {0x0a,15},
634 [SOUND_MIXER_MIC] = {0x0e,31},
635 [SOUND_MIXER_LINE] = {0x10,31},
636 [SOUND_MIXER_CD] = {0x12,31},
637 [SOUND_MIXER_VIDEO] = {0x14,31},
638 [SOUND_MIXER_LINE1] = {0x16,31},
639 [SOUND_MIXER_PCM] = {0x18,31},
640 [SOUND_MIXER_IGAIN] = {0x1c,15}
641};
642
643#if 0 /* *shrug* removed simply because we never used it.
644 feel free to implement again if needed */
645
646/* reads the given OSS mixer from the ac97
647 the caller must have insured that the ac97 knows
648 about that given mixer, and should be holding a
649 spinlock for the card */
650static int ac97_read_mixer(struct ess_card *card, int mixer)
651{
652 u16 val;
653 int ret=0;
654 struct ac97_mixer_hw *mh = &ac97_hw[mixer];
655
656 val = maestro_ac97_get(card, mh->offset);
657
658 if(AC97_STEREO_MASK & (1<<mixer)) {
659 /* nice stereo mixers .. */
660 int left,right;
661
662 left = (val >> 8) & 0x7f;
663 right = val & 0x7f;
664
665 if (mixer == SOUND_MIXER_IGAIN) {
666 right = (right * 100) / mh->scale;
667 left = (left * 100) / mh->scale;
668 } else {
669 right = 100 - ((right * 100) / mh->scale);
670 left = 100 - ((left * 100) / mh->scale);
671 }
672
673 ret = left | (right << 8);
674 } else if (mixer == SOUND_MIXER_SPEAKER) {
675 ret = 100 - ((((val & 0x1e)>>1) * 100) / mh->scale);
676 } else if (mixer == SOUND_MIXER_MIC) {
677 ret = 100 - (((val & 0x1f) * 100) / mh->scale);
678 /* the low bit is optional in the tone sliders and masking
679 it lets is avoid the 0xf 'bypass'.. */
680 } else if (mixer == SOUND_MIXER_BASS) {
681 ret = 100 - ((((val >> 8) & 0xe) * 100) / mh->scale);
682 } else if (mixer == SOUND_MIXER_TREBLE) {
683 ret = 100 - (((val & 0xe) * 100) / mh->scale);
684 }
685
686 M_printk("read mixer %d (0x%x) %x -> %x\n",mixer,mh->offset,val,ret);
687
688 return ret;
689}
690#endif
691
692/* write the OSS encoded volume to the given OSS encoded mixer,
693 again caller's job to make sure all is well in arg land,
694 call with spinlock held */
695
/* Maps a 0..100 linear OSS slider position to an approximately
   logarithmic 0..100 attenuation value, so equal slider steps give
   roughly equal perceived loudness steps.  Index = linear volume;
   the curve is monotonic non-decreasing (used by ac97_write_mixer
   for the stereo controls). */
696/* linear scale -> log */
697static unsigned char lin2log[101] =
698{
6990, 0 , 15 , 23 , 30 , 34 , 38 , 42 , 45 , 47 ,
70050 , 52 , 53 , 55 , 57 , 58 , 60 , 61 , 62 ,
70163 , 65 , 66 , 67 , 68 , 69 , 69 , 70 , 71 ,
70272 , 73 , 73 , 74 , 75 , 75 , 76 , 77 , 77 ,
70378 , 78 , 79 , 80 , 80 , 81 , 81 , 82 , 82 ,
70483 , 83 , 84 , 84 , 84 , 85 , 85 , 86 , 86 ,
70587 , 87 , 87 , 88 , 88 , 88 , 89 , 89 , 89 ,
70690 , 90 , 90 , 91 , 91 , 91 , 92 , 92 , 92 ,
70793 , 93 , 93 , 94 , 94 , 94 , 94 , 95 , 95 ,
70895 , 95 , 96 , 96 , 96 , 96 , 97 , 97 , 97 ,
70997 , 98 , 98 , 98 , 98 , 99 , 99 , 99 , 99 , 99
710};
711
/*
 * Program one OSS mixer control (left/right given as 0..100 OSS
 * percentages) into the corresponding AC97 register via the ac97_hw[]
 * offset/scale table.
 *
 * Stereo controls pack left into bits 15:8 and right into bits 7:0,
 * with 0x8000 set to mute when both sides are 0; IGAIN's slider runs
 * the opposite direction from the attenuation registers, and PCM/CD
 * skip the lin2log curve.  Mono controls (speaker, mic, bass, treble)
 * read-modify-write only their field of the register.
 *
 * Per the comment above this block the caller must have validated
 * 'mixer' against the supported set and must hold the card spinlock.
 */
712static void ac97_write_mixer(struct ess_card *card,int mixer, unsigned int left, unsigned int right)
713{
714 u16 val=0;
715 struct ac97_mixer_hw *mh = &ac97_hw[mixer];
716
717 M_printk("wrote mixer %d (0x%x) %d,%d",mixer,mh->offset,left,right);
718
719 if(AC97_STEREO_MASK & (1<<mixer)) {
720 /* stereo mixers, mute them if we can */
721
722 if (mixer == SOUND_MIXER_IGAIN) {
723 /* igain's slider is reversed.. */
724 right = (right * mh->scale) / 100;
725 left = (left * mh->scale) / 100;
726 if ((left == 0) && (right == 0))
727 val |= 0x8000;
728 } else if (mixer == SOUND_MIXER_PCM || mixer == SOUND_MIXER_CD) {
729 /* log conversion seems bad for them */
730 if ((left == 0) && (right == 0))
731 val = 0x8000;
732 right = ((100 - right) * mh->scale) / 100;
733 left = ((100 - left) * mh->scale) / 100;
734 } else {
735 /* log conversion for the stereo controls */
736 if((left == 0) && (right == 0))
737 val = 0x8000;
738 right = ((100 - lin2log[right]) * mh->scale) / 100;
739 left = ((100 - lin2log[left]) * mh->scale) / 100;
740 }
741
742 val |= (left << 8) | right;
743
744 } else if (mixer == SOUND_MIXER_SPEAKER) {
745 val = (((100 - left) * mh->scale) / 100) << 1;
746 } else if (mixer == SOUND_MIXER_MIC) {
747 val = maestro_ac97_get(card, mh->offset) & ~0x801f;
748 val |= (((100 - left) * mh->scale) / 100);
749 /* the low bit is optional in the tone sliders and masking
750 it lets is avoid the 0xf 'bypass'.. */
751 } else if (mixer == SOUND_MIXER_BASS) {
752 val = maestro_ac97_get(card , mh->offset) & ~0x0f00;
753 val |= ((((100 - left) * mh->scale) / 100) << 8) & 0x0e00;
754 } else if (mixer == SOUND_MIXER_TREBLE) {
755 val = maestro_ac97_get(card , mh->offset) & ~0x000f;
756 val |= (((100 - left) * mh->scale) / 100) & 0x000e;
757 }
758
	/* single write-back for every control kind */
759 maestro_ac97_set(card , mh->offset, val);
760
761 M_printk(" -> %x\n",val);
762}
763
764/* the following tables allow us to go from
765 OSS <-> ac97 quickly. */
766
767enum ac97_recsettings {
768 AC97_REC_MIC=0,
769 AC97_REC_CD,
770 AC97_REC_VIDEO,
771 AC97_REC_AUX,
772 AC97_REC_LINE,
773 AC97_REC_STEREO, /* combination of all enabled outputs.. */
774 AC97_REC_MONO, /*.. or the mono equivalent */
775 AC97_REC_PHONE
776};
777
778static unsigned int ac97_oss_mask[] = {
779 [AC97_REC_MIC] = SOUND_MASK_MIC,
780 [AC97_REC_CD] = SOUND_MASK_CD,
781 [AC97_REC_VIDEO] = SOUND_MASK_VIDEO,
782 [AC97_REC_AUX] = SOUND_MASK_LINE1,
783 [AC97_REC_LINE] = SOUND_MASK_LINE,
784 [AC97_REC_PHONE] = SOUND_MASK_PHONEIN
785};
786
787/* indexed by bit position */
788static unsigned int ac97_oss_rm[] = {
789 [SOUND_MIXER_MIC] = AC97_REC_MIC,
790 [SOUND_MIXER_CD] = AC97_REC_CD,
791 [SOUND_MIXER_VIDEO] = AC97_REC_VIDEO,
792 [SOUND_MIXER_LINE1] = AC97_REC_AUX,
793 [SOUND_MIXER_LINE] = AC97_REC_LINE,
794 [SOUND_MIXER_PHONEIN] = AC97_REC_PHONE
795};
796
797/* read or write the recmask
798 the ac97 can really have left and right recording
799 inputs independently set, but OSS doesn't seem to
800 want us to express that to the user.
801 the caller guarantees that we have a supported bit set,
802 and they must be holding the card's spinlock */
803static int
804ac97_recmask_io(struct ess_card *card, int read, int mask)
805{
806 unsigned int val = ac97_oss_mask[ maestro_ac97_get(card, 0x1a) & 0x7 ];
807
808 if (read) return val;
809
810 /* oss can have many inputs, maestro can't. try
811 to pick the 'new' one */
812
813 if (mask != val) mask &= ~val;
814
815 val = ffs(mask) - 1;
816 val = ac97_oss_rm[val];
817 val |= val << 8; /* set both channels */
818
819 M_printk("maestro: setting ac97 recmask to 0x%x\n",val);
820
821 maestro_ac97_set(card,0x1a,val);
822
823 return 0;
824};
825
826/*
827 * The Maestro can be wired to a standard AC97 compliant codec
828 * (see www.intel.com for the pdf's on this), or to a PT101 codec
829 * which appears to be the ES1918 (data sheet on the esstech.com.tw site)
830 *
831 * The PT101 setup is untested.
832 */
833
/*
 * One-time AC97 codec setup: install the mixer callbacks and
 * supported-control masks on the card, probe the codec's vendor id
 * and capability register, apply vendor-specific quirks, and unmute
 * the CD/misc paths.  Always returns 0.
 */
834static u16 __init maestro_ac97_init(struct ess_card *card)
835{
836 u16 vend1, vend2, caps;
837
838 card->mix.supported_mixers = AC97_SUPPORTED_MASK;
839 card->mix.stereo_mixers = AC97_STEREO_MASK;
840 card->mix.record_sources = AC97_RECORD_MASK;
841/* card->mix.read_mixer = ac97_read_mixer;*/
842 card->mix.write_mixer = ac97_write_mixer;
843 card->mix.recmask_io = ac97_recmask_io;
844
	/* AC97 vendor id lives in registers 0x7c/0x7e */
845 vend1 = maestro_ac97_get(card, 0x7c);
846 vend2 = maestro_ac97_get(card, 0x7e);
847
848 caps = maestro_ac97_get(card, 0x00);
849
850 printk(KERN_INFO "maestro: AC97 Codec detected: v: 0x%2x%2x caps: 0x%x pwr: 0x%x\n",
851 vend1,vend2,caps,maestro_ac97_get(card,0x26) & 0xf);
852
853 if (! (caps & 0x4) ) {
854 /* no bass/treble nobs */
855 card->mix.supported_mixers &= ~(SOUND_MASK_BASS|SOUND_MASK_TREBLE);
856 }
857
858 /* XXX endianness, dork head. */
859 /* vendor specifc bits.. */
	/* NOTE(review): vend1 << 16 happens in int before the cast, so a
	   vendor id with the top bit set relies on implementation-defined
	   sign behavior -- harmless for the ids matched here, but worth
	   confirming if cases are added */
860 switch ((long)(vend1 << 16) | vend2) {
861 case 0x545200ff: /* TriTech */
862 /* no idea what this does */
863 maestro_ac97_set(card,0x2a,0x0001);
864 maestro_ac97_set(card,0x2c,0x0000);
865 maestro_ac97_set(card,0x2c,0xffff);
866 break;
867#if 0 /* i thought the problems I was seeing were with
868 the 1921, but apparently they were with the pci board
869 it was on, so this code is commented out.
870 lets see if this holds true. */
871 case 0x83847609: /* ESS 1921 */
872 /* writing to 0xe (mic) or 0x1a (recmask) seems
873 to hang this codec */
874 card->mix.supported_mixers &= ~(SOUND_MASK_MIC);
875 card->mix.record_sources = 0;
876 card->mix.recmask_io = NULL;
877#if 0 /* don't ask. I have yet to see what these actually do. */
878 maestro_ac97_set(card,0x76,0xABBA); /* o/~ Take a chance on me o/~ */
879 udelay(20);
880 maestro_ac97_set(card,0x78,0x3002);
881 udelay(20);
882 maestro_ac97_set(card,0x78,0x3802);
883 udelay(20);
884#endif
885 break;
886#endif
887 default: break;
888 }
889
	/* unmute the CD path (reg 0x1E) ... */
890 maestro_ac97_set(card, 0x1E, 0x0404);
891 /* null misc stuff */
892 maestro_ac97_set(card, 0x20, 0x0000);
893
894 return 0;
895}
896
897#if 0 /* there has been 1 person on the planet with a pt101 that we
898 know of. If they care, they can put this back in :) */
/*
 * Disabled (#if 0) init sequence for the PT101/ES1918 codec; per the
 * guard comment only one such board was ever known.
 *
 * NOTE(review): if re-enabled, this will not compile as-is -- every
 * call passes 'iobase' (an int) where maestro_ac97_set() takes a
 * struct ess_card *; the calls need converting to take 'card'.
 */
899static u16 maestro_pt101_init(struct ess_card *card,int iobase)
900{
901 printk(KERN_INFO "maestro: PT101 Codec detected, initializing but _not_ installing mixer device.\n");
902 /* who knows.. */
903 maestro_ac97_set(iobase, 0x2A, 0x0001);
904 maestro_ac97_set(iobase, 0x2C, 0x0000);
905 maestro_ac97_set(iobase, 0x2C, 0xFFFF);
906 maestro_ac97_set(iobase, 0x10, 0x9F1F);
907 maestro_ac97_set(iobase, 0x12, 0x0808);
908 maestro_ac97_set(iobase, 0x14, 0x9F1F);
909 maestro_ac97_set(iobase, 0x16, 0x9F1F);
910 maestro_ac97_set(iobase, 0x18, 0x0404);
911 maestro_ac97_set(iobase, 0x1A, 0x0000);
912 maestro_ac97_set(iobase, 0x1C, 0x0000);
913 maestro_ac97_set(iobase, 0x02, 0x0404);
914 maestro_ac97_set(iobase, 0x04, 0x0808);
915 maestro_ac97_set(iobase, 0x0C, 0x801F);
916 maestro_ac97_set(iobase, 0x0E, 0x801F);
917 return 0;
918}
919#endif
920
/* this is very magic, and very slow..
 *
 * Hardware reset of the AC97 codec(s) behind the maestro: wiggles GPIO
 * lines to pull codec reset, restores the ring-bus config registers,
 * applies an NEC Versa specific external-amp workaround, and finally
 * powers up the ESS 978 docking chip's output path.  Blocks for over
 * half a second (mdelay) -- callers must tolerate that.  The exact
 * register/GPIO choreography follows ESS reference behavior; treat the
 * ordering and delays as load-bearing. */
static void
maestro_ac97_reset(int ioaddr, struct pci_dev *pcidev)
{
	u16 save_68;
	u16 w;
	u32 vend;

	/* drop the enable bits of the three ring bus config registers */
	outw( inw(ioaddr + 0x38) & 0xfffc, ioaddr + 0x38);
	outw( inw(ioaddr + 0x3a) & 0xfffc, ioaddr + 0x3a);
	outw( inw(ioaddr + 0x3c) & 0xfffc, ioaddr + 0x3c);

	/* reset the first codec */
	outw(0x0000, ioaddr+0x36);
	save_68 = inw(ioaddr+0x68);
	pci_read_config_word(pcidev, 0x58, &w);	/* something magical with gpio and bus arb. */
	pci_read_config_dword(pcidev, PCI_SUBSYSTEM_VENDOR_ID, &vend);
	if( w & 0x1)
		save_68 |= 0x10;
	outw(0xfffe, ioaddr + 0x64);	/* tickly gpio 0.. */
	outw(0x0001, ioaddr + 0x68);
	outw(0x0000, ioaddr + 0x60);
	udelay(20);
	outw(0x0001, ioaddr + 0x60);
	mdelay(20);

	outw(save_68 | 0x1, ioaddr + 0x68);	/* now restore .. */
	outw( (inw(ioaddr + 0x38) & 0xfffc)|0x1, ioaddr + 0x38);
	outw( (inw(ioaddr + 0x3a) & 0xfffc)|0x1, ioaddr + 0x3a);
	outw( (inw(ioaddr + 0x3c) & 0xfffc)|0x1, ioaddr + 0x3c);

	/* now the second codec */
	outw(0x0000, ioaddr+0x36);
	outw(0xfff7, ioaddr + 0x64);
	save_68 = inw(ioaddr+0x68);
	outw(0x0009, ioaddr + 0x68);
	outw(0x0001, ioaddr + 0x60);
	udelay(20);
	outw(0x0009, ioaddr + 0x60);
	mdelay(500);	/* .. ouch.. */
	outw( inw(ioaddr + 0x38) & 0xfffc, ioaddr + 0x38);
	outw( inw(ioaddr + 0x3a) & 0xfffc, ioaddr + 0x3a);
	outw( inw(ioaddr + 0x3c) & 0xfffc, ioaddr + 0x3c);

#if 0 /* the loop here needs to be much better if we want it.. */
	M_printk("trying software reset\n");
	/* try and do a software reset */
	outb(0x80|0x7c, ioaddr + 0x30);
	for (w=0; ; w++) {
		if ((inw(ioaddr+ 0x30) & 1) == 0) {
			if(inb(ioaddr + 0x32) !=0) break;

			outb(0x80|0x7d, ioaddr + 0x30);
			if (((inw(ioaddr+ 0x30) & 1) == 0) && (inb(ioaddr + 0x32) !=0)) break;
			outb(0x80|0x7f, ioaddr + 0x30);
			if (((inw(ioaddr+ 0x30) & 1) == 0) && (inb(ioaddr + 0x32) !=0)) break;
		}

		if( w > 10000) {
			outb( inb(ioaddr + 0x37) | 0x08, ioaddr + 0x37);  /* do a software reset */
			mdelay(500); /* oh my.. */
			outb( inb(ioaddr + 0x37) & ~0x08, ioaddr + 0x37);
			udelay(1);
			outw( 0x80, ioaddr+0x30);
			for(w = 0 ; w < 10000; w++) {
				if((inw(ioaddr + 0x30) & 1) ==0) break;
			}
		}
	}
#endif
	/* NEC Versa laptops need extra GPIO poking to switch on the amp */
	if ( vend == NEC_VERSA_SUBID1 || vend == NEC_VERSA_SUBID2) {
		/* turn on external amp? */
		outw(0xf9ff, ioaddr + 0x64);
		outw(inw(ioaddr+0x68) | 0x600, ioaddr + 0x68);
		outw(0x0209, ioaddr + 0x60);
	}

	/* Turn on the 978 docking chip.
	   First frob the "master output enable" bit,
	   then set most of the playback volume control registers to max. */
	outb(inb(ioaddr+0xc0)|(1<<5), ioaddr+0xc0);
	outb(0xff, ioaddr+0xc3);
	outb(0xff, ioaddr+0xc4);
	outb(0xff, ioaddr+0xc6);
	outb(0xff, ioaddr+0xc8);
	outb(0x3f, ioaddr+0xcf);
	outb(0x3f, ioaddr+0xd0);
}
1009/*
1010 * Indirect register access. Not all registers are readable so we
1011 * need to keep register state ourselves
1012 */
1013
1014#define WRITEABLE_MAP 0xEFFFFF
1015#define READABLE_MAP 0x64003F
1016
1017/*
1018 * The Maestro engineers were a little indirection happy. These indirected
1019 * registers themselves include indirect registers at another layer
1020 */
1021
1022static void __maestro_write(struct ess_card *card, u16 reg, u16 data)
1023{
1024 long ioaddr = card->iobase;
1025
1026 outw(reg, ioaddr+0x02);
1027 outw(data, ioaddr+0x00);
1028 if( reg >= NR_IDRS) printk("maestro: IDR %d out of bounds!\n",reg);
1029 else card->maestro_map[reg]=data;
1030
1031}
1032
1033static void maestro_write(struct ess_state *s, u16 reg, u16 data)
1034{
1035 unsigned long flags;
1036
1037 check_suspend(s->card);
1038 spin_lock_irqsave(&s->card->lock,flags);
1039
1040 __maestro_write(s->card,reg,data);
1041
1042 spin_unlock_irqrestore(&s->card->lock,flags);
1043}
1044
1045static u16 __maestro_read(struct ess_card *card, u16 reg)
1046{
1047 long ioaddr = card->iobase;
1048
1049 outw(reg, ioaddr+0x02);
1050 return card->maestro_map[reg]=inw(ioaddr+0x00);
1051}
1052
1053static u16 maestro_read(struct ess_state *s, u16 reg)
1054{
1055 if(READABLE_MAP & (1<<reg))
1056 {
1057 unsigned long flags;
1058 check_suspend(s->card);
1059 spin_lock_irqsave(&s->card->lock,flags);
1060
1061 __maestro_read(s->card,reg);
1062
1063 spin_unlock_irqrestore(&s->card->lock,flags);
1064 }
1065 return s->card->maestro_map[reg];
1066}
1067
1068/*
1069 * These routines handle accessing the second level indirections to the
1070 * wave ram.
1071 */
1072
1073/*
1074 * The register names are the ones ESS uses (see 104T31.ZIP)
1075 */
1076
1077#define IDR0_DATA_PORT 0x00
1078#define IDR1_CRAM_POINTER 0x01
1079#define IDR2_CRAM_DATA 0x02
1080#define IDR3_WAVE_DATA 0x03
1081#define IDR4_WAVE_PTR_LOW 0x04
1082#define IDR5_WAVE_PTR_HI 0x05
1083#define IDR6_TIMER_CTRL 0x06
1084#define IDR7_WAVE_ROMRAM 0x07
1085
1086static void apu_index_set(struct ess_card *card, u16 index)
1087{
1088 int i;
1089 __maestro_write(card, IDR1_CRAM_POINTER, index);
1090 for(i=0;i<1000;i++)
1091 if(__maestro_read(card, IDR1_CRAM_POINTER)==index)
1092 return;
1093 printk(KERN_WARNING "maestro: APU register select failed.\n");
1094}
1095
1096static void apu_data_set(struct ess_card *card, u16 data)
1097{
1098 int i;
1099 for(i=0;i<1000;i++)
1100 {
1101 if(__maestro_read(card, IDR0_DATA_PORT)==data)
1102 return;
1103 __maestro_write(card, IDR0_DATA_PORT, data);
1104 }
1105}
1106
1107/*
1108 * This is the public interface for APU manipulation. It handles the
1109 * interlock to avoid two APU writes in parallel etc. Don't diddle
1110 * directly with the stuff above.
1111 */
1112
1113static void apu_set_register(struct ess_state *s, u16 channel, u8 reg, u16 data)
1114{
1115 unsigned long flags;
1116
1117 check_suspend(s->card);
1118
1119 if(channel&ESS_CHAN_HARD)
1120 channel&=~ESS_CHAN_HARD;
1121 else
1122 {
1123 if(channel>5)
1124 printk("BAD CHANNEL %d.\n",channel);
1125 else
1126 channel = s->apu[channel];
1127 /* store based on real hardware apu/reg */
1128 s->card->apu_map[channel][reg]=data;
1129 }
1130 reg|=(channel<<4);
1131
1132 /* hooray for double indirection!! */
1133 spin_lock_irqsave(&s->card->lock,flags);
1134
1135 apu_index_set(s->card, reg);
1136 apu_data_set(s->card, data);
1137
1138 spin_unlock_irqrestore(&s->card->lock,flags);
1139}
1140
1141static u16 apu_get_register(struct ess_state *s, u16 channel, u8 reg)
1142{
1143 unsigned long flags;
1144 u16 v;
1145
1146 check_suspend(s->card);
1147
1148 if(channel&ESS_CHAN_HARD)
1149 channel&=~ESS_CHAN_HARD;
1150 else
1151 channel = s->apu[channel];
1152
1153 reg|=(channel<<4);
1154
1155 spin_lock_irqsave(&s->card->lock,flags);
1156
1157 apu_index_set(s->card, reg);
1158 v=__maestro_read(s->card, IDR0_DATA_PORT);
1159
1160 spin_unlock_irqrestore(&s->card->lock,flags);
1161 return v;
1162}
1163
1164
1165/*
1166 * The wavecache buffers between the APUs and
1167 * pci bus mastering
1168 */
1169
1170static void wave_set_register(struct ess_state *s, u16 reg, u16 value)
1171{
1172 long ioaddr = s->card->iobase;
1173 unsigned long flags;
1174 check_suspend(s->card);
1175
1176 spin_lock_irqsave(&s->card->lock,flags);
1177
1178 outw(reg, ioaddr+0x10);
1179 outw(value, ioaddr+0x12);
1180
1181 spin_unlock_irqrestore(&s->card->lock,flags);
1182}
1183
1184static u16 wave_get_register(struct ess_state *s, u16 reg)
1185{
1186 long ioaddr = s->card->iobase;
1187 unsigned long flags;
1188 u16 value;
1189 check_suspend(s->card);
1190
1191 spin_lock_irqsave(&s->card->lock,flags);
1192 outw(reg, ioaddr+0x10);
1193 value=inw(ioaddr+0x12);
1194 spin_unlock_irqrestore(&s->card->lock,flags);
1195
1196 return value;
1197}
1198
/* Pulse the sound-system reset bit at iobase+0x18 (set, settle,
   clear, settle). */
static void sound_reset(int ioaddr)
{
	outw(0x2000, ioaddr + 0x18);
	udelay(1);
	outw(0x0000, ioaddr + 0x18);
	udelay(1);
}
1206
1207/* sets the play formats of these apus, should be passed the already shifted format */
1208static void set_apu_fmt(struct ess_state *s, int apu, int mode)
1209{
1210 int apu_fmt = 0x10;
1211
1212 if(!(mode&ESS_FMT_16BIT)) apu_fmt+=0x20;
1213 if((mode&ESS_FMT_STEREO)) apu_fmt+=0x10;
1214 s->apu_mode[apu] = apu_fmt;
1215 s->apu_mode[apu+1] = apu_fmt;
1216}
1217
1218/* this only fixes the output apu mode to be later set by start_dac and
1219 company. output apu modes are set in ess_rec_setup */
1220static void set_fmt(struct ess_state *s, unsigned char mask, unsigned char data)
1221{
1222 s->fmt = (s->fmt & mask) | data;
1223 set_apu_fmt(s, 0, (s->fmt >> ESS_DAC_SHIFT) & ESS_FMT_MASK);
1224}
1225
1226/* this is off by a little bit.. */
1227static u32 compute_rate(struct ess_state *s, u32 freq)
1228{
1229 u32 clock = clock_freq[s->card->card_type];
1230
1231 freq = (freq * clocking)/48000;
1232
1233 if (freq == 48000)
1234 return 0x10000;
1235
1236 return ((freq / clock) <<16 )+
1237 (((freq % clock) << 16) / clock);
1238}
1239
1240static void set_dac_rate(struct ess_state *s, unsigned int rate)
1241{
1242 u32 freq;
1243 int fmt = (s->fmt >> ESS_DAC_SHIFT) & ESS_FMT_MASK;
1244
1245 if (rate > 48000)
1246 rate = 48000;
1247 if (rate < 4000)
1248 rate = 4000;
1249
1250 s->ratedac = rate;
1251
1252 if(! (fmt & ESS_FMT_16BIT) && !(fmt & ESS_FMT_STEREO))
1253 rate >>= 1;
1254
1255/* M_printk("computing dac rate %d with mode %d\n",rate,s->fmt);*/
1256
1257 freq = compute_rate(s, rate);
1258
1259 /* Load the frequency, turn on 6dB */
1260 apu_set_register(s, 0, 2,(apu_get_register(s, 0, 2)&0x00FF)|
1261 ( ((freq&0xFF)<<8)|0x10 ));
1262 apu_set_register(s, 0, 3, freq>>8);
1263 apu_set_register(s, 1, 2,(apu_get_register(s, 1, 2)&0x00FF)|
1264 ( ((freq&0xFF)<<8)|0x10 ));
1265 apu_set_register(s, 1, 3, freq>>8);
1266}
1267
1268static void set_adc_rate(struct ess_state *s, unsigned rate)
1269{
1270 u32 freq;
1271
1272 /* Sample Rate conversion APUs don't like 0x10000 for their rate */
1273 if (rate > 47999)
1274 rate = 47999;
1275 if (rate < 4000)
1276 rate = 4000;
1277
1278 s->rateadc = rate;
1279
1280 freq = compute_rate(s, rate);
1281
1282 /* Load the frequency, turn on 6dB */
1283 apu_set_register(s, 2, 2,(apu_get_register(s, 2, 2)&0x00FF)|
1284 ( ((freq&0xFF)<<8)|0x10 ));
1285 apu_set_register(s, 2, 3, freq>>8);
1286 apu_set_register(s, 3, 2,(apu_get_register(s, 3, 2)&0x00FF)|
1287 ( ((freq&0xFF)<<8)|0x10 ));
1288 apu_set_register(s, 3, 3, freq>>8);
1289
1290 /* fix mixer rate at 48khz. and its _must_ be 0x10000. */
1291 freq = 0x10000;
1292
1293 apu_set_register(s, 4, 2,(apu_get_register(s, 4, 2)&0x00FF)|
1294 ( ((freq&0xFF)<<8)|0x10 ));
1295 apu_set_register(s, 4, 3, freq>>8);
1296 apu_set_register(s, 5, 2,(apu_get_register(s, 5, 2)&0x00FF)|
1297 ( ((freq&0xFF)<<8)|0x10 ));
1298 apu_set_register(s, 5, 3, freq>>8);
1299}
1300
1301/* Stop our host of recording apus */
1302static inline void stop_adc(struct ess_state *s)
1303{
1304 /* XXX lets hope we don't have to lock around this */
1305 if (! (s->enable & ADC_RUNNING)) return;
1306
1307 s->enable &= ~ADC_RUNNING;
1308 apu_set_register(s, 2, 0, apu_get_register(s, 2, 0)&0xFF0F);
1309 apu_set_register(s, 3, 0, apu_get_register(s, 3, 0)&0xFF0F);
1310 apu_set_register(s, 4, 0, apu_get_register(s, 2, 0)&0xFF0F);
1311 apu_set_register(s, 5, 0, apu_get_register(s, 3, 0)&0xFF0F);
1312}
1313
1314/* stop output apus */
1315static void stop_dac(struct ess_state *s)
1316{
1317 /* XXX have to lock around this? */
1318 if (! (s->enable & DAC_RUNNING)) return;
1319
1320 s->enable &= ~DAC_RUNNING;
1321 apu_set_register(s, 0, 0, apu_get_register(s, 0, 0)&0xFF0F);
1322 apu_set_register(s, 1, 0, apu_get_register(s, 1, 0)&0xFF0F);
1323}
1324
1325static void start_dac(struct ess_state *s)
1326{
1327 /* XXX locks? */
1328 if ( (s->dma_dac.mapped || s->dma_dac.count > 0) &&
1329 s->dma_dac.ready &&
1330 (! (s->enable & DAC_RUNNING)) ) {
1331
1332 s->enable |= DAC_RUNNING;
1333
1334 apu_set_register(s, 0, 0,
1335 (apu_get_register(s, 0, 0)&0xFF0F)|s->apu_mode[0]);
1336
1337 if((s->fmt >> ESS_DAC_SHIFT) & ESS_FMT_STEREO)
1338 apu_set_register(s, 1, 0,
1339 (apu_get_register(s, 1, 0)&0xFF0F)|s->apu_mode[1]);
1340 }
1341}
1342
1343static void start_adc(struct ess_state *s)
1344{
1345 /* XXX locks? */
1346 if ((s->dma_adc.mapped || s->dma_adc.count < (signed)(s->dma_adc.dmasize - 2*s->dma_adc.fragsize))
1347 && s->dma_adc.ready && (! (s->enable & ADC_RUNNING)) ) {
1348
1349 s->enable |= ADC_RUNNING;
1350 apu_set_register(s, 2, 0,
1351 (apu_get_register(s, 2, 0)&0xFF0F)|s->apu_mode[2]);
1352 apu_set_register(s, 4, 0,
1353 (apu_get_register(s, 4, 0)&0xFF0F)|s->apu_mode[4]);
1354
1355 if( s->fmt & (ESS_FMT_STEREO << ESS_ADC_SHIFT)) {
1356 apu_set_register(s, 3, 0,
1357 (apu_get_register(s, 3, 0)&0xFF0F)|s->apu_mode[3]);
1358 apu_set_register(s, 5, 0,
1359 (apu_get_register(s, 5, 0)&0xFF0F)|s->apu_mode[5]);
1360 }
1361
1362 }
1363}
1364
1365
1366/*
1367 * Native play back driver
1368 */
1369
1370/* the mode passed should be already shifted and masked */
1371static void
1372ess_play_setup(struct ess_state *ess, int mode, u32 rate, void *buffer, int size)
1373{
1374 u32 pa;
1375 u32 tmpval;
1376 int high_apu = 0;
1377 int channel;
1378
1379 M_printk("mode=%d rate=%d buf=%p len=%d.\n",
1380 mode, rate, buffer, size);
1381
1382 /* all maestro sizes are in 16bit words */
1383 size >>=1;
1384
1385 if(mode&ESS_FMT_STEREO) {
1386 high_apu++;
1387 /* only 16/stereo gets size divided */
1388 if(mode&ESS_FMT_16BIT)
1389 size>>=1;
1390 }
1391
1392 for(channel=0; channel <= high_apu; channel++)
1393 {
1394 pa = virt_to_bus(buffer);
1395
1396 /* set the wavecache control reg */
1397 tmpval = (pa - 0x10) & 0xFFF8;
1398 if(!(mode & ESS_FMT_16BIT)) tmpval |= 4;
1399 if(mode & ESS_FMT_STEREO) tmpval |= 2;
1400 ess->apu_base[channel]=tmpval;
1401 wave_set_register(ess, ess->apu[channel]<<3, tmpval);
1402
1403 pa -= virt_to_bus(ess->card->dmapages);
1404 pa>>=1; /* words */
1405
1406 /* base offset of dma calcs when reading the pointer
1407 on the left one */
1408 if(!channel) ess->dma_dac.base = pa&0xFFFF;
1409
1410 pa|=0x00400000; /* System RAM */
1411
1412 /* XXX the 16bit here might not be needed.. */
1413 if((mode & ESS_FMT_STEREO) && (mode & ESS_FMT_16BIT)) {
1414 if(channel)
1415 pa|=0x00800000; /* Stereo */
1416 pa>>=1;
1417 }
1418
1419/* XXX think about endianess when writing these registers */
1420 M_printk("maestro: ess_play_setup: APU[%d] pa = 0x%x\n", ess->apu[channel], pa);
1421 /* start of sample */
1422 apu_set_register(ess, channel, 4, ((pa>>16)&0xFF)<<8);
1423 apu_set_register(ess, channel, 5, pa&0xFFFF);
1424 /* sample end */
1425 apu_set_register(ess, channel, 6, (pa+size)&0xFFFF);
1426 /* setting loop len == sample len */
1427 apu_set_register(ess, channel, 7, size);
1428
1429 /* clear effects/env.. */
1430 apu_set_register(ess, channel, 8, 0x0000);
1431 /* set amp now to 0xd0 (?), low byte is 'amplitude dest'? */
1432 apu_set_register(ess, channel, 9, 0xD000);
1433
1434 /* clear routing stuff */
1435 apu_set_register(ess, channel, 11, 0x0000);
1436 /* dma on, no envelopes, filter to all 1s) */
1437 apu_set_register(ess, channel, 0, 0x400F);
1438
1439 if(mode&ESS_FMT_16BIT)
1440 ess->apu_mode[channel]=0x10;
1441 else
1442 ess->apu_mode[channel]=0x30;
1443
1444 if(mode&ESS_FMT_STEREO) {
1445 /* set panning: left or right */
1446 apu_set_register(ess, channel, 10, 0x8F00 | (channel ? 0 : 0x10));
1447 ess->apu_mode[channel] += 0x10;
1448 } else
1449 apu_set_register(ess, channel, 10, 0x8F08);
1450 }
1451
1452 /* clear WP interrupts */
1453 outw(1, ess->card->iobase+0x04);
1454 /* enable WP ints */
1455 outw(inw(ess->card->iobase+0x18)|4, ess->card->iobase+0x18);
1456
1457 /* go team! */
1458 set_dac_rate(ess,rate);
1459 start_dac(ess);
1460}
1461
1462/*
1463 * Native record driver
1464 */
1465
1466/* again, passed mode is alrady shifted/masked */
1467static void
1468ess_rec_setup(struct ess_state *ess, int mode, u32 rate, void *buffer, int size)
1469{
1470 int apu_step = 2;
1471 int channel;
1472
1473 M_printk("maestro: ess_rec_setup: mode=%d rate=%d buf=0x%p len=%d.\n",
1474 mode, rate, buffer, size);
1475
1476 /* all maestro sizes are in 16bit words */
1477 size >>=1;
1478
1479 /* we're given the full size of the buffer, but
1480 in stereo each channel will only use its half */
1481 if(mode&ESS_FMT_STEREO) {
1482 size >>=1;
1483 apu_step = 1;
1484 }
1485
1486 /* APU assignments: 2 = mono/left SRC
1487 3 = right SRC
1488 4 = mono/left Input Mixer
1489 5 = right Input Mixer */
1490 for(channel=2;channel<6;channel+=apu_step)
1491 {
1492 int i;
1493 int bsize, route;
1494 u32 pa;
1495 u32 tmpval;
1496
1497 /* data seems to flow from the codec, through an apu into
1498 the 'mixbuf' bit of page, then through the SRC apu
1499 and out to the real 'buffer'. ok. sure. */
1500
1501 if(channel & 0x04) {
1502 /* ok, we're an input mixer going from adc
1503 through the mixbuf to the other apus */
1504
1505 if(!(channel & 0x01)) {
1506 pa = virt_to_bus(ess->mixbuf);
1507 } else {
1508 pa = virt_to_bus(ess->mixbuf + (PAGE_SIZE >> 4));
1509 }
1510
1511 /* we source from a 'magic' apu */
1512 bsize = PAGE_SIZE >> 5; /* half of this channels alloc, in words */
1513 route = 0x14 + (channel - 4); /* parallel in crap, see maestro reg 0xC [8-11] */
1514 ess->apu_mode[channel] = 0x90; /* Input Mixer */
1515
1516 } else {
1517 /* we're a rate converter taking
1518 input from the input apus and outputing it to
1519 system memory */
1520 if(!(channel & 0x01)) {
1521 pa = virt_to_bus(buffer);
1522 } else {
1523 /* right channel records its split half.
1524 *2 accommodates for rampant shifting earlier */
1525 pa = virt_to_bus(buffer + size*2);
1526 }
1527
1528 ess->apu_mode[channel] = 0xB0; /* Sample Rate Converter */
1529
1530 bsize = size;
1531 /* get input from inputing apu */
1532 route = channel + 2;
1533 }
1534
1535 M_printk("maestro: ess_rec_setup: getting pa 0x%x from %d\n",pa,channel);
1536
1537 /* set the wavecache control reg */
1538 tmpval = (pa - 0x10) & 0xFFF8;
1539 ess->apu_base[channel]=tmpval;
1540 wave_set_register(ess, ess->apu[channel]<<3, tmpval);
1541
1542 pa -= virt_to_bus(ess->card->dmapages);
1543 pa>>=1; /* words */
1544
1545 /* base offset of dma calcs when reading the pointer
1546 on this left one */
1547 if(channel==2) ess->dma_adc.base = pa&0xFFFF;
1548
1549 pa|=0x00400000; /* bit 22 -> System RAM */
1550
1551 M_printk("maestro: ess_rec_setup: APU[%d] pa = 0x%x size = 0x%x route = 0x%x\n",
1552 ess->apu[channel], pa, bsize, route);
1553
1554 /* Begin loading the APU */
1555 for(i=0;i<15;i++) /* clear all PBRs */
1556 apu_set_register(ess, channel, i, 0x0000);
1557
1558 apu_set_register(ess, channel, 0, 0x400F);
1559
1560 /* need to enable subgroups.. and we should probably
1561 have different groups for different /dev/dsps.. */
1562 apu_set_register(ess, channel, 2, 0x8);
1563
1564 /* Load the buffer into the wave engine */
1565 apu_set_register(ess, channel, 4, ((pa>>16)&0xFF)<<8);
1566 /* XXX reg is little endian.. */
1567 apu_set_register(ess, channel, 5, pa&0xFFFF);
1568 apu_set_register(ess, channel, 6, (pa+bsize)&0xFFFF);
1569 apu_set_register(ess, channel, 7, bsize);
1570
1571 /* clear effects/env.. */
1572 apu_set_register(ess, channel, 8, 0x00F0);
1573
1574 /* amplitude now? sure. why not. */
1575 apu_set_register(ess, channel, 9, 0x0000);
1576
1577 /* set filter tune, radius, polar pan */
1578 apu_set_register(ess, channel, 10, 0x8F08);
1579
1580 /* route input */
1581 apu_set_register(ess, channel, 11, route);
1582 }
1583
1584 /* clear WP interrupts */
1585 outw(1, ess->card->iobase+0x04);
1586 /* enable WP ints */
1587 outw(inw(ess->card->iobase+0x18)|4, ess->card->iobase+0x18);
1588
1589 /* let 'er rip */
1590 set_adc_rate(ess,rate);
1591 start_adc(ess);
1592}
1593/* --------------------------------------------------------------------- */
1594
/* Unimplemented stub: the APU engine has no classic DMA address
   register to program.  Parameters are ignored; only logs when
   debugging (M_printk) is enabled. */
static void set_dmaa(struct ess_state *s, unsigned int addr, unsigned int count)
{
	M_printk("set_dmaa??\n");
}
1599
/* Unimplemented stub, see set_dmaa(): nothing to program on this
   hardware.  Parameters are ignored. */
static void set_dmac(struct ess_state *s, unsigned int addr, unsigned int count)
{
	M_printk("set_dmac??\n");
}
1604
1605/* Playback pointer */
1606static inline unsigned get_dmaa(struct ess_state *s)
1607{
1608 int offset;
1609
1610 offset = apu_get_register(s,0,5);
1611
1612/* M_printk("dmaa: offset: %d, base: %d\n",offset,s->dma_dac.base); */
1613
1614 offset-=s->dma_dac.base;
1615
1616 return (offset&0xFFFE)<<1; /* hardware is in words */
1617}
1618
1619/* Record pointer */
1620static inline unsigned get_dmac(struct ess_state *s)
1621{
1622 int offset;
1623
1624 offset = apu_get_register(s,2,5);
1625
1626/* M_printk("dmac: offset: %d, base: %d\n",offset,s->dma_adc.base); */
1627
1628 /* The offset is an address not a position relative to base */
1629 offset-=s->dma_adc.base;
1630
1631 return (offset&0xFFFE)<<1; /* hardware is in words */
1632}
1633
1634/*
1635 * Meet Bob, the timer...
1636 */
1637
1638static irqreturn_t ess_interrupt(int irq, void *dev_id, struct pt_regs *regs);
1639
/* Silence the "bob" periodic timer by clearing the enable bit (bit 0)
   in IDR 0x11 and 0x17. */
static void stop_bob(struct ess_state *s)
{
	maestro_write(s, 0x11, maestro_read(s, 0x11) & ~1);
	maestro_write(s, 0x17, maestro_read(s, 0x17) & ~1);
}
1646
/* eventually we could be clever and limit bob ints
   to the frequency at which our smallest duration
   chunks may expire */
#define ESS_SYSCLK	50000000
/* Program and enable the "bob" periodic timer for roughly 'freq'
   interrupts per second.  The timer rate is
   (ESS_SYSCLK >> (prescale+9)) / (divide+1); we pick the largest
   prescale that still reaches freq, then fine-tune the 5-bit divider.
   Currently hard-wired to 200 Hz (see calc_bob_rate). */
static void start_bob(struct ess_state *s)
{
	int prescale;
	int divide;

	/* XXX make freq selector much smarter, see calc_bob_rate */
	int freq = 200;

	/* compute ideal interrupt frequency for buffer size & play rate */
	/* first, find best prescaler value to match freq */
	for(prescale=5;prescale<12;prescale++)
		if(freq > (ESS_SYSCLK>>(prescale+9)))
			break;

	/* next, back off prescaler whilst getting divider into optimum range */
	divide=1;
	while((prescale > 5) && (divide<32))
	{
		prescale--;
		divide <<=1;
	}
	divide>>=1;

	/* now fine-tune the divider for best match */
	for(;divide<31;divide++)
		if(freq >= ((ESS_SYSCLK>>(prescale+9))/(divide+1)))
			break;

	/* divide = 0 is illegal, but don't let prescale = 4! */
	if(divide == 0)
	{
		divide++;
		if(prescale>5)
			prescale--;
	}

	/* IDR 6: 0x9000 enables the timer unit, prescale in bits 5-8,
	   divider in the low 5 bits */
	maestro_write(s, 6, 0x9000 | (prescale<<5) | divide); /* set reg */

	/* Now set IDR 11/17 */
	maestro_write(s, 0x11, maestro_read(s, 0x11)|1);
	maestro_write(s, 0x17, maestro_read(s, 0x17)|1);
}
1693/* --------------------------------------------------------------------- */
1694
/* this quickly calculates the frequency needed for bob
   and sets it if its different than what bob is
   currently running at. its called often so
   needs to be fairly quick. */
#define BOB_MIN 50
#define BOB_MAX 400
/* Currently a no-op: the adaptive rate logic below is disabled and
   start_bob() uses a fixed 200 Hz instead. */
static void calc_bob_rate(struct ess_state *s) {
#if 0 /* this thing tries to set the frequency of bob such that
	there are 2 interrupts / buffer walked by the dac/adc.  That
	is probably very wrong for people who actually care about
	mid buffer positioning.  it should be calculated as bytes/interrupt
	and that needs to be decided :)  so for now just use the static 150
	in start_bob.*/

	/* NOTE(review): if revived, the printk below logs the *old*
	   israte (it is updated only afterwards), and the adc_rate
	   formula indexes sample_size[] with the DAC shift -- both look
	   like latent bugs in this dead code. */
	unsigned int dac_rate=2,adc_rate=1,newrate;
	static int israte=-1;

	if (s->dma_dac.fragsize == 0) dac_rate = BOB_MIN;
	else {
		dac_rate =	(2 * s->ratedac * sample_size[(s->fmt >> ESS_DAC_SHIFT) & ESS_FMT_MASK]) /
						(s->dma_dac.fragsize) ;
	}

	if (s->dma_adc.fragsize == 0) adc_rate = BOB_MIN;
	else {
		adc_rate =	(2 * s->rateadc * sample_size[(s->fmt >> ESS_DAC_SHIFT) & ESS_FMT_MASK]) /
						(s->dma_adc.fragsize) ;
	}

	if(dac_rate > adc_rate) newrate = adc_rate;
	else newrate=dac_rate;

	if(newrate > BOB_MAX) newrate = BOB_MAX;
	else {
		if(newrate < BOB_MIN)
			newrate = BOB_MIN;
	}

	if( israte != newrate) {
		printk("dac: %d  adc: %d  rate: %d\n",dac_rate,adc_rate,israte);
		israte=newrate;
	}
#endif

}
1740
/* (Re)compute the OSS fragment geometry for one direction (rec != 0
   selects ADC, else DAC), silence the buffer, reprogram the hardware
   via ess_rec_setup()/ess_play_setup(), and mark the buffer ready.
   Always returns 0. */
static int
prog_dmabuf(struct ess_state *s, unsigned rec)
{
	struct dmabuf *db = rec ? &s->dma_adc : &s->dma_dac;
	unsigned rate = rec ? s->rateadc : s->ratedac;
	unsigned bytepersec;
	unsigned bufs;
	unsigned char fmt;
	unsigned long flags;

	/* stop the engine and grab the format bits for this direction */
	spin_lock_irqsave(&s->lock, flags);
	fmt = s->fmt;
	if (rec) {
		stop_adc(s);
		fmt >>= ESS_ADC_SHIFT;
	} else {
		stop_dac(s);
		fmt >>= ESS_DAC_SHIFT;
	}
	spin_unlock_irqrestore(&s->lock, flags);
	fmt &= ESS_FMT_MASK;

	/* reset all ring-buffer bookkeeping */
	db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;

	/* this algorithm is a little nuts.. where did /1000 come from? */
	bytepersec = rate << sample_shift[fmt];
	bufs = PAGE_SIZE << db->buforder;
	if (db->ossfragshift) {
		if ((1000 << db->ossfragshift) < bytepersec)
			db->fragshift = ld2(bytepersec/1000);
		else
			db->fragshift = db->ossfragshift;
	} else {
		db->fragshift = ld2(bytepersec/100/(db->subdivision ? db->subdivision : 1));
		if (db->fragshift < 3)
			db->fragshift = 3;	/* minimum 8-byte fragments */
	}
	/* keep at least 4 fragments while the buffer allows it */
	db->numfrag = bufs >> db->fragshift;
	while (db->numfrag < 4 && db->fragshift > 3) {
		db->fragshift--;
		db->numfrag = bufs >> db->fragshift;
	}
	db->fragsize = 1 << db->fragshift;
	if (db->ossmaxfrags >= 4 && db->ossmaxfrags < db->numfrag)
		db->numfrag = db->ossmaxfrags;
	db->fragsamples = db->fragsize >> sample_shift[fmt];
	db->dmasize = db->numfrag << db->fragshift;

	M_printk("maestro: setup oss: numfrag: %d fragsize: %d dmasize: %d\n",db->numfrag,db->fragsize,db->dmasize);

	/* fill with silence: 0x80 for unsigned 8-bit data, 0 for 16-bit */
	memset(db->rawbuf, (fmt & ESS_FMT_16BIT) ? 0 : 0x80, db->dmasize);

	spin_lock_irqsave(&s->lock, flags);
	if (rec)
		ess_rec_setup(s, fmt, s->rateadc, db->rawbuf, db->dmasize);
	else
		ess_play_setup(s, fmt, s->ratedac, db->rawbuf, db->dmasize);

	spin_unlock_irqrestore(&s->lock, flags);
	db->ready = 1;

	return 0;
}
1804
1805static __inline__ void
1806clear_advance(struct ess_state *s)
1807{
1808 unsigned char c = ((s->fmt >> ESS_DAC_SHIFT) & ESS_FMT_16BIT) ? 0 : 0x80;
1809
1810 unsigned char *buf = s->dma_dac.rawbuf;
1811 unsigned bsize = s->dma_dac.dmasize;
1812 unsigned bptr = s->dma_dac.swptr;
1813 unsigned len = s->dma_dac.fragsize;
1814
1815 if (bptr + len > bsize) {
1816 unsigned x = bsize - bptr;
1817 memset(buf + bptr, c, x);
1818 /* account for wrapping? */
1819 bptr = 0;
1820 len -= x;
1821 }
1822 memset(buf + bptr, c, len);
1823}
1824
/* call with spinlock held! */
/* Refresh the software view of both ring buffers from the hardware
   positions: advance hwptr/count/total_bytes for ADC and DAC, wake
   sleepers when a fragment's worth is available, and recover from
   overrun (ADC) or underrun (DAC) by resetting the affected ring. */
static void
ess_update_ptr(struct ess_state *s)
{
	unsigned hwptr;
	int diff;

	/* update ADC pointer */
	if (s->dma_adc.ready) {
		/* oh boy should this all be re-written.  everything in the current code paths think
		   that the various counters/pointers are expressed in bytes to the user but we have
		   two apus doing stereo stuff so we fix it up here.. it propagates to all the various
		   counters from here.  */
		if ( s->fmt & (ESS_FMT_STEREO << ESS_ADC_SHIFT)) {
			hwptr = (get_dmac(s)*2) % s->dma_adc.dmasize;
		} else {
			hwptr = get_dmac(s) % s->dma_adc.dmasize;
		}
		/* bytes recorded since the last update (modulo ring size) */
		diff = (s->dma_adc.dmasize + hwptr - s->dma_adc.hwptr) % s->dma_adc.dmasize;
		s->dma_adc.hwptr = hwptr;
		s->dma_adc.total_bytes += diff;
		s->dma_adc.count += diff;
		if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
			wake_up(&s->dma_adc.wait);
		if (!s->dma_adc.mapped) {
			/* overrun: reader fell 1.5 fragments behind -- stop and resync */
			if (s->dma_adc.count > (signed)(s->dma_adc.dmasize - ((3 * s->dma_adc.fragsize) >> 1))) {
				/* FILL ME
				   wrindir(s, SV_CIENABLE, s->enable); */
				stop_adc(s);
				/* brute force everyone back in sync, sigh */
				s->dma_adc.count = 0;
				s->dma_adc.swptr = 0;
				s->dma_adc.hwptr = 0;
				s->dma_adc.error++;
			}
		}
	}
	/* update DAC pointer */
	if (s->dma_dac.ready) {
		hwptr = get_dmaa(s) % s->dma_dac.dmasize;
		/* the apu only reports the length it has seen, not the
		   length of the memory that has been used (the WP
		   knows that) */
		if ( ((s->fmt >> ESS_DAC_SHIFT) & ESS_FMT_MASK) == (ESS_FMT_STEREO|ESS_FMT_16BIT))
			hwptr<<=1;

		diff = (s->dma_dac.dmasize + hwptr - s->dma_dac.hwptr) % s->dma_dac.dmasize;
/*		M_printk("updating dac: hwptr: %d diff: %d\n",hwptr,diff);*/
		s->dma_dac.hwptr = hwptr;
		s->dma_dac.total_bytes += diff;
		if (s->dma_dac.mapped) {
			s->dma_dac.count += diff;
			if (s->dma_dac.count >= (signed)s->dma_dac.fragsize) {
				wake_up(&s->dma_dac.wait);
			}
		} else {
			s->dma_dac.count -= diff;
/*			M_printk("maestro: ess_update_ptr: diff: %d, count: %d\n", diff, s->dma_dac.count); */
			if (s->dma_dac.count <= 0) {
				/* underrun: no data left -- stop and resync to hwptr */
				M_printk("underflow! diff: %d count: %d hw: %d sw: %d\n", diff, s->dma_dac.count,
					hwptr, s->dma_dac.swptr);
				/* FILL ME
				   wrindir(s, SV_CIENABLE, s->enable); */
				/* XXX how on earth can calling this with the lock held work.. */
				stop_dac(s);
				/* brute force everyone back in sync, sigh */
				s->dma_dac.count = 0;
				s->dma_dac.swptr = hwptr;
				s->dma_dac.error++;
			} else if (s->dma_dac.count <= (signed)s->dma_dac.fragsize && !s->dma_dac.endcleared) {
				/* nearly dry: pre-fill the next fragment with silence */
				clear_advance(s);
				s->dma_dac.endcleared = 1;
			}
			if (s->dma_dac.count + (signed)s->dma_dac.fragsize <= (signed)s->dma_dac.dmasize) {
				wake_up(&s->dma_dac.wait);
/*				printk("waking up DAC count: %d sw: %d hw: %d\n",s->dma_dac.count, s->dma_dac.swptr,
					hwptr);*/
			}
		}
	}
}
1906
/* Shared-IRQ handler: returns IRQ_NONE when the event register at
   iobase+0x1A shows nothing pending.  Handles the docking station's
   hardware volume buttons (event bit 6) by nudging the master mixer,
   acks all events, and updates the ring-buffer pointers of every open
   channel. */
static irqreturn_t
ess_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct ess_state *s;
	struct ess_card *c = (struct ess_card *)dev_id;
	int i;
	u32 event;

	if ( ! (event = inb(c->iobase+0x1A)) )
		return IRQ_NONE;

	/* ack the WP interrupt bit */
	outw(inw(c->iobase+4)&1, c->iobase+4);

/*	M_printk("maestro int: %x\n",event);*/
	if(event&(1<<6))
	{
		int x;
		enum {UP_EVT, DOWN_EVT, MUTE_EVT} vol_evt;
		int volume;

		/* Figure out which volume control button was pushed,
		   based on differences from the default register
		   values. */
		x = inb(c->iobase+0x1c);
		if (x&1) vol_evt = MUTE_EVT;
		else if (((x>>1)&7) > 4) vol_evt = UP_EVT;
		else vol_evt = DOWN_EVT;

		/* Reset the volume control registers. */
		outb(0x88, c->iobase+0x1c);
		outb(0x88, c->iobase+0x1d);
		outb(0x88, c->iobase+0x1e);
		outb(0x88, c->iobase+0x1f);

		/* Deal with the button press in a hammer-handed
		   manner by adjusting the master mixer volume. */
		volume = c->mix.mixer_state[0] & 0xff;
		if (vol_evt == UP_EVT) {
			volume += 5;
			if (volume > 100)
				volume = 100;
		}
		else if (vol_evt == DOWN_EVT) {
			volume -= 5;
			if (volume < 0)
				volume = 0;
		} else {
			/* vol_evt == MUTE_EVT */
			/* toggle between silence and the last pre-mute level */
			if (volume == 0)
				volume = c->dock_mute_vol;
			else {
				c->dock_mute_vol = volume;
				volume = 0;
			}
		}
		set_mixer (c, 0, (volume << 8) | volume);
	}

	/* Ack all the interrupts. */
	outb(0xFF, c->iobase+0x1A);

	/*
	 * Update the pointers for all APU's we are running.
	 */
	for(i=0;i<NR_DSPS;i++)
	{
		s=&c->channels[i];
		/* channels are allocated in order; the first unused one ends the scan */
		if(s->dev_audio == -1)
			break;
		spin_lock(&s->lock);
		ess_update_ptr(s);
		spin_unlock(&s->lock);
	}
	return IRQ_HANDLED;
}
1982
1983
1984/* --------------------------------------------------------------------- */
1985
static const char invalid_magic[] = KERN_CRIT "maestro: invalid magic value in %s\n";

/* Guard macro: if FOO is NULL or its ->magic field doesn't match MAG,
   log a critical message and make the *enclosing function* return
   -ENXIO.  Catches stale or corrupted state/card pointers handed in
   through file privates.  Note the hidden 'return' in the expansion. */
#define VALIDATE_MAGIC(FOO,MAG) \
({ \
	if (!(FOO) || (FOO)->magic != MAG) { \
		printk(invalid_magic,__FUNCTION__); \
		return -ENXIO; \
	} \
})

/* Convenience wrappers for the two magic-tagged object types. */
#define VALIDATE_STATE(a) VALIDATE_MAGIC(a,ESS_STATE_MAGIC)
#define VALIDATE_CARD(a) VALIDATE_MAGIC(a,ESS_CARD_MAGIC)
1998
1999static void set_mixer(struct ess_card *card,unsigned int mixer, unsigned int val )
2000{
2001 unsigned int left,right;
2002 /* cleanse input a little */
2003 right = ((val >> 8) & 0xff) ;
2004 left = (val & 0xff) ;
2005
2006 if(right > 100) right = 100;
2007 if(left > 100) left = 100;
2008
2009 card->mix.mixer_state[mixer]=(right << 8) | left;
2010 card->mix.write_mixer(card,mixer,left,right);
2011}
2012
2013static void
2014mixer_push_state(struct ess_card *card)
2015{
2016 int i;
2017 for(i = 0 ; i < SOUND_MIXER_NRDEVICES ; i++) {
2018 if( ! supported_mixer(card,i)) continue;
2019
2020 set_mixer(card,i,card->mix.mixer_state[i]);
2021 }
2022}
2023
/*
 * Handle all OSS mixer ioctls for one card.
 *
 * Reads (SOUND_MIXER_READ_*) are answered from the cached software
 * state; writes (SOUND_MIXER_WRITE_*) bump the modify counter and go
 * to the hardware under card->lock via set_mixer()/recmask_io().
 * Returns 0 or a put_user() result on success, -EINVAL for unknown or
 * unsupported requests, -EFAULT on user-copy failure.
 */
static int mixer_ioctl(struct ess_card *card, unsigned int cmd, unsigned long arg)
{
	int i, val=0;
	unsigned long flags;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	VALIDATE_CARD(card);
	if (cmd == SOUND_MIXER_INFO) {
		mixer_info info;
		memset(&info, 0, sizeof(info));
		strlcpy(info.id, card_names[card->card_type], sizeof(info.id));
		strlcpy(info.name, card_names[card->card_type], sizeof(info.name));
		info.modify_counter = card->mix.modcnt;
		if (copy_to_user(argp, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	if (cmd == SOUND_OLD_MIXER_INFO) {
		/* legacy variant: same id/name but no modify counter */
		_old_mixer_info info;
		memset(&info, 0, sizeof(info));
		strlcpy(info.id, card_names[card->card_type], sizeof(info.id));
		strlcpy(info.name, card_names[card->card_type], sizeof(info.name));
		if (copy_to_user(argp, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	if (cmd == OSS_GETVERSION)
		return put_user(SOUND_VERSION, p);

	/* everything below is a mixer ('M') ioctl carrying one int */
	if (_IOC_TYPE(cmd) != 'M' || _IOC_SIZE(cmd) != sizeof(int))
		return -EINVAL;

	if (_IOC_DIR(cmd) == _IOC_READ) {
		switch (_IOC_NR(cmd)) {
		case SOUND_MIXER_RECSRC: /* give them the current record source */

			if(!card->mix.recmask_io) {
				val = 0;
			} else {
				spin_lock_irqsave(&card->lock, flags);
				val = card->mix.recmask_io(card,1,0);
				spin_unlock_irqrestore(&card->lock, flags);
			}
			break;

		case SOUND_MIXER_DEVMASK: /* give them the supported mixers */
			val = card->mix.supported_mixers;
			break;

		case SOUND_MIXER_RECMASK: /* Arg contains a bit for each supported recording source */
			val = card->mix.record_sources;
			break;

		case SOUND_MIXER_STEREODEVS: /* Mixer channels supporting stereo */
			val = card->mix.stereo_mixers;
			break;

		case SOUND_MIXER_CAPS:
			val = SOUND_CAP_EXCL_INPUT;
			break;

		default: /* read a specific mixer */
			i = _IOC_NR(cmd);

			if ( ! supported_mixer(card,i))
				return -EINVAL;

			/* do we ever want to touch the hardware? */
/*			spin_lock_irqsave(&card->lock, flags);
			val = card->mix.read_mixer(card,i);
			spin_unlock_irqrestore(&card->lock, flags);*/

			/* answer from the software cache instead of the chip */
			val = card->mix.mixer_state[i];
/*			M_printk("returned 0x%x for mixer %d\n",val,i);*/

			break;
		}
		return put_user(val, p);
	}

	/* writes must be read/write ioctls (OSS returns the new value) */
	if (_IOC_DIR(cmd) != (_IOC_WRITE|_IOC_READ))
		return -EINVAL;

	card->mix.modcnt++;

	if (get_user(val, p))
		return -EFAULT;

	switch (_IOC_NR(cmd)) {
	case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */

		if (!card->mix.recmask_io) return -EINVAL;
		if(!val) return 0;
		/* keep only bits naming real record sources */
		if(! (val &= card->mix.record_sources)) return -EINVAL;

		spin_lock_irqsave(&card->lock, flags);
		card->mix.recmask_io(card,0,val);
		spin_unlock_irqrestore(&card->lock, flags);
		return 0;

	default:
		i = _IOC_NR(cmd);

		if ( ! supported_mixer(card,i))
			return -EINVAL;

		spin_lock_irqsave(&card->lock, flags);
		set_mixer(card,i,val);
		spin_unlock_irqrestore(&card->lock, flags);

		return 0;
	}
}
2138
2139/* --------------------------------------------------------------------- */
2140static int ess_open_mixdev(struct inode *inode, struct file *file)
2141{
2142 unsigned int minor = iminor(inode);
2143 struct ess_card *card = NULL;
2144 struct pci_dev *pdev = NULL;
2145 struct pci_driver *drvr;
2146
2147 while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
2148 drvr = pci_dev_driver (pdev);
2149 if (drvr == &maestro_pci_driver) {
2150 card = (struct ess_card*)pci_get_drvdata (pdev);
2151 if (!card)
2152 continue;
2153 if (card->dev_mixer == minor)
2154 break;
2155 }
2156 }
2157 if (!card)
2158 return -ENODEV;
2159 file->private_data = card;
2160 return nonseekable_open(inode, file);
2161}
2162
2163static int ess_release_mixdev(struct inode *inode, struct file *file)
2164{
2165 struct ess_card *card = (struct ess_card *)file->private_data;
2166
2167 VALIDATE_CARD(card);
2168
2169 return 0;
2170}
2171
2172static int ess_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
2173{
2174 struct ess_card *card = (struct ess_card *)file->private_data;
2175
2176 VALIDATE_CARD(card);
2177
2178 return mixer_ioctl(card, cmd, arg);
2179}
2180
/* File operations for the OSS mixer minor; ioctl does all the work. */
static /*const*/ struct file_operations ess_mixer_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.ioctl		= ess_ioctl_mixdev,
	.open		= ess_open_mixdev,
	.release	= ess_release_mixdev,
};
2188
2189/* --------------------------------------------------------------------- */
2190
/*
 * Block until the DAC DMA buffer has drained (playback data consumed).
 *
 * Returns 0 when drained (or when the buffer is mmap'd / never
 * programmed, in which case there is nothing to drain), -EBUSY when
 * 'nonblock' is set and data remains, -ERESTARTSYS on a signal.
 */
static int drain_dac(struct ess_state *s, int nonblock)
{
	DECLARE_WAITQUEUE(wait,current);
	unsigned long flags;
	int count;
	signed long tmo;

	if (s->dma_dac.mapped || !s->dma_dac.ready)
		return 0;
	current->state = TASK_INTERRUPTIBLE;
	add_wait_queue(&s->dma_dac.wait, &wait);
	for (;;) {
		/* XXX uhm.. questionable locking*/
		spin_lock_irqsave(&s->lock, flags);
		count = s->dma_dac.count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (count <= 0)
			break;
		if (signal_pending(current))
			break;
		if (nonblock) {
			remove_wait_queue(&s->dma_dac.wait, &wait);
			current->state = TASK_RUNNING;
			return -EBUSY;
		}
		/* estimate how long draining 'count' bytes takes at the
		   current DAC rate and sample size */
		tmo = (count * HZ) / s->ratedac;
		tmo >>= sample_shift[(s->fmt >> ESS_DAC_SHIFT) & ESS_FMT_MASK];
		/* XXX this is just broken.  someone is waking us up alot, or schedule_timeout is broken.
			or something.  who cares. - zach */
		if (!schedule_timeout(tmo ? tmo : 1) && tmo)
			M_printk(KERN_DEBUG "maestro: dma timed out?? %ld\n",jiffies);
	}
	remove_wait_queue(&s->dma_dac.wait, &wait);
	current->state = TASK_RUNNING;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
2229
2230/* --------------------------------------------------------------------- */
2231/* Zach sez: "god this is gross.." */
/*
 * The hardware records stereo as two mono streams: the left samples in
 * the first half of the raw buffer and the right samples in the second
 * half.  Interleave them into tmp_buffer as standard L/R 16-bit frames.
 *
 * 'offset' is the byte offset into each half, 'count' the number of
 * output bytes to produce (processed in 4-byte L/R frame units),
 * 'bufsize' the size of the whole raw buffer.  Always returns 0.
 */
static int
comb_stereo(unsigned char *real_buffer,unsigned char *tmp_buffer, int offset,
	int count, int bufsize)
{
	unsigned char *dst = tmp_buffer;
	unsigned char *lsrc = real_buffer + offset;
	unsigned char *rsrc = real_buffer + bufsize/2 + offset;
	int frames;

	for (frames = count/4; frames > 0; frames--) {
		dst[0] = lsrc[0];	/* left sample, low/high byte */
		dst[1] = lsrc[1];
		dst[2] = rsrc[0];	/* right sample, low/high byte */
		dst[3] = rsrc[1];
		lsrc += 2;
		rsrc += 2;
		dst += 4;
	}

	return 0;
}
2261
2262/* in this loop, dma_adc.count signifies the amount of data thats waiting
2263 to be copied to the user's buffer. it is filled by the interrupt
2264 handler and drained by this loop. */
/* in this loop, dma_adc.count signifies the amount of data thats waiting
	to be copied to the user's buffer.  it is filled by the interrupt
	handler and drained by this loop. */
/*
 * read() for the audio device: copy recorded data from the ADC DMA
 * ring to user space, sleeping (unless O_NONBLOCK) while the interrupt
 * handler refills the ring.  Stereo recordings are interleaved from
 * the two mono halves of the ring via comb_stereo() first.
 * Returns bytes read, or a negative errno if nothing was read yet.
 */
static ssize_t
ess_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	struct ess_state *s = (struct ess_state *)file->private_data;
	ssize_t ret;
	unsigned long flags;
	unsigned swptr;
	int cnt;
	unsigned char *combbuf = NULL;

	VALIDATE_STATE(s);
	if (s->dma_adc.mapped)
		return -ENXIO;
	if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
		return ret;
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	/* scratch buffer for comb_stereo()'s interleaved output.
	   NOTE(review): 'count' is caller-controlled and unbounded here --
	   a huge read asks kmalloc for a huge buffer; verify upper layers
	   cap it. */
	if(!(combbuf = kmalloc(count,GFP_KERNEL)))
		return -ENOMEM;
	ret = 0;

	calc_bob_rate(s);

	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		/* remember, all these things are expressed in bytes to be
			sent to the user.. hence the evil / 2 down below */
		swptr = s->dma_adc.swptr;
		cnt = s->dma_adc.dmasize-swptr;
		if (s->dma_adc.count < cnt)
			cnt = s->dma_adc.count;
		spin_unlock_irqrestore(&s->lock, flags);

		if (cnt > count)
			cnt = count;

		/* keep copies 4-byte (one stereo frame) aligned */
		if ( cnt > 0 ) cnt &= ~3;

		if (cnt <= 0) {
			/* ring is empty: kick the ADC and wait for data */
			start_adc(s);
			if (file->f_flags & O_NONBLOCK)
			{
				ret = ret ? ret : -EAGAIN;
				goto rec_return_free;
			}
			if (!interruptible_sleep_on_timeout(&s->dma_adc.wait, HZ)) {
				if(! s->card->in_suspend) printk(KERN_DEBUG "maestro: read: chip lockup? dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
					s->dma_adc.dmasize, s->dma_adc.fragsize, s->dma_adc.count,
					s->dma_adc.hwptr, s->dma_adc.swptr);
				/* no wakeup within a second: assume the engine
				   wedged and reprogram it from scratch */
				stop_adc(s);
				spin_lock_irqsave(&s->lock, flags);
				set_dmac(s, virt_to_bus(s->dma_adc.rawbuf), s->dma_adc.numfrag << s->dma_adc.fragshift);
				/* program enhanced mode registers */
				/* FILL ME */
/*				wrindir(s, SV_CIDMACBASECOUNT1, (s->dma_adc.fragsamples-1) >> 8);
				wrindir(s, SV_CIDMACBASECOUNT0, s->dma_adc.fragsamples-1); */
				s->dma_adc.count = s->dma_adc.hwptr = s->dma_adc.swptr = 0;
				spin_unlock_irqrestore(&s->lock, flags);
			}
			if (signal_pending(current))
			{
				ret = ret ? ret : -ERESTARTSYS;
				goto rec_return_free;
			}
			continue;
		}

		if(s->fmt & (ESS_FMT_STEREO << ESS_ADC_SHIFT)) {
			/* swptr/2 so that we know the real offset in each apu's buffer */
			comb_stereo(s->dma_adc.rawbuf,combbuf,swptr/2,cnt,s->dma_adc.dmasize);
			if (copy_to_user(buffer, combbuf, cnt)) {
				ret = ret ? ret : -EFAULT;
				goto rec_return_free;
			}
		} else {
			if (copy_to_user(buffer, s->dma_adc.rawbuf + swptr, cnt)) {
				ret = ret ? ret : -EFAULT;
				goto rec_return_free;
			}
		}

		/* consume 'cnt' bytes from the ring under the lock */
		swptr = (swptr + cnt) % s->dma_adc.dmasize;
		spin_lock_irqsave(&s->lock, flags);
		s->dma_adc.swptr = swptr;
		s->dma_adc.count -= cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		start_adc(s);
	}

rec_return_free:
	kfree(combbuf);
	return ret;
}
2361
/*
 * write() for the audio device: copy playback data from user space
 * into the DAC DMA ring, sleeping (unless O_NONBLOCK) while the
 * interrupt handler drains it.  Returns bytes written, or a negative
 * errno if nothing was written yet.
 */
static ssize_t
ess_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct ess_state *s = (struct ess_state *)file->private_data;
	ssize_t ret;
	unsigned long flags;
	unsigned swptr;
	int cnt;

	VALIDATE_STATE(s);
	if (s->dma_dac.mapped)
		return -ENXIO;
	if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
		return ret;
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;
	ret = 0;

	calc_bob_rate(s);

	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);

		/* a negative count means an underrun happened; resync the
		   software pointer to the hardware pointer */
		if (s->dma_dac.count < 0) {
			s->dma_dac.count = 0;
			s->dma_dac.swptr = s->dma_dac.hwptr;
		}
		swptr = s->dma_dac.swptr;

		/* contiguous free space up to the end of the ring... */
		cnt = s->dma_dac.dmasize-swptr;

		/* ...further limited by how much of the ring is unplayed */
		if (s->dma_dac.count + cnt > s->dma_dac.dmasize)
			cnt = s->dma_dac.dmasize - s->dma_dac.count;

		spin_unlock_irqrestore(&s->lock, flags);

		if (cnt > count)
			cnt = count;

		if (cnt <= 0) {
			/* ring is full: start playback and wait for room */
			start_dac(s);
			if (file->f_flags & O_NONBLOCK) {
				if(!ret) ret = -EAGAIN;
				goto return_free;
			}
			if (!interruptible_sleep_on_timeout(&s->dma_dac.wait, HZ)) {
				if(! s->card->in_suspend) printk(KERN_DEBUG "maestro: write: chip lockup? dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
					s->dma_dac.dmasize, s->dma_dac.fragsize, s->dma_dac.count,
					s->dma_dac.hwptr, s->dma_dac.swptr);
				/* no wakeup within a second: assume the engine
				   wedged and reprogram it from scratch */
				stop_dac(s);
				spin_lock_irqsave(&s->lock, flags);
				set_dmaa(s, virt_to_bus(s->dma_dac.rawbuf), s->dma_dac.numfrag << s->dma_dac.fragshift);
				/* program enhanced mode registers */
/*				wrindir(s, SV_CIDMAABASECOUNT1, (s->dma_dac.fragsamples-1) >> 8);
				wrindir(s, SV_CIDMAABASECOUNT0, s->dma_dac.fragsamples-1); */
				/* FILL ME */
				s->dma_dac.count = s->dma_dac.hwptr = s->dma_dac.swptr = 0;
				spin_unlock_irqrestore(&s->lock, flags);
			}
			if (signal_pending(current)) {
				if (!ret) ret = -ERESTARTSYS;
				goto return_free;
			}
			continue;
		}
		if (copy_from_user(s->dma_dac.rawbuf + swptr, buffer, cnt)) {
			if (!ret) ret = -EFAULT;
			goto return_free;
		}
/*		printk("wrote %d bytes at sw: %d cnt: %d while hw: %d\n",cnt, swptr, s->dma_dac.count, s->dma_dac.hwptr);*/

		/* publish the new data to the ring under the lock */
		swptr = (swptr + cnt) % s->dma_dac.dmasize;

		spin_lock_irqsave(&s->lock, flags);
		s->dma_dac.swptr = swptr;
		s->dma_dac.count += cnt;
		s->dma_dac.endcleared = 0;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		start_dac(s);
	}
return_free:
	return ret;
}
2448
2449/* No kernel lock - we have our own spinlock */
/* No kernel lock - we have our own spinlock */
/*
 * poll()/select() support: report POLLIN when at least a fragment of
 * recorded data is available and POLLOUT when at least a fragment of
 * playback space is free.  Returns 0 (no events) if the DMA buffers
 * cannot be programmed.
 */
static unsigned int ess_poll(struct file *file, struct poll_table_struct *wait)
{
	struct ess_state *s = (struct ess_state *)file->private_data;
	unsigned long flags;
	unsigned int mask = 0;

	VALIDATE_STATE(s);

/* In 0.14 prog_dmabuf always returns success anyway ... */
	if (file->f_mode & FMODE_WRITE) {
		if (!s->dma_dac.ready && prog_dmabuf(s, 0))
			return 0;
	}
	if (file->f_mode & FMODE_READ) {
		if (!s->dma_adc.ready && prog_dmabuf(s, 1))
			return 0;
	}

	if (file->f_mode & FMODE_WRITE)
		poll_wait(file, &s->dma_dac.wait, wait);
	if (file->f_mode & FMODE_READ)
		poll_wait(file, &s->dma_adc.wait, wait);
	/* refresh hw/sw pointers before computing readiness */
	spin_lock_irqsave(&s->lock, flags);
	ess_update_ptr(s);
	if (file->f_mode & FMODE_READ) {
		if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
			mask |= POLLIN | POLLRDNORM;
	}
	if (file->f_mode & FMODE_WRITE) {
		if (s->dma_dac.mapped) {
			/* mmap'd: writable once a fragment has been played */
			if (s->dma_dac.count >= (signed)s->dma_dac.fragsize)
				mask |= POLLOUT | POLLWRNORM;
		} else {
			/* write(): writable once a fragment of space is free */
			if ((signed)s->dma_dac.dmasize >= s->dma_dac.count + (signed)s->dma_dac.fragsize)
				mask |= POLLOUT | POLLWRNORM;
		}
	}
	spin_unlock_irqrestore(&s->lock, flags);
	return mask;
}
2490
2491static int ess_mmap(struct file *file, struct vm_area_struct *vma)
2492{
2493 struct ess_state *s = (struct ess_state *)file->private_data;
2494 struct dmabuf *db;
2495 int ret = -EINVAL;
2496 unsigned long size;
2497
2498 VALIDATE_STATE(s);
2499 lock_kernel();
2500 if (vma->vm_flags & VM_WRITE) {
2501 if ((ret = prog_dmabuf(s, 1)) != 0)
2502 goto out;
2503 db = &s->dma_dac;
2504 } else
2505#if 0
2506 /* if we can have the wp/wc do the combining
2507 we can turn this back on. */
2508 if (vma->vm_flags & VM_READ) {
2509 if ((ret = prog_dmabuf(s, 0)) != 0)
2510 goto out;
2511 db = &s->dma_adc;
2512 } else
2513#endif
2514 goto out;
2515 ret = -EINVAL;
2516 if (vma->vm_pgoff != 0)
2517 goto out;
2518 size = vma->vm_end - vma->vm_start;
2519 if (size > (PAGE_SIZE << db->buforder))
2520 goto out;
2521 ret = -EAGAIN;
2522 if (remap_pfn_range(vma, vma->vm_start,
2523 virt_to_phys(db->rawbuf) >> PAGE_SHIFT,
2524 size, vma->vm_page_prot))
2525 goto out;
2526 db->mapped = 1;
2527 ret = 0;
2528out:
2529 unlock_kernel();
2530 return ret;
2531}
2532
/*
 * Main OSS ioctl handler for the audio (dsp) device.  Implements the
 * SNDCTL_DSP_* / SOUND_PCM_* family: rate/format/channel setup, DMA
 * trigger control, buffer space/pointer queries and fragment sizing.
 * Returns 0 or a put_user()/copy_to_user() result on success, -EFAULT
 * on user-copy failure, -EINVAL for unknown requests.
 */
static int ess_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ess_state *s = (struct ess_state *)file->private_data;
	unsigned long flags;
	audio_buf_info abinfo;
	count_info cinfo;
	int val, mapped, ret;
	unsigned char fmtm, fmtd;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

/*	printk("maestro: ess_ioctl: cmd %d\n", cmd);*/

	VALIDATE_STATE(s);
	/* computed but only meaningful to mmap-aware ioctls below */
	mapped = ((file->f_mode & FMODE_WRITE) && s->dma_dac.mapped) ||
		((file->f_mode & FMODE_READ) && s->dma_adc.mapped);
	switch (cmd) {
	case OSS_GETVERSION:
		return put_user(SOUND_VERSION, p);

	case SNDCTL_DSP_SYNC:
		if (file->f_mode & FMODE_WRITE)
			return drain_dac(s, file->f_flags & O_NONBLOCK);
		return 0;

	case SNDCTL_DSP_SETDUPLEX:
		/* XXX fix */
		return 0;

	case SNDCTL_DSP_GETCAPS:
		return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME | DSP_CAP_TRIGGER | DSP_CAP_MMAP, p);

	case SNDCTL_DSP_RESET:
		/* stop the engines and zero all ring bookkeeping */
		if (file->f_mode & FMODE_WRITE) {
			stop_dac(s);
			synchronize_irq(s->card->pcidev->irq);
			s->dma_dac.swptr = s->dma_dac.hwptr = s->dma_dac.count = s->dma_dac.total_bytes = 0;
		}
		if (file->f_mode & FMODE_READ) {
			stop_adc(s);
			synchronize_irq(s->card->pcidev->irq);
			s->dma_adc.swptr = s->dma_adc.hwptr = s->dma_adc.count = s->dma_adc.total_bytes = 0;
		}
		return 0;

	case SNDCTL_DSP_SPEED:
		/* negative values only query; the actual rate set is
		   reported back via put_user */
		if (get_user(val, p))
			return -EFAULT;
		if (val >= 0) {
			if (file->f_mode & FMODE_READ) {
				stop_adc(s);
				s->dma_adc.ready = 0;
				set_adc_rate(s, val);
			}
			if (file->f_mode & FMODE_WRITE) {
				stop_dac(s);
				s->dma_dac.ready = 0;
				set_dac_rate(s, val);
			}
		}
		return put_user((file->f_mode & FMODE_READ) ? s->rateadc : s->ratedac, p);

	case SNDCTL_DSP_STEREO:
		/* fmtd = bits to set, fmtm = mask of bits to keep */
		if (get_user(val, p))
			return -EFAULT;
		fmtd = 0;
		fmtm = ~0;
		if (file->f_mode & FMODE_READ) {
			stop_adc(s);
			s->dma_adc.ready = 0;
			if (val)
				fmtd |= ESS_FMT_STEREO << ESS_ADC_SHIFT;
			else
				fmtm &= ~(ESS_FMT_STEREO << ESS_ADC_SHIFT);
		}
		if (file->f_mode & FMODE_WRITE) {
			stop_dac(s);
			s->dma_dac.ready = 0;
			if (val)
				fmtd |= ESS_FMT_STEREO << ESS_DAC_SHIFT;
			else
				fmtm &= ~(ESS_FMT_STEREO << ESS_DAC_SHIFT);
		}
		set_fmt(s, fmtm, fmtd);
		return 0;

	case SNDCTL_DSP_CHANNELS:
		if (get_user(val, p))
			return -EFAULT;
		if (val != 0) {
			fmtd = 0;
			fmtm = ~0;
			if (file->f_mode & FMODE_READ) {
				stop_adc(s);
				s->dma_adc.ready = 0;
				if (val >= 2)
					fmtd |= ESS_FMT_STEREO << ESS_ADC_SHIFT;
				else
					fmtm &= ~(ESS_FMT_STEREO << ESS_ADC_SHIFT);
			}
			if (file->f_mode & FMODE_WRITE) {
				stop_dac(s);
				s->dma_dac.ready = 0;
				if (val >= 2)
					fmtd |= ESS_FMT_STEREO << ESS_DAC_SHIFT;
				else
					fmtm &= ~(ESS_FMT_STEREO << ESS_DAC_SHIFT);
			}
			set_fmt(s, fmtm, fmtd);
		}
		return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (ESS_FMT_STEREO << ESS_ADC_SHIFT)
					: (ESS_FMT_STEREO << ESS_DAC_SHIFT))) ? 2 : 1, p);

	case SNDCTL_DSP_GETFMTS: /* Returns a mask */
		return put_user(AFMT_U8|AFMT_S16_LE, p);

	case SNDCTL_DSP_SETFMT: /* Selects ONE fmt*/
		if (get_user(val, p))
			return -EFAULT;
		if (val != AFMT_QUERY) {
			fmtd = 0;
			fmtm = ~0;
			if (file->f_mode & FMODE_READ) {
				stop_adc(s);
				s->dma_adc.ready = 0;
				/* fixed at 16bit for now */
				fmtd |= ESS_FMT_16BIT << ESS_ADC_SHIFT;
#if 0
				if (val == AFMT_S16_LE)
					fmtd |= ESS_FMT_16BIT << ESS_ADC_SHIFT;
				else
					fmtm &= ~(ESS_FMT_16BIT << ESS_ADC_SHIFT);
#endif
			}
			if (file->f_mode & FMODE_WRITE) {
				stop_dac(s);
				s->dma_dac.ready = 0;
				if (val == AFMT_S16_LE)
					fmtd |= ESS_FMT_16BIT << ESS_DAC_SHIFT;
				else
					fmtm &= ~(ESS_FMT_16BIT << ESS_DAC_SHIFT);
			}
			set_fmt(s, fmtm, fmtd);
		}
		return put_user((s->fmt & ((file->f_mode & FMODE_READ) ?
			(ESS_FMT_16BIT << ESS_ADC_SHIFT)
			: (ESS_FMT_16BIT << ESS_DAC_SHIFT))) ?
				AFMT_S16_LE :
				AFMT_U8,
			p);

	case SNDCTL_DSP_POST:
		return 0;

	case SNDCTL_DSP_GETTRIGGER:
		val = 0;
		if ((file->f_mode & FMODE_READ) && (s->enable & ADC_RUNNING))
			val |= PCM_ENABLE_INPUT;
		if ((file->f_mode & FMODE_WRITE) && (s->enable & DAC_RUNNING))
			val |= PCM_ENABLE_OUTPUT;
		return put_user(val, p);

	case SNDCTL_DSP_SETTRIGGER:
		if (get_user(val, p))
			return -EFAULT;
		if (file->f_mode & FMODE_READ) {
			if (val & PCM_ENABLE_INPUT) {
				if (!s->dma_adc.ready && (ret =  prog_dmabuf(s, 1)))
					return ret;
				start_adc(s);
			} else
				stop_adc(s);
		}
		if (file->f_mode & FMODE_WRITE) {
			if (val & PCM_ENABLE_OUTPUT) {
				if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
					return ret;
				start_dac(s);
			} else
				stop_dac(s);
		}
		return 0;

	case SNDCTL_DSP_GETOSPACE:
		/* free playback space, in bytes and whole fragments */
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
			return ret;
		spin_lock_irqsave(&s->lock, flags);
		ess_update_ptr(s);
		abinfo.fragsize = s->dma_dac.fragsize;
		abinfo.bytes = s->dma_dac.dmasize - s->dma_dac.count;
		abinfo.fragstotal = s->dma_dac.numfrag;
		abinfo.fragments = abinfo.bytes >> s->dma_dac.fragshift;
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_GETISPACE:
		/* available recorded data, in bytes and whole fragments */
		if (!(file->f_mode & FMODE_READ))
			return -EINVAL;
		if (!s->dma_adc.ready && (ret =  prog_dmabuf(s, 1)))
			return ret;
		spin_lock_irqsave(&s->lock, flags);
		ess_update_ptr(s);
		abinfo.fragsize = s->dma_adc.fragsize;
		abinfo.bytes = s->dma_adc.count;
		abinfo.fragstotal = s->dma_adc.numfrag;
		abinfo.fragments = abinfo.bytes >> s->dma_adc.fragshift;
		spin_unlock_irqrestore(&s->lock, flags);
		return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_NONBLOCK:
		file->f_flags |= O_NONBLOCK;
		return 0;

	case SNDCTL_DSP_GETODELAY:
		/* bytes queued but not yet played */
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
			return ret;
		spin_lock_irqsave(&s->lock, flags);
		ess_update_ptr(s);
		val = s->dma_dac.count;
		spin_unlock_irqrestore(&s->lock, flags);
		return put_user(val, p);

	case SNDCTL_DSP_GETIPTR:
		if (!(file->f_mode & FMODE_READ))
			return -EINVAL;
		if (!s->dma_adc.ready && (ret =  prog_dmabuf(s, 1)))
			return ret;
		spin_lock_irqsave(&s->lock, flags);
		ess_update_ptr(s);
		cinfo.bytes = s->dma_adc.total_bytes;
		cinfo.blocks = s->dma_adc.count >> s->dma_adc.fragshift;
		cinfo.ptr = s->dma_adc.hwptr;
		/* mmap'd mode: count tracks position within one fragment */
		if (s->dma_adc.mapped)
			s->dma_adc.count &= s->dma_adc.fragsize-1;
		spin_unlock_irqrestore(&s->lock, flags);
		if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
			return -EFAULT;
		return 0;

	case SNDCTL_DSP_GETOPTR:
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
			return ret;
		spin_lock_irqsave(&s->lock, flags);
		ess_update_ptr(s);
		cinfo.bytes = s->dma_dac.total_bytes;
		cinfo.blocks = s->dma_dac.count >> s->dma_dac.fragshift;
		cinfo.ptr = s->dma_dac.hwptr;
		/* mmap'd mode: count tracks position within one fragment */
		if (s->dma_dac.mapped)
			s->dma_dac.count &= s->dma_dac.fragsize-1;
		spin_unlock_irqrestore(&s->lock, flags);
		if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
			return -EFAULT;
		return 0;

	case SNDCTL_DSP_GETBLKSIZE:
		if (file->f_mode & FMODE_WRITE) {
			if ((val = prog_dmabuf(s, 0)))
				return val;
			return put_user(s->dma_dac.fragsize, p);
		}
		if ((val = prog_dmabuf(s, 1)))
			return val;
		return put_user(s->dma_adc.fragsize, p);

	case SNDCTL_DSP_SETFRAGMENT:
		/* low 16 bits: log2 fragment size (clamped to 4..15);
		   high 16 bits: max fragment count (min 4) */
		if (get_user(val, p))
			return -EFAULT;
		M_printk("maestro: SETFRAGMENT: %0x\n",val);
		if (file->f_mode & FMODE_READ) {
			s->dma_adc.ossfragshift = val & 0xffff;
			s->dma_adc.ossmaxfrags = (val >> 16) & 0xffff;
			if (s->dma_adc.ossfragshift < 4)
				s->dma_adc.ossfragshift = 4;
			if (s->dma_adc.ossfragshift > 15)
				s->dma_adc.ossfragshift = 15;
			if (s->dma_adc.ossmaxfrags < 4)
				s->dma_adc.ossmaxfrags = 4;
		}
		if (file->f_mode & FMODE_WRITE) {
			s->dma_dac.ossfragshift = val & 0xffff;
			s->dma_dac.ossmaxfrags = (val >> 16) & 0xffff;
			if (s->dma_dac.ossfragshift < 4)
				s->dma_dac.ossfragshift = 4;
			if (s->dma_dac.ossfragshift > 15)
				s->dma_dac.ossfragshift = 15;
			if (s->dma_dac.ossmaxfrags < 4)
				s->dma_dac.ossmaxfrags = 4;
		}
		return 0;

	case SNDCTL_DSP_SUBDIVIDE:
		/* may only be set once per open */
		if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision) ||
		    (file->f_mode & FMODE_WRITE && s->dma_dac.subdivision))
			return -EINVAL;
		if (get_user(val, p))
			return -EFAULT;
		if (val != 1 && val != 2 && val != 4)
			return -EINVAL;
		if (file->f_mode & FMODE_READ)
			s->dma_adc.subdivision = val;
		if (file->f_mode & FMODE_WRITE)
			s->dma_dac.subdivision = val;
		return 0;

	case SOUND_PCM_READ_RATE:
		return put_user((file->f_mode & FMODE_READ) ? s->rateadc : s->ratedac, p);

	case SOUND_PCM_READ_CHANNELS:
		return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (ESS_FMT_STEREO << ESS_ADC_SHIFT)
					   : (ESS_FMT_STEREO << ESS_DAC_SHIFT))) ? 2 : 1, p);

	case SOUND_PCM_READ_BITS:
		return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (ESS_FMT_16BIT << ESS_ADC_SHIFT)
					   : (ESS_FMT_16BIT << ESS_DAC_SHIFT))) ? 16 : 8, p);

	case SOUND_PCM_WRITE_FILTER:
	case SNDCTL_DSP_SETSYNCRO:
	case SOUND_PCM_READ_FILTER:
		return -EINVAL;

	}
	return -EINVAL;
}
2862
/*
 * Point all four wave-processor base registers (0x01FC..0x01FF) at our
 * DMA region.  The hardware wants the bus address shifted down 12 bits
 * (the 4k page frame number).
 */
static void
set_base_registers(struct ess_state *s,void *vaddr)
{
	unsigned long page_frame = virt_to_bus(vaddr) >> 12;
	int reg;

	for (reg = 0x01FC; reg <= 0x01FF; reg++)
		wave_set_register(s, reg, page_frame);
}
2872
2873/*
2874 * this guy makes sure we're in the right power
2875 * state for what we want to be doing
2876 */
2877static void maestro_power(struct ess_card *card, int tostate)
2878{
2879 u16 active_mask = acpi_state_mask[tostate];
2880 u8 state;
2881
2882 if(!use_pm) return;
2883
2884 pci_read_config_byte(card->pcidev, card->power_regs+0x4, &state);
2885 state&=3;
2886
2887 /* make sure we're in the right state */
2888 if(state != tostate) {
2889 M_printk(KERN_WARNING "maestro: dev %02x:%02x.%x switching from D%d to D%d\n",
2890 card->pcidev->bus->number,
2891 PCI_SLOT(card->pcidev->devfn),
2892 PCI_FUNC(card->pcidev->devfn),
2893 state,tostate);
2894 pci_write_config_byte(card->pcidev, card->power_regs+0x4, tostate);
2895 }
2896
2897 /* and make sure the units we care about are on
2898 XXX we might want to do this before state flipping? */
2899 pci_write_config_word(card->pcidev, 0x54, ~ active_mask);
2900 pci_write_config_word(card->pcidev, 0x56, ~ active_mask);
2901}
2902
2903/* we allocate a large power of two for all our memory.
2904 this is cut up into (not to scale :):
2905 |silly fifo word | 512byte mixbuf per adc | dac/adc * channels |
2906*/
2907static int
2908allocate_buffers(struct ess_state *s)
2909{
2910 void *rawbuf=NULL;
2911 int order,i;
2912 struct page *page, *pend;
2913
2914 /* alloc as big a chunk as we can */
2915 for (order = (dsps_order + (16-PAGE_SHIFT) + 1); order >= (dsps_order + 2 + 1); order--)
2916 if((rawbuf = (void *)__get_free_pages(GFP_KERNEL|GFP_DMA, order)))
2917 break;
2918
2919 if (!rawbuf)
2920 return 1;
2921
2922 M_printk("maestro: allocated %ld (%d) bytes at %p\n",PAGE_SIZE<<order,order, rawbuf);
2923
2924 if ((virt_to_bus(rawbuf) + (PAGE_SIZE << order) - 1) & ~((1<<28)-1)) {
2925 printk(KERN_ERR "maestro: DMA buffer beyond 256MB! busaddr 0x%lx size %ld\n",
2926 virt_to_bus(rawbuf), PAGE_SIZE << order);
2927 kfree(rawbuf);
2928 return 1;
2929 }
2930
2931 s->card->dmapages = rawbuf;
2932 s->card->dmaorder = order;
2933
2934 for(i=0;i<NR_DSPS;i++) {
2935 struct ess_state *ess = &s->card->channels[i];
2936
2937 if(ess->dev_audio == -1)
2938 continue;
2939
2940 ess->dma_dac.ready = s->dma_dac.mapped = 0;
2941 ess->dma_adc.ready = s->dma_adc.mapped = 0;
2942 ess->dma_adc.buforder = ess->dma_dac.buforder = order - 1 - dsps_order - 1;
2943
2944 /* offset dac and adc buffers starting half way through and then at each [da][ad]c's
2945 order's intervals.. */
2946 ess->dma_dac.rawbuf = rawbuf + (PAGE_SIZE<<(order-1)) + (i * ( PAGE_SIZE << (ess->dma_dac.buforder + 1 )));
2947 ess->dma_adc.rawbuf = ess->dma_dac.rawbuf + ( PAGE_SIZE << ess->dma_dac.buforder);
2948 /* offset mixbuf by a mixbuf so that the lame status fifo can
2949 happily scribble away.. */
2950 ess->mixbuf = rawbuf + (512 * (i+1));
2951
2952 M_printk("maestro: setup apu %d: dac: %p adc: %p mix: %p\n",i,ess->dma_dac.rawbuf,
2953 ess->dma_adc.rawbuf, ess->mixbuf);
2954
2955 }
2956
2957 /* now mark the pages as reserved; otherwise remap_pfn_range doesn't do what we want */
2958 pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
2959 for (page = virt_to_page(rawbuf); page <= pend; page++)
2960 SetPageReserved(page);
2961
2962 return 0;
2963}
2964static void
2965free_buffers(struct ess_state *s)
2966{
2967 struct page *page, *pend;
2968
2969 s->dma_dac.rawbuf = s->dma_adc.rawbuf = NULL;
2970 s->dma_dac.mapped = s->dma_adc.mapped = 0;
2971 s->dma_dac.ready = s->dma_adc.ready = 0;
2972
2973 M_printk("maestro: freeing %p\n",s->card->dmapages);
2974 /* undo marking the pages as reserved */
2975
2976 pend = virt_to_page(s->card->dmapages + (PAGE_SIZE << s->card->dmaorder) - 1);
2977 for (page = virt_to_page(s->card->dmapages); page <= pend; page++)
2978 ClearPageReserved(page);
2979
2980 free_pages((unsigned long)s->card->dmapages,s->card->dmaorder);
2981 s->card->dmapages = NULL;
2982}
2983
/*
 * open() for the audio device: find the channel whose dev_audio minor
 * family matches, wait for it to become free, allocate the card's DMA
 * region on first open, power the chip up and set the default format
 * (8kHz; 16-bit stereo capture, U8/S16 playback per minor).
 *
 * Returns the nonseekable_open() result, -ENODEV if no channel owns
 * the minor, -EWOULDBLOCK/-ERESTARTSYS while waiting, -ENOMEM if the
 * DMA buffers cannot be allocated.
 */
static int
ess_open(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	struct ess_state *s = NULL;
	unsigned char fmtm = ~0, fmts = 0;
	struct pci_dev *pdev = NULL;
	/*
	 *	Scan the cards and find the channel. We only
	 *	do this at open time so it is ok
	 */

	while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
		struct ess_card *c;
		struct pci_driver *drvr;

		drvr = pci_dev_driver (pdev);
		if (drvr == &maestro_pci_driver) {
			int i;
			struct ess_state *sp;

			c = (struct ess_card*)pci_get_drvdata (pdev);
			if (!c)
				continue;
			for(i=0;i<NR_DSPS;i++)
			{
				sp=&c->channels[i];
				if(sp->dev_audio < 0)
					continue;
				/* match on the minor's upper bits; the low
				   nibble selects the device flavour */
				if((sp->dev_audio ^ minor) & ~0xf)
					continue;
				s=sp;
			}
		}
	}
	if (!s)
		return -ENODEV;

	VALIDATE_STATE(s);
	file->private_data = s;
	/* wait for device to become free */
	mutex_lock(&s->open_mutex);
	while (s->open_mode & file->f_mode) {
		if (file->f_flags & O_NONBLOCK) {
			mutex_unlock(&s->open_mutex);
			return -EWOULDBLOCK;
		}
		/* drop the mutex while sleeping so the holder can release */
		mutex_unlock(&s->open_mutex);
		interruptible_sleep_on(&s->open_wait);
		if (signal_pending(current))
			return -ERESTARTSYS;
		mutex_lock(&s->open_mutex);
	}

	/* under semaphore.. */
	if ((s->card->dmapages==NULL) && allocate_buffers(s)) {
		mutex_unlock(&s->open_mutex);
		return -ENOMEM;
	}

	/* we're covered by the open_mutex */
	if( ! s->card->dsps_open ) {
		/* first opener powers up the chip and starts the timer */
		maestro_power(s->card,ACPI_D0);
		start_bob(s);
	}
	s->card->dsps_open++;
	M_printk("maestro: open, %d bobs now\n",s->card->dsps_open);

	/* ok, lets write WC base regs now that we've
		powered up the chip */
	M_printk("maestro: writing 0x%lx (bus 0x%lx) to the wp\n",virt_to_bus(s->card->dmapages),
		((virt_to_bus(s->card->dmapages))&0xFFE00000)>>12);
	set_base_registers(s,s->card->dmapages);

	if (file->f_mode & FMODE_READ) {
/*
		fmtm &= ~((ESS_FMT_STEREO | ESS_FMT_16BIT) << ESS_ADC_SHIFT);
		if ((minor & 0xf) == SND_DEV_DSP16)
			fmts |= ESS_FMT_16BIT << ESS_ADC_SHIFT; */

		/* capture is forced to 16-bit stereo regardless of minor */
		fmtm &= ~((ESS_FMT_STEREO|ESS_FMT_16BIT) << ESS_ADC_SHIFT);
		fmts = (ESS_FMT_STEREO|ESS_FMT_16BIT) << ESS_ADC_SHIFT;

		s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0;
		set_adc_rate(s, 8000);
	}
	if (file->f_mode & FMODE_WRITE) {
		fmtm &= ~((ESS_FMT_STEREO | ESS_FMT_16BIT) << ESS_DAC_SHIFT);
		if ((minor & 0xf) == SND_DEV_DSP16)
			fmts |= ESS_FMT_16BIT << ESS_DAC_SHIFT;

		s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0;
		set_dac_rate(s, 8000);
	}
	set_fmt(s, fmtm, fmts);
	s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);

	mutex_unlock(&s->open_mutex);
	return nonseekable_open(inode, file);
}
3084
/*
 * Close one audio channel fd: drain pending DAC output, stop the DMA
 * engines for the modes this fd had open, and power the chip down to
 * D2 once the last open channel on the card goes away.
 */
static int
ess_release(struct inode *inode, struct file *file)
{
	struct ess_state *s = (struct ess_state *)file->private_data;

	VALIDATE_STATE(s);
	lock_kernel();	/* BKL: legacy OSS locking around release */
	if (file->f_mode & FMODE_WRITE)
		/* let already-queued samples play out before stopping */
		drain_dac(s, file->f_flags & O_NONBLOCK);
	mutex_lock(&s->open_mutex);
	if (file->f_mode & FMODE_WRITE) {
		stop_dac(s);
	}
	if (file->f_mode & FMODE_READ) {
		stop_adc(s);
	}

	/* mark this fd's read/write modes as available again */
	s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
	/* we're covered by the open_mutex */
	M_printk("maestro: %d dsps now alive\n",s->card->dsps_open-1);
	if( --s->card->dsps_open <= 0) {
		/* last user on the card: stop the bob timer, release the
		 * DMA buffers and drop into a low power state */
		s->card->dsps_open = 0;
		stop_bob(s);
		free_buffers(s);
		maestro_power(s->card,ACPI_D2);
	}
	mutex_unlock(&s->open_mutex);
	wake_up(&s->open_wait);	/* let any blocked opener retry */
	unlock_kernel();
	return 0;
}
3116
/* file_operations for the per-channel OSS /dev/dsp device nodes. */
static struct file_operations ess_audio_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= ess_read,
	.write		= ess_write,
	.poll		= ess_poll,
	.ioctl		= ess_ioctl,
	.mmap		= ess_mmap,
	.open		= ess_open,
	.release	= ess_release,
};
3128
3129static int
3130maestro_config(struct ess_card *card)
3131{
3132 struct pci_dev *pcidev = card->pcidev;
3133 struct ess_state *ess = &card->channels[0];
3134 int apu,iobase = card->iobase;
3135 u16 w;
3136 u32 n;
3137
3138 /* We used to muck around with pci config space that
3139 * we had no business messing with. We don't know enough
3140 * about the machine to know which DMA mode is appropriate,
3141 * etc. We were guessing wrong on some machines and making
3142 * them unhappy. We now trust in the BIOS to do things right,
3143 * which almost certainly means a new host of problems will
3144 * arise with broken BIOS implementations. screw 'em.
3145 * We're already intolerant of machines that don't assign
3146 * IRQs.
3147 */
3148
3149 /* do config work at full power */
3150 maestro_power(card,ACPI_D0);
3151
3152 pci_read_config_word(pcidev, 0x50, &w);
3153
3154 w&=~(1<<5); /* Don't swap left/right (undoc)*/
3155
3156 pci_write_config_word(pcidev, 0x50, w);
3157
3158 pci_read_config_word(pcidev, 0x52, &w);
3159 w&=~(1<<15); /* Turn off internal clock multiplier */
3160 /* XXX how do we know which to use? */
3161 w&=~(1<<14); /* External clock */
3162
3163 w|= (1<<7); /* Hardware volume control on */
3164 w|= (1<<6); /* Debounce off: easier to push the HWV buttons. */
3165 w&=~(1<<5); /* GPIO 4:5 */
3166 w|= (1<<4); /* Disconnect from the CHI. Enabling this made a dell 7500 work. */
3167 w&=~(1<<2); /* MIDI fix off (undoc) */
3168 w&=~(1<<1); /* reserved, always write 0 */
3169 pci_write_config_word(pcidev, 0x52, w);
3170
3171 /*
3172 * Legacy mode
3173 */
3174
3175 pci_read_config_word(pcidev, 0x40, &w);
3176 w|=(1<<15); /* legacy decode off */
3177 w&=~(1<<14); /* Disable SIRQ */
3178 w&=~(0x1f); /* disable mpu irq/io, game port, fm, SB */
3179
3180 pci_write_config_word(pcidev, 0x40, w);
3181
3182 /* Set up 978 docking control chip. */
3183 pci_read_config_word(pcidev, 0x58, &w);
3184 w|=1<<2; /* Enable 978. */
3185 w|=1<<3; /* Turn on 978 hardware volume control. */
3186 w&=~(1<<11); /* Turn on 978 mixer volume control. */
3187 pci_write_config_word(pcidev, 0x58, w);
3188
3189 sound_reset(iobase);
3190
3191 /*
3192 * Ring Bus Setup
3193 */
3194
3195 /* setup usual 0x34 stuff.. 0x36 may be chip specific */
3196 outw(0xC090, iobase+0x34); /* direct sound, stereo */
3197 udelay(20);
3198 outw(0x3000, iobase+0x36); /* direct sound, stereo */
3199 udelay(20);
3200
3201
3202 /*
3203 * Reset the CODEC
3204 */
3205
3206 maestro_ac97_reset(iobase,pcidev);
3207
3208 /*
3209 * Ring Bus Setup
3210 */
3211
3212 n=inl(iobase+0x34);
3213 n&=~0xF000;
3214 n|=12<<12; /* Direct Sound, Stereo */
3215 outl(n, iobase+0x34);
3216
3217 n=inl(iobase+0x34);
3218 n&=~0x0F00; /* Modem off */
3219 outl(n, iobase+0x34);
3220
3221 n=inl(iobase+0x34);
3222 n&=~0x00F0;
3223 n|=9<<4; /* DAC, Stereo */
3224 outl(n, iobase+0x34);
3225
3226 n=inl(iobase+0x34);
3227 n&=~0x000F; /* ASSP off */
3228 outl(n, iobase+0x34);
3229
3230 n=inl(iobase+0x34);
3231 n|=(1<<29); /* Enable ring bus */
3232 outl(n, iobase+0x34);
3233
3234 n=inl(iobase+0x34);
3235 n|=(1<<28); /* Enable serial bus */
3236 outl(n, iobase+0x34);
3237
3238 n=inl(iobase+0x34);
3239 n&=~0x00F00000; /* MIC off */
3240 outl(n, iobase+0x34);
3241
3242 n=inl(iobase+0x34);
3243 n&=~0x000F0000; /* I2S off */
3244 outl(n, iobase+0x34);
3245
3246
3247 w=inw(iobase+0x18);
3248 w&=~(1<<7); /* ClkRun off */
3249 outw(w, iobase+0x18);
3250
3251 w=inw(iobase+0x18);
3252 w&=~(1<<6); /* Hardware volume control interrupt off... for now. */
3253 outw(w, iobase+0x18);
3254
3255 w=inw(iobase+0x18);
3256 w&=~(1<<4); /* ASSP irq off */
3257 outw(w, iobase+0x18);
3258
3259 w=inw(iobase+0x18);
3260 w&=~(1<<3); /* ISDN irq off */
3261 outw(w, iobase+0x18);
3262
3263 w=inw(iobase+0x18);
3264 w|=(1<<2); /* Direct Sound IRQ on */
3265 outw(w, iobase+0x18);
3266
3267 w=inw(iobase+0x18);
3268 w&=~(1<<1); /* MPU401 IRQ off */
3269 outw(w, iobase+0x18);
3270
3271 w=inw(iobase+0x18);
3272 w|=(1<<0); /* SB IRQ on */
3273 outw(w, iobase+0x18);
3274
3275 /* Set hardware volume control registers to midpoints.
3276 We can tell which button was pushed based on how they change. */
3277 outb(0x88, iobase+0x1c);
3278 outb(0x88, iobase+0x1d);
3279 outb(0x88, iobase+0x1e);
3280 outb(0x88, iobase+0x1f);
3281
3282 /* it appears some maestros (dell 7500) only work if these are set,
3283 regardless of whether we use the assp or not. */
3284
3285 outb(0, iobase+0xA4);
3286 outb(3, iobase+0xA2);
3287 outb(0, iobase+0xA6);
3288
3289 for(apu=0;apu<16;apu++)
3290 {
3291 /* Write 0 into the buffer area 0x1E0->1EF */
3292 outw(0x01E0+apu, 0x10+iobase);
3293 outw(0x0000, 0x12+iobase);
3294
3295 /*
3296 * The 1.10 test program seem to write 0 into the buffer area
3297 * 0x1D0-0x1DF too.
3298 */
3299 outw(0x01D0+apu, 0x10+iobase);
3300 outw(0x0000, 0x12+iobase);
3301 }
3302
3303#if 1
3304 wave_set_register(ess, IDR7_WAVE_ROMRAM,
3305 (wave_get_register(ess, IDR7_WAVE_ROMRAM)&0xFF00));
3306 wave_set_register(ess, IDR7_WAVE_ROMRAM,
3307 wave_get_register(ess, IDR7_WAVE_ROMRAM)|0x100);
3308 wave_set_register(ess, IDR7_WAVE_ROMRAM,
3309 wave_get_register(ess, IDR7_WAVE_ROMRAM)&~0x200);
3310 wave_set_register(ess, IDR7_WAVE_ROMRAM,
3311 wave_get_register(ess, IDR7_WAVE_ROMRAM)|~0x400);
3312#else
3313 maestro_write(ess, IDR7_WAVE_ROMRAM,
3314 (maestro_read(ess, IDR7_WAVE_ROMRAM)&0xFF00));
3315 maestro_write(ess, IDR7_WAVE_ROMRAM,
3316 maestro_read(ess, IDR7_WAVE_ROMRAM)|0x100);
3317 maestro_write(ess, IDR7_WAVE_ROMRAM,
3318 maestro_read(ess, IDR7_WAVE_ROMRAM)&~0x200);
3319 maestro_write(ess, IDR7_WAVE_ROMRAM,
3320 maestro_read(ess, IDR7_WAVE_ROMRAM)|0x400);
3321#endif
3322
3323 maestro_write(ess, IDR2_CRAM_DATA, 0x0000);
3324 maestro_write(ess, 0x08, 0xB004);
3325 /* Now back to the DirectSound stuff */
3326 maestro_write(ess, 0x09, 0x001B);
3327 maestro_write(ess, 0x0A, 0x8000);
3328 maestro_write(ess, 0x0B, 0x3F37);
3329 maestro_write(ess, 0x0C, 0x0098);
3330
3331 /* parallel out ?? */
3332 maestro_write(ess, 0x0C,
3333 (maestro_read(ess, 0x0C)&~0xF000)|0x8000);
3334 /* parallel in, has something to do with recording :) */
3335 maestro_write(ess, 0x0C,
3336 (maestro_read(ess, 0x0C)&~0x0F00)|0x0500);
3337
3338 maestro_write(ess, 0x0D, 0x7632);
3339
3340 /* Wave cache control on - test off, sg off,
3341 enable, enable extra chans 1Mb */
3342
3343 outw(inw(0x14+iobase)|(1<<8),0x14+iobase);
3344 outw(inw(0x14+iobase)&0xFE03,0x14+iobase);
3345 outw((inw(0x14+iobase)&0xFFFC), 0x14+iobase);
3346 outw(inw(0x14+iobase)|(1<<7),0x14+iobase);
3347
3348 outw(0xA1A0, 0x14+iobase); /* 0300 ? */
3349
3350 /* Now clear the APU control ram */
3351 for(apu=0;apu<NR_APUS;apu++)
3352 {
3353 for(w=0;w<NR_APU_REGS;w++)
3354 apu_set_register(ess, apu|ESS_CHAN_HARD, w, 0);
3355
3356 }
3357
3358 return 0;
3359
3360}
3361
3362/* this guy tries to find the pci power management
3363 * register bank. this should really be in core
3364 * code somewhere. 1 on success. */
3365static int
3366parse_power(struct ess_card *card, struct pci_dev *pcidev)
3367{
3368 u32 n;
3369 u16 w;
3370 u8 next;
3371 int max = 64; /* an a 8bit guy pointing to 32bit guys
3372 can only express so much. */
3373
3374 card->power_regs = 0;
3375
3376 /* check to see if we have a capabilities list in
3377 the config register */
3378 pci_read_config_word(pcidev, PCI_STATUS, &w);
3379 if(!(w & PCI_STATUS_CAP_LIST)) return 0;
3380
3381 /* walk the list, starting at the head. */
3382 pci_read_config_byte(pcidev,PCI_CAPABILITY_LIST,&next);
3383
3384 while(next && max--) {
3385 pci_read_config_dword(pcidev, next & ~3, &n);
3386 if((n & 0xff) == PCI_CAP_ID_PM) {
3387 card->power_regs = next;
3388 break;
3389 }
3390 next = ((n>>8) & 0xff);
3391 }
3392
3393 return card->power_regs ? 1 : 0;
3394}
3395
3396static int __init
3397maestro_probe(struct pci_dev *pcidev,const struct pci_device_id *pdid)
3398{
3399 int card_type = pdid->driver_data;
3400 u32 n;
3401 int iobase;
3402 int i, ret;
3403 struct ess_card *card;
3404 struct ess_state *ess;
3405 int num = 0;
3406
3407/* when built into the kernel, we only print version if device is found */
3408#ifndef MODULE
3409 static int printed_version;
3410 if (!printed_version++)
3411 printk(version);
3412#endif
3413
3414 /* don't pick up weird modem maestros */
3415 if(((pcidev->class >> 8) & 0xffff) != PCI_CLASS_MULTIMEDIA_AUDIO)
3416 return -ENODEV;
3417
3418
3419 if ((ret=pci_enable_device(pcidev)))
3420 return ret;
3421
3422 iobase = pci_resource_start(pcidev,0);
3423 if (!iobase || !(pci_resource_flags(pcidev, 0 ) & IORESOURCE_IO))
3424 return -ENODEV;
3425
3426 if(pcidev->irq == 0)
3427 return -ENODEV;
3428
3429 /* stake our claim on the iospace */
3430 if( request_region(iobase, 256, card_names[card_type]) == NULL )
3431 {
3432 printk(KERN_WARNING "maestro: can't allocate 256 bytes I/O at 0x%4.4x\n", iobase);
3433 return -EBUSY;
3434 }
3435
3436 /* just to be sure */
3437 pci_set_master(pcidev);
3438
3439 card = kmalloc(sizeof(struct ess_card), GFP_KERNEL);
3440 if(card == NULL)
3441 {
3442 printk(KERN_WARNING "maestro: out of memory\n");
3443 release_region(iobase, 256);
3444 return -ENOMEM;
3445 }
3446
3447 memset(card, 0, sizeof(*card));
3448 card->pcidev = pcidev;
3449
3450 card->iobase = iobase;
3451 card->card_type = card_type;
3452 card->irq = pcidev->irq;
3453 card->magic = ESS_CARD_MAGIC;
3454 spin_lock_init(&card->lock);
3455 init_waitqueue_head(&card->suspend_queue);
3456
3457 card->dock_mute_vol = 50;
3458
3459 /* init our groups of 6 apus */
3460 for(i=0;i<NR_DSPS;i++)
3461 {
3462 struct ess_state *s=&card->channels[i];
3463
3464 s->index = i;
3465
3466 s->card = card;
3467 init_waitqueue_head(&s->dma_adc.wait);
3468 init_waitqueue_head(&s->dma_dac.wait);
3469 init_waitqueue_head(&s->open_wait);
3470 spin_lock_init(&s->lock);
3471 mutex_init(&s->open_mutex);
3472 s->magic = ESS_STATE_MAGIC;
3473
3474 s->apu[0] = 6*i;
3475 s->apu[1] = (6*i)+1;
3476 s->apu[2] = (6*i)+2;
3477 s->apu[3] = (6*i)+3;
3478 s->apu[4] = (6*i)+4;
3479 s->apu[5] = (6*i)+5;
3480
3481 if(s->dma_adc.ready || s->dma_dac.ready || s->dma_adc.rawbuf)
3482 printk("maestro: BOTCH!\n");
3483 /* register devices */
3484 if ((s->dev_audio = register_sound_dsp(&ess_audio_fops, -1)) < 0)
3485 break;
3486 }
3487
3488 num = i;
3489
3490 /* clear the rest if we ran out of slots to register */
3491 for(;i<NR_DSPS;i++)
3492 {
3493 struct ess_state *s=&card->channels[i];
3494 s->dev_audio = -1;
3495 }
3496
3497 ess = &card->channels[0];
3498
3499 /*
3500 * Ok card ready. Begin setup proper
3501 */
3502
3503 printk(KERN_INFO "maestro: Configuring %s found at IO 0x%04X IRQ %d\n",
3504 card_names[card_type],iobase,card->irq);
3505 pci_read_config_dword(pcidev, PCI_SUBSYSTEM_VENDOR_ID, &n);
3506 printk(KERN_INFO "maestro: subvendor id: 0x%08x\n",n);
3507
3508 /* turn off power management unless:
3509 * - the user explicitly asks for it
3510 * or
3511 * - we're not a 2e, lesser chipps seem to have problems.
3512 * - we're not on our _very_ small whitelist. some implemenetations
3513 * really don't like the pm code, others require it.
3514 * feel free to expand this as required.
3515 */
3516#define SUBSYSTEM_VENDOR(x) (x&0xffff)
3517 if( (use_pm != 1) &&
3518 ((card_type != TYPE_MAESTRO2E) || (SUBSYSTEM_VENDOR(n) != 0x1028)))
3519 use_pm = 0;
3520
3521 if(!use_pm)
3522 printk(KERN_INFO "maestro: not attempting power management.\n");
3523 else {
3524 if(!parse_power(card,pcidev))
3525 printk(KERN_INFO "maestro: no PCI power management interface found.\n");
3526 else {
3527 pci_read_config_dword(pcidev, card->power_regs, &n);
3528 printk(KERN_INFO "maestro: PCI power management capability: 0x%x\n",n>>16);
3529 }
3530 }
3531
3532 maestro_config(card);
3533
3534 if(maestro_ac97_get(card, 0x00)==0x0080) {
3535 printk(KERN_ERR "maestro: my goodness! you seem to have a pt101 codec, which is quite rare.\n"
3536 "\tyou should tell someone about this.\n");
3537 } else {
3538 maestro_ac97_init(card);
3539 }
3540
3541 if ((card->dev_mixer = register_sound_mixer(&ess_mixer_fops, -1)) < 0) {
3542 printk("maestro: couldn't register mixer!\n");
3543 } else {
3544 memcpy(card->mix.mixer_state,mixer_defaults,sizeof(card->mix.mixer_state));
3545 mixer_push_state(card);
3546 }
3547
3548 if((ret=request_irq(card->irq, ess_interrupt, IRQF_SHARED, card_names[card_type], card)))
3549 {
3550 printk(KERN_ERR "maestro: unable to allocate irq %d,\n", card->irq);
3551 unregister_sound_mixer(card->dev_mixer);
3552 for(i=0;i<NR_DSPS;i++)
3553 {
3554 struct ess_state *s = &card->channels[i];
3555 if(s->dev_audio != -1)
3556 unregister_sound_dsp(s->dev_audio);
3557 }
3558 release_region(card->iobase, 256);
3559 unregister_reboot_notifier(&maestro_nb);
3560 kfree(card);
3561 return ret;
3562 }
3563
3564 /* Turn on hardware volume control interrupt.
3565 This has to come after we grab the IRQ above,
3566 or a crash will result on installation if a button has been pressed,
3567 because in that case we'll get an immediate interrupt. */
3568 n = inw(iobase+0x18);
3569 n|=(1<<6);
3570 outw(n, iobase+0x18);
3571
3572 pci_set_drvdata(pcidev,card);
3573 /* now go to sleep 'till something interesting happens */
3574 maestro_power(card,ACPI_D2);
3575
3576 printk(KERN_INFO "maestro: %d channels configured.\n", num);
3577 return 0;
3578}
3579
/*
 * PCI remove: undo everything maestro_probe() did for this card.
 * The hardware-volume interrupt must be masked before free_irq(),
 * and the chip is put into D3 before its resources are released.
 */
static void maestro_remove(struct pci_dev *pcidev) {
	struct ess_card *card = pci_get_drvdata(pcidev);
	int i;
	u32 n;
	
	/* XXX maybe should force stop bob, but should be all 
		stopped by _release by now */

	/* Turn off hardware volume control interrupt.
	   This has to come before we leave the IRQ below,
	   or a crash results if a button is pressed ! */
	n = inw(card->iobase+0x18);
	n&=~(1<<6);
	outw(n, card->iobase+0x18);

	free_irq(card->irq, card);
	unregister_sound_mixer(card->dev_mixer);
	for(i=0;i<NR_DSPS;i++)
	{
		struct ess_state *ess = &card->channels[i];
		if(ess->dev_audio != -1)
			unregister_sound_dsp(ess->dev_audio);
	}
	/* Goodbye, Mr. Bond. */
	maestro_power(card,ACPI_D3);
	release_region(card->iobase, 256);
	kfree(card);
	pci_set_drvdata(pcidev,NULL);
}
3609
/* PCI IDs this driver binds to; driver_data selects the chip family. */
static struct pci_device_id maestro_pci_tbl[] = {
	{PCI_VENDOR_ESS, PCI_DEVICE_ID_ESS_ESS1968, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_MAESTRO2},
	{PCI_VENDOR_ESS, PCI_DEVICE_ID_ESS_ESS1978, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_MAESTRO2E},
	{PCI_VENDOR_ESS_OLD, PCI_DEVICE_ID_ESS_ESS0100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPE_MAESTRO},
	{0,}
};
MODULE_DEVICE_TABLE(pci, maestro_pci_tbl);
3617
/* PCI driver glue: binds the probe/remove callbacks to the ID table. */
static struct pci_driver maestro_pci_driver = {
	.name	  = "maestro",
	.id_table = maestro_pci_tbl,
	.probe	  = maestro_probe,
	.remove	  = maestro_remove,
};
3624
3625static int __init init_maestro(void)
3626{
3627 int rc;
3628
3629 rc = pci_register_driver(&maestro_pci_driver);
3630 if (rc < 0)
3631 return rc;
3632
3633 if (register_reboot_notifier(&maestro_nb))
3634 printk(KERN_WARNING "maestro: reboot notifier registration failed; may not reboot properly.\n");
3635#ifdef MODULE
3636 printk(version);
3637#endif
3638 if (dsps_order < 0) {
3639 dsps_order = 1;
3640 printk(KERN_WARNING "maestro: clipping dsps_order to %d\n",dsps_order);
3641 }
3642 else if (dsps_order > MAX_DSP_ORDER) {
3643 dsps_order = MAX_DSP_ORDER;
3644 printk(KERN_WARNING "maestro: clipping dsps_order to %d\n",dsps_order);
3645 }
3646 return 0;
3647}
3648
/*
 * Reboot notifier: called when the kernel is really shutting down.
 * Unregistering the PCI driver tears down every card instance (via
 * maestro_remove), which powers the chips off cleanly.
 */
static int maestro_notifier(struct notifier_block *nb, unsigned long event, void *buf)
{
	/* this notifier is called when the kernel is really shut down. */
	M_printk("maestro: shutting down\n");
	/* this will remove all card instances too */
	pci_unregister_driver(&maestro_pci_driver);
	/* XXX dunno about power management */
	return NOTIFY_OK;
}
3658
3659/* --------------------------------------------------------------------- */
3660
3661
/* Module exit: drop the PCI driver (removing all cards) and the
 * reboot notifier registered in init_maestro(). */
static void cleanup_maestro(void) {
	M_printk("maestro: unloading\n");
	pci_unregister_driver(&maestro_pci_driver);
	unregister_reboot_notifier(&maestro_nb);
}
3667
3668/* --------------------------------------------------------------------- */
3669
3670void
3671check_suspend(struct ess_card *card)
3672{
3673 DECLARE_WAITQUEUE(wait, current);
3674
3675 if(!card->in_suspend) return;
3676
3677 card->in_suspend++;
3678 add_wait_queue(&(card->suspend_queue), &wait);
3679 current->state = TASK_UNINTERRUPTIBLE;
3680 schedule();
3681 remove_wait_queue(&(card->suspend_queue), &wait);
3682 current->state = TASK_RUNNING;
3683}
3684
3685module_init(init_maestro);
3686module_exit(cleanup_maestro);
diff --git a/sound/oss/maestro.h b/sound/oss/maestro.h
deleted file mode 100644
index 023ec7f968f9..000000000000
--- a/sound/oss/maestro.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * Registers for the ESS PCI cards
3 */
4
5/*
6 * Memory access
7 */
8
9#define ESS_MEM_DATA 0x00
10#define ESS_MEM_INDEX 0x02
11
12/*
13 * AC-97 Codec port. Delay 1uS after each write. This is used to
14 * talk AC-97 (see intel.com). Write data then register.
15 */
16
17#define ESS_AC97_INDEX 0x30 /* byte wide */
18#define ESS_AC97_DATA 0x32
19
20/*
 21 * Reading is a bit different. You write register|0x80 to the index,
22 * delay 1uS poll the low bit of index, when it clears read the
23 * data value.
24 */
25
26/*
27 * Control port. Not yet fully understood
28 * The value 0xC090 gets loaded to it then 0x0000 and 0x2800
29 * to the data port. Then after 4uS the value 0x300 is written
30 */
31
32#define RING_BUS_CTRL_L 0x34
33#define RING_BUS_CTRL_H 0x36
34
35/*
36 * This is also used during setup. The value 0x17 is written to it
37 */
38
39#define ESS_SETUP_18 0x18
40
41/*
42 * And this one gets 0x000b
43 */
44
45#define ESS_SETUP_A2 0xA2
46
47/*
48 * And this 0x0000
49 */
50
51#define ESS_SETUP_A4 0xA4
52#define ESS_SETUP_A6 0xA6
53
54/*
55 * Stuff to do with Harpo - the wave stuff
56 */
57
58#define ESS_WAVETABLE_SIZE 0x14
59#define ESS_WAVETABLE_2M 0xA180
60
diff --git a/sound/oss/maestro3.c b/sound/oss/maestro3.c
deleted file mode 100644
index 5ef6e617911b..000000000000
--- a/sound/oss/maestro3.c
+++ /dev/null
@@ -1,2968 +0,0 @@
1/*****************************************************************************
2 *
3 * ESS Maestro3/Allegro driver for Linux 2.4.x
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * (c) Copyright 2000 Zach Brown <zab@zabbo.net>
20 *
21 * I need to thank many people for helping make this driver happen.
22 * As always, Eric Brombaugh was a hacking machine and killed many bugs
23 * that I was too dumb to notice. Howard Kim at ESS provided reference boards
24 * and as much docs as he could. Todd and Mick at Dell tested snapshots on
25 * an army of laptops. msw and deviant at Red Hat also humoured me by hanging
26 * their laptops every few hours in the name of science.
27 *
28 * Shouts go out to Mike "DJ XPCom" Ang.
29 *
30 * History
31 * v1.23 - Jun 5 2002 - Michael Olson <olson@cs.odu.edu>
32 * added a module option to allow selection of GPIO pin number
33 * for external amp
34 * v1.22 - Feb 28 2001 - Zach Brown <zab@zabbo.net>
35 * allocate mem at insmod/setup, rather than open
36 * limit pci dma addresses to 28bit, thanks guys.
37 * v1.21 - Feb 04 2001 - Zach Brown <zab@zabbo.net>
38 * fix up really dumb notifier -> suspend oops
39 * v1.20 - Jan 30 2001 - Zach Brown <zab@zabbo.net>
40 * get rid of pm callback and use pci_dev suspend/resume instead
41 * m3_probe cleanups, including pm oops think-o
42 * v1.10 - Jan 6 2001 - Zach Brown <zab@zabbo.net>
43 * revert to lame remap_page_range mmap() just to make it work
44 * record mmap fixed.
45 * fix up incredibly broken open/release resource management
46 * duh. fix record format setting.
47 * add SMP locking and cleanup formatting here and there
48 * v1.00 - Dec 16 2000 - Zach Brown <zab@zabbo.net>
49 * port to sexy 2.4 interfaces
50 * properly align instance allocations so recording works
51 * clean up function namespace a little :/
52 * update PCI IDs based on mail from ESS
53 * arbitrarily bump version number to show its 2.4 now,
54 * 2.2 will stay 0., oss_audio port gets 2.
55 * v0.03 - Nov 05 2000 - Zach Brown <zab@zabbo.net>
56 * disable recording but allow dsp to be opened read
57 * pull out most silly compat defines
58 * v0.02 - Nov 04 2000 - Zach Brown <zab@zabbo.net>
59 * changed clocking setup for m3, slowdown fixed.
60 * codec reset is hopefully reliable now
61 * rudimentary apm/power management makes suspend/resume work
62 * v0.01 - Oct 31 2000 - Zach Brown <zab@zabbo.net>
63 * first release
64 * v0.00 - Sep 09 2000 - Zach Brown <zab@zabbo.net>
65 * first pass derivation from maestro.c
66 *
67 * TODO
68 * in/out allocated contiguously so fullduplex mmap will work?
69 * no beep on init (mute)
70 * resetup msrc data memory if freq changes?
71 *
72 * --
73 *
74 * Allow me to ramble a bit about the m3 architecture. The core of the
75 * chip is the 'assp', the custom ESS dsp that runs the show. It has
76 * a small amount of code and data ram. ESS drops binary dsp code images
77 * on our heads, but we don't get to see specs on the dsp.
78 *
79 * The constant piece of code on the dsp is the 'kernel'. It also has a
80 * chunk of the dsp memory that is statically set aside for its control
81 * info. This is the KDATA defines in maestro3.h. Part of its core
82 * data is a list of code addresses that point to the pieces of DSP code
83 * that it should walk through in its loop. These other pieces of code
84 * do the real work. The kernel presumably jumps into each of them in turn.
85 * These code images tend to have their own data area, and one can have
86 * multiple data areas representing different states for each of the 'client
87 * instance' code portions. There is generally a list in the kernel data
88 * that points to the data instances for a given piece of code.
89 *
90 * We've only been given the binary image for the 'minisrc', mini sample
91 * rate converter. This is rather annoying because it limits the work
92 * we can do on the dsp, but it also greatly simplifies the job of managing
93 * dsp data memory for the code and data for our playing streams :). We
94 * statically allocate the minisrc code into a region we 'know' to be free
95 * based on the map of the binary kernel image we're loading. We also
96 * statically allocate the data areas for the maximum number of pcm streams
97 * we can be dealing with. This max is set by the length of the static list
98 * in the kernel data that records the number of minisrc data regions we
99 * can have. Thats right, all software dsp mixing with static code list
100 * limits. Rock.
101 *
102 * How sound goes in and out is still a relative mystery. It appears
103 * that the dsp has the ability to get input and output through various
104 * 'connections'. To do IO from or to a connection, you put the address
105 * of the minisrc client area in the static kernel data lists for that
106 * input or output. so for pcm -> dsp -> mixer, we put the minisrc data
107 * instance in the DMA list and also in the list for the mixer. I guess
108 * it Just Knows which is in/out, and we give some dma control info that
109 * helps. There are all sorts of cool inputs/outputs that it seems we can't
110 * use without dsp code images that know how to use them.
111 *
112 * So at init time we preload all the memory allocation stuff and set some
113 * system wide parameters. When we really get a sound to play we build
114 * up its minisrc header (stream parameters, buffer addresses, input/output
115 * settings). Then we throw its header on the various lists. We also
116 * tickle some KDATA settings that ask the assp to raise clock interrupts
117 * and do some amount of software mixing before handing data to the ac97.
118 *
119 * Sorry for the vague details. Feel free to ask Eric or myself if you
120 * happen to be trying to use this driver elsewhere. Please accept my
121 * apologies for the quality of the OSS support code, its passed through
122 * too many hands now and desperately wants to be rethought.
123 */
124
125/*****************************************************************************/
126
127#include <linux/module.h>
128#include <linux/kernel.h>
129#include <linux/string.h>
130#include <linux/ctype.h>
131#include <linux/ioport.h>
132#include <linux/sched.h>
133#include <linux/delay.h>
134#include <linux/sound.h>
135#include <linux/slab.h>
136#include <linux/soundcard.h>
137#include <linux/pci.h>
138#include <linux/vmalloc.h>
139#include <linux/init.h>
140#include <linux/interrupt.h>
141#include <linux/poll.h>
142#include <linux/reboot.h>
143#include <linux/spinlock.h>
144#include <linux/ac97_codec.h>
145#include <linux/wait.h>
146#include <linux/mutex.h>
147
148
149#include <asm/io.h>
150#include <asm/dma.h>
151#include <asm/uaccess.h>
152
153#include "maestro3.h"
154
155#define M_DEBUG 1
156
157#define DRIVER_VERSION "1.23"
158#define M3_MODULE_NAME "maestro3"
159#define PFX M3_MODULE_NAME ": "
160
161#define M3_STATE_MAGIC 0x734d724d
162#define M3_CARD_MAGIC 0x646e6f50
163
164#define ESS_FMT_STEREO 0x01
165#define ESS_FMT_16BIT 0x02
166#define ESS_FMT_MASK 0x03
167#define ESS_DAC_SHIFT 0
168#define ESS_ADC_SHIFT 4
169
170#define DAC_RUNNING 1
171#define ADC_RUNNING 2
172
173#define SND_DEV_DSP16 5
174
175#ifdef M_DEBUG
176static int debug;
177#define DPMOD 1 /* per module load */
178#define DPSTR 2 /* per 'stream' */
179#define DPSYS 3 /* per syscall */
180#define DPCRAP 4 /* stuff the user shouldn't see unless they're really debuggin */
181#define DPINT 5 /* per interrupt, LOTS */
182#define DPRINTK(DP, args...) {if (debug >= (DP)) printk(KERN_DEBUG PFX args);}
183#else
184#define DPRINTK(x)
185#endif
186
187struct m3_list {
188 int curlen;
189 u16 mem_addr;
190 int max;
191};
192
193static int external_amp = 1;
194static int gpio_pin = -1;
195
196struct m3_state {
197 unsigned int magic;
198 struct m3_card *card;
199 unsigned char fmt, enable;
200
201 int index;
202
203 /* this locks around the oss state in the driver */
204 /* no, this lock is removed - only use card->lock */
205 /* otherwise: against what are you protecting on SMP
206 when irqhandler uses s->lock
207 and m3_assp_read uses card->lock ?
208 */
209 struct mutex open_mutex;
210 wait_queue_head_t open_wait;
211 mode_t open_mode;
212
213 int dev_audio;
214
215 struct assp_instance {
216 u16 code, data;
217 } dac_inst, adc_inst;
218
219 /* should be in dmabuf */
220 unsigned int rateadc, ratedac;
221
222 struct dmabuf {
223 void *rawbuf;
224 unsigned buforder;
225 unsigned numfrag;
226 unsigned fragshift;
227 unsigned hwptr, swptr;
228 unsigned total_bytes;
229 int count;
230 unsigned error; /* over/underrun */
231 wait_queue_head_t wait;
232 /* redundant, but makes calculations easier */
233 unsigned fragsize;
234 unsigned dmasize;
235 unsigned fragsamples;
236 /* OSS stuff */
237 unsigned mapped:1;
238 unsigned ready:1;
239 unsigned endcleared:1;
240 unsigned ossfragshift;
241 int ossmaxfrags;
242 unsigned subdivision;
243 /* new in m3 */
244 int mixer_index, dma_index, msrc_index, adc1_index;
245 int in_lists;
246 /* 2.4.. */
247 dma_addr_t handle;
248
249 } dma_dac, dma_adc;
250};
251
252struct m3_card {
253 unsigned int magic;
254
255 struct m3_card *next;
256
257 struct ac97_codec *ac97;
258 spinlock_t ac97_lock;
259
260 int card_type;
261
262#define NR_DSPS 1
263#define MAX_DSPS NR_DSPS
264 struct m3_state channels[MAX_DSPS];
265
266 /* this locks around the physical registers on the card */
267 spinlock_t lock;
268
269 /* hardware resources */
270 struct pci_dev *pcidev;
271 u32 iobase;
272 u32 irq;
273
274 int dacs_active;
275
276 int timer_users;
277
278 struct m3_list msrc_list,
279 mixer_list,
280 adc1_list,
281 dma_list;
282
283 /* for storing reset state..*/
284 u8 reset_state;
285
286 u16 *suspend_mem;
287 int in_suspend;
288 wait_queue_head_t suspend_queue;
289};
290
291/*
292 * an arbitrary volume we set the internal
293 * volume settings to so that the ac97 volume
294 * range is a little less insane. 0x7fff is
295 * max.
296 */
297#define ARB_VOLUME ( 0x6800 )
298
299static const unsigned sample_shift[] = { 0, 1, 1, 2 };
300
301enum {
302 ESS_ALLEGRO,
303 ESS_MAESTRO3,
304 /*
305 * a maestro3 with 'hardware strapping', only
306 * found inside ESS?
307 */
308 ESS_MAESTRO3HW,
309};
310
311static char *card_names[] = {
312 [ESS_ALLEGRO] = "Allegro",
313 [ESS_MAESTRO3] = "Maestro3(i)",
314 [ESS_MAESTRO3HW] = "Maestro3(i)hw"
315};
316
317#ifndef PCI_VENDOR_ESS
318#define PCI_VENDOR_ESS 0x125D
319#endif
320
321#define M3_DEVICE(DEV, TYPE) \
322{ \
323.vendor = PCI_VENDOR_ESS, \
324.device = DEV, \
325.subvendor = PCI_ANY_ID, \
326.subdevice = PCI_ANY_ID, \
327.class = PCI_CLASS_MULTIMEDIA_AUDIO << 8, \
328.class_mask = 0xffff << 8, \
329.driver_data = TYPE, \
330}
331
332static struct pci_device_id m3_id_table[] = {
333 M3_DEVICE(0x1988, ESS_ALLEGRO),
334 M3_DEVICE(0x1998, ESS_MAESTRO3),
335 M3_DEVICE(0x199a, ESS_MAESTRO3HW),
336 {0,}
337};
338
339MODULE_DEVICE_TABLE (pci, m3_id_table);
340
341/*
342 * reports seem to indicate that the m3 is limited
343 * to 28bit bus addresses. aaaargggh...
344 */
345#define M3_PCI_DMA_MASK 0x0fffffff
346
/*
 * Integer base-2 logarithm: returns floor(log2(x)) for x >= 1,
 * and 0 for x == 0 (matching the original unrolled version).
 */
static unsigned
ld2(unsigned int x)
{
	unsigned bits = 0;

	/* shift x down until only its top set bit remains, counting
	 * how many positions that took */
	while (x > 1) {
		x >>= 1;
		bits++;
	}
	return bits;
}
372
373static struct m3_card *devs;
374
375/*
376 * I'm not very good at laying out functions in a file :)
377 */
378static int m3_notifier(struct notifier_block *nb, unsigned long event, void *buf);
379static int m3_suspend(struct pci_dev *pci_dev, pm_message_t state);
380static void check_suspend(struct m3_card *card);
381
382static struct notifier_block m3_reboot_nb = {
383 .notifier_call = m3_notifier,
384};
385
/*
 * Write a 16bit value to a register in the card's I/O region.
 * check_suspend() is called first so register access waits out a
 * suspend in progress.
 */
static void m3_outw(struct m3_card *card,
        u16 value, unsigned long reg)
{
    check_suspend(card);
    outw(value, card->iobase + reg);
}
392
/* Read a 16bit register from the card's I/O region (suspend-safe). */
static u16 m3_inw(struct m3_card *card, unsigned long reg)
{
    check_suspend(card);
    return inw(card->iobase + reg);
}
/* Write an 8bit register in the card's I/O region (suspend-safe). */
static void m3_outb(struct m3_card *card,
        u8 value, unsigned long reg)
{
    check_suspend(card);
    outb(value, card->iobase + reg);
}
/* Read an 8bit register from the card's I/O region (suspend-safe). */
static u8 m3_inb(struct m3_card *card, unsigned long reg)
{
    check_suspend(card);
    return inb(card->iobase + reg);
}
409
410/*
411 * access 16bit words to the code or data regions of the dsp's memory.
412 * index addresses 16bit words.
413 */
414static u16 __m3_assp_read(struct m3_card *card, u16 region, u16 index)
415{
416 m3_outw(card, region & MEMTYPE_MASK, DSP_PORT_MEMORY_TYPE);
417 m3_outw(card, index, DSP_PORT_MEMORY_INDEX);
418 return m3_inw(card, DSP_PORT_MEMORY_DATA);
419}
420static u16 m3_assp_read(struct m3_card *card, u16 region, u16 index)
421{
422 unsigned long flags;
423 u16 ret;
424
425 spin_lock_irqsave(&(card->lock), flags);
426 ret = __m3_assp_read(card, region, index);
427 spin_unlock_irqrestore(&(card->lock), flags);
428
429 return ret;
430}
431
/*
 * Unlocked DSP memory write; caller must hold card->lock so the
 * type/index/data sequence is not interleaved with another access.
 */
static void __m3_assp_write(struct m3_card *card,
        u16 region, u16 index, u16 data)
{
    m3_outw(card, region & MEMTYPE_MASK, DSP_PORT_MEMORY_TYPE);
    m3_outw(card, index, DSP_PORT_MEMORY_INDEX);
    m3_outw(card, data, DSP_PORT_MEMORY_DATA);
}
439static void m3_assp_write(struct m3_card *card,
440 u16 region, u16 index, u16 data)
441{
442 unsigned long flags;
443
444 spin_lock_irqsave(&(card->lock), flags);
445 __m3_assp_write(card, region, index, data);
446 spin_unlock_irqrestore(&(card->lock), flags);
447}
448
/*
 * Put the DSP into reset: save control register B (with
 * REGB_STOP_CLOCK cleared) for m3_assp_continue() to restore, then
 * write it back with REGB_ENABLE_RESET cleared.
 */
static void m3_assp_halt(struct m3_card *card)
{
    card->reset_state = m3_inb(card, DSP_PORT_CONTROL_REG_B) & ~REGB_STOP_CLOCK;
    mdelay(10);
    m3_outb(card, card->reset_state & ~REGB_ENABLE_RESET, DSP_PORT_CONTROL_REG_B);
}
455
/* Release the DSP from reset by restoring the state m3_assp_halt() saved,
 * with REGB_ENABLE_RESET set. */
static void m3_assp_continue(struct m3_card *card)
{
    m3_outb(card, card->reset_state | REGB_ENABLE_RESET, DSP_PORT_CONTROL_REG_B);
}
460
461/*
462 * This makes me sad. the maestro3 has lists
463 * internally that must be packed.. 0 terminates,
464 * apparently, or maybe all unused entries have
465 * to be 0, the lists have static lengths set
466 * by the binary code images.
467 */
468
469static int m3_add_list(struct m3_card *card,
470 struct m3_list *list, u16 val)
471{
472 DPRINTK(DPSTR, "adding val 0x%x to list 0x%p at pos %d\n",
473 val, list, list->curlen);
474
475 m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
476 list->mem_addr + list->curlen,
477 val);
478
479 return list->curlen++;
480
481}
482
483static void m3_remove_list(struct m3_card *card,
484 struct m3_list *list, int index)
485{
486 u16 val;
487 int lastindex = list->curlen - 1;
488
489 DPRINTK(DPSTR, "removing ind %d from list 0x%p\n",
490 index, list);
491
492 if(index != lastindex) {
493 val = m3_assp_read(card, MEMTYPE_INTERNAL_DATA,
494 list->mem_addr + lastindex);
495 m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
496 list->mem_addr + index,
497 val);
498 }
499
500 m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
501 list->mem_addr + lastindex,
502 0);
503
504 list->curlen--;
505}
506
507static void set_fmt(struct m3_state *s, unsigned char mask, unsigned char data)
508{
509 int tmp;
510
511 s->fmt = (s->fmt & mask) | data;
512
513 tmp = (s->fmt >> ESS_DAC_SHIFT) & ESS_FMT_MASK;
514
515 /* write to 'mono' word */
516 m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
517 s->dac_inst.data + SRC3_DIRECTION_OFFSET + 1,
518 (tmp & ESS_FMT_STEREO) ? 0 : 1);
519 /* write to '8bit' word */
520 m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
521 s->dac_inst.data + SRC3_DIRECTION_OFFSET + 2,
522 (tmp & ESS_FMT_16BIT) ? 0 : 1);
523
524 tmp = (s->fmt >> ESS_ADC_SHIFT) & ESS_FMT_MASK;
525
526 /* write to 'mono' word */
527 m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
528 s->adc_inst.data + SRC3_DIRECTION_OFFSET + 1,
529 (tmp & ESS_FMT_STEREO) ? 0 : 1);
530 /* write to '8bit' word */
531 m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
532 s->adc_inst.data + SRC3_DIRECTION_OFFSET + 2,
533 (tmp & ESS_FMT_16BIT) ? 0 : 1);
534}
535
536static void set_dac_rate(struct m3_state *s, unsigned int rate)
537{
538 u32 freq;
539
540 if (rate > 48000)
541 rate = 48000;
542 if (rate < 8000)
543 rate = 8000;
544
545 s->ratedac = rate;
546
547 freq = ((rate << 15) + 24000 ) / 48000;
548 if(freq)
549 freq--;
550
551 m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
552 s->dac_inst.data + CDATA_FREQUENCY,
553 freq);
554}
555
556static void set_adc_rate(struct m3_state *s, unsigned int rate)
557{
558 u32 freq;
559
560 if (rate > 48000)
561 rate = 48000;
562 if (rate < 8000)
563 rate = 8000;
564
565 s->rateadc = rate;
566
567 freq = ((rate << 15) + 24000 ) / 48000;
568 if(freq)
569 freq--;
570
571 m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
572 s->adc_inst.data + CDATA_FREQUENCY,
573 freq);
574}
575
576static void inc_timer_users(struct m3_card *card)
577{
578 unsigned long flags;
579
580 spin_lock_irqsave(&card->lock, flags);
581
582 card->timer_users++;
583 DPRINTK(DPSYS, "inc timer users now %d\n",
584 card->timer_users);
585 if(card->timer_users != 1)
586 goto out;
587
588 __m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
589 KDATA_TIMER_COUNT_RELOAD,
590 240 ) ;
591
592 __m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
593 KDATA_TIMER_COUNT_CURRENT,
594 240 ) ;
595
596 m3_outw(card,
597 m3_inw(card, HOST_INT_CTRL) | CLKRUN_GEN_ENABLE,
598 HOST_INT_CTRL);
599out:
600 spin_unlock_irqrestore(&card->lock, flags);
601}
602
603static void dec_timer_users(struct m3_card *card)
604{
605 unsigned long flags;
606
607 spin_lock_irqsave(&card->lock, flags);
608
609 card->timer_users--;
610 DPRINTK(DPSYS, "dec timer users now %d\n",
611 card->timer_users);
612 if(card->timer_users > 0 )
613 goto out;
614
615 __m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
616 KDATA_TIMER_COUNT_RELOAD,
617 0 ) ;
618
619 __m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
620 KDATA_TIMER_COUNT_CURRENT,
621 0 ) ;
622
623 m3_outw(card, m3_inw(card, HOST_INT_CTRL) & ~CLKRUN_GEN_ENABLE,
624 HOST_INT_CTRL);
625out:
626 spin_unlock_irqrestore(&card->lock, flags);
627}
628
629/*
630 * {start,stop}_{adc,dac} should be called
631 * while holding the 'state' lock and they
632 * will try to grab the 'card' lock..
633 */
/*
 * Stop capture: clear the running flag, drop the timer reference,
 * then mark the ADC instance not-ready and clear the ADC request word.
 */
static void stop_adc(struct m3_state *s)
{
    if (! (s->enable & ADC_RUNNING))
        return;

    s->enable &= ~ADC_RUNNING;
    dec_timer_users(s->card);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_INSTANCE_READY, 0);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            KDATA_ADC1_REQUEST, 0);
}
648
/*
 * Stop playback: mark the DAC instance not-ready, clear the running
 * flag, drop the timer reference, and tell the mixer how many DACs
 * remain active.
 */
static void stop_dac(struct m3_state *s)
{
    if (! (s->enable & DAC_RUNNING))
        return;

    DPRINTK(DPSYS, "stop_dac()\n");

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_INSTANCE_READY, 0);

    s->enable &= ~DAC_RUNNING;
    s->card->dacs_active--;
    dec_timer_users(s->card);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            KDATA_MIXER_TASK_NUMBER,
            s->card->dacs_active ) ;
}
667
/*
 * Start playback if there is data queued (or the buffer is mmapped),
 * the DMA buffer is programmed, and we aren't already running.
 * Takes a timer reference and updates the mixer's active-DAC count.
 */
static void start_dac(struct m3_state *s)
{
    if( (!s->dma_dac.mapped && s->dma_dac.count < 1) ||
            !s->dma_dac.ready ||
            (s->enable & DAC_RUNNING))
        return;

    DPRINTK(DPSYS, "start_dac()\n");

    s->enable |= DAC_RUNNING;
    s->card->dacs_active++;
    inc_timer_users(s->card);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_INSTANCE_READY, 1);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            KDATA_MIXER_TASK_NUMBER,
            s->card->dacs_active ) ;
}
688
/*
 * Start capture if there is room left in the buffer (or it is
 * mmapped), the DMA buffer is programmed, and we aren't already
 * running.  Takes a timer reference and raises the ADC request word.
 */
static void start_adc(struct m3_state *s)
{
    if ((! s->dma_adc.mapped &&
                s->dma_adc.count >= (signed)(s->dma_adc.dmasize - 2*s->dma_adc.fragsize))
        || !s->dma_adc.ready
        || (s->enable & ADC_RUNNING) )
            return;

    DPRINTK(DPSYS, "start_adc()\n");

    s->enable |= ADC_RUNNING;
    inc_timer_users(s->card);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            KDATA_ADC1_REQUEST, 1);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_INSTANCE_READY, 1);
}
708
/*
 * Static per-client init table for a playback (DAC) minisrc instance;
 * each entry is written at (instance data + addr) by m3_play_setup().
 */
static struct play_vals {
    u16 addr, val;
} pv[] = {
    {CDATA_LEFT_VOLUME, ARB_VOLUME},
    {CDATA_RIGHT_VOLUME, ARB_VOLUME},
    {SRC3_DIRECTION_OFFSET, 0} ,
    /* +1, +2 are stereo/16 bit */
    {SRC3_DIRECTION_OFFSET + 3, 0x0000}, /* fraction? */
    {SRC3_DIRECTION_OFFSET + 4, 0}, /* first l */
    {SRC3_DIRECTION_OFFSET + 5, 0}, /* first r */
    {SRC3_DIRECTION_OFFSET + 6, 0}, /* second l */
    {SRC3_DIRECTION_OFFSET + 7, 0}, /* second r */
    {SRC3_DIRECTION_OFFSET + 8, 0}, /* delta l */
    {SRC3_DIRECTION_OFFSET + 9, 0}, /* delta r */
    {SRC3_DIRECTION_OFFSET + 10, 0x8000}, /* round */
    {SRC3_DIRECTION_OFFSET + 11, 0xFF00}, /* high byte mark */
    {SRC3_DIRECTION_OFFSET + 13, 0}, /* temp0 */
    {SRC3_DIRECTION_OFFSET + 14, 0}, /* c fraction */
    {SRC3_DIRECTION_OFFSET + 15, 0}, /* counter */
    {SRC3_DIRECTION_OFFSET + 16, 8}, /* numin */
    {SRC3_DIRECTION_OFFSET + 17, 50*2}, /* numout */
    {SRC3_DIRECTION_OFFSET + 18, MINISRC_BIQUAD_STAGE - 1}, /* numstage */
    {SRC3_DIRECTION_OFFSET + 20, 0}, /* filtertap */
    {SRC3_DIRECTION_OFFSET + 21, 0} /* booster */
};
734
735
/*
 * Program the DSP playback (DAC) instance for one host DMA buffer:
 * host DMA pointers, on-chip in/out ring pointers, per-client words,
 * the static init table (pv[]), then link the instance into the
 * card's msrc/dma/mixer lists and start the DAC.
 *
 * 'mode' is the already shifted and masked format byte; here it is
 * only used in the debug print (the format words themselves are
 * written by set_fmt()).
 */
static void m3_play_setup(struct m3_state *s, int mode, u32 rate, void *buffer, int size)
{
    int dsp_in_size = MINISRC_IN_BUFFER_SIZE - (0x20 * 2);
    int dsp_out_size = MINISRC_OUT_BUFFER_SIZE - (0x20 * 2);
    int dsp_in_buffer = s->dac_inst.data + (MINISRC_TMP_BUFFER_SIZE / 2);
    int dsp_out_buffer = dsp_in_buffer + (dsp_in_size / 2) + 1;
    struct dmabuf *db = &s->dma_dac;
    int i;

    DPRINTK(DPSTR, "mode=%d rate=%d buf=%p len=%d.\n",
            mode, rate, buffer, size);

#define LO(x) ((x) & 0xffff)
#define HI(x) LO((x) >> 16)

    /* host dma buffer pointers: start, end+1, and current, split
     * into 16bit low/high words for the DSP */

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_HOST_SRC_ADDRL,
            LO(virt_to_bus(buffer)));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_HOST_SRC_ADDRH,
            HI(virt_to_bus(buffer)));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_HOST_SRC_END_PLUS_1L,
            LO(virt_to_bus(buffer) + size));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_HOST_SRC_END_PLUS_1H,
            HI(virt_to_bus(buffer) + size));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_HOST_SRC_CURRENTL,
            LO(virt_to_bus(buffer)));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_HOST_SRC_CURRENTH,
            HI(virt_to_bus(buffer)));
#undef LO
#undef HI

    /* dsp buffers: begin/end+1 and head/tail ring pointers for the
     * instance's on-chip in and out buffers */

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_IN_BUF_BEGIN,
            dsp_in_buffer);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_IN_BUF_END_PLUS_1,
            dsp_in_buffer + (dsp_in_size / 2));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_IN_BUF_HEAD,
            dsp_in_buffer);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_IN_BUF_TAIL,
            dsp_in_buffer);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_OUT_BUF_BEGIN,
            dsp_out_buffer);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_OUT_BUF_END_PLUS_1,
            dsp_out_buffer + (dsp_out_size / 2));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_OUT_BUF_HEAD,
            dsp_out_buffer);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_OUT_BUF_TAIL,
            dsp_out_buffer);

    /*
     * some per client initializers
     */

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + SRC3_DIRECTION_OFFSET + 12,
            s->dac_inst.data + 40 + 8);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + SRC3_DIRECTION_OFFSET + 19,
            s->dac_inst.code + MINISRC_COEF_LOC);

    /* enable or disable low pass filter? */
    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + SRC3_DIRECTION_OFFSET + 22,
            s->ratedac > 45000 ? 0xff : 0 );

    /* tell it which way dma is going? */
    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->dac_inst.data + CDATA_DMA_CONTROL,
            DMACONTROL_AUTOREPEAT + DMAC_PAGE3_SELECTOR + DMAC_BLOCKF_SELECTOR);

    /*
     * set an armload of static initializers
     */
    for(i = 0 ; i < (sizeof(pv) / sizeof(pv[0])) ; i++)
        m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
                s->dac_inst.data + pv[i].addr, pv[i].val);

    /*
     * put us in the lists if we're not already there
     */

    if(db->in_lists == 0) {

        db->msrc_index = m3_add_list(s->card, &s->card->msrc_list,
                s->dac_inst.data >> DP_SHIFT_COUNT);

        db->dma_index = m3_add_list(s->card, &s->card->dma_list,
                s->dac_inst.data >> DP_SHIFT_COUNT);

        db->mixer_index = m3_add_list(s->card, &s->card->mixer_list,
                s->dac_inst.data >> DP_SHIFT_COUNT);

        db->in_lists = 1;
    }

    set_dac_rate(s,rate);
    start_dac(s);
}
864
865/*
866 * Native record driver
867 */
/*
 * Static per-client init table for a capture (ADC) minisrc instance;
 * written at (instance data + addr) by m3_rec_setup().
 */
static struct rec_vals {
    u16 addr, val;
} rv[] = {
    {CDATA_LEFT_VOLUME, ARB_VOLUME},
    {CDATA_RIGHT_VOLUME, ARB_VOLUME},
    {SRC3_DIRECTION_OFFSET, 1} ,
    /* +1, +2 are stereo/16 bit */
    {SRC3_DIRECTION_OFFSET + 3, 0x0000}, /* fraction? */
    {SRC3_DIRECTION_OFFSET + 4, 0}, /* first l */
    {SRC3_DIRECTION_OFFSET + 5, 0}, /* first r */
    {SRC3_DIRECTION_OFFSET + 6, 0}, /* second l */
    {SRC3_DIRECTION_OFFSET + 7, 0}, /* second r */
    {SRC3_DIRECTION_OFFSET + 8, 0}, /* delta l */
    {SRC3_DIRECTION_OFFSET + 9, 0}, /* delta r */
    {SRC3_DIRECTION_OFFSET + 10, 0x8000}, /* round */
    {SRC3_DIRECTION_OFFSET + 11, 0xFF00}, /* high byte mark */
    {SRC3_DIRECTION_OFFSET + 13, 0}, /* temp0 */
    {SRC3_DIRECTION_OFFSET + 14, 0}, /* c fraction */
    {SRC3_DIRECTION_OFFSET + 15, 0}, /* counter */
    {SRC3_DIRECTION_OFFSET + 16, 50},/* numin */
    {SRC3_DIRECTION_OFFSET + 17, 8}, /* numout */
    {SRC3_DIRECTION_OFFSET + 18, 0}, /* numstage */
    {SRC3_DIRECTION_OFFSET + 19, 0}, /* coef */
    {SRC3_DIRECTION_OFFSET + 20, 0}, /* filtertap */
    {SRC3_DIRECTION_OFFSET + 21, 0}, /* booster */
    {SRC3_DIRECTION_OFFSET + 22, 0xff} /* skip lpf */
};
895
/* again, the passed mode is already shifted/masked */
/*
 * Program the DSP capture (ADC) instance for one host DMA buffer;
 * mirrors m3_play_setup() but uses the adc instance, the rv[] init
 * table, DMACONTROL_DIRECTION, and the adc1/dma/msrc lists.
 * 'mode' is only used in the debug print.
 */
static void m3_rec_setup(struct m3_state *s, int mode, u32 rate, void *buffer, int size)
{
    int dsp_in_size = MINISRC_IN_BUFFER_SIZE + (0x10 * 2);
    int dsp_out_size = MINISRC_OUT_BUFFER_SIZE - (0x10 * 2);
    int dsp_in_buffer = s->adc_inst.data + (MINISRC_TMP_BUFFER_SIZE / 2);
    int dsp_out_buffer = dsp_in_buffer + (dsp_in_size / 2) + 1;
    struct dmabuf *db = &s->dma_adc;
    int i;

    DPRINTK(DPSTR, "rec_setup mode=%d rate=%d buf=%p len=%d.\n",
            mode, rate, buffer, size);

#define LO(x) ((x) & 0xffff)
#define HI(x) LO((x) >> 16)

    /* host dma buffer pointers: start, end+1, current (16bit halves) */

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_HOST_SRC_ADDRL,
            LO(virt_to_bus(buffer)));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_HOST_SRC_ADDRH,
            HI(virt_to_bus(buffer)));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_HOST_SRC_END_PLUS_1L,
            LO(virt_to_bus(buffer) + size));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_HOST_SRC_END_PLUS_1H,
            HI(virt_to_bus(buffer) + size));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_HOST_SRC_CURRENTL,
            LO(virt_to_bus(buffer)));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_HOST_SRC_CURRENTH,
            HI(virt_to_bus(buffer)));
#undef LO
#undef HI

    /* dsp buffers: ring begin/end+1 and head/tail pointers */

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_IN_BUF_BEGIN,
            dsp_in_buffer);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_IN_BUF_END_PLUS_1,
            dsp_in_buffer + (dsp_in_size / 2));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_IN_BUF_HEAD,
            dsp_in_buffer);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_IN_BUF_TAIL,
            dsp_in_buffer);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_OUT_BUF_BEGIN,
            dsp_out_buffer);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_OUT_BUF_END_PLUS_1,
            dsp_out_buffer + (dsp_out_size / 2));

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_OUT_BUF_HEAD,
            dsp_out_buffer);

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_OUT_BUF_TAIL,
            dsp_out_buffer);

    /*
     * some per client initializers
     */

    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + SRC3_DIRECTION_OFFSET + 12,
            s->adc_inst.data + 40 + 8);

    /* tell it which way dma is going? */
    m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
            s->adc_inst.data + CDATA_DMA_CONTROL,
            DMACONTROL_DIRECTION + DMACONTROL_AUTOREPEAT +
            DMAC_PAGE3_SELECTOR + DMAC_BLOCKF_SELECTOR);

    /*
     * set an armload of static initializers
     */
    for(i = 0 ; i < (sizeof(rv) / sizeof(rv[0])) ; i++)
        m3_assp_write(s->card, MEMTYPE_INTERNAL_DATA,
                s->adc_inst.data + rv[i].addr, rv[i].val);

    /*
     * put us in the lists if we're not already there
     */

    if(db->in_lists == 0) {

        db->adc1_index = m3_add_list(s->card, &s->card->adc1_list,
                s->adc_inst.data >> DP_SHIFT_COUNT);

        db->dma_index = m3_add_list(s->card, &s->card->dma_list,
                s->adc_inst.data >> DP_SHIFT_COUNT);

        db->msrc_index = m3_add_list(s->card, &s->card->msrc_list,
                s->adc_inst.data >> DP_SHIFT_COUNT);

        db->in_lists = 1;
    }

    set_adc_rate(s,rate);
    start_adc(s);
}
1016/* --------------------------------------------------------------------- */
1017
/* Stub: DAC DMA re-programming is not implemented; only logs.
 * (addr/count are unused.) */
static void set_dmaa(struct m3_state *s, unsigned int addr, unsigned int count)
{
    DPRINTK(DPINT,"set_dmaa??\n");
}
1022
/* Stub: ADC DMA re-programming is not implemented; only logs.
 * (addr/count are unused.) */
static void set_dmac(struct m3_state *s, unsigned int addr, unsigned int count)
{
    DPRINTK(DPINT,"set_dmac??\n");
}
1027
1028static u32 get_dma_pos(struct m3_card *card,
1029 int instance_addr)
1030{
1031 u16 hi = 0, lo = 0;
1032 int retry = 10;
1033
1034 /*
1035 * try and get a valid answer
1036 */
1037 while(retry--) {
1038 hi = m3_assp_read(card, MEMTYPE_INTERNAL_DATA,
1039 instance_addr + CDATA_HOST_SRC_CURRENTH);
1040
1041 lo = m3_assp_read(card, MEMTYPE_INTERNAL_DATA,
1042 instance_addr + CDATA_HOST_SRC_CURRENTL);
1043
1044 if(hi == m3_assp_read(card, MEMTYPE_INTERNAL_DATA,
1045 instance_addr + CDATA_HOST_SRC_CURRENTH))
1046 break;
1047 }
1048 return lo | (hi<<16);
1049}
1050
/*
 * Current playback position as a byte offset into the DAC DMA
 * buffer (hardware bus position minus the buffer's bus address).
 */
static u32 get_dmaa(struct m3_state *s)
{
    u32 offset;

    offset = get_dma_pos(s->card, s->dac_inst.data) -
        virt_to_bus(s->dma_dac.rawbuf);

    DPRINTK(DPINT,"get_dmaa: 0x%08x\n",offset);

    return offset;
}
1062
/*
 * Current capture position as a byte offset into the ADC DMA
 * buffer.
 */
static u32 get_dmac(struct m3_state *s)
{
    u32 offset;

    offset = get_dma_pos(s->card, s->adc_inst.data) -
        virt_to_bus(s->dma_adc.rawbuf);

    DPRINTK(DPINT,"get_dmac: 0x%08x\n",offset);

    return offset;

}
1075
/*
 * (Re)program one direction's DMA buffer: compute OSS fragment
 * sizing from the current rate/format (honoring any user-requested
 * SETFRAGMENT values in ossfragshift/ossmaxfrags), reset the ring
 * state, silence-fill the buffer, and hand it to the DSP via
 * m3_rec_setup()/m3_play_setup().  rec selects ADC (1) or DAC (0).
 * Always returns 0.
 */
static int
prog_dmabuf(struct m3_state *s, unsigned rec)
{
    struct dmabuf *db = rec ? &s->dma_adc : &s->dma_dac;
    unsigned rate = rec ? s->rateadc : s->ratedac;
    unsigned bytepersec;
    unsigned bufs;
    unsigned char fmt;
    unsigned long flags;

    spin_lock_irqsave(&s->card->lock, flags);

    fmt = s->fmt;
    if (rec) {
        stop_adc(s);
        fmt >>= ESS_ADC_SHIFT;
    } else {
        stop_dac(s);
        fmt >>= ESS_DAC_SHIFT;
    }
    fmt &= ESS_FMT_MASK;

    db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;

    bytepersec = rate << sample_shift[fmt];
    bufs = PAGE_SIZE << db->buforder;
    if (db->ossfragshift) {
        /* user requested a fragment size; clamp so a fragment is
         * at least ~1ms of audio */
        if ((1000 << db->ossfragshift) < bytepersec)
            db->fragshift = ld2(bytepersec/1000);
        else
            db->fragshift = db->ossfragshift;
    } else {
        /* default: ~10ms fragments, divided by any subdivision,
         * with a floor of 8 bytes */
        db->fragshift = ld2(bytepersec/100/(db->subdivision ? db->subdivision : 1));
        if (db->fragshift < 3)
            db->fragshift = 3;
    }
    /* shrink fragments until at least 4 fit in the buffer */
    db->numfrag = bufs >> db->fragshift;
    while (db->numfrag < 4 && db->fragshift > 3) {
        db->fragshift--;
        db->numfrag = bufs >> db->fragshift;
    }
    db->fragsize = 1 << db->fragshift;
    if (db->ossmaxfrags >= 4 && db->ossmaxfrags < db->numfrag)
        db->numfrag = db->ossmaxfrags;
    db->fragsamples = db->fragsize >> sample_shift[fmt];
    db->dmasize = db->numfrag << db->fragshift;

    DPRINTK(DPSTR,"prog_dmabuf: numfrag: %d fragsize: %d dmasize: %d\n",db->numfrag,db->fragsize,db->dmasize);

    /* fill with silence: 0x80 for unsigned 8bit, 0 for signed 16bit */
    memset(db->rawbuf, (fmt & ESS_FMT_16BIT) ? 0 : 0x80, db->dmasize);

    if (rec)
        m3_rec_setup(s, fmt, s->rateadc, db->rawbuf, db->dmasize);
    else
        m3_play_setup(s, fmt, s->ratedac, db->rawbuf, db->dmasize);

    db->ready = 1;

    spin_unlock_irqrestore(&s->card->lock, flags);

    return 0;
}
1138
1139static void clear_advance(struct m3_state *s)
1140{
1141 unsigned char c = ((s->fmt >> ESS_DAC_SHIFT) & ESS_FMT_16BIT) ? 0 : 0x80;
1142
1143 unsigned char *buf = s->dma_dac.rawbuf;
1144 unsigned bsize = s->dma_dac.dmasize;
1145 unsigned bptr = s->dma_dac.swptr;
1146 unsigned len = s->dma_dac.fragsize;
1147
1148 if (bptr + len > bsize) {
1149 unsigned x = bsize - bptr;
1150 memset(buf + bptr, c, x);
1151 /* account for wrapping? */
1152 bptr = 0;
1153 len -= x;
1154 }
1155 memset(buf + bptr, c, len);
1156}
1157
/*
 * Refresh software state from the hardware DMA positions: advance
 * hwptr/count/total_bytes for both directions, wake sleepers, and
 * recover from over/underrun.  Call with the card spinlock held!
 */
static void m3_update_ptr(struct m3_state *s)
{
    unsigned hwptr;
    int diff;

    /* update ADC pointer */
    if (s->dma_adc.ready) {
        hwptr = get_dmac(s) % s->dma_adc.dmasize;
        diff = (s->dma_adc.dmasize + hwptr - s->dma_adc.hwptr) % s->dma_adc.dmasize;
        s->dma_adc.hwptr = hwptr;
        s->dma_adc.total_bytes += diff;
        s->dma_adc.count += diff;
        if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
            wake_up(&s->dma_adc.wait);
        if (!s->dma_adc.mapped) {
            /* overrun: too close to the reader; reset the ring */
            if (s->dma_adc.count > (signed)(s->dma_adc.dmasize - ((3 * s->dma_adc.fragsize) >> 1))) {
                stop_adc(s);
                /* brute force everyone back in sync, sigh */
                s->dma_adc.count = 0;
                s->dma_adc.swptr = 0;
                s->dma_adc.hwptr = 0;
                s->dma_adc.error++;
            }
        }
    }
    /* update DAC pointer */
    if (s->dma_dac.ready) {
        hwptr = get_dmaa(s) % s->dma_dac.dmasize;
        diff = (s->dma_dac.dmasize + hwptr - s->dma_dac.hwptr) % s->dma_dac.dmasize;

        DPRINTK(DPINT,"updating dac: hwptr: %6d diff: %6d count: %6d\n",
                hwptr,diff,s->dma_dac.count);

        s->dma_dac.hwptr = hwptr;
        s->dma_dac.total_bytes += diff;

        if (s->dma_dac.mapped) {

            s->dma_dac.count += diff;
            if (s->dma_dac.count >= (signed)s->dma_dac.fragsize) {
                wake_up(&s->dma_dac.wait);
            }
        } else {

            /* count is bytes still queued; drains as hw consumes */
            s->dma_dac.count -= diff;

            if (s->dma_dac.count <= 0) {
                DPRINTK(DPCRAP,"underflow! diff: %d (0x%x) count: %d (0x%x) hw: %d (0x%x) sw: %d (0x%x)\n",
                        diff, diff,
                        s->dma_dac.count,
                        s->dma_dac.count,
                        hwptr, hwptr,
                        s->dma_dac.swptr,
                        s->dma_dac.swptr);
                stop_dac(s);
                /* brute force everyone back in sync, sigh */
                s->dma_dac.count = 0;
                s->dma_dac.swptr = hwptr;
                s->dma_dac.error++;
            } else if (s->dma_dac.count <= (signed)s->dma_dac.fragsize && !s->dma_dac.endcleared) {
                /* nearly dry: pre-silence the next fragment once */
                clear_advance(s);
                s->dma_dac.endcleared = 1;
            }
            if (s->dma_dac.count + (signed)s->dma_dac.fragsize <= (signed)s->dma_dac.dmasize) {
                wake_up(&s->dma_dac.wait);
                DPRINTK(DPINT,"waking up DAC count: %d sw: %d hw: %d\n",
                        s->dma_dac.count, s->dma_dac.swptr, hwptr);
            }
        }
    }
}
1230
/*
 * Interrupt handler: ack the card's status register, then if the DSP
 * is running and raised a timer interrupt, ack it and update the
 * ring pointers under the card lock.  Status 0xff means the
 * interrupt was not ours.
 */
static irqreturn_t m3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
    struct m3_card *c = (struct m3_card *)dev_id;
    struct m3_state *s = &c->channels[0];
    u8 status;

    status = inb(c->iobase+0x1A);

    if(status == 0xff)
        return IRQ_NONE;

    /* presumably acking the ints? */
    outw(status, c->iobase+0x1A);

    if(c->in_suspend)
        return IRQ_HANDLED;

    /*
     * ack an assp int if its running
     * and has an int pending
     */
    if( status & ASSP_INT_PENDING) {
        u8 ctl = inb(c->iobase + ASSP_CONTROL_B);
        if( !(ctl & STOP_ASSP_CLOCK)) {
            ctl = inb(c->iobase + ASSP_HOST_INT_STATUS );
            if(ctl & DSP2HOST_REQ_TIMER) {
                outb( DSP2HOST_REQ_TIMER, c->iobase + ASSP_HOST_INT_STATUS);
                /* update adc/dac info if it was a timer int */
                spin_lock(&c->lock);
                m3_update_ptr(s);
                spin_unlock(&c->lock);
            }
        }
    }

    /* XXX is this needed? */
    if(status & 0x40)
        outb(0x40, c->iobase+0x1A);
    return IRQ_HANDLED;
}
1271
1272
1273/* --------------------------------------------------------------------- */
1274
static const char invalid_magic[] = KERN_CRIT PFX "invalid magic value in %s\n";

/*
 * Sanity-check a state/card pointer by its magic field.  On a NULL
 * pointer or magic mismatch this logs and does a 'return -ENXIO'
 * from the *enclosing* function.
 */
#define VALIDATE_MAGIC(FOO,MAG)                         \
({                                                \
    if (!(FOO) || (FOO)->magic != MAG) { \
        printk(invalid_magic,__FUNCTION__);            \
        return -ENXIO;                    \
    }                                         \
})

#define VALIDATE_STATE(a) VALIDATE_MAGIC(a,M3_STATE_MAGIC)
#define VALIDATE_CARD(a) VALIDATE_MAGIC(a,M3_CARD_MAGIC)
1287
1288/* --------------------------------------------------------------------- */
1289
/*
 * Block until the playback buffer has drained (count <= 0).  Returns
 * 0 on success or if the buffer is mmapped/not ready, -EBUSY when
 * nonblock is set and data remains, -ERESTARTSYS on a signal.
 */
static int drain_dac(struct m3_state *s, int nonblock)
{
    DECLARE_WAITQUEUE(wait,current);
    unsigned long flags;
    int count;
    signed long tmo;

    if (s->dma_dac.mapped || !s->dma_dac.ready)
        return 0;
    set_current_state(TASK_INTERRUPTIBLE);
    add_wait_queue(&s->dma_dac.wait, &wait);
    for (;;) {
        spin_lock_irqsave(&s->card->lock, flags);
        count = s->dma_dac.count;
        spin_unlock_irqrestore(&s->card->lock, flags);
        if (count <= 0)
            break;
        if (signal_pending(current))
            break;
        if (nonblock) {
            remove_wait_queue(&s->dma_dac.wait, &wait);
            set_current_state(TASK_RUNNING);
            return -EBUSY;
        }
        /* sleep roughly as long as the remaining data takes to play */
        tmo = (count * HZ) / s->ratedac;
        tmo >>= sample_shift[(s->fmt >> ESS_DAC_SHIFT) & ESS_FMT_MASK];
        /* XXX this is just broken.  someone is waking us up alot, or schedule_timeout is broken.
            or something.  who cares. - zach */
        if (!schedule_timeout(tmo ? tmo : 1) && tmo)
            DPRINTK(DPCRAP,"dma timed out?? %ld\n",jiffies);
    }
    remove_wait_queue(&s->dma_dac.wait, &wait);
    set_current_state(TASK_RUNNING);
    if (signal_pending(current))
        return -ERESTARTSYS;
    return 0;
}
1327
/*
 * read(2): copy captured audio from the ADC ring to userspace.
 * Sleeps (unless O_NONBLOCK) when no data is available; a 1s sleep
 * timeout is treated as a chip lockup and resets the capture ring.
 * The card lock is dropped across copy_to_user().
 */
static ssize_t m3_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
    struct m3_state *s = (struct m3_state *)file->private_data;
    ssize_t ret;
    unsigned long flags;
    unsigned swptr;
    int cnt;

    VALIDATE_STATE(s);
    if (s->dma_adc.mapped)
        return -ENXIO;
    if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
        return ret;
    if (!access_ok(VERIFY_WRITE, buffer, count))
        return -EFAULT;
    ret = 0;

    spin_lock_irqsave(&s->card->lock, flags);

    while (count > 0) {
        int timed_out;

        /* contiguous chunk: up to end of ring, available data, and
         * the user's remaining request */
        swptr = s->dma_adc.swptr;
        cnt = s->dma_adc.dmasize-swptr;
        if (s->dma_adc.count < cnt)
            cnt = s->dma_adc.count;

        if (cnt > count)
            cnt = count;

        if (cnt <= 0) {
            start_adc(s);
            if (file->f_flags & O_NONBLOCK)
            {
                ret = ret ? ret : -EAGAIN;
                goto out;
            }

            spin_unlock_irqrestore(&s->card->lock, flags);
            timed_out = interruptible_sleep_on_timeout(&s->dma_adc.wait, HZ) == 0;
            spin_lock_irqsave(&s->card->lock, flags);

            if(timed_out) {
                printk("read: chip lockup? dmasz %u fragsz %u count %u hwptr %u swptr %u\n",
                        s->dma_adc.dmasize, s->dma_adc.fragsize, s->dma_adc.count,
                        s->dma_adc.hwptr, s->dma_adc.swptr);
                stop_adc(s);
                set_dmac(s, virt_to_bus(s->dma_adc.rawbuf), s->dma_adc.numfrag << s->dma_adc.fragshift);
                s->dma_adc.count = s->dma_adc.hwptr = s->dma_adc.swptr = 0;
            }
            if (signal_pending(current))
            {
                ret = ret ? ret : -ERESTARTSYS;
                goto out;
            }
            continue;
        }

        spin_unlock_irqrestore(&s->card->lock, flags);
        if (copy_to_user(buffer, s->dma_adc.rawbuf + swptr, cnt)) {
            /* lock already dropped above, so return directly
             * rather than via out: */
            ret = ret ? ret : -EFAULT;
            return ret;
        }
        spin_lock_irqsave(&s->card->lock, flags);

        swptr = (swptr + cnt) % s->dma_adc.dmasize;
        s->dma_adc.swptr = swptr;
        s->dma_adc.count -= cnt;
        count -= cnt;
        buffer += cnt;
        ret += cnt;
        start_adc(s);
    }

out:
    spin_unlock_irqrestore(&s->card->lock, flags);
    return ret;
}
1406
/*
 * write(2): copy playback audio from userspace into the DAC ring.
 * Sleeps (unless O_NONBLOCK) when the ring is full; a 1s sleep
 * timeout is treated as a chip lockup and resets the playback ring.
 * The card lock is dropped across copy_from_user().
 */
static ssize_t m3_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
    struct m3_state *s = (struct m3_state *)file->private_data;
    ssize_t ret;
    unsigned long flags;
    unsigned swptr;
    int cnt;

    VALIDATE_STATE(s);
    if (s->dma_dac.mapped)
        return -ENXIO;
    if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
        return ret;
    if (!access_ok(VERIFY_READ, buffer, count))
        return -EFAULT;
    ret = 0;

    spin_lock_irqsave(&s->card->lock, flags);

    while (count > 0) {
        int timed_out;

        /* recover from an underrun recorded by m3_update_ptr() */
        if (s->dma_dac.count < 0) {
            s->dma_dac.count = 0;
            s->dma_dac.swptr = s->dma_dac.hwptr;
        }
        swptr = s->dma_dac.swptr;

        /* contiguous chunk: up to end of ring, free space, and
         * the user's remaining data */
        cnt = s->dma_dac.dmasize-swptr;

        if (s->dma_dac.count + cnt > s->dma_dac.dmasize)
            cnt = s->dma_dac.dmasize - s->dma_dac.count;


        if (cnt > count)
            cnt = count;

        if (cnt <= 0) {
            start_dac(s);
            if (file->f_flags & O_NONBLOCK) {
                if(!ret) ret = -EAGAIN;
                goto out;
            }
            spin_unlock_irqrestore(&s->card->lock, flags);
            timed_out = interruptible_sleep_on_timeout(&s->dma_dac.wait, HZ) == 0;
            spin_lock_irqsave(&s->card->lock, flags);
            if(timed_out) {
                DPRINTK(DPCRAP,"write: chip lockup? dmasz %u fragsz %u count %u hwptr %u swptr %u\n",
                        s->dma_dac.dmasize, s->dma_dac.fragsize, s->dma_dac.count,
                        s->dma_dac.hwptr, s->dma_dac.swptr);
                stop_dac(s);
                set_dmaa(s, virt_to_bus(s->dma_dac.rawbuf), s->dma_dac.numfrag << s->dma_dac.fragshift);
                s->dma_dac.count = s->dma_dac.hwptr = s->dma_dac.swptr = 0;
            }
            if (signal_pending(current)) {
                if (!ret) ret = -ERESTARTSYS;
                goto out;
            }
            continue;
        }
        spin_unlock_irqrestore(&s->card->lock, flags);
        if (copy_from_user(s->dma_dac.rawbuf + swptr, buffer, cnt)) {
            /* lock already dropped above, so return directly
             * rather than via out: */
            if (!ret) ret = -EFAULT;
            return ret;
        }
        spin_lock_irqsave(&s->card->lock, flags);

        DPRINTK(DPSYS,"wrote %6d bytes at sw: %6d cnt: %6d while hw: %6d\n",
                cnt, swptr, s->dma_dac.count, s->dma_dac.hwptr);

        swptr = (swptr + cnt) % s->dma_dac.dmasize;

        s->dma_dac.swptr = swptr;
        s->dma_dac.count += cnt;
        s->dma_dac.endcleared = 0;
        count -= cnt;
        buffer += cnt;
        ret += cnt;
        start_dac(s);
    }
out:
    spin_unlock_irqrestore(&s->card->lock, flags);
    return ret;
}
1491
1492static unsigned int m3_poll(struct file *file, struct poll_table_struct *wait)
1493{
1494 struct m3_state *s = (struct m3_state *)file->private_data;
1495 unsigned long flags;
1496 unsigned int mask = 0;
1497
1498 VALIDATE_STATE(s);
1499 if (file->f_mode & FMODE_WRITE)
1500 poll_wait(file, &s->dma_dac.wait, wait);
1501 if (file->f_mode & FMODE_READ)
1502 poll_wait(file, &s->dma_adc.wait, wait);
1503
1504 spin_lock_irqsave(&s->card->lock, flags);
1505 m3_update_ptr(s);
1506
1507 if (file->f_mode & FMODE_READ) {
1508 if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
1509 mask |= POLLIN | POLLRDNORM;
1510 }
1511 if (file->f_mode & FMODE_WRITE) {
1512 if (s->dma_dac.mapped) {
1513 if (s->dma_dac.count >= (signed)s->dma_dac.fragsize)
1514 mask |= POLLOUT | POLLWRNORM;
1515 } else {
1516 if ((signed)s->dma_dac.dmasize >= s->dma_dac.count + (signed)s->dma_dac.fragsize)
1517 mask |= POLLOUT | POLLWRNORM;
1518 }
1519 }
1520
1521 spin_unlock_irqrestore(&s->card->lock, flags);
1522 return mask;
1523}
1524
1525static int m3_mmap(struct file *file, struct vm_area_struct *vma)
1526{
1527 struct m3_state *s = (struct m3_state *)file->private_data;
1528 unsigned long max_size, size, start, offset;
1529 struct dmabuf *db;
1530 int ret = -EINVAL;
1531
1532 VALIDATE_STATE(s);
1533 if (vma->vm_flags & VM_WRITE) {
1534 if ((ret = prog_dmabuf(s, 0)) != 0)
1535 return ret;
1536 db = &s->dma_dac;
1537 } else
1538 if (vma->vm_flags & VM_READ) {
1539 if ((ret = prog_dmabuf(s, 1)) != 0)
1540 return ret;
1541 db = &s->dma_adc;
1542 } else
1543 return -EINVAL;
1544
1545 max_size = db->dmasize;
1546
1547 start = vma->vm_start;
1548 offset = (vma->vm_pgoff << PAGE_SHIFT);
1549 size = vma->vm_end - vma->vm_start;
1550
1551 if(size > max_size)
1552 goto out;
1553 if(offset > max_size - size)
1554 goto out;
1555
1556 /*
1557 * this will be ->nopage() once I can
1558 * ask Jeff what the hell I'm doing wrong.
1559 */
1560 ret = -EAGAIN;
1561 if (remap_pfn_range(vma, vma->vm_start,
1562 virt_to_phys(db->rawbuf) >> PAGE_SHIFT,
1563 size, vma->vm_page_prot))
1564 goto out;
1565
1566 db->mapped = 1;
1567 ret = 0;
1568
1569out:
1570 return ret;
1571}
1572
/*
 * this function is a disaster..
 *
 * OSS ioctl() handler for the /dev/dsp devices: rates, formats,
 * fragment sizing, triggers and pointer queries.  Every case returns
 * directly; get_user_ret() below hides a "return" inside a macro.
 */
#define get_user_ret(x, ptr, ret) ({ if(get_user(x, ptr)) return ret; })
static int m3_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
    struct m3_state *s = (struct m3_state *)file->private_data;
    struct m3_card *card=s->card;
    unsigned long flags;
    audio_buf_info abinfo;
    count_info cinfo;
    int val, mapped, ret;
    unsigned char fmtm, fmtd;
    void __user *argp = (void __user *)arg;
    int __user *p = argp;

    VALIDATE_STATE(s);

    /* computed here but not referenced anywhere below in this function */
    mapped = ((file->f_mode & FMODE_WRITE) && s->dma_dac.mapped) ||
        ((file->f_mode & FMODE_READ) && s->dma_adc.mapped);

    DPRINTK(DPSYS,"m3_ioctl: cmd %d\n", cmd);

    switch (cmd) {
    case OSS_GETVERSION:
        return put_user(SOUND_VERSION, p);

    case SNDCTL_DSP_SYNC:
        /* wait for queued playback data to drain */
        if (file->f_mode & FMODE_WRITE)
            return drain_dac(s, file->f_flags & O_NONBLOCK);
        return 0;

    case SNDCTL_DSP_SETDUPLEX:
        /* XXX fix */
        return 0;

    case SNDCTL_DSP_GETCAPS:
        return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME | DSP_CAP_TRIGGER | DSP_CAP_MMAP, p);

    case SNDCTL_DSP_RESET:
        /* stop the active direction(s) and zero all ring state */
        spin_lock_irqsave(&card->lock, flags);
        if (file->f_mode & FMODE_WRITE) {
            stop_dac(s);
            synchronize_irq(s->card->pcidev->irq);
            s->dma_dac.swptr = s->dma_dac.hwptr = s->dma_dac.count = s->dma_dac.total_bytes = 0;
        }
        if (file->f_mode & FMODE_READ) {
            stop_adc(s);
            synchronize_irq(s->card->pcidev->irq);
            s->dma_adc.swptr = s->dma_adc.hwptr = s->dma_adc.count = s->dma_adc.total_bytes = 0;
        }
        spin_unlock_irqrestore(&card->lock, flags);
        return 0;

    case SNDCTL_DSP_SPEED:
        /* set the sample rate; negative values just query */
        get_user_ret(val, p, -EFAULT);
        spin_lock_irqsave(&card->lock, flags);
        if (val >= 0) {
            if (file->f_mode & FMODE_READ) {
                stop_adc(s);
                s->dma_adc.ready = 0;
                set_adc_rate(s, val);
            }
            if (file->f_mode & FMODE_WRITE) {
                stop_dac(s);
                s->dma_dac.ready = 0;
                set_dac_rate(s, val);
            }
        }
        spin_unlock_irqrestore(&card->lock, flags);
        return put_user((file->f_mode & FMODE_READ) ? s->rateadc : s->ratedac, p);

    case SNDCTL_DSP_STEREO:
        /* fmtd carries bits to set, fmtm bits to keep (mask) */
        get_user_ret(val, p, -EFAULT);
        spin_lock_irqsave(&card->lock, flags);
        fmtd = 0;
        fmtm = ~0;
        if (file->f_mode & FMODE_READ) {
            stop_adc(s);
            s->dma_adc.ready = 0;
            if (val)
                fmtd |= ESS_FMT_STEREO << ESS_ADC_SHIFT;
            else
                fmtm &= ~(ESS_FMT_STEREO << ESS_ADC_SHIFT);
        }
        if (file->f_mode & FMODE_WRITE) {
            stop_dac(s);
            s->dma_dac.ready = 0;
            if (val)
                fmtd |= ESS_FMT_STEREO << ESS_DAC_SHIFT;
            else
                fmtm &= ~(ESS_FMT_STEREO << ESS_DAC_SHIFT);
        }
        set_fmt(s, fmtm, fmtd);
        spin_unlock_irqrestore(&card->lock, flags);
        return 0;

    case SNDCTL_DSP_CHANNELS:
        /* 1 = mono, >=2 = stereo; 0 just queries the current setting */
        get_user_ret(val, p, -EFAULT);
        spin_lock_irqsave(&card->lock, flags);
        if (val != 0) {
            fmtd = 0;
            fmtm = ~0;
            if (file->f_mode & FMODE_READ) {
                stop_adc(s);
                s->dma_adc.ready = 0;
                if (val >= 2)
                    fmtd |= ESS_FMT_STEREO << ESS_ADC_SHIFT;
                else
                    fmtm &= ~(ESS_FMT_STEREO << ESS_ADC_SHIFT);
            }
            if (file->f_mode & FMODE_WRITE) {
                stop_dac(s);
                s->dma_dac.ready = 0;
                if (val >= 2)
                    fmtd |= ESS_FMT_STEREO << ESS_DAC_SHIFT;
                else
                    fmtm &= ~(ESS_FMT_STEREO << ESS_DAC_SHIFT);
            }
            set_fmt(s, fmtm, fmtd);
        }
        spin_unlock_irqrestore(&card->lock, flags);
        return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (ESS_FMT_STEREO << ESS_ADC_SHIFT)
                       : (ESS_FMT_STEREO << ESS_DAC_SHIFT))) ? 2 : 1, p);

    case SNDCTL_DSP_GETFMTS: /* Returns a mask */
        return put_user(AFMT_U8|AFMT_S16_LE, p);

    case SNDCTL_DSP_SETFMT: /* Selects ONE fmt*/
        /* only AFMT_U8 and AFMT_S16_LE are supported */
        get_user_ret(val, p, -EFAULT);
        spin_lock_irqsave(&card->lock, flags);
        if (val != AFMT_QUERY) {
            fmtd = 0;
            fmtm = ~0;
            if (file->f_mode & FMODE_READ) {
                stop_adc(s);
                s->dma_adc.ready = 0;
                if (val == AFMT_S16_LE)
                    fmtd |= ESS_FMT_16BIT << ESS_ADC_SHIFT;
                else
                    fmtm &= ~(ESS_FMT_16BIT << ESS_ADC_SHIFT);
            }
            if (file->f_mode & FMODE_WRITE) {
                stop_dac(s);
                s->dma_dac.ready = 0;
                if (val == AFMT_S16_LE)
                    fmtd |= ESS_FMT_16BIT << ESS_DAC_SHIFT;
                else
                    fmtm &= ~(ESS_FMT_16BIT << ESS_DAC_SHIFT);
            }
            set_fmt(s, fmtm, fmtd);
        }
        spin_unlock_irqrestore(&card->lock, flags);
        return put_user((s->fmt & ((file->f_mode & FMODE_READ) ?
                   (ESS_FMT_16BIT << ESS_ADC_SHIFT)
                   : (ESS_FMT_16BIT << ESS_DAC_SHIFT))) ?
                AFMT_S16_LE :
                AFMT_U8,
            p);

    case SNDCTL_DSP_POST:
        return 0;

    case SNDCTL_DSP_GETTRIGGER:
        /* report which directions are currently running */
        val = 0;
        if ((file->f_mode & FMODE_READ) && (s->enable & ADC_RUNNING))
            val |= PCM_ENABLE_INPUT;
        if ((file->f_mode & FMODE_WRITE) && (s->enable & DAC_RUNNING))
            val |= PCM_ENABLE_OUTPUT;
        return put_user(val, p);

    case SNDCTL_DSP_SETTRIGGER:
        /* start/stop each direction per the caller's enable bits */
        get_user_ret(val, p, -EFAULT);
        if (file->f_mode & FMODE_READ) {
            if (val & PCM_ENABLE_INPUT) {
                if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
                    return ret;
                start_adc(s);
            } else
                stop_adc(s);
        }
        if (file->f_mode & FMODE_WRITE) {
            if (val & PCM_ENABLE_OUTPUT) {
                if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
                    return ret;
                start_dac(s);
            } else
                stop_dac(s);
        }
        return 0;

    case SNDCTL_DSP_GETOSPACE:
        /* free space in the playback ring */
        if (!(file->f_mode & FMODE_WRITE))
            return -EINVAL;
        if (!(s->enable & DAC_RUNNING) && (val = prog_dmabuf(s, 0)) != 0)
            return val;
        spin_lock_irqsave(&card->lock, flags);
        m3_update_ptr(s);
        abinfo.fragsize = s->dma_dac.fragsize;
        abinfo.bytes = s->dma_dac.dmasize - s->dma_dac.count;
        abinfo.fragstotal = s->dma_dac.numfrag;
        abinfo.fragments = abinfo.bytes >> s->dma_dac.fragshift;
        spin_unlock_irqrestore(&card->lock, flags);
        return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;

    case SNDCTL_DSP_GETISPACE:
        /* data available in the capture ring */
        if (!(file->f_mode & FMODE_READ))
            return -EINVAL;
        if (!(s->enable & ADC_RUNNING) && (val = prog_dmabuf(s, 1)) != 0)
            return val;
        spin_lock_irqsave(&card->lock, flags);
        m3_update_ptr(s);
        abinfo.fragsize = s->dma_adc.fragsize;
        abinfo.bytes = s->dma_adc.count;
        abinfo.fragstotal = s->dma_adc.numfrag;
        abinfo.fragments = abinfo.bytes >> s->dma_adc.fragshift;
        spin_unlock_irqrestore(&card->lock, flags);
        return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;

    case SNDCTL_DSP_NONBLOCK:
        file->f_flags |= O_NONBLOCK;
        return 0;

    case SNDCTL_DSP_GETODELAY:
        /* bytes queued but not yet played */
        if (!(file->f_mode & FMODE_WRITE))
            return -EINVAL;
        spin_lock_irqsave(&card->lock, flags);
        m3_update_ptr(s);
        val = s->dma_dac.count;
        spin_unlock_irqrestore(&card->lock, flags);
        return put_user(val, p);

    case SNDCTL_DSP_GETIPTR:
        if (!(file->f_mode & FMODE_READ))
            return -EINVAL;
        spin_lock_irqsave(&card->lock, flags);
        m3_update_ptr(s);
        cinfo.bytes = s->dma_adc.total_bytes;
        cinfo.blocks = s->dma_adc.count >> s->dma_adc.fragshift;
        cinfo.ptr = s->dma_adc.hwptr;
        /* for mmap'ed buffers, the count is consumed by this query */
        if (s->dma_adc.mapped)
            s->dma_adc.count &= s->dma_adc.fragsize-1;
        spin_unlock_irqrestore(&card->lock, flags);
        if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
            return -EFAULT;
        return 0;

    case SNDCTL_DSP_GETOPTR:
        if (!(file->f_mode & FMODE_WRITE))
            return -EINVAL;
        spin_lock_irqsave(&card->lock, flags);
        m3_update_ptr(s);
        cinfo.bytes = s->dma_dac.total_bytes;
        cinfo.blocks = s->dma_dac.count >> s->dma_dac.fragshift;
        cinfo.ptr = s->dma_dac.hwptr;
        if (s->dma_dac.mapped)
            s->dma_dac.count &= s->dma_dac.fragsize-1;
        spin_unlock_irqrestore(&card->lock, flags);
        if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
            return -EFAULT;
        return 0;

    case SNDCTL_DSP_GETBLKSIZE:
        if (file->f_mode & FMODE_WRITE) {
            if ((val = prog_dmabuf(s, 0)))
                return val;
            return put_user(s->dma_dac.fragsize, p);
        }
        if ((val = prog_dmabuf(s, 1)))
            return val;
        return put_user(s->dma_adc.fragsize, p);

    case SNDCTL_DSP_SETFRAGMENT:
        /* low 16 bits: log2 fragment size; high 16 bits: max fragments */
        get_user_ret(val, p, -EFAULT);
        spin_lock_irqsave(&card->lock, flags);
        if (file->f_mode & FMODE_READ) {
            s->dma_adc.ossfragshift = val & 0xffff;
            s->dma_adc.ossmaxfrags = (val >> 16) & 0xffff;
            if (s->dma_adc.ossfragshift < 4)
                s->dma_adc.ossfragshift = 4;
            if (s->dma_adc.ossfragshift > 15)
                s->dma_adc.ossfragshift = 15;
            if (s->dma_adc.ossmaxfrags < 4)
                s->dma_adc.ossmaxfrags = 4;
        }
        if (file->f_mode & FMODE_WRITE) {
            s->dma_dac.ossfragshift = val & 0xffff;
            s->dma_dac.ossmaxfrags = (val >> 16) & 0xffff;
            if (s->dma_dac.ossfragshift < 4)
                s->dma_dac.ossfragshift = 4;
            if (s->dma_dac.ossfragshift > 15)
                s->dma_dac.ossfragshift = 15;
            if (s->dma_dac.ossmaxfrags < 4)
                s->dma_dac.ossmaxfrags = 4;
        }
        spin_unlock_irqrestore(&card->lock, flags);
        return 0;

    case SNDCTL_DSP_SUBDIVIDE:
        /* may only be set once per direction, before fragment setup */
        if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision) ||
            (file->f_mode & FMODE_WRITE && s->dma_dac.subdivision))
            return -EINVAL;
        get_user_ret(val, p, -EFAULT);
        if (val != 1 && val != 2 && val != 4)
            return -EINVAL;
        if (file->f_mode & FMODE_READ)
            s->dma_adc.subdivision = val;
        if (file->f_mode & FMODE_WRITE)
            s->dma_dac.subdivision = val;
        return 0;

    case SOUND_PCM_READ_RATE:
        return put_user((file->f_mode & FMODE_READ) ? s->rateadc : s->ratedac, p);

    case SOUND_PCM_READ_CHANNELS:
        return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (ESS_FMT_STEREO << ESS_ADC_SHIFT)
                       : (ESS_FMT_STEREO << ESS_DAC_SHIFT))) ? 2 : 1, p);

    case SOUND_PCM_READ_BITS:
        return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (ESS_FMT_16BIT << ESS_ADC_SHIFT)
                       : (ESS_FMT_16BIT << ESS_DAC_SHIFT))) ? 16 : 8, p);

    case SOUND_PCM_WRITE_FILTER:
    case SNDCTL_DSP_SETSYNCRO:
    case SOUND_PCM_READ_FILTER:
        return -EINVAL;

    }
    return -EINVAL;
}
1903
/*
 * Allocate the PCI-consistent DMA ring for one direction of a channel.
 * Returns nonzero on failure (note: NOT a negative errno).
 */
static int
allocate_dmabuf(struct pci_dev *pci_dev, struct dmabuf *db)
{
    int order;

    DPRINTK(DPSTR,"allocating for dmabuf %p\n", db);

    /*
     * alloc as big a chunk as we can, start with
     * 64k 'cause we're insane. based on order cause
     * the amazingly complicated prog_dmabuf wants it.
     *
     * pci_alloc_consistent guarantees that it won't cross a natural
     * boundary; the m3 hardware can't have dma cross a 64k bus
     * address boundary.
     */
    for (order = 16-PAGE_SHIFT; order >= 1; order--) {
        db->rawbuf = pci_alloc_consistent(pci_dev, PAGE_SIZE << order,
                &(db->handle));
        if(db->rawbuf)
            break;
    }

    if (!db->rawbuf)
        return 1;

    DPRINTK(DPSTR,"allocated %ld (%d) bytes at %p\n",
            PAGE_SIZE<<order, order, db->rawbuf);

    {
        /* mark the pages reserved so they may be mmap'ed to userspace */
        struct page *page, *pend;

        pend = virt_to_page(db->rawbuf + (PAGE_SIZE << order) - 1);
        for (page = virt_to_page(db->rawbuf); page <= pend; page++)
            SetPageReserved(page);
    }


    db->buforder = order;
    db->ready = 0;
    db->mapped = 0;

    return 0;
}
1948
1949static void
1950nuke_lists(struct m3_card *card, struct dmabuf *db)
1951{
1952 m3_remove_list(card, &(card->dma_list), db->dma_index);
1953 m3_remove_list(card, &(card->msrc_list), db->msrc_index);
1954 db->in_lists = 0;
1955}
1956
1957static void
1958free_dmabuf(struct pci_dev *pci_dev, struct dmabuf *db)
1959{
1960 if(db->rawbuf == NULL)
1961 return;
1962
1963 DPRINTK(DPSTR,"freeing %p from dmabuf %p\n",db->rawbuf, db);
1964
1965 {
1966 struct page *page, *pend;
1967 pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
1968 for (page = virt_to_page(db->rawbuf); page <= pend; page++)
1969 ClearPageReserved(page);
1970 }
1971
1972
1973 pci_free_consistent(pci_dev, PAGE_SIZE << db->buforder,
1974 db->rawbuf, db->handle);
1975
1976 db->rawbuf = NULL;
1977 db->buforder = 0;
1978 db->mapped = 0;
1979 db->ready = 0;
1980}
1981
1982static int m3_open(struct inode *inode, struct file *file)
1983{
1984 unsigned int minor = iminor(inode);
1985 struct m3_card *c;
1986 struct m3_state *s = NULL;
1987 int i;
1988 unsigned char fmtm = ~0, fmts = 0;
1989 unsigned long flags;
1990
1991 /*
1992 * Scan the cards and find the channel. We only
1993 * do this at open time so it is ok
1994 */
1995 for(c = devs ; c != NULL ; c = c->next) {
1996
1997 for(i=0;i<NR_DSPS;i++) {
1998
1999 if(c->channels[i].dev_audio < 0)
2000 continue;
2001 if((c->channels[i].dev_audio ^ minor) & ~0xf)
2002 continue;
2003
2004 s = &c->channels[i];
2005 break;
2006 }
2007 }
2008
2009 if (!s)
2010 return -ENODEV;
2011
2012 VALIDATE_STATE(s);
2013
2014 file->private_data = s;
2015
2016 /* wait for device to become free */
2017 mutex_lock(&s->open_mutex);
2018 while (s->open_mode & file->f_mode) {
2019 if (file->f_flags & O_NONBLOCK) {
2020 mutex_unlock(&s->open_mutex);
2021 return -EWOULDBLOCK;
2022 }
2023 mutex_unlock(&s->open_mutex);
2024 interruptible_sleep_on(&s->open_wait);
2025 if (signal_pending(current))
2026 return -ERESTARTSYS;
2027 mutex_lock(&s->open_mutex);
2028 }
2029
2030 spin_lock_irqsave(&c->lock, flags);
2031
2032 if (file->f_mode & FMODE_READ) {
2033 fmtm &= ~((ESS_FMT_STEREO | ESS_FMT_16BIT) << ESS_ADC_SHIFT);
2034 if ((minor & 0xf) == SND_DEV_DSP16)
2035 fmts |= ESS_FMT_16BIT << ESS_ADC_SHIFT;
2036
2037 s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0;
2038 set_adc_rate(s, 8000);
2039 }
2040 if (file->f_mode & FMODE_WRITE) {
2041 fmtm &= ~((ESS_FMT_STEREO | ESS_FMT_16BIT) << ESS_DAC_SHIFT);
2042 if ((minor & 0xf) == SND_DEV_DSP16)
2043 fmts |= ESS_FMT_16BIT << ESS_DAC_SHIFT;
2044
2045 s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0;
2046 set_dac_rate(s, 8000);
2047 }
2048 set_fmt(s, fmtm, fmts);
2049 s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
2050
2051 mutex_unlock(&s->open_mutex);
2052 spin_unlock_irqrestore(&c->lock, flags);
2053 return nonseekable_open(inode, file);
2054}
2055
/*
 * OSS release(): drain pending playback, pull the channel's buffers
 * out of the assp client lists, and mark the direction(s) free again.
 */
static int m3_release(struct inode *inode, struct file *file)
{
    struct m3_state *s = (struct m3_state *)file->private_data;
    struct m3_card *card=s->card;
    unsigned long flags;

    VALIDATE_STATE(s);
    /* let queued playback data finish before tearing the dac down */
    if (file->f_mode & FMODE_WRITE)
        drain_dac(s, file->f_flags & O_NONBLOCK);

    mutex_lock(&s->open_mutex);
    spin_lock_irqsave(&card->lock, flags);

    if (file->f_mode & FMODE_WRITE) {
        stop_dac(s);
        if(s->dma_dac.in_lists) {
            m3_remove_list(s->card, &(s->card->mixer_list), s->dma_dac.mixer_index);
            nuke_lists(s->card, &(s->dma_dac));
        }
    }
    if (file->f_mode & FMODE_READ) {
        stop_adc(s);
        if(s->dma_adc.in_lists) {
            m3_remove_list(s->card, &(s->card->adc1_list), s->dma_adc.adc1_index);
            nuke_lists(s->card, &(s->dma_adc));
        }
    }

    /* release the direction bits this open held */
    s->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);

    spin_unlock_irqrestore(&card->lock, flags);
    mutex_unlock(&s->open_mutex);
    /* wake anyone blocked in m3_open() waiting for this channel */
    wake_up(&s->open_wait);

    return 0;
}
2092
/*
 * Wait for the ac97 serial bus to be free.
 * return nonzero if the bus is still busy.
 */
static int m3_ac97_wait(struct m3_card *card)
{
    int i = 10000;

    /* poll the busy bit; i only reaches -1 if every poll saw busy */
    while( (m3_inb(card, 0x30) & 1) && i--) ;

    /*
     * BUGFIX: the old "return i == 0" reported the bus FREE on a real
     * timeout (the post-decrement leaves i at -1) and BUSY when the
     * bus happened to come free on the very last poll (i == 0).
     * Timeout is exactly i < 0.
     */
    return i < 0;
}
2105
2106static u16 m3_ac97_read(struct ac97_codec *codec, u8 reg)
2107{
2108 u16 ret = 0;
2109 struct m3_card *card = codec->private_data;
2110
2111 spin_lock(&card->ac97_lock);
2112
2113 if(m3_ac97_wait(card)) {
2114 printk(KERN_ERR PFX "serial bus busy reading reg 0x%x\n",reg);
2115 goto out;
2116 }
2117
2118 m3_outb(card, 0x80 | (reg & 0x7f), 0x30);
2119
2120 if(m3_ac97_wait(card)) {
2121 printk(KERN_ERR PFX "serial bus busy finishing read reg 0x%x\n",reg);
2122 goto out;
2123 }
2124
2125 ret = m3_inw(card, 0x32);
2126 DPRINTK(DPCRAP,"reading 0x%04x from 0x%02x\n",ret, reg);
2127
2128out:
2129 spin_unlock(&card->ac97_lock);
2130 return ret;
2131}
2132
2133static void m3_ac97_write(struct ac97_codec *codec, u8 reg, u16 val)
2134{
2135 struct m3_card *card = codec->private_data;
2136
2137 spin_lock(&card->ac97_lock);
2138
2139 if(m3_ac97_wait(card)) {
2140 printk(KERN_ERR PFX "serial bus busy writing 0x%x to 0x%x\n",val, reg);
2141 goto out;
2142 }
2143 DPRINTK(DPCRAP,"writing 0x%04x to 0x%02x\n", val, reg);
2144
2145 m3_outw(card, val, 0x32);
2146 m3_outb(card, reg & 0x7f, 0x30);
2147out:
2148 spin_unlock(&card->ac97_lock);
2149}
2150/* OSS /dev/mixer file operation methods */
2151static int m3_open_mixdev(struct inode *inode, struct file *file)
2152{
2153 unsigned int minor = iminor(inode);
2154 struct m3_card *card = devs;
2155
2156 for (card = devs; card != NULL; card = card->next) {
2157 if((card->ac97 != NULL) && (card->ac97->dev_mixer == minor))
2158 break;
2159 }
2160
2161 if (!card) {
2162 return -ENODEV;
2163 }
2164
2165 file->private_data = card->ac97;
2166
2167 return nonseekable_open(inode, file);
2168}
2169
/* nothing to tear down: the codec lives until the card is removed */
static int m3_release_mixdev(struct inode *inode, struct file *file)
{
    return 0;
}
2174
2175static int m3_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
2176 unsigned long arg)
2177{
2178 struct ac97_codec *codec = (struct ac97_codec *)file->private_data;
2179
2180 return codec->mixer_ioctl(codec, cmd, arg);
2181}
2182
/* file operations for the OSS /dev/mixer device */
static struct file_operations m3_mixer_fops = {
    .owner = THIS_MODULE,
    .llseek = no_llseek,
    .ioctl = m3_ioctl_mixdev,
    .open = m3_open_mixdev,
    .release = m3_release_mixdev,
};
2190
2191static void remote_codec_config(int io, int isremote)
2192{
2193 isremote = isremote ? 1 : 0;
2194
2195 outw( (inw(io + RING_BUS_CTRL_B) & ~SECOND_CODEC_ID_MASK) | isremote,
2196 io + RING_BUS_CTRL_B);
2197 outw( (inw(io + SDO_OUT_DEST_CTRL) & ~COMMAND_ADDR_OUT) | isremote,
2198 io + SDO_OUT_DEST_CTRL);
2199 outw( (inw(io + SDO_IN_DEST_CTRL) & ~STATUS_ADDR_IN) | isremote,
2200 io + SDO_IN_DEST_CTRL);
2201}
2202
2203/*
2204 * hack, returns non zero on err
2205 */
2206static int try_read_vendor(struct m3_card *card)
2207{
2208 u16 ret;
2209
2210 if(m3_ac97_wait(card))
2211 return 1;
2212
2213 m3_outb(card, 0x80 | (AC97_VENDOR_ID1 & 0x7f), 0x30);
2214
2215 if(m3_ac97_wait(card))
2216 return 1;
2217
2218 ret = m3_inw(card, 0x32);
2219
2220 return (ret == 0) || (ret == 0xffff);
2221}
2222
/*
 * Cold-reset the AC'97 codec via the chip's GPIO pin, retrying with
 * progressively longer delays until a vendor-ID read succeeds.
 * busywait selects mdelay() (e.g. from resume, no scheduler) over
 * schedule_timeout().
 */
static void m3_codec_reset(struct m3_card *card, int busywait)
{
    u16 dir;
    int delay1 = 0, delay2 = 0, i;
    int io = card->iobase;

    switch (card->card_type) {
        /*
         * the onboard codec on the allegro seems
         * to want to wait a very long time before
         * coming back to life
         */
        case ESS_ALLEGRO:
            delay1 = 50;
            delay2 = 800;
            break;
        case ESS_MAESTRO3:
        case ESS_MAESTRO3HW:
            delay1 = 20;
            delay2 = 500;
            break;
    }

    for(i = 0; i < 5; i ++) {
        dir = inw(io + GPIO_DIRECTION);
        dir |= 0x10; /* assuming pci bus master? */

        /* talk to the primary codec while resetting */
        remote_codec_config(io, 0);

        /* park the ac-link while the reset line is driven */
        outw(IO_SRAM_ENABLE, io + RING_BUS_CTRL_A);
        udelay(20);

        /* drive the codec reset GPIO low for delay1 ms ... */
        outw(dir & ~GPO_PRIMARY_AC97 , io + GPIO_DIRECTION);
        outw(~GPO_PRIMARY_AC97 , io + GPIO_MASK);
        outw(0, io + GPIO_DATA);
        outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);

        if(busywait)  {
            mdelay(delay1);
        } else {
            set_current_state(TASK_UNINTERRUPTIBLE);
            schedule_timeout((delay1 * HZ) / 1000);
        }

        /* ... then release it and wait delay2 ms for the codec */
        outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
        udelay(5);
        /* ok, bring back the ac-link */
        outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
        outw(~0, io + GPIO_MASK);

        if(busywait) {
            mdelay(delay2);
        } else {
            set_current_state(TASK_UNINTERRUPTIBLE);
            schedule_timeout((delay2 * HZ) / 1000);
        }
        if(! try_read_vendor(card))
            break;

        /* codec still silent: back off and try again */
        delay1 += 10;
        delay2 += 100;

        DPRINTK(DPMOD, "retrying codec reset with delays of %d and %d ms\n",
                delay1, delay2);
    }

#if 0
    /* more gung-ho reset that doesn't
     * seem to work anywhere :)
     */
    tmp = inw(io + RING_BUS_CTRL_A);
    outw(RAC_SDFS_ENABLE|LAC_SDFS_ENABLE, io + RING_BUS_CTRL_A);
    mdelay(20);
    outw(tmp, io + RING_BUS_CTRL_A);
    mdelay(50);
#endif
}
2300
2301static int __devinit m3_codec_install(struct m3_card *card)
2302{
2303 struct ac97_codec *codec;
2304
2305 if ((codec = ac97_alloc_codec()) == NULL)
2306 return -ENOMEM;
2307
2308 codec->private_data = card;
2309 codec->codec_read = m3_ac97_read;
2310 codec->codec_write = m3_ac97_write;
2311 /* someday we should support secondary codecs.. */
2312 codec->id = 0;
2313
2314 if (ac97_probe_codec(codec) == 0) {
2315 printk(KERN_ERR PFX "codec probe failed\n");
2316 ac97_release_codec(codec);
2317 return -1;
2318 }
2319
2320 if ((codec->dev_mixer = register_sound_mixer(&m3_mixer_fops, -1)) < 0) {
2321 printk(KERN_ERR PFX "couldn't register mixer!\n");
2322 ac97_release_codec(codec);
2323 return -1;
2324 }
2325
2326 card->ac97 = codec;
2327
2328 return 0;
2329}
2330
2331
#define MINISRC_LPF_LEN 10
/* low-pass filter coefficients written into the minisrc code image by
 * m3_assp_init(); presumably fixed-point taps from the vendor firmware
 * - TODO confirm */
static u16 minisrc_lpf[MINISRC_LPF_LEN] = {
    0X0743, 0X1104, 0X0A4C, 0XF88D, 0X242C,
    0X1023, 0X1AA9, 0X0B60, 0XEFDD, 0X186F
};
/*
 * Load and initialize the assp DSP: clear its data memory, upload the
 * kernel and minisrc firmware images, install the LPF coefficients,
 * and set up the host-side client list bookkeeping.
 */
static void m3_assp_init(struct m3_card *card)
{
    int i;

    /* zero kernel data */
    for(i = 0 ; i < (REV_B_DATA_MEMORY_UNIT_LENGTH * NUM_UNITS_KERNEL_DATA) / 2; i++)
        m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
                KDATA_BASE_ADDR + i, 0);

    /* zero mixer data?
     * NOTE(review): this reuses NUM_UNITS_KERNEL_DATA for the length;
     * looks like a copy of the loop above - confirm the region size */
    for(i = 0 ; i < (REV_B_DATA_MEMORY_UNIT_LENGTH * NUM_UNITS_KERNEL_DATA) / 2; i++)
        m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
                KDATA_BASE_ADDR2 + i, 0);

    /* init dma pointer */
    m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
            KDATA_CURRENT_DMA,
            KDATA_DMA_XFER0);

    /* write kernel into code memory.. */
    for(i = 0 ; i < sizeof(assp_kernel_image) / 2; i++) {
        m3_assp_write(card, MEMTYPE_INTERNAL_CODE,
                REV_B_CODE_MEMORY_BEGIN + i,
                assp_kernel_image[i]);
    }

    /*
     * We only have this one client and we know that 0x400
     * is free in our kernel's mem map, so lets just
     * drop it there.  It seems that the minisrc doesn't
     * need vectors, so we won't bother with them..
     */
    for(i = 0 ; i < sizeof(assp_minisrc_image) / 2; i++) {
        m3_assp_write(card, MEMTYPE_INTERNAL_CODE,
                0x400 + i,
                assp_minisrc_image[i]);
    }

    /*
     * write the coefficients for the low pass filter?
     */
    for(i = 0; i < MINISRC_LPF_LEN ; i++) {
        m3_assp_write(card, MEMTYPE_INTERNAL_CODE,
                0x400 + MINISRC_COEF_LOC + i,
                minisrc_lpf[i]);
    }

    /* terminator word after the coefficient table */
    m3_assp_write(card, MEMTYPE_INTERNAL_CODE,
            0x400 + MINISRC_COEF_LOC + MINISRC_LPF_LEN,
            0x8000);

    /*
     * the minisrc is the only thing on
     * our task list..
     */
    m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
            KDATA_TASK0,
            0x400);

    /*
     * init the mixer number..
     */

    m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
            KDATA_MIXER_TASK_NUMBER,0);

    /*
     * EXTREME KERNEL MASTER VOLUME
     */
    m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
            KDATA_DAC_LEFT_VOLUME, ARB_VOLUME);
    m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
            KDATA_DAC_RIGHT_VOLUME, ARB_VOLUME);

    /* host-side descriptors of the DSP's client lists */
    card->mixer_list.mem_addr = KDATA_MIXER_XFER0;
    card->mixer_list.max = MAX_VIRTUAL_MIXER_CHANNELS;
    card->adc1_list.mem_addr = KDATA_ADC1_XFER0;
    card->adc1_list.max = MAX_VIRTUAL_ADC1_CHANNELS;
    card->dma_list.mem_addr = KDATA_DMA_XFER0;
    card->dma_list.max = MAX_VIRTUAL_DMA_CHANNELS;
    card->msrc_list.mem_addr = KDATA_INSTANCE0_MINISRC;
    card->msrc_list.max = MAX_INSTANCE_MINISRC;
}
2420
2421static int setup_msrc(struct m3_card *card,
2422 struct assp_instance *inst, int index)
2423{
2424 int data_bytes = 2 * ( MINISRC_TMP_BUFFER_SIZE / 2 +
2425 MINISRC_IN_BUFFER_SIZE / 2 +
2426 1 + MINISRC_OUT_BUFFER_SIZE / 2 + 1 );
2427 int address, i;
2428
2429 /*
2430 * the revb memory map has 0x1100 through 0x1c00
2431 * free.
2432 */
2433
2434 /*
2435 * align instance address to 256 bytes so that it's
2436 * shifted list address is aligned.
2437 * list address = (mem address >> 1) >> 7;
2438 */
2439 data_bytes = (data_bytes + 255) & ~255;
2440 address = 0x1100 + ((data_bytes/2) * index);
2441
2442 if((address + (data_bytes/2)) >= 0x1c00) {
2443 printk(KERN_ERR PFX "no memory for %d bytes at ind %d (addr 0x%x)\n",
2444 data_bytes, index, address);
2445 return -1;
2446 }
2447
2448 for(i = 0; i < data_bytes/2 ; i++)
2449 m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
2450 address + i, 0);
2451
2452 inst->code = 0x400;
2453 inst->data = address;
2454
2455 return 0;
2456}
2457
2458static int m3_assp_client_init(struct m3_state *s)
2459{
2460 setup_msrc(s->card, &(s->dac_inst), s->index * 2);
2461 setup_msrc(s->card, &(s->adc_inst), (s->index * 2) + 1);
2462
2463 return 0;
2464}
2465
/*
 * Toggle the external amplifier via the GPIO pin encoded in
 * polarity_port (low byte nibbles: pin number and active polarity).
 * No-op unless the external_amp module option is set.
 */
static void m3_amp_enable(struct m3_card *card, int enable)
{
    /*
     * this works for the reference board, have to find
     * out about others
     *
     * this needs more magic for 4 speaker, but..
     */
    int io = card->iobase;
    u16 gpo, polarity_port, polarity;

    if(!external_amp)
        return;

    if (gpio_pin >= 0 && gpio_pin <= 15) {
        /* user-supplied pin overrides the per-board default */
        polarity_port = 0x1000 + (0x100 * gpio_pin);
    } else {
        switch (card->card_type) {
            case ESS_ALLEGRO:
                polarity_port = 0x1800;
                break;
            default:
                polarity_port = 0x1100;
                /* Panasonic toughbook CF72 has to be different... */
                if(card->pcidev->subsystem_vendor == 0x10F7 && card->pcidev->subsystem_device == 0x833D)
                    polarity_port = 0x1D00;
                break;
        }
    }

    /* decode pin number (bits 8-11) and polarity (bits 12+) */
    gpo = (polarity_port >> 8) & 0x0F;
    polarity = polarity_port >> 12;
    if ( enable )
        polarity = !polarity;
    polarity = polarity << gpo;
    gpo = 1 << gpo;

    /* unmask only our pin, drive it, then mask everything again */
    outw(~gpo , io + GPIO_MASK);

    outw( inw(io + GPIO_DIRECTION) | gpo ,
            io + GPIO_DIRECTION);

    outw( (GPO_SECONDARY_AC97 | GPO_PRIMARY_AC97 | polarity) ,
            io + GPIO_DATA);

    outw(0xffff , io + GPIO_MASK);
}
2513
/*
 * One-time PCI/clock configuration of the chip: select clock sources
 * and dividers appropriate for the card generation, then reset and
 * start the assp DSP.  Always returns 0.
 */
static int
maestro_config(struct m3_card *card)
{
    struct pci_dev *pcidev = card->pcidev;
    u32 n;
    u8 t; /* makes as much sense as 'n', no? */

    /* power management / clock-divide setup in PCI config space */
    pci_read_config_dword(pcidev, PCI_ALLEGRO_CONFIG, &n);
    n &= REDUCED_DEBOUNCE;
    n |= PM_CTRL_ENABLE | CLK_DIV_BY_49 | USE_PCI_TIMING;
    pci_write_config_dword(pcidev, PCI_ALLEGRO_CONFIG, n);

    /* hold the DSP in reset while the clocks are switched */
    outb(RESET_ASSP, card->iobase + ASSP_CONTROL_B);
    pci_read_config_dword(pcidev, PCI_ALLEGRO_CONFIG, &n);
    n &= ~INT_CLK_SELECT;
    if(card->card_type >= ESS_MAESTRO3) {
        /* maestro3 and later run the internal clock off-PCI */
        n &= ~INT_CLK_MULT_ENABLE;
        n |= INT_CLK_SRC_NOT_PCI;
    }
    n &= ~( CLK_MULT_MODE_SELECT | CLK_MULT_MODE_SELECT_2 );
    pci_write_config_dword(pcidev, PCI_ALLEGRO_CONFIG, n);

    if(card->card_type <= ESS_ALLEGRO) {
        pci_read_config_dword(pcidev, PCI_USER_CONFIG, &n);
        n |= IN_CLK_12MHZ_SELECT;
        pci_write_config_dword(pcidev, PCI_USER_CONFIG, n);
    }

    /* 49MHz DSP clock with wait-state enable */
    t = inb(card->iobase + ASSP_CONTROL_A);
    t &= ~( DSP_CLK_36MHZ_SELECT  | ASSP_CLK_49MHZ_SELECT);
    t |= ASSP_CLK_49MHZ_SELECT;
    t |= ASSP_0_WS_ENABLE;
    outb(t, card->iobase + ASSP_CONTROL_A);

    /* release the DSP from reset */
    outb(RUN_ASSP, card->iobase + ASSP_CONTROL_B);

    return 0;
}
2552
2553static void m3_enable_ints(struct m3_card *card)
2554{
2555 unsigned long io = card->iobase;
2556
2557 outw(ASSP_INT_ENABLE, io + HOST_INT_CTRL);
2558 outb(inb(io + ASSP_CONTROL_C) | ASSP_HOST_INT_ENABLE,
2559 io + ASSP_CONTROL_C);
2560}
2561
/* file operations for the OSS /dev/dsp devices exported by this driver */
static struct file_operations m3_audio_fops = {
    .owner   = THIS_MODULE,
    .llseek  = no_llseek,
    .read    = m3_read,
    .write   = m3_write,
    .poll    = m3_poll,
    .ioctl   = m3_ioctl,
    .mmap    = m3_mmap,
    .open    = m3_open,
    .release = m3_release,
};
2573
#ifdef CONFIG_PM
/*
 * Reserve a host buffer large enough to save the assp's code and data
 * memories across suspend.  Returns nonzero on allocation failure.
 */
static int alloc_dsp_suspendmem(struct m3_card *card)
{
    int len = sizeof(u16) * (REV_B_CODE_MEMORY_LENGTH + REV_B_DATA_MEMORY_LENGTH);

    if( (card->suspend_mem = vmalloc(len)) == NULL)
        return 1;

    return 0;
}

#else
/* no PM support: the "allocation" trivially succeeds */
#define alloc_dsp_suspendmem(args...) 0
#endif
2588
2589/*
2590 * great day! this function is ugly as hell.
2591 */
2592static int __devinit m3_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
2593{
2594 u32 n;
2595 int i;
2596 struct m3_card *card = NULL;
2597 int ret = 0;
2598 int card_type = pci_id->driver_data;
2599
2600 DPRINTK(DPMOD, "in maestro_install\n");
2601
2602 if (pci_enable_device(pci_dev))
2603 return -EIO;
2604
2605 if (pci_set_dma_mask(pci_dev, M3_PCI_DMA_MASK)) {
2606 printk(KERN_ERR PFX "architecture does not support limiting to 28bit PCI bus addresses\n");
2607 return -ENODEV;
2608 }
2609
2610 pci_set_master(pci_dev);
2611
2612 if( (card = kmalloc(sizeof(struct m3_card), GFP_KERNEL)) == NULL) {
2613 printk(KERN_WARNING PFX "out of memory\n");
2614 return -ENOMEM;
2615 }
2616 memset(card, 0, sizeof(struct m3_card));
2617 card->pcidev = pci_dev;
2618 init_waitqueue_head(&card->suspend_queue);
2619
2620 if ( ! request_region(pci_resource_start(pci_dev, 0),
2621 pci_resource_len (pci_dev, 0), M3_MODULE_NAME)) {
2622
2623 printk(KERN_WARNING PFX "unable to reserve I/O space.\n");
2624 ret = -EBUSY;
2625 goto out;
2626 }
2627
2628 card->iobase = pci_resource_start(pci_dev, 0);
2629
2630 if(alloc_dsp_suspendmem(card)) {
2631 printk(KERN_WARNING PFX "couldn't alloc %d bytes for saving dsp state on suspend\n",
2632 REV_B_CODE_MEMORY_LENGTH + REV_B_DATA_MEMORY_LENGTH);
2633 ret = -ENOMEM;
2634 goto out;
2635 }
2636
2637 card->card_type = card_type;
2638 card->irq = pci_dev->irq;
2639 card->next = devs;
2640 card->magic = M3_CARD_MAGIC;
2641 spin_lock_init(&card->lock);
2642 spin_lock_init(&card->ac97_lock);
2643 devs = card;
2644 for(i = 0; i<NR_DSPS; i++) {
2645 struct m3_state *s = &(card->channels[i]);
2646 s->dev_audio = -1;
2647 }
2648
2649 printk(KERN_INFO PFX "Configuring ESS %s found at IO 0x%04X IRQ %d\n",
2650 card_names[card->card_type], card->iobase, card->irq);
2651
2652 pci_read_config_dword(pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &n);
2653 printk(KERN_INFO PFX " subvendor id: 0x%08x\n",n);
2654
2655 maestro_config(card);
2656 m3_assp_halt(card);
2657
2658 m3_codec_reset(card, 0);
2659
2660 if(m3_codec_install(card)) {
2661 ret = -EIO;
2662 goto out;
2663 }
2664
2665 m3_assp_init(card);
2666 m3_amp_enable(card, 1);
2667
2668 for(i=0;i<NR_DSPS;i++) {
2669 struct m3_state *s=&card->channels[i];
2670
2671 s->index = i;
2672
2673 s->card = card;
2674 init_waitqueue_head(&s->dma_adc.wait);
2675 init_waitqueue_head(&s->dma_dac.wait);
2676 init_waitqueue_head(&s->open_wait);
2677 mutex_init(&(s->open_mutex));
2678 s->magic = M3_STATE_MAGIC;
2679
2680 m3_assp_client_init(s);
2681
2682 if(s->dma_adc.ready || s->dma_dac.ready || s->dma_adc.rawbuf)
2683 printk(KERN_WARNING PFX "initing a dsp device that is already in use?\n");
2684 /* register devices */
2685 if ((s->dev_audio = register_sound_dsp(&m3_audio_fops, -1)) < 0) {
2686 break;
2687 }
2688
2689 if( allocate_dmabuf(card->pcidev, &(s->dma_adc)) ||
2690 allocate_dmabuf(card->pcidev, &(s->dma_dac))) {
2691 ret = -ENOMEM;
2692 goto out;
2693 }
2694 }
2695
2696 if(request_irq(card->irq, m3_interrupt, IRQF_SHARED, card_names[card->card_type], card)) {
2697
2698 printk(KERN_ERR PFX "unable to allocate irq %d,\n", card->irq);
2699
2700 ret = -EIO;
2701 goto out;
2702 }
2703
2704 pci_set_drvdata(pci_dev, card);
2705
2706 m3_enable_ints(card);
2707 m3_assp_continue(card);
2708
2709out:
2710 if(ret) {
2711 if(card->iobase)
2712 release_region(pci_resource_start(pci_dev, 0), pci_resource_len(pci_dev, 0));
2713 vfree(card->suspend_mem);
2714 if(card->ac97) {
2715 unregister_sound_mixer(card->ac97->dev_mixer);
2716 kfree(card->ac97);
2717 }
2718 for(i=0;i<NR_DSPS;i++)
2719 {
2720 struct m3_state *s = &card->channels[i];
2721 if(s->dev_audio != -1)
2722 unregister_sound_dsp(s->dev_audio);
2723 }
2724 kfree(card);
2725 }
2726
2727 return ret;
2728}
2729
2730static void m3_remove(struct pci_dev *pci_dev)
2731{
2732 struct m3_card *card;
2733
2734 unregister_reboot_notifier(&m3_reboot_nb);
2735
2736 while ((card = devs)) {
2737 int i;
2738 devs = devs->next;
2739
2740 free_irq(card->irq, card);
2741 unregister_sound_mixer(card->ac97->dev_mixer);
2742 kfree(card->ac97);
2743
2744 for(i=0;i<NR_DSPS;i++)
2745 {
2746 struct m3_state *s = &card->channels[i];
2747 if(s->dev_audio < 0)
2748 continue;
2749
2750 unregister_sound_dsp(s->dev_audio);
2751 free_dmabuf(card->pcidev, &s->dma_adc);
2752 free_dmabuf(card->pcidev, &s->dma_dac);
2753 }
2754
2755 release_region(card->iobase, 256);
2756 vfree(card->suspend_mem);
2757 kfree(card);
2758 }
2759 devs = NULL;
2760}
2761
2762/*
2763 * some bioses like the sound chip to be powered down
2764 * at shutdown. We're just calling _suspend to
2765 * achieve that..
2766 */
2767static int m3_notifier(struct notifier_block *nb, unsigned long event, void *buf)
2768{
2769 struct m3_card *card;
2770
2771 DPRINTK(DPMOD, "notifier suspending all cards\n");
2772
2773 for(card = devs; card != NULL; card = card->next) {
2774 if(!card->in_suspend)
2775 m3_suspend(card->pcidev, PMSG_SUSPEND); /* XXX legal? */
2776 }
2777 return 0;
2778}
2779
/*
 * PM suspend: stop all DACs/ADCs, halt the ASSP, snapshot its code and
 * data memory into card->suspend_mem (allocated at probe time when
 * CONFIG_PM is set), and power the chip down.  m3_resume() reverses
 * each of these steps.  The whole sequence runs under card->lock.
 */
static int m3_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
    unsigned long flags;
    int i;
    struct m3_card *card = pci_get_drvdata(pci_dev);

    /* must be a better way.. */
    spin_lock_irqsave(&card->lock, flags);

    DPRINTK(DPMOD, "pm in dev %p\n",card);

    for(i=0;i<NR_DSPS;i++) {
        struct m3_state *s = &card->channels[i];

        /* -1 == channel was never registered, nothing to stop */
        if(s->dev_audio == -1)
            continue;

        DPRINTK(DPMOD, "stop_adc/dac() device %d\n",i);
        stop_dac(s);
        stop_adc(s);
    }

    mdelay(10); /* give the assp a chance to idle.. */

    m3_assp_halt(card);

    /* suspend_mem is NULL only if alloc_dsp_suspendmem() was the
     * no-op !CONFIG_PM stub; guard so we don't dereference it */
    if(card->suspend_mem) {
        int index = 0;

        DPRINTK(DPMOD, "saving code\n");
        for(i = REV_B_CODE_MEMORY_BEGIN ; i <= REV_B_CODE_MEMORY_END; i++)
            card->suspend_mem[index++] =
                m3_assp_read(card, MEMTYPE_INTERNAL_CODE, i);
        DPRINTK(DPMOD, "saving data\n");
        for(i = REV_B_DATA_MEMORY_BEGIN ; i <= REV_B_DATA_MEMORY_END; i++)
            card->suspend_mem[index++] =
                m3_assp_read(card, MEMTYPE_INTERNAL_DATA, i);
    }

    DPRINTK(DPMOD, "powering down apci regs\n");
    /* 0x54/0x56 are the ACPI power-control registers; all-ones powers down */
    m3_outw(card, 0xffff, 0x54);
    m3_outw(card, 0xffff, 0x56);

    /* readers of in_suspend (check_suspend()) will now block until resume */
    card->in_suspend = 1;

    spin_unlock_irqrestore(&card->lock, flags);

    return 0;
}
2829
/*
 * PM resume: power the chip back up, reprogram it from scratch, restore
 * the ASSP code/data image saved by m3_suspend(), push the cached mixer
 * settings back into the codec, restart the active channels, and wake
 * anyone sleeping in check_suspend().
 */
static int m3_resume(struct pci_dev *pci_dev)
{
    unsigned long flags;
    int index;
    int i;
    struct m3_card *card = pci_get_drvdata(pci_dev);

    spin_lock_irqsave(&card->lock, flags);
    card->in_suspend = 0;

    DPRINTK(DPMOD, "resuming\n");

    /* first lets just bring everything back. .*/

    DPRINTK(DPMOD, "bringing power back on card 0x%p\n",card);
    /* clear the ACPI power-down registers written in m3_suspend() */
    m3_outw(card, 0, 0x54);
    m3_outw(card, 0, 0x56);

    DPRINTK(DPMOD, "restoring pci configs and reseting codec\n");
    maestro_config(card);
    m3_assp_halt(card);
    m3_codec_reset(card, 1);

    /* replay the ASSP image saved at suspend; index walks suspend_mem
     * in the same code-then-data order m3_suspend() filled it in */
    DPRINTK(DPMOD, "restoring dsp code card\n");
    index = 0;
    for(i = REV_B_CODE_MEMORY_BEGIN ; i <= REV_B_CODE_MEMORY_END; i++)
        m3_assp_write(card, MEMTYPE_INTERNAL_CODE, i,
            card->suspend_mem[index++]);
    for(i = REV_B_DATA_MEMORY_BEGIN ; i <= REV_B_DATA_MEMORY_END; i++)
        m3_assp_write(card, MEMTYPE_INTERNAL_DATA, i,
            card->suspend_mem[index++]);

    /* tell the dma engine to restart itself */
    m3_assp_write(card, MEMTYPE_INTERNAL_DATA,
        KDATA_DMA_ACTIVE, 0);

    DPRINTK(DPMOD, "resuming dsp\n");
    m3_assp_continue(card);

    DPRINTK(DPMOD, "enabling ints\n");
    m3_enable_ints(card);

    /* bring back the old school flavor */
    for(i = 0; i < SOUND_MIXER_NRDEVICES ; i++) {
        int state = card->ac97->mixer_state[i];
        if (!supported_mixer(card->ac97, i))
            continue;

        /* mixer_state packs right volume in the low byte, left above it */
        card->ac97->write_mixer(card->ac97, i,
            state & 0xff, (state >> 8) & 0xff);
    }

    m3_amp_enable(card, 1);

    /*
     * now we flip on the music
     */
    for(i=0;i<NR_DSPS;i++) {
        struct m3_state *s = &card->channels[i];
        if(s->dev_audio == -1)
            continue;
        /*
         * db->ready makes it so these guys can be
         * called unconditionally..
         */
        DPRINTK(DPMOD, "turning on dacs ind %d\n",i);
        start_dac(s);
        start_adc(s);
    }

    spin_unlock_irqrestore(&card->lock, flags);

    /*
     * all right, we think things are ready,
     * wake up people who were using the device
     * when we suspended
     */
    wake_up(&card->suspend_queue);

    return 0;
}
2911
MODULE_AUTHOR("Zach Brown <zab@zabbo.net>");
MODULE_DESCRIPTION("ESS Maestro3/Allegro Driver");
MODULE_LICENSE("GPL");

/* module load-time parameters (all read-only after load, perm 0) */
#ifdef M_DEBUG
module_param(debug, int, 0);
#endif
module_param(external_amp, int, 0);
module_param(gpio_pin, int, 0);
2921
/*
 * PCI driver glue: binds the ids in m3_id_table to the probe/remove
 * and power-management entry points defined above.
 */
static struct pci_driver m3_pci_driver = {
    .name	  = "ess_m3_audio",
    .id_table = m3_id_table,
    .probe	  = m3_probe,
    .remove	  = m3_remove,
    .suspend  = m3_suspend,
    .resume	  = m3_resume,
};
2930
2931static int __init m3_init_module(void)
2932{
2933 printk(KERN_INFO PFX "version " DRIVER_VERSION " built at " __TIME__ " " __DATE__ "\n");
2934
2935 if (register_reboot_notifier(&m3_reboot_nb)) {
2936 printk(KERN_WARNING PFX "reboot notifier registration failed\n");
2937 return -ENODEV; /* ? */
2938 }
2939
2940 if (pci_register_driver(&m3_pci_driver)) {
2941 unregister_reboot_notifier(&m3_reboot_nb);
2942 return -ENODEV;
2943 }
2944 return 0;
2945}
2946
/*
 * Module exit: unregistering the driver runs m3_remove(), which also
 * unregisters the reboot notifier and frees every card.
 */
static void __exit m3_cleanup_module(void)
{
    pci_unregister_driver(&m3_pci_driver);
}
2951
/* hook the module entry/exit points into the kernel module machinery */
module_init(m3_init_module);
module_exit(m3_cleanup_module);
2954
2955void check_suspend(struct m3_card *card)
2956{
2957 DECLARE_WAITQUEUE(wait, current);
2958
2959 if(!card->in_suspend)
2960 return;
2961
2962 card->in_suspend++;
2963 add_wait_queue(&card->suspend_queue, &wait);
2964 set_current_state(TASK_UNINTERRUPTIBLE);
2965 schedule();
2966 remove_wait_queue(&card->suspend_queue, &wait);
2967 set_current_state(TASK_RUNNING);
2968}
diff --git a/sound/oss/maestro3.h b/sound/oss/maestro3.h
deleted file mode 100644
index dde29862c572..000000000000
--- a/sound/oss/maestro3.h
+++ /dev/null
@@ -1,821 +0,0 @@
1/*
2 * ESS Technology allegro audio driver.
3 *
4 * Copyright (C) 1992-2000 Don Kim (don.kim@esstech.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 *
20 * Hacked for the maestro3 driver by zab
21 */
22
23// Allegro PCI configuration registers
24#define PCI_LEGACY_AUDIO_CTRL 0x40
25#define SOUND_BLASTER_ENABLE 0x00000001
26#define FM_SYNTHESIS_ENABLE 0x00000002
27#define GAME_PORT_ENABLE 0x00000004
28#define MPU401_IO_ENABLE 0x00000008
29#define MPU401_IRQ_ENABLE 0x00000010
30#define ALIAS_10BIT_IO 0x00000020
#define SB_DMA_MASK 0x000000C0
#define SB_DMA_0 0x00000040
/* NOTE(review): SB_DMA_1 has the same value as SB_DMA_0 (0x40); within a
 * two-bit field one would expect a distinct encoding here (0x80 is taken
 * by SB_DMA_R).  Presumably this mirrors the Allegro datasheet -- confirm
 * against the hardware documentation before relying on it. */
#define SB_DMA_1 0x00000040
#define SB_DMA_R 0x00000080
#define SB_DMA_3 0x000000C0
36#define SB_IRQ_MASK 0x00000700
37#define SB_IRQ_5 0x00000000
38#define SB_IRQ_7 0x00000100
39#define SB_IRQ_9 0x00000200
40#define SB_IRQ_10 0x00000300
41#define MIDI_IRQ_MASK 0x00003800
42#define SERIAL_IRQ_ENABLE 0x00004000
43#define DISABLE_LEGACY 0x00008000
44
45#define PCI_ALLEGRO_CONFIG 0x50
46#define SB_ADDR_240 0x00000004
47#define MPU_ADDR_MASK 0x00000018
48#define MPU_ADDR_330 0x00000000
49#define MPU_ADDR_300 0x00000008
50#define MPU_ADDR_320 0x00000010
51#define MPU_ADDR_340 0x00000018
52#define USE_PCI_TIMING 0x00000040
53#define POSTED_WRITE_ENABLE 0x00000080
54#define DMA_POLICY_MASK 0x00000700
55#define DMA_DDMA 0x00000000
56#define DMA_TDMA 0x00000100
57#define DMA_PCPCI 0x00000200
58#define DMA_WBDMA16 0x00000400
59#define DMA_WBDMA4 0x00000500
60#define DMA_WBDMA2 0x00000600
61#define DMA_WBDMA1 0x00000700
62#define DMA_SAFE_GUARD 0x00000800
63#define HI_PERF_GP_ENABLE 0x00001000
64#define PIC_SNOOP_MODE_0 0x00002000
65#define PIC_SNOOP_MODE_1 0x00004000
66#define SOUNDBLASTER_IRQ_MASK 0x00008000
67#define RING_IN_ENABLE 0x00010000
68#define SPDIF_TEST_MODE 0x00020000
69#define CLK_MULT_MODE_SELECT_2 0x00040000
70#define EEPROM_WRITE_ENABLE 0x00080000
71#define CODEC_DIR_IN 0x00100000
72#define HV_BUTTON_FROM_GD 0x00200000
73#define REDUCED_DEBOUNCE 0x00400000
74#define HV_CTRL_ENABLE 0x00800000
75#define SPDIF_ENABLE 0x01000000
76#define CLK_DIV_SELECT 0x06000000
77#define CLK_DIV_BY_48 0x00000000
78#define CLK_DIV_BY_49 0x02000000
79#define CLK_DIV_BY_50 0x04000000
80#define CLK_DIV_RESERVED 0x06000000
81#define PM_CTRL_ENABLE 0x08000000
82#define CLK_MULT_MODE_SELECT 0x30000000
83#define CLK_MULT_MODE_SHIFT 28
84#define CLK_MULT_MODE_0 0x00000000
85#define CLK_MULT_MODE_1 0x10000000
86#define CLK_MULT_MODE_2 0x20000000
87#define CLK_MULT_MODE_3 0x30000000
88#define INT_CLK_SELECT 0x40000000
89#define INT_CLK_MULT_RESET 0x80000000
90
91// M3
92#define INT_CLK_SRC_NOT_PCI 0x00100000
93#define INT_CLK_MULT_ENABLE 0x80000000
94
95#define PCI_ACPI_CONTROL 0x54
96#define PCI_ACPI_D0 0x00000000
97#define PCI_ACPI_D1 0xB4F70000
98#define PCI_ACPI_D2 0xB4F7B4F7
99
100#define PCI_USER_CONFIG 0x58
101#define EXT_PCI_MASTER_ENABLE 0x00000001
102#define SPDIF_OUT_SELECT 0x00000002
103#define TEST_PIN_DIR_CTRL 0x00000004
104#define AC97_CODEC_TEST 0x00000020
105#define TRI_STATE_BUFFER 0x00000080
106#define IN_CLK_12MHZ_SELECT 0x00000100
107#define MULTI_FUNC_DISABLE 0x00000200
108#define EXT_MASTER_PAIR_SEL 0x00000400
109#define PCI_MASTER_SUPPORT 0x00000800
110#define STOP_CLOCK_ENABLE 0x00001000
111#define EAPD_DRIVE_ENABLE 0x00002000
112#define REQ_TRI_STATE_ENABLE 0x00004000
113#define REQ_LOW_ENABLE 0x00008000
114#define MIDI_1_ENABLE 0x00010000
115#define MIDI_2_ENABLE 0x00020000
116#define SB_AUDIO_SYNC 0x00040000
117#define HV_CTRL_TEST 0x00100000
118#define SOUNDBLASTER_TEST 0x00400000
119
120#define PCI_USER_CONFIG_C 0x5C
121
122#define PCI_DDMA_CTRL 0x60
123#define DDMA_ENABLE 0x00000001
124
125
126// Allegro registers
127#define HOST_INT_CTRL 0x18
128#define SB_INT_ENABLE 0x0001
129#define MPU401_INT_ENABLE 0x0002
130#define ASSP_INT_ENABLE 0x0010
131#define RING_INT_ENABLE 0x0020
132#define HV_INT_ENABLE 0x0040
133#define CLKRUN_GEN_ENABLE 0x0100
134#define HV_CTRL_TO_PME 0x0400
135#define SOFTWARE_RESET_ENABLE 0x8000
136
137/*
138 * should be using the above defines, probably.
139 */
140#define REGB_ENABLE_RESET 0x01
141#define REGB_STOP_CLOCK 0x10
142
143#define HOST_INT_STATUS 0x1A
144#define SB_INT_PENDING 0x01
145#define MPU401_INT_PENDING 0x02
146#define ASSP_INT_PENDING 0x10
147#define RING_INT_PENDING 0x20
148#define HV_INT_PENDING 0x40
149
150#define HARDWARE_VOL_CTRL 0x1B
151#define SHADOW_MIX_REG_VOICE 0x1C
152#define HW_VOL_COUNTER_VOICE 0x1D
153#define SHADOW_MIX_REG_MASTER 0x1E
154#define HW_VOL_COUNTER_MASTER 0x1F
155
156#define CODEC_COMMAND 0x30
157#define CODEC_READ_B 0x80
158
159#define CODEC_STATUS 0x30
160#define CODEC_BUSY_B 0x01
161
162#define CODEC_DATA 0x32
163
164#define RING_BUS_CTRL_A 0x36
165#define RAC_PME_ENABLE 0x0100
166#define RAC_SDFS_ENABLE 0x0200
167#define LAC_PME_ENABLE 0x0400
168#define LAC_SDFS_ENABLE 0x0800
169#define SERIAL_AC_LINK_ENABLE 0x1000
170#define IO_SRAM_ENABLE 0x2000
171#define IIS_INPUT_ENABLE 0x8000
172
173#define RING_BUS_CTRL_B 0x38
174#define SECOND_CODEC_ID_MASK 0x0003
175#define SPDIF_FUNC_ENABLE 0x0010
176#define SECOND_AC_ENABLE 0x0020
177#define SB_MODULE_INTF_ENABLE 0x0040
178#define SSPE_ENABLE 0x0040
179#define M3I_DOCK_ENABLE 0x0080
180
181#define SDO_OUT_DEST_CTRL 0x3A
182#define COMMAND_ADDR_OUT 0x0003
183#define PCM_LR_OUT_LOCAL 0x0000
184#define PCM_LR_OUT_REMOTE 0x0004
185#define PCM_LR_OUT_MUTE 0x0008
186#define PCM_LR_OUT_BOTH 0x000C
187#define LINE1_DAC_OUT_LOCAL 0x0000
188#define LINE1_DAC_OUT_REMOTE 0x0010
189#define LINE1_DAC_OUT_MUTE 0x0020
190#define LINE1_DAC_OUT_BOTH 0x0030
191#define PCM_CLS_OUT_LOCAL 0x0000
192#define PCM_CLS_OUT_REMOTE 0x0040
193#define PCM_CLS_OUT_MUTE 0x0080
194#define PCM_CLS_OUT_BOTH 0x00C0
195#define PCM_RLF_OUT_LOCAL 0x0000
196#define PCM_RLF_OUT_REMOTE 0x0100
197#define PCM_RLF_OUT_MUTE 0x0200
198#define PCM_RLF_OUT_BOTH 0x0300
199#define LINE2_DAC_OUT_LOCAL 0x0000
200#define LINE2_DAC_OUT_REMOTE 0x0400
201#define LINE2_DAC_OUT_MUTE 0x0800
202#define LINE2_DAC_OUT_BOTH 0x0C00
203#define HANDSET_OUT_LOCAL 0x0000
204#define HANDSET_OUT_REMOTE 0x1000
205#define HANDSET_OUT_MUTE 0x2000
206#define HANDSET_OUT_BOTH 0x3000
207#define IO_CTRL_OUT_LOCAL 0x0000
208#define IO_CTRL_OUT_REMOTE 0x4000
209#define IO_CTRL_OUT_MUTE 0x8000
210#define IO_CTRL_OUT_BOTH 0xC000
211
212#define SDO_IN_DEST_CTRL 0x3C
213#define STATUS_ADDR_IN 0x0003
214#define PCM_LR_IN_LOCAL 0x0000
215#define PCM_LR_IN_REMOTE 0x0004
216#define PCM_LR_RESERVED 0x0008
217#define PCM_LR_IN_BOTH 0x000C
218#define LINE1_ADC_IN_LOCAL 0x0000
219#define LINE1_ADC_IN_REMOTE 0x0010
220#define LINE1_ADC_IN_MUTE 0x0020
221#define MIC_ADC_IN_LOCAL 0x0000
222#define MIC_ADC_IN_REMOTE 0x0040
223#define MIC_ADC_IN_MUTE 0x0080
224#define LINE2_DAC_IN_LOCAL 0x0000
225#define LINE2_DAC_IN_REMOTE 0x0400
226#define LINE2_DAC_IN_MUTE 0x0800
227#define HANDSET_IN_LOCAL 0x0000
228#define HANDSET_IN_REMOTE 0x1000
229#define HANDSET_IN_MUTE 0x2000
230#define IO_STATUS_IN_LOCAL 0x0000
231#define IO_STATUS_IN_REMOTE 0x4000
232
233#define SPDIF_IN_CTRL 0x3E
234#define SPDIF_IN_ENABLE 0x0001
235
236#define GPIO_DATA 0x60
237#define GPIO_DATA_MASK 0x0FFF
238#define GPIO_HV_STATUS 0x3000
239#define GPIO_PME_STATUS 0x4000
240
241#define GPIO_MASK 0x64
242#define GPIO_DIRECTION 0x68
243#define GPO_PRIMARY_AC97 0x0001
244#define GPI_LINEOUT_SENSE 0x0004
245#define GPO_SECONDARY_AC97 0x0008
246#define GPI_VOL_DOWN 0x0010
247#define GPI_VOL_UP 0x0020
248#define GPI_IIS_CLK 0x0040
249#define GPI_IIS_LRCLK 0x0080
250#define GPI_IIS_DATA 0x0100
251#define GPI_DOCKING_STATUS 0x0100
252#define GPI_HEADPHONE_SENSE 0x0200
253#define GPO_EXT_AMP_SHUTDOWN 0x1000
254
255// M3
256#define GPO_M3_EXT_AMP_SHUTDN 0x0002
257
258#define ASSP_INDEX_PORT 0x80
259#define ASSP_MEMORY_PORT 0x82
260#define ASSP_DATA_PORT 0x84
261
262#define MPU401_DATA_PORT 0x98
263#define MPU401_STATUS_PORT 0x99
264
265#define CLK_MULT_DATA_PORT 0x9C
266
267#define ASSP_CONTROL_A 0xA2
268#define ASSP_0_WS_ENABLE 0x01
269#define ASSP_CTRL_A_RESERVED1 0x02
270#define ASSP_CTRL_A_RESERVED2 0x04
271#define ASSP_CLK_49MHZ_SELECT 0x08
272#define FAST_PLU_ENABLE 0x10
273#define ASSP_CTRL_A_RESERVED3 0x20
274#define DSP_CLK_36MHZ_SELECT 0x40
275
276#define ASSP_CONTROL_B 0xA4
277#define RESET_ASSP 0x00
278#define RUN_ASSP 0x01
279#define ENABLE_ASSP_CLOCK 0x00
280#define STOP_ASSP_CLOCK 0x10
281#define RESET_TOGGLE 0x40
282
283#define ASSP_CONTROL_C 0xA6
284#define ASSP_HOST_INT_ENABLE 0x01
285#define FM_ADDR_REMAP_DISABLE 0x02
286#define HOST_WRITE_PORT_ENABLE 0x08
287
288#define ASSP_HOST_INT_STATUS 0xAC
289#define DSP2HOST_REQ_PIORECORD 0x01
290#define DSP2HOST_REQ_I2SRATE 0x02
291#define DSP2HOST_REQ_TIMER 0x04
292
293// AC97 registers
294// XXX fix this crap up
295/*#define AC97_RESET 0x00*/
296
297#define AC97_VOL_MUTE_B 0x8000
298#define AC97_VOL_M 0x1F
299#define AC97_LEFT_VOL_S 8
300
301#define AC97_MASTER_VOL 0x02
302#define AC97_LINE_LEVEL_VOL 0x04
303#define AC97_MASTER_MONO_VOL 0x06
304#define AC97_PC_BEEP_VOL 0x0A
305#define AC97_PC_BEEP_VOL_M 0x0F
306#define AC97_SROUND_MASTER_VOL 0x38
307#define AC97_PC_BEEP_VOL_S 1
308
309/*#define AC97_PHONE_VOL 0x0C
310#define AC97_MIC_VOL 0x0E*/
311#define AC97_MIC_20DB_ENABLE 0x40
312
313/*#define AC97_LINEIN_VOL 0x10
314#define AC97_CD_VOL 0x12
315#define AC97_VIDEO_VOL 0x14
316#define AC97_AUX_VOL 0x16*/
317#define AC97_PCM_OUT_VOL 0x18
318/*#define AC97_RECORD_SELECT 0x1A*/
319#define AC97_RECORD_MIC 0x00
320#define AC97_RECORD_CD 0x01
321#define AC97_RECORD_VIDEO 0x02
322#define AC97_RECORD_AUX 0x03
323#define AC97_RECORD_MONO_MUX 0x02
324#define AC97_RECORD_DIGITAL 0x03
325#define AC97_RECORD_LINE 0x04
326#define AC97_RECORD_STEREO 0x05
327#define AC97_RECORD_MONO 0x06
328#define AC97_RECORD_PHONE 0x07
329
330/*#define AC97_RECORD_GAIN 0x1C*/
331#define AC97_RECORD_VOL_M 0x0F
332
333/*#define AC97_GENERAL_PURPOSE 0x20*/
334#define AC97_POWER_DOWN_CTRL 0x26
335#define AC97_ADC_READY 0x0001
336#define AC97_DAC_READY 0x0002
337#define AC97_ANALOG_READY 0x0004
338#define AC97_VREF_ON 0x0008
339#define AC97_PR0 0x0100
340#define AC97_PR1 0x0200
341#define AC97_PR2 0x0400
342#define AC97_PR3 0x0800
343#define AC97_PR4 0x1000
344
345#define AC97_RESERVED1 0x28
346
347#define AC97_VENDOR_TEST 0x5A
348
349#define AC97_CLOCK_DELAY 0x5C
350#define AC97_LINEOUT_MUX_SEL 0x0001
351#define AC97_MONO_MUX_SEL 0x0002
352#define AC97_CLOCK_DELAY_SEL 0x1F
353#define AC97_DAC_CDS_SHIFT 6
354#define AC97_ADC_CDS_SHIFT 11
355
356#define AC97_MULTI_CHANNEL_SEL 0x74
357
358/*#define AC97_VENDOR_ID1 0x7C
359#define AC97_VENDOR_ID2 0x7E*/
360
361/*
362 * ASSP control regs
363 */
364#define DSP_PORT_TIMER_COUNT 0x06
365
366#define DSP_PORT_MEMORY_INDEX 0x80
367
368#define DSP_PORT_MEMORY_TYPE 0x82
369#define MEMTYPE_INTERNAL_CODE 0x0002
370#define MEMTYPE_INTERNAL_DATA 0x0003
371#define MEMTYPE_MASK 0x0003
372
373#define DSP_PORT_MEMORY_DATA 0x84
374
375#define DSP_PORT_CONTROL_REG_A 0xA2
376#define DSP_PORT_CONTROL_REG_B 0xA4
377#define DSP_PORT_CONTROL_REG_C 0xA6
378
379#define REV_A_CODE_MEMORY_BEGIN 0x0000
380#define REV_A_CODE_MEMORY_END 0x0FFF
381#define REV_A_CODE_MEMORY_UNIT_LENGTH 0x0040
382#define REV_A_CODE_MEMORY_LENGTH (REV_A_CODE_MEMORY_END - REV_A_CODE_MEMORY_BEGIN + 1)
383
384#define REV_B_CODE_MEMORY_BEGIN 0x0000
385#define REV_B_CODE_MEMORY_END 0x0BFF
386#define REV_B_CODE_MEMORY_UNIT_LENGTH 0x0040
387#define REV_B_CODE_MEMORY_LENGTH (REV_B_CODE_MEMORY_END - REV_B_CODE_MEMORY_BEGIN + 1)
388
389#define REV_A_DATA_MEMORY_BEGIN 0x1000
390#define REV_A_DATA_MEMORY_END 0x2FFF
391#define REV_A_DATA_MEMORY_UNIT_LENGTH 0x0080
392#define REV_A_DATA_MEMORY_LENGTH (REV_A_DATA_MEMORY_END - REV_A_DATA_MEMORY_BEGIN + 1)
393
394#define REV_B_DATA_MEMORY_BEGIN 0x1000
395#define REV_B_DATA_MEMORY_END 0x2BFF
396#define REV_B_DATA_MEMORY_UNIT_LENGTH 0x0080
397#define REV_B_DATA_MEMORY_LENGTH (REV_B_DATA_MEMORY_END - REV_B_DATA_MEMORY_BEGIN + 1)
398
399
400#define NUM_UNITS_KERNEL_CODE 16
401#define NUM_UNITS_KERNEL_DATA 2
402
403#define NUM_UNITS_KERNEL_CODE_WITH_HSP 16
404#define NUM_UNITS_KERNEL_DATA_WITH_HSP 5
405
406/*
407 * Kernel data layout
408 */
409
410#define DP_SHIFT_COUNT 7
411
412#define KDATA_BASE_ADDR 0x1000
413#define KDATA_BASE_ADDR2 0x1080
414
415#define KDATA_TASK0 (KDATA_BASE_ADDR + 0x0000)
416#define KDATA_TASK1 (KDATA_BASE_ADDR + 0x0001)
417#define KDATA_TASK2 (KDATA_BASE_ADDR + 0x0002)
418#define KDATA_TASK3 (KDATA_BASE_ADDR + 0x0003)
419#define KDATA_TASK4 (KDATA_BASE_ADDR + 0x0004)
420#define KDATA_TASK5 (KDATA_BASE_ADDR + 0x0005)
421#define KDATA_TASK6 (KDATA_BASE_ADDR + 0x0006)
422#define KDATA_TASK7 (KDATA_BASE_ADDR + 0x0007)
423#define KDATA_TASK_ENDMARK (KDATA_BASE_ADDR + 0x0008)
424
425#define KDATA_CURRENT_TASK (KDATA_BASE_ADDR + 0x0009)
426#define KDATA_TASK_SWITCH (KDATA_BASE_ADDR + 0x000A)
427
428#define KDATA_INSTANCE0_POS3D (KDATA_BASE_ADDR + 0x000B)
429#define KDATA_INSTANCE1_POS3D (KDATA_BASE_ADDR + 0x000C)
430#define KDATA_INSTANCE2_POS3D (KDATA_BASE_ADDR + 0x000D)
431#define KDATA_INSTANCE3_POS3D (KDATA_BASE_ADDR + 0x000E)
432#define KDATA_INSTANCE4_POS3D (KDATA_BASE_ADDR + 0x000F)
433#define KDATA_INSTANCE5_POS3D (KDATA_BASE_ADDR + 0x0010)
434#define KDATA_INSTANCE6_POS3D (KDATA_BASE_ADDR + 0x0011)
435#define KDATA_INSTANCE7_POS3D (KDATA_BASE_ADDR + 0x0012)
436#define KDATA_INSTANCE8_POS3D (KDATA_BASE_ADDR + 0x0013)
437#define KDATA_INSTANCE_POS3D_ENDMARK (KDATA_BASE_ADDR + 0x0014)
438
439#define KDATA_INSTANCE0_SPKVIRT (KDATA_BASE_ADDR + 0x0015)
440#define KDATA_INSTANCE_SPKVIRT_ENDMARK (KDATA_BASE_ADDR + 0x0016)
441
442#define KDATA_INSTANCE0_SPDIF (KDATA_BASE_ADDR + 0x0017)
443#define KDATA_INSTANCE_SPDIF_ENDMARK (KDATA_BASE_ADDR + 0x0018)
444
445#define KDATA_INSTANCE0_MODEM (KDATA_BASE_ADDR + 0x0019)
446#define KDATA_INSTANCE_MODEM_ENDMARK (KDATA_BASE_ADDR + 0x001A)
447
448#define KDATA_INSTANCE0_SRC (KDATA_BASE_ADDR + 0x001B)
449#define KDATA_INSTANCE1_SRC (KDATA_BASE_ADDR + 0x001C)
450#define KDATA_INSTANCE_SRC_ENDMARK (KDATA_BASE_ADDR + 0x001D)
451
452#define KDATA_INSTANCE0_MINISRC (KDATA_BASE_ADDR + 0x001E)
453#define KDATA_INSTANCE1_MINISRC (KDATA_BASE_ADDR + 0x001F)
454#define KDATA_INSTANCE2_MINISRC (KDATA_BASE_ADDR + 0x0020)
455#define KDATA_INSTANCE3_MINISRC (KDATA_BASE_ADDR + 0x0021)
456#define KDATA_INSTANCE_MINISRC_ENDMARK (KDATA_BASE_ADDR + 0x0022)
457
458#define KDATA_INSTANCE0_CPYTHRU (KDATA_BASE_ADDR + 0x0023)
459#define KDATA_INSTANCE1_CPYTHRU (KDATA_BASE_ADDR + 0x0024)
460#define KDATA_INSTANCE_CPYTHRU_ENDMARK (KDATA_BASE_ADDR + 0x0025)
461
462#define KDATA_CURRENT_DMA (KDATA_BASE_ADDR + 0x0026)
463#define KDATA_DMA_SWITCH (KDATA_BASE_ADDR + 0x0027)
464#define KDATA_DMA_ACTIVE (KDATA_BASE_ADDR + 0x0028)
465
466#define KDATA_DMA_XFER0 (KDATA_BASE_ADDR + 0x0029)
467#define KDATA_DMA_XFER1 (KDATA_BASE_ADDR + 0x002A)
468#define KDATA_DMA_XFER2 (KDATA_BASE_ADDR + 0x002B)
469#define KDATA_DMA_XFER3 (KDATA_BASE_ADDR + 0x002C)
470#define KDATA_DMA_XFER4 (KDATA_BASE_ADDR + 0x002D)
471#define KDATA_DMA_XFER5 (KDATA_BASE_ADDR + 0x002E)
472#define KDATA_DMA_XFER6 (KDATA_BASE_ADDR + 0x002F)
473#define KDATA_DMA_XFER7 (KDATA_BASE_ADDR + 0x0030)
474#define KDATA_DMA_XFER8 (KDATA_BASE_ADDR + 0x0031)
475#define KDATA_DMA_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0032)
476
477#define KDATA_I2S_SAMPLE_COUNT (KDATA_BASE_ADDR + 0x0033)
478#define KDATA_I2S_INT_METER (KDATA_BASE_ADDR + 0x0034)
479#define KDATA_I2S_ACTIVE (KDATA_BASE_ADDR + 0x0035)
480
481#define KDATA_TIMER_COUNT_RELOAD (KDATA_BASE_ADDR + 0x0036)
482#define KDATA_TIMER_COUNT_CURRENT (KDATA_BASE_ADDR + 0x0037)
483
484#define KDATA_HALT_SYNCH_CLIENT (KDATA_BASE_ADDR + 0x0038)
485#define KDATA_HALT_SYNCH_DMA (KDATA_BASE_ADDR + 0x0039)
486#define KDATA_HALT_ACKNOWLEDGE (KDATA_BASE_ADDR + 0x003A)
487
488#define KDATA_ADC1_XFER0 (KDATA_BASE_ADDR + 0x003B)
489#define KDATA_ADC1_XFER_ENDMARK (KDATA_BASE_ADDR + 0x003C)
490#define KDATA_ADC1_LEFT_VOLUME (KDATA_BASE_ADDR + 0x003D)
491#define KDATA_ADC1_RIGHT_VOLUME (KDATA_BASE_ADDR + 0x003E)
492#define KDATA_ADC1_LEFT_SUR_VOL (KDATA_BASE_ADDR + 0x003F)
493#define KDATA_ADC1_RIGHT_SUR_VOL (KDATA_BASE_ADDR + 0x0040)
494
495#define KDATA_ADC2_XFER0 (KDATA_BASE_ADDR + 0x0041)
496#define KDATA_ADC2_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0042)
497#define KDATA_ADC2_LEFT_VOLUME (KDATA_BASE_ADDR + 0x0043)
498#define KDATA_ADC2_RIGHT_VOLUME (KDATA_BASE_ADDR + 0x0044)
499#define KDATA_ADC2_LEFT_SUR_VOL (KDATA_BASE_ADDR + 0x0045)
500#define KDATA_ADC2_RIGHT_SUR_VOL (KDATA_BASE_ADDR + 0x0046)
501
502#define KDATA_CD_XFER0 (KDATA_BASE_ADDR + 0x0047)
503#define KDATA_CD_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0048)
504#define KDATA_CD_LEFT_VOLUME (KDATA_BASE_ADDR + 0x0049)
505#define KDATA_CD_RIGHT_VOLUME (KDATA_BASE_ADDR + 0x004A)
506#define KDATA_CD_LEFT_SUR_VOL (KDATA_BASE_ADDR + 0x004B)
507#define KDATA_CD_RIGHT_SUR_VOL (KDATA_BASE_ADDR + 0x004C)
508
509#define KDATA_MIC_XFER0 (KDATA_BASE_ADDR + 0x004D)
510#define KDATA_MIC_XFER_ENDMARK (KDATA_BASE_ADDR + 0x004E)
511#define KDATA_MIC_VOLUME (KDATA_BASE_ADDR + 0x004F)
512#define KDATA_MIC_SUR_VOL (KDATA_BASE_ADDR + 0x0050)
513
514#define KDATA_I2S_XFER0 (KDATA_BASE_ADDR + 0x0051)
515#define KDATA_I2S_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0052)
516
517#define KDATA_CHI_XFER0 (KDATA_BASE_ADDR + 0x0053)
518#define KDATA_CHI_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0054)
519
520#define KDATA_SPDIF_XFER (KDATA_BASE_ADDR + 0x0055)
521#define KDATA_SPDIF_CURRENT_FRAME (KDATA_BASE_ADDR + 0x0056)
522#define KDATA_SPDIF_FRAME0 (KDATA_BASE_ADDR + 0x0057)
523#define KDATA_SPDIF_FRAME1 (KDATA_BASE_ADDR + 0x0058)
524#define KDATA_SPDIF_FRAME2 (KDATA_BASE_ADDR + 0x0059)
525
526#define KDATA_SPDIF_REQUEST (KDATA_BASE_ADDR + 0x005A)
527#define KDATA_SPDIF_TEMP (KDATA_BASE_ADDR + 0x005B)
528
529#define KDATA_SPDIFIN_XFER0 (KDATA_BASE_ADDR + 0x005C)
530#define KDATA_SPDIFIN_XFER_ENDMARK (KDATA_BASE_ADDR + 0x005D)
531#define KDATA_SPDIFIN_INT_METER (KDATA_BASE_ADDR + 0x005E)
532
533#define KDATA_DSP_RESET_COUNT (KDATA_BASE_ADDR + 0x005F)
534#define KDATA_DEBUG_OUTPUT (KDATA_BASE_ADDR + 0x0060)
535
536#define KDATA_KERNEL_ISR_LIST (KDATA_BASE_ADDR + 0x0061)
537
538#define KDATA_KERNEL_ISR_CBSR1 (KDATA_BASE_ADDR + 0x0062)
539#define KDATA_KERNEL_ISR_CBER1 (KDATA_BASE_ADDR + 0x0063)
540#define KDATA_KERNEL_ISR_CBCR (KDATA_BASE_ADDR + 0x0064)
541#define KDATA_KERNEL_ISR_AR0 (KDATA_BASE_ADDR + 0x0065)
542#define KDATA_KERNEL_ISR_AR1 (KDATA_BASE_ADDR + 0x0066)
543#define KDATA_KERNEL_ISR_AR2 (KDATA_BASE_ADDR + 0x0067)
544#define KDATA_KERNEL_ISR_AR3 (KDATA_BASE_ADDR + 0x0068)
545#define KDATA_KERNEL_ISR_AR4 (KDATA_BASE_ADDR + 0x0069)
546#define KDATA_KERNEL_ISR_AR5 (KDATA_BASE_ADDR + 0x006A)
547#define KDATA_KERNEL_ISR_BRCR (KDATA_BASE_ADDR + 0x006B)
548#define KDATA_KERNEL_ISR_PASR (KDATA_BASE_ADDR + 0x006C)
549#define KDATA_KERNEL_ISR_PAER (KDATA_BASE_ADDR + 0x006D)
550
551#define KDATA_CLIENT_SCRATCH0 (KDATA_BASE_ADDR + 0x006E)
552#define KDATA_CLIENT_SCRATCH1 (KDATA_BASE_ADDR + 0x006F)
553#define KDATA_KERNEL_SCRATCH (KDATA_BASE_ADDR + 0x0070)
554#define KDATA_KERNEL_ISR_SCRATCH (KDATA_BASE_ADDR + 0x0071)
555
/* NOTE(review): "OUEUE" is a typo for "QUEUE" (compare KDATA_QUEUE_RIGHT
 * below).  The misspelled name is kept because existing code may reference
 * this exact identifier; renaming it would break those references. */
#define KDATA_OUEUE_LEFT (KDATA_BASE_ADDR + 0x0072)
#define KDATA_QUEUE_RIGHT (KDATA_BASE_ADDR + 0x0073)
558
559#define KDATA_ADC1_REQUEST (KDATA_BASE_ADDR + 0x0074)
560#define KDATA_ADC2_REQUEST (KDATA_BASE_ADDR + 0x0075)
561#define KDATA_CD_REQUEST (KDATA_BASE_ADDR + 0x0076)
562#define KDATA_MIC_REQUEST (KDATA_BASE_ADDR + 0x0077)
563
564#define KDATA_ADC1_MIXER_REQUEST (KDATA_BASE_ADDR + 0x0078)
565#define KDATA_ADC2_MIXER_REQUEST (KDATA_BASE_ADDR + 0x0079)
566#define KDATA_CD_MIXER_REQUEST (KDATA_BASE_ADDR + 0x007A)
567#define KDATA_MIC_MIXER_REQUEST (KDATA_BASE_ADDR + 0x007B)
568#define KDATA_MIC_SYNC_COUNTER (KDATA_BASE_ADDR + 0x007C)
569
570/*
571 * second 'segment' (?) reserved for mixer
572 * buffers..
573 */
574
575#define KDATA_MIXER_WORD0 (KDATA_BASE_ADDR2 + 0x0000)
576#define KDATA_MIXER_WORD1 (KDATA_BASE_ADDR2 + 0x0001)
577#define KDATA_MIXER_WORD2 (KDATA_BASE_ADDR2 + 0x0002)
578#define KDATA_MIXER_WORD3 (KDATA_BASE_ADDR2 + 0x0003)
579#define KDATA_MIXER_WORD4 (KDATA_BASE_ADDR2 + 0x0004)
580#define KDATA_MIXER_WORD5 (KDATA_BASE_ADDR2 + 0x0005)
581#define KDATA_MIXER_WORD6 (KDATA_BASE_ADDR2 + 0x0006)
582#define KDATA_MIXER_WORD7 (KDATA_BASE_ADDR2 + 0x0007)
583#define KDATA_MIXER_WORD8 (KDATA_BASE_ADDR2 + 0x0008)
584#define KDATA_MIXER_WORD9 (KDATA_BASE_ADDR2 + 0x0009)
585#define KDATA_MIXER_WORDA (KDATA_BASE_ADDR2 + 0x000A)
586#define KDATA_MIXER_WORDB (KDATA_BASE_ADDR2 + 0x000B)
587#define KDATA_MIXER_WORDC (KDATA_BASE_ADDR2 + 0x000C)
588#define KDATA_MIXER_WORDD (KDATA_BASE_ADDR2 + 0x000D)
589#define KDATA_MIXER_WORDE (KDATA_BASE_ADDR2 + 0x000E)
590#define KDATA_MIXER_WORDF (KDATA_BASE_ADDR2 + 0x000F)
591
592#define KDATA_MIXER_XFER0 (KDATA_BASE_ADDR2 + 0x0010)
593#define KDATA_MIXER_XFER1 (KDATA_BASE_ADDR2 + 0x0011)
594#define KDATA_MIXER_XFER2 (KDATA_BASE_ADDR2 + 0x0012)
595#define KDATA_MIXER_XFER3 (KDATA_BASE_ADDR2 + 0x0013)
596#define KDATA_MIXER_XFER4 (KDATA_BASE_ADDR2 + 0x0014)
597#define KDATA_MIXER_XFER5 (KDATA_BASE_ADDR2 + 0x0015)
598#define KDATA_MIXER_XFER6 (KDATA_BASE_ADDR2 + 0x0016)
599#define KDATA_MIXER_XFER7 (KDATA_BASE_ADDR2 + 0x0017)
600#define KDATA_MIXER_XFER8 (KDATA_BASE_ADDR2 + 0x0018)
601#define KDATA_MIXER_XFER9 (KDATA_BASE_ADDR2 + 0x0019)
602#define KDATA_MIXER_XFER_ENDMARK (KDATA_BASE_ADDR2 + 0x001A)
603
604#define KDATA_MIXER_TASK_NUMBER (KDATA_BASE_ADDR2 + 0x001B)
605#define KDATA_CURRENT_MIXER (KDATA_BASE_ADDR2 + 0x001C)
606#define KDATA_MIXER_ACTIVE (KDATA_BASE_ADDR2 + 0x001D)
607#define KDATA_MIXER_BANK_STATUS (KDATA_BASE_ADDR2 + 0x001E)
608#define KDATA_DAC_LEFT_VOLUME (KDATA_BASE_ADDR2 + 0x001F)
609#define KDATA_DAC_RIGHT_VOLUME (KDATA_BASE_ADDR2 + 0x0020)
610
611#define MAX_INSTANCE_MINISRC (KDATA_INSTANCE_MINISRC_ENDMARK - KDATA_INSTANCE0_MINISRC)
612#define MAX_VIRTUAL_DMA_CHANNELS (KDATA_DMA_XFER_ENDMARK - KDATA_DMA_XFER0)
613#define MAX_VIRTUAL_MIXER_CHANNELS (KDATA_MIXER_XFER_ENDMARK - KDATA_MIXER_XFER0)
614#define MAX_VIRTUAL_ADC1_CHANNELS (KDATA_ADC1_XFER_ENDMARK - KDATA_ADC1_XFER0)
615
616/*
617 * client data area offsets
618 */
619#define CDATA_INSTANCE_READY 0x00
620
621#define CDATA_HOST_SRC_ADDRL 0x01
622#define CDATA_HOST_SRC_ADDRH 0x02
623#define CDATA_HOST_SRC_END_PLUS_1L 0x03
624#define CDATA_HOST_SRC_END_PLUS_1H 0x04
625#define CDATA_HOST_SRC_CURRENTL 0x05
626#define CDATA_HOST_SRC_CURRENTH 0x06
627
628#define CDATA_IN_BUF_CONNECT 0x07
629#define CDATA_OUT_BUF_CONNECT 0x08
630
631#define CDATA_IN_BUF_BEGIN 0x09
632#define CDATA_IN_BUF_END_PLUS_1 0x0A
633#define CDATA_IN_BUF_HEAD 0x0B
634#define CDATA_IN_BUF_TAIL 0x0C
635#define CDATA_OUT_BUF_BEGIN 0x0D
636#define CDATA_OUT_BUF_END_PLUS_1 0x0E
637#define CDATA_OUT_BUF_HEAD 0x0F
638#define CDATA_OUT_BUF_TAIL 0x10
639
640#define CDATA_DMA_CONTROL 0x11
641#define CDATA_RESERVED 0x12
642
643#define CDATA_FREQUENCY 0x13
644#define CDATA_LEFT_VOLUME 0x14
645#define CDATA_RIGHT_VOLUME 0x15
646#define CDATA_LEFT_SUR_VOL 0x16
647#define CDATA_RIGHT_SUR_VOL 0x17
648
649#define CDATA_HEADER_LEN 0x18
650
651#define SRC3_DIRECTION_OFFSET CDATA_HEADER_LEN
652#define SRC3_MODE_OFFSET (CDATA_HEADER_LEN + 1)
653#define SRC3_WORD_LENGTH_OFFSET (CDATA_HEADER_LEN + 2)
654#define SRC3_PARAMETER_OFFSET (CDATA_HEADER_LEN + 3)
655#define SRC3_COEFF_ADDR_OFFSET (CDATA_HEADER_LEN + 8)
656#define SRC3_FILTAP_ADDR_OFFSET (CDATA_HEADER_LEN + 10)
657#define SRC3_TEMP_INBUF_ADDR_OFFSET (CDATA_HEADER_LEN + 16)
658#define SRC3_TEMP_OUTBUF_ADDR_OFFSET (CDATA_HEADER_LEN + 17)
659
/*
 * Mini sample-rate-converter buffer geometry (sizes in 16-bit DSP words).
 * Fixes: MINISRC_OUT_BUFFER_SIZE was defined twice with the same value
 * (redundant, and a warning on picky compilers); MINISRC_BIQUAD_STAGE is
 * now defined before MINISRC_TMP_BUFFER_SIZE, which expands it; 0X175
 * normalized to lowercase-x hex like every other constant in this file.
 */
#define MINISRC_BIQUAD_STAGE    2
#define MINISRC_IN_BUFFER_SIZE  ( 0x50 * 2 )
#define MINISRC_OUT_BUFFER_SIZE ( 0x50 * 2 * 2)
#define MINISRC_TMP_BUFFER_SIZE ( 112 + ( MINISRC_BIQUAD_STAGE * 3 + 4 ) * 2 * 2 )
#define MINISRC_COEF_LOC        0x175
666
667#define DMACONTROL_BLOCK_MASK 0x000F
668#define DMAC_BLOCK0_SELECTOR 0x0000
669#define DMAC_BLOCK1_SELECTOR 0x0001
670#define DMAC_BLOCK2_SELECTOR 0x0002
671#define DMAC_BLOCK3_SELECTOR 0x0003
672#define DMAC_BLOCK4_SELECTOR 0x0004
673#define DMAC_BLOCK5_SELECTOR 0x0005
674#define DMAC_BLOCK6_SELECTOR 0x0006
675#define DMAC_BLOCK7_SELECTOR 0x0007
676#define DMAC_BLOCK8_SELECTOR 0x0008
677#define DMAC_BLOCK9_SELECTOR 0x0009
678#define DMAC_BLOCKA_SELECTOR 0x000A
679#define DMAC_BLOCKB_SELECTOR 0x000B
680#define DMAC_BLOCKC_SELECTOR 0x000C
681#define DMAC_BLOCKD_SELECTOR 0x000D
682#define DMAC_BLOCKE_SELECTOR 0x000E
683#define DMAC_BLOCKF_SELECTOR 0x000F
684#define DMACONTROL_PAGE_MASK 0x00F0
685#define DMAC_PAGE0_SELECTOR 0x0030
686#define DMAC_PAGE1_SELECTOR 0x0020
687#define DMAC_PAGE2_SELECTOR 0x0010
688#define DMAC_PAGE3_SELECTOR 0x0000
689#define DMACONTROL_AUTOREPEAT 0x1000
690#define DMACONTROL_STOPPED 0x2000
691#define DMACONTROL_DIRECTION 0x0100
692
693
694/*
695 * DSP Code images
696 */
697
698static u16 assp_kernel_image[] = {
699 0x7980, 0x0030, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x00FB, 0x7980, 0x00DD, 0x7980, 0x03B4,
700 0x7980, 0x0332, 0x7980, 0x0287, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x03B4,
701 0x7980, 0x031A, 0x7980, 0x03B4, 0x7980, 0x022F, 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x03B4,
702 0x7980, 0x03B4, 0x7980, 0x03B4, 0x7980, 0x0063, 0x7980, 0x006B, 0x7980, 0x03B4, 0x7980, 0x03B4,
703 0xBF80, 0x2C7C, 0x8806, 0x8804, 0xBE40, 0xBC20, 0xAE09, 0x1000, 0xAE0A, 0x0001, 0x6938, 0xEB08,
704 0x0053, 0x695A, 0xEB08, 0x00D6, 0x0009, 0x8B88, 0x6980, 0xE388, 0x0036, 0xBE30, 0xBC20, 0x6909,
705 0xB801, 0x9009, 0xBE41, 0xBE41, 0x6928, 0xEB88, 0x0078, 0xBE41, 0xBE40, 0x7980, 0x0038, 0xBE41,
706 0xBE41, 0x903A, 0x6938, 0xE308, 0x0056, 0x903A, 0xBE41, 0xBE40, 0xEF00, 0x903A, 0x6939, 0xE308,
707 0x005E, 0x903A, 0xEF00, 0x690B, 0x660C, 0xEF8C, 0x690A, 0x660C, 0x620B, 0x6609, 0xEF00, 0x6910,
708 0x660F, 0xEF04, 0xE388, 0x0075, 0x690E, 0x660F, 0x6210, 0x660D, 0xEF00, 0x690E, 0x660D, 0xEF00,
709 0xAE70, 0x0001, 0xBC20, 0xAE27, 0x0001, 0x6939, 0xEB08, 0x005D, 0x6926, 0xB801, 0x9026, 0x0026,
710 0x8B88, 0x6980, 0xE388, 0x00CB, 0x9028, 0x0D28, 0x4211, 0xE100, 0x007A, 0x4711, 0xE100, 0x00A0,
711 0x7A80, 0x0063, 0xB811, 0x660A, 0x6209, 0xE304, 0x007A, 0x0C0B, 0x4005, 0x100A, 0xBA01, 0x9012,
712 0x0C12, 0x4002, 0x7980, 0x00AF, 0x7A80, 0x006B, 0xBE02, 0x620E, 0x660D, 0xBA10, 0xE344, 0x007A,
713 0x0C10, 0x4005, 0x100E, 0xBA01, 0x9012, 0x0C12, 0x4002, 0x1003, 0xBA02, 0x9012, 0x0C12, 0x4000,
714 0x1003, 0xE388, 0x00BA, 0x1004, 0x7980, 0x00BC, 0x1004, 0xBA01, 0x9012, 0x0C12, 0x4001, 0x0C05,
715 0x4003, 0x0C06, 0x4004, 0x1011, 0xBFB0, 0x01FF, 0x9012, 0x0C12, 0x4006, 0xBC20, 0xEF00, 0xAE26,
716 0x1028, 0x6970, 0xBFD0, 0x0001, 0x9070, 0xE388, 0x007A, 0xAE28, 0x0000, 0xEF00, 0xAE70, 0x0300,
717 0x0C70, 0xB00C, 0xAE5A, 0x0000, 0xEF00, 0x7A80, 0x038A, 0x697F, 0xB801, 0x907F, 0x0056, 0x8B88,
718 0x0CA0, 0xB008, 0xAF71, 0xB000, 0x4E71, 0xE200, 0x00F3, 0xAE56, 0x1057, 0x0056, 0x0CA0, 0xB008,
719 0x8056, 0x7980, 0x03A1, 0x0810, 0xBFA0, 0x1059, 0xE304, 0x03A1, 0x8056, 0x7980, 0x03A1, 0x7A80,
720 0x038A, 0xBF01, 0xBE43, 0xBE59, 0x907C, 0x6937, 0xE388, 0x010D, 0xBA01, 0xE308, 0x010C, 0xAE71,
721 0x0004, 0x0C71, 0x5000, 0x6936, 0x9037, 0xBF0A, 0x109E, 0x8B8A, 0xAF80, 0x8014, 0x4C80, 0xBF0A,
722 0x0560, 0xF500, 0xBF0A, 0x0520, 0xB900, 0xBB17, 0x90A0, 0x6917, 0xE388, 0x0148, 0x0D17, 0xE100,
723 0x0127, 0xBF0C, 0x0578, 0xBF0D, 0x057C, 0x7980, 0x012B, 0xBF0C, 0x0538, 0xBF0D, 0x053C, 0x6900,
724 0xE308, 0x0135, 0x8B8C, 0xBE59, 0xBB07, 0x90A0, 0xBC20, 0x7980, 0x0157, 0x030C, 0x8B8B, 0xB903,
725 0x8809, 0xBEC6, 0x013E, 0x69AC, 0x90AB, 0x69AD, 0x90AB, 0x0813, 0x660A, 0xE344, 0x0144, 0x0309,
726 0x830C, 0xBC20, 0x7980, 0x0157, 0x6955, 0xE388, 0x0157, 0x7C38, 0xBF0B, 0x0578, 0xF500, 0xBF0B,
727 0x0538, 0xB907, 0x8809, 0xBEC6, 0x0156, 0x10AB, 0x90AA, 0x6974, 0xE388, 0x0163, 0xAE72, 0x0540,
728 0xF500, 0xAE72, 0x0500, 0xAE61, 0x103B, 0x7A80, 0x02F6, 0x6978, 0xE388, 0x0182, 0x8B8C, 0xBF0C,
729 0x0560, 0xE500, 0x7C40, 0x0814, 0xBA20, 0x8812, 0x733D, 0x7A80, 0x0380, 0x733E, 0x7A80, 0x0380,
730 0x8B8C, 0xBF0C, 0x056C, 0xE500, 0x7C40, 0x0814, 0xBA2C, 0x8812, 0x733F, 0x7A80, 0x0380, 0x7340,
731 0x7A80, 0x0380, 0x6975, 0xE388, 0x018E, 0xAE72, 0x0548, 0xF500, 0xAE72, 0x0508, 0xAE61, 0x1041,
732 0x7A80, 0x02F6, 0x6979, 0xE388, 0x01AD, 0x8B8C, 0xBF0C, 0x0560, 0xE500, 0x7C40, 0x0814, 0xBA18,
733 0x8812, 0x7343, 0x7A80, 0x0380, 0x7344, 0x7A80, 0x0380, 0x8B8C, 0xBF0C, 0x056C, 0xE500, 0x7C40,
734 0x0814, 0xBA24, 0x8812, 0x7345, 0x7A80, 0x0380, 0x7346, 0x7A80, 0x0380, 0x6976, 0xE388, 0x01B9,
735 0xAE72, 0x0558, 0xF500, 0xAE72, 0x0518, 0xAE61, 0x1047, 0x7A80, 0x02F6, 0x697A, 0xE388, 0x01D8,
736 0x8B8C, 0xBF0C, 0x0560, 0xE500, 0x7C40, 0x0814, 0xBA08, 0x8812, 0x7349, 0x7A80, 0x0380, 0x734A,
737 0x7A80, 0x0380, 0x8B8C, 0xBF0C, 0x056C, 0xE500, 0x7C40, 0x0814, 0xBA14, 0x8812, 0x734B, 0x7A80,
738 0x0380, 0x734C, 0x7A80, 0x0380, 0xBC21, 0xAE1C, 0x1090, 0x8B8A, 0xBF0A, 0x0560, 0xE500, 0x7C40,
739 0x0812, 0xB804, 0x8813, 0x8B8D, 0xBF0D, 0x056C, 0xE500, 0x7C40, 0x0815, 0xB804, 0x8811, 0x7A80,
740 0x034A, 0x8B8A, 0xBF0A, 0x0560, 0xE500, 0x7C40, 0x731F, 0xB903, 0x8809, 0xBEC6, 0x01F9, 0x548A,
741 0xBE03, 0x98A0, 0x7320, 0xB903, 0x8809, 0xBEC6, 0x0201, 0x548A, 0xBE03, 0x98A0, 0x1F20, 0x2F1F,
742 0x9826, 0xBC20, 0x6935, 0xE388, 0x03A1, 0x6933, 0xB801, 0x9033, 0xBFA0, 0x02EE, 0xE308, 0x03A1,
743 0x9033, 0xBF00, 0x6951, 0xE388, 0x021F, 0x7334, 0xBE80, 0x5760, 0xBE03, 0x9F7E, 0xBE59, 0x9034,
744 0x697E, 0x0D51, 0x9013, 0xBC20, 0x695C, 0xE388, 0x03A1, 0x735E, 0xBE80, 0x5760, 0xBE03, 0x9F7E,
745 0xBE59, 0x905E, 0x697E, 0x0D5C, 0x9013, 0x7980, 0x03A1, 0x7A80, 0x038A, 0xBF01, 0xBE43, 0x6977,
746 0xE388, 0x024E, 0xAE61, 0x104D, 0x0061, 0x8B88, 0x6980, 0xE388, 0x024E, 0x9071, 0x0D71, 0x000B,
747 0xAFA0, 0x8010, 0xAFA0, 0x8010, 0x0810, 0x660A, 0xE308, 0x0249, 0x0009, 0x0810, 0x660C, 0xE388,
748 0x024E, 0x800B, 0xBC20, 0x697B, 0xE388, 0x03A1, 0xBF0A, 0x109E, 0x8B8A, 0xAF80, 0x8014, 0x4C80,
749 0xE100, 0x0266, 0x697C, 0xBF90, 0x0560, 0x9072, 0x0372, 0x697C, 0xBF90, 0x0564, 0x9073, 0x0473,
750 0x7980, 0x0270, 0x697C, 0xBF90, 0x0520, 0x9072, 0x0372, 0x697C, 0xBF90, 0x0524, 0x9073, 0x0473,
751 0x697C, 0xB801, 0x907C, 0xBF0A, 0x10FD, 0x8B8A, 0xAF80, 0x8010, 0x734F, 0x548A, 0xBE03, 0x9880,
752 0xBC21, 0x7326, 0x548B, 0xBE03, 0x618B, 0x988C, 0xBE03, 0x6180, 0x9880, 0x7980, 0x03A1, 0x7A80,
753 0x038A, 0x0D28, 0x4711, 0xE100, 0x02BE, 0xAF12, 0x4006, 0x6912, 0xBFB0, 0x0C00, 0xE388, 0x02B6,
754 0xBFA0, 0x0800, 0xE388, 0x02B2, 0x6912, 0xBFB0, 0x0C00, 0xBFA0, 0x0400, 0xE388, 0x02A3, 0x6909,
755 0x900B, 0x7980, 0x02A5, 0xAF0B, 0x4005, 0x6901, 0x9005, 0x6902, 0x9006, 0x4311, 0xE100, 0x02ED,
756 0x6911, 0xBFC0, 0x2000, 0x9011, 0x7980, 0x02ED, 0x6909, 0x900B, 0x7980, 0x02B8, 0xAF0B, 0x4005,
757 0xAF05, 0x4003, 0xAF06, 0x4004, 0x7980, 0x02ED, 0xAF12, 0x4006, 0x6912, 0xBFB0, 0x0C00, 0xE388,
758 0x02E7, 0xBFA0, 0x0800, 0xE388, 0x02E3, 0x6912, 0xBFB0, 0x0C00, 0xBFA0, 0x0400, 0xE388, 0x02D4,
759 0x690D, 0x9010, 0x7980, 0x02D6, 0xAF10, 0x4005, 0x6901, 0x9005, 0x6902, 0x9006, 0x4311, 0xE100,
760 0x02ED, 0x6911, 0xBFC0, 0x2000, 0x9011, 0x7980, 0x02ED, 0x690D, 0x9010, 0x7980, 0x02E9, 0xAF10,
761 0x4005, 0xAF05, 0x4003, 0xAF06, 0x4004, 0xBC20, 0x6970, 0x9071, 0x7A80, 0x0078, 0x6971, 0x9070,
762 0x7980, 0x03A1, 0xBC20, 0x0361, 0x8B8B, 0x6980, 0xEF88, 0x0272, 0x0372, 0x7804, 0x9071, 0x0D71,
763 0x8B8A, 0x000B, 0xB903, 0x8809, 0xBEC6, 0x0309, 0x69A8, 0x90AB, 0x69A8, 0x90AA, 0x0810, 0x660A,
764 0xE344, 0x030F, 0x0009, 0x0810, 0x660C, 0xE388, 0x0314, 0x800B, 0xBC20, 0x6961, 0xB801, 0x9061,
765 0x7980, 0x02F7, 0x7A80, 0x038A, 0x5D35, 0x0001, 0x6934, 0xB801, 0x9034, 0xBF0A, 0x109E, 0x8B8A,
766 0xAF80, 0x8014, 0x4880, 0xAE72, 0x0550, 0xF500, 0xAE72, 0x0510, 0xAE61, 0x1051, 0x7A80, 0x02F6,
767 0x7980, 0x03A1, 0x7A80, 0x038A, 0x5D35, 0x0002, 0x695E, 0xB801, 0x905E, 0xBF0A, 0x109E, 0x8B8A,
768 0xAF80, 0x8014, 0x4780, 0xAE72, 0x0558, 0xF500, 0xAE72, 0x0518, 0xAE61, 0x105C, 0x7A80, 0x02F6,
769 0x7980, 0x03A1, 0x001C, 0x8B88, 0x6980, 0xEF88, 0x901D, 0x0D1D, 0x100F, 0x6610, 0xE38C, 0x0358,
770 0x690E, 0x6610, 0x620F, 0x660D, 0xBA0F, 0xE301, 0x037A, 0x0410, 0x8B8A, 0xB903, 0x8809, 0xBEC6,
771 0x036C, 0x6A8C, 0x61AA, 0x98AB, 0x6A8C, 0x61AB, 0x98AD, 0x6A8C, 0x61AD, 0x98A9, 0x6A8C, 0x61A9,
772 0x98AA, 0x7C04, 0x8B8B, 0x7C04, 0x8B8D, 0x7C04, 0x8B89, 0x7C04, 0x0814, 0x660E, 0xE308, 0x0379,
773 0x040D, 0x8410, 0xBC21, 0x691C, 0xB801, 0x901C, 0x7980, 0x034A, 0xB903, 0x8809, 0x8B8A, 0xBEC6,
774 0x0388, 0x54AC, 0xBE03, 0x618C, 0x98AA, 0xEF00, 0xBC20, 0xBE46, 0x0809, 0x906B, 0x080A, 0x906C,
775 0x080B, 0x906D, 0x081A, 0x9062, 0x081B, 0x9063, 0x081E, 0x9064, 0xBE59, 0x881E, 0x8065, 0x8166,
776 0x8267, 0x8368, 0x8469, 0x856A, 0xEF00, 0xBC20, 0x696B, 0x8809, 0x696C, 0x880A, 0x696D, 0x880B,
777 0x6962, 0x881A, 0x6963, 0x881B, 0x6964, 0x881E, 0x0065, 0x0166, 0x0267, 0x0368, 0x0469, 0x056A,
778 0xBE3A,
779};
780
781/*
782 * Mini sample rate converter code image
783 * that is to be loaded at 0x400 on the DSP.
784 */
785static u16 assp_minisrc_image[] = {
786
787 0xBF80, 0x101E, 0x906E, 0x006E, 0x8B88, 0x6980, 0xEF88, 0x906F, 0x0D6F, 0x6900, 0xEB08, 0x0412,
788 0xBC20, 0x696E, 0xB801, 0x906E, 0x7980, 0x0403, 0xB90E, 0x8807, 0xBE43, 0xBF01, 0xBE47, 0xBE41,
789 0x7A80, 0x002A, 0xBE40, 0x3029, 0xEFCC, 0xBE41, 0x7A80, 0x0028, 0xBE40, 0x3028, 0xEFCC, 0x6907,
790 0xE308, 0x042A, 0x6909, 0x902C, 0x7980, 0x042C, 0x690D, 0x902C, 0x1009, 0x881A, 0x100A, 0xBA01,
791 0x881B, 0x100D, 0x881C, 0x100E, 0xBA01, 0x881D, 0xBF80, 0x00ED, 0x881E, 0x050C, 0x0124, 0xB904,
792 0x9027, 0x6918, 0xE308, 0x04B3, 0x902D, 0x6913, 0xBFA0, 0x7598, 0xF704, 0xAE2D, 0x00FF, 0x8B8D,
793 0x6919, 0xE308, 0x0463, 0x691A, 0xE308, 0x0456, 0xB907, 0x8809, 0xBEC6, 0x0453, 0x10A9, 0x90AD,
794 0x7980, 0x047C, 0xB903, 0x8809, 0xBEC6, 0x0460, 0x1889, 0x6C22, 0x90AD, 0x10A9, 0x6E23, 0x6C22,
795 0x90AD, 0x7980, 0x047C, 0x101A, 0xE308, 0x046F, 0xB903, 0x8809, 0xBEC6, 0x046C, 0x10A9, 0x90A0,
796 0x90AD, 0x7980, 0x047C, 0xB901, 0x8809, 0xBEC6, 0x047B, 0x1889, 0x6C22, 0x90A0, 0x90AD, 0x10A9,
797 0x6E23, 0x6C22, 0x90A0, 0x90AD, 0x692D, 0xE308, 0x049C, 0x0124, 0xB703, 0xB902, 0x8818, 0x8B89,
798 0x022C, 0x108A, 0x7C04, 0x90A0, 0x692B, 0x881F, 0x7E80, 0x055B, 0x692A, 0x8809, 0x8B89, 0x99A0,
799 0x108A, 0x90A0, 0x692B, 0x881F, 0x7E80, 0x055B, 0x692A, 0x8809, 0x8B89, 0x99AF, 0x7B99, 0x0484,
800 0x0124, 0x060F, 0x101B, 0x2013, 0x901B, 0xBFA0, 0x7FFF, 0xE344, 0x04AC, 0x901B, 0x8B89, 0x7A80,
801 0x051A, 0x6927, 0xBA01, 0x9027, 0x7A80, 0x0523, 0x6927, 0xE308, 0x049E, 0x7980, 0x050F, 0x0624,
802 0x1026, 0x2013, 0x9026, 0xBFA0, 0x7FFF, 0xE304, 0x04C0, 0x8B8D, 0x7A80, 0x051A, 0x7980, 0x04B4,
803 0x9026, 0x1013, 0x3026, 0x901B, 0x8B8D, 0x7A80, 0x051A, 0x7A80, 0x0523, 0x1027, 0xBA01, 0x9027,
804 0xE308, 0x04B4, 0x0124, 0x060F, 0x8B89, 0x691A, 0xE308, 0x04EA, 0x6919, 0xE388, 0x04E0, 0xB903,
805 0x8809, 0xBEC6, 0x04DD, 0x1FA0, 0x2FAE, 0x98A9, 0x7980, 0x050F, 0xB901, 0x8818, 0xB907, 0x8809,
806 0xBEC6, 0x04E7, 0x10EE, 0x90A9, 0x7980, 0x050F, 0x6919, 0xE308, 0x04FE, 0xB903, 0x8809, 0xBE46,
807 0xBEC6, 0x04FA, 0x17A0, 0xBE1E, 0x1FAE, 0xBFBF, 0xFF00, 0xBE13, 0xBFDF, 0x8080, 0x99A9, 0xBE47,
808 0x7980, 0x050F, 0xB901, 0x8809, 0xBEC6, 0x050E, 0x16A0, 0x26A0, 0xBFB7, 0xFF00, 0xBE1E, 0x1EA0,
809 0x2EAE, 0xBFBF, 0xFF00, 0xBE13, 0xBFDF, 0x8080, 0x99A9, 0x850C, 0x860F, 0x6907, 0xE388, 0x0516,
810 0x0D07, 0x8510, 0xBE59, 0x881E, 0xBE4A, 0xEF00, 0x101E, 0x901C, 0x101F, 0x901D, 0x10A0, 0x901E,
811 0x10A0, 0x901F, 0xEF00, 0x101E, 0x301C, 0x9020, 0x731B, 0x5420, 0xBE03, 0x9825, 0x1025, 0x201C,
812 0x9025, 0x7325, 0x5414, 0xBE03, 0x8B8E, 0x9880, 0x692F, 0xE388, 0x0539, 0xBE59, 0xBB07, 0x6180,
813 0x9880, 0x8BA0, 0x101F, 0x301D, 0x9021, 0x731B, 0x5421, 0xBE03, 0x982E, 0x102E, 0x201D, 0x902E,
814 0x732E, 0x5415, 0xBE03, 0x9880, 0x692F, 0xE388, 0x054F, 0xBE59, 0xBB07, 0x6180, 0x9880, 0x8BA0,
815 0x6918, 0xEF08, 0x7325, 0x5416, 0xBE03, 0x98A0, 0x732E, 0x5417, 0xBE03, 0x98A0, 0xEF00, 0x8BA0,
816 0xBEC6, 0x056B, 0xBE59, 0xBB04, 0xAA90, 0xBE04, 0xBE1E, 0x99E0, 0x8BE0, 0x69A0, 0x90D0, 0x69A0,
817 0x90D0, 0x081F, 0xB805, 0x881F, 0x8B90, 0x69A0, 0x90D0, 0x69A0, 0x9090, 0x8BD0, 0x8BD8, 0xBE1F,
818 0xEF00, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
819 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
820};
821
diff --git a/sound/oss/maui.c b/sound/oss/maui.c
deleted file mode 100644
index 9130fcf96552..000000000000
--- a/sound/oss/maui.c
+++ /dev/null
@@ -1,477 +0,0 @@
1/*
2 * sound/oss/maui.c
3 *
4 * The low level driver for Turtle Beach Maui and Tropez.
5 *
6 *
7 * Copyright (C) by Hannu Savolainen 1993-1997
8 *
9 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
10 * Version 2 (June 1991). See the "COPYING" file distributed with this software
11 * for more info.
12 *
13 * Changes:
14 * Alan Cox General clean up, use kernel IRQ
15 * system
16 * Christoph Hellwig Adapted to module_init/module_exit
17 * Bartlomiej Zolnierkiewicz
18 * Added __init to download_code()
19 *
20 * Status:
21 * Andrew J. Kroll Tested 06/01/1999 with:
22 * * OSWF.MOT File Version: 1.15
23 * * OSWF.MOT File Dated: 09/12/94
24 * * Older versions will cause problems.
25 */
26
27#include <linux/interrupt.h>
28#include <linux/module.h>
29#include <linux/init.h>
30
31#define USE_SEQ_MACROS
32#define USE_SIMPLE_MACROS
33
34#include "sound_config.h"
35#include "sound_firmware.h"
36
37#include "mpu401.h"
38
39static int maui_base = 0x330;
40
41static volatile int irq_ok;
42static int *maui_osp;
43
44#define HOST_DATA_PORT (maui_base + 2)
45#define HOST_STAT_PORT (maui_base + 3)
46#define HOST_CTRL_PORT (maui_base + 3)
47
48#define STAT_TX_INTR 0x40
49#define STAT_TX_AVAIL 0x20
50#define STAT_TX_IENA 0x10
51#define STAT_RX_INTR 0x04
52#define STAT_RX_AVAIL 0x02
53#define STAT_RX_IENA 0x01
54
55static int (*orig_load_patch)(int dev, int format, const char __user *addr,
56 int offs, int count, int pmgr_flag) = NULL;
57
58#include "maui_boot.h"
59
60static int maui_wait(int mask)
61{
62 int i;
63
64 /*
65 * Perform a short initial wait without sleeping
66 */
67
68 for (i = 0; i < 100; i++)
69 if (inb(HOST_STAT_PORT) & mask)
70 return 1;
71
72 /*
73 * Wait up to 15 seconds with sleeping
74 */
75
76 for (i = 0; i < 150; i++) {
77 if (inb(HOST_STAT_PORT) & mask)
78 return 1;
79 current->state = TASK_INTERRUPTIBLE;
80 schedule_timeout(HZ / 10);
81 if (signal_pending(current))
82 return 0;
83 }
84 return 0;
85}
86
87static int maui_read(void)
88{
89 if (maui_wait(STAT_RX_AVAIL))
90 return inb(HOST_DATA_PORT);
91 return -1;
92}
93
94static int maui_write(unsigned char data)
95{
96 if (maui_wait(STAT_TX_AVAIL)) {
97 outb((data), HOST_DATA_PORT);
98 return 1;
99 }
100 printk(KERN_WARNING "Maui: Write timeout\n");
101 return 0;
102}
103
/*
 * Interrupt handler.  Its only job is to set the irq_ok flag so that
 * maui_init() can verify the board actually raises the configured IRQ.
 */
104static irqreturn_t mauiintr(int irq, void *dev_id, struct pt_regs *dummy)
105{
106 irq_ok = 1;
107 return IRQ_HANDLED;
108}
109
/*
 * Download the OSWF firmware image (Motorola S-record text held in
 * maui_os[] / maui_osLen, brought in via maui_boot.h or loaded by
 * init_maui()) to the board one character at a time through the host
 * data port.  Each 'S' record start is announced with a 0xF1 command
 * byte; the board acknowledges every completed line with 0x80, and an
 * "S8" record marks the end of the image.
 *
 * Returns 1 on success, 0 on any write timeout or missing ACK.
 */
110static int __init download_code(void)
111{
112 int i, lines = 0;
113 int eol_seen = 0, done = 0;
114 int skip = 1;
115
116 printk(KERN_INFO "Code download (%d bytes): ", maui_osLen);
117
118 for (i = 0; i < maui_osLen; i++) {
 /* Carriage returns are dropped entirely. */
119 if (maui_os[i] != '\r') {
 /* While skip is set, discard bytes until an 'S' at start-of-line. */
120 if (!skip || (maui_os[i] == 'S' && (i == 0 || maui_os[i - 1] == '\n'))) {
121 skip = 0;
122
123 if (maui_os[i] == '\n')
124 eol_seen = skip = 1;
125 else if (maui_os[i] == 'S') {
 /* "S8" is the S-record terminator: this is the last line. */
126 if (maui_os[i + 1] == '8')
127 done = 1;
 /* 0xF1 announces a new record to the board. */
128 if (!maui_write(0xF1))
129 goto failure;
130 if (!maui_write('S'))
131 goto failure;
132 } else {
133 if (!maui_write(maui_os[i]))
134 goto failure;
135 }
136
 /* After each full line, wait (two attempts) for the 0x80 ACK. */
137 if (eol_seen) {
138 int c = 0;
139 int n;
140
141 eol_seen = 0;
142
143 for (n = 0; n < 2; n++) {
144 if (maui_wait(STAT_RX_AVAIL)) {
145 c = inb(HOST_DATA_PORT);
146 break;
147 }
148 }
149 if (c != 0x80) {
150 printk("Download not acknowledged\n");
151 return 0;
152 }
 /* Progress indicator: one dot per 10 acknowledged lines. */
153 else if (!(lines++ % 10))
154 printk(".");
155
156 if (done) {
157 printk("\n");
158 printk(KERN_INFO "Download complete\n");
159 return 1;
160 }
161 }
162 }
163 }
164 }
165
166failure:
167 printk("\n");
168 printk(KERN_ERR "Download failed!!!\n");
169 return 0;
170}
171
/*
 * Hard-reset and initialize the board: program the IRQ select bits,
 * verify the interrupt line fires, download the firmware via
 * download_code(), then switch the board into MPU401 (HW mode 1)
 * operation.  Returns 1 on success, 0 on failure.
 *
 * Only IRQs 9, 5, 12 and 15 can be encoded in the IRQ select bits.
 */
172static int __init maui_init(int irq)
173{
174 unsigned char bits;
175
176 switch (irq) {
177 case 9:
178 bits = 0x00;
179 break;
180 case 5:
181 bits = 0x08;
182 break;
183 case 12:
184 bits = 0x10;
185 break;
186 case 15:
187 bits = 0x18;
188 break;
189
190 default:
191 printk(KERN_ERR "Maui: Invalid IRQ %d\n", irq);
192 return 0;
193 }
194 outb((0x00), HOST_CTRL_PORT); /* Reset */
195 outb((bits), HOST_DATA_PORT); /* Set the IRQ bits */
196 outb((bits | 0x80), HOST_DATA_PORT); /* Set the IRQ bits again? */
197 outb((0x80), HOST_CTRL_PORT); /* Leave reset */
198 outb((0x80), HOST_CTRL_PORT); /* Leave reset */
199 outb((0xD0), HOST_CTRL_PORT); /* Cause interrupt */
200
 /*
  * Busy-wait for mauiintr() to set irq_ok, confirming the interrupt
  * line actually works.  NOTE(review): this sanity check is compiled
  * only under CONFIG_SMP — presumably because the handler cannot run
  * concurrently with this loop on UP; confirm intent.
  */
201#ifdef CONFIG_SMP
202 {
203 int i;
204 for (i = 0; i < 1000000 && !irq_ok; i++)
205 ;
206 if (!irq_ok)
207 return 0;
208 }
209#endif
210 outb((0x80), HOST_CTRL_PORT); /* Leave reset */
211
212 printk(KERN_INFO "Turtle Beach Maui initialization\n");
213
214 if (!download_code())
215 return 0;
216
217 outb((0xE0), HOST_CTRL_PORT); /* Normal operation */
218
219 /* Select mpu401 mode */
220
 /* Command 0xF0, argument 1 = set HW mode; the board ACKs with 0x80.
  * The command is retried once before giving up. */
221 maui_write(0xf0);
222 maui_write(1);
223 if (maui_read() != 0x80) {
224 maui_write(0xf0);
225 maui_write(1);
226 if (maui_read() != 0x80)
227 printk(KERN_ERR "Maui didn't acknowledge set HW mode command\n");
228 }
229 printk(KERN_INFO "Maui initialized OK\n");
230 return 1;
231}
232
233static int maui_short_wait(int mask) {
234 int i;
235
236 for (i = 0; i < 1000; i++) {
237 if (inb(HOST_STAT_PORT) & mask) {
238 return 1;
239 }
240 }
241 return 0;
242}
243
244static int maui_load_patch(int dev, int format, const char __user *addr,
245 int offs, int count, int pmgr_flag)
246{
247
248 struct sysex_info header;
249 unsigned long left, src_offs;
250 int hdr_size = (unsigned long) &header.data[0] - (unsigned long) &header;
251 int i;
252
253 if (format == SYSEX_PATCH) /* Handled by midi_synth.c */
254 return orig_load_patch(dev, format, addr, offs, count, pmgr_flag);
255
256 if (format != MAUI_PATCH)
257 {
258 printk(KERN_WARNING "Maui: Unknown patch format\n");
259 }
260 if (count < hdr_size) {
261/* printk("Maui error: Patch header too short\n");*/
262 return -EINVAL;
263 }
264 count -= hdr_size;
265
266 /*
267 * Copy the header from user space but ignore the first bytes which have
268 * been transferred already.
269 */
270
271 if(copy_from_user(&((char *) &header)[offs], &(addr)[offs], hdr_size - offs))
272 return -EFAULT;
273
274 if (count < header.len) {
275 printk(KERN_ERR "Maui warning: Host command record too short (%d<%d)\n", count, (int) header.len);
276 header.len = count;
277 }
278 left = header.len;
279 src_offs = 0;
280
281 for (i = 0; i < left; i++) {
282 unsigned char data;
283
284 if(get_user(*(unsigned char *) &data, (unsigned char __user *) &((addr)[hdr_size + i])))
285 return -EFAULT;
286 if (i == 0 && !(data & 0x80))
287 return -EINVAL;
288
289 if (maui_write(data) == -1)
290 return -EIO;
291 }
292
293 if ((i = maui_read()) != 0x80) {
294 if (i != -1)
295 printk("Maui: Error status %02x\n", i);
296 return -EIO;
297 }
298 return 0;
299}
300
/*
 * Detect and attach a Maui/Tropez card.  Reserves the MPU401 (2 byte)
 * and Maui-specific (6 byte) I/O regions and the IRQ, initializes the
 * DSP via maui_init() if firmware is available, queries hardware and
 * firmware versions plus free DRAM, then hands the MPU401 port over to
 * the mpu401 driver and hooks maui_load_patch() into the resulting
 * synth device.  Returns 1 on success, 0 on failure (all resources
 * released via the goto-cleanup ladder).
 */
301static int __init probe_maui(struct address_info *hw_config)
302{
303 struct resource *ports;
304 int this_dev;
305 int i;
306 int tmp1, tmp2, ret;
307
308 ports = request_region(hw_config->io_base, 2, "mpu401");
309 if (!ports)
310 return 0;
311
312 if (!request_region(hw_config->io_base + 2, 6, "Maui"))
313 goto out;
314
315 maui_base = hw_config->io_base;
316 maui_osp = hw_config->osp;
317
318 if (request_irq(hw_config->irq, mauiintr, 0, "Maui", NULL) < 0)
319 goto out2;
320
321 /*
322 * Initialize the processor if necessary
323 */
324
 /* Only (re)initialize if the board fails to answer a firmware
  * version query (command 0x9F) — i.e. firmware not yet running. */
325 if (maui_osLen > 0) {
326 if (!(inb(HOST_STAT_PORT) & STAT_TX_AVAIL) ||
327 !maui_write(0x9F) || /* Report firmware version */
328 !maui_short_wait(STAT_RX_AVAIL) ||
329 maui_read() == -1 || maui_read() == -1)
330 if (!maui_init(hw_config->irq))
331 goto out3;
332 }
333 if (!maui_write(0xCF)) /* Report hardware version */ {
334 printk(KERN_ERR "No WaveFront firmware detected (card uninitialized?)\n");
335 goto out3;
336 }
337 if ((tmp1 = maui_read()) == -1 || (tmp2 = maui_read()) == -1) {
338 printk(KERN_ERR "No WaveFront firmware detected (card uninitialized?)\n");
339 goto out3;
340 }
 /* 0xFF/0xFF: the bus floated high — no card is responding. */
341 if (tmp1 == 0xff || tmp2 == 0xff)
342 goto out3;
343 printk(KERN_DEBUG "WaveFront hardware version %d.%d\n", tmp1, tmp2);
344
345 if (!maui_write(0x9F)) /* Report firmware version */
346 goto out3;
347 if ((tmp1 = maui_read()) == -1 || (tmp2 = maui_read()) == -1)
348 goto out3;
349
350 printk(KERN_DEBUG "WaveFront firmware version %d.%d\n", tmp1, tmp2);
351
352 if (!maui_write(0x85)) /* Report free DRAM */
353 goto out3;
354 tmp1 = 0;
 /* DRAM size arrives as four 7-bit digits, least significant first. */
355 for (i = 0; i < 4; i++) {
356 tmp1 |= maui_read() << (7 * i);
357 }
358 printk(KERN_DEBUG "Available DRAM %dk\n", tmp1 / 1024);
359
 /* NOTE(review): probe_mpu401() may be called up to 1001 times here
  * (1000 in the loop plus the final call below) — looks like a retry
  * workaround for a slow-to-settle UART; confirm intent. */
360 for (i = 0; i < 1000; i++)
361 if (probe_mpu401(hw_config, ports))
362 break;
363
364 ret = probe_mpu401(hw_config, ports);
365 if (!ret)
366 goto out3;
367
368 conf_printf("Maui", hw_config);
369
 /* Negative IRQ presumably tells attach_mpu401() the IRQ is already
  * allocated by this driver — verify against mpu401.c. */
370 hw_config->irq *= -1;
371 hw_config->name = "Maui";
372 attach_mpu401(hw_config, THIS_MODULE);
373
374 if (hw_config->slots[1] != -1) /* The MPU401 driver installed itself */ {
375 struct synth_operations *synth;
376
377 this_dev = hw_config->slots[1];
378
379 /*
380 * Intercept patch loading calls so that they can be handled
381 * by the Maui driver.
382 */
383
384 synth = midi_devs[this_dev]->converter;
385 if (synth != NULL) {
386 synth->id = "MAUI";
387 orig_load_patch = synth->load_patch;
388 synth->load_patch = &maui_load_patch;
389 } else
390 printk(KERN_ERR "Maui: Can't install patch loader\n");
391 }
392 return 1;
393
394out3:
395 free_irq(hw_config->irq, NULL);
396out2:
397 release_region(hw_config->io_base + 2, 6);
398out:
399 release_region(hw_config->io_base, 2);
400 return 0;
401}
402
403static void __exit unload_maui(struct address_info *hw_config)
404{
405 int irq = hw_config->irq;
406 release_region(hw_config->io_base + 2, 6);
407 unload_mpu401(hw_config);
408
409 if (irq < 0)
410 irq = -irq;
411 if (irq > 0)
412 free_irq(irq, NULL);
413}
414
415static int fw_load;
416
417static struct address_info cfg;
418
419static int __initdata io = -1;
420static int __initdata irq = -1;
421
422module_param(io, int, 0);
423module_param(irq, int, 0);
424
425/*
426 * Install a Maui card. Needs mpu401 loaded already.
427 */
428
429static int __init init_maui(void)
430{
431 printk(KERN_INFO "Turtle beach Maui and Tropez driver, Copyright (C) by Hannu Savolainen 1993-1996\n");
432
433 cfg.io_base = io;
434 cfg.irq = irq;
435
436 if (cfg.io_base == -1 || cfg.irq == -1) {
437 printk(KERN_INFO "maui: irq and io must be set.\n");
438 return -EINVAL;
439 }
440
441 if (maui_os == NULL) {
442 fw_load = 1;
443 maui_osLen = mod_firmware_load("/etc/sound/oswf.mot", (char **) &maui_os);
444 }
445 if (probe_maui(&cfg) == 0)
446 return -ENODEV;
447
448 return 0;
449}
450
451static void __exit cleanup_maui(void)
452{
453 if (fw_load && maui_os)
454 vfree(maui_os);
455 unload_maui(&cfg);
456}
457
458module_init(init_maui);
459module_exit(cleanup_maui);
460
461#ifndef MODULE
462static int __init setup_maui(char *str)
463{
464 /* io, irq */
465 int ints[3];
466
467 str = get_options(str, ARRAY_SIZE(ints), ints);
468
469 io = ints[1];
470 irq = ints[2];
471
472 return 1;
473}
474
475__setup("maui=", setup_maui);
476#endif
477MODULE_LICENSE("GPL");
diff --git a/sound/oss/midi_syms.c b/sound/oss/midi_syms.c
deleted file mode 100644
index 5b146ddf5725..000000000000
--- a/sound/oss/midi_syms.c
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Exported symbols for midi driver.
3 */
4
5#include <linux/module.h>
6
 /* Dummy symbol referenced by MIDIbuf_init() in midibuf.c purely to
  * force this object file to be linked into the build. */
7char midi_syms_symbol;
8
9#include "sound_config.h"
10#define _MIDI_SYNTH_C_
11#include "midi_synth.h"
12
13EXPORT_SYMBOL(do_midi_msg);
14EXPORT_SYMBOL(midi_synth_open);
15EXPORT_SYMBOL(midi_synth_close);
16EXPORT_SYMBOL(midi_synth_ioctl);
17EXPORT_SYMBOL(midi_synth_kill_note);
18EXPORT_SYMBOL(midi_synth_start_note);
19EXPORT_SYMBOL(midi_synth_set_instr);
20EXPORT_SYMBOL(midi_synth_reset);
21EXPORT_SYMBOL(midi_synth_hw_control);
22EXPORT_SYMBOL(midi_synth_aftertouch);
23EXPORT_SYMBOL(midi_synth_controller);
24EXPORT_SYMBOL(midi_synth_panning);
25EXPORT_SYMBOL(midi_synth_setup_voice);
26EXPORT_SYMBOL(midi_synth_send_sysex);
27EXPORT_SYMBOL(midi_synth_bender);
28EXPORT_SYMBOL(midi_synth_load_patch);
29EXPORT_SYMBOL(MIDIbuf_avail);
diff --git a/sound/oss/midi_synth.c b/sound/oss/midi_synth.c
index d2ab5c08b616..9e450988ed36 100644
--- a/sound/oss/midi_synth.c
+++ b/sound/oss/midi_synth.c
@@ -84,6 +84,7 @@ do_midi_msg(int synthno, unsigned char *msg, int mlen)
84 ; 84 ;
85 } 85 }
86} 86}
87EXPORT_SYMBOL(do_midi_msg);
87 88
88static void 89static void
89midi_outc(int midi_dev, int data) 90midi_outc(int midi_dev, int data)
@@ -276,6 +277,7 @@ int midi_synth_ioctl(int dev, unsigned int cmd, void __user *arg)
276 return -EINVAL; 277 return -EINVAL;
277 } 278 }
278} 279}
280EXPORT_SYMBOL(midi_synth_ioctl);
279 281
280int 282int
281midi_synth_kill_note(int dev, int channel, int note, int velocity) 283midi_synth_kill_note(int dev, int channel, int note, int velocity)
@@ -342,6 +344,7 @@ midi_synth_kill_note(int dev, int channel, int note, int velocity)
342 344
343 return 0; 345 return 0;
344} 346}
347EXPORT_SYMBOL(midi_synth_kill_note);
345 348
346int 349int
347midi_synth_set_instr(int dev, int channel, int instr_no) 350midi_synth_set_instr(int dev, int channel, int instr_no)
@@ -364,6 +367,7 @@ midi_synth_set_instr(int dev, int channel, int instr_no)
364 367
365 return 0; 368 return 0;
366} 369}
370EXPORT_SYMBOL(midi_synth_set_instr);
367 371
368int 372int
369midi_synth_start_note(int dev, int channel, int note, int velocity) 373midi_synth_start_note(int dev, int channel, int note, int velocity)
@@ -405,6 +409,7 @@ midi_synth_start_note(int dev, int channel, int note, int velocity)
405 } 409 }
406 return 0; 410 return 0;
407} 411}
412EXPORT_SYMBOL(midi_synth_start_note);
408 413
409void 414void
410midi_synth_reset(int dev) 415midi_synth_reset(int dev)
@@ -412,6 +417,7 @@ midi_synth_reset(int dev)
412 417
413 leave_sysex(dev); 418 leave_sysex(dev);
414} 419}
420EXPORT_SYMBOL(midi_synth_reset);
415 421
416int 422int
417midi_synth_open(int dev, int mode) 423midi_synth_open(int dev, int mode)
@@ -444,6 +450,7 @@ midi_synth_open(int dev, int mode)
444 450
445 return 1; 451 return 1;
446} 452}
453EXPORT_SYMBOL(midi_synth_open);
447 454
448void 455void
449midi_synth_close(int dev) 456midi_synth_close(int dev)
@@ -459,11 +466,13 @@ midi_synth_close(int dev)
459 466
460 midi_devs[orig_dev]->close(orig_dev); 467 midi_devs[orig_dev]->close(orig_dev);
461} 468}
469EXPORT_SYMBOL(midi_synth_close);
462 470
463void 471void
464midi_synth_hw_control(int dev, unsigned char *event) 472midi_synth_hw_control(int dev, unsigned char *event)
465{ 473{
466} 474}
475EXPORT_SYMBOL(midi_synth_hw_control);
467 476
468int 477int
469midi_synth_load_patch(int dev, int format, const char __user *addr, 478midi_synth_load_patch(int dev, int format, const char __user *addr,
@@ -542,11 +551,13 @@ midi_synth_load_patch(int dev, int format, const char __user *addr,
542 midi_outc(orig_dev, 0xf7); 551 midi_outc(orig_dev, 0xf7);
543 return 0; 552 return 0;
544} 553}
545 554EXPORT_SYMBOL(midi_synth_load_patch);
555
546void midi_synth_panning(int dev, int channel, int pressure) 556void midi_synth_panning(int dev, int channel, int pressure)
547{ 557{
548} 558}
549 559EXPORT_SYMBOL(midi_synth_panning);
560
550void midi_synth_aftertouch(int dev, int channel, int pressure) 561void midi_synth_aftertouch(int dev, int channel, int pressure)
551{ 562{
552 int orig_dev = synth_devs[dev]->midi_dev; 563 int orig_dev = synth_devs[dev]->midi_dev;
@@ -576,6 +587,7 @@ void midi_synth_aftertouch(int dev, int channel, int pressure)
576 587
577 midi_outc(orig_dev, pressure); 588 midi_outc(orig_dev, pressure);
578} 589}
590EXPORT_SYMBOL(midi_synth_aftertouch);
579 591
580void 592void
581midi_synth_controller(int dev, int channel, int ctrl_num, int value) 593midi_synth_controller(int dev, int channel, int ctrl_num, int value)
@@ -604,6 +616,7 @@ midi_synth_controller(int dev, int channel, int ctrl_num, int value)
604 midi_outc(orig_dev, ctrl_num); 616 midi_outc(orig_dev, ctrl_num);
605 midi_outc(orig_dev, value & 0x7f); 617 midi_outc(orig_dev, value & 0x7f);
606} 618}
619EXPORT_SYMBOL(midi_synth_controller);
607 620
608void 621void
609midi_synth_bender(int dev, int channel, int value) 622midi_synth_bender(int dev, int channel, int value)
@@ -635,11 +648,13 @@ midi_synth_bender(int dev, int channel, int value)
635 midi_outc(orig_dev, value & 0x7f); 648 midi_outc(orig_dev, value & 0x7f);
636 midi_outc(orig_dev, (value >> 7) & 0x7f); 649 midi_outc(orig_dev, (value >> 7) & 0x7f);
637} 650}
651EXPORT_SYMBOL(midi_synth_bender);
638 652
639void 653void
640midi_synth_setup_voice(int dev, int voice, int channel) 654midi_synth_setup_voice(int dev, int voice, int channel)
641{ 655{
642} 656}
657EXPORT_SYMBOL(midi_synth_setup_voice);
643 658
644int 659int
645midi_synth_send_sysex(int dev, unsigned char *bytes, int len) 660midi_synth_send_sysex(int dev, unsigned char *bytes, int len)
@@ -695,3 +710,5 @@ midi_synth_send_sysex(int dev, unsigned char *bytes, int len)
695 710
696 return 0; 711 return 0;
697} 712}
713EXPORT_SYMBOL(midi_synth_send_sysex);
714
diff --git a/sound/oss/midibuf.c b/sound/oss/midibuf.c
index c0e4bbc22c80..a40be0cf1d97 100644
--- a/sound/oss/midibuf.c
+++ b/sound/oss/midibuf.c
@@ -414,18 +414,11 @@ unsigned int MIDIbuf_poll(int dev, struct file *file, poll_table * wait)
414} 414}
415 415
416 416
417void MIDIbuf_init(void)
418{
419 /* drag in midi_syms.o */
420 {
421 extern char midi_syms_symbol;
422 midi_syms_symbol = 0;
423 }
424}
425
426int MIDIbuf_avail(int dev) 417int MIDIbuf_avail(int dev)
427{ 418{
428 if (midi_in_buf[dev]) 419 if (midi_in_buf[dev])
429 return DATA_AVAIL (midi_in_buf[dev]); 420 return DATA_AVAIL (midi_in_buf[dev]);
430 return 0; 421 return 0;
431} 422}
423EXPORT_SYMBOL(MIDIbuf_avail);
424
diff --git a/sound/oss/mpu401.c b/sound/oss/mpu401.c
index 321f4c4b5a7b..162d07cc489f 100644
--- a/sound/oss/mpu401.c
+++ b/sound/oss/mpu401.c
@@ -432,16 +432,7 @@ static void mpu401_input_loop(struct mpu_config *devc)
432 devc->m_busy = 0; 432 devc->m_busy = 0;
433} 433}
434 434
435int intchk_mpu401(void *dev_id) 435static irqreturn_t mpuintr(int irq, void *dev_id, struct pt_regs *dummy)
436{
437 struct mpu_config *devc;
438 int dev = (int) dev_id;
439
440 devc = &dev_conf[dev];
441 return input_avail(devc);
442}
443
444irqreturn_t mpuintr(int irq, void *dev_id, struct pt_regs *dummy)
445{ 436{
446 struct mpu_config *devc; 437 struct mpu_config *devc;
447 int dev = (int) dev_id; 438 int dev = (int) dev_id;
@@ -1761,8 +1752,6 @@ static int mpu_timer_init(int midi_dev)
1761EXPORT_SYMBOL(probe_mpu401); 1752EXPORT_SYMBOL(probe_mpu401);
1762EXPORT_SYMBOL(attach_mpu401); 1753EXPORT_SYMBOL(attach_mpu401);
1763EXPORT_SYMBOL(unload_mpu401); 1754EXPORT_SYMBOL(unload_mpu401);
1764EXPORT_SYMBOL(intchk_mpu401);
1765EXPORT_SYMBOL(mpuintr);
1766 1755
1767static struct address_info cfg; 1756static struct address_info cfg;
1768 1757
diff --git a/sound/oss/mpu401.h b/sound/oss/mpu401.h
index bdc5bde641e6..84c0e9522ef7 100644
--- a/sound/oss/mpu401.h
+++ b/sound/oss/mpu401.h
@@ -10,5 +10,3 @@ int probe_mpu401(struct address_info *hw_config, struct resource *ports);
10int attach_mpu401(struct address_info * hw_config, struct module *owner); 10int attach_mpu401(struct address_info * hw_config, struct module *owner);
11void unload_mpu401(struct address_info *hw_info); 11void unload_mpu401(struct address_info *hw_info);
12 12
13int intchk_mpu401(void *dev_id);
14irqreturn_t mpuintr(int irq, void *dev_id, struct pt_regs * dummy);
diff --git a/sound/oss/opl3sa.c b/sound/oss/opl3sa.c
deleted file mode 100644
index 2535ed0b5fbf..000000000000
--- a/sound/oss/opl3sa.c
+++ /dev/null
@@ -1,329 +0,0 @@
1/*
2 * sound/oss/opl3sa.c
3 *
4 * Low level driver for Yamaha YMF701B aka OPL3-SA chip
5 *
6 *
7 *
8 * Copyright (C) by Hannu Savolainen 1993-1997
9 *
10 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
11 * Version 2 (June 1991). See the "COPYING" file distributed with this software
12 * for more info.
13 *
14 * Changes:
15 * Alan Cox Modularisation
16 * Christoph Hellwig Adapted to module_init/module_exit
17 * Arnaldo C. de Melo got rid of attach_uart401
18 *
19 * FIXME:
20 * Check for install of mpu etc is wrong, should check result of the mss stuff
21 */
22
23#include <linux/init.h>
24#include <linux/module.h>
25#include <linux/spinlock.h>
26
27#undef SB_OK
28
29#include "sound_config.h"
30
31#include "ad1848.h"
32#include "mpu401.h"
33
34#ifdef SB_OK
35#include "sb.h"
36static int sb_initialized;
37#endif
38
39static DEFINE_SPINLOCK(lock);
40
41static unsigned char opl3sa_read(int addr)
42{
43 unsigned long flags;
44 unsigned char tmp;
45
46 spin_lock_irqsave(&lock,flags);
47 outb((0x1d), 0xf86); /* password */
48 outb(((unsigned char) addr), 0xf86); /* address */
49 tmp = inb(0xf87); /* data */
50 spin_unlock_irqrestore(&lock,flags);
51
52 return tmp;
53}
54
55static void opl3sa_write(int addr, int data)
56{
57 unsigned long flags;
58
59 spin_lock_irqsave(&lock,flags);
60 outb((0x1d), 0xf86); /* password */
61 outb(((unsigned char) addr), 0xf86); /* address */
62 outb(((unsigned char) data), 0xf87); /* data */
63 spin_unlock_irqrestore(&lock,flags);
64}
65
66static int __init opl3sa_detect(void)
67{
68 int tmp;
69
70 if (((tmp = opl3sa_read(0x01)) & 0xc4) != 0x04)
71 {
72 DDB(printk("OPL3-SA detect error 1 (%x)\n", opl3sa_read(0x01)));
73 /* return 0; */
74 }
75
76 /*
77 * Check that the password feature has any effect
78 */
79
80 if (inb(0xf87) == tmp)
81 {
82 DDB(printk("OPL3-SA detect failed 2 (%x/%x)\n", tmp, inb(0xf87)));
83 return 0;
84 }
85 tmp = (opl3sa_read(0x04) & 0xe0) >> 5;
86
87 if (tmp != 0 && tmp != 1)
88 {
89 DDB(printk("OPL3-SA detect failed 3 (%d)\n", tmp));
90 return 0;
91 }
92 DDB(printk("OPL3-SA mode %x detected\n", tmp));
93
94 opl3sa_write(0x01, 0x00); /* Disable MSS */
95 opl3sa_write(0x02, 0x00); /* Disable SB */
96 opl3sa_write(0x03, 0x00); /* Disable MPU */
97
98 return 1;
99}
100
101/*
102 * Probe and attach routines for the Windows Sound System mode of
103 * OPL3-SA
104 */
105
106static int __init probe_opl3sa_wss(struct address_info *hw_config, struct resource *ports)
107{
108 unsigned char tmp = 0x24; /* WSS enable */
109
110 /*
111 * Check if the IO port returns valid signature. The original MS Sound
112 * system returns 0x04 while some cards (OPL3-SA for example)
113 * return 0x00.
114 */
115
116 if (!opl3sa_detect())
117 {
118 printk(KERN_ERR "OSS: OPL3-SA chip not found\n");
119 return 0;
120 }
121
122 switch (hw_config->io_base)
123 {
124 case 0x530:
125 tmp |= 0x00;
126 break;
127 case 0xe80:
128 tmp |= 0x08;
129 break;
130 case 0xf40:
131 tmp |= 0x10;
132 break;
133 case 0x604:
134 tmp |= 0x18;
135 break;
136 default:
137 printk(KERN_ERR "OSS: Unsupported OPL3-SA/WSS base %x\n", hw_config->io_base);
138 return 0;
139 }
140
141 opl3sa_write(0x01, tmp); /* WSS setup register */
142
143 return probe_ms_sound(hw_config, ports);
144}
145
146static void __init attach_opl3sa_wss(struct address_info *hw_config, struct resource *ports)
147{
148 int nm = num_mixers;
149
150 /* FIXME */
151 attach_ms_sound(hw_config, ports, THIS_MODULE);
152 if (num_mixers > nm) /* A mixer was installed */
153 {
154 AD1848_REROUTE(SOUND_MIXER_LINE1, SOUND_MIXER_CD);
155 AD1848_REROUTE(SOUND_MIXER_LINE2, SOUND_MIXER_SYNTH);
156 AD1848_REROUTE(SOUND_MIXER_LINE3, SOUND_MIXER_LINE);
157 }
158}
159
160
161static int __init probe_opl3sa_mpu(struct address_info *hw_config)
162{
163 unsigned char conf;
164 static signed char irq_bits[] = {
165 -1, -1, -1, -1, -1, 1, -1, 2, -1, 3, 4
166 };
167
168 if (hw_config->irq > 10)
169 {
170 printk(KERN_ERR "OPL3-SA: Bad MPU IRQ %d\n", hw_config->irq);
171 return 0;
172 }
173 if (irq_bits[hw_config->irq] == -1)
174 {
175 printk(KERN_ERR "OPL3-SA: Bad MPU IRQ %d\n", hw_config->irq);
176 return 0;
177 }
178 switch (hw_config->io_base)
179 {
180 case 0x330:
181 conf = 0x00;
182 break;
183 case 0x332:
184 conf = 0x20;
185 break;
186 case 0x334:
187 conf = 0x40;
188 break;
189 case 0x300:
190 conf = 0x60;
191 break;
192 default:
193 return 0; /* Invalid port */
194 }
195
196 conf |= 0x83; /* MPU & OPL3 (synth) & game port enable */
197 conf |= irq_bits[hw_config->irq] << 2;
198
199 opl3sa_write(0x03, conf);
200
201 hw_config->name = "OPL3-SA (MPU401)";
202
203 return probe_uart401(hw_config, THIS_MODULE);
204}
205
206static void __exit unload_opl3sa_wss(struct address_info *hw_config)
207{
208 int dma2 = hw_config->dma2;
209
210 if (dma2 == -1)
211 dma2 = hw_config->dma;
212
213 release_region(0xf86, 2);
214 release_region(hw_config->io_base, 4);
215
216 ad1848_unload(hw_config->io_base + 4,
217 hw_config->irq,
218 hw_config->dma,
219 dma2,
220 0);
221 sound_unload_audiodev(hw_config->slots[0]);
222}
223
224static inline void __exit unload_opl3sa_mpu(struct address_info *hw_config)
225{
226 unload_uart401(hw_config);
227}
228
229#ifdef SB_OK
230static inline void __exit unload_opl3sa_sb(struct address_info *hw_config)
231{
232 sb_dsp_unload(hw_config);
233}
234#endif
235
236static int found_mpu;
237
238static struct address_info cfg;
239static struct address_info cfg_mpu;
240
241static int __initdata io = -1;
242static int __initdata irq = -1;
243static int __initdata dma = -1;
244static int __initdata dma2 = -1;
245static int __initdata mpu_io = -1;
246static int __initdata mpu_irq = -1;
247
248module_param(io, int, 0);
249module_param(irq, int, 0);
250module_param(dma, int, 0);
251module_param(dma2, int, 0);
252module_param(mpu_io, int, 0);
253module_param(mpu_irq, int, 0);
254
255static int __init init_opl3sa(void)
256{
257 struct resource *ports;
258 if (io == -1 || irq == -1 || dma == -1) {
259 printk(KERN_ERR "opl3sa: dma, irq and io must be set.\n");
260 return -EINVAL;
261 }
262
263 cfg.io_base = io;
264 cfg.irq = irq;
265 cfg.dma = dma;
266 cfg.dma2 = dma2;
267
268 cfg_mpu.io_base = mpu_io;
269 cfg_mpu.irq = mpu_irq;
270
271 ports = request_region(io + 4, 4, "ad1848");
272 if (!ports)
273 return -EBUSY;
274
275 if (!request_region(0xf86, 2, "OPL3-SA"))/* Control port is busy */ {
276 release_region(io + 4, 4);
277 return 0;
278 }
279
280 if (!request_region(io, 4, "WSS config")) {
281 release_region(0x86, 2);
282 release_region(io + 4, 4);
283 return 0;
284 }
285
286 if (probe_opl3sa_wss(&cfg, ports) == 0) {
287 release_region(0xf86, 2);
288 release_region(io, 4);
289 release_region(io + 4, 4);
290 return -ENODEV;
291 }
292
293 found_mpu=probe_opl3sa_mpu(&cfg_mpu);
294
295 attach_opl3sa_wss(&cfg, ports);
296 return 0;
297}
298
299static void __exit cleanup_opl3sa(void)
300{
301 if(found_mpu)
302 unload_opl3sa_mpu(&cfg_mpu);
303 unload_opl3sa_wss(&cfg);
304}
305
306module_init(init_opl3sa);
307module_exit(cleanup_opl3sa);
308
309#ifndef MODULE
310static int __init setup_opl3sa(char *str)
311{
312 /* io, irq, dma, dma2, mpu_io, mpu_irq */
313 int ints[7];
314
315 str = get_options(str, ARRAY_SIZE(ints), ints);
316
317 io = ints[1];
318 irq = ints[2];
319 dma = ints[3];
320 dma2 = ints[4];
321 mpu_io = ints[5];
322 mpu_irq = ints[6];
323
324 return 1;
325}
326
327__setup("opl3sa=", setup_opl3sa);
328#endif
329MODULE_LICENSE("GPL");
diff --git a/sound/oss/rme96xx.c b/sound/oss/rme96xx.c
deleted file mode 100644
index f17d25b6f836..000000000000
--- a/sound/oss/rme96xx.c
+++ /dev/null
@@ -1,1857 +0,0 @@
1/* (C) 2000 Guenter Geiger <geiger@debian.org>
2 with copy/pastes from the driver of Winfried Ritsch <ritsch@iem.kug.ac.at>
3 based on es1370.c
4
5
6
7 * 10 Jan 2001: 0.1 initial version
8 * 19 Jan 2001: 0.2 fixed bug in select()
9 * 27 Apr 2001: 0.3 more than one card usable
10 * 11 May 2001: 0.4 fixed for SMP, included into kernel source tree
11 * 17 May 2001: 0.5 draining code didn't work on new cards
12 * 18 May 2001: 0.6 remove synchronize_irq() call
13 * 17 Jul 2001: 0.7 updated xrmectrl to make it work for newer cards
14 * 2 feb 2002: 0.8 fixed pci device handling, see below for patches from Heiko (Thanks!)
15 Marcus Meissner <Marcus.Meissner@caldera.de>
16
17 Modifications - Heiko Purnhagen <purnhage@tnt.uni-hannover.de>
18 HP20020108 fixed handling of "large" read()
19 HP20020116 towards REV 1.5 support, based on ALSA's card-rme9652.c
20 HP20020118 made mixer ioctl and handling of devices>1 more safe
21 HP20020201 fixed handling of "large" read() properly
22 added REV 1.5 S/P-DIF receiver support
23 SNDCTL_DSP_SPEED now returns the actual speed
24 * 10 Aug 2002: added synchronize_irq() again
25
26TODO:
27 - test more than one card --- done
28 - check for pci IOREGION (see es1370) in rme96xx_probe ??
29 - error detection
30 - mmap interface
31 - mixer mmap interface
32 - mixer ioctl
33 - get rid of noise upon first open (why ??)
34 - allow multiple open (at least for read)
35 - allow multiple open for non overlapping regions
36 - recheck the multiple devices part (offsets of different devices, etc)
37 - do decent draining in _release --- done
38 - SMP support
39 - what about using fragstotal>2 for small fragsize? (HP20020118)
40 - add support for AFMT_S32_LE
41*/
42
43#ifndef RMEVERSION
44#define RMEVERSION "0.8"
45#endif
46
47#include <linux/module.h>
48#include <linux/string.h>
49#include <linux/sched.h>
50#include <linux/sound.h>
51#include <linux/soundcard.h>
52#include <linux/pci.h>
53#include <linux/smp_lock.h>
54#include <linux/delay.h>
55#include <linux/slab.h>
56#include <linux/interrupt.h>
57#include <linux/init.h>
58#include <linux/interrupt.h>
59#include <linux/poll.h>
60#include <linux/wait.h>
61#include <linux/mutex.h>
62
63#include <asm/dma.h>
64#include <asm/page.h>
65
66#include "rme96xx.h"
67
68#define NR_DEVICE 2
69
70static int devices = 1;
71module_param(devices, int, 0);
72MODULE_PARM_DESC(devices, "number of dsp devices allocated by the driver");
73
74
75MODULE_AUTHOR("Guenter Geiger, geiger@debian.org");
76MODULE_DESCRIPTION("RME9652/36 \"Hammerfall\" Driver");
77MODULE_LICENSE("GPL");
78
79
80#ifdef DEBUG
81#define DBG(x) printk("RME_DEBUG:");x
82#define COMM(x) printk("RME_COMM: " x "\n");
83#else
84#define DBG(x) while (0) {}
85#define COMM(x)
86#endif
87
88/*--------------------------------------------------------------------------
89 Preporcessor Macros and Definitions
90 --------------------------------------------------------------------------*/
91
92#define RME96xx_MAGIC 0x6473
93
94/* Registers-Space in offsets from base address with 16MByte size */
95
96#define RME96xx_IO_EXTENT 16l*1024l*1024l
97#define RME96xx_CHANNELS_PER_CARD 26
98
99/* Write - Register */
100
101/* 0,4,8,12,16,20,24,28 ... hardware init (erasing fifo-pointer intern) */
102#define RME96xx_num_of_init_regs 8
103
104#define RME96xx_init_buffer (0/4)
105#define RME96xx_play_buffer (32/4) /* pointer to 26x64kBit RAM from mainboard */
106#define RME96xx_rec_buffer (36/4) /* pointer to 26x64kBit RAM from mainboard */
107#define RME96xx_control_register (64/4) /* exact meaning see below */
108#define RME96xx_irq_clear (96/4) /* irq acknowledge */
109#define RME96xx_time_code (100/4) /* if used with alesis adat */
110#define RME96xx_thru_base (128/4) /* 132...228 Thru for 26 channels */
111#define RME96xx_thru_channels RME96xx_CHANNELS_PER_CARD
112
113/* Read Register */
114
115#define RME96xx_status_register 0 /* meaning see below */
116
117
118
119/* Status Register: */
120/* ------------------------------------------------------------------------ */
121#define RME96xx_IRQ 0x0000001 /* IRQ is High if not reset by RMExx_irq_clear */
122#define RME96xx_lock_2 0x0000002 /* ADAT 3-PLL: 1=locked, 0=unlocked */
123#define RME96xx_lock_1 0x0000004 /* ADAT 2-PLL: 1=locked, 0=unlocked */
124#define RME96xx_lock_0 0x0000008 /* ADAT 1-PLL: 1=locked, 0=unlocked */
125
126#define RME96xx_fs48 0x0000010 /* sample rate 0 ...44.1/88.2, 1 ... 48/96 Khz */
127#define RME96xx_wsel_rd 0x0000020 /* if Word-Clock is used and valid then 1 */
128#define RME96xx_buf_pos1 0x0000040 /* Bit 6..15 : Position of buffer-pointer in 64Bytes-blocks */
129#define RME96xx_buf_pos2 0x0000080 /* resolution +/- 1 64Byte/block (since 64Bytes bursts) */
130
131#define RME96xx_buf_pos3 0x0000100 /* 10 bits = 1024 values */
132#define RME96xx_buf_pos4 0x0000200 /* if we mask off the first 6 bits, we can take the status */
133#define RME96xx_buf_pos5 0x0000400 /* register as sample counter in the hardware buffer */
134#define RME96xx_buf_pos6 0x0000800
135
136#define RME96xx_buf_pos7 0x0001000
137#define RME96xx_buf_pos8 0x0002000
138#define RME96xx_buf_pos9 0x0004000
139#define RME96xx_buf_pos10 0x0008000
140
141#define RME96xx_sync_2 0x0010000 /* if ADAT-IN3 synced to system clock */
142#define RME96xx_sync_1 0x0020000 /* if ADAT-IN2 synced to system clock */
143#define RME96xx_sync_0 0x0040000 /* if ADAT-IN1 synced to system clock */
144#define RME96xx_DS_rd 0x0080000 /* 1=Double Speed, 0=Normal Speed */
145
146#define RME96xx_tc_busy 0x0100000 /* 1=time-code copy in progress (960ms) */
147#define RME96xx_tc_out 0x0200000 /* time-code out bit */
148#define RME96xx_F_0 0x0400000 /* 000=64kHz, 100=88.2kHz, 011=96kHz */
149#define RME96xx_F_1 0x0800000 /* 111=32kHz, 110=44.1kHz, 101=48kHz, */
150
151#define RME96xx_F_2 0x1000000 /* 001=Rev 1.5+ external Crystal Chip */
152#define RME96xx_ERF 0x2000000 /* Error-Flag of SDPIF Receiver (1=No Lock)*/
153#define RME96xx_buffer_id 0x4000000 /* toggles by each interrupt on rec/play */
154#define RME96xx_tc_valid 0x8000000 /* 1 = a signal is detected on time-code input */
155#define RME96xx_SPDIF_READ 0x10000000 /* byte available from Rev 1.5+ SPDIF interface */
156
157/* Status Register Fields */
158
159#define RME96xx_lock (RME96xx_lock_0|RME96xx_lock_1|RME96xx_lock_2)
160#define RME96xx_sync (RME96xx_sync_0|RME96xx_sync_1|RME96xx_sync_2)
161#define RME96xx_F (RME96xx_F_0|RME96xx_F_1|RME96xx_F_2)
162#define rme96xx_decode_spdif_rate(x) ((x)>>22)
163
164/* Bit 6..15 : h/w buffer pointer */
165#define RME96xx_buf_pos 0x000FFC0
166/* Bits 31,30,29 are bits 5,4,3 of h/w pointer position on later
167 Rev G EEPROMS and Rev 1.5 cards or later.
168*/
169#define RME96xx_REV15_buf_pos(x) ((((x)&0xE0000000)>>26)|((x)&RME96xx_buf_pos))
170
171
172/* Control-Register: */
173/*--------------------------------------------------------------------------------*/
174
175#define RME96xx_start_bit 0x0001 /* start record/play */
176#define RME96xx_latency0 0x0002 /* Buffer size / latency */
177#define RME96xx_latency1 0x0004 /* buffersize = 512Bytes * 2^n */
178#define RME96xx_latency2 0x0008 /* 0=64samples ... 7=8192samples */
179
180#define RME96xx_Master 0x0010 /* Clock Mode 1=Master, 0=Slave/Auto */
181#define RME96xx_IE 0x0020 /* Interupt Enable */
182#define RME96xx_freq 0x0040 /* samplerate 0=44.1/88.2, 1=48/96 kHz*/
183#define RME96xx_freq1 0x0080 /* samplerate 0=32 kHz, 1=other rates ??? (from ALSA, but may be wrong) */
184#define RME96xx_DS 0x0100 /* double speed 0=44.1/48, 1=88.2/96 Khz */
185#define RME96xx_PRO 0x0200 /* SPDIF-OUT 0=consumer, 1=professional */
186#define RME96xx_EMP 0x0400 /* SPDIF-OUT emphasis 0=off, 1=on */
187#define RME96xx_Dolby 0x0800 /* SPDIF-OUT non-audio bit 1=set, 0=unset */
188
189#define RME96xx_opt_out 0x1000 /* use 1st optical OUT as SPDIF: 1=yes, 0=no */
190#define RME96xx_wsel 0x2000 /* use Wordclock as sync (overwrites master) */
191#define RME96xx_inp_0 0x4000 /* SPDIF-IN 00=optical (ADAT1), */
192#define RME96xx_inp_1 0x8000 /* 01=coaxial (Cinch), 10=internal CDROM */
193
194#define RME96xx_SyncRef0 0x10000 /* preferred sync-source in autosync */
195#define RME96xx_SyncRef1 0x20000 /* 00=ADAT1, 01=ADAT2, 10=ADAT3, 11=SPDIF */
196
197#define RME96xx_SPDIF_RESET (1<<18) /* Rev 1.5+: h/w SPDIF receiver */
198#define RME96xx_SPDIF_SELECT (1<<19)
199#define RME96xx_SPDIF_CLOCK (1<<20)
200#define RME96xx_SPDIF_WRITE (1<<21)
201#define RME96xx_ADAT1_INTERNAL (1<<22) /* Rev 1.5+: if set, internal CD connector carries ADAT */
202
203
204#define RME96xx_ctrl_init (RME96xx_latency0 |\
205 RME96xx_Master |\
206 RME96xx_inp_1)
207
208
209
210/* Control register fields and shortcuts */
211
212#define RME96xx_latency (RME96xx_latency0|RME96xx_latency1|RME96xx_latency2)
213#define RME96xx_inp (RME96xx_inp_0|RME96xx_inp_1)
214#define RME96xx_SyncRef (RME96xx_SyncRef0|RME96xx_SyncRef1)
215#define RME96xx_mixer_allowed (RME96xx_Master|RME96xx_PRO|RME96xx_EMP|RME96xx_Dolby|RME96xx_opt_out|RME96xx_wsel|RME96xx_inp|RME96xx_SyncRef|RME96xx_ADAT1_INTERNAL)
216
217/* latency = 512Bytes * 2^n, where n is made from Bit3 ... Bit1 (??? HP20020201) */
218
219#define RME96xx_SET_LATENCY(x) (((x)&0x7)<<1)
220#define RME96xx_GET_LATENCY(x) (((x)>>1)&0x7)
221#define RME96xx_SET_inp(x) (((x)&0x3)<<14)
222#define RME96xx_GET_inp(x) (((x)>>14)&0x3)
223#define RME96xx_SET_SyncRef(x) (((x)&0x3)<<17)
224#define RME96xx_GET_SyncRef(x) (((x)>>17)&0x3)
225
226
227/* buffer sizes */
228#define RME96xx_BYTES_PER_SAMPLE 4 /* sizeof(u32) */
229#define RME_16K 16*1024
230
231#define RME96xx_DMA_MAX_SAMPLES (RME_16K)
232#define RME96xx_DMA_MAX_SIZE (RME_16K * RME96xx_BYTES_PER_SAMPLE)
233#define RME96xx_DMA_MAX_SIZE_ALL (RME96xx_DMA_MAX_SIZE * RME96xx_CHANNELS_PER_CARD)
234
235#define RME96xx_NUM_OF_FRAGMENTS 2
236#define RME96xx_FRAGMENT_MAX_SIZE (RME96xx_DMA_MAX_SIZE/2)
237#define RME96xx_FRAGMENT_MAX_SAMPLES (RME96xx_DMA_MAX_SAMPLES/2)
238#define RME96xx_MAX_LATENCY 7 /* 16k samples */
239
240
241#define RME96xx_MAX_DEVS 4 /* we provide some OSS stereodevs */
242#define RME96xx_MASK_DEVS 0x3 /* RME96xx_MAX_DEVS-1 */
243
244#define RME_MESS "rme96xx:"
245/*------------------------------------------------------------------------
246 Types, struct and function declarations
247 ------------------------------------------------------------------------*/
248
249
250/* --------------------------------------------------------------------- */
251
252static const char invalid_magic[] = KERN_CRIT RME_MESS" invalid magic value\n";
253
254#define VALIDATE_STATE(s) \
255({ \
256 if (!(s) || (s)->magic != RME96xx_MAGIC) { \
257 printk(invalid_magic); \
258 return -ENXIO; \
259 } \
260})
261
262/* --------------------------------------------------------------------- */
263
264
265static struct file_operations rme96xx_audio_fops;
266static struct file_operations rme96xx_mixer_fops;
267static int numcards;
268
269typedef int32_t raw_sample_t;
270
271typedef struct _rme96xx_info {
272
273 /* hardware settings */
274 int magic;
275 struct pci_dev * pcidev; /* pci_dev structure */
276 unsigned long __iomem *iobase;
277 unsigned int irq;
278
279 /* list of rme96xx devices */
280 struct list_head devs;
281
282 spinlock_t lock;
283
284 u32 *recbuf; /* memory for rec buffer */
285 u32 *playbuf; /* memory for play buffer */
286
287 u32 control_register;
288
289 u32 thru_bits; /* thru 1=on, 0=off channel 1=Bit1... channel 26= Bit26 */
290
291 int hw_rev; /* h/w rev * 10 (i.e. 1.5 has hw_rev = 15) */
292 char *card_name; /* hammerfall or hammerfall light names */
293
294 int open_count; /* unused ??? HP20020201 */
295
296 int rate;
297 int latency;
298 unsigned int fragsize;
299 int started;
300
301 int hwptr; /* can be negativ because of pci burst offset */
302 unsigned int hwbufid; /* set by interrupt, buffer which is written/read now */
303
304 struct dmabuf {
305
306 unsigned int format;
307 int formatshift;
308 int inchannels; /* number of channels for device */
309 int outchannels; /* number of channels for device */
310 int mono; /* if true, we play mono on 2 channels */
311 int inoffset; /* which channel is considered the first one */
312 int outoffset;
313
314 /* state */
315 int opened; /* open() made */
316 int started; /* first write/read */
317 int mmapped; /* mmap */
318 int open_mode;
319
320 struct _rme96xx_info *s;
321
322 /* pointer to read/write position in buffer */
323 unsigned readptr;
324 unsigned writeptr;
325
326 unsigned error; /* over/underruns cleared on sync again */
327
328 /* waiting and locking */
329 wait_queue_head_t wait;
330 struct mutex open_mutex;
331 wait_queue_head_t open_wait;
332
333 } dma[RME96xx_MAX_DEVS];
334
335 int dspnum[RME96xx_MAX_DEVS]; /* register with sound subsystem */
336 int mixer; /* register with sound subsystem */
337} rme96xx_info;
338
339
340/* fiddling with the card (first level hardware control) */
341
342static inline void rme96xx_set_ctrl(rme96xx_info* s,int mask)
343{
344
345 s->control_register|=mask;
346 writel(s->control_register,s->iobase + RME96xx_control_register);
347
348}
349
350static inline void rme96xx_unset_ctrl(rme96xx_info* s,int mask)
351{
352
353 s->control_register&=(~mask);
354 writel(s->control_register,s->iobase + RME96xx_control_register);
355
356}
357
358static inline int rme96xx_get_sample_rate_status(rme96xx_info* s)
359{
360 int val;
361 u32 status;
362 status = readl(s->iobase + RME96xx_status_register);
363 val = (status & RME96xx_fs48) ? 48000 : 44100;
364 if (status & RME96xx_DS_rd)
365 val *= 2;
366 return val;
367}
368
369static inline int rme96xx_get_sample_rate_ctrl(rme96xx_info* s)
370{
371 int val;
372 val = (s->control_register & RME96xx_freq) ? 48000 : 44100;
373 if (s->control_register & RME96xx_DS)
374 val *= 2;
375 return val;
376}
377
378
379/* code from ALSA card-rme9652.c for rev 1.5 SPDIF receiver HP 20020201 */
380
381static void rme96xx_spdif_set_bit (rme96xx_info* s, int mask, int onoff)
382{
383 if (onoff)
384 s->control_register |= mask;
385 else
386 s->control_register &= ~mask;
387
388 writel(s->control_register,s->iobase + RME96xx_control_register);
389}
390
391static void rme96xx_spdif_write_byte (rme96xx_info* s, const int val)
392{
393 long mask;
394 long i;
395
396 for (i = 0, mask = 0x80; i < 8; i++, mask >>= 1) {
397 if (val & mask)
398 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_WRITE, 1);
399 else
400 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_WRITE, 0);
401
402 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_CLOCK, 1);
403 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_CLOCK, 0);
404 }
405}
406
407static int rme96xx_spdif_read_byte (rme96xx_info* s)
408{
409 long mask;
410 long val;
411 long i;
412
413 val = 0;
414
415 for (i = 0, mask = 0x80; i < 8; i++, mask >>= 1) {
416 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_CLOCK, 1);
417 if (readl(s->iobase + RME96xx_status_register) & RME96xx_SPDIF_READ)
418 val |= mask;
419 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_CLOCK, 0);
420 }
421
422 return val;
423}
424
425static void rme96xx_write_spdif_codec (rme96xx_info* s, const int address, const int data)
426{
427 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_SELECT, 1);
428 rme96xx_spdif_write_byte (s, 0x20);
429 rme96xx_spdif_write_byte (s, address);
430 rme96xx_spdif_write_byte (s, data);
431 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_SELECT, 0);
432}
433
434
435static int rme96xx_spdif_read_codec (rme96xx_info* s, const int address)
436{
437 int ret;
438
439 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_SELECT, 1);
440 rme96xx_spdif_write_byte (s, 0x20);
441 rme96xx_spdif_write_byte (s, address);
442 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_SELECT, 0);
443 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_SELECT, 1);
444
445 rme96xx_spdif_write_byte (s, 0x21);
446 ret = rme96xx_spdif_read_byte (s);
447 rme96xx_spdif_set_bit (s, RME96xx_SPDIF_SELECT, 0);
448
449 return ret;
450}
451
452static void rme96xx_initialize_spdif_receiver (rme96xx_info* s)
453{
454 /* XXX what unsets this ? */
455 /* no idea ??? HP 20020201 */
456
457 s->control_register |= RME96xx_SPDIF_RESET;
458
459 rme96xx_write_spdif_codec (s, 4, 0x40);
460 rme96xx_write_spdif_codec (s, 17, 0x13);
461 rme96xx_write_spdif_codec (s, 6, 0x02);
462}
463
464static inline int rme96xx_spdif_sample_rate (rme96xx_info *s, int *spdifrate)
465{
466 unsigned int rate_bits;
467
468 *spdifrate = 0x1;
469 if (readl(s->iobase + RME96xx_status_register) & RME96xx_ERF) {
470 return -1; /* error condition */
471 }
472
473 if (s->hw_rev == 15) {
474
475 int x, y, ret;
476
477 x = rme96xx_spdif_read_codec (s, 30);
478
479 if (x != 0)
480 y = 48000 * 64 / x;
481 else
482 y = 0;
483
484 if (y > 30400 && y < 33600) {ret = 32000; *spdifrate = 0x7;}
485 else if (y > 41900 && y < 46000) {ret = 44100; *spdifrate = 0x6;}
486 else if (y > 46000 && y < 50400) {ret = 48000; *spdifrate = 0x5;}
487 else if (y > 60800 && y < 67200) {ret = 64000; *spdifrate = 0x0;}
488 else if (y > 83700 && y < 92000) {ret = 88200; *spdifrate = 0x4;}
489 else if (y > 92000 && y < 100000) {ret = 96000; *spdifrate = 0x3;}
490 else {ret = 0; *spdifrate = 0x1;}
491 return ret;
492 }
493
494 rate_bits = readl(s->iobase + RME96xx_status_register) & RME96xx_F;
495
496 switch (*spdifrate = rme96xx_decode_spdif_rate(rate_bits)) {
497 case 0x7:
498 return 32000;
499 break;
500
501 case 0x6:
502 return 44100;
503 break;
504
505 case 0x5:
506 return 48000;
507 break;
508
509 case 0x4:
510 return 88200;
511 break;
512
513 case 0x3:
514 return 96000;
515 break;
516
517 case 0x0:
518 return 64000;
519 break;
520
521 default:
522 /* was an ALSA warning ...
523 snd_printk("%s: unknown S/PDIF input rate (bits = 0x%x)\n",
524 s->card_name, rate_bits);
525 */
526 return 0;
527 break;
528 }
529}
530
531/* end of code from ALSA card-rme9652.c */
532
533
534
535/* the hwbuf in the status register seems to have some jitter, to get rid of
536 it, we first only let the numbers grow, to be on the secure side we
537 subtract a certain amount RME96xx_BURSTBYTES from the resulting number */
538
539/* the function returns the hardware pointer in bytes */
540#define RME96xx_BURSTBYTES -64 /* bytes by which hwptr could be off */
541
542static inline int rme96xx_gethwptr(rme96xx_info* s,int exact)
543{
544 unsigned long flags;
545 if (exact) {
546 unsigned int hwp;
547/* the hwptr seems to be rather unreliable :(, so we don't use it */
548 spin_lock_irqsave(&s->lock,flags);
549
550 hwp = readl(s->iobase + RME96xx_status_register) & 0xffc0;
551 s->hwptr = (hwp < s->hwptr) ? s->hwptr : hwp;
552// s->hwptr = hwp;
553
554 spin_unlock_irqrestore(&s->lock,flags);
555 return (s->hwptr+RME96xx_BURSTBYTES) & ((s->fragsize<<1)-1);
556 }
557 return (s->hwbufid ? s->fragsize : 0);
558}
559
560static inline void rme96xx_setlatency(rme96xx_info* s,int l)
561{
562 s->latency = l;
563 s->fragsize = 1<<(8+l);
564 rme96xx_unset_ctrl(s,RME96xx_latency);
565 rme96xx_set_ctrl(s,RME96xx_SET_LATENCY(l));
566}
567
568
569static void rme96xx_clearbufs(struct dmabuf* dma)
570{
571 int i,j;
572 unsigned long flags;
573
574 /* clear dmabufs */
575 for(i=0;i<devices;i++) {
576 for (j=0;j<dma->outchannels + dma->mono;j++)
577 memset(&dma->s->playbuf[(dma->outoffset + j)*RME96xx_DMA_MAX_SAMPLES],
578 0, RME96xx_DMA_MAX_SIZE);
579 }
580 spin_lock_irqsave(&dma->s->lock,flags);
581 dma->writeptr = 0;
582 dma->readptr = 0;
583 spin_unlock_irqrestore(&dma->s->lock,flags);
584}
585
586static int rme96xx_startcard(rme96xx_info *s,int stop)
587{
588 int i;
589 unsigned long flags;
590
591 COMM ("startcard");
592 if(s->control_register & RME96xx_IE){
593 /* disable interrupt first */
594
595 rme96xx_unset_ctrl( s,RME96xx_start_bit );
596 udelay(10);
597 rme96xx_unset_ctrl( s,RME96xx_IE);
598 spin_lock_irqsave(&s->lock,flags); /* timing is critical */
599 s->started = 0;
600 spin_unlock_irqrestore(&s->lock,flags);
601 if (stop) {
602 COMM("Sound card stopped");
603 return 1;
604 }
605 }
606 COMM ("interrupt disabled");
607 /* first initialize all pointers on card */
608 for(i=0;i<RME96xx_num_of_init_regs;i++){
609 writel(0,s->iobase + i);
610 udelay(10); /* ?? */
611 }
612 COMM ("regs cleaned");
613
614 spin_lock_irqsave(&s->lock,flags); /* timing is critical */
615 udelay(10);
616 s->started = 1;
617 s->hwptr = 0;
618 spin_unlock_irqrestore(&s->lock,flags);
619
620 rme96xx_set_ctrl( s, RME96xx_IE | RME96xx_start_bit);
621
622
623 COMM("Sound card started");
624
625 return 1;
626}
627
628
629static inline int rme96xx_getospace(struct dmabuf * dma, unsigned int hwp)
630{
631 int cnt;
632 int swptr;
633 unsigned long flags;
634
635 spin_lock_irqsave(&dma->s->lock,flags);
636 swptr = dma->writeptr;
637 cnt = (hwp - swptr);
638
639 if (cnt < 0) {
640 cnt = ((dma->s->fragsize<<1) - swptr);
641 }
642 spin_unlock_irqrestore(&dma->s->lock,flags);
643 return cnt;
644}
645
646static inline int rme96xx_getispace(struct dmabuf * dma, unsigned int hwp)
647{
648 int cnt;
649 int swptr;
650 unsigned long flags;
651
652 spin_lock_irqsave(&dma->s->lock,flags);
653 swptr = dma->readptr;
654 cnt = (hwp - swptr);
655
656 if (cnt < 0) {
657 cnt = ((dma->s->fragsize<<1) - swptr);
658 }
659 spin_unlock_irqrestore(&dma->s->lock,flags);
660 return cnt;
661}
662
663
664static inline int rme96xx_copyfromuser(struct dmabuf* dma,const char __user * buffer,int count,int hop)
665{
666 int swptr = dma->writeptr;
667 switch (dma->format) {
668 case AFMT_S32_BLOCKED:
669 {
670 char __user * buf = (char __user *)buffer;
671 int cnt = count/dma->outchannels;
672 int i;
673 for (i=0;i < dma->outchannels;i++) {
674 char* hwbuf =(char*) &dma->s->playbuf[(dma->outoffset + i)*RME96xx_DMA_MAX_SAMPLES];
675 hwbuf+=swptr;
676
677 if (copy_from_user(hwbuf,buf, cnt))
678 return -1;
679 buf+=hop;
680 }
681 swptr+=cnt;
682 break;
683 }
684 case AFMT_S16_LE:
685 {
686 int i,j;
687 int cnt = count/dma->outchannels;
688 for (i=0;i < dma->outchannels + dma->mono;i++) {
689 short __user * sbuf = (short __user *)buffer + i*(!dma->mono);
690 short* hwbuf =(short*) &dma->s->playbuf[(dma->outoffset + i)*RME96xx_DMA_MAX_SAMPLES];
691 hwbuf+=(swptr>>1);
692 for (j=0;j<(cnt>>1);j++) {
693 hwbuf++; /* skip the low 16 bits */
694 __get_user(*hwbuf++,sbuf++);
695 sbuf+=(dma->outchannels-1);
696 }
697 }
698 swptr += (cnt<<1);
699 break;
700 }
701 default:
702 printk(RME_MESS" unsupported format\n");
703 return -1;
704 } /* switch */
705
706 swptr&=((dma->s->fragsize<<1) -1);
707 dma->writeptr = swptr;
708
709 return 0;
710}
711
/* The count argument is the number of bytes */
/* Copy captured data from the card's per-channel record buffers to user
 * space, starting at the current software read pointer; mirror image of
 * rme96xx_copyfromuser().  Returns 0 on success, -1 on copy failure.
 *
 * NOTE(review): @buffer is declared const but is the *destination* of
 * copy_to_user/__put_user — the qualifier looks wrong, though harmless
 * since it is cast away; confirm before changing the prototype. */
static inline int rme96xx_copytouser(struct dmabuf* dma,const char __user* buffer,int count,int hop)
{
	int swptr = dma->readptr;
	switch (dma->format) {
	case AFMT_S32_BLOCKED:
	{
		/* native format: one block copy per input channel */
		char __user * buf = (char __user *)buffer;
		int cnt = count/dma->inchannels;
		int i;

		for (i=0;i < dma->inchannels;i++) {
			char* hwbuf =(char*) &dma->s->recbuf[(dma->inoffset + i)*RME96xx_DMA_MAX_SAMPLES];
			hwbuf+=swptr;

			if (copy_to_user(buf,hwbuf,cnt))
				return -1;
			buf+=hop;
		}
		swptr+=cnt;
		break;
	}
	case AFMT_S16_LE:
	{
		/* take only the high 16 bits of each 32-bit hardware sample
		 * and interleave the channels into the user buffer */
		int i,j;
		int cnt = count/dma->inchannels;
		for (i=0;i < dma->inchannels;i++) {
			short __user * sbuf = (short __user *)buffer + i;
			short* hwbuf =(short*) &dma->s->recbuf[(dma->inoffset + i)*RME96xx_DMA_MAX_SAMPLES];
			hwbuf+=(swptr>>1);
			for (j=0;j<(cnt>>1);j++) {
				hwbuf++;	/* skip the low 16 bits */
				__put_user(*hwbuf++,sbuf++);
				sbuf+=(dma->inchannels-1);
			}
		}
		/* 16-bit output came from 32-bit samples: advance twice as far */
		swptr += (cnt<<1);
		break;
	}
	default:
		printk(RME_MESS" unsupported format\n");
		return -1;
	} /* switch */

	/* wrap inside the double buffer (fragsize*2 is a power of two) */
	swptr&=((dma->s->fragsize<<1) -1);
	dma->readptr = swptr;
	return 0;
}
760
761
/* Shared-IRQ handler: acknowledge the card's interrupt, latch the
 * current hardware buffer id, and wake every stream that is running so
 * readers/writers re-check their buffer space. */
static irqreturn_t rme96xx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int i;
	rme96xx_info *s = (rme96xx_info *)dev_id;
	struct dmabuf *db;
	u32 status;
	unsigned long flags;

	status = readl(s->iobase + RME96xx_status_register);
	if (!(status & RME96xx_IRQ)) {
		/* shared line, not our interrupt */
		return IRQ_NONE;
	}

	spin_lock_irqsave(&s->lock,flags);
	writel(0,s->iobase + RME96xx_irq_clear);	/* ack the card */

	s->hwbufid = (status & RME96xx_buffer_id)>>26;
	/* buffer-position field near zero means the pointer just wrapped */
	if ((status & 0xffc0) <= 256) s->hwptr = 0;
	for(i=0;i<devices;i++)
	{
		db = &(s->dma[i]);
		if(db->started > 0)
			wake_up(&(db->wait));
	}
	spin_unlock_irqrestore(&s->lock,flags);
	return IRQ_HANDLED;
}
789
790
791
792/*----------------------------------------------------------------------------
793 PCI detection and module initialization stuff
794 ----------------------------------------------------------------------------*/
795
796static void* busmaster_malloc(int size) {
797 int pg; /* 2 s exponent of memory size */
798 char *buf;
799
800 DBG(printk("kernel malloc pages ..\n"));
801
802 for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
803
804 buf = (char *) __get_free_pages(GFP_KERNEL | GFP_DMA, pg);
805
806 if (buf) {
807 struct page* page, *last_page;
808
809 page = virt_to_page(buf);
810 last_page = page + (1 << pg);
811 DBG(printk("setting reserved bit\n"));
812 while (page < last_page) {
813 SetPageReserved(page);
814 page++;
815 }
816 return buf;
817 }
818 DBG(printk("allocated %ld",(long)buf));
819 return NULL;
820}
821
822static void busmaster_free(void* ptr,int size) {
823 int pg;
824 struct page* page, *last_page;
825
826 if (ptr == NULL)
827 return;
828
829 for (pg = 0; PAGE_SIZE * (1 << pg) < size; pg++);
830
831 page = virt_to_page(ptr);
832 last_page = page + (1 << pg);
833 while (page < last_page) {
834 ClearPageReserved(page);
835 page++;
836 }
837 DBG(printk("freeing pages\n"));
838 free_pages((unsigned long) ptr, pg);
839 DBG(printk("done\n"));
840}
841
842/* initialize those parts of the info structure which are not pci detectable resources */
843
844static int rme96xx_dmabuf_init(rme96xx_info * s,struct dmabuf* dma,int ioffset,int ooffset) {
845
846 mutex_init(&dma->open_mutex);
847 init_waitqueue_head(&dma->open_wait);
848 init_waitqueue_head(&dma->wait);
849 dma->s = s;
850 dma->error = 0;
851
852 dma->format = AFMT_S32_BLOCKED;
853 dma->formatshift = 0;
854 dma->inchannels = dma->outchannels = 1;
855 dma->inoffset = ioffset;
856 dma->outoffset = ooffset;
857
858 dma->opened=0;
859 dma->started=0;
860 dma->mmapped=0;
861 dma->open_mode=0;
862 dma->mono=0;
863
864 rme96xx_clearbufs(dma);
865 return 0;
866}
867
868
/* One-time card initialization: allocate and program the busmaster DMA
 * areas, reset the control register, set up the per-device DMA
 * descriptors, and detect the hardware revision / card model from the
 * status register and PCI revision byte.  Returns 0 or -ENODEV. */
static int rme96xx_init(rme96xx_info* s)
{
	int i;
	int status;
	unsigned short rev;

	DBG(printk("%s\n", __FUNCTION__));
	numcards++;

	s->magic = RME96xx_MAGIC;

	spin_lock_init(&s->lock);

	COMM ("setup busmaster memory")
	s->recbuf = busmaster_malloc(RME96xx_DMA_MAX_SIZE_ALL);
	s->playbuf = busmaster_malloc(RME96xx_DMA_MAX_SIZE_ALL);

	if (!s->recbuf || !s->playbuf) {
		/* NOTE(review): if only one allocation succeeded it is not
		 * freed here — the caller's error path should clean up. */
		printk(KERN_ERR RME_MESS" Unable to allocate busmaster memory\n");
		return -ENODEV;
	}

	COMM ("setting rec and playbuffers")

	/* hand the bus addresses of both DMA areas to the card */
	writel((u32) virt_to_bus(s->recbuf),s->iobase + RME96xx_rec_buffer);
	writel((u32) virt_to_bus(s->playbuf),s->iobase + RME96xx_play_buffer);

	COMM ("initializing control register")
	rme96xx_unset_ctrl(s,0xffffffff);
	rme96xx_set_ctrl(s,RME96xx_ctrl_init);


	COMM ("setup devices")
	/* each dsp device owns a distinct pair of hardware channels */
	for (i=0;i < devices;i++) {
		struct dmabuf * dma = &s->dma[i];
		rme96xx_dmabuf_init(s,dma,2*i,2*i);
	}

	/* code from ALSA card-rme9652.c HP 20020201 */
	/* Determine the h/w rev level of the card. This seems like
	   a particularly kludgy way to encode it, but its what RME
	   chose to do, so we follow them ...
	*/

	status = readl(s->iobase + RME96xx_status_register);
	if (rme96xx_decode_spdif_rate(status&RME96xx_F) == 1) {
		s->hw_rev = 15;
	} else {
		s->hw_rev = 11;
	}

	/* Differentiate between the standard Hammerfall, and the
	   "Light", which does not have the expansion board. This
	   method comes from information received from Mathhias
	   Clausen at RME. Display the EEPROM and h/w revID where
	   relevant.
	*/

	pci_read_config_word(s->pcidev, PCI_CLASS_REVISION, &rev);
	switch (rev & 0xff) {
	case 8: /* original eprom */
		if (s->hw_rev == 15) {
			s->card_name = "RME Digi9636 (Rev 1.5)";
		} else {
			s->card_name = "RME Digi9636";
		}
		break;
	case 9: /* W36_G EPROM */
		s->card_name = "RME Digi9636 (Rev G)";
		break;
	case 4: /* W52_G EPROM */
		s->card_name = "RME Digi9652 (Rev G)";
		break;
	default:	/* unknown revisions are deliberately treated as case 3 */
	case 3: /* original eprom */
		if (s->hw_rev == 15) {
			s->card_name = "RME Digi9652 (Rev 1.5)";
		} else {
			s->card_name = "RME Digi9652";
		}
		break;
	}

	printk(KERN_INFO RME_MESS" detected %s (hw_rev %d)\n",s->card_name,s->hw_rev);

	if (s->hw_rev == 15)
		rme96xx_initialize_spdif_receiver (s);

	s->started = 0;
	rme96xx_setlatency(s,7);	/* largest buffer: 8192 samples */

	printk(KERN_INFO RME_MESS" card %d initialized\n",numcards);
	return 0;
}
963
964
965/* open uses this to figure out which device was opened .. this seems to be
966 unnecessary complex */
967
968static LIST_HEAD(devs);
969
970static int __devinit rme96xx_probe(struct pci_dev *pcidev, const struct pci_device_id *pciid)
971{
972 int i;
973 rme96xx_info *s;
974
975 DBG(printk("%s\n", __FUNCTION__));
976
977 if (pcidev->irq == 0)
978 return -1;
979 if (!pci_dma_supported(pcidev, 0xffffffff)) {
980 printk(KERN_WARNING RME_MESS" architecture does not support 32bit PCI busmaster DMA\n");
981 return -1;
982 }
983 if (!(s = kmalloc(sizeof(rme96xx_info), GFP_KERNEL))) {
984 printk(KERN_WARNING RME_MESS" out of memory\n");
985 return -1;
986 }
987 memset(s, 0, sizeof(rme96xx_info));
988
989 s->pcidev = pcidev;
990 s->iobase = ioremap(pci_resource_start(pcidev, 0),RME96xx_IO_EXTENT);
991 s->irq = pcidev->irq;
992
993 DBG(printk("remapped iobase: %lx irq %d\n",(long)s->iobase,s->irq));
994
995 if (pci_enable_device(pcidev))
996 goto err_irq;
997 if (request_irq(s->irq, rme96xx_interrupt, IRQF_SHARED, "rme96xx", s)) {
998 printk(KERN_ERR RME_MESS" irq %u in use\n", s->irq);
999 goto err_irq;
1000 }
1001
1002 /* initialize the card */
1003
1004 i = 0;
1005 if (rme96xx_init(s) < 0) {
1006 printk(KERN_ERR RME_MESS" initialization failed\n");
1007 goto err_devices;
1008 }
1009 for (i=0;i<devices;i++) {
1010 if ((s->dspnum[i] = register_sound_dsp(&rme96xx_audio_fops, -1)) < 0)
1011 goto err_devices;
1012 }
1013
1014 if ((s->mixer = register_sound_mixer(&rme96xx_mixer_fops, -1)) < 0)
1015 goto err_devices;
1016
1017 pci_set_drvdata(pcidev, s);
1018 pcidev->dma_mask = 0xffffffff; /* ????? */
1019 /* put it into driver list */
1020 list_add_tail(&s->devs, &devs);
1021
1022 DBG(printk("initialization successful\n"));
1023 return 0;
1024
1025 /* error handler */
1026 err_devices:
1027 while (i--)
1028 unregister_sound_dsp(s->dspnum[i]);
1029 free_irq(s->irq,s);
1030 err_irq:
1031 kfree(s);
1032 return -1;
1033}
1034
1035
1036static void __devexit rme96xx_remove(struct pci_dev *dev)
1037{
1038 int i;
1039 rme96xx_info *s = pci_get_drvdata(dev);
1040
1041 if (!s) {
1042 printk(KERN_ERR"device structure not valid\n");
1043 return ;
1044 }
1045
1046 if (s->started) rme96xx_startcard(s,0);
1047
1048 i = devices;
1049 while (i) {
1050 i--;
1051 unregister_sound_dsp(s->dspnum[i]);
1052 }
1053
1054 unregister_sound_mixer(s->mixer);
1055 synchronize_irq(s->irq);
1056 free_irq(s->irq,s);
1057 busmaster_free(s->recbuf,RME96xx_DMA_MAX_SIZE_ALL);
1058 busmaster_free(s->playbuf,RME96xx_DMA_MAX_SIZE_ALL);
1059 kfree(s);
1060 pci_set_drvdata(dev, NULL);
1061}
1062
1063
1064#ifndef PCI_VENDOR_ID_RME
1065#define PCI_VENDOR_ID_RME 0x10ee
1066#endif
1067#ifndef PCI_DEVICE_ID_RME9652
1068#define PCI_DEVICE_ID_RME9652 0x3fc4
1069#endif
1070#ifndef PCI_ANY_ID
1071#define PCI_ANY_ID 0
1072#endif
1073
/* PCI ids this driver binds to: the RME Digi9652/9636 (Hammerfall)
 * family; any subvendor/subdevice.  Zero entry terminates the table. */
static struct pci_device_id id_table[] = {
	{
		.vendor	   = PCI_VENDOR_ID_RME,
		.device	   = PCI_DEVICE_ID_RME9652,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ 0, },
};
1083
1084MODULE_DEVICE_TABLE(pci, id_table);
1085
/* PCI driver glue: binds probe/remove to the id table above. */
static struct pci_driver rme96xx_driver = {
	.name	  =  "rme96xx",
	.id_table = id_table,
	.probe	  = rme96xx_probe,
	.remove	  = __devexit_p(rme96xx_remove),
};
1092
/* Module entry point: clamp the requested number of dsp devices per
 * card to the supported range and register the PCI driver. */
static int __init init_rme96xx(void)
{
	printk(KERN_INFO RME_MESS" version "RMEVERSION" time " __TIME__ " " __DATE__ "\n");
	/* fold the module parameter into 1..RME96xx_MASK_DEVS+1 */
	devices = ((devices-1) & RME96xx_MASK_DEVS) + 1;
	printk(KERN_INFO RME_MESS" reserving %d dsp device(s)\n",devices);
        numcards = 0;
	return pci_register_driver(&rme96xx_driver);
}
1101
/* Module exit point: unregister the PCI driver (per-card teardown
 * happens in rme96xx_remove). */
static void __exit cleanup_rme96xx(void)
{
	printk(KERN_INFO RME_MESS" unloading\n");
	pci_unregister_driver(&rme96xx_driver);
}
1107
1108module_init(init_rme96xx);
1109module_exit(cleanup_rme96xx);
1110
1111
1112
1113
1114
1115/*--------------------------------------------------------------------------
1116 Implementation of file operations
1117---------------------------------------------------------------------------*/
1118
1119#define RME96xx_FMT (AFMT_S16_LE|AFMT_U8|AFMT_S32_BLOCKED)
1120/* AFTM_U8 is not (yet?) supported ... HP20020201 */
1121
1122static int rme96xx_ioctl(struct inode *in, struct file *file, unsigned int cmd, unsigned long arg)
1123{
1124 struct dmabuf * dma = (struct dmabuf *)file->private_data;
1125 rme96xx_info *s = dma->s;
1126 unsigned long flags;
1127 audio_buf_info abinfo;
1128 count_info cinfo;
1129 int count;
1130 int val = 0;
1131 void __user *argp = (void __user *)arg;
1132 int __user *p = argp;
1133
1134 VALIDATE_STATE(s);
1135
1136 DBG(printk("ioctl %ud\n",cmd));
1137
1138 switch (cmd) {
1139 case OSS_GETVERSION:
1140 return put_user(SOUND_VERSION, p);
1141
1142 case SNDCTL_DSP_SYNC:
1143#if 0
1144 if (file->f_mode & FMODE_WRITE)
1145 return drain_dac2(s, 0/*file->f_flags & O_NONBLOCK*/);
1146#endif
1147 return 0;
1148
1149 case SNDCTL_DSP_SETDUPLEX:
1150 return 0;
1151
1152 case SNDCTL_DSP_GETCAPS:
1153 return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME | DSP_CAP_TRIGGER | DSP_CAP_MMAP, p);
1154
1155 case SNDCTL_DSP_RESET:
1156// rme96xx_clearbufs(dma);
1157 return 0;
1158
1159 case SNDCTL_DSP_SPEED:
1160 if (get_user(val, p))
1161 return -EFAULT;
1162 if (val >= 0) {
1163/* generally it's not a problem if we change the speed
1164 if (dma->open_mode & (~file->f_mode) & (FMODE_READ|FMODE_WRITE))
1165 return -EINVAL;
1166*/
1167 spin_lock_irqsave(&s->lock, flags);
1168
1169 switch (val) {
1170 case 44100:
1171 case 88200:
1172 rme96xx_unset_ctrl(s,RME96xx_freq);
1173 break;
1174 case 48000:
1175 case 96000:
1176 rme96xx_set_ctrl(s,RME96xx_freq);
1177 break;
1178 /* just report current rate as default
1179 e.g. use 0 to "select" current digital input rate
1180 default:
1181 rme96xx_unset_ctrl(s,RME96xx_freq);
1182 val = 44100;
1183 */
1184 }
1185 if (val > 50000)
1186 rme96xx_set_ctrl(s,RME96xx_DS);
1187 else
1188 rme96xx_unset_ctrl(s,RME96xx_DS);
1189 /* set val to actual value HP 20020201 */
1190 /* NOTE: if not "Sync Master", reported rate might be not yet "updated" ... but I don't want to insert a long udelay() here */
1191 if ((s->control_register & RME96xx_Master) && !(s->control_register & RME96xx_wsel))
1192 val = rme96xx_get_sample_rate_ctrl(s);
1193 else
1194 val = rme96xx_get_sample_rate_status(s);
1195 s->rate = val;
1196 spin_unlock_irqrestore(&s->lock, flags);
1197 }
1198 DBG(printk("speed set to %d\n",val));
1199 return put_user(val, p);
1200
1201 case SNDCTL_DSP_STEREO: /* this plays a mono file on two channels */
1202 if (get_user(val, p))
1203 return -EFAULT;
1204
1205 if (!val) {
1206 DBG(printk("setting to mono\n"));
1207 dma->mono=1;
1208 dma->inchannels = 1;
1209 dma->outchannels = 1;
1210 }
1211 else {
1212 DBG(printk("setting to stereo\n"));
1213 dma->mono = 0;
1214 dma->inchannels = 2;
1215 dma->outchannels = 2;
1216 }
1217 return 0;
1218 case SNDCTL_DSP_CHANNELS:
1219 /* remember to check for resonable offset/channel pairs here */
1220 if (get_user(val, p))
1221 return -EFAULT;
1222
1223 if (file->f_mode & FMODE_WRITE) {
1224 if (val > 0 && (dma->outoffset + val) <= RME96xx_CHANNELS_PER_CARD)
1225 dma->outchannels = val;
1226 else
1227 dma->outchannels = val = 2;
1228 DBG(printk("setting to outchannels %d\n",val));
1229 }
1230 if (file->f_mode & FMODE_READ) {
1231 if (val > 0 && (dma->inoffset + val) <= RME96xx_CHANNELS_PER_CARD)
1232 dma->inchannels = val;
1233 else
1234 dma->inchannels = val = 2;
1235 DBG(printk("setting to inchannels %d\n",val));
1236 }
1237
1238 dma->mono=0;
1239
1240 return put_user(val, p);
1241
1242 case SNDCTL_DSP_GETFMTS: /* Returns a mask */
1243 return put_user(RME96xx_FMT, p);
1244
1245 case SNDCTL_DSP_SETFMT: /* Selects ONE fmt*/
1246 DBG(printk("setting to format %x\n",val));
1247 if (get_user(val, p))
1248 return -EFAULT;
1249 if (val != AFMT_QUERY) {
1250 if (val & RME96xx_FMT)
1251 dma->format = val;
1252 switch (dma->format) {
1253 case AFMT_S16_LE:
1254 dma->formatshift=1;
1255 break;
1256 case AFMT_S32_BLOCKED:
1257 dma->formatshift=0;
1258 break;
1259 }
1260 }
1261 return put_user(dma->format, p);
1262
1263 case SNDCTL_DSP_POST:
1264 return 0;
1265
1266 case SNDCTL_DSP_GETTRIGGER:
1267 val = 0;
1268#if 0
1269 if (file->f_mode & FMODE_READ && s->ctrl & CTRL_ADC_EN)
1270 val |= PCM_ENABLE_INPUT;
1271 if (file->f_mode & FMODE_WRITE && s->ctrl & CTRL_DAC2_EN)
1272 val |= PCM_ENABLE_OUTPUT;
1273#endif
1274 return put_user(val, p);
1275
1276 case SNDCTL_DSP_SETTRIGGER:
1277 if (get_user(val, p))
1278 return -EFAULT;
1279#if 0
1280 if (file->f_mode & FMODE_READ) {
1281 if (val & PCM_ENABLE_INPUT) {
1282 if (!s->dma_adc.ready && (ret = prog_dmabuf_adc(s)))
1283 return ret;
1284 start_adc(s);
1285 } else
1286 stop_adc(s);
1287 }
1288 if (file->f_mode & FMODE_WRITE) {
1289 if (val & PCM_ENABLE_OUTPUT) {
1290 if (!s->dma_dac2.ready && (ret = prog_dmabuf_dac2(s)))
1291 return ret;
1292 start_dac2(s);
1293 } else
1294 stop_dac2(s);
1295 }
1296#endif
1297 return 0;
1298
1299 case SNDCTL_DSP_GETOSPACE:
1300 if (!(file->f_mode & FMODE_WRITE))
1301 return -EINVAL;
1302
1303 val = rme96xx_gethwptr(dma->s,0);
1304
1305
1306 count = rme96xx_getospace(dma,val);
1307 if (!s->started) count = s->fragsize*2;
1308 abinfo.fragsize =(s->fragsize*dma->outchannels)>>dma->formatshift;
1309 abinfo.bytes = (count*dma->outchannels)>>dma->formatshift;
1310 abinfo.fragstotal = 2;
1311 abinfo.fragments = (count > s->fragsize);
1312
1313 return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
1314
1315 case SNDCTL_DSP_GETISPACE:
1316 if (!(file->f_mode & FMODE_READ))
1317 return -EINVAL;
1318
1319 val = rme96xx_gethwptr(dma->s,0);
1320
1321 count = rme96xx_getispace(dma,val);
1322
1323 abinfo.fragsize = (s->fragsize*dma->inchannels)>>dma->formatshift;
1324 abinfo.bytes = (count*dma->inchannels)>>dma->formatshift;
1325 abinfo.fragstotal = 2;
1326 abinfo.fragments = count > s->fragsize;
1327 return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
1328
1329 case SNDCTL_DSP_NONBLOCK:
1330 file->f_flags |= O_NONBLOCK;
1331 return 0;
1332
1333 case SNDCTL_DSP_GETODELAY: /* What should this exactly do ? ,
1334 ATM it is just abinfo.bytes */
1335 if (!(file->f_mode & FMODE_WRITE))
1336 return -EINVAL;
1337
1338 val = rme96xx_gethwptr(dma->s,0);
1339 count = val - dma->readptr;
1340 if (count < 0)
1341 count += s->fragsize<<1;
1342
1343 return put_user(count, p);
1344
1345
1346/* check out how to use mmaped mode (can only be blocked !!!) */
1347 case SNDCTL_DSP_GETIPTR:
1348 if (!(file->f_mode & FMODE_READ))
1349 return -EINVAL;
1350 val = rme96xx_gethwptr(dma->s,0);
1351 spin_lock_irqsave(&s->lock,flags);
1352 cinfo.bytes = s->fragsize<<1;
1353 count = val - dma->readptr;
1354 if (count < 0)
1355 count += s->fragsize<<1;
1356
1357 cinfo.blocks = (count > s->fragsize);
1358 cinfo.ptr = val;
1359 if (dma->mmapped)
1360 dma->readptr &= s->fragsize<<1;
1361 spin_unlock_irqrestore(&s->lock,flags);
1362
1363 if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
1364 return -EFAULT;
1365 return 0;
1366
1367 case SNDCTL_DSP_GETOPTR:
1368 if (!(file->f_mode & FMODE_READ))
1369 return -EINVAL;
1370 val = rme96xx_gethwptr(dma->s,0);
1371 spin_lock_irqsave(&s->lock,flags);
1372 cinfo.bytes = s->fragsize<<1;
1373 count = val - dma->writeptr;
1374 if (count < 0)
1375 count += s->fragsize<<1;
1376
1377 cinfo.blocks = (count > s->fragsize);
1378 cinfo.ptr = val;
1379 if (dma->mmapped)
1380 dma->writeptr &= s->fragsize<<1;
1381 spin_unlock_irqrestore(&s->lock,flags);
1382 if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
1383 return -EFAULT;
1384 return 0;
1385 case SNDCTL_DSP_GETBLKSIZE:
1386 return put_user(s->fragsize, p);
1387
1388 case SNDCTL_DSP_SETFRAGMENT:
1389 if (get_user(val, p))
1390 return -EFAULT;
1391 val&=0xffff;
1392 val -= 7;
1393 if (val < 0) val = 0;
1394 if (val > 7) val = 7;
1395 rme96xx_setlatency(s,val);
1396 return 0;
1397
1398 case SNDCTL_DSP_SUBDIVIDE:
1399#if 0
1400 if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision) ||
1401 (file->f_mode & FMODE_WRITE && s->dma_dac2.subdivision))
1402 return -EINVAL;
1403 if (get_user(val, p))
1404 return -EFAULT;
1405 if (val != 1 && val != 2 && val != 4)
1406 return -EINVAL;
1407 if (file->f_mode & FMODE_READ)
1408 s->dma_adc.subdivision = val;
1409 if (file->f_mode & FMODE_WRITE)
1410 s->dma_dac2.subdivision = val;
1411#endif
1412 return 0;
1413
1414 case SOUND_PCM_READ_RATE:
1415 /* HP20020201 */
1416 s->rate = rme96xx_get_sample_rate_status(s);
1417 return put_user(s->rate, p);
1418
1419 case SOUND_PCM_READ_CHANNELS:
1420 return put_user(dma->outchannels, p);
1421
1422 case SOUND_PCM_READ_BITS:
1423 switch (dma->format) {
1424 case AFMT_S32_BLOCKED:
1425 val = 32;
1426 break;
1427 case AFMT_S16_LE:
1428 val = 16;
1429 break;
1430 }
1431 return put_user(val, p);
1432
1433 case SOUND_PCM_WRITE_FILTER:
1434 case SNDCTL_DSP_SETSYNCRO:
1435 case SOUND_PCM_READ_FILTER:
1436 return -EINVAL;
1437
1438 }
1439
1440
1441 return -ENODEV;
1442}
1443
1444
1445
1446static int rme96xx_open(struct inode *in, struct file *f)
1447{
1448 int minor = iminor(in);
1449 struct list_head *list;
1450 int devnum;
1451 rme96xx_info *s;
1452 struct dmabuf* dma;
1453 DECLARE_WAITQUEUE(wait, current);
1454
1455 DBG(printk("device num %d open\n",devnum));
1456
1457 nonseekable_open(in, f);
1458 for (list = devs.next; ; list = list->next) {
1459 if (list == &devs)
1460 return -ENODEV;
1461 s = list_entry(list, rme96xx_info, devs);
1462 for (devnum=0; devnum<devices; devnum++)
1463 if (!((s->dspnum[devnum] ^ minor) & ~0xf))
1464 break;
1465 if (devnum<devices)
1466 break;
1467 }
1468 VALIDATE_STATE(s);
1469
1470 dma = &s->dma[devnum];
1471 f->private_data = dma;
1472 /* wait for device to become free */
1473 mutex_lock(&dma->open_mutex);
1474 while (dma->open_mode & f->f_mode) {
1475 if (f->f_flags & O_NONBLOCK) {
1476 mutex_unlock(&dma->open_mutex);
1477 return -EBUSY;
1478 }
1479 add_wait_queue(&dma->open_wait, &wait);
1480 __set_current_state(TASK_INTERRUPTIBLE);
1481 mutex_unlock(&dma->open_mutex);
1482 schedule();
1483 remove_wait_queue(&dma->open_wait, &wait);
1484 set_current_state(TASK_RUNNING);
1485 if (signal_pending(current))
1486 return -ERESTARTSYS;
1487 mutex_lock(&dma->open_mutex);
1488 }
1489
1490 COMM ("hardware open")
1491
1492 if (!dma->opened) rme96xx_dmabuf_init(dma->s,dma,dma->inoffset,dma->outoffset);
1493
1494 dma->open_mode |= (f->f_mode & (FMODE_READ | FMODE_WRITE));
1495 dma->opened = 1;
1496 mutex_unlock(&dma->open_mutex);
1497
1498 DBG(printk("device num %d open finished\n",devnum));
1499 return 0;
1500}
1501
1502static int rme96xx_release(struct inode *in, struct file *file)
1503{
1504 struct dmabuf * dma = (struct dmabuf*) file->private_data;
1505 /* int hwp; ... was unused HP20020201 */
1506 DBG(printk("%s\n", __FUNCTION__));
1507
1508 COMM ("draining")
1509 if (dma->open_mode & FMODE_WRITE) {
1510#if 0 /* Why doesn't this work with some cards ?? */
1511 hwp = rme96xx_gethwptr(dma->s,0);
1512 while (rme96xx_getospace(dma,hwp)) {
1513 interruptible_sleep_on(&(dma->wait));
1514 hwp = rme96xx_gethwptr(dma->s,0);
1515 }
1516#endif
1517 rme96xx_clearbufs(dma);
1518 }
1519
1520 dma->open_mode &= (~file->f_mode) & (FMODE_READ|FMODE_WRITE);
1521
1522 if (!(dma->open_mode & (FMODE_READ|FMODE_WRITE))) {
1523 dma->opened = 0;
1524 if (dma->s->started) rme96xx_startcard(dma->s,1);
1525 }
1526
1527 wake_up(&dma->open_wait);
1528 mutex_unlock(&dma->open_mutex);
1529
1530 return 0;
1531}
1532
1533
/* dsp write: copy PCM data into the playback ring.  Starts the card on
 * first use; then loops copying as much as fits and sleeping on the
 * interrupt wait queue until space opens (unless O_NONBLOCK).
 * Returns the number of bytes accepted or a negative errno. */
static ssize_t rme96xx_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct dmabuf *dma = (struct dmabuf *)file->private_data;
	ssize_t ret = 0;
	int cnt; /* number of bytes from "buffer" that will/can be used */
	int hop = count/dma->outchannels;	/* per-channel stride in the user buffer */
	int hwp;
	int exact = (file->f_flags & O_NONBLOCK);


	if(dma == NULL || (dma->s) == NULL)
		return -ENXIO;

	if (dma->mmapped || !dma->opened)
		return -ENXIO;

	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;

	if (! (dma->open_mode  & FMODE_WRITE))
		return -ENXIO;

	if (!dma->s->started) rme96xx_startcard(dma->s,exact);
	hwp = rme96xx_gethwptr(dma->s,0);

	if(!(dma->started)){
		/* first write on this stream: sync both software pointers
		 * to the current hardware position */
		COMM		("first write")

		dma->readptr = hwp;
		dma->writeptr = hwp;
		dma->started = 1;
	}

	while (count > 0) {
		/* convert available hardware bytes to user-buffer bytes */
		cnt = rme96xx_getospace(dma,hwp);
		cnt>>=dma->formatshift;
		cnt*=dma->outchannels;
		if (cnt > count)
			cnt = count;

		if (cnt != 0) {
			if (rme96xx_copyfromuser(dma,buffer,cnt,hop))
				return ret ? ret : -EFAULT;
			count -= cnt;
			buffer += cnt;
			ret += cnt;
			if (count == 0) return ret;
		}
		if (file->f_flags & O_NONBLOCK)
			return ret ? ret : -EAGAIN;

		if ((hwp - dma->writeptr) <= 0) {
			/* NOTE(review): interruptible_sleep_on() is racy if
			 * the interrupt fires between the check and the
			 * sleep — long-deprecated API; confirm before
			 * relying on this path. */
			interruptible_sleep_on(&(dma->wait));

			if (signal_pending(current))
				return ret ? ret : -ERESTARTSYS;
		}

		hwp = rme96xx_gethwptr(dma->s,exact);

	}; /* count > 0 */

	return ret;
}
1598
/* dsp read: copy captured PCM data out of the record ring.  Mirror of
 * rme96xx_write(): starts the card on first use, loops copying what is
 * available and sleeping on the interrupt wait queue until more data
 * arrives (unless O_NONBLOCK).  Returns bytes delivered or -errno. */
static ssize_t rme96xx_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	struct dmabuf *dma = (struct dmabuf *)file->private_data;
	ssize_t ret = 0;
	int cnt; /* number of bytes from "buffer" that will/can be used */
	int hop = count/dma->inchannels;	/* per-channel stride in the user buffer */
	int hwp;
	int exact = (file->f_flags & O_NONBLOCK);


	if(dma == NULL || (dma->s) == NULL)
		return -ENXIO;

	if (dma->mmapped || !dma->opened)
		return -ENXIO;

	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;

	if (! (dma->open_mode  & FMODE_READ))
		return -ENXIO;

	if (!dma->s->started) rme96xx_startcard(dma->s,exact);
	hwp = rme96xx_gethwptr(dma->s,0);

	if(!(dma->started)){
		/* first read on this stream: sync both software pointers
		 * to the current hardware position */
		COMM		("first read")

		dma->writeptr = hwp;
		dma->readptr = hwp;
		dma->started = 1;
	}

	while (count > 0) {
		/* convert available hardware bytes to user-buffer bytes */
		cnt = rme96xx_getispace(dma,hwp);
		cnt>>=dma->formatshift;
		cnt*=dma->inchannels;

		if (cnt > count)
			cnt = count;

		if (cnt != 0) {

			if (rme96xx_copytouser(dma,buffer,cnt,hop))
				return ret ? ret : -EFAULT;

			count -= cnt;
			buffer += cnt;
			ret += cnt;
			if (count == 0) return ret;
		}
		if (file->f_flags & O_NONBLOCK)
			return ret ? ret : -EAGAIN;

		if ((hwp - dma->readptr) <= 0) {
			/* NOTE(review): interruptible_sleep_on() is racy if
			 * the interrupt fires between the check and the
			 * sleep — long-deprecated API; confirm before
			 * relying on this path. */
			interruptible_sleep_on(&(dma->wait));

			if (signal_pending(current))
				return ret ? ret : -ERESTARTSYS;
		}
		hwp = rme96xx_gethwptr(dma->s,exact);

	}; /* count > 0 */

	return ret;
}
1665
1666static int rm96xx_mmap(struct file *file, struct vm_area_struct *vma) {
1667 struct dmabuf *dma = (struct dmabuf *)file->private_data;
1668 rme96xx_info* s = dma->s;
1669 unsigned long size;
1670
1671 VALIDATE_STATE(s);
1672 lock_kernel();
1673
1674 if (vma->vm_pgoff != 0) {
1675 unlock_kernel();
1676 return -EINVAL;
1677 }
1678 size = vma->vm_end - vma->vm_start;
1679 if (size > RME96xx_DMA_MAX_SIZE) {
1680 unlock_kernel();
1681 return -EINVAL;
1682 }
1683
1684
1685 if (vma->vm_flags & VM_WRITE) {
1686 if (!s->started) rme96xx_startcard(s,1);
1687
1688 if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(s->playbuf + dma->outoffset*RME96xx_DMA_MAX_SIZE) >> PAGE_SHIFT, size, vma->vm_page_prot)) {
1689 unlock_kernel();
1690 return -EAGAIN;
1691 }
1692 }
1693 else if (vma->vm_flags & VM_READ) {
1694 if (!s->started) rme96xx_startcard(s,1);
1695 if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(s->playbuf + dma->inoffset*RME96xx_DMA_MAX_SIZE) >> PAGE_SHIFT, size, vma->vm_page_prot)) {
1696 unlock_kernel();
1697 return -EAGAIN;
1698 }
1699 } else {
1700 unlock_kernel();
1701 return -EINVAL;
1702 }
1703
1704
1705/* this is the mapping */
1706 vma->vm_flags &= ~VM_IO;
1707 dma->mmapped = 1;
1708 unlock_kernel();
1709 return 0;
1710}
1711
1712static unsigned int rme96xx_poll(struct file *file, struct poll_table_struct *wait)
1713{
1714 struct dmabuf *dma = (struct dmabuf *)file->private_data;
1715 rme96xx_info* s = dma->s;
1716 unsigned int mask = 0;
1717 unsigned int hwp,cnt;
1718
1719 DBG(printk("rme96xx poll_wait ...\n"));
1720 VALIDATE_STATE(s);
1721
1722 if (!s->started) {
1723 mask |= POLLOUT | POLLWRNORM;
1724 }
1725 poll_wait(file, &dma->wait, wait);
1726
1727 hwp = rme96xx_gethwptr(dma->s,0);
1728
1729 DBG(printk("rme96xx poll: ..cnt %d > %d\n",cnt,s->fragsize));
1730
1731 cnt = rme96xx_getispace(dma,hwp);
1732
1733 if (file->f_mode & FMODE_READ)
1734 if (cnt > 0)
1735 mask |= POLLIN | POLLRDNORM;
1736
1737
1738
1739 cnt = rme96xx_getospace(dma,hwp);
1740
1741 if (file->f_mode & FMODE_WRITE)
1742 if (cnt > 0)
1743 mask |= POLLOUT | POLLWRNORM;
1744
1745
1746// printk("rme96xx poll_wait ...%d > %d\n",rme96xx_getospace(dma,hwp),rme96xx_getispace(dma,hwp));
1747
1748 return mask;
1749}
1750
1751
/* file operations for the dsp (PCM) device nodes */
static struct file_operations rme96xx_audio_fops = {
	.owner	 = THIS_MODULE,
	.read	 = rme96xx_read,
	.write	 = rme96xx_write,
	.poll	 = rme96xx_poll,
	.ioctl	 = rme96xx_ioctl,
	.mmap	 = rm96xx_mmap,
	.open	 = rme96xx_open,
	.release = rme96xx_release
};
1762
/* mixer open: find the card whose registered mixer minor matches this
 * inode and stash its state in the file. */
static int rme96xx_mixer_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	struct list_head *list;
	rme96xx_info *s;

	COMM  ("mixer open");

	nonseekable_open(inode, file);
	for (list = devs.next; ; list = list->next) {
		if (list == &devs)
			return -ENODEV;
		s = list_entry(list, rme96xx_info, devs);
		if (s->mixer== minor)
			break;
	}
	VALIDATE_STATE(s);
	file->private_data = s;

	COMM ("mixer opened")
	return 0;
}
1785
1786static int rme96xx_mixer_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1787{
1788 rme96xx_info *s = (rme96xx_info *)file->private_data;
1789 u32 status;
1790 int spdifrate;
1791 void __user *argp = (void __user *)arg;
1792 int __user *p = argp;
1793
1794 status = readl(s->iobase + RME96xx_status_register);
1795 /* hack to convert rev 1.5 SPDIF rate to "crystalrate" format HP 20020201 */
1796 rme96xx_spdif_sample_rate(s,&spdifrate);
1797 status = (status & ~RME96xx_F) | ((spdifrate<<22) & RME96xx_F);
1798
1799 VALIDATE_STATE(s);
1800 if (cmd == SOUND_MIXER_PRIVATE1) {
1801 rme_mixer mixer;
1802 if (copy_from_user(&mixer,argp,sizeof(mixer)))
1803 return -EFAULT;
1804
1805 mixer.devnr &= RME96xx_MASK_DEVS;
1806 if (mixer.devnr >= devices)
1807 mixer.devnr = devices-1;
1808 if (file->f_mode & FMODE_WRITE && !s->dma[mixer.devnr].opened) {
1809 /* modify only if device not open */
1810 if (mixer.o_offset < 0)
1811 mixer.o_offset = 0;
1812 if (mixer.o_offset >= RME96xx_CHANNELS_PER_CARD)
1813 mixer.o_offset = RME96xx_CHANNELS_PER_CARD-1;
1814 if (mixer.i_offset < 0)
1815 mixer.i_offset = 0;
1816 if (mixer.i_offset >= RME96xx_CHANNELS_PER_CARD)
1817 mixer.i_offset = RME96xx_CHANNELS_PER_CARD-1;
1818 s->dma[mixer.devnr].outoffset = mixer.o_offset;
1819 s->dma[mixer.devnr].inoffset = mixer.i_offset;
1820 }
1821
1822 mixer.o_offset = s->dma[mixer.devnr].outoffset;
1823 mixer.i_offset = s->dma[mixer.devnr].inoffset;
1824
1825 return copy_to_user(argp, &mixer, sizeof(mixer)) ? -EFAULT : 0;
1826 }
1827 if (cmd == SOUND_MIXER_PRIVATE2) {
1828 return put_user(status, p);
1829 }
1830 if (cmd == SOUND_MIXER_PRIVATE3) {
1831 u32 control;
1832 if (copy_from_user(&control,argp,sizeof(control)))
1833 return -EFAULT;
1834 if (file->f_mode & FMODE_WRITE) {
1835 s->control_register &= ~RME96xx_mixer_allowed;
1836 s->control_register |= control & RME96xx_mixer_allowed;
1837 writel(control,s->iobase + RME96xx_control_register);
1838 }
1839
1840 return put_user(s->control_register, p);
1841 }
1842 return -1;
1843}
1844
1845
1846
/* mixer release: nothing to tear down — the mixer holds no per-open
 * state. */
static int rme96xx_mixer_release(struct inode *inode, struct file *file)
{
	return 0;
}
1851
/* file operations for the mixer device node */
static /*const*/ struct file_operations rme96xx_mixer_fops = {
	.owner	 = THIS_MODULE,
	.ioctl	 = rme96xx_mixer_ioctl,
	.open	 = rme96xx_mixer_open,
	.release = rme96xx_mixer_release,
};
diff --git a/sound/oss/rme96xx.h b/sound/oss/rme96xx.h
deleted file mode 100644
index 7a3c188ea0a8..000000000000
--- a/sound/oss/rme96xx.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/* (C) 2000 Guenter Geiger <geiger@debian.org>
2 with copy/pastes from the driver of Winfried Ritsch <ritsch@iem.kug.ac.at>
3
4Modifications - Heiko Purnhagen <purnhage@tnt.uni-hannover.de>
5 HP20020116 towards REV 1.5 support, based on ALSA's card-rme9652.c
6 HP20020201 completed?
7
8A text/graphic control panel (rmectrl/xrmectrl) is available from
9 http://gige.xdv.org/pages/soft/pages/rme
10*/
11
12
13#ifndef AFMT_S32_BLOCKED
14#define AFMT_S32_BLOCKED 0x0000400
15#endif
16
17/* AFMT_S16_BLOCKED not yet supported */
18#ifndef AFMT_S16_BLOCKED
19#define AFMT_S16_BLOCKED 0x0000800
20#endif
21
22
23typedef struct rme_status {
24 unsigned int irq:1;
25 unsigned int lockmask:3; /* ADAT input PLLs locked */
26 /* 100=ADAT1, 010=ADAT2, 001=ADAT3 */
27 unsigned int sr48:1; /* sample rate: 0=44.1/88.2 1=48/96 kHz */
28 unsigned int wclock:1; /* 1=wordclock used */
29 unsigned int bufpoint:10;
30 unsigned int syncmask:3; /* ADAT input in sync with system clock */
31 /* 100=ADAT1, 010=ADAT2, 001=ADAT3 */
32 unsigned int doublespeed:1; /* sample rate: 0=44.1/48 1=88.2/96 kHz */
33 unsigned int tc_busy:1;
34 unsigned int tc_out:1;
35 unsigned int crystalrate:3; /* spdif input sample rate: */
36 /* 000=64kHz, 100=88.2kHz, 011=96kHz */
37 /* 111=32kHz, 110=44.1kHz, 101=48kHz */
38 unsigned int spdif_error:1; /* 1=no spdif lock */
39 unsigned int bufid:1;
40 unsigned int tc_valid:1; /* 1=timecode input detected */
41 unsigned int spdif_read:1;
42} rme_status_t;
43
44
45/* only fields marked W: can be modified by writing to SOUND_MIXER_PRIVATE3 */
46typedef struct rme_control {
47 unsigned int start:1;
48 unsigned int latency:3; /* buffer size / latency [samples]: */
49 /* 0=64 ... 7=8192 */
50 unsigned int master:1; /* W: clock mode: 1=master 0=slave/auto */
51 unsigned int ie:1;
52 unsigned int sr48:1; /* samplerate 0=44.1/88.2, 1=48/96 kHz */
53 unsigned int spare:1;
54 unsigned int doublespeed:1; /* double speed 0=44.1/48, 1=88.2/96 Khz */
55 unsigned int pro:1; /* W: SPDIF-OUT 0=consumer, 1=professional */
56 unsigned int emphasis:1; /* W: SPDIF-OUT emphasis 0=off, 1=on */
57 unsigned int dolby:1; /* W: SPDIF-OUT non-audio bit 1=set, 0=unset */
58 unsigned int opt_out:1; /* W: use 1st optical OUT as SPDIF: 1=yes, 0=no */
59 unsigned int wordclock:1; /* W: use Wordclock as sync (overwrites master) */
60 unsigned int spdif_in:2; /* W: SPDIF-IN: */
61 /* 00=optical (ADAT1), 01=coaxial (Cinch), 10=internal CDROM */
62 unsigned int sync_ref:2; /* W: preferred sync-source in autosync */
63 /* 00=ADAT1, 01=ADAT2, 10=ADAT3, 11=SPDIF */
64 unsigned int spdif_reset:1;
65 unsigned int spdif_select:1;
66 unsigned int spdif_clock:1;
67 unsigned int spdif_write:1;
68 unsigned int adat1_cd:1; /* W: Rev 1.5+: if set, internal CD connector carries ADAT */
69} rme_ctrl_t;
70
71
72typedef struct _rme_mixer {
73 int i_offset;
74 int o_offset;
75 int devnr;
76 int spare[8];
77} rme_mixer;
78
diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
index 0ce4e4ef6fe9..5c215f787ca9 100644
--- a/sound/oss/sequencer.c
+++ b/sound/oss/sequencer.c
@@ -16,7 +16,6 @@
16 */ 16 */
17#include <linux/kmod.h> 17#include <linux/kmod.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#define SEQUENCER_C
20#include "sound_config.h" 19#include "sound_config.h"
21 20
22#include "midi_ctrl.h" 21#include "midi_ctrl.h"
@@ -157,6 +156,7 @@ void seq_copy_to_input(unsigned char *event_rec, int len)
157 wake_up(&midi_sleeper); 156 wake_up(&midi_sleeper);
158 spin_unlock_irqrestore(&lock,flags); 157 spin_unlock_irqrestore(&lock,flags);
159} 158}
159EXPORT_SYMBOL(seq_copy_to_input);
160 160
161static void sequencer_midi_input(int dev, unsigned char data) 161static void sequencer_midi_input(int dev, unsigned char data)
162{ 162{
@@ -206,6 +206,7 @@ void seq_input_event(unsigned char *event_rec, int len)
206 } 206 }
207 seq_copy_to_input(event_rec, len); 207 seq_copy_to_input(event_rec, len);
208} 208}
209EXPORT_SYMBOL(seq_input_event);
209 210
210int sequencer_write(int dev, struct file *file, const char __user *buf, int count) 211int sequencer_write(int dev, struct file *file, const char __user *buf, int count)
211{ 212{
@@ -1554,6 +1555,7 @@ void sequencer_timer(unsigned long dummy)
1554{ 1555{
1555 seq_startplay(); 1556 seq_startplay();
1556} 1557}
1558EXPORT_SYMBOL(sequencer_timer);
1557 1559
1558int note_to_freq(int note_num) 1560int note_to_freq(int note_num)
1559{ 1561{
@@ -1587,6 +1589,7 @@ int note_to_freq(int note_num)
1587 1589
1588 return note_freq; 1590 return note_freq;
1589} 1591}
1592EXPORT_SYMBOL(note_to_freq);
1590 1593
1591unsigned long compute_finetune(unsigned long base_freq, int bend, int range, 1594unsigned long compute_finetune(unsigned long base_freq, int bend, int range,
1592 int vibrato_cents) 1595 int vibrato_cents)
@@ -1640,19 +1643,12 @@ unsigned long compute_finetune(unsigned long base_freq, int bend, int range,
1640 else 1643 else
1641 return (base_freq * amount) / 10000; /* Bend up */ 1644 return (base_freq * amount) / 10000; /* Bend up */
1642} 1645}
1643 1646EXPORT_SYMBOL(compute_finetune);
1644 1647
1645void sequencer_init(void) 1648void sequencer_init(void)
1646{ 1649{
1647 /* drag in sequencer_syms.o */
1648 {
1649 extern char sequencer_syms_symbol;
1650 sequencer_syms_symbol = 0;
1651 }
1652
1653 if (sequencer_ok) 1650 if (sequencer_ok)
1654 return; 1651 return;
1655 MIDIbuf_init();
1656 queue = (unsigned char *)vmalloc(SEQ_MAX_QUEUE * EV_SZ); 1652 queue = (unsigned char *)vmalloc(SEQ_MAX_QUEUE * EV_SZ);
1657 if (queue == NULL) 1653 if (queue == NULL)
1658 { 1654 {
@@ -1668,6 +1664,7 @@ void sequencer_init(void)
1668 } 1664 }
1669 sequencer_ok = 1; 1665 sequencer_ok = 1;
1670} 1666}
1667EXPORT_SYMBOL(sequencer_init);
1671 1668
1672void sequencer_unload(void) 1669void sequencer_unload(void)
1673{ 1670{
diff --git a/sound/oss/sequencer_syms.c b/sound/oss/sequencer_syms.c
deleted file mode 100644
index 5d008798c310..000000000000
--- a/sound/oss/sequencer_syms.c
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Exported symbols for sequencer driver.
3 */
4
5#include <linux/module.h>
6
7char sequencer_syms_symbol;
8
9#include "sound_config.h"
10#include "sound_calls.h"
11
12EXPORT_SYMBOL(note_to_freq);
13EXPORT_SYMBOL(compute_finetune);
14EXPORT_SYMBOL(seq_copy_to_input);
15EXPORT_SYMBOL(seq_input_event);
16EXPORT_SYMBOL(sequencer_init);
17EXPORT_SYMBOL(sequencer_timer);
18
19EXPORT_SYMBOL(sound_timer_init);
20EXPORT_SYMBOL(sound_timer_interrupt);
21EXPORT_SYMBOL(sound_timer_syncinterval);
22
23/* Tuning */
24
25#define _SEQUENCER_C_
26#include "tuning.h"
27
28EXPORT_SYMBOL(cent_tuning);
29EXPORT_SYMBOL(semitone_tuning);
diff --git a/sound/oss/sgalaxy.c b/sound/oss/sgalaxy.c
deleted file mode 100644
index 0bcff6735319..000000000000
--- a/sound/oss/sgalaxy.c
+++ /dev/null
@@ -1,207 +0,0 @@
1/*
2 * sound/oss/sgalaxy.c
3 *
4 * Low level driver for Aztech Sound Galaxy cards.
5 * Copyright 1998 Artur Skawina <skawina@geocities.com>
6 *
7 * Supported cards:
8 * Aztech Sound Galaxy Waverider Pro 32 - 3D
9 * Aztech Sound Galaxy Washington 16
10 *
11 * Based on cs4232.c by Hannu Savolainen and Alan Cox.
12 *
13 *
14 * Copyright (C) by Hannu Savolainen 1993-1997
15 *
16 * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
17 * Version 2 (June 1991). See the "COPYING" file distributed with this software
18 * for more info.
19 *
20 * Changes:
21 * 11-10-2000 Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
22 * Added __init to sb_rst() and sb_cmd()
23 */
24
25#include <linux/init.h>
26#include <linux/module.h>
27
28#include "sound_config.h"
29#include "ad1848.h"
30
31static void sleep( unsigned howlong )
32{
33 current->state = TASK_INTERRUPTIBLE;
34 schedule_timeout(howlong);
35}
36
37#define DPORT 0x80
38
39/* Sound Blaster regs */
40
41#define SBDSP_RESET 0x6
42#define SBDSP_READ 0xA
43#define SBDSP_COMMAND 0xC
44#define SBDSP_STATUS SBDSP_COMMAND
45#define SBDSP_DATA_AVAIL 0xE
46
47static int __init sb_rst(int base)
48{
49 int i;
50
51 outb( 1, base+SBDSP_RESET ); /* reset the DSP */
52 outb( 0, base+SBDSP_RESET );
53
54 for ( i=0; i<500; i++ ) /* delay */
55 inb(DPORT);
56
57 for ( i=0; i<100000; i++ )
58 {
59 if ( inb( base+SBDSP_DATA_AVAIL )&0x80 )
60 break;
61 }
62
63 if ( inb( base+SBDSP_READ )!=0xAA )
64 return 0;
65
66 return 1;
67}
68
69static int __init sb_cmd( int base, unsigned char val )
70{
71 int i;
72
73 for ( i=100000; i; i-- )
74 {
75 if ( (inb( base+SBDSP_STATUS )&0x80)==0 )
76 {
77 outb( val, base+SBDSP_COMMAND );
78 break;
79 }
80 }
81 return i; /* i>0 == success */
82}
83
84
85#define ai_sgbase driver_use_1
86
87static int __init probe_sgalaxy( struct address_info *ai )
88{
89 struct resource *ports;
90 int n;
91
92 if (!request_region(ai->io_base, 4, "WSS config")) {
93 printk(KERN_ERR "sgalaxy: WSS IO port 0x%03x not available\n", ai->io_base);
94 return 0;
95 }
96
97 ports = request_region(ai->io_base + 4, 4, "ad1848");
98 if (!ports) {
99 printk(KERN_ERR "sgalaxy: WSS IO port 0x%03x not available\n", ai->io_base);
100 release_region(ai->io_base, 4);
101 return 0;
102 }
103
104 if (!request_region( ai->ai_sgbase, 0x10, "SoundGalaxy SB")) {
105 printk(KERN_ERR "sgalaxy: SB IO port 0x%03x not available\n", ai->ai_sgbase);
106 release_region(ai->io_base + 4, 4);
107 release_region(ai->io_base, 4);
108 return 0;
109 }
110
111 if (ad1848_detect(ports, NULL, ai->osp))
112 goto out; /* The card is already active, check irq etc... */
113
114 /* switch to MSS/WSS mode */
115
116 sb_rst( ai->ai_sgbase );
117
118 sb_cmd( ai->ai_sgbase, 9 );
119 sb_cmd( ai->ai_sgbase, 0 );
120
121 sleep( HZ/10 );
122
123out:
124 if (!probe_ms_sound(ai, ports)) {
125 release_region(ai->io_base + 4, 4);
126 release_region(ai->io_base, 4);
127 release_region(ai->ai_sgbase, 0x10);
128 return 0;
129 }
130
131 attach_ms_sound(ai, ports, THIS_MODULE);
132 n=ai->slots[0];
133
134 if (n!=-1 && audio_devs[n]->mixer_dev != -1 ) {
135 AD1848_REROUTE( SOUND_MIXER_LINE1, SOUND_MIXER_LINE ); /* Line-in */
136 AD1848_REROUTE( SOUND_MIXER_LINE2, SOUND_MIXER_SYNTH ); /* FM+Wavetable*/
137 AD1848_REROUTE( SOUND_MIXER_LINE3, SOUND_MIXER_CD ); /* CD */
138 }
139 return 1;
140}
141
142static void __exit unload_sgalaxy( struct address_info *ai )
143{
144 unload_ms_sound( ai );
145 release_region( ai->ai_sgbase, 0x10 );
146}
147
148static struct address_info cfg;
149
150static int __initdata io = -1;
151static int __initdata irq = -1;
152static int __initdata dma = -1;
153static int __initdata dma2 = -1;
154static int __initdata sgbase = -1;
155
156module_param(io, int, 0);
157module_param(irq, int, 0);
158module_param(dma, int, 0);
159module_param(dma2, int, 0);
160module_param(sgbase, int, 0);
161
162static int __init init_sgalaxy(void)
163{
164 cfg.io_base = io;
165 cfg.irq = irq;
166 cfg.dma = dma;
167 cfg.dma2 = dma2;
168 cfg.ai_sgbase = sgbase;
169
170 if (cfg.io_base == -1 || cfg.irq == -1 || cfg.dma == -1 || cfg.ai_sgbase == -1 ) {
171 printk(KERN_ERR "sgalaxy: io, irq, dma and sgbase must be set.\n");
172 return -EINVAL;
173 }
174
175 if ( probe_sgalaxy(&cfg) == 0 )
176 return -ENODEV;
177
178 return 0;
179}
180
181static void __exit cleanup_sgalaxy(void)
182{
183 unload_sgalaxy(&cfg);
184}
185
186module_init(init_sgalaxy);
187module_exit(cleanup_sgalaxy);
188
189#ifndef MODULE
190static int __init setup_sgalaxy(char *str)
191{
192 /* io, irq, dma, dma2, sgbase */
193 int ints[6];
194
195 str = get_options(str, ARRAY_SIZE(ints), ints);
196 io = ints[1];
197 irq = ints[2];
198 dma = ints[3];
199 dma2 = ints[4];
200 sgbase = ints[5];
201
202 return 1;
203}
204
205__setup("sgalaxy=", setup_sgalaxy);
206#endif
207MODULE_LICENSE("GPL");
diff --git a/sound/oss/sonicvibes.c b/sound/oss/sonicvibes.c
deleted file mode 100644
index 8ea532d40198..000000000000
--- a/sound/oss/sonicvibes.c
+++ /dev/null
@@ -1,2792 +0,0 @@
1/*****************************************************************************/
2
3/*
4 * sonicvibes.c -- S3 Sonic Vibes audio driver.
5 *
6 * Copyright (C) 1998-2001, 2003 Thomas Sailer (t.sailer@alumni.ethz.ch)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 * Special thanks to David C. Niemi
23 *
24 *
25 * Module command line parameters:
26 * none so far
27 *
28 *
29 * Supported devices:
30 * /dev/dsp standard /dev/dsp device, (mostly) OSS compatible
31 * /dev/mixer standard /dev/mixer device, (mostly) OSS compatible
32 * /dev/midi simple MIDI UART interface, no ioctl
33 *
34 * The card has both an FM and a Wavetable synth, but I have to figure
35 * out first how to drive them...
36 *
37 * Revision history
38 * 06.05.1998 0.1 Initial release
39 * 10.05.1998 0.2 Fixed many bugs, esp. ADC rate calculation
40 * First stab at a simple midi interface (no bells&whistles)
41 * 13.05.1998 0.3 Fix stupid cut&paste error: set_adc_rate was called instead of
42 * set_dac_rate in the FMODE_WRITE case in sv_open
43 * Fix hwptr out of bounds (now mpg123 works)
44 * 14.05.1998 0.4 Don't allow excessive interrupt rates
45 * 08.06.1998 0.5 First release using Alan Cox' soundcore instead of miscdevice
46 * 03.08.1998 0.6 Do not include modversions.h
47 * Now mixer behaviour can basically be selected between
48 * "OSS documented" and "OSS actual" behaviour
49 * 31.08.1998 0.7 Fix realplayer problems - dac.count issues
50 * 10.12.1998 0.8 Fix drain_dac trying to wait on not yet initialized DMA
51 * 16.12.1998 0.9 Fix a few f_file & FMODE_ bugs
52 * 06.01.1999 0.10 remove the silly SA_INTERRUPT flag.
53 * hopefully killed the egcs section type conflict
54 * 12.03.1999 0.11 cinfo.blocks should be reset after GETxPTR ioctl.
55 * reported by Johan Maes <joma@telindus.be>
56 * 22.03.1999 0.12 return EAGAIN instead of EBUSY when O_NONBLOCK
57 * read/write cannot be executed
58 * 05.04.1999 0.13 added code to sv_read and sv_write which should detect
59 * lockups of the sound chip and revive it. This is basically
60 * an ugly hack, but at least applications using this driver
61 * won't hang forever. I don't know why these lockups happen,
62 * it might well be the motherboard chipset (an early 486 PCI
63 * board with ALI chipset), since every busmastering 100MB
64 * ethernet card I've tried (Realtek 8139 and Macronix tulip clone)
65 * exhibit similar behaviour (they work for a couple of packets
66 * and then lock up and can be revived by ifconfig down/up).
67 * 07.04.1999 0.14 implemented the following ioctl's: SOUND_PCM_READ_RATE,
68 * SOUND_PCM_READ_CHANNELS, SOUND_PCM_READ_BITS;
69 * Alpha fixes reported by Peter Jones <pjones@redhat.com>
70 * Note: dmaio hack might still be wrong on archs other than i386
71 * 15.06.1999 0.15 Fix bad allocation bug.
72 * Thanks to Deti Fliegl <fliegl@in.tum.de>
73 * 28.06.1999 0.16 Add pci_set_master
74 * 03.08.1999 0.17 adapt to Linus' new __setup/__initcall
75 * added kernel command line options "sonicvibes=reverb" and "sonicvibesdmaio=dmaioaddr"
76 * 12.08.1999 0.18 module_init/__setup fixes
77 * 24.08.1999 0.19 get rid of the dmaio kludge, replace with allocate_resource
78 * 31.08.1999 0.20 add spin_lock_init
79 * use new resource allocation to allocate DDMA IO space
80 * replaced current->state = x with set_current_state(x)
81 * 03.09.1999 0.21 change read semantics for MIDI to match
82 * OSS more closely; remove possible wakeup race
83 * 28.10.1999 0.22 More waitqueue races fixed
84 * 01.12.1999 0.23 New argument to allocate_resource
85 * 07.12.1999 0.24 More allocate_resource semantics change
86 * 08.01.2000 0.25 Prevent some ioctl's from returning bad count values on underrun/overrun;
87 * Tim Janik's BSE (Bedevilled Sound Engine) found this
88 * use Martin Mares' pci_assign_resource
89 * 07.02.2000 0.26 Use pci_alloc_consistent and pci_register_driver
90 * 21.11.2000 0.27 Initialize dma buffers in poll, otherwise poll may return a bogus mask
91 * 12.12.2000 0.28 More dma buffer initializations, patch from
92 * Tjeerd Mulder <tjeerd.mulder@fujitsu-siemens.com>
93 * 31.01.2001 0.29 Register/Unregister gameport
94 * Fix SETTRIGGER non OSS API conformity
95 * 18.05.2001 0.30 PCI probing and error values cleaned up by Marcus
96 * Meissner <mm@caldera.de>
97 * 03.01.2003 0.31 open_mode fixes from Georg Acher <acher@in.tum.de>
98 *
99 */
100
101/*****************************************************************************/
102
103#include <linux/module.h>
104#include <linux/string.h>
105#include <linux/ioport.h>
106#include <linux/interrupt.h>
107#include <linux/wait.h>
108#include <linux/mm.h>
109#include <linux/delay.h>
110#include <linux/sound.h>
111#include <linux/slab.h>
112#include <linux/soundcard.h>
113#include <linux/pci.h>
114#include <linux/init.h>
115#include <linux/poll.h>
116#include <linux/spinlock.h>
117#include <linux/smp_lock.h>
118#include <linux/gameport.h>
119#include <linux/dma-mapping.h>
120#include <linux/mutex.h>
121
122
123#include <asm/io.h>
124#include <asm/uaccess.h>
125
126#include "dm.h"
127
128#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
129#define SUPPORT_JOYSTICK 1
130#endif
131
132/* --------------------------------------------------------------------- */
133
134#undef OSS_DOCUMENTED_MIXER_SEMANTICS
135
136/* --------------------------------------------------------------------- */
137
138#ifndef PCI_VENDOR_ID_S3
139#define PCI_VENDOR_ID_S3 0x5333
140#endif
141#ifndef PCI_DEVICE_ID_S3_SONICVIBES
142#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00
143#endif
144
145#define SV_MAGIC ((PCI_VENDOR_ID_S3<<16)|PCI_DEVICE_ID_S3_SONICVIBES)
146
147#define SV_EXTENT_SB 0x10
148#define SV_EXTENT_ENH 0x10
149#define SV_EXTENT_SYNTH 0x4
150#define SV_EXTENT_MIDI 0x4
151#define SV_EXTENT_GAME 0x8
152#define SV_EXTENT_DMA 0x10
153
154/*
155 * we are not a bridge and thus use a resource for DDMA that is used for bridges but
156 * left empty for normal devices
157 */
158#define RESOURCE_SB 0
159#define RESOURCE_ENH 1
160#define RESOURCE_SYNTH 2
161#define RESOURCE_MIDI 3
162#define RESOURCE_GAME 4
163#define RESOURCE_DDMA 7
164
165#define SV_MIDI_DATA 0
166#define SV_MIDI_COMMAND 1
167#define SV_MIDI_STATUS 1
168
169#define SV_DMA_ADDR0 0
170#define SV_DMA_ADDR1 1
171#define SV_DMA_ADDR2 2
172#define SV_DMA_ADDR3 3
173#define SV_DMA_COUNT0 4
174#define SV_DMA_COUNT1 5
175#define SV_DMA_COUNT2 6
176#define SV_DMA_MODE 0xb
177#define SV_DMA_RESET 0xd
178#define SV_DMA_MASK 0xf
179
180/*
181 * DONT reset the DMA controllers unless you understand
182 * the reset semantics. Assuming reset semantics as in
183 * the 8237 does not work.
184 */
185
186#define DMA_MODE_AUTOINIT 0x10
187#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
188#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
189
190#define SV_CODEC_CONTROL 0
191#define SV_CODEC_INTMASK 1
192#define SV_CODEC_STATUS 2
193#define SV_CODEC_IADDR 4
194#define SV_CODEC_IDATA 5
195
196#define SV_CCTRL_RESET 0x80
197#define SV_CCTRL_INTADRIVE 0x20
198#define SV_CCTRL_WAVETABLE 0x08
199#define SV_CCTRL_REVERB 0x04
200#define SV_CCTRL_ENHANCED 0x01
201
202#define SV_CINTMASK_DMAA 0x01
203#define SV_CINTMASK_DMAC 0x04
204#define SV_CINTMASK_SPECIAL 0x08
205#define SV_CINTMASK_UPDOWN 0x40
206#define SV_CINTMASK_MIDI 0x80
207
208#define SV_CSTAT_DMAA 0x01
209#define SV_CSTAT_DMAC 0x04
210#define SV_CSTAT_SPECIAL 0x08
211#define SV_CSTAT_UPDOWN 0x40
212#define SV_CSTAT_MIDI 0x80
213
214#define SV_CIADDR_TRD 0x80
215#define SV_CIADDR_MCE 0x40
216
217/* codec indirect registers */
218#define SV_CIMIX_ADCINL 0x00
219#define SV_CIMIX_ADCINR 0x01
220#define SV_CIMIX_AUX1INL 0x02
221#define SV_CIMIX_AUX1INR 0x03
222#define SV_CIMIX_CDINL 0x04
223#define SV_CIMIX_CDINR 0x05
224#define SV_CIMIX_LINEINL 0x06
225#define SV_CIMIX_LINEINR 0x07
226#define SV_CIMIX_MICIN 0x08
227#define SV_CIMIX_SYNTHINL 0x0A
228#define SV_CIMIX_SYNTHINR 0x0B
229#define SV_CIMIX_AUX2INL 0x0C
230#define SV_CIMIX_AUX2INR 0x0D
231#define SV_CIMIX_ANALOGINL 0x0E
232#define SV_CIMIX_ANALOGINR 0x0F
233#define SV_CIMIX_PCMINL 0x10
234#define SV_CIMIX_PCMINR 0x11
235
236#define SV_CIGAMECONTROL 0x09
237#define SV_CIDATAFMT 0x12
238#define SV_CIENABLE 0x13
239#define SV_CIUPDOWN 0x14
240#define SV_CIREVISION 0x15
241#define SV_CIADCOUTPUT 0x16
242#define SV_CIDMAABASECOUNT1 0x18
243#define SV_CIDMAABASECOUNT0 0x19
244#define SV_CIDMACBASECOUNT1 0x1c
245#define SV_CIDMACBASECOUNT0 0x1d
246#define SV_CIPCMSR0 0x1e
247#define SV_CIPCMSR1 0x1f
248#define SV_CISYNTHSR0 0x20
249#define SV_CISYNTHSR1 0x21
250#define SV_CIADCCLKSOURCE 0x22
251#define SV_CIADCALTSR 0x23
252#define SV_CIADCPLLM 0x24
253#define SV_CIADCPLLN 0x25
254#define SV_CISYNTHPLLM 0x26
255#define SV_CISYNTHPLLN 0x27
256#define SV_CIUARTCONTROL 0x2a
257#define SV_CIDRIVECONTROL 0x2b
258#define SV_CISRSSPACE 0x2c
259#define SV_CISRSCENTER 0x2d
260#define SV_CIWAVETABLESRC 0x2e
261#define SV_CIANALOGPWRDOWN 0x30
262#define SV_CIDIGITALPWRDOWN 0x31
263
264
265#define SV_CIMIX_ADCSRC_CD 0x20
266#define SV_CIMIX_ADCSRC_DAC 0x40
267#define SV_CIMIX_ADCSRC_AUX2 0x60
268#define SV_CIMIX_ADCSRC_LINE 0x80
269#define SV_CIMIX_ADCSRC_AUX1 0xa0
270#define SV_CIMIX_ADCSRC_MIC 0xc0
271#define SV_CIMIX_ADCSRC_MIXOUT 0xe0
272#define SV_CIMIX_ADCSRC_MASK 0xe0
273
274#define SV_CFMT_STEREO 0x01
275#define SV_CFMT_16BIT 0x02
276#define SV_CFMT_MASK 0x03
277#define SV_CFMT_ASHIFT 0
278#define SV_CFMT_CSHIFT 4
279
280static const unsigned sample_size[] = { 1, 2, 2, 4 };
281static const unsigned sample_shift[] = { 0, 1, 1, 2 };
282
283#define SV_CENABLE_PPE 0x4
284#define SV_CENABLE_RE 0x2
285#define SV_CENABLE_PE 0x1
286
287
288/* MIDI buffer sizes */
289
290#define MIDIINBUF 256
291#define MIDIOUTBUF 256
292
293#define FMODE_MIDI_SHIFT 2
294#define FMODE_MIDI_READ (FMODE_READ << FMODE_MIDI_SHIFT)
295#define FMODE_MIDI_WRITE (FMODE_WRITE << FMODE_MIDI_SHIFT)
296
297#define FMODE_DMFM 0x10
298
299/* --------------------------------------------------------------------- */
300
301struct sv_state {
302 /* magic */
303 unsigned int magic;
304
305 /* list of sonicvibes devices */
306 struct list_head devs;
307
308 /* the corresponding pci_dev structure */
309 struct pci_dev *dev;
310
311 /* soundcore stuff */
312 int dev_audio;
313 int dev_mixer;
314 int dev_midi;
315 int dev_dmfm;
316
317 /* hardware resources */
318 unsigned long iosb, ioenh, iosynth, iomidi; /* long for SPARC */
319 unsigned int iodmaa, iodmac, irq;
320
321 /* mixer stuff */
322 struct {
323 unsigned int modcnt;
324#ifndef OSS_DOCUMENTED_MIXER_SEMANTICS
325 unsigned short vol[13];
326#endif /* OSS_DOCUMENTED_MIXER_SEMANTICS */
327 } mix;
328
329 /* wave stuff */
330 unsigned int rateadc, ratedac;
331 unsigned char fmt, enable;
332
333 spinlock_t lock;
334 struct mutex open_mutex;
335 mode_t open_mode;
336 wait_queue_head_t open_wait;
337
338 struct dmabuf {
339 void *rawbuf;
340 dma_addr_t dmaaddr;
341 unsigned buforder;
342 unsigned numfrag;
343 unsigned fragshift;
344 unsigned hwptr, swptr;
345 unsigned total_bytes;
346 int count;
347 unsigned error; /* over/underrun */
348 wait_queue_head_t wait;
349 /* redundant, but makes calculations easier */
350 unsigned fragsize;
351 unsigned dmasize;
352 unsigned fragsamples;
353 /* OSS stuff */
354 unsigned mapped:1;
355 unsigned ready:1;
356 unsigned endcleared:1;
357 unsigned enabled:1;
358 unsigned ossfragshift;
359 int ossmaxfrags;
360 unsigned subdivision;
361 } dma_dac, dma_adc;
362
363 /* midi stuff */
364 struct {
365 unsigned ird, iwr, icnt;
366 unsigned ord, owr, ocnt;
367 wait_queue_head_t iwait;
368 wait_queue_head_t owait;
369 struct timer_list timer;
370 unsigned char ibuf[MIDIINBUF];
371 unsigned char obuf[MIDIOUTBUF];
372 } midi;
373
374#if SUPPORT_JOYSTICK
375 struct gameport *gameport;
376#endif
377};
378
379/* --------------------------------------------------------------------- */
380
381static LIST_HEAD(devs);
382static unsigned long wavetable_mem;
383
384/* --------------------------------------------------------------------- */
385
386static inline unsigned ld2(unsigned int x)
387{
388 unsigned r = 0;
389
390 if (x >= 0x10000) {
391 x >>= 16;
392 r += 16;
393 }
394 if (x >= 0x100) {
395 x >>= 8;
396 r += 8;
397 }
398 if (x >= 0x10) {
399 x >>= 4;
400 r += 4;
401 }
402 if (x >= 4) {
403 x >>= 2;
404 r += 2;
405 }
406 if (x >= 2)
407 r++;
408 return r;
409}
410
411/* --------------------------------------------------------------------- */
412
413/*
414 * Why use byte IO? Nobody knows, but S3 does it also in their Windows driver.
415 */
416
417#undef DMABYTEIO
418
419static void set_dmaa(struct sv_state *s, unsigned int addr, unsigned int count)
420{
421#ifdef DMABYTEIO
422 unsigned io = s->iodmaa, u;
423
424 count--;
425 for (u = 4; u > 0; u--, addr >>= 8, io++)
426 outb(addr & 0xff, io);
427 for (u = 3; u > 0; u--, count >>= 8, io++)
428 outb(count & 0xff, io);
429#else /* DMABYTEIO */
430 count--;
431 outl(addr, s->iodmaa + SV_DMA_ADDR0);
432 outl(count, s->iodmaa + SV_DMA_COUNT0);
433#endif /* DMABYTEIO */
434 outb(0x18, s->iodmaa + SV_DMA_MODE);
435}
436
437static void set_dmac(struct sv_state *s, unsigned int addr, unsigned int count)
438{
439#ifdef DMABYTEIO
440 unsigned io = s->iodmac, u;
441
442 count >>= 1;
443 count--;
444 for (u = 4; u > 0; u--, addr >>= 8, io++)
445 outb(addr & 0xff, io);
446 for (u = 3; u > 0; u--, count >>= 8, io++)
447 outb(count & 0xff, io);
448#else /* DMABYTEIO */
449 count >>= 1;
450 count--;
451 outl(addr, s->iodmac + SV_DMA_ADDR0);
452 outl(count, s->iodmac + SV_DMA_COUNT0);
453#endif /* DMABYTEIO */
454 outb(0x14, s->iodmac + SV_DMA_MODE);
455}
456
457static inline unsigned get_dmaa(struct sv_state *s)
458{
459#ifdef DMABYTEIO
460 unsigned io = s->iodmaa+6, v = 0, u;
461
462 for (u = 3; u > 0; u--, io--) {
463 v <<= 8;
464 v |= inb(io);
465 }
466 return v + 1;
467#else /* DMABYTEIO */
468 return (inl(s->iodmaa + SV_DMA_COUNT0) & 0xffffff) + 1;
469#endif /* DMABYTEIO */
470}
471
472static inline unsigned get_dmac(struct sv_state *s)
473{
474#ifdef DMABYTEIO
475 unsigned io = s->iodmac+6, v = 0, u;
476
477 for (u = 3; u > 0; u--, io--) {
478 v <<= 8;
479 v |= inb(io);
480 }
481 return (v + 1) << 1;
482#else /* DMABYTEIO */
483 return ((inl(s->iodmac + SV_DMA_COUNT0) & 0xffffff) + 1) << 1;
484#endif /* DMABYTEIO */
485}
486
487static void wrindir(struct sv_state *s, unsigned char idx, unsigned char data)
488{
489 outb(idx & 0x3f, s->ioenh + SV_CODEC_IADDR);
490 udelay(10);
491 outb(data, s->ioenh + SV_CODEC_IDATA);
492 udelay(10);
493}
494
495static unsigned char rdindir(struct sv_state *s, unsigned char idx)
496{
497 unsigned char v;
498
499 outb(idx & 0x3f, s->ioenh + SV_CODEC_IADDR);
500 udelay(10);
501 v = inb(s->ioenh + SV_CODEC_IDATA);
502 udelay(10);
503 return v;
504}
505
506static void set_fmt(struct sv_state *s, unsigned char mask, unsigned char data)
507{
508 unsigned long flags;
509
510 spin_lock_irqsave(&s->lock, flags);
511 outb(SV_CIDATAFMT | SV_CIADDR_MCE, s->ioenh + SV_CODEC_IADDR);
512 if (mask) {
513 s->fmt = inb(s->ioenh + SV_CODEC_IDATA);
514 udelay(10);
515 }
516 s->fmt = (s->fmt & mask) | data;
517 outb(s->fmt, s->ioenh + SV_CODEC_IDATA);
518 udelay(10);
519 outb(0, s->ioenh + SV_CODEC_IADDR);
520 spin_unlock_irqrestore(&s->lock, flags);
521 udelay(10);
522}
523
524static void frobindir(struct sv_state *s, unsigned char idx, unsigned char mask, unsigned char data)
525{
526 outb(idx & 0x3f, s->ioenh + SV_CODEC_IADDR);
527 udelay(10);
528 outb((inb(s->ioenh + SV_CODEC_IDATA) & mask) ^ data, s->ioenh + SV_CODEC_IDATA);
529 udelay(10);
530}
531
532#define REFFREQUENCY 24576000
533#define ADCMULT 512
534#define FULLRATE 48000
535
536static unsigned setpll(struct sv_state *s, unsigned char reg, unsigned rate)
537{
538 unsigned long flags;
539 unsigned char r, m=0, n=0;
540 unsigned xm, xn, xr, xd, metric = ~0U;
541 /* the warnings about m and n used uninitialized are bogus and may safely be ignored */
542
543 if (rate < 625000/ADCMULT)
544 rate = 625000/ADCMULT;
545 if (rate > 150000000/ADCMULT)
546 rate = 150000000/ADCMULT;
547 /* slight violation of specs, needed for continuous sampling rates */
548 for (r = 0; rate < 75000000/ADCMULT; r += 0x20, rate <<= 1);
549 for (xn = 3; xn < 35; xn++)
550 for (xm = 3; xm < 130; xm++) {
551 xr = REFFREQUENCY/ADCMULT * xm / xn;
552 xd = abs((signed)(xr - rate));
553 if (xd < metric) {
554 metric = xd;
555 m = xm - 2;
556 n = xn - 2;
557 }
558 }
559 reg &= 0x3f;
560 spin_lock_irqsave(&s->lock, flags);
561 outb(reg, s->ioenh + SV_CODEC_IADDR);
562 udelay(10);
563 outb(m, s->ioenh + SV_CODEC_IDATA);
564 udelay(10);
565 outb(reg+1, s->ioenh + SV_CODEC_IADDR);
566 udelay(10);
567 outb(r | n, s->ioenh + SV_CODEC_IDATA);
568 spin_unlock_irqrestore(&s->lock, flags);
569 udelay(10);
570 return (REFFREQUENCY/ADCMULT * (m + 2) / (n + 2)) >> ((r >> 5) & 7);
571}
572
573#if 0
574
575static unsigned getpll(struct sv_state *s, unsigned char reg)
576{
577 unsigned long flags;
578 unsigned char m, n;
579
580 reg &= 0x3f;
581 spin_lock_irqsave(&s->lock, flags);
582 outb(reg, s->ioenh + SV_CODEC_IADDR);
583 udelay(10);
584 m = inb(s->ioenh + SV_CODEC_IDATA);
585 udelay(10);
586 outb(reg+1, s->ioenh + SV_CODEC_IADDR);
587 udelay(10);
588 n = inb(s->ioenh + SV_CODEC_IDATA);
589 spin_unlock_irqrestore(&s->lock, flags);
590 udelay(10);
591 return (REFFREQUENCY/ADCMULT * (m + 2) / ((n & 0x1f) + 2)) >> ((n >> 5) & 7);
592}
593
594#endif
595
596static void set_dac_rate(struct sv_state *s, unsigned rate)
597{
598 unsigned div;
599 unsigned long flags;
600
601 if (rate > 48000)
602 rate = 48000;
603 if (rate < 4000)
604 rate = 4000;
605 div = (rate * 65536 + FULLRATE/2) / FULLRATE;
606 if (div > 65535)
607 div = 65535;
608 spin_lock_irqsave(&s->lock, flags);
609 wrindir(s, SV_CIPCMSR1, div >> 8);
610 wrindir(s, SV_CIPCMSR0, div);
611 spin_unlock_irqrestore(&s->lock, flags);
612 s->ratedac = (div * FULLRATE + 32768) / 65536;
613}
614
615static void set_adc_rate(struct sv_state *s, unsigned rate)
616{
617 unsigned long flags;
618 unsigned rate1, rate2, div;
619
620 if (rate > 48000)
621 rate = 48000;
622 if (rate < 4000)
623 rate = 4000;
624 rate1 = setpll(s, SV_CIADCPLLM, rate);
625 div = (48000 + rate/2) / rate;
626 if (div > 8)
627 div = 8;
628 rate2 = (48000 + div/2) / div;
629 spin_lock_irqsave(&s->lock, flags);
630 wrindir(s, SV_CIADCALTSR, (div-1) << 4);
631 if (abs((signed)(rate-rate2)) <= abs((signed)(rate-rate1))) {
632 wrindir(s, SV_CIADCCLKSOURCE, 0x10);
633 s->rateadc = rate2;
634 } else {
635 wrindir(s, SV_CIADCCLKSOURCE, 0x00);
636 s->rateadc = rate1;
637 }
638 spin_unlock_irqrestore(&s->lock, flags);
639}
640
641/* --------------------------------------------------------------------- */
642
643static inline void stop_adc(struct sv_state *s)
644{
645 unsigned long flags;
646
647 spin_lock_irqsave(&s->lock, flags);
648 s->enable &= ~SV_CENABLE_RE;
649 wrindir(s, SV_CIENABLE, s->enable);
650 spin_unlock_irqrestore(&s->lock, flags);
651}
652
653static inline void stop_dac(struct sv_state *s)
654{
655 unsigned long flags;
656
657 spin_lock_irqsave(&s->lock, flags);
658 s->enable &= ~(SV_CENABLE_PPE | SV_CENABLE_PE);
659 wrindir(s, SV_CIENABLE, s->enable);
660 spin_unlock_irqrestore(&s->lock, flags);
661}
662
663static void start_dac(struct sv_state *s)
664{
665 unsigned long flags;
666
667 spin_lock_irqsave(&s->lock, flags);
668 if ((s->dma_dac.mapped || s->dma_dac.count > 0) && s->dma_dac.ready) {
669 s->enable = (s->enable & ~SV_CENABLE_PPE) | SV_CENABLE_PE;
670 wrindir(s, SV_CIENABLE, s->enable);
671 }
672 spin_unlock_irqrestore(&s->lock, flags);
673}
674
675static void start_adc(struct sv_state *s)
676{
677 unsigned long flags;
678
679 spin_lock_irqsave(&s->lock, flags);
680 if ((s->dma_adc.mapped || s->dma_adc.count < (signed)(s->dma_adc.dmasize - 2*s->dma_adc.fragsize))
681 && s->dma_adc.ready) {
682 s->enable |= SV_CENABLE_RE;
683 wrindir(s, SV_CIENABLE, s->enable);
684 }
685 spin_unlock_irqrestore(&s->lock, flags);
686}
687
688/* --------------------------------------------------------------------- */
689
/* preferred DMA buffer size: 2^17 = 128 KiB, expressed as a page order */
#define DMABUF_DEFAULTORDER (17-PAGE_SHIFT)
/* smallest acceptable allocation when memory is tight: 2 pages */
#define DMABUF_MINORDER 1
692
693static void dealloc_dmabuf(struct sv_state *s, struct dmabuf *db)
694{
695 struct page *page, *pend;
696
697 if (db->rawbuf) {
698 /* undo marking the pages as reserved */
699 pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
700 for (page = virt_to_page(db->rawbuf); page <= pend; page++)
701 ClearPageReserved(page);
702 pci_free_consistent(s->dev, PAGE_SIZE << db->buforder, db->rawbuf, db->dmaaddr);
703 }
704 db->rawbuf = NULL;
705 db->mapped = db->ready = 0;
706}
707
708
709/* DMAA is used for playback, DMAC is used for recording */
710
/*
 * (Re)initialise the playback (rec=0 -> DMAA) or capture (rec=1 -> DMAC)
 * DMA machinery: stop the channel, allocate the raw buffer on first use,
 * derive OSS fragment geometry from the current rate and format, silence
 * the buffer and program the base/count registers.
 * Returns 0 on success or -ENOMEM if no buffer could be allocated.
 */
static int prog_dmabuf(struct sv_state *s, unsigned rec)
{
	struct dmabuf *db = rec ? &s->dma_adc : &s->dma_dac;
	unsigned rate = rec ? s->rateadc : s->ratedac;
	int order;
	unsigned bytepersec;
	unsigned bufs;
	struct page *page, *pend;
	unsigned char fmt;
	unsigned long flags;

	/* stop the direction being reprogrammed and latch its format bits */
	spin_lock_irqsave(&s->lock, flags);
	fmt = s->fmt;
	if (rec) {
		s->enable &= ~SV_CENABLE_RE;
		fmt >>= SV_CFMT_CSHIFT;
	} else {
		s->enable &= ~SV_CENABLE_PE;
		fmt >>= SV_CFMT_ASHIFT;
	}
	wrindir(s, SV_CIENABLE, s->enable);
	spin_unlock_irqrestore(&s->lock, flags);
	fmt &= SV_CFMT_MASK;
	db->hwptr = db->swptr = db->total_bytes = db->count = db->error = db->endcleared = 0;
	if (!db->rawbuf) {
		db->ready = db->mapped = 0;
		/* try the largest order first, falling back to smaller buffers */
		for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--)
			if ((db->rawbuf = pci_alloc_consistent(s->dev, PAGE_SIZE << order, &db->dmaaddr)))
				break;
		if (!db->rawbuf)
			return -ENOMEM;
		db->buforder = order;
		/* diagnostics: the buffer presumably must not cross a 64k boundary
		   or lie above 16MB for this DMA engine - TODO confirm vs datasheet */
		if ((virt_to_bus(db->rawbuf) ^ (virt_to_bus(db->rawbuf) + (PAGE_SIZE << db->buforder) - 1)) & ~0xffff)
			printk(KERN_DEBUG "sv: DMA buffer crosses 64k boundary: busaddr 0x%lx size %ld\n",
			       virt_to_bus(db->rawbuf), PAGE_SIZE << db->buforder);
		if ((virt_to_bus(db->rawbuf) + (PAGE_SIZE << db->buforder) - 1) & ~0xffffff)
			printk(KERN_DEBUG "sv: DMA buffer beyond 16MB: busaddr 0x%lx size %ld\n",
			       virt_to_bus(db->rawbuf), PAGE_SIZE << db->buforder);
		/* now mark the pages as reserved; otherwise remap_pfn_range doesn't do what we want */
		pend = virt_to_page(db->rawbuf + (PAGE_SIZE << db->buforder) - 1);
		for (page = virt_to_page(db->rawbuf); page <= pend; page++)
			SetPageReserved(page);
	}
	/* fragment sizing: honour user SETFRAGMENT hints, else aim for ~10ms */
	bytepersec = rate << sample_shift[fmt];
	bufs = PAGE_SIZE << db->buforder;
	if (db->ossfragshift) {
		if ((1000 << db->ossfragshift) < bytepersec)
			db->fragshift = ld2(bytepersec/1000);
		else
			db->fragshift = db->ossfragshift;
	} else {
		db->fragshift = ld2(bytepersec/100/(db->subdivision ? db->subdivision : 1));
		if (db->fragshift < 3)
			db->fragshift = 3;
	}
	/* keep at least 4 fragments (with fragments no smaller than 8 bytes) */
	db->numfrag = bufs >> db->fragshift;
	while (db->numfrag < 4 && db->fragshift > 3) {
		db->fragshift--;
		db->numfrag = bufs >> db->fragshift;
	}
	db->fragsize = 1 << db->fragshift;
	if (db->ossmaxfrags >= 4 && db->ossmaxfrags < db->numfrag)
		db->numfrag = db->ossmaxfrags;
	db->fragsamples = db->fragsize >> sample_shift[fmt];
	db->dmasize = db->numfrag << db->fragshift;
	/* pre-fill with silence: 0 for 16-bit signed, 0x80 for 8-bit unsigned */
	memset(db->rawbuf, (fmt & SV_CFMT_16BIT) ? 0 : 0x80, db->dmasize);
	spin_lock_irqsave(&s->lock, flags);
	if (rec) {
		set_dmac(s, db->dmaaddr, db->numfrag << db->fragshift);
		/* program enhanced mode registers */
		wrindir(s, SV_CIDMACBASECOUNT1, (db->fragsamples-1) >> 8);
		wrindir(s, SV_CIDMACBASECOUNT0, db->fragsamples-1);
	} else {
		set_dmaa(s, db->dmaaddr, db->numfrag << db->fragshift);
		/* program enhanced mode registers */
		wrindir(s, SV_CIDMAABASECOUNT1, (db->fragsamples-1) >> 8);
		wrindir(s, SV_CIDMAABASECOUNT0, db->fragsamples-1);
	}
	spin_unlock_irqrestore(&s->lock, flags);
	db->enabled = 1;
	db->ready = 1;
	return 0;
}
794
795static inline void clear_advance(struct sv_state *s)
796{
797 unsigned char c = (s->fmt & (SV_CFMT_16BIT << SV_CFMT_ASHIFT)) ? 0 : 0x80;
798 unsigned char *buf = s->dma_dac.rawbuf;
799 unsigned bsize = s->dma_dac.dmasize;
800 unsigned bptr = s->dma_dac.swptr;
801 unsigned len = s->dma_dac.fragsize;
802
803 if (bptr + len > bsize) {
804 unsigned x = bsize - bptr;
805 memset(buf + bptr, c, x);
806 bptr = 0;
807 len -= x;
808 }
809 memset(buf + bptr, c, len);
810}
811
/* call with spinlock held! */
/*
 * Fold the current hardware DMA positions into the software bookkeeping:
 * advance hwptr/total_bytes/count for both directions, wake sleepers when
 * thresholds are crossed, and stop a channel on overrun (ADC) or
 * underrun (DAC).
 */
static void sv_update_ptr(struct sv_state *s)
{
	unsigned hwptr;
	int diff;

	/* update ADC pointer */
	if (s->dma_adc.ready) {
		/* ring distance the chip advanced since the last update */
		hwptr = (s->dma_adc.dmasize - get_dmac(s)) % s->dma_adc.dmasize;
		diff = (s->dma_adc.dmasize + hwptr - s->dma_adc.hwptr) % s->dma_adc.dmasize;
		s->dma_adc.hwptr = hwptr;
		s->dma_adc.total_bytes += diff;
		s->dma_adc.count += diff;
		if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
			wake_up(&s->dma_adc.wait);
		if (!s->dma_adc.mapped) {
			/* overrun: unread data fills all but 1.5 fragments - stop capture */
			if (s->dma_adc.count > (signed)(s->dma_adc.dmasize - ((3 * s->dma_adc.fragsize) >> 1))) {
				s->enable &= ~SV_CENABLE_RE;
				wrindir(s, SV_CIENABLE, s->enable);
				s->dma_adc.error++;
			}
		}
	}
	/* update DAC pointer */
	if (s->dma_dac.ready) {
		hwptr = (s->dma_dac.dmasize - get_dmaa(s)) % s->dma_dac.dmasize;
		diff = (s->dma_dac.dmasize + hwptr - s->dma_dac.hwptr) % s->dma_dac.dmasize;
		s->dma_dac.hwptr = hwptr;
		s->dma_dac.total_bytes += diff;
		if (s->dma_dac.mapped) {
			/* mmapped: count accumulates bytes played for the app to consume */
			s->dma_dac.count += diff;
			if (s->dma_dac.count >= (signed)s->dma_dac.fragsize)
				wake_up(&s->dma_dac.wait);
		} else {
			/* normal write path: count is bytes still queued to play */
			s->dma_dac.count -= diff;
			if (s->dma_dac.count <= 0) {
				/* underrun: stop playback and record the error */
				s->enable &= ~SV_CENABLE_PE;
				wrindir(s, SV_CIENABLE, s->enable);
				s->dma_dac.error++;
			} else if (s->dma_dac.count <= (signed)s->dma_dac.fragsize && !s->dma_dac.endcleared) {
				/* nearly dry: pre-silence the next fragment once */
				clear_advance(s);
				s->dma_dac.endcleared = 1;
			}
			if (s->dma_dac.count + (signed)s->dma_dac.fragsize <= (signed)s->dma_dac.dmasize)
				wake_up(&s->dma_dac.wait);
		}
	}
}
860
/* hold spinlock for the following! */
/*
 * Service the MIDI UART in both directions: drain received bytes into
 * the input ring (silently dropping them when it is full) and feed
 * queued output bytes while the transmitter accepts them, waking any
 * sleeping readers/writers.
 */
static void sv_handle_midi(struct sv_state *s)
{
	unsigned char ch;
	int wake;

	wake = 0;
	/* status bit 0x80 clear presumably means receive data ready - TODO confirm */
	while (!(inb(s->iomidi+1) & 0x80)) {
		ch = inb(s->iomidi);
		if (s->midi.icnt < MIDIINBUF) {
			s->midi.ibuf[s->midi.iwr] = ch;
			s->midi.iwr = (s->midi.iwr + 1) % MIDIINBUF;
			s->midi.icnt++;
		}
		wake = 1;
	}
	if (wake)
		wake_up(&s->midi.iwait);
	wake = 0;
	/* status bit 0x40 clear presumably means transmitter ready - TODO confirm */
	while (!(inb(s->iomidi+1) & 0x40) && s->midi.ocnt > 0) {
		outb(s->midi.obuf[s->midi.ord], s->iomidi);
		s->midi.ord = (s->midi.ord + 1) % MIDIOUTBUF;
		s->midi.ocnt--;
		/* only wake writers once a reasonable amount of space has opened up */
		if (s->midi.ocnt < MIDIOUTBUF-16)
			wake = 1;
	}
	if (wake)
		wake_up(&s->midi.owait);
}
890
891static irqreturn_t sv_interrupt(int irq, void *dev_id, struct pt_regs *regs)
892{
893 struct sv_state *s = (struct sv_state *)dev_id;
894 unsigned int intsrc;
895
896 /* fastpath out, to ease interrupt sharing */
897 intsrc = inb(s->ioenh + SV_CODEC_STATUS);
898 if (!(intsrc & (SV_CSTAT_DMAA | SV_CSTAT_DMAC | SV_CSTAT_MIDI)))
899 return IRQ_NONE;
900 spin_lock(&s->lock);
901 sv_update_ptr(s);
902 sv_handle_midi(s);
903 spin_unlock(&s->lock);
904 return IRQ_HANDLED;
905}
906
907static void sv_midi_timer(unsigned long data)
908{
909 struct sv_state *s = (struct sv_state *)data;
910 unsigned long flags;
911
912 spin_lock_irqsave(&s->lock, flags);
913 sv_handle_midi(s);
914 spin_unlock_irqrestore(&s->lock, flags);
915 s->midi.timer.expires = jiffies+1;
916 add_timer(&s->midi.timer);
917}
918
919/* --------------------------------------------------------------------- */
920
static const char invalid_magic[] = KERN_CRIT "sv: invalid magic value\n";

/*
 * Sanity-check a state pointer at the top of every file operation;
 * makes the *enclosing* function return -ENXIO on a bad pointer.
 */
#define VALIDATE_STATE(s) \
({ \
	if (!(s) || (s)->magic != SV_MAGIC) { \
		printk(invalid_magic); \
		return -ENXIO; \
	} \
})
930
931/* --------------------------------------------------------------------- */
932
/* mixer channel register encodings (see return_mixval/mixer_ioctl) */
#define MT_4 1		/* 4-bit gain, no mute bit */
#define MT_5MUTE 2	/* 5-bit attenuation with mute bit */
#define MT_4MUTEMONO 3	/* mono: 4-bit level plus boost/mute bits */
#define MT_6MUTE 4	/* 6-bit attenuation with mute bit */

/*
 * OSS mixer channel -> SonicVibes register mapping.
 * left/right: indirect register indices; type: value encoding (MT_*);
 * rec: hardware record-source code (0 = not selectable for recording).
 */
static const struct {
	unsigned left:5;
	unsigned right:5;
	unsigned type:3;
	unsigned rec:3;
} mixtable[SOUND_MIXER_NRDEVICES] = {
	[SOUND_MIXER_RECLEV] = { SV_CIMIX_ADCINL, SV_CIMIX_ADCINR, MT_4, 0 },
	[SOUND_MIXER_LINE1] = { SV_CIMIX_AUX1INL, SV_CIMIX_AUX1INR, MT_5MUTE, 5 },
	[SOUND_MIXER_CD] = { SV_CIMIX_CDINL, SV_CIMIX_CDINR, MT_5MUTE, 1 },
	[SOUND_MIXER_LINE] = { SV_CIMIX_LINEINL, SV_CIMIX_LINEINR, MT_5MUTE, 4 },
	[SOUND_MIXER_MIC] = { SV_CIMIX_MICIN, SV_CIMIX_ADCINL, MT_4MUTEMONO, 6 },
	[SOUND_MIXER_SYNTH] = { SV_CIMIX_SYNTHINL, SV_CIMIX_SYNTHINR, MT_5MUTE, 2 },
	[SOUND_MIXER_LINE2] = { SV_CIMIX_AUX2INL, SV_CIMIX_AUX2INR, MT_5MUTE, 3 },
	[SOUND_MIXER_VOLUME] = { SV_CIMIX_ANALOGINL, SV_CIMIX_ANALOGINR, MT_5MUTE, 7 },
	[SOUND_MIXER_PCM] = { SV_CIMIX_PCMINL, SV_CIMIX_PCMINR, MT_6MUTE, 0 }
};
954
955#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
956
/*
 * Read back a mixer channel's hardware registers and convert them to the
 * OSS 0..100 left/right volume encoding ((right << 8) | left), which is
 * copied to userspace via put_user.
 */
static int return_mixval(struct sv_state *s, unsigned i, int *arg)
{
	unsigned long flags;
	unsigned char l, r, rl, rr;

	spin_lock_irqsave(&s->lock, flags);
	l = rdindir(s, mixtable[i].left);
	r = rdindir(s, mixtable[i].right);
	spin_unlock_irqrestore(&s->lock, flags);
	switch (mixtable[i].type) {
	case MT_4:
		r &= 0xf;
		l &= 0xf;
		/* 4-bit gain: maps to 10..100 in steps of 6 */
		rl = 10 + 6 * (l & 15);
		rr = 10 + 6 * (r & 15);
		break;

	case MT_4MUTEMONO:
		/* mono: left register holds the level, right holds a +45 boost bit */
		rl = 55 - 3 * (l & 15);
		if (r & 0x10)
			rl += 45;
		rr = rl;
		r = l;	/* so the shared mute check below inspects the level register */
		break;

	case MT_5MUTE:
	default:
		/* 5-bit attenuation: 100 minus 3 per step */
		rl = 100 - 3 * (l & 31);
		rr = 100 - 3 * (r & 31);
		break;

	case MT_6MUTE:
		/* 6-bit attenuation: 100 minus 1.5 per step */
		rl = 100 - 3 * (l & 63) / 2;
		rr = 100 - 3 * (r & 63) / 2;
		break;
	}
	/* bit 7 of a register is the mute flag: report zero volume */
	if (l & 0x80)
		rl = 0;
	if (r & 0x80)
		rr = 0;
	return put_user((rr << 8) | rl, arg);
}
999
1000#else /* OSS_DOCUMENTED_MIXER_SEMANTICS */
1001
/*
 * Maps OSS mixer channel numbers to 1-based slots in s->mix.vol[];
 * 0 means the channel's software volume is not cached.
 */
static const unsigned char volidx[SOUND_MIXER_NRDEVICES] =
{
	[SOUND_MIXER_RECLEV] = 1,
	[SOUND_MIXER_LINE1] = 2,
	[SOUND_MIXER_CD] = 3,
	[SOUND_MIXER_LINE] = 4,
	[SOUND_MIXER_MIC] = 5,
	[SOUND_MIXER_SYNTH] = 6,
	[SOUND_MIXER_LINE2] = 7,
	[SOUND_MIXER_VOLUME] = 8,
	[SOUND_MIXER_PCM] = 9
};
1014
1015#endif /* OSS_DOCUMENTED_MIXER_SEMANTICS */
1016
1017static unsigned mixer_recmask(struct sv_state *s)
1018{
1019 unsigned long flags;
1020 int i, j;
1021
1022 spin_lock_irqsave(&s->lock, flags);
1023 j = rdindir(s, SV_CIMIX_ADCINL) >> 5;
1024 spin_unlock_irqrestore(&s->lock, flags);
1025 j &= 7;
1026 for (i = 0; i < SOUND_MIXER_NRDEVICES && mixtable[i].rec != j; i++);
1027 return 1 << i;
1028}
1029
1030static int mixer_ioctl(struct sv_state *s, unsigned int cmd, unsigned long arg)
1031{
1032 unsigned long flags;
1033 int i, val;
1034 unsigned char l, r, rl, rr;
1035 int __user *p = (int __user *)arg;
1036
1037 VALIDATE_STATE(s);
1038 if (cmd == SOUND_MIXER_INFO) {
1039 mixer_info info;
1040 memset(&info, 0, sizeof(info));
1041 strlcpy(info.id, "SonicVibes", sizeof(info.id));
1042 strlcpy(info.name, "S3 SonicVibes", sizeof(info.name));
1043 info.modify_counter = s->mix.modcnt;
1044 if (copy_to_user((void __user *)arg, &info, sizeof(info)))
1045 return -EFAULT;
1046 return 0;
1047 }
1048 if (cmd == SOUND_OLD_MIXER_INFO) {
1049 _old_mixer_info info;
1050 memset(&info, 0, sizeof(info));
1051 strlcpy(info.id, "SonicVibes", sizeof(info.id));
1052 strlcpy(info.name, "S3 SonicVibes", sizeof(info.name));
1053 if (copy_to_user((void __user *)arg, &info, sizeof(info)))
1054 return -EFAULT;
1055 return 0;
1056 }
1057 if (cmd == OSS_GETVERSION)
1058 return put_user(SOUND_VERSION, p);
1059 if (cmd == SOUND_MIXER_PRIVATE1) { /* SRS settings */
1060 if (get_user(val, p))
1061 return -EFAULT;
1062 spin_lock_irqsave(&s->lock, flags);
1063 if (val & 1) {
1064 if (val & 2) {
1065 l = 4 - ((val >> 2) & 7);
1066 if (l & ~3)
1067 l = 4;
1068 r = 4 - ((val >> 5) & 7);
1069 if (r & ~3)
1070 r = 4;
1071 wrindir(s, SV_CISRSSPACE, l);
1072 wrindir(s, SV_CISRSCENTER, r);
1073 } else
1074 wrindir(s, SV_CISRSSPACE, 0x80);
1075 }
1076 l = rdindir(s, SV_CISRSSPACE);
1077 r = rdindir(s, SV_CISRSCENTER);
1078 spin_unlock_irqrestore(&s->lock, flags);
1079 if (l & 0x80)
1080 return put_user(0, p);
1081 return put_user(((4 - (l & 7)) << 2) | ((4 - (r & 7)) << 5) | 2, p);
1082 }
1083 if (_IOC_TYPE(cmd) != 'M' || _SIOC_SIZE(cmd) != sizeof(int))
1084 return -EINVAL;
1085 if (_SIOC_DIR(cmd) == _SIOC_READ) {
1086 switch (_IOC_NR(cmd)) {
1087 case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */
1088 return put_user(mixer_recmask(s), p);
1089
1090 case SOUND_MIXER_DEVMASK: /* Arg contains a bit for each supported device */
1091 for (val = i = 0; i < SOUND_MIXER_NRDEVICES; i++)
1092 if (mixtable[i].type)
1093 val |= 1 << i;
1094 return put_user(val, p);
1095
1096 case SOUND_MIXER_RECMASK: /* Arg contains a bit for each supported recording source */
1097 for (val = i = 0; i < SOUND_MIXER_NRDEVICES; i++)
1098 if (mixtable[i].rec)
1099 val |= 1 << i;
1100 return put_user(val, p);
1101
1102 case SOUND_MIXER_STEREODEVS: /* Mixer channels supporting stereo */
1103 for (val = i = 0; i < SOUND_MIXER_NRDEVICES; i++)
1104 if (mixtable[i].type && mixtable[i].type != MT_4MUTEMONO)
1105 val |= 1 << i;
1106 return put_user(val, p);
1107
1108 case SOUND_MIXER_CAPS:
1109 return put_user(SOUND_CAP_EXCL_INPUT, p);
1110
1111 default:
1112 i = _IOC_NR(cmd);
1113 if (i >= SOUND_MIXER_NRDEVICES || !mixtable[i].type)
1114 return -EINVAL;
1115#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
1116 return return_mixval(s, i, p);
1117#else /* OSS_DOCUMENTED_MIXER_SEMANTICS */
1118 if (!volidx[i])
1119 return -EINVAL;
1120 return put_user(s->mix.vol[volidx[i]-1], p);
1121#endif /* OSS_DOCUMENTED_MIXER_SEMANTICS */
1122 }
1123 }
1124 if (_SIOC_DIR(cmd) != (_SIOC_READ|_SIOC_WRITE))
1125 return -EINVAL;
1126 s->mix.modcnt++;
1127 switch (_IOC_NR(cmd)) {
1128 case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */
1129 if (get_user(val, p))
1130 return -EFAULT;
1131 i = hweight32(val);
1132 if (i == 0)
1133 return 0; /*val = mixer_recmask(s);*/
1134 else if (i > 1)
1135 val &= ~mixer_recmask(s);
1136 for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
1137 if (!(val & (1 << i)))
1138 continue;
1139 if (mixtable[i].rec)
1140 break;
1141 }
1142 if (i == SOUND_MIXER_NRDEVICES)
1143 return 0;
1144 spin_lock_irqsave(&s->lock, flags);
1145 frobindir(s, SV_CIMIX_ADCINL, 0x1f, mixtable[i].rec << 5);
1146 frobindir(s, SV_CIMIX_ADCINR, 0x1f, mixtable[i].rec << 5);
1147 spin_unlock_irqrestore(&s->lock, flags);
1148 return 0;
1149
1150 default:
1151 i = _IOC_NR(cmd);
1152 if (i >= SOUND_MIXER_NRDEVICES || !mixtable[i].type)
1153 return -EINVAL;
1154 if (get_user(val, p))
1155 return -EFAULT;
1156 l = val & 0xff;
1157 r = (val >> 8) & 0xff;
1158 if (mixtable[i].type == MT_4MUTEMONO)
1159 l = (r + l) / 2;
1160 if (l > 100)
1161 l = 100;
1162 if (r > 100)
1163 r = 100;
1164 spin_lock_irqsave(&s->lock, flags);
1165 switch (mixtable[i].type) {
1166 case MT_4:
1167 if (l >= 10)
1168 l -= 10;
1169 if (r >= 10)
1170 r -= 10;
1171 frobindir(s, mixtable[i].left, 0xf0, l / 6);
1172 frobindir(s, mixtable[i].right, 0xf0, l / 6);
1173 break;
1174
1175 case MT_4MUTEMONO:
1176 rr = 0;
1177 if (l < 10)
1178 rl = 0x80;
1179 else {
1180 if (l >= 55) {
1181 rr = 0x10;
1182 l -= 45;
1183 }
1184 rl = (55 - l) / 3;
1185 }
1186 wrindir(s, mixtable[i].left, rl);
1187 frobindir(s, mixtable[i].right, ~0x10, rr);
1188 break;
1189
1190 case MT_5MUTE:
1191 if (l < 7)
1192 rl = 0x80;
1193 else
1194 rl = (100 - l) / 3;
1195 if (r < 7)
1196 rr = 0x80;
1197 else
1198 rr = (100 - r) / 3;
1199 wrindir(s, mixtable[i].left, rl);
1200 wrindir(s, mixtable[i].right, rr);
1201 break;
1202
1203 case MT_6MUTE:
1204 if (l < 6)
1205 rl = 0x80;
1206 else
1207 rl = (100 - l) * 2 / 3;
1208 if (r < 6)
1209 rr = 0x80;
1210 else
1211 rr = (100 - r) * 2 / 3;
1212 wrindir(s, mixtable[i].left, rl);
1213 wrindir(s, mixtable[i].right, rr);
1214 break;
1215 }
1216 spin_unlock_irqrestore(&s->lock, flags);
1217#ifdef OSS_DOCUMENTED_MIXER_SEMANTICS
1218 return return_mixval(s, i, p);
1219#else /* OSS_DOCUMENTED_MIXER_SEMANTICS */
1220 if (!volidx[i])
1221 return -EINVAL;
1222 s->mix.vol[volidx[i]-1] = val;
1223 return put_user(s->mix.vol[volidx[i]-1], p);
1224#endif /* OSS_DOCUMENTED_MIXER_SEMANTICS */
1225 }
1226}
1227
1228/* --------------------------------------------------------------------- */
1229
1230static int sv_open_mixdev(struct inode *inode, struct file *file)
1231{
1232 int minor = iminor(inode);
1233 struct list_head *list;
1234 struct sv_state *s;
1235
1236 for (list = devs.next; ; list = list->next) {
1237 if (list == &devs)
1238 return -ENODEV;
1239 s = list_entry(list, struct sv_state, devs);
1240 if (s->dev_mixer == minor)
1241 break;
1242 }
1243 VALIDATE_STATE(s);
1244 file->private_data = s;
1245 return nonseekable_open(inode, file);
1246}
1247
1248static int sv_release_mixdev(struct inode *inode, struct file *file)
1249{
1250 struct sv_state *s = (struct sv_state *)file->private_data;
1251
1252 VALIDATE_STATE(s);
1253 return 0;
1254}
1255
1256static int sv_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1257{
1258 return mixer_ioctl((struct sv_state *)file->private_data, cmd, arg);
1259}
1260
/* file operations for the mixer device node */
static /*const*/ struct file_operations sv_mixer_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.ioctl = sv_ioctl_mixdev,
	.open = sv_open_mixdev,
	.release = sv_release_mixdev,
};
1268
1269/* --------------------------------------------------------------------- */
1270
/*
 * Wait until the playback ring has fully drained (count <= 0).
 * Returns 0 on success, -EBUSY for a nonblocking caller that would have
 * to sleep, or -ERESTARTSYS when interrupted by a signal.  A no-op for
 * mmapped or unprogrammed buffers.
 */
static int drain_dac(struct sv_state *s, int nonblock)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int count, tmo;

	if (s->dma_dac.mapped || !s->dma_dac.ready)
		return 0;
	add_wait_queue(&s->dma_dac.wait, &wait);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&s->lock, flags);
		count = s->dma_dac.count;
		spin_unlock_irqrestore(&s->lock, flags);
		if (count <= 0)
			break;
		if (signal_pending(current))
			break;
		if (nonblock) {
			remove_wait_queue(&s->dma_dac.wait, &wait);
			set_current_state(TASK_RUNNING);
			return -EBUSY;
		}
		/* generous timeout: 1.5x the playing time of the pending bytes */
		tmo = 3 * HZ * (count + s->dma_dac.fragsize) / 2 / s->ratedac;
		tmo >>= sample_shift[(s->fmt >> SV_CFMT_ASHIFT) & SV_CFMT_MASK];
		if (!schedule_timeout(tmo + 1))
			printk(KERN_DEBUG "sv: dma timed out??\n");
	}
	remove_wait_queue(&s->dma_dac.wait, &wait);
	set_current_state(TASK_RUNNING);
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
1305
1306/* --------------------------------------------------------------------- */
1307
/*
 * read() on the audio device: copy captured data from the ADC DMA ring
 * to userspace, sleeping until enough arrives.  Not permitted while the
 * buffer is mmapped.  Returns bytes copied, or a negative errno.
 */
static ssize_t sv_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	struct sv_state *s = (struct sv_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	unsigned swptr;
	int cnt;

	VALIDATE_STATE(s);
	if (s->dma_adc.mapped)
		return -ENXIO;
	if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
		return ret;
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	ret = 0;
#if 0
	spin_lock_irqsave(&s->lock, flags);
	sv_update_ptr(s);
	spin_unlock_irqrestore(&s->lock, flags);
#endif
	add_wait_queue(&s->dma_adc.wait, &wait);
	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		swptr = s->dma_adc.swptr;
		/* contiguous readable bytes up to the end of the ring */
		cnt = s->dma_adc.dmasize-swptr;
		if (s->dma_adc.count < cnt)
			cnt = s->dma_adc.count;
		if (cnt <= 0)
			__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			/* nothing buffered: ensure capture is running, then wait */
			if (s->dma_adc.enabled)
				start_adc(s);
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			if (!schedule_timeout(HZ)) {
				/* no data in a full second: assume a wedged chip and reprogram */
				printk(KERN_DEBUG "sv: read: chip lockup? dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
				       s->dma_adc.dmasize, s->dma_adc.fragsize, s->dma_adc.count,
				       s->dma_adc.hwptr, s->dma_adc.swptr);
				stop_adc(s);
				spin_lock_irqsave(&s->lock, flags);
				/* NOTE(review): uses virt_to_bus(rawbuf) here while prog_dmabuf
				   programs the pci_alloc_consistent dmaaddr - verify both agree */
				set_dmac(s, virt_to_bus(s->dma_adc.rawbuf), s->dma_adc.numfrag << s->dma_adc.fragshift);
				/* program enhanced mode registers */
				wrindir(s, SV_CIDMACBASECOUNT1, (s->dma_adc.fragsamples-1) >> 8);
				wrindir(s, SV_CIDMACBASECOUNT0, s->dma_adc.fragsamples-1);
				s->dma_adc.count = s->dma_adc.hwptr = s->dma_adc.swptr = 0;
				spin_unlock_irqrestore(&s->lock, flags);
			}
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		if (copy_to_user(buffer, s->dma_adc.rawbuf + swptr, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
		swptr = (swptr + cnt) % s->dma_adc.dmasize;
		spin_lock_irqsave(&s->lock, flags);
		s->dma_adc.swptr = swptr;
		s->dma_adc.count -= cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		if (s->dma_adc.enabled)
			start_adc(s);
	}
	remove_wait_queue(&s->dma_adc.wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;
}
1390
/*
 * write() on the audio device: copy playback data from userspace into
 * the DAC DMA ring, sleeping until space opens up.  Not permitted while
 * the buffer is mmapped.  Returns bytes queued, or a negative errno.
 */
static ssize_t sv_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct sv_state *s = (struct sv_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	unsigned swptr;
	int cnt;

	VALIDATE_STATE(s);
	if (s->dma_dac.mapped)
		return -ENXIO;
	if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
		return ret;
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;
	ret = 0;
#if 0
	spin_lock_irqsave(&s->lock, flags);
	sv_update_ptr(s);
	spin_unlock_irqrestore(&s->lock, flags);
#endif
	add_wait_queue(&s->dma_dac.wait, &wait);
	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		/* after an underrun resynchronise the software pointer to hardware */
		if (s->dma_dac.count < 0) {
			s->dma_dac.count = 0;
			s->dma_dac.swptr = s->dma_dac.hwptr;
		}
		swptr = s->dma_dac.swptr;
		/* contiguous free space up to the end of the ring */
		cnt = s->dma_dac.dmasize-swptr;
		if (s->dma_dac.count + cnt > s->dma_dac.dmasize)
			cnt = s->dma_dac.dmasize - s->dma_dac.count;
		if (cnt <= 0)
			__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			/* ring full: ensure playback is running, then wait for space */
			if (s->dma_dac.enabled)
				start_dac(s);
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			if (!schedule_timeout(HZ)) {
				/* no space freed in a full second: assume a wedged chip and reprogram */
				printk(KERN_DEBUG "sv: write: chip lockup? dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
				       s->dma_dac.dmasize, s->dma_dac.fragsize, s->dma_dac.count,
				       s->dma_dac.hwptr, s->dma_dac.swptr);
				stop_dac(s);
				spin_lock_irqsave(&s->lock, flags);
				/* NOTE(review): uses virt_to_bus(rawbuf) here while prog_dmabuf
				   programs the pci_alloc_consistent dmaaddr - verify both agree */
				set_dmaa(s, virt_to_bus(s->dma_dac.rawbuf), s->dma_dac.numfrag << s->dma_dac.fragshift);
				/* program enhanced mode registers */
				wrindir(s, SV_CIDMAABASECOUNT1, (s->dma_dac.fragsamples-1) >> 8);
				wrindir(s, SV_CIDMAABASECOUNT0, s->dma_dac.fragsamples-1);
				s->dma_dac.count = s->dma_dac.hwptr = s->dma_dac.swptr = 0;
				spin_unlock_irqrestore(&s->lock, flags);
			}
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		if (copy_from_user(s->dma_dac.rawbuf + swptr, buffer, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
		swptr = (swptr + cnt) % s->dma_dac.dmasize;
		spin_lock_irqsave(&s->lock, flags);
		s->dma_dac.swptr = swptr;
		s->dma_dac.count += cnt;
		s->dma_dac.endcleared = 0;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		if (s->dma_dac.enabled)
			start_dac(s);
	}
	remove_wait_queue(&s->dma_dac.wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;
}
1478
1479/* No kernel lock - we have our own spinlock */
1480static unsigned int sv_poll(struct file *file, struct poll_table_struct *wait)
1481{
1482 struct sv_state *s = (struct sv_state *)file->private_data;
1483 unsigned long flags;
1484 unsigned int mask = 0;
1485
1486 VALIDATE_STATE(s);
1487 if (file->f_mode & FMODE_WRITE) {
1488 if (!s->dma_dac.ready && prog_dmabuf(s, 1))
1489 return 0;
1490 poll_wait(file, &s->dma_dac.wait, wait);
1491 }
1492 if (file->f_mode & FMODE_READ) {
1493 if (!s->dma_adc.ready && prog_dmabuf(s, 0))
1494 return 0;
1495 poll_wait(file, &s->dma_adc.wait, wait);
1496 }
1497 spin_lock_irqsave(&s->lock, flags);
1498 sv_update_ptr(s);
1499 if (file->f_mode & FMODE_READ) {
1500 if (s->dma_adc.count >= (signed)s->dma_adc.fragsize)
1501 mask |= POLLIN | POLLRDNORM;
1502 }
1503 if (file->f_mode & FMODE_WRITE) {
1504 if (s->dma_dac.mapped) {
1505 if (s->dma_dac.count >= (signed)s->dma_dac.fragsize)
1506 mask |= POLLOUT | POLLWRNORM;
1507 } else {
1508 if ((signed)s->dma_dac.dmasize >= s->dma_dac.count + (signed)s->dma_dac.fragsize)
1509 mask |= POLLOUT | POLLWRNORM;
1510 }
1511 }
1512 spin_unlock_irqrestore(&s->lock, flags);
1513 return mask;
1514}
1515
1516static int sv_mmap(struct file *file, struct vm_area_struct *vma)
1517{
1518 struct sv_state *s = (struct sv_state *)file->private_data;
1519 struct dmabuf *db;
1520 int ret = -EINVAL;
1521 unsigned long size;
1522
1523 VALIDATE_STATE(s);
1524 lock_kernel();
1525 if (vma->vm_flags & VM_WRITE) {
1526 if ((ret = prog_dmabuf(s, 1)) != 0)
1527 goto out;
1528 db = &s->dma_dac;
1529 } else if (vma->vm_flags & VM_READ) {
1530 if ((ret = prog_dmabuf(s, 0)) != 0)
1531 goto out;
1532 db = &s->dma_adc;
1533 } else
1534 goto out;
1535 ret = -EINVAL;
1536 if (vma->vm_pgoff != 0)
1537 goto out;
1538 size = vma->vm_end - vma->vm_start;
1539 if (size > (PAGE_SIZE << db->buforder))
1540 goto out;
1541 ret = -EAGAIN;
1542 if (remap_pfn_range(vma, vma->vm_start,
1543 virt_to_phys(db->rawbuf) >> PAGE_SHIFT,
1544 size, vma->vm_page_prot))
1545 goto out;
1546 db->mapped = 1;
1547 ret = 0;
1548out:
1549 unlock_kernel();
1550 return ret;
1551}
1552
1553static int sv_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1554{
1555 struct sv_state *s = (struct sv_state *)file->private_data;
1556 unsigned long flags;
1557 audio_buf_info abinfo;
1558 count_info cinfo;
1559 int count;
1560 int val, mapped, ret;
1561 unsigned char fmtm, fmtd;
1562 void __user *argp = (void __user *)arg;
1563 int __user *p = argp;
1564
1565 VALIDATE_STATE(s);
1566 mapped = ((file->f_mode & FMODE_WRITE) && s->dma_dac.mapped) ||
1567 ((file->f_mode & FMODE_READ) && s->dma_adc.mapped);
1568 switch (cmd) {
1569 case OSS_GETVERSION:
1570 return put_user(SOUND_VERSION, p);
1571
1572 case SNDCTL_DSP_SYNC:
1573 if (file->f_mode & FMODE_WRITE)
1574 return drain_dac(s, 0/*file->f_flags & O_NONBLOCK*/);
1575 return 0;
1576
1577 case SNDCTL_DSP_SETDUPLEX:
1578 return 0;
1579
1580 case SNDCTL_DSP_GETCAPS:
1581 return put_user(DSP_CAP_DUPLEX | DSP_CAP_REALTIME | DSP_CAP_TRIGGER | DSP_CAP_MMAP, p);
1582
1583 case SNDCTL_DSP_RESET:
1584 if (file->f_mode & FMODE_WRITE) {
1585 stop_dac(s);
1586 synchronize_irq(s->irq);
1587 s->dma_dac.swptr = s->dma_dac.hwptr = s->dma_dac.count = s->dma_dac.total_bytes = 0;
1588 }
1589 if (file->f_mode & FMODE_READ) {
1590 stop_adc(s);
1591 synchronize_irq(s->irq);
1592 s->dma_adc.swptr = s->dma_adc.hwptr = s->dma_adc.count = s->dma_adc.total_bytes = 0;
1593 }
1594 return 0;
1595
1596 case SNDCTL_DSP_SPEED:
1597 if (get_user(val, p))
1598 return -EFAULT;
1599 if (val >= 0) {
1600 if (file->f_mode & FMODE_READ) {
1601 stop_adc(s);
1602 s->dma_adc.ready = 0;
1603 set_adc_rate(s, val);
1604 }
1605 if (file->f_mode & FMODE_WRITE) {
1606 stop_dac(s);
1607 s->dma_dac.ready = 0;
1608 set_dac_rate(s, val);
1609 }
1610 }
1611 return put_user((file->f_mode & FMODE_READ) ? s->rateadc : s->ratedac, p);
1612
1613 case SNDCTL_DSP_STEREO:
1614 if (get_user(val, p))
1615 return -EFAULT;
1616 fmtd = 0;
1617 fmtm = ~0;
1618 if (file->f_mode & FMODE_READ) {
1619 stop_adc(s);
1620 s->dma_adc.ready = 0;
1621 if (val)
1622 fmtd |= SV_CFMT_STEREO << SV_CFMT_CSHIFT;
1623 else
1624 fmtm &= ~(SV_CFMT_STEREO << SV_CFMT_CSHIFT);
1625 }
1626 if (file->f_mode & FMODE_WRITE) {
1627 stop_dac(s);
1628 s->dma_dac.ready = 0;
1629 if (val)
1630 fmtd |= SV_CFMT_STEREO << SV_CFMT_ASHIFT;
1631 else
1632 fmtm &= ~(SV_CFMT_STEREO << SV_CFMT_ASHIFT);
1633 }
1634 set_fmt(s, fmtm, fmtd);
1635 return 0;
1636
1637 case SNDCTL_DSP_CHANNELS:
1638 if (get_user(val, p))
1639 return -EFAULT;
1640 if (val != 0) {
1641 fmtd = 0;
1642 fmtm = ~0;
1643 if (file->f_mode & FMODE_READ) {
1644 stop_adc(s);
1645 s->dma_adc.ready = 0;
1646 if (val >= 2)
1647 fmtd |= SV_CFMT_STEREO << SV_CFMT_CSHIFT;
1648 else
1649 fmtm &= ~(SV_CFMT_STEREO << SV_CFMT_CSHIFT);
1650 }
1651 if (file->f_mode & FMODE_WRITE) {
1652 stop_dac(s);
1653 s->dma_dac.ready = 0;
1654 if (val >= 2)
1655 fmtd |= SV_CFMT_STEREO << SV_CFMT_ASHIFT;
1656 else
1657 fmtm &= ~(SV_CFMT_STEREO << SV_CFMT_ASHIFT);
1658 }
1659 set_fmt(s, fmtm, fmtd);
1660 }
1661 return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (SV_CFMT_STEREO << SV_CFMT_CSHIFT)
1662 : (SV_CFMT_STEREO << SV_CFMT_ASHIFT))) ? 2 : 1, p);
1663
1664 case SNDCTL_DSP_GETFMTS: /* Returns a mask */
1665 return put_user(AFMT_S16_LE|AFMT_U8, p);
1666
1667 case SNDCTL_DSP_SETFMT: /* Selects ONE fmt*/
1668 if (get_user(val, p))
1669 return -EFAULT;
1670 if (val != AFMT_QUERY) {
1671 fmtd = 0;
1672 fmtm = ~0;
1673 if (file->f_mode & FMODE_READ) {
1674 stop_adc(s);
1675 s->dma_adc.ready = 0;
1676 if (val == AFMT_S16_LE)
1677 fmtd |= SV_CFMT_16BIT << SV_CFMT_CSHIFT;
1678 else
1679 fmtm &= ~(SV_CFMT_16BIT << SV_CFMT_CSHIFT);
1680 }
1681 if (file->f_mode & FMODE_WRITE) {
1682 stop_dac(s);
1683 s->dma_dac.ready = 0;
1684 if (val == AFMT_S16_LE)
1685 fmtd |= SV_CFMT_16BIT << SV_CFMT_ASHIFT;
1686 else
1687 fmtm &= ~(SV_CFMT_16BIT << SV_CFMT_ASHIFT);
1688 }
1689 set_fmt(s, fmtm, fmtd);
1690 }
1691 return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (SV_CFMT_16BIT << SV_CFMT_CSHIFT)
1692 : (SV_CFMT_16BIT << SV_CFMT_ASHIFT))) ? AFMT_S16_LE : AFMT_U8, p);
1693
1694 case SNDCTL_DSP_POST:
1695 return 0;
1696
1697 case SNDCTL_DSP_GETTRIGGER:
1698 val = 0;
1699 if (file->f_mode & FMODE_READ && s->enable & SV_CENABLE_RE)
1700 val |= PCM_ENABLE_INPUT;
1701 if (file->f_mode & FMODE_WRITE && s->enable & SV_CENABLE_PE)
1702 val |= PCM_ENABLE_OUTPUT;
1703 return put_user(val, p);
1704
1705 case SNDCTL_DSP_SETTRIGGER:
1706 if (get_user(val, p))
1707 return -EFAULT;
1708 if (file->f_mode & FMODE_READ) {
1709 if (val & PCM_ENABLE_INPUT) {
1710 if (!s->dma_adc.ready && (ret = prog_dmabuf(s, 1)))
1711 return ret;
1712 s->dma_adc.enabled = 1;
1713 start_adc(s);
1714 } else {
1715 s->dma_adc.enabled = 0;
1716 stop_adc(s);
1717 }
1718 }
1719 if (file->f_mode & FMODE_WRITE) {
1720 if (val & PCM_ENABLE_OUTPUT) {
1721 if (!s->dma_dac.ready && (ret = prog_dmabuf(s, 0)))
1722 return ret;
1723 s->dma_dac.enabled = 1;
1724 start_dac(s);
1725 } else {
1726 s->dma_dac.enabled = 0;
1727 stop_dac(s);
1728 }
1729 }
1730 return 0;
1731
1732 case SNDCTL_DSP_GETOSPACE:
1733 if (!(file->f_mode & FMODE_WRITE))
1734 return -EINVAL;
1735 if (!s->dma_dac.ready && (val = prog_dmabuf(s, 0)) != 0)
1736 return val;
1737 spin_lock_irqsave(&s->lock, flags);
1738 sv_update_ptr(s);
1739 abinfo.fragsize = s->dma_dac.fragsize;
1740 count = s->dma_dac.count;
1741 if (count < 0)
1742 count = 0;
1743 abinfo.bytes = s->dma_dac.dmasize - count;
1744 abinfo.fragstotal = s->dma_dac.numfrag;
1745 abinfo.fragments = abinfo.bytes >> s->dma_dac.fragshift;
1746 spin_unlock_irqrestore(&s->lock, flags);
1747 return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
1748
1749 case SNDCTL_DSP_GETISPACE:
1750 if (!(file->f_mode & FMODE_READ))
1751 return -EINVAL;
1752 if (!s->dma_adc.ready && (val = prog_dmabuf(s, 1)) != 0)
1753 return val;
1754 spin_lock_irqsave(&s->lock, flags);
1755 sv_update_ptr(s);
1756 abinfo.fragsize = s->dma_adc.fragsize;
1757 count = s->dma_adc.count;
1758 if (count < 0)
1759 count = 0;
1760 abinfo.bytes = count;
1761 abinfo.fragstotal = s->dma_adc.numfrag;
1762 abinfo.fragments = abinfo.bytes >> s->dma_adc.fragshift;
1763 spin_unlock_irqrestore(&s->lock, flags);
1764 return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
1765
1766 case SNDCTL_DSP_NONBLOCK:
1767 file->f_flags |= O_NONBLOCK;
1768 return 0;
1769
1770 case SNDCTL_DSP_GETODELAY:
1771 if (!(file->f_mode & FMODE_WRITE))
1772 return -EINVAL;
1773 if (!s->dma_dac.ready && (val = prog_dmabuf(s, 0)) != 0)
1774 return val;
1775 spin_lock_irqsave(&s->lock, flags);
1776 sv_update_ptr(s);
1777 count = s->dma_dac.count;
1778 spin_unlock_irqrestore(&s->lock, flags);
1779 if (count < 0)
1780 count = 0;
1781 return put_user(count, p);
1782
1783 case SNDCTL_DSP_GETIPTR:
1784 if (!(file->f_mode & FMODE_READ))
1785 return -EINVAL;
1786 if (!s->dma_adc.ready && (val = prog_dmabuf(s, 1)) != 0)
1787 return val;
1788 spin_lock_irqsave(&s->lock, flags);
1789 sv_update_ptr(s);
1790 cinfo.bytes = s->dma_adc.total_bytes;
1791 count = s->dma_adc.count;
1792 if (count < 0)
1793 count = 0;
1794 cinfo.blocks = count >> s->dma_adc.fragshift;
1795 cinfo.ptr = s->dma_adc.hwptr;
1796 if (s->dma_adc.mapped)
1797 s->dma_adc.count &= s->dma_adc.fragsize-1;
1798 spin_unlock_irqrestore(&s->lock, flags);
1799 if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
1800 return -EFAULT;
1801 return 0;
1802
1803 case SNDCTL_DSP_GETOPTR:
1804 if (!(file->f_mode & FMODE_WRITE))
1805 return -EINVAL;
1806 if (!s->dma_dac.ready && (val = prog_dmabuf(s, 0)) != 0)
1807 return val;
1808 spin_lock_irqsave(&s->lock, flags);
1809 sv_update_ptr(s);
1810 cinfo.bytes = s->dma_dac.total_bytes;
1811 count = s->dma_dac.count;
1812 if (count < 0)
1813 count = 0;
1814 cinfo.blocks = count >> s->dma_dac.fragshift;
1815 cinfo.ptr = s->dma_dac.hwptr;
1816 if (s->dma_dac.mapped)
1817 s->dma_dac.count &= s->dma_dac.fragsize-1;
1818 spin_unlock_irqrestore(&s->lock, flags);
1819 if (copy_to_user(argp, &cinfo, sizeof(cinfo)))
1820 return -EFAULT;
1821 return 0;
1822
1823 case SNDCTL_DSP_GETBLKSIZE:
1824 if (file->f_mode & FMODE_WRITE) {
1825 if ((val = prog_dmabuf(s, 0)))
1826 return val;
1827 return put_user(s->dma_dac.fragsize, p);
1828 }
1829 if ((val = prog_dmabuf(s, 1)))
1830 return val;
1831 return put_user(s->dma_adc.fragsize, p);
1832
1833 case SNDCTL_DSP_SETFRAGMENT:
1834 if (get_user(val, p))
1835 return -EFAULT;
1836 if (file->f_mode & FMODE_READ) {
1837 s->dma_adc.ossfragshift = val & 0xffff;
1838 s->dma_adc.ossmaxfrags = (val >> 16) & 0xffff;
1839 if (s->dma_adc.ossfragshift < 4)
1840 s->dma_adc.ossfragshift = 4;
1841 if (s->dma_adc.ossfragshift > 15)
1842 s->dma_adc.ossfragshift = 15;
1843 if (s->dma_adc.ossmaxfrags < 4)
1844 s->dma_adc.ossmaxfrags = 4;
1845 }
1846 if (file->f_mode & FMODE_WRITE) {
1847 s->dma_dac.ossfragshift = val & 0xffff;
1848 s->dma_dac.ossmaxfrags = (val >> 16) & 0xffff;
1849 if (s->dma_dac.ossfragshift < 4)
1850 s->dma_dac.ossfragshift = 4;
1851 if (s->dma_dac.ossfragshift > 15)
1852 s->dma_dac.ossfragshift = 15;
1853 if (s->dma_dac.ossmaxfrags < 4)
1854 s->dma_dac.ossmaxfrags = 4;
1855 }
1856 return 0;
1857
1858 case SNDCTL_DSP_SUBDIVIDE:
1859 if ((file->f_mode & FMODE_READ && s->dma_adc.subdivision) ||
1860 (file->f_mode & FMODE_WRITE && s->dma_dac.subdivision))
1861 return -EINVAL;
1862 if (get_user(val, p))
1863 return -EFAULT;
1864 if (val != 1 && val != 2 && val != 4)
1865 return -EINVAL;
1866 if (file->f_mode & FMODE_READ)
1867 s->dma_adc.subdivision = val;
1868 if (file->f_mode & FMODE_WRITE)
1869 s->dma_dac.subdivision = val;
1870 return 0;
1871
1872 case SOUND_PCM_READ_RATE:
1873 return put_user((file->f_mode & FMODE_READ) ? s->rateadc : s->ratedac, p);
1874
1875 case SOUND_PCM_READ_CHANNELS:
1876 return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (SV_CFMT_STEREO << SV_CFMT_CSHIFT)
1877 : (SV_CFMT_STEREO << SV_CFMT_ASHIFT))) ? 2 : 1, p);
1878
1879 case SOUND_PCM_READ_BITS:
1880 return put_user((s->fmt & ((file->f_mode & FMODE_READ) ? (SV_CFMT_16BIT << SV_CFMT_CSHIFT)
1881 : (SV_CFMT_16BIT << SV_CFMT_ASHIFT))) ? 16 : 8, p);
1882
1883 case SOUND_PCM_WRITE_FILTER:
1884 case SNDCTL_DSP_SETSYNCRO:
1885 case SOUND_PCM_READ_FILTER:
1886 return -EINVAL;
1887
1888 }
1889 return mixer_ioctl(s, cmd, arg);
1890}
1891
/*
 * /dev/dsp open: locate the sv_state whose audio minor matches, wait
 * (interruptibly) until the requested access direction is free, then
 * program 8 kHz / mono defaults for each direction being opened.
 * Returns 0, -ENODEV, -EBUSY (O_NONBLOCK) or -ERESTARTSYS.
 */
static int sv_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	DECLARE_WAITQUEUE(wait, current);
	unsigned char fmtm = ~0, fmts = 0;	/* mask / set bits handed to set_fmt() */
	struct list_head *list;
	struct sv_state *s;

	/* scan the global card list; the low 4 bits of the minor select
	   the device flavour (dsp vs dsp16 etc.), so compare ignoring them */
	for (list = devs.next; ; list = list->next) {
		if (list == &devs)
			return -ENODEV;
		s = list_entry(list, struct sv_state, devs);
		if (!((s->dev_audio ^ minor) & ~0xf))
			break;
	}
	VALIDATE_STATE(s);
	file->private_data = s;
	/* wait for device to become free */
	mutex_lock(&s->open_mutex);
	while (s->open_mode & file->f_mode) {
		if (file->f_flags & O_NONBLOCK) {
			mutex_unlock(&s->open_mutex);
			return -EBUSY;
		}
		/* drop the mutex while sleeping so the release path can run
		   and wake us via open_wait */
		add_wait_queue(&s->open_wait, &wait);
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&s->open_mutex);
		schedule();
		remove_wait_queue(&s->open_wait, &wait);
		set_current_state(TASK_RUNNING);
		if (signal_pending(current))
			return -ERESTARTSYS;
		mutex_lock(&s->open_mutex);
	}
	if (file->f_mode & FMODE_READ) {
		/* capture defaults: mono, 8-bit (16-bit for the DSP16 minor), 8 kHz */
		fmtm &= ~((SV_CFMT_STEREO | SV_CFMT_16BIT) << SV_CFMT_CSHIFT);
		if ((minor & 0xf) == SND_DEV_DSP16)
			fmts |= SV_CFMT_16BIT << SV_CFMT_CSHIFT;
		s->dma_adc.ossfragshift = s->dma_adc.ossmaxfrags = s->dma_adc.subdivision = 0;
		s->dma_adc.enabled = 1;
		set_adc_rate(s, 8000);
	}
	if (file->f_mode & FMODE_WRITE) {
		/* playback defaults: same policy as capture */
		fmtm &= ~((SV_CFMT_STEREO | SV_CFMT_16BIT) << SV_CFMT_ASHIFT);
		if ((minor & 0xf) == SND_DEV_DSP16)
			fmts |= SV_CFMT_16BIT << SV_CFMT_ASHIFT;
		s->dma_dac.ossfragshift = s->dma_dac.ossmaxfrags = s->dma_dac.subdivision = 0;
		s->dma_dac.enabled = 1;
		set_dac_rate(s, 8000);
	}
	set_fmt(s, fmtm, fmts);
	s->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
	mutex_unlock(&s->open_mutex);
	return nonseekable_open(inode, file);
}
1947
/*
 * /dev/dsp release: drain pending playback (blocks unless O_NONBLOCK),
 * stop the engines, free the DMA buffers, clear our bits in open_mode
 * and wake anyone sleeping in sv_open().
 */
static int sv_release(struct inode *inode, struct file *file)
{
	struct sv_state *s = (struct sv_state *)file->private_data;

	VALIDATE_STATE(s);
	lock_kernel();
	if (file->f_mode & FMODE_WRITE)
		drain_dac(s, file->f_flags & O_NONBLOCK);	/* let queued audio play out */
	mutex_lock(&s->open_mutex);
	if (file->f_mode & FMODE_WRITE) {
		stop_dac(s);
		dealloc_dmabuf(s, &s->dma_dac);
	}
	if (file->f_mode & FMODE_READ) {
		stop_adc(s);
		dealloc_dmabuf(s, &s->dma_adc);
	}
	s->open_mode &= ~(file->f_mode & (FMODE_READ|FMODE_WRITE));
	wake_up(&s->open_wait);	/* unblock a waiting open() */
	mutex_unlock(&s->open_mutex);
	unlock_kernel();
	return 0;
}
1971
/* file operations for the /dev/dsp (PCM audio) device node */
static /*const*/ struct file_operations sv_audio_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= sv_read,
	.write		= sv_write,
	.poll		= sv_poll,
	.ioctl		= sv_ioctl,
	.mmap		= sv_mmap,
	.open		= sv_open,
	.release	= sv_release,
};
1983
1984/* --------------------------------------------------------------------- */
1985
/*
 * /dev/midi read: copy received bytes out of the interrupt-filled input
 * ring buffer (s->midi.ibuf, MIDIINBUF bytes).  Blocks until at least
 * one byte is available unless O_NONBLOCK; returns after the first
 * contiguous chunk copied (the trailing break is deliberate), so a
 * single call never wraps around the ring.
 */
static ssize_t sv_midi_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	struct sv_state *s = (struct sv_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	unsigned ptr;
	int cnt;

	VALIDATE_STATE(s);
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;
	if (count == 0)
		return 0;
	ret = 0;
	add_wait_queue(&s->midi.iwait, &wait);
	while (count > 0) {
		/* snapshot read pointer and available byte count under the lock */
		spin_lock_irqsave(&s->lock, flags);
		ptr = s->midi.ird;
		cnt = MIDIINBUF - ptr;		/* bytes until ring wrap */
		if (s->midi.icnt < cnt)
			cnt = s->midi.icnt;
		if (cnt <= 0)
			__set_current_state(TASK_INTERRUPTIBLE);	/* arm before unlock to avoid lost wakeup */
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			schedule();
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		/* copy outside the spinlock; copy_to_user may sleep/fault */
		if (copy_to_user(buffer, s->midi.ibuf + ptr, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
		ptr = (ptr + cnt) % MIDIINBUF;
		spin_lock_irqsave(&s->lock, flags);
		s->midi.ird = ptr;
		s->midi.icnt -= cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		break;		/* deliberate: return the first chunk immediately */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&s->midi.iwait, &wait);
	return ret;
}
2046
/*
 * /dev/midi write: queue bytes into the output ring buffer
 * (s->midi.obuf, MIDIOUTBUF bytes) and kick the transmitter via
 * sv_handle_midi().  Unlike the read path, this loops until the whole
 * request is queued (or O_NONBLOCK / signal interrupts it).
 */
static ssize_t sv_midi_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct sv_state *s = (struct sv_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	unsigned long flags;
	unsigned ptr;
	int cnt;

	VALIDATE_STATE(s);
	if (!access_ok(VERIFY_READ, buffer, count))
		return -EFAULT;
	if (count == 0)
		return 0;
	ret = 0;
	add_wait_queue(&s->midi.owait, &wait);
	while (count > 0) {
		spin_lock_irqsave(&s->lock, flags);
		ptr = s->midi.owr;
		cnt = MIDIOUTBUF - ptr;		/* space until ring wrap */
		if (s->midi.ocnt + cnt > MIDIOUTBUF)
			cnt = MIDIOUTBUF - s->midi.ocnt;	/* clamp to free space */
		if (cnt <= 0) {
			/* buffer full: arm the sleep and let the IRQ path drain */
			__set_current_state(TASK_INTERRUPTIBLE);
			sv_handle_midi(s);
		}
		spin_unlock_irqrestore(&s->lock, flags);
		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			if (file->f_flags & O_NONBLOCK) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			schedule();
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		/* copy outside the spinlock; copy_from_user may sleep/fault */
		if (copy_from_user(s->midi.obuf + ptr, buffer, cnt)) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
		ptr = (ptr + cnt) % MIDIOUTBUF;
		spin_lock_irqsave(&s->lock, flags);
		s->midi.owr = ptr;
		s->midi.ocnt += cnt;
		spin_unlock_irqrestore(&s->lock, flags);
		count -= cnt;
		buffer += cnt;
		ret += cnt;
		/* start/continue transmission of what we just queued */
		spin_lock_irqsave(&s->lock, flags);
		sv_handle_midi(s);
		spin_unlock_irqrestore(&s->lock, flags);
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&s->midi.owait, &wait);
	return ret;
}
2111
2112/* No kernel lock - we have our own spinlock */
2113static unsigned int sv_midi_poll(struct file *file, struct poll_table_struct *wait)
2114{
2115 struct sv_state *s = (struct sv_state *)file->private_data;
2116 unsigned long flags;
2117 unsigned int mask = 0;
2118
2119 VALIDATE_STATE(s);
2120 if (file->f_mode & FMODE_WRITE)
2121 poll_wait(file, &s->midi.owait, wait);
2122 if (file->f_mode & FMODE_READ)
2123 poll_wait(file, &s->midi.iwait, wait);
2124 spin_lock_irqsave(&s->lock, flags);
2125 if (file->f_mode & FMODE_READ) {
2126 if (s->midi.icnt > 0)
2127 mask |= POLLIN | POLLRDNORM;
2128 }
2129 if (file->f_mode & FMODE_WRITE) {
2130 if (s->midi.ocnt < MIDIOUTBUF)
2131 mask |= POLLOUT | POLLWRNORM;
2132 }
2133 spin_unlock_irqrestore(&s->lock, flags);
2134 return mask;
2135}
2136
/*
 * /dev/midi open: find the owning card, wait for the MIDI side of the
 * device to become free, and on the first opener reset the ring
 * buffers, program the UART and start the polling timer.
 */
static int sv_midi_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	struct list_head *list;
	struct sv_state *s;

	for (list = devs.next; ; list = list->next) {
		if (list == &devs)
			return -ENODEV;
		s = list_entry(list, struct sv_state, devs);
		if (s->dev_midi == minor)
			break;
	}
	VALIDATE_STATE(s);
	file->private_data = s;
	/* wait for device to become free */
	mutex_lock(&s->open_mutex);
	while (s->open_mode & (file->f_mode << FMODE_MIDI_SHIFT)) {
		if (file->f_flags & O_NONBLOCK) {
			mutex_unlock(&s->open_mutex);
			return -EBUSY;
		}
		/* same sleep/retry protocol as sv_open() */
		add_wait_queue(&s->open_wait, &wait);
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&s->open_mutex);
		schedule();
		remove_wait_queue(&s->open_wait, &wait);
		set_current_state(TASK_RUNNING);
		if (signal_pending(current))
			return -ERESTARTSYS;
		mutex_lock(&s->open_mutex);
	}
	spin_lock_irqsave(&s->lock, flags);
	if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
		/* first opener: reset both rings and bring up the MIDI UART */
		s->midi.ird = s->midi.iwr = s->midi.icnt = 0;
		s->midi.ord = s->midi.owr = s->midi.ocnt = 0;
		//outb(inb(s->ioenh + SV_CODEC_CONTROL) | SV_CCTRL_WAVETABLE, s->ioenh + SV_CODEC_CONTROL);
		outb(inb(s->ioenh + SV_CODEC_INTMASK) | SV_CINTMASK_MIDI, s->ioenh + SV_CODEC_INTMASK);
		wrindir(s, SV_CIUARTCONTROL, 5); /* output MIDI data to external and internal synth */
		wrindir(s, SV_CIWAVETABLESRC, 1); /* Wavetable in PC RAM */
		outb(0xff, s->iomidi+1); /* reset command */
		outb(0x3f, s->iomidi+1); /* uart command */
		if (!(inb(s->iomidi+1) & 0x80))
			inb(s->iomidi);		/* flush a stale data byte */
		s->midi.ird = s->midi.iwr = s->midi.icnt = 0;
		/* kick off the periodic MIDI poll timer (fires next jiffy;
		   presumably re-armed by sv_midi_timer — confirm there) */
		init_timer(&s->midi.timer);
		s->midi.timer.expires = jiffies+1;
		s->midi.timer.data = (unsigned long)s;
		s->midi.timer.function = sv_midi_timer;
		add_timer(&s->midi.timer);
	}
	if (file->f_mode & FMODE_READ) {
		s->midi.ird = s->midi.iwr = s->midi.icnt = 0;
	}
	if (file->f_mode & FMODE_WRITE) {
		s->midi.ord = s->midi.owr = s->midi.ocnt = 0;
	}
	spin_unlock_irqrestore(&s->lock, flags);
	s->open_mode |= (file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ | FMODE_MIDI_WRITE);
	mutex_unlock(&s->open_mutex);
	return nonseekable_open(inode, file);
}
2201
/*
 * /dev/midi release: for writers, wait until the output ring has
 * drained (with a timeout scaled to the buffered byte count), then on
 * last close mask the MIDI interrupt and stop the polling timer.
 */
static int sv_midi_release(struct inode *inode, struct file *file)
{
	struct sv_state *s = (struct sv_state *)file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned count, tmo;

	VALIDATE_STATE(s);

	lock_kernel();
	if (file->f_mode & FMODE_WRITE) {
		add_wait_queue(&s->midi.owait, &wait);
		for (;;) {
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_lock_irqsave(&s->lock, flags);
			count = s->midi.ocnt;
			spin_unlock_irqrestore(&s->lock, flags);
			/* NOTE(review): count is unsigned, so this is really count == 0 */
			if (count <= 0)
				break;
			if (signal_pending(current))
				break;
			if (file->f_flags & O_NONBLOCK) {
				remove_wait_queue(&s->midi.owait, &wait);
				set_current_state(TASK_RUNNING);
				unlock_kernel();
				return -EBUSY;
			}
			/* 3100 bytes/s — presumably the MIDI wire rate
			   (31250 baud ~ 3125 bytes/s); verify */
			tmo = (count * HZ) / 3100;
			if (!schedule_timeout(tmo ? : 1) && tmo)
				printk(KERN_DEBUG "sv: midi timed out??\n");
		}
		remove_wait_queue(&s->midi.owait, &wait);
		set_current_state(TASK_RUNNING);
	}
	mutex_lock(&s->open_mutex);
	s->open_mode &= ~((file->f_mode << FMODE_MIDI_SHIFT) & (FMODE_MIDI_READ|FMODE_MIDI_WRITE));
	spin_lock_irqsave(&s->lock, flags);
	if (!(s->open_mode & (FMODE_MIDI_READ | FMODE_MIDI_WRITE))) {
		/* last MIDI user gone: quiesce the hardware side */
		outb(inb(s->ioenh + SV_CODEC_INTMASK) & ~SV_CINTMASK_MIDI, s->ioenh + SV_CODEC_INTMASK);
		del_timer(&s->midi.timer);
	}
	spin_unlock_irqrestore(&s->lock, flags);
	wake_up(&s->open_wait);
	mutex_unlock(&s->open_mutex);
	unlock_kernel();
	return 0;
}
2249
/* file operations for the /dev/midi device node */
static /*const*/ struct file_operations sv_midi_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= sv_midi_read,
	.write		= sv_midi_write,
	.poll		= sv_midi_poll,
	.open		= sv_midi_open,
	.release	= sv_midi_release,
};
2259
2260/* --------------------------------------------------------------------- */
2261
2262static int sv_dmfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
2263{
2264 static const unsigned char op_offset[18] = {
2265 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
2266 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
2267 0x10, 0x11, 0x12, 0x13, 0x14, 0x15
2268 };
2269 struct sv_state *s = (struct sv_state *)file->private_data;
2270 struct dm_fm_voice v;
2271 struct dm_fm_note n;
2272 struct dm_fm_params p;
2273 unsigned int io;
2274 unsigned int regb;
2275
2276 switch (cmd) {
2277 case FM_IOCTL_RESET:
2278 for (regb = 0xb0; regb < 0xb9; regb++) {
2279 outb(regb, s->iosynth);
2280 outb(0, s->iosynth+1);
2281 outb(regb, s->iosynth+2);
2282 outb(0, s->iosynth+3);
2283 }
2284 return 0;
2285
2286 case FM_IOCTL_PLAY_NOTE:
2287 if (copy_from_user(&n, (void __user *)arg, sizeof(n)))
2288 return -EFAULT;
2289 if (n.voice >= 18)
2290 return -EINVAL;
2291 if (n.voice >= 9) {
2292 regb = n.voice - 9;
2293 io = s->iosynth+2;
2294 } else {
2295 regb = n.voice;
2296 io = s->iosynth;
2297 }
2298 outb(0xa0 + regb, io);
2299 outb(n.fnum & 0xff, io+1);
2300 outb(0xb0 + regb, io);
2301 outb(((n.fnum >> 8) & 3) | ((n.octave & 7) << 2) | ((n.key_on & 1) << 5), io+1);
2302 return 0;
2303
2304 case FM_IOCTL_SET_VOICE:
2305 if (copy_from_user(&v, (void __user *)arg, sizeof(v)))
2306 return -EFAULT;
2307 if (v.voice >= 18)
2308 return -EINVAL;
2309 regb = op_offset[v.voice];
2310 io = s->iosynth + ((v.op & 1) << 1);
2311 outb(0x20 + regb, io);
2312 outb(((v.am & 1) << 7) | ((v.vibrato & 1) << 6) | ((v.do_sustain & 1) << 5) |
2313 ((v.kbd_scale & 1) << 4) | (v.harmonic & 0xf), io+1);
2314 outb(0x40 + regb, io);
2315 outb(((v.scale_level & 0x3) << 6) | (v.volume & 0x3f), io+1);
2316 outb(0x60 + regb, io);
2317 outb(((v.attack & 0xf) << 4) | (v.decay & 0xf), io+1);
2318 outb(0x80 + regb, io);
2319 outb(((v.sustain & 0xf) << 4) | (v.release & 0xf), io+1);
2320 outb(0xe0 + regb, io);
2321 outb(v.waveform & 0x7, io+1);
2322 if (n.voice >= 9) {
2323 regb = n.voice - 9;
2324 io = s->iosynth+2;
2325 } else {
2326 regb = n.voice;
2327 io = s->iosynth;
2328 }
2329 outb(0xc0 + regb, io);
2330 outb(((v.right & 1) << 5) | ((v.left & 1) << 4) | ((v.feedback & 7) << 1) |
2331 (v.connection & 1), io+1);
2332 return 0;
2333
2334 case FM_IOCTL_SET_PARAMS:
2335 if (copy_from_user(&p, (void *__user )arg, sizeof(p)))
2336 return -EFAULT;
2337 outb(0x08, s->iosynth);
2338 outb((p.kbd_split & 1) << 6, s->iosynth+1);
2339 outb(0xbd, s->iosynth);
2340 outb(((p.am_depth & 1) << 7) | ((p.vib_depth & 1) << 6) | ((p.rhythm & 1) << 5) | ((p.bass & 1) << 4) |
2341 ((p.snare & 1) << 3) | ((p.tomtom & 1) << 2) | ((p.cymbal & 1) << 1) | (p.hihat & 1), s->iosynth+1);
2342 return 0;
2343
2344 case FM_IOCTL_SET_OPL:
2345 outb(4, s->iosynth+2);
2346 outb(arg, s->iosynth+3);
2347 return 0;
2348
2349 case FM_IOCTL_SET_MODE:
2350 outb(5, s->iosynth+2);
2351 outb(arg & 1, s->iosynth+3);
2352 return 0;
2353
2354 default:
2355 return -EINVAL;
2356 }
2357}
2358
/*
 * /dev/dmfm open: single-opener device.  Waits for FMODE_DMFM to clear,
 * then initializes the synth (waveform select, 4-op off, OPL3 mode on).
 */
static int sv_dmfm_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	DECLARE_WAITQUEUE(wait, current);
	struct list_head *list;
	struct sv_state *s;

	for (list = devs.next; ; list = list->next) {
		if (list == &devs)
			return -ENODEV;
		s = list_entry(list, struct sv_state, devs);
		if (s->dev_dmfm == minor)
			break;
	}
	VALIDATE_STATE(s);
	file->private_data = s;
	/* wait for device to become free */
	mutex_lock(&s->open_mutex);
	while (s->open_mode & FMODE_DMFM) {
		if (file->f_flags & O_NONBLOCK) {
			mutex_unlock(&s->open_mutex);
			return -EBUSY;
		}
		/* same sleep/retry protocol as sv_open() */
		add_wait_queue(&s->open_wait, &wait);
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&s->open_mutex);
		schedule();
		remove_wait_queue(&s->open_wait, &wait);
		set_current_state(TASK_RUNNING);
		if (signal_pending(current))
			return -ERESTARTSYS;
		mutex_lock(&s->open_mutex);
	}
	/* init the stuff */
	outb(1, s->iosynth);
	outb(0x20, s->iosynth+1); /* enable waveforms */
	outb(4, s->iosynth+2);
	outb(0, s->iosynth+3); /* no 4op enabled */
	outb(5, s->iosynth+2);
	outb(1, s->iosynth+3); /* enable OPL3 */
	s->open_mode |= FMODE_DMFM;
	mutex_unlock(&s->open_mutex);
	return nonseekable_open(inode, file);
}
2403
/*
 * /dev/dmfm release: key off all channels in both register banks and
 * wake anyone blocked in sv_dmfm_open().
 */
static int sv_dmfm_release(struct inode *inode, struct file *file)
{
	struct sv_state *s = (struct sv_state *)file->private_data;
	unsigned int regb;

	VALIDATE_STATE(s);
	lock_kernel();
	mutex_lock(&s->open_mutex);
	s->open_mode &= ~FMODE_DMFM;
	/* silence the synth: zero key-on registers 0xB0-0xB8, both banks */
	for (regb = 0xb0; regb < 0xb9; regb++) {
		outb(regb, s->iosynth);
		outb(0, s->iosynth+1);
		outb(regb, s->iosynth+2);
		outb(0, s->iosynth+3);
	}
	wake_up(&s->open_wait);
	mutex_unlock(&s->open_mutex);
	unlock_kernel();
	return 0;
}
2424
/* file operations for the /dev/dmfm (FM synth) device node */
static /*const*/ struct file_operations sv_dmfm_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.ioctl		= sv_dmfm_ioctl,
	.open		= sv_dmfm_open,
	.release	= sv_dmfm_release,
};
2432
2433/* --------------------------------------------------------------------- */
2434
/* maximum number of devices; only used for command line params */
#define NR_DEVICE 5

/* per-card reverb enable, indexed by probe order (see devindex) */
static int reverb[NR_DEVICE];

#if 0
static int wavetable[NR_DEVICE];
#endif

/* index of the next card to probe; selects the reverb[] entry to use */
static unsigned int devindex;

module_param_array(reverb, bool, NULL, 0);
MODULE_PARM_DESC(reverb, "if 1 enables the reverb circuitry. NOTE: your card must have the reverb RAM");
#if 0
MODULE_PARM(wavetable, "1-" __MODULE_STRING(NR_DEVICE) "i");
MODULE_PARM_DESC(wavetable, "if 1 the wavetable synth is enabled");
#endif

MODULE_AUTHOR("Thomas M. Sailer, sailer@ife.ee.ethz.ch, hb9jnx@hb9w.che.eu");
MODULE_DESCRIPTION("S3 SonicVibes Driver");
MODULE_LICENSE("GPL");
2456
2457
2458/* --------------------------------------------------------------------- */
2459
/* mixer channels set to a mid-scale default at probe time */
static struct initvol {
	int mixch;	/* SOUND_MIXER_WRITE_* ioctl selector */
	int vol;	/* packed left/right volume (0x4040 = 64/64) */
} initvol[] __devinitdata = {
	{ SOUND_MIXER_WRITE_RECLEV, 0x4040 },
	{ SOUND_MIXER_WRITE_LINE1, 0x4040 },
	{ SOUND_MIXER_WRITE_CD, 0x4040 },
	{ SOUND_MIXER_WRITE_LINE, 0x4040 },
	{ SOUND_MIXER_WRITE_MIC, 0x4040 },
	{ SOUND_MIXER_WRITE_SYNTH, 0x4040 },
	{ SOUND_MIXER_WRITE_LINE2, 0x4040 },
	{ SOUND_MIXER_WRITE_VOLUME, 0x4040 },
	{ SOUND_MIXER_WRITE_PCM, 0x4040 }
};

/* true if PCI BAR 'num' is present and is an I/O port region */
#define RSRCISIOREGION(dev,num) (pci_resource_start((dev), (num)) != 0 && \
				 (pci_resource_flags((dev), (num)) & IORESOURCE_IO))
2477
#ifdef SUPPORT_JOYSTICK
/*
 * Claim the gameport I/O region and register it with the gameport core.
 * Returns 0, -EBUSY (ports in use) or -ENOMEM.
 */
static int __devinit sv_register_gameport(struct sv_state *s, int io_port)
{
	struct gameport *gp;

	if (!request_region(io_port, SV_EXTENT_GAME, "S3 SonicVibes Gameport")) {
		printk(KERN_ERR "sv: gameport io ports are in use\n");
		return -EBUSY;
	}

	s->gameport = gp = gameport_allocate_port();
	if (!gp) {
		printk(KERN_ERR "sv: can not allocate memory for gameport\n");
		release_region(io_port, SV_EXTENT_GAME);
		return -ENOMEM;
	}

	gameport_set_name(gp, "S3 SonicVibes Gameport");
	gameport_set_phys(gp, "isa%04x/gameport0", io_port);
	gp->dev.parent = &s->dev->dev;
	gp->io = io_port;

	gameport_register_port(gp);

	return 0;
}

/* Undo sv_register_gameport(); safe to call when none was registered. */
static inline void sv_unregister_gameport(struct sv_state *s)
{
	if (s->gameport) {
		/* save the io base before the unregister frees the port */
		int gpio = s->gameport->io;
		gameport_unregister_port(s->gameport);
		release_region(gpio, SV_EXTENT_GAME);
	}
}
#else
/* no joystick support compiled in: stubs keep the probe path unchanged */
static inline int sv_register_gameport(struct sv_state *s, int io_port) { return -ENOSYS; }
static inline void sv_unregister_gameport(struct sv_state *s) { }
#endif /* SUPPORT_JOYSTICK */
2517
/*
 * PCI probe: validate the five fixed I/O BARs, synthesize a DDMA BAR if
 * the BIOS did not assign one, initialize the codec, register the OSS
 * audio/mixer/midi/dmfm device nodes and program default mixer levels.
 * On failure, unwinds everything acquired so far via the err_* labels
 * (in reverse acquisition order).
 */
static int __devinit sv_probe(struct pci_dev *pcidev, const struct pci_device_id *pciid)
{
	static char __devinitdata sv_ddma_name[] = "S3 Inc. SonicVibes DDMA Controller";
	struct sv_state *s;
	mm_segment_t fs;
	int i, val, ret;
	int gpio;
	char *ddmaname;
	unsigned ddmanamelen;

	if ((ret=pci_enable_device(pcidev)))
		return ret;

	if (!RSRCISIOREGION(pcidev, RESOURCE_SB) ||
	    !RSRCISIOREGION(pcidev, RESOURCE_ENH) ||
	    !RSRCISIOREGION(pcidev, RESOURCE_SYNTH) ||
	    !RSRCISIOREGION(pcidev, RESOURCE_MIDI) ||
	    !RSRCISIOREGION(pcidev, RESOURCE_GAME))
		return -ENODEV;
	if (pcidev->irq == 0)
		return -ENODEV;
	/* the chip's busmaster DMA can only address the low 16MB */
	if (pci_set_dma_mask(pcidev, DMA_24BIT_MASK)) {
		printk(KERN_WARNING "sonicvibes: architecture does not support 24bit PCI busmaster DMA\n");
		return -ENODEV;
	}
	/* try to allocate a DDMA resource if not already available */
	if (!RSRCISIOREGION(pcidev, RESOURCE_DDMA)) {
		pcidev->resource[RESOURCE_DDMA].start = 0;
		pcidev->resource[RESOURCE_DDMA].end = 2*SV_EXTENT_DMA-1;	/* DMAA + DMAC back-to-back */
		pcidev->resource[RESOURCE_DDMA].flags = PCI_BASE_ADDRESS_SPACE_IO | IORESOURCE_IO;
		ddmanamelen = strlen(sv_ddma_name)+1;
		/* NOTE(review): returns -1 rather than -ENOMEM on allocation failure */
		if (!(ddmaname = kmalloc(ddmanamelen, GFP_KERNEL)))
			return -1;
		memcpy(ddmaname, sv_ddma_name, ddmanamelen);
		pcidev->resource[RESOURCE_DDMA].name = ddmaname;
		if (pci_assign_resource(pcidev, RESOURCE_DDMA)) {
			pcidev->resource[RESOURCE_DDMA].name = NULL;
			kfree(ddmaname);
			printk(KERN_ERR "sv: cannot allocate DDMA controller io ports\n");
			return -EBUSY;
		}
	}
	if (!(s = kmalloc(sizeof(struct sv_state), GFP_KERNEL))) {
		printk(KERN_WARNING "sv: out of memory\n");
		return -ENOMEM;
	}
	memset(s, 0, sizeof(struct sv_state));
	init_waitqueue_head(&s->dma_adc.wait);
	init_waitqueue_head(&s->dma_dac.wait);
	init_waitqueue_head(&s->open_wait);
	init_waitqueue_head(&s->midi.iwait);
	init_waitqueue_head(&s->midi.owait);
	mutex_init(&s->open_mutex);
	spin_lock_init(&s->lock);
	s->magic = SV_MAGIC;
	s->dev = pcidev;
	/* cache the I/O base of every BAR we use */
	s->iosb = pci_resource_start(pcidev, RESOURCE_SB);
	s->ioenh = pci_resource_start(pcidev, RESOURCE_ENH);
	s->iosynth = pci_resource_start(pcidev, RESOURCE_SYNTH);
	s->iomidi = pci_resource_start(pcidev, RESOURCE_MIDI);
	s->iodmaa = pci_resource_start(pcidev, RESOURCE_DDMA);
	s->iodmac = pci_resource_start(pcidev, RESOURCE_DDMA) + SV_EXTENT_DMA;
	gpio = pci_resource_start(pcidev, RESOURCE_GAME);
	pci_write_config_dword(pcidev, 0x40, s->iodmaa | 9);  /* enable and use extended mode */
	pci_write_config_dword(pcidev, 0x48, s->iodmac | 9);  /* enable */
	printk(KERN_DEBUG "sv: io ports: %#lx %#lx %#lx %#lx %#x %#x %#x\n",
	       s->iosb, s->ioenh, s->iosynth, s->iomidi, gpio, s->iodmaa, s->iodmac);
	s->irq = pcidev->irq;

	/* hack */
	pci_write_config_dword(pcidev, 0x60, wavetable_mem >> 12);	/* wavetable base address */

	ret = -EBUSY;
	if (!request_region(s->ioenh, SV_EXTENT_ENH, "S3 SonicVibes PCM")) {
		printk(KERN_ERR "sv: io ports %#lx-%#lx in use\n", s->ioenh, s->ioenh+SV_EXTENT_ENH-1);
		goto err_region5;
	}
	if (!request_region(s->iodmaa, SV_EXTENT_DMA, "S3 SonicVibes DMAA")) {
		printk(KERN_ERR "sv: io ports %#x-%#x in use\n", s->iodmaa, s->iodmaa+SV_EXTENT_DMA-1);
		goto err_region4;
	}
	if (!request_region(s->iodmac, SV_EXTENT_DMA, "S3 SonicVibes DMAC")) {
		printk(KERN_ERR "sv: io ports %#x-%#x in use\n", s->iodmac, s->iodmac+SV_EXTENT_DMA-1);
		goto err_region3;
	}
	if (!request_region(s->iomidi, SV_EXTENT_MIDI, "S3 SonicVibes Midi")) {
		printk(KERN_ERR "sv: io ports %#lx-%#lx in use\n", s->iomidi, s->iomidi+SV_EXTENT_MIDI-1);
		goto err_region2;
	}
	if (!request_region(s->iosynth, SV_EXTENT_SYNTH, "S3 SonicVibes Synth")) {
		printk(KERN_ERR "sv: io ports %#lx-%#lx in use\n", s->iosynth, s->iosynth+SV_EXTENT_SYNTH-1);
		goto err_region1;
	}

	/* initialize codec registers */
	outb(0x80, s->ioenh + SV_CODEC_CONTROL); /* assert reset */
	udelay(50);
	outb(0x00, s->ioenh + SV_CODEC_CONTROL); /* deassert reset */
	udelay(50);
	outb(SV_CCTRL_INTADRIVE | SV_CCTRL_ENHANCED /*| SV_CCTRL_WAVETABLE */
	     | (reverb[devindex] ? SV_CCTRL_REVERB : 0), s->ioenh + SV_CODEC_CONTROL);
	inb(s->ioenh + SV_CODEC_STATUS); /* clear ints */
	wrindir(s, SV_CIDRIVECONTROL, 0); /* drive current 16mA */
	wrindir(s, SV_CIENABLE, s->enable = 0); /* disable DMAA and DMAC */
	outb(~(SV_CINTMASK_DMAA | SV_CINTMASK_DMAC), s->ioenh + SV_CODEC_INTMASK);
	/* outb(0xff, s->iodmaa + SV_DMA_RESET); */
	/* outb(0xff, s->iodmac + SV_DMA_RESET); */
	inb(s->ioenh + SV_CODEC_STATUS); /* ack interrupts */
	wrindir(s, SV_CIADCCLKSOURCE, 0); /* use pll as ADC clock source */
	wrindir(s, SV_CIANALOGPWRDOWN, 0); /* power up the analog parts of the device */
	wrindir(s, SV_CIDIGITALPWRDOWN, 0); /* power up the digital parts of the device */
	setpll(s, SV_CIADCPLLM, 8000);
	wrindir(s, SV_CISRSSPACE, 0x80); /* SRS off */
	wrindir(s, SV_CIPCMSR0, (8000 * 65536 / FULLRATE) & 0xff);
	wrindir(s, SV_CIPCMSR1, ((8000 * 65536 / FULLRATE) >> 8) & 0xff);
	wrindir(s, SV_CIADCOUTPUT, 0);
	/* request irq */
	if ((ret=request_irq(s->irq,sv_interrupt,IRQF_SHARED,"S3 SonicVibes",s))) {
		printk(KERN_ERR "sv: irq %u in use\n", s->irq);
		goto err_irq;
	}
	printk(KERN_INFO "sv: found adapter at io %#lx irq %u dmaa %#06x dmac %#06x revision %u\n",
	       s->ioenh, s->irq, s->iodmaa, s->iodmac, rdindir(s, SV_CIREVISION));
	/* register devices */
	if ((s->dev_audio = register_sound_dsp(&sv_audio_fops, -1)) < 0) {
		ret = s->dev_audio;
		goto err_dev1;
	}
	if ((s->dev_mixer = register_sound_mixer(&sv_mixer_fops, -1)) < 0) {
		ret = s->dev_mixer;
		goto err_dev2;
	}
	if ((s->dev_midi = register_sound_midi(&sv_midi_fops, -1)) < 0) {
		ret = s->dev_midi;
		goto err_dev3;
	}
	if ((s->dev_dmfm = register_sound_special(&sv_dmfm_fops, 15 /* ?? */)) < 0) {
		ret = s->dev_dmfm;
		goto err_dev4;
	}
	pci_set_master(pcidev);  /* enable bus mastering */
	/* initialize the chips: mixer_ioctl takes user pointers, so point
	   the user-segment at kernel space around the kernel-side calls */
	fs = get_fs();
	set_fs(KERNEL_DS);
	val = SOUND_MASK_LINE|SOUND_MASK_SYNTH;
	mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long)&val);
	for (i = 0; i < sizeof(initvol)/sizeof(initvol[0]); i++) {
		val = initvol[i].vol;
		mixer_ioctl(s, initvol[i].mixch, (unsigned long)&val);
	}
	set_fs(fs);
	/* register gameport */
	sv_register_gameport(s, gpio);
	/* store it in the driver field */
	pci_set_drvdata(pcidev, s);
	/* put it into driver list */
	list_add_tail(&s->devs, &devs);
	/* increment devindex */
	if (devindex < NR_DEVICE-1)
		devindex++;
	return 0;

	/* error unwinding: reverse acquisition order */
 err_dev4:
	unregister_sound_midi(s->dev_midi);
 err_dev3:
	unregister_sound_mixer(s->dev_mixer);
 err_dev2:
	unregister_sound_dsp(s->dev_audio);
 err_dev1:
	printk(KERN_ERR "sv: cannot register misc device\n");
	free_irq(s->irq, s);
 err_irq:
	release_region(s->iosynth, SV_EXTENT_SYNTH);
 err_region1:
	release_region(s->iomidi, SV_EXTENT_MIDI);
 err_region2:
	release_region(s->iodmac, SV_EXTENT_DMA);
 err_region3:
	release_region(s->iodmaa, SV_EXTENT_DMA);
 err_region4:
	release_region(s->ioenh, SV_EXTENT_ENH);
 err_region5:
	kfree(s);
	return ret;
}
2703
/*
 * PCI remove: mask interrupts, wait out any in-flight IRQ handler,
 * stop the DMA engines, then release every resource sv_probe() took.
 */
static void __devexit sv_remove(struct pci_dev *dev)
{
	struct sv_state *s = pci_get_drvdata(dev);

	if (!s)
		return;
	list_del(&s->devs);
	outb(~0, s->ioenh + SV_CODEC_INTMASK);  /* disable ints */
	synchronize_irq(s->irq);	/* wait for a running handler to finish */
	inb(s->ioenh + SV_CODEC_STATUS); /* ack interrupts */
	wrindir(s, SV_CIENABLE, 0);     /* disable DMAA and DMAC */
	/*outb(0, s->iodmaa + SV_DMA_RESET);*/
	/*outb(0, s->iodmac + SV_DMA_RESET);*/
	free_irq(s->irq, s);
	sv_unregister_gameport(s);
	release_region(s->iodmac, SV_EXTENT_DMA);
	release_region(s->iodmaa, SV_EXTENT_DMA);
	release_region(s->ioenh, SV_EXTENT_ENH);
	release_region(s->iomidi, SV_EXTENT_MIDI);
	release_region(s->iosynth, SV_EXTENT_SYNTH);
	unregister_sound_dsp(s->dev_audio);
	unregister_sound_mixer(s->dev_mixer);
	unregister_sound_midi(s->dev_midi);
	unregister_sound_special(s->dev_dmfm);
	kfree(s);
	pci_set_drvdata(dev, NULL);
}
2731
/* PCI IDs this driver binds to: the S3 SonicVibes chip only */
static struct pci_device_id id_table[] = {
	{ PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_SONICVIBES, PCI_ANY_ID, PCI_ANY_ID, 0, 0 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, id_table);
2738
/* PCI driver glue: binds id_table to the probe/remove callbacks */
static struct pci_driver sv_driver = {
	.name		= "sonicvibes",
	.id_table	= id_table,
	.probe		= sv_probe,
	.remove		= __devexit_p(sv_remove),
};
2745
/* Module entry point: announce the driver and register with the PCI core. */
static int __init init_sonicvibes(void)
{
	printk(KERN_INFO "sv: version v0.31 time " __TIME__ " " __DATE__ "\n");
#if 0
	/* optional 1MB contiguous buffer for the (disabled) wavetable synth */
	if (!(wavetable_mem = __get_free_pages(GFP_KERNEL, 20-PAGE_SHIFT)))
		printk(KERN_INFO "sv: cannot allocate 1MB of contiguous nonpageable memory for wavetable data\n");
#endif
	return pci_register_driver(&sv_driver);
}
2755
/* Module exit: unregister the PCI driver and free the wavetable buffer
 * if one was allocated at init time. */
static void __exit cleanup_sonicvibes(void)
{
	printk(KERN_INFO "sv: unloading\n");
	pci_unregister_driver(&sv_driver);
	if (wavetable_mem)
		free_pages(wavetable_mem, 20-PAGE_SHIFT);	/* matches the 1MB init allocation */
}

module_init(init_sonicvibes);
module_exit(cleanup_sonicvibes);
2766
2767/* --------------------------------------------------------------------- */
2768
#ifndef MODULE

/* format is: sonicvibes=[reverb] sonicvibesdmaio=dmaioaddr */

/*
 * Built-in (non-module) kernel command line parser: each
 * "sonicvibes=" occurrence fills the reverb flag for the next card,
 * up to NR_DEVICE cards.
 */
static int __init sonicvibes_setup(char *str)
{
	static unsigned __initdata nr_dev = 0;	/* cards configured so far */

	if (nr_dev >= NR_DEVICE)
		return 0;
#if 0
	if (get_option(&str, &reverb[nr_dev]) == 2)
		(void)get_option(&str, &wavetable[nr_dev]);
#else
	(void)get_option(&str, &reverb[nr_dev]);
#endif

	nr_dev++;
	return 1;
}

__setup("sonicvibes=", sonicvibes_setup);

#endif /* MODULE */
diff --git a/sound/oss/sound_calls.h b/sound/oss/sound_calls.h
index 1ae07509664f..87d8ad4a0340 100644
--- a/sound/oss/sound_calls.h
+++ b/sound/oss/sound_calls.h
@@ -13,8 +13,6 @@ int DMAbuf_move_wrpointer(int dev, int l);
13void DMAbuf_init(int dev, int dma1, int dma2); 13void DMAbuf_init(int dev, int dma1, int dma2);
14void DMAbuf_deinit(int dev); 14void DMAbuf_deinit(int dev);
15int DMAbuf_start_dma (int dev, unsigned long physaddr, int count, int dma_mode); 15int DMAbuf_start_dma (int dev, unsigned long physaddr, int count, int dma_mode);
16int DMAbuf_open_dma (int dev);
17void DMAbuf_close_dma (int dev);
18void DMAbuf_inputintr(int dev); 16void DMAbuf_inputintr(int dev);
19void DMAbuf_outputintr(int dev, int underflow_flag); 17void DMAbuf_outputintr(int dev, int underflow_flag);
20struct dma_buffparms; 18struct dma_buffparms;
@@ -73,7 +71,6 @@ unsigned int MIDIbuf_poll(int dev, struct file *file, poll_table * wait);
73int MIDIbuf_avail(int dev); 71int MIDIbuf_avail(int dev);
74 72
75void MIDIbuf_bytes_received(int dev, unsigned char *buf, int count); 73void MIDIbuf_bytes_received(int dev, unsigned char *buf, int count);
76void MIDIbuf_init(void);
77 74
78 75
79/* From soundcard.c */ 76/* From soundcard.c */
diff --git a/sound/oss/sound_syms.c b/sound/oss/sound_syms.c
deleted file mode 100644
index cb7c33fe5b05..000000000000
--- a/sound/oss/sound_syms.c
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * The sound core exports the following symbols to the rest of
3 * modulespace.
4 *
5 * (C) Copyright 1997 Alan Cox, Licensed under the GNU GPL
6 *
7 * Thu May 27 1999 Andrew J. Kroll <ag784@freenet..buffalo..edu>
8 * left out exported symbol... fixed
9 */
10
11#include <linux/module.h>
12#include "sound_config.h"
13#include "sound_calls.h"
14
15char sound_syms_symbol;
16
17EXPORT_SYMBOL(mixer_devs);
18EXPORT_SYMBOL(audio_devs);
19EXPORT_SYMBOL(num_mixers);
20EXPORT_SYMBOL(num_audiodevs);
21
22EXPORT_SYMBOL(midi_devs);
23EXPORT_SYMBOL(num_midis);
24EXPORT_SYMBOL(synth_devs);
25
26EXPORT_SYMBOL(sound_timer_devs);
27
28EXPORT_SYMBOL(sound_install_audiodrv);
29EXPORT_SYMBOL(sound_install_mixer);
30EXPORT_SYMBOL(sound_alloc_dma);
31EXPORT_SYMBOL(sound_free_dma);
32EXPORT_SYMBOL(sound_open_dma);
33EXPORT_SYMBOL(sound_close_dma);
34EXPORT_SYMBOL(sound_alloc_mididev);
35EXPORT_SYMBOL(sound_alloc_mixerdev);
36EXPORT_SYMBOL(sound_alloc_timerdev);
37EXPORT_SYMBOL(sound_alloc_synthdev);
38EXPORT_SYMBOL(sound_unload_audiodev);
39EXPORT_SYMBOL(sound_unload_mididev);
40EXPORT_SYMBOL(sound_unload_mixerdev);
41EXPORT_SYMBOL(sound_unload_timerdev);
42EXPORT_SYMBOL(sound_unload_synthdev);
43
44EXPORT_SYMBOL(load_mixer_volumes);
45
46EXPORT_SYMBOL(conf_printf);
47EXPORT_SYMBOL(conf_printf2);
48
49MODULE_DESCRIPTION("OSS Sound subsystem");
50MODULE_AUTHOR("Hannu Savolainen, et al.");
diff --git a/sound/oss/sound_timer.c b/sound/oss/sound_timer.c
index 146bf85de958..f0f0c19fbff7 100644
--- a/sound/oss/sound_timer.c
+++ b/sound/oss/sound_timer.c
@@ -76,6 +76,7 @@ void sound_timer_syncinterval(unsigned int new_usecs)
76 tmr_ctr = 0; 76 tmr_ctr = 0;
77 usecs_per_tmr = new_usecs; 77 usecs_per_tmr = new_usecs;
78} 78}
79EXPORT_SYMBOL(sound_timer_syncinterval);
79 80
80static void tmr_reset(void) 81static void tmr_reset(void)
81{ 82{
@@ -300,6 +301,7 @@ void sound_timer_interrupt(void)
300 } 301 }
301 spin_unlock_irqrestore(&lock,flags); 302 spin_unlock_irqrestore(&lock,flags);
302} 303}
304EXPORT_SYMBOL(sound_timer_interrupt);
303 305
304void sound_timer_init(struct sound_lowlev_timer *t, char *name) 306void sound_timer_init(struct sound_lowlev_timer *t, char *name)
305{ 307{
@@ -321,3 +323,5 @@ void sound_timer_init(struct sound_lowlev_timer *t, char *name)
321 strcpy(sound_timer.info.name, name); 323 strcpy(sound_timer.info.name, name);
322 sound_timer_devs[n] = &sound_timer; 324 sound_timer_devs[n] = &sound_timer;
323} 325}
326EXPORT_SYMBOL(sound_timer_init);
327
diff --git a/sound/oss/soundcard.c b/sound/oss/soundcard.c
index 683dc00a8d2b..2344d09c7114 100644
--- a/sound/oss/soundcard.c
+++ b/sound/oss/soundcard.c
@@ -107,6 +107,7 @@ int *load_mixer_volumes(char *name, int *levels, int present)
107 mixer_vols[n].levels[i] = levels[i]; 107 mixer_vols[n].levels[i] = levels[i];
108 return mixer_vols[n].levels; 108 return mixer_vols[n].levels;
109} 109}
110EXPORT_SYMBOL(load_mixer_volumes);
110 111
111static int set_mixer_levels(void __user * arg) 112static int set_mixer_levels(void __user * arg)
112{ 113{
@@ -541,12 +542,6 @@ static int __init oss_init(void)
541 int err; 542 int err;
542 int i, j; 543 int i, j;
543 544
544 /* drag in sound_syms.o */
545 {
546 extern char sound_syms_symbol;
547 sound_syms_symbol = 0;
548 }
549
550#ifdef CONFIG_PCI 545#ifdef CONFIG_PCI
551 if(dmabug) 546 if(dmabug)
552 isa_dma_bridge_buggy = dmabug; 547 isa_dma_bridge_buggy = dmabug;
@@ -614,6 +609,8 @@ static void __exit oss_cleanup(void)
614module_init(oss_init); 609module_init(oss_init);
615module_exit(oss_cleanup); 610module_exit(oss_cleanup);
616MODULE_LICENSE("GPL"); 611MODULE_LICENSE("GPL");
612MODULE_DESCRIPTION("OSS Sound subsystem");
613MODULE_AUTHOR("Hannu Savolainen, et al.");
617 614
618 615
619int sound_alloc_dma(int chn, char *deviceID) 616int sound_alloc_dma(int chn, char *deviceID)
@@ -627,6 +624,7 @@ int sound_alloc_dma(int chn, char *deviceID)
627 624
628 return 0; 625 return 0;
629} 626}
627EXPORT_SYMBOL(sound_alloc_dma);
630 628
631int sound_open_dma(int chn, char *deviceID) 629int sound_open_dma(int chn, char *deviceID)
632{ 630{
@@ -642,6 +640,7 @@ int sound_open_dma(int chn, char *deviceID)
642 dma_alloc_map[chn] = DMA_MAP_BUSY; 640 dma_alloc_map[chn] = DMA_MAP_BUSY;
643 return 0; 641 return 0;
644} 642}
643EXPORT_SYMBOL(sound_open_dma);
645 644
646void sound_free_dma(int chn) 645void sound_free_dma(int chn)
647{ 646{
@@ -652,6 +651,7 @@ void sound_free_dma(int chn)
652 free_dma(chn); 651 free_dma(chn);
653 dma_alloc_map[chn] = DMA_MAP_UNAVAIL; 652 dma_alloc_map[chn] = DMA_MAP_UNAVAIL;
654} 653}
654EXPORT_SYMBOL(sound_free_dma);
655 655
656void sound_close_dma(int chn) 656void sound_close_dma(int chn)
657{ 657{
@@ -661,6 +661,7 @@ void sound_close_dma(int chn)
661 } 661 }
662 dma_alloc_map[chn] = DMA_MAP_FREE; 662 dma_alloc_map[chn] = DMA_MAP_FREE;
663} 663}
664EXPORT_SYMBOL(sound_close_dma);
664 665
665static void do_sequencer_timer(unsigned long dummy) 666static void do_sequencer_timer(unsigned long dummy)
666{ 667{
@@ -714,6 +715,7 @@ void conf_printf(char *name, struct address_info *hw_config)
714 printk("\n"); 715 printk("\n");
715#endif 716#endif
716} 717}
718EXPORT_SYMBOL(conf_printf);
717 719
718void conf_printf2(char *name, int base, int irq, int dma, int dma2) 720void conf_printf2(char *name, int base, int irq, int dma, int dma2)
719{ 721{
@@ -734,3 +736,5 @@ void conf_printf2(char *name, int base, int irq, int dma, int dma2)
734 printk("\n"); 736 printk("\n");
735#endif 737#endif
736} 738}
739EXPORT_SYMBOL(conf_printf2);
740
diff --git a/sound/oss/tuning.h b/sound/oss/tuning.h
index 858e1fe6c618..a73e3dd39f9a 100644
--- a/sound/oss/tuning.h
+++ b/sound/oss/tuning.h
@@ -1,13 +1,11 @@
1#ifdef SEQUENCER_C 1static unsigned short semitone_tuning[24] =
2
3unsigned short semitone_tuning[24] =
4{ 2{
5/* 0 */ 10000, 10595, 11225, 11892, 12599, 13348, 14142, 14983, 3/* 0 */ 10000, 10595, 11225, 11892, 12599, 13348, 14142, 14983,
6/* 8 */ 15874, 16818, 17818, 18877, 20000, 21189, 22449, 23784, 4/* 8 */ 15874, 16818, 17818, 18877, 20000, 21189, 22449, 23784,
7/* 16 */ 25198, 26697, 28284, 29966, 31748, 33636, 35636, 37755 5/* 16 */ 25198, 26697, 28284, 29966, 31748, 33636, 35636, 37755
8}; 6};
9 7
10unsigned short cent_tuning[100] = 8static unsigned short cent_tuning[100] =
11{ 9{
12/* 0 */ 10000, 10006, 10012, 10017, 10023, 10029, 10035, 10041, 10/* 0 */ 10000, 10006, 10012, 10017, 10023, 10029, 10035, 10041,
13/* 8 */ 10046, 10052, 10058, 10064, 10070, 10075, 10081, 10087, 11/* 8 */ 10046, 10052, 10058, 10064, 10070, 10075, 10081, 10087,
@@ -23,7 +21,3 @@ unsigned short cent_tuning[100] =
23/* 88 */ 10521, 10528, 10534, 10540, 10546, 10552, 10558, 10564, 21/* 88 */ 10521, 10528, 10534, 10540, 10546, 10552, 10558, 10564,
24/* 96 */ 10570, 10576, 10582, 10589 22/* 96 */ 10570, 10576, 10582, 10589
25}; 23};
26#else
27extern unsigned short semitone_tuning[24];
28extern unsigned short cent_tuning[100];
29#endif
diff --git a/sound/oss/wavfront.c b/sound/oss/wavfront.c
deleted file mode 100644
index 38d9aa0f16a5..000000000000
--- a/sound/oss/wavfront.c
+++ /dev/null
@@ -1,3553 +0,0 @@
1/* -*- linux-c -*-
2 *
3 * sound/wavfront.c
4 *
5 * A Linux driver for Turtle Beach WaveFront Series (Maui, Tropez, Tropez Plus)
6 *
7 * This driver supports the onboard wavetable synthesizer (an ICS2115),
8 * including patch, sample and program loading and unloading, conversion
9 * of GUS patches during loading, and full user-level access to all
10 * WaveFront commands. It tries to provide semi-intelligent patch and
11 * sample management as well.
12 *
13 * It also provides support for the ICS emulation of an MPU-401. Full
14 * support for the ICS emulation's "virtual MIDI mode" is provided in
15 * wf_midi.c.
16 *
17 * Support is also provided for the Tropez Plus' onboard FX processor,
18 * a Yamaha YSS225. Currently, code exists to configure the YSS225,
19 * and there is an interface allowing tweaking of any of its memory
20 * addresses. However, I have been unable to decipher the logical
21 * positioning of the configuration info for various effects, so for
22 * now, you just get the YSS225 in the same state as Turtle Beach's
23 * "SETUPSND.EXE" utility leaves it.
24 *
25 * The boards' DAC/ADC (a Crystal CS4232) is supported by cs4232.[co],
26 * This chip also controls the configuration of the card: the wavefront
27 * synth is logical unit 4.
28 *
29 *
30 * Supported devices:
31 *
32 * /dev/dsp - using cs4232+ad1848 modules, OSS compatible
33 * /dev/midiNN and /dev/midiNN+1 - using wf_midi code, OSS compatible
34 * /dev/synth00 - raw synth interface
35 *
36 **********************************************************************
37 *
38 * Copyright (C) by Paul Barton-Davis 1998
39 *
40 * Some portions of this file are taken from work that is
41 * copyright (C) by Hannu Savolainen 1993-1996
42 *
43 * Although the relevant code here is all new, the handling of
44 * sample/alias/multi- samples is entirely based on a driver by Matt
45 * Martin and Rutger Nijlunsing which demonstrated how to get things
46 * to work correctly. The GUS patch loading code has been almost
47 * unaltered by me, except to fit formatting and function names in the
48 * rest of the file. Many thanks to them.
49 *
50 * Appreciation and thanks to Hannu Savolainen for his early work on the Maui
51 * driver, and answering a few questions while this one was developed.
52 *
53 * Absolutely NO thanks to Turtle Beach/Voyetra and Yamaha for their
54 * complete lack of help in developing this driver, and in particular
55 * for their utter silence in response to questions about undocumented
56 * aspects of configuring a WaveFront soundcard, particularly the
57 * effects processor.
58 *
59 * $Id: wavfront.c,v 0.7 1998/09/09 15:47:36 pbd Exp $
60 *
61 * This program is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
62 * Version 2 (June 1991). See the "COPYING" file distributed with this software
63 * for more info.
64 *
65 * Changes:
66 * 11-10-2000 Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
67 * Added some __init and __initdata to entries in yss225.c
68 */
69
70#include <linux/module.h>
71
72#include <linux/kernel.h>
73#include <linux/init.h>
74#include <linux/sched.h>
75#include <linux/smp_lock.h>
76#include <linux/ptrace.h>
77#include <linux/fcntl.h>
78#include <linux/syscalls.h>
79#include <linux/ioport.h>
80#include <linux/spinlock.h>
81#include <linux/interrupt.h>
82
83#include <linux/delay.h>
84
85#include "sound_config.h"
86
87#include <linux/wavefront.h>
88
89#define _MIDI_SYNTH_C_
90#define MIDI_SYNTH_NAME "WaveFront MIDI"
91#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
92#include "midi_synth.h"
93
94/* Compile-time control of the extent to which OSS is supported.
95
96 I consider /dev/sequencer to be an anachronism, but given its
97 widespread usage by various Linux MIDI software, it seems worth
98 offering support to it if it's not too painful. Instead of using
99 /dev/sequencer, I recommend:
100
101 for synth programming and patch loading: /dev/synthNN
102 for kernel-synchronized MIDI sequencing: the ALSA sequencer
103 for direct MIDI control: /dev/midiNN
104
105 I have never tried static compilation into the kernel. The #if's
106 for this are really just notes to myself about what the code is
107 for.
108*/
109
110#define OSS_SUPPORT_SEQ 0x1 /* use of /dev/sequencer */
111#define OSS_SUPPORT_STATIC_INSTALL 0x2 /* static compilation into kernel */
112
113#define OSS_SUPPORT_LEVEL 0x1 /* just /dev/sequencer for now */
114
115#if OSS_SUPPORT_LEVEL & OSS_SUPPORT_SEQ
116static int (*midi_load_patch) (int devno, int format, const char __user *addr,
117 int offs, int count, int pmgr_flag) = NULL;
118#endif /* OSS_SUPPORT_SEQ */
119
120/* if WF_DEBUG not defined, no run-time debugging messages will
121 be available via the debug flag setting. Given the current
122 beta state of the driver, this will remain set until a future
123 version.
124*/
125
126#define WF_DEBUG 1
127
128#ifdef WF_DEBUG
129
130/* Thank goodness for gcc's preprocessor ... */
131
132#define DPRINT(cond, format, args...) \
133 if ((dev.debug & (cond)) == (cond)) { \
134 printk (KERN_DEBUG LOGNAME format, ## args); \
135 }
136#else
137#define DPRINT(cond, format, args...)
138#endif
139
140#define LOGNAME "WaveFront: "
141
142/* bitmasks for WaveFront status port value */
143
144#define STAT_RINTR_ENABLED 0x01
145#define STAT_CAN_READ 0x02
146#define STAT_INTR_READ 0x04
147#define STAT_WINTR_ENABLED 0x10
148#define STAT_CAN_WRITE 0x20
149#define STAT_INTR_WRITE 0x40
150
151/*** Module-accessible parameters ***************************************/
152
153static int wf_raw; /* we normally check for "raw state" to firmware
154 loading. if set, then during driver loading, the
155 state of the board is ignored, and we reset the
156 board and load the firmware anyway.
157 */
158
159static int fx_raw = 1; /* if this is zero, we'll leave the FX processor in
160 whatever state it is when the driver is loaded.
161 The default is to download the microprogram and
162 associated coefficients to set it up for "default"
163 operation, whatever that means.
164 */
165
166static int debug_default; /* you can set this to control debugging
167 during driver loading. it takes any combination
168 of the WF_DEBUG_* flags defined in
169 wavefront.h
170 */
171
172/* XXX this needs to be made firmware and hardware version dependent */
173
174static char *ospath = "/etc/sound/wavefront.os"; /* where to find a processed
175 version of the WaveFront OS
176 */
177
178static int wait_polls = 2000; /* This is a number of tries we poll the
179 status register before resorting to sleeping.
180 WaveFront being an ISA card each poll takes
181 about 1.2us. So before going to
182 sleep we wait up to 2.4ms in a loop.
183 */
184
185static int sleep_length = HZ/100; /* This says how long we're going to
186 sleep between polls.
187 10ms sounds reasonable for fast response.
188 */
189
190static int sleep_tries = 50; /* Wait for status 0.5 seconds total. */
191
192static int reset_time = 2; /* hundreths of a second we wait after a HW reset for
193 the expected interrupt.
194 */
195
196static int ramcheck_time = 20; /* time in seconds to wait while ROM code
197 checks on-board RAM.
198 */
199
200static int osrun_time = 10; /* time in seconds we wait for the OS to
201 start running.
202 */
203
204module_param(wf_raw, int, 0);
205module_param(fx_raw, int, 0);
206module_param(debug_default, int, 0);
207module_param(wait_polls, int, 0);
208module_param(sleep_length, int, 0);
209module_param(sleep_tries, int, 0);
210module_param(ospath, charp, 0);
211module_param(reset_time, int, 0);
212module_param(ramcheck_time, int, 0);
213module_param(osrun_time, int, 0);
214
215/***************************************************************************/
216
217/* Note: because this module doesn't export any symbols, this really isn't
218 a global variable, even if it looks like one. I was quite confused by
219 this when I started writing this as a (newer) module -- pbd.
220*/
221
222struct wf_config {
223 int devno; /* device number from kernel */
224 int irq; /* "you were one, one of the few ..." */
225 int base; /* low i/o port address */
226
227#define mpu_data_port base
228#define mpu_command_port base + 1 /* write semantics */
229#define mpu_status_port base + 1 /* read semantics */
230#define data_port base + 2
231#define status_port base + 3 /* read semantics */
232#define control_port base + 3 /* write semantics */
233#define block_port base + 4 /* 16 bit, writeonly */
234#define last_block_port base + 6 /* 16 bit, writeonly */
235
236 /* FX ports. These are mapped through the ICS2115 to the YS225.
237 The ICS2115 takes care of flipping the relevant pins on the
238 YS225 so that access to each of these ports does the right
239 thing. Note: these are NOT documented by Turtle Beach.
240 */
241
242#define fx_status base + 8
243#define fx_op base + 8
244#define fx_lcr base + 9
245#define fx_dsp_addr base + 0xa
246#define fx_dsp_page base + 0xb
247#define fx_dsp_lsb base + 0xc
248#define fx_dsp_msb base + 0xd
249#define fx_mod_addr base + 0xe
250#define fx_mod_data base + 0xf
251
252 volatile int irq_ok; /* set by interrupt handler */
253 volatile int irq_cnt; /* ditto */
254 int opened; /* flag, holds open(2) mode */
255 char debug; /* debugging flags */
256 int freemem; /* installed RAM, in bytes */
257
258 int synth_dev; /* devno for "raw" synth */
259 int mididev; /* devno for internal MIDI */
260 int ext_mididev; /* devno for external MIDI */
261 int fx_mididev; /* devno for FX MIDI interface */
262#if OSS_SUPPORT_LEVEL & OSS_SUPPORT_SEQ
263 int oss_dev; /* devno for OSS sequencer synth */
264#endif /* OSS_SUPPORT_SEQ */
265
266 char fw_version[2]; /* major = [0], minor = [1] */
267 char hw_version[2]; /* major = [0], minor = [1] */
268 char israw; /* needs Motorola microcode */
269 char has_fx; /* has FX processor (Tropez+) */
270 char prog_status[WF_MAX_PROGRAM]; /* WF_SLOT_* */
271 char patch_status[WF_MAX_PATCH]; /* WF_SLOT_* */
272 char sample_status[WF_MAX_SAMPLE]; /* WF_ST_* | WF_SLOT_* */
273 int samples_used; /* how many */
274 char interrupts_on; /* h/w MPU interrupts enabled ? */
275 char rom_samples_rdonly; /* can we write on ROM samples */
276 wait_queue_head_t interrupt_sleeper;
277} dev;
278
279static DEFINE_SPINLOCK(lock);
280static int detect_wffx(void);
281static int wffx_ioctl (wavefront_fx_info *);
282static int wffx_init (void);
283
284static int wavefront_delete_sample (int sampnum);
285static int wavefront_find_free_sample (void);
286
287/* From wf_midi.c */
288
289extern int virtual_midi_enable (void);
290extern int virtual_midi_disable (void);
291extern int detect_wf_mpu (int, int);
292extern int install_wf_mpu (void);
293extern int uninstall_wf_mpu (void);
294
295typedef struct {
296 int cmd;
297 char *action;
298 unsigned int read_cnt;
299 unsigned int write_cnt;
300 int need_ack;
301} wavefront_command;
302
303static struct {
304 int errno;
305 const char *errstr;
306} wavefront_errors[] = {
307 { 0x01, "Bad sample number" },
308 { 0x02, "Out of sample memory" },
309 { 0x03, "Bad patch number" },
310 { 0x04, "Error in number of voices" },
311 { 0x06, "Sample load already in progress" },
312 { 0x0B, "No sample load request pending" },
313 { 0x0E, "Bad MIDI channel number" },
314 { 0x10, "Download Record Error" },
315 { 0x80, "Success" },
316 { 0 }
317};
318
319#define NEEDS_ACK 1
320
321static wavefront_command wavefront_commands[] = {
322 { WFC_SET_SYNTHVOL, "set synthesizer volume", 0, 1, NEEDS_ACK },
323 { WFC_GET_SYNTHVOL, "get synthesizer volume", 1, 0, 0},
324 { WFC_SET_NVOICES, "set number of voices", 0, 1, NEEDS_ACK },
325 { WFC_GET_NVOICES, "get number of voices", 1, 0, 0 },
326 { WFC_SET_TUNING, "set synthesizer tuning", 0, 2, NEEDS_ACK },
327 { WFC_GET_TUNING, "get synthesizer tuning", 2, 0, 0 },
328 { WFC_DISABLE_CHANNEL, "disable synth channel", 0, 1, NEEDS_ACK },
329 { WFC_ENABLE_CHANNEL, "enable synth channel", 0, 1, NEEDS_ACK },
330 { WFC_GET_CHANNEL_STATUS, "get synth channel status", 3, 0, 0 },
331 { WFC_MISYNTH_OFF, "disable midi-in to synth", 0, 0, NEEDS_ACK },
332 { WFC_MISYNTH_ON, "enable midi-in to synth", 0, 0, NEEDS_ACK },
333 { WFC_VMIDI_ON, "enable virtual midi mode", 0, 0, NEEDS_ACK },
334 { WFC_VMIDI_OFF, "disable virtual midi mode", 0, 0, NEEDS_ACK },
335 { WFC_MIDI_STATUS, "report midi status", 1, 0, 0 },
336 { WFC_FIRMWARE_VERSION, "report firmware version", 2, 0, 0 },
337 { WFC_HARDWARE_VERSION, "report hardware version", 2, 0, 0 },
338 { WFC_GET_NSAMPLES, "report number of samples", 2, 0, 0 },
339 { WFC_INSTOUT_LEVELS, "report instantaneous output levels", 7, 0, 0 },
340 { WFC_PEAKOUT_LEVELS, "report peak output levels", 7, 0, 0 },
341 { WFC_DOWNLOAD_SAMPLE, "download sample",
342 0, WF_SAMPLE_BYTES, NEEDS_ACK },
343 { WFC_DOWNLOAD_BLOCK, "download block", 0, 0, NEEDS_ACK},
344 { WFC_DOWNLOAD_SAMPLE_HEADER, "download sample header",
345 0, WF_SAMPLE_HDR_BYTES, NEEDS_ACK },
346 { WFC_UPLOAD_SAMPLE_HEADER, "upload sample header", 13, 2, 0 },
347
348 /* This command requires a variable number of bytes to be written.
349 There is a hack in wavefront_cmd() to support this. The actual
350 count is passed in as the read buffer ptr, cast appropriately.
351 Ugh.
352 */
353
354 { WFC_DOWNLOAD_MULTISAMPLE, "download multisample", 0, 0, NEEDS_ACK },
355
356 /* This one is a hack as well. We just read the first byte of the
357 response, don't fetch an ACK, and leave the rest to the
358 calling function. Ugly, ugly, ugly.
359 */
360
361 { WFC_UPLOAD_MULTISAMPLE, "upload multisample", 2, 1, 0 },
362 { WFC_DOWNLOAD_SAMPLE_ALIAS, "download sample alias",
363 0, WF_ALIAS_BYTES, NEEDS_ACK },
364 { WFC_UPLOAD_SAMPLE_ALIAS, "upload sample alias", WF_ALIAS_BYTES, 2, 0},
365 { WFC_DELETE_SAMPLE, "delete sample", 0, 2, NEEDS_ACK },
366 { WFC_IDENTIFY_SAMPLE_TYPE, "identify sample type", 5, 2, 0 },
367 { WFC_UPLOAD_SAMPLE_PARAMS, "upload sample parameters" },
368 { WFC_REPORT_FREE_MEMORY, "report free memory", 4, 0, 0 },
369 { WFC_DOWNLOAD_PATCH, "download patch", 0, 134, NEEDS_ACK },
370 { WFC_UPLOAD_PATCH, "upload patch", 132, 2, 0 },
371 { WFC_DOWNLOAD_PROGRAM, "download program", 0, 33, NEEDS_ACK },
372 { WFC_UPLOAD_PROGRAM, "upload program", 32, 1, 0 },
373 { WFC_DOWNLOAD_EDRUM_PROGRAM, "download enhanced drum program", 0, 9,
374 NEEDS_ACK},
375 { WFC_UPLOAD_EDRUM_PROGRAM, "upload enhanced drum program", 8, 1, 0},
376 { WFC_SET_EDRUM_CHANNEL, "set enhanced drum program channel",
377 0, 1, NEEDS_ACK },
378 { WFC_DISABLE_DRUM_PROGRAM, "disable drum program", 0, 1, NEEDS_ACK },
379 { WFC_REPORT_CHANNEL_PROGRAMS, "report channel program numbers",
380 32, 0, 0 },
381 { WFC_NOOP, "the no-op command", 0, 0, NEEDS_ACK },
382 { 0x00 }
383};
384
385static const char *
386wavefront_errorstr (int errnum)
387
388{
389 int i;
390
391 for (i = 0; wavefront_errors[i].errstr; i++) {
392 if (wavefront_errors[i].errno == errnum) {
393 return wavefront_errors[i].errstr;
394 }
395 }
396
397 return "Unknown WaveFront error";
398}
399
400static wavefront_command *
401wavefront_get_command (int cmd)
402
403{
404 int i;
405
406 for (i = 0; wavefront_commands[i].cmd != 0; i++) {
407 if (cmd == wavefront_commands[i].cmd) {
408 return &wavefront_commands[i];
409 }
410 }
411
412 return (wavefront_command *) 0;
413}
414
415static inline int
416wavefront_status (void)
417
418{
419 return inb (dev.status_port);
420}
421
422static int
423wavefront_wait (int mask)
424
425{
426 int i;
427
428 for (i = 0; i < wait_polls; i++)
429 if (wavefront_status() & mask)
430 return 1;
431
432 for (i = 0; i < sleep_tries; i++) {
433
434 if (wavefront_status() & mask) {
435 set_current_state(TASK_RUNNING);
436 return 1;
437 }
438
439 set_current_state(TASK_INTERRUPTIBLE);
440 schedule_timeout(sleep_length);
441 if (signal_pending(current))
442 break;
443 }
444
445 set_current_state(TASK_RUNNING);
446 return 0;
447}
448
449static int
450wavefront_read (void)
451
452{
453 if (wavefront_wait (STAT_CAN_READ))
454 return inb (dev.data_port);
455
456 DPRINT (WF_DEBUG_DATA, "read timeout.\n");
457
458 return -1;
459}
460
461static int
462wavefront_write (unsigned char data)
463
464{
465 if (wavefront_wait (STAT_CAN_WRITE)) {
466 outb (data, dev.data_port);
467 return 0;
468 }
469
470 DPRINT (WF_DEBUG_DATA, "write timeout.\n");
471
472 return -1;
473}
474
475static int
476wavefront_cmd (int cmd, unsigned char *rbuf, unsigned char *wbuf)
477
478{
479 int ack;
480 int i;
481 int c;
482 wavefront_command *wfcmd;
483
484 if ((wfcmd = wavefront_get_command (cmd)) == (wavefront_command *) 0) {
485 printk (KERN_WARNING LOGNAME "command 0x%x not supported.\n",
486 cmd);
487 return 1;
488 }
489
490 /* Hack to handle the one variable-size write command. See
491 wavefront_send_multisample() for the other half of this
492 gross and ugly strategy.
493 */
494
495 if (cmd == WFC_DOWNLOAD_MULTISAMPLE) {
496 wfcmd->write_cnt = (unsigned int) rbuf;
497 rbuf = NULL;
498 }
499
500 DPRINT (WF_DEBUG_CMD, "0x%x [%s] (%d,%d,%d)\n",
501 cmd, wfcmd->action, wfcmd->read_cnt,
502 wfcmd->write_cnt, wfcmd->need_ack);
503
504 if (wavefront_write (cmd)) {
505 DPRINT ((WF_DEBUG_IO|WF_DEBUG_CMD), "cannot request "
506 "0x%x [%s].\n",
507 cmd, wfcmd->action);
508 return 1;
509 }
510
511 if (wfcmd->write_cnt > 0) {
512 DPRINT (WF_DEBUG_DATA, "writing %d bytes "
513 "for 0x%x\n",
514 wfcmd->write_cnt, cmd);
515
516 for (i = 0; i < wfcmd->write_cnt; i++) {
517 if (wavefront_write (wbuf[i])) {
518 DPRINT (WF_DEBUG_IO, "bad write for byte "
519 "%d of 0x%x [%s].\n",
520 i, cmd, wfcmd->action);
521 return 1;
522 }
523
524 DPRINT (WF_DEBUG_DATA, "write[%d] = 0x%x\n",
525 i, wbuf[i]);
526 }
527 }
528
529 if (wfcmd->read_cnt > 0) {
530 DPRINT (WF_DEBUG_DATA, "reading %d ints "
531 "for 0x%x\n",
532 wfcmd->read_cnt, cmd);
533
534 for (i = 0; i < wfcmd->read_cnt; i++) {
535
536 if ((c = wavefront_read()) == -1) {
537 DPRINT (WF_DEBUG_IO, "bad read for byte "
538 "%d of 0x%x [%s].\n",
539 i, cmd, wfcmd->action);
540 return 1;
541 }
542
543 /* Now handle errors. Lots of special cases here */
544
545 if (c == 0xff) {
546 if ((c = wavefront_read ()) == -1) {
547 DPRINT (WF_DEBUG_IO, "bad read for "
548 "error byte at "
549 "read byte %d "
550 "of 0x%x [%s].\n",
551 i, cmd,
552 wfcmd->action);
553 return 1;
554 }
555
556 /* Can you believe this madness ? */
557
558 if (c == 1 &&
559 wfcmd->cmd == WFC_IDENTIFY_SAMPLE_TYPE) {
560 rbuf[0] = WF_ST_EMPTY;
561 return (0);
562
563 } else if (c == 3 &&
564 wfcmd->cmd == WFC_UPLOAD_PATCH) {
565
566 return 3;
567
568 } else if (c == 1 &&
569 wfcmd->cmd == WFC_UPLOAD_PROGRAM) {
570
571 return 1;
572
573 } else {
574
575 DPRINT (WF_DEBUG_IO, "error %d (%s) "
576 "during "
577 "read for byte "
578 "%d of 0x%x "
579 "[%s].\n",
580 c,
581 wavefront_errorstr (c),
582 i, cmd,
583 wfcmd->action);
584 return 1;
585
586 }
587
588 } else {
589 rbuf[i] = c;
590 }
591
592 DPRINT (WF_DEBUG_DATA, "read[%d] = 0x%x\n",i, rbuf[i]);
593 }
594 }
595
596 if ((wfcmd->read_cnt == 0 && wfcmd->write_cnt == 0) || wfcmd->need_ack) {
597
598 DPRINT (WF_DEBUG_CMD, "reading ACK for 0x%x\n", cmd);
599
600 /* Some commands need an ACK, but return zero instead
601 of the standard value.
602 */
603
604 if ((ack = wavefront_read()) == 0) {
605 ack = WF_ACK;
606 }
607
608 if (ack != WF_ACK) {
609 if (ack == -1) {
610 DPRINT (WF_DEBUG_IO, "cannot read ack for "
611 "0x%x [%s].\n",
612 cmd, wfcmd->action);
613 return 1;
614
615 } else {
616 int err = -1; /* something unknown */
617
618 if (ack == 0xff) { /* explicit error */
619
620 if ((err = wavefront_read ()) == -1) {
621 DPRINT (WF_DEBUG_DATA,
622 "cannot read err "
623 "for 0x%x [%s].\n",
624 cmd, wfcmd->action);
625 }
626 }
627
628 DPRINT (WF_DEBUG_IO, "0x%x [%s] "
629 "failed (0x%x, 0x%x, %s)\n",
630 cmd, wfcmd->action, ack, err,
631 wavefront_errorstr (err));
632
633 return -err;
634 }
635 }
636
637 DPRINT (WF_DEBUG_DATA, "ack received "
638 "for 0x%x [%s]\n",
639 cmd, wfcmd->action);
640 } else {
641
642 DPRINT (WF_DEBUG_CMD, "0x%x [%s] does not need "
643 "ACK (%d,%d,%d)\n",
644 cmd, wfcmd->action, wfcmd->read_cnt,
645 wfcmd->write_cnt, wfcmd->need_ack);
646 }
647
648 return 0;
649
650}
651
652/***********************************************************************
653WaveFront: data munging
654
655Things here are weird. All data written to the board cannot
656have its most significant bit set. Any data item with values
657potentially > 0x7F (127) must be split across multiple bytes.
658
659Sometimes, we need to munge numeric values that are represented on
660the x86 side as 8-32 bit values. Sometimes, we need to munge data
661that is represented on the x86 side as an array of bytes. The most
662efficient approach to handling both cases seems to be to use 2
663different functions for munging and 2 for de-munging. This avoids
664weird casting and worrying about bit-level offsets.
665
666**********************************************************************/
667
668static
669unsigned char *
670munge_int32 (unsigned int src,
671 unsigned char *dst,
672 unsigned int dst_size)
673{
674 int i;
675
676 for (i = 0;i < dst_size; i++) {
677 *dst = src & 0x7F; /* Mask high bit of LSB */
678 src = src >> 7; /* Rotate Right 7 bits */
679 /* Note: we leave the upper bits in place */
680
681 dst++;
682 };
683 return dst;
684};
685
686static int
687demunge_int32 (unsigned char* src, int src_size)
688
689{
690 int i;
691 int outval = 0;
692
693 for (i = src_size - 1; i >= 0; i--) {
694 outval=(outval<<7)+src[i];
695 }
696
697 return outval;
698};
699
700static
701unsigned char *
702munge_buf (unsigned char *src, unsigned char *dst, unsigned int dst_size)
703
704{
705 int i;
706 unsigned int last = dst_size / 2;
707
708 for (i = 0; i < last; i++) {
709 *dst++ = src[i] & 0x7f;
710 *dst++ = src[i] >> 7;
711 }
712 return dst;
713}
714
715static
716unsigned char *
717demunge_buf (unsigned char *src, unsigned char *dst, unsigned int src_bytes)
718
719{
720 int i;
721 unsigned char *end = src + src_bytes;
722
723 end = src + src_bytes;
724
725 /* NOTE: src and dst *CAN* point to the same address */
726
727 for (i = 0; src != end; i++) {
728 dst[i] = *src++;
729 dst[i] |= (*src++)<<7;
730 }
731
732 return dst;
733}
734
735/***********************************************************************
736WaveFront: sample, patch and program management.
737***********************************************************************/
738
739static int
740wavefront_delete_sample (int sample_num)
741
742{
743 unsigned char wbuf[2];
744 int x;
745
746 wbuf[0] = sample_num & 0x7f;
747 wbuf[1] = sample_num >> 7;
748
749 if ((x = wavefront_cmd (WFC_DELETE_SAMPLE, NULL, wbuf)) == 0) {
750 dev.sample_status[sample_num] = WF_ST_EMPTY;
751 }
752
753 return x;
754}
755
/* Query the board for the type of every sample slot and rebuild
   dev.sample_status[] and dev.samples_used.  If assume_rom is
   non-zero, every filled slot is additionally flagged WF_SLOT_ROM.
   Returns 0 on success, -1 if the sample count request fails.
*/
static int
wavefront_get_sample_status (int assume_rom)

{
	int i;
	unsigned char rbuf[32], wbuf[32];
	unsigned int sc_real, sc_alias, sc_multi;

	/* check sample status */

	if (wavefront_cmd (WFC_GET_NSAMPLES, rbuf, wbuf)) {
		printk (KERN_WARNING LOGNAME "cannot request sample count.\n");
		return -1;
	}

	sc_real = sc_alias = sc_multi = dev.samples_used = 0;

	for (i = 0; i < WF_MAX_SAMPLE; i++) {

		/* slot number as two 7-bit bytes */

		wbuf[0] = i & 0x7f;
		wbuf[1] = i >> 7;

		if (wavefront_cmd (WFC_IDENTIFY_SAMPLE_TYPE, rbuf, wbuf)) {
			printk (KERN_WARNING LOGNAME
				"cannot identify sample "
				"type of slot %d\n", i);
			dev.sample_status[i] = WF_ST_EMPTY;
			continue;
		}

		dev.sample_status[i] = (WF_SLOT_FILLED|rbuf[0]);

		if (assume_rom) {
			dev.sample_status[i] |= WF_SLOT_ROM;
		}

		switch (rbuf[0] & WF_ST_MASK) {
		case WF_ST_SAMPLE:
			sc_real++;
			break;
		case WF_ST_MULTISAMPLE:
			sc_multi++;
			break;
		case WF_ST_ALIAS:
			sc_alias++;
			break;
		case WF_ST_EMPTY:
			break;

		default:
			printk (KERN_WARNING LOGNAME "unknown sample type for "
				"slot %d (0x%x)\n",
				i, rbuf[0]);
		}

		/* NOTE(review): compares the raw type byte rather than
		   the WF_ST_MASK-masked value used by the switch above
		   -- confirm this is intentional.
		*/

		if (rbuf[0] != WF_ST_EMPTY) {
			dev.samples_used++;
		}
	}

	printk (KERN_INFO LOGNAME
		"%d samples used (%d real, %d aliases, %d multi), "
		"%d empty\n", dev.samples_used, sc_real, sc_alias, sc_multi,
		WF_MAX_SAMPLE - dev.samples_used);


	return (0);

}
825
826static int
827wavefront_get_patch_status (void)
828
829{
830 unsigned char patchbuf[WF_PATCH_BYTES];
831 unsigned char patchnum[2];
832 wavefront_patch *p;
833 int i, x, cnt, cnt2;
834
835 for (i = 0; i < WF_MAX_PATCH; i++) {
836 patchnum[0] = i & 0x7f;
837 patchnum[1] = i >> 7;
838
839 if ((x = wavefront_cmd (WFC_UPLOAD_PATCH, patchbuf,
840 patchnum)) == 0) {
841
842 dev.patch_status[i] |= WF_SLOT_FILLED;
843 p = (wavefront_patch *) patchbuf;
844 dev.sample_status
845 [p->sample_number|(p->sample_msb<<7)] |=
846 WF_SLOT_USED;
847
848 } else if (x == 3) { /* Bad patch number */
849 dev.patch_status[i] = 0;
850 } else {
851 printk (KERN_ERR LOGNAME "upload patch "
852 "error 0x%x\n", x);
853 dev.patch_status[i] = 0;
854 return 1;
855 }
856 }
857
858 /* program status has already filled in slot_used bits */
859
860 for (i = 0, cnt = 0, cnt2 = 0; i < WF_MAX_PATCH; i++) {
861 if (dev.patch_status[i] & WF_SLOT_FILLED) {
862 cnt++;
863 }
864 if (dev.patch_status[i] & WF_SLOT_USED) {
865 cnt2++;
866 }
867
868 }
869 printk (KERN_INFO LOGNAME
870 "%d patch slots filled, %d in use\n", cnt, cnt2);
871
872 return (0);
873}
874
875static int
876wavefront_get_program_status (void)
877
878{
879 unsigned char progbuf[WF_PROGRAM_BYTES];
880 wavefront_program prog;
881 unsigned char prognum;
882 int i, x, l, cnt;
883
884 for (i = 0; i < WF_MAX_PROGRAM; i++) {
885 prognum = i;
886
887 if ((x = wavefront_cmd (WFC_UPLOAD_PROGRAM, progbuf,
888 &prognum)) == 0) {
889
890 dev.prog_status[i] |= WF_SLOT_USED;
891
892 demunge_buf (progbuf, (unsigned char *) &prog,
893 WF_PROGRAM_BYTES);
894
895 for (l = 0; l < WF_NUM_LAYERS; l++) {
896 if (prog.layer[l].mute) {
897 dev.patch_status
898 [prog.layer[l].patch_number] |=
899 WF_SLOT_USED;
900 }
901 }
902 } else if (x == 1) { /* Bad program number */
903 dev.prog_status[i] = 0;
904 } else {
905 printk (KERN_ERR LOGNAME "upload program "
906 "error 0x%x\n", x);
907 dev.prog_status[i] = 0;
908 }
909 }
910
911 for (i = 0, cnt = 0; i < WF_MAX_PROGRAM; i++) {
912 if (dev.prog_status[i]) {
913 cnt++;
914 }
915 }
916
917 printk (KERN_INFO LOGNAME "%d programs slots in use\n", cnt);
918
919 return (0);
920}
921
922static int
923wavefront_send_patch (wavefront_patch_info *header)
924
925{
926 unsigned char buf[WF_PATCH_BYTES+2];
927 unsigned char *bptr;
928
929 DPRINT (WF_DEBUG_LOAD_PATCH, "downloading patch %d\n",
930 header->number);
931
932 dev.patch_status[header->number] |= WF_SLOT_FILLED;
933
934 bptr = buf;
935 bptr = munge_int32 (header->number, buf, 2);
936 munge_buf ((unsigned char *)&header->hdr.p, bptr, WF_PATCH_BYTES);
937
938 if (wavefront_cmd (WFC_DOWNLOAD_PATCH, NULL, buf)) {
939 printk (KERN_ERR LOGNAME "download patch failed\n");
940 return -(EIO);
941 }
942
943 return (0);
944}
945
946static int
947wavefront_send_program (wavefront_patch_info *header)
948
949{
950 unsigned char buf[WF_PROGRAM_BYTES+1];
951 int i;
952
953 DPRINT (WF_DEBUG_LOAD_PATCH, "downloading program %d\n",
954 header->number);
955
956 dev.prog_status[header->number] = WF_SLOT_USED;
957
958 /* XXX need to zero existing SLOT_USED bit for program_status[i]
959 where `i' is the program that's being (potentially) overwritten.
960 */
961
962 for (i = 0; i < WF_NUM_LAYERS; i++) {
963 if (header->hdr.pr.layer[i].mute) {
964 dev.patch_status[header->hdr.pr.layer[i].patch_number] |=
965 WF_SLOT_USED;
966
967 /* XXX need to mark SLOT_USED for sample used by
968 patch_number, but this means we have to load it. Ick.
969 */
970 }
971 }
972
973 buf[0] = header->number;
974 munge_buf ((unsigned char *)&header->hdr.pr, &buf[1], WF_PROGRAM_BYTES);
975
976 if (wavefront_cmd (WFC_DOWNLOAD_PROGRAM, NULL, buf)) {
977 printk (KERN_WARNING LOGNAME "download patch failed\n");
978 return -(EIO);
979 }
980
981 return (0);
982}
983
984static int
985wavefront_freemem (void)
986
987{
988 char rbuf[8];
989
990 if (wavefront_cmd (WFC_REPORT_FREE_MEMORY, rbuf, NULL)) {
991 printk (KERN_WARNING LOGNAME "can't get memory stats.\n");
992 return -1;
993 } else {
994 return demunge_int32 (rbuf, 4);
995 }
996}
997
/* Download a sample -- or only its header, when header->size is zero
   -- to the board.  Sample words are read from the user-space buffer
   `dataptr' and written through the 16-bit block port, with channel
   de-interleaving controlled by the header's channel field; when
   data_is_unsigned is set, the data is converted to signed form on
   the way out.  Returns 0 on success or a negative errno.
*/
static int
wavefront_send_sample (wavefront_patch_info *header,
		       UINT16 __user *dataptr,
		       int data_is_unsigned)

{
	/* samples are downloaded via a 16-bit wide i/o port
	   (you could think of it as 2 adjacent 8-bit wide ports
	   but its less efficient that way). therefore, all
	   the blocksizes and so forth listed in the documentation,
	   and used conventionally to refer to sample sizes,
	   which are given in 8-bit units (bytes), need to be
	   divided by 2.
	*/

	UINT16 sample_short;
	UINT32 length;
	UINT16 __user *data_end = NULL;
	unsigned int i;
	const int max_blksize = 4096/2;
	unsigned int written;
	unsigned int blocksize;
	int dma_ack;
	int blocknum;
	unsigned char sample_hdr[WF_SAMPLE_HDR_BYTES];
	unsigned char *shptr;
	int skip = 0;
	int initial_skip = 0;

	DPRINT (WF_DEBUG_LOAD_PATCH, "sample %sdownload for slot %d, "
		"type %d, %d bytes from %p\n",
		header->size ? "" : "header ",
		header->number, header->subkey,
		header->size,
		header->dataptr);

	if (header->number == WAVEFRONT_FIND_FREE_SAMPLE_SLOT) {
		int x;

		if ((x = wavefront_find_free_sample ()) < 0) {
			return -ENOMEM;
		}
		printk (KERN_DEBUG LOGNAME "unspecified sample => %d\n", x);
		header->number = x;
	}

	if (header->size) {

		/* XXX it's a debatable point whether or not RDONLY semantics
		   on the ROM samples should cover just the sample data or
		   the sample header. For now, it only covers the sample data,
		   so anyone is free at all times to rewrite sample headers.

		   My reason for this is that we have the sample headers
		   available in the WFB file for General MIDI, and so these
		   can always be reset if needed. The sample data, however,
		   cannot be recovered without a complete reset and firmware
		   reload of the ICS2115, which is a very expensive operation.

		   So, doing things this way allows us to honor the notion of
		   "RESETSAMPLES" reasonably cheaply. Note however, that this
		   is done purely at user level: there is no WFB parser in
		   this driver, and so a complete reset (back to General MIDI,
		   or theoretically some other configuration) is the
		   responsibility of the user level library.

		   To try to do this in the kernel would be a little
		   crazy: we'd need 158K of kernel space just to hold
		   a copy of the patch/program/sample header data.
		*/

		if (dev.rom_samples_rdonly) {
			if (dev.sample_status[header->number] & WF_SLOT_ROM) {
				printk (KERN_ERR LOGNAME "sample slot %d "
					"write protected\n",
					header->number);
				return -EACCES;
			}
		}

		wavefront_delete_sample (header->number);
	}

	if (header->size) {
		dev.freemem = wavefront_freemem ();

		if (dev.freemem < header->size) {
			printk (KERN_ERR LOGNAME
				"insufficient memory to "
				"load %d byte sample.\n",
				header->size);
			return -ENOMEM;
		}

	}

	skip = WF_GET_CHANNEL(&header->hdr.s);

	if (skip > 0 && header->hdr.s.SampleResolution != LINEAR_16BIT) {
		printk (KERN_ERR LOGNAME "channel selection only "
			"possible on 16-bit samples");
		return -(EINVAL);
	}

	/* Translate the channel-selection value into a starting offset
	   (initial_skip) and a per-word stride (skip) through the
	   interleaved source data.
	*/

	switch (skip) {
	case 0:
		initial_skip = 0;
		skip = 1;
		break;
	case 1:
		initial_skip = 0;
		skip = 2;
		break;
	case 2:
		initial_skip = 1;
		skip = 2;
		break;
	case 3:
		initial_skip = 2;
		skip = 3;
		break;
	case 4:
		initial_skip = 3;
		skip = 4;
		break;
	case 5:
		initial_skip = 4;
		skip = 5;
		break;
	case 6:
		initial_skip = 5;
		skip = 6;
		break;
	}

	DPRINT (WF_DEBUG_LOAD_PATCH, "channel selection: %d => "
		"initial skip = %d, skip = %d\n",
		WF_GET_CHANNEL (&header->hdr.s),
		initial_skip, skip);

	/* Be safe, and zero the "Unused" bits ... */

	WF_SET_CHANNEL(&header->hdr.s, 0);

	/* adjust size for 16 bit samples by dividing by two. We always
	   send 16 bits per write, even for 8 bit samples, so the length
	   is always half the size of the sample data in bytes.
	*/

	length = header->size / 2;

	/* the data we're sent has not been munged, and in fact, the
	   header we have to send isn't just a munged copy either.
	   so, build the sample header right here.
	*/

	shptr = &sample_hdr[0];

	shptr = munge_int32 (header->number, shptr, 2);

	if (header->size) {
		shptr = munge_int32 (length, shptr, 4);
	}

	/* Yes, a 4 byte result doesn't contain all of the offset bits,
	   but the offset only uses 24 bits.
	*/

	shptr = munge_int32 (*((UINT32 *) &header->hdr.s.sampleStartOffset),
			     shptr, 4);
	shptr = munge_int32 (*((UINT32 *) &header->hdr.s.loopStartOffset),
			     shptr, 4);
	shptr = munge_int32 (*((UINT32 *) &header->hdr.s.loopEndOffset),
			     shptr, 4);
	shptr = munge_int32 (*((UINT32 *) &header->hdr.s.sampleEndOffset),
			     shptr, 4);

	/* This one is truly weird. What kind of weirdo decided that in
	   a system dominated by 16 and 32 bit integers, they would use
	   a just 12 bits ?
	*/

	shptr = munge_int32 (header->hdr.s.FrequencyBias, shptr, 3);

	/* Why is this nybblified, when the MSB is *always* zero ?
	   Anyway, we can't take address of bitfield, so make a
	   good-faith guess at where it starts.
	*/

	shptr = munge_int32 (*(&header->hdr.s.FrequencyBias+1),
			     shptr, 2);

	if (wavefront_cmd (header->size ?
			   WFC_DOWNLOAD_SAMPLE : WFC_DOWNLOAD_SAMPLE_HEADER,
			   NULL, sample_hdr)) {
		printk (KERN_WARNING LOGNAME "sample %sdownload refused.\n",
			header->size ? "" : "header ");
		return -(EIO);
	}

	if (header->size == 0) {
		goto sent; /* Sorry. Just had to have one somewhere */
	}

	data_end = dataptr + length;

	/* Do any initial skip over an unused channel's data */

	dataptr += initial_skip;

	for (written = 0, blocknum = 0;
	     written < length; written += max_blksize, blocknum++) {

		if ((length - written) > max_blksize) {
			blocksize = max_blksize;
		} else {
			/* round up to a multiple of 8 16-bit words
			   (i.e. 16 bytes)
			*/
			blocksize = ((length-written+7)&~0x7);
		}

		if (wavefront_cmd (WFC_DOWNLOAD_BLOCK, NULL, NULL)) {
			printk (KERN_WARNING LOGNAME "download block "
				"request refused.\n");
			return -(EIO);
		}

		for (i = 0; i < blocksize; i++) {

			if (dataptr < data_end) {

				/* NOTE(review): __get_user's return
				   value is ignored here.
				*/

				__get_user (sample_short, dataptr);
				dataptr += skip;

				if (data_is_unsigned) { /* GUS ? */

					if (WF_SAMPLE_IS_8BIT(&header->hdr.s)) {

						/* 8 bit sample
						   resolution, sign
						   extend both bytes.
						*/

						((unsigned char*)
						 &sample_short)[0] += 0x7f;
						((unsigned char*)
						 &sample_short)[1] += 0x7f;

					} else {

						/* 16 bit sample
						   resolution, sign
						   extend the MSB.
						*/

						sample_short += 0x7fff;
					}
				}

			} else {

				/* In padding section of final block:

				   Don't fetch unsupplied data from
				   user space, just continue with
				   whatever the final value was.
				*/
			}

			if (i < blocksize - 1) {
				outw (sample_short, dev.block_port);
			} else {
				outw (sample_short, dev.last_block_port);
			}
		}

		/* Get "DMA page acknowledge", even though its really
		   nothing to do with DMA at all.
		*/

		if ((dma_ack = wavefront_read ()) != WF_DMA_ACK) {
			if (dma_ack == -1) {
				printk (KERN_ERR LOGNAME "upload sample "
					"DMA ack timeout\n");
				return -(EIO);
			} else {
				printk (KERN_ERR LOGNAME "upload sample "
					"DMA ack error 0x%x\n",
					dma_ack);
				return -(EIO);
			}
		}
	}

	dev.sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_SAMPLE);

	/* Note, label is here because sending the sample header shouldn't
	   alter the sample_status info at all.
	*/

	sent:
	return (0);
}
1300
/* Download a sample alias header: slot `number' becomes an alias for
   OriginalSample, with its own offsets and frequency bias.  Marks the
   slot as a filled alias on success.  Returns 0 or -EIO.
*/
static int
wavefront_send_alias (wavefront_patch_info *header)

{
	unsigned char alias_hdr[WF_ALIAS_BYTES];

	DPRINT (WF_DEBUG_LOAD_PATCH, "download alias, %d is "
		"alias for %d\n",
		header->number,
		header->hdr.a.OriginalSample);

	munge_int32 (header->number, &alias_hdr[0], 2);
	munge_int32 (header->hdr.a.OriginalSample, &alias_hdr[2], 2);
	munge_int32 (*((unsigned int *)&header->hdr.a.sampleStartOffset),
		     &alias_hdr[4], 4);
	munge_int32 (*((unsigned int *)&header->hdr.a.loopStartOffset),
		     &alias_hdr[8], 4);
	munge_int32 (*((unsigned int *)&header->hdr.a.loopEndOffset),
		     &alias_hdr[12], 4);
	munge_int32 (*((unsigned int *)&header->hdr.a.sampleEndOffset),
		     &alias_hdr[16], 4);
	munge_int32 (header->hdr.a.FrequencyBias, &alias_hdr[20], 3);

	/* presumably the bitfields following FrequencyBias, addressed
	   by offset (cf. the same trick in wavefront_send_sample)
	*/

	munge_int32 (*(&header->hdr.a.FrequencyBias+1), &alias_hdr[23], 2);

	if (wavefront_cmd (WFC_DOWNLOAD_SAMPLE_ALIAS, NULL, alias_hdr)) {
		printk (KERN_ERR LOGNAME "download alias failed.\n");
		return -(EIO);
	}

	dev.sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_ALIAS);

	return (0);
}
1334
/* Download a multisample header: the slot number, the log2 sample
   count, and the list of member sample numbers.  Returns 0 or -EIO.
*/
static int
wavefront_send_multisample (wavefront_patch_info *header)
{
	int i;
	int num_samples;
	unsigned char msample_hdr[WF_MSAMPLE_BYTES];

	munge_int32 (header->number, &msample_hdr[0], 2);

	/* You'll recall at this point that the "number of samples" value
	   in a wavefront_multisample struct is actually the log2 of the
	   real number of samples.
	*/

	num_samples = (1<<(header->hdr.ms.NumberOfSamples&7));
	msample_hdr[2] = (unsigned char) header->hdr.ms.NumberOfSamples;

	DPRINT (WF_DEBUG_LOAD_PATCH, "multi %d with %d=%d samples\n",
		header->number,
		header->hdr.ms.NumberOfSamples,
		num_samples);

	for (i = 0; i < num_samples; i++) {
		DPRINT(WF_DEBUG_LOAD_PATCH|WF_DEBUG_DATA, "sample[%d] = %d\n",
		       i, header->hdr.ms.SampleNumber[i]);
		munge_int32 (header->hdr.ms.SampleNumber[i],
			     &msample_hdr[3+(i*2)], 2);
	}

	/* Need a hack here to pass in the number of bytes
	   to be written to the synth. This is ugly, and perhaps
	   one day, I'll fix it.
	*/

	/* NOTE(review): the byte count is smuggled through the rbuf
	   pointer argument as an integer cast to a pointer.
	*/

	if (wavefront_cmd (WFC_DOWNLOAD_MULTISAMPLE,
			   (unsigned char *) ((num_samples*2)+3),
			   msample_hdr)) {
		printk (KERN_ERR LOGNAME "download of multisample failed.\n");
		return -(EIO);
	}

	dev.sample_status[header->number] = (WF_SLOT_FILLED|WF_ST_MULTISAMPLE);

	return (0);
}
1380
/* Upload a multisample from the board: fetch the log2 sample count,
   then read each member sample number as two munged bytes straight
   from the data port.  Returns 0 or -EIO.
*/
static int
wavefront_fetch_multisample (wavefront_patch_info *header)
{
	int i;
	unsigned char log_ns[1];
	unsigned char number[2];
	int num_samples;

	munge_int32 (header->number, number, 2);

	if (wavefront_cmd (WFC_UPLOAD_MULTISAMPLE, log_ns, number)) {
		printk (KERN_ERR LOGNAME "upload multisample failed.\n");
		return -(EIO);
	}

	DPRINT (WF_DEBUG_DATA, "msample %d has %d samples\n",
		header->number, log_ns[0]);

	header->hdr.ms.NumberOfSamples = log_ns[0];

	/* get the number of samples ... */

	num_samples = (1 << log_ns[0]);

	for (i = 0; i < num_samples; i++) {
		s8 d[2];

		/* wavefront_read() returns -1 on failure */

		if ((d[0] = wavefront_read ()) == -1) {
			printk (KERN_ERR LOGNAME "upload multisample failed "
				"during sample loop.\n");
			return -(EIO);
		}

		if ((d[1] = wavefront_read ()) == -1) {
			printk (KERN_ERR LOGNAME "upload multisample failed "
				"during sample loop.\n");
			return -(EIO);
		}

		header->hdr.ms.SampleNumber[i] =
			demunge_int32 ((unsigned char *) d, 2);

		DPRINT (WF_DEBUG_DATA, "msample sample[%d] = %d\n",
			i, header->hdr.ms.SampleNumber[i]);
	}

	return (0);
}
1429
1430
1431static int
1432wavefront_send_drum (wavefront_patch_info *header)
1433
1434{
1435 unsigned char drumbuf[WF_DRUM_BYTES];
1436 wavefront_drum *drum = &header->hdr.d;
1437 int i;
1438
1439 DPRINT (WF_DEBUG_LOAD_PATCH, "downloading edrum for MIDI "
1440 "note %d, patch = %d\n",
1441 header->number, drum->PatchNumber);
1442
1443 drumbuf[0] = header->number & 0x7f;
1444
1445 for (i = 0; i < 4; i++) {
1446 munge_int32 (((unsigned char *)drum)[i], &drumbuf[1+(i*2)], 2);
1447 }
1448
1449 if (wavefront_cmd (WFC_DOWNLOAD_EDRUM_PROGRAM, NULL, drumbuf)) {
1450 printk (KERN_ERR LOGNAME "download drum failed.\n");
1451 return -(EIO);
1452 }
1453
1454 return (0);
1455}
1456
1457static int
1458wavefront_find_free_sample (void)
1459
1460{
1461 int i;
1462
1463 for (i = 0; i < WF_MAX_SAMPLE; i++) {
1464 if (!(dev.sample_status[i] & WF_SLOT_FILLED)) {
1465 return i;
1466 }
1467 }
1468 printk (KERN_WARNING LOGNAME "no free sample slots!\n");
1469 return -1;
1470}
1471
1472static int
1473wavefront_find_free_patch (void)
1474
1475{
1476 int i;
1477
1478 for (i = 0; i < WF_MAX_PATCH; i++) {
1479 if (!(dev.patch_status[i] & WF_SLOT_FILLED)) {
1480 return i;
1481 }
1482 }
1483 printk (KERN_WARNING LOGNAME "no free patch slots!\n");
1484 return -1;
1485}
1486
/* Return 2048*log2(n), approximated by table lookup on the top five
   bits after scaling n down by powers of two (each halving of n adds
   2048, i.e. one whole bit of log2).
*/
static int
log2_2048(int n)

{
	/* tbl[i] == round(2048*log2(i)) for i in [2,31]; entries 0 and
	   1 are placeholders.  static const: built once in rodata
	   instead of being re-initialized on the stack per call.
	*/
	static const int tbl[]={0, 0, 2048, 3246, 4096, 4755, 5294, 5749, 6143,
			6492, 6803, 7084, 7342, 7578, 7797, 8001, 8192,
			8371, 8540, 8699, 8851, 8995, 9132, 9264, 9390,
			9510, 9626, 9738, 9845, 9949, 10049, 10146};
	int i;

	/* FIXME: this is like doing integer math
	   on quantum particles (RuN) */

	i=0;
	while(n>=32*256) {
		n>>=8;
		i+=2048*8;
	}
	while(n>=32) {
		n>>=1;
		i+=2048;
	}
	i+=tbl[n];
	return(i);
}
1514
/* Convert a GUS-format patch supplied from user space into a
   WaveFront sample + patch + program triple and download all three,
   then pan via the internal MIDI device if one is configured.
   Returns 0 on success, or -ENOMEM / -EBUSY / -EFAULT.
*/
static int
wavefront_load_gus_patch (int devno, int format, const char __user *addr,
			  int offs, int count, int pmgr_flag)
{
	struct patch_info guspatch;
	wavefront_patch_info *samp, *pat, *prog;
	wavefront_patch *patp;
	wavefront_sample *sampp;
	wavefront_program *progp;

	int i,base_note;
	long sizeof_patch;
	int rc = -ENOMEM;

	/* one allocation holds all three work structures */

	samp = kmalloc(3 * sizeof(wavefront_patch_info), GFP_KERNEL);
	if (!samp)
		goto free_fail;
	pat = samp + 1;
	prog = pat + 1;

	/* Copy in the header of the GUS patch */

	/* header length == offset of the data[] member */

	sizeof_patch = (long) &guspatch.data[0] - (long) &guspatch;
	if (copy_from_user(&((char *) &guspatch)[offs],
			   &(addr)[offs], sizeof_patch - offs)) {
		rc = -EFAULT;
		goto free_fail;
	}

	if ((i = wavefront_find_free_patch ()) == -1) {
		rc = -EBUSY;
		goto free_fail;
	}
	pat->number = i;
	pat->subkey = WF_ST_PATCH;
	patp = &pat->hdr.p;

	if ((i = wavefront_find_free_sample ()) == -1) {
		rc = -EBUSY;
		goto free_fail;
	}
	samp->number = i;
	samp->subkey = WF_ST_SAMPLE;
	samp->size = guspatch.len;
	sampp = &samp->hdr.s;

	prog->number = guspatch.instr_no;
	progp = &prog->hdr.pr;

	/* Setup the patch structure */

	patp->amplitude_bias=guspatch.volume;
	patp->portamento=0;
	patp->sample_number= samp->number & 0xff;
	patp->sample_msb= samp->number >> 8;
	patp->pitch_bend= /*12*/ 0;
	patp->mono=1;
	patp->retrigger=1;
	patp->nohold=(guspatch.mode & WAVE_SUSTAIN_ON) ? 0:1;
	patp->frequency_bias=0;
	patp->restart=0;
	patp->reuse=0;
	patp->reset_lfo=1;
	patp->fm_src2=0;
	patp->fm_src1=WF_MOD_MOD_WHEEL;
	patp->am_src=WF_MOD_PRESSURE;
	patp->am_amount=127;
	patp->fc1_mod_amount=0;
	patp->fc2_mod_amount=0;
	patp->fm_amount1=0;
	patp->fm_amount2=0;
	patp->envelope1.attack_level=127;
	patp->envelope1.decay1_level=127;
	patp->envelope1.decay2_level=127;
	patp->envelope1.sustain_level=127;
	patp->envelope1.release_level=0;
	patp->envelope2.attack_velocity=127;
	patp->envelope2.attack_level=127;
	patp->envelope2.decay1_level=127;
	patp->envelope2.decay2_level=127;
	patp->envelope2.sustain_level=127;
	patp->envelope2.release_level=0;
	patp->envelope2.attack_velocity=127;
	patp->randomizer=0;

	/* Program for this patch */

	progp->layer[0].patch_number= pat->number; /* XXX is this right ? */
	progp->layer[0].mute=1;
	progp->layer[0].pan_or_mod=1;
	progp->layer[0].pan=7;
	progp->layer[0].mix_level=127 /* guspatch.volume */;
	progp->layer[0].split_type=0;
	progp->layer[0].split_point=0;
	progp->layer[0].play_below=0;

	for (i = 1; i < 4; i++) {
		progp->layer[i].mute=0;
	}

	/* Sample data */

	sampp->SampleResolution=((~guspatch.mode & WAVE_16_BITS)<<1);

	/* Find the nearest MIDI note: guspatch.base_note is compared
	   against note_to_freq(), so it is presumably a frequency
	   value -- confirm against the GUS patch format.
	*/

	for (base_note=0;
	     note_to_freq (base_note) < guspatch.base_note;
	     base_note++);

	if ((guspatch.base_note-note_to_freq(base_note))
	    >(note_to_freq(base_note)-guspatch.base_note))
		base_note++;

	printk(KERN_DEBUG "ref freq=%d,base note=%d\n",
	       guspatch.base_freq,
	       base_note);

	sampp->FrequencyBias = (29550 - log2_2048(guspatch.base_freq)
				+ base_note*171);
	printk(KERN_DEBUG "Freq Bias is %d\n", sampp->FrequencyBias);
	sampp->Loop=(guspatch.mode & WAVE_LOOPING) ? 1:0;
	sampp->sampleStartOffset.Fraction=0;
	sampp->sampleStartOffset.Integer=0;
	sampp->loopStartOffset.Fraction=0;
	sampp->loopStartOffset.Integer=guspatch.loop_start
		>>((guspatch.mode&WAVE_16_BITS) ? 1:0);
	sampp->loopEndOffset.Fraction=0;
	sampp->loopEndOffset.Integer=guspatch.loop_end
		>>((guspatch.mode&WAVE_16_BITS) ? 1:0);
	sampp->sampleEndOffset.Fraction=0;
	sampp->sampleEndOffset.Integer=guspatch.len >> (guspatch.mode&1);
	sampp->Bidirectional=(guspatch.mode&WAVE_BIDIR_LOOP) ? 1:0;
	sampp->Reverse=(guspatch.mode&WAVE_LOOP_BACK) ? 1:0;

	/* Now ship it down */

	/* NOTE(review): the return values of the three sends below are
	   ignored; a failed download still reports success.
	*/

	wavefront_send_sample (samp,
			       (unsigned short __user *) &(addr)[sizeof_patch],
			       (guspatch.mode & WAVE_UNSIGNED) ? 1:0);
	wavefront_send_patch (pat);
	wavefront_send_program (prog);

	/* Now pan as best we can ... use the slave/internal MIDI device
	   number if it exists (since it talks to the WaveFront), or the
	   master otherwise.
	*/

	if (dev.mididev > 0) {
		midi_synth_controller (dev.mididev, guspatch.instr_no, 10,
				       ((guspatch.panning << 4) > 127) ?
				       127 : (guspatch.panning << 4));
	}
	rc = 0;

free_fail:
	kfree(samp);
	return rc;
}
1672
/* Handler for WFCTL_LOAD_SPP: copy the generic part of a
   wavefront_patch_info from user space, then dispatch on its subkey
   to copy the type-specific header and download it to the board.
   Returns 0 or a negative errno.
*/
static int
wavefront_load_patch (const char __user *addr)


{
	wavefront_patch_info header;

	/* Copy everything but the type-specific wavefront_any payload;
	   that part is fetched below via header.hdrptr.
	*/

	if (copy_from_user (&header, addr, sizeof(wavefront_patch_info) -
			    sizeof(wavefront_any))) {
		printk (KERN_WARNING LOGNAME "bad address for load patch.\n");
		return -EFAULT;
	}

	DPRINT (WF_DEBUG_LOAD_PATCH, "download "
		"Sample type: %d "
		"Sample number: %d "
		"Sample size: %d\n",
		header.subkey,
		header.number,
		header.size);

	switch (header.subkey) {
	case WF_ST_SAMPLE:  /* sample or sample_header, based on patch->size */

		if (copy_from_user((unsigned char *) &header.hdr.s,
				   (unsigned char __user *) header.hdrptr,
				   sizeof (wavefront_sample)))
			return -EFAULT;

		return wavefront_send_sample (&header, header.dataptr, 0);

	case WF_ST_MULTISAMPLE:

		/* NOTE(review): copies into hdr.s rather than hdr.ms;
		   presumably hdr is a union so the storage is shared --
		   confirm.
		*/

		if (copy_from_user(&header.hdr.s, header.hdrptr,
				   sizeof(wavefront_multisample)))
			return -EFAULT;

		return wavefront_send_multisample (&header);


	case WF_ST_ALIAS:

		if (copy_from_user(&header.hdr.a, header.hdrptr,
				   sizeof (wavefront_alias)))
			return -EFAULT;

		return wavefront_send_alias (&header);

	case WF_ST_DRUM:
		if (copy_from_user(&header.hdr.d, header.hdrptr,
				   sizeof (wavefront_drum)))
			return -EFAULT;

		return wavefront_send_drum (&header);

	case WF_ST_PATCH:
		if (copy_from_user(&header.hdr.p, header.hdrptr,
				   sizeof (wavefront_patch)))
			return -EFAULT;

		return wavefront_send_patch (&header);

	case WF_ST_PROGRAM:
		if (copy_from_user(&header.hdr.pr, header.hdrptr,
				   sizeof (wavefront_program)))
			return -EFAULT;

		return wavefront_send_program (&header);

	default:
		printk (KERN_ERR LOGNAME "unknown patch type %d.\n",
			header.subkey);
		return -(EINVAL);
	}

	return 0;
}
1750
1751/***********************************************************************
1752WaveFront: /dev/sequencer{,2} and other hardware-dependent interfaces
1753***********************************************************************/
1754
/* Rebuild a wavefront_sample structure, in place in `buf', from the
   raw byte stream the board returns for an Upload Sample Header
   command.
*/
static void
process_sample_hdr (UCHAR8 *buf)

{
	wavefront_sample s;
	UCHAR8 *ptr;

	ptr = buf;

	/* The board doesn't send us an exact copy of a "wavefront_sample"
	   in response to an Upload Sample Header command. Instead, we
	   have to convert the data format back into our data structure,
	   just as in the Download Sample command, where we have to do
	   something very similar in the reverse direction.
	*/

	*((UINT32 *) &s.sampleStartOffset) = demunge_int32 (ptr, 4); ptr += 4;
	*((UINT32 *) &s.loopStartOffset) = demunge_int32 (ptr, 4); ptr += 4;
	*((UINT32 *) &s.loopEndOffset) = demunge_int32 (ptr, 4); ptr += 4;
	*((UINT32 *) &s.sampleEndOffset) = demunge_int32 (ptr, 4); ptr += 4;
	*((UINT32 *) &s.FrequencyBias) = demunge_int32 (ptr, 3); ptr += 3;

	/* unpack the flag bits from the final status byte */

	s.SampleResolution = *ptr & 0x3;
	s.Loop = *ptr & 0x8;
	s.Bidirectional = *ptr & 0x10;
	s.Reverse = *ptr & 0x40;

	/* Now copy it back to where it came from */

	memcpy (buf, (unsigned char *) &s, sizeof (wavefront_sample));
}
1786
/* Dispatch a synth control request.  Some commands are handled fully
   here (interrupt control, driver state queries, multisample upload);
   WFC_UPLOAD_PATCH has its argument munged first.  Everything else
   goes through wavefront_cmd(), followed by post-processing that
   demunges uploaded data for user level.  The per-command result
   goes in wc->status; the return value is 0 or a negative errno.
*/
static int
wavefront_synth_control (int cmd, wavefront_control *wc)

{
	unsigned char patchnumbuf[2];
	int i;

	DPRINT (WF_DEBUG_CMD, "synth control with "
		"cmd 0x%x\n", wc->cmd);

	/* Pre-handling of or for various commands */

	switch (wc->cmd) {
	case WFC_DISABLE_INTERRUPTS:
		printk (KERN_INFO LOGNAME "interrupts disabled.\n");
		outb (0x80|0x20, dev.control_port);
		dev.interrupts_on = 0;
		return 0;

	case WFC_ENABLE_INTERRUPTS:
		printk (KERN_INFO LOGNAME "interrupts enabled.\n");
		outb (0x80|0x40|0x20, dev.control_port);
		dev.interrupts_on = 1;
		return 0;

	case WFC_INTERRUPT_STATUS:
		wc->rbuf[0] = dev.interrupts_on;
		return 0;

	case WFC_ROMSAMPLES_RDONLY:
		dev.rom_samples_rdonly = wc->wbuf[0];
		wc->status = 0;
		return 0;

	case WFC_IDENTIFY_SLOT_TYPE:
		/* slot number arrives as two 7-bit bytes */
		i = wc->wbuf[0] | (wc->wbuf[1] << 7);
		if (i <0 || i >= WF_MAX_SAMPLE) {
			printk (KERN_WARNING LOGNAME "invalid slot ID %d\n",
				i);
			wc->status = EINVAL;
			return 0;
		}
		wc->rbuf[0] = dev.sample_status[i];
		wc->status = 0;
		return 0;

	case WFC_DEBUG_DRIVER:
		dev.debug = wc->wbuf[0];
		printk (KERN_INFO LOGNAME "debug = 0x%x\n", dev.debug);
		return 0;

	case WFC_FX_IOCTL:
		wffx_ioctl ((wavefront_fx_info *) &wc->wbuf[0]);
		return 0;

	case WFC_UPLOAD_PATCH:
		munge_int32 (*((UINT32 *) wc->wbuf), patchnumbuf, 2);
		memcpy (wc->wbuf, patchnumbuf, 2);
		break;

	case WFC_UPLOAD_MULTISAMPLE:
		/* multisamples have to be handled differently, and
		   cannot be dealt with properly by wavefront_cmd() alone.
		*/
		wc->status = wavefront_fetch_multisample
			((wavefront_patch_info *) wc->rbuf);
		return 0;

	case WFC_UPLOAD_SAMPLE_ALIAS:
		printk (KERN_INFO LOGNAME "support for sample alias upload "
			"being considered.\n");
		wc->status = EINVAL;
		return -EINVAL;
	}

	wc->status = wavefront_cmd (wc->cmd, wc->rbuf, wc->wbuf);

	/* Post-handling of certain commands.

	   In particular, if the command was an upload, demunge the data
	   so that the user-level doesn't have to think about it.
	*/

	if (wc->status == 0) {
		switch (wc->cmd) {
		/* intercept any freemem requests so that we know
		   we are always current with the user-level view
		   of things.
		*/

		case WFC_REPORT_FREE_MEMORY:
			dev.freemem = demunge_int32 (wc->rbuf, 4);
			break;

		case WFC_UPLOAD_PATCH:
			demunge_buf (wc->rbuf, wc->rbuf, WF_PATCH_BYTES);
			break;

		case WFC_UPLOAD_PROGRAM:
			demunge_buf (wc->rbuf, wc->rbuf, WF_PROGRAM_BYTES);
			break;

		case WFC_UPLOAD_EDRUM_PROGRAM:
			demunge_buf (wc->rbuf, wc->rbuf, WF_DRUM_BYTES - 1);
			break;

		case WFC_UPLOAD_SAMPLE_HEADER:
			process_sample_hdr (wc->rbuf);
			break;

		case WFC_UPLOAD_SAMPLE_ALIAS:
			printk (KERN_INFO LOGNAME "support for "
				"sample aliases still "
				"being considered.\n");
			break;

		case WFC_VMIDI_OFF:
			if (virtual_midi_disable () < 0) {
				return -(EIO);
			}
			break;

		case WFC_VMIDI_ON:
			if (virtual_midi_enable () < 0) {
				return -(EIO);
			}
			break;
		}
	}

	return 0;
}
1919
1920
1921/***********************************************************************/
1922/* WaveFront: Linux file system interface (for access via raw synth) */
1923/***********************************************************************/
1924
/* Raw device open: records the caller's open flags rather than a
   true open count.  Always succeeds.
*/
static int
wavefront_open (struct inode *inode, struct file *file)
{
	/* XXX fix me */
	dev.opened = file->f_flags;
	return 0;
}
1932
/* Raw device close: clear the open and debug state under the BKL. */
static int
wavefront_release(struct inode *inode, struct file *file)
{
	lock_kernel();
	dev.opened = 0;
	dev.debug = 0;
	unlock_kernel();
	return 0;
}
1942
/* Raw device ioctl: WFCTL_WFCMD runs a synth control block round-trip
   through user space; WFCTL_LOAD_SPP loads a patch.  Returns 0 or a
   negative errno.
*/
static int
wavefront_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	wavefront_control wc;
	int err;

	switch (cmd) {

	case WFCTL_WFCMD:
		/* copy the control block in, run the command, and copy
		   the (possibly updated) block back out on success
		*/
		if (copy_from_user(&wc, (void __user *) arg, sizeof (wc)))
			return -EFAULT;

		if ((err = wavefront_synth_control (cmd, &wc)) == 0) {
			if (copy_to_user ((void __user *) arg, &wc, sizeof (wc)))
				return -EFAULT;
		}

		return err;

	case WFCTL_LOAD_SPP:
		return wavefront_load_patch ((const char __user *) arg);

	default:
		printk (KERN_WARNING LOGNAME "invalid ioctl %#x\n", cmd);
		return -(EINVAL);

	}
	return 0;
}
1973
/* Raw synth device entry points; ioctl is the only real operation. */
static /*const*/ struct file_operations wavefront_fops = {
	.owner		= THIS_MODULE,
	.llseek         = no_llseek,
	.ioctl		= wavefront_ioctl,
	.open		= wavefront_open,
	.release	= wavefront_release,
};
1981
1982
1983/***********************************************************************/
1984/* WaveFront: OSS installation and support interface */
1985/***********************************************************************/
1986
1987#if OSS_SUPPORT_LEVEL & OSS_SUPPORT_SEQ
1988
/* OSS synth capability record: sample-based synth, 32 voices,
   input capable.
*/
static struct synth_info wavefront_info =
{"Turtle Beach WaveFront", 0, SYNTH_TYPE_SAMPLE, SAMPLE_TYPE_WAVEFRONT,
 0, 32, 0, 0, SYNTH_CAP_INPUT};
1992
/* OSS synth open hook: record the open mode.  Always succeeds. */
static int
wavefront_oss_open (int devno, int mode)

{
	dev.opened = mode;
	return 0;
}
2000
/* OSS sequencer close callback: mark the device unopened and turn
 * off debugging output.
 */
static void
wavefront_oss_close (int devno)

{
	dev.opened = 0;
	dev.debug = 0;
	return;
}
2009
2010static int
2011wavefront_oss_ioctl (int devno, unsigned int cmd, void __user * arg)
2012
2013{
2014 wavefront_control wc;
2015 int err;
2016
2017 switch (cmd) {
2018 case SNDCTL_SYNTH_INFO:
2019 if(copy_to_user(arg, &wavefront_info, sizeof (wavefront_info)))
2020 return -EFAULT;
2021 return 0;
2022
2023 case SNDCTL_SEQ_RESETSAMPLES:
2024// printk (KERN_WARNING LOGNAME "driver cannot reset samples.\n");
2025 return 0; /* don't force an error */
2026
2027 case SNDCTL_SEQ_PERCMODE:
2028 return 0; /* don't force an error */
2029
2030 case SNDCTL_SYNTH_MEMAVL:
2031 if ((dev.freemem = wavefront_freemem ()) < 0) {
2032 printk (KERN_ERR LOGNAME "cannot get memory size\n");
2033 return -EIO;
2034 } else {
2035 return dev.freemem;
2036 }
2037 break;
2038
2039 case SNDCTL_SYNTH_CONTROL:
2040 if(copy_from_user (&wc, arg, sizeof (wc)))
2041 err = -EFAULT;
2042 else if ((err = wavefront_synth_control (cmd, &wc)) == 0) {
2043 if(copy_to_user (arg, &wc, sizeof (wc)))
2044 err = -EFAULT;
2045 }
2046
2047 return err;
2048
2049 default:
2050 return -(EINVAL);
2051 }
2052}
2053
2054static int
2055wavefront_oss_load_patch (int devno, int format, const char __user *addr,
2056 int offs, int count, int pmgr_flag)
2057{
2058
2059 if (format == SYSEX_PATCH) { /* Handled by midi_synth.c */
2060 if (midi_load_patch == NULL) {
2061 printk (KERN_ERR LOGNAME
2062 "SYSEX not loadable: "
2063 "no midi patch loader!\n");
2064 return -(EINVAL);
2065 }
2066
2067 return midi_load_patch (devno, format, addr,
2068 offs, count, pmgr_flag);
2069
2070 } else if (format == GUS_PATCH) {
2071 return wavefront_load_gus_patch (devno, format,
2072 addr, offs, count, pmgr_flag);
2073
2074 } else if (format != WAVEFRONT_PATCH) {
2075 printk (KERN_ERR LOGNAME "unknown patch format %d\n", format);
2076 return -(EINVAL);
2077 }
2078
2079 if (count < sizeof (wavefront_patch_info)) {
2080 printk (KERN_ERR LOGNAME "sample header too short\n");
2081 return -(EINVAL);
2082 }
2083
2084 /* "addr" points to a user-space wavefront_patch_info */
2085
2086 return wavefront_load_patch (addr);
2087}
2088
/* OSS sequencer operations table.  The open/close/ioctl entries are
 * WaveFront-specific; all note/controller handling is delegated to
 * the generic midi_synth_* helpers, which drive the card through its
 * MIDI interface.
 */
static struct synth_operations wavefront_operations =
{
	.owner		= THIS_MODULE,
	.id		= "WaveFront",
	.info		= &wavefront_info,
	.midi_dev	= 0,
	.synth_type	= SYNTH_TYPE_SAMPLE,
	.synth_subtype	= SAMPLE_TYPE_WAVEFRONT,
	.open		= wavefront_oss_open,
	.close		= wavefront_oss_close,
	.ioctl		= wavefront_oss_ioctl,
	.kill_note	= midi_synth_kill_note,
	.start_note	= midi_synth_start_note,
	.set_instr	= midi_synth_set_instr,
	.reset		= midi_synth_reset,
	.load_patch	= midi_synth_load_patch,
	.aftertouch	= midi_synth_aftertouch,
	.controller	= midi_synth_controller,
	.panning	= midi_synth_panning,
	.bender		= midi_synth_bender,
	.setup_voice	= midi_synth_setup_voice
};
2111#endif /* OSS_SUPPORT_SEQ */
2112
2113#if OSS_SUPPORT_LEVEL & OSS_SUPPORT_STATIC_INSTALL
2114
/* OSS static-install attach hook: installs the driver; the return
 * value of install_wavefront() is deliberately ignored here.
 */
static void __init attach_wavefront (struct address_info *hw_config)
{
	(void) install_wavefront ();
}
2119
2120static int __init probe_wavefront (struct address_info *hw_config)
2121{
2122 return !detect_wavefront (hw_config->irq, hw_config->io_base);
2123}
2124
/* OSS static-install unload hook: tears the driver down; the return
 * value of uninstall_wavefront() is deliberately ignored here.
 */
static void __exit unload_wavefront (struct address_info *hw_config)
{
	(void) uninstall_wavefront ();
}
2129
2130#endif /* OSS_SUPPORT_STATIC_INSTALL */
2131
2132/***********************************************************************/
2133/* WaveFront: Linux modular sound kernel installation interface */
2134/***********************************************************************/
2135
/* Interrupt handler.  The IRQ line may be shared, so first check the
 * status register for a pending RX/TX interrupt; if none, it wasn't
 * ours.  Otherwise record the interrupt and wake whoever is sleeping
 * in wavefront_should_cause_interrupt().
 */
static irqreturn_t
wavefrontintr(int irq, void *dev_id, struct pt_regs *dummy)
{
	struct wf_config *hw = dev_id;

	/*
	   Some comments on interrupts. I attempted a version of this
	   driver that used interrupts throughout the code instead of
	   doing busy and/or sleep-waiting. Alas, it appears that once
	   the Motorola firmware is downloaded, the card *never*
	   generates an RX interrupt. These are successfully generated
	   during firmware loading, and after that wavefront_status()
	   reports that an interrupt is pending on the card from time
	   to time, but it never seems to be delivered to this
	   driver. Note also that wavefront_status() continues to
	   report that RX interrupts are enabled, suggesting that I
	   didn't goof up and disable them by mistake.

	   Thus, I stepped back to a prior version of
	   wavefront_wait(), the only place where this really
	   matters. Its sad, but I've looked through the code to check
	   on things, and I really feel certain that the Motorola
	   firmware prevents RX-ready interrupts.
	*/

	if ((wavefront_status() & (STAT_INTR_READ|STAT_INTR_WRITE)) == 0) {
		/* no pending RX/TX interrupt: not our device */
		return IRQ_NONE;
	}

	hw->irq_ok = 1;		/* checked by sleepers after they wake */
	hw->irq_cnt++;
	wake_up_interruptible (&hw->interrupt_sleeper);
	return IRQ_HANDLED;
}
2170
2171/* STATUS REGISTER
2172
21730 Host Rx Interrupt Enable (1=Enabled)
21741 Host Rx Register Full (1=Full)
21752 Host Rx Interrupt Pending (1=Interrupt)
21763 Unused
21774 Host Tx Interrupt (1=Enabled)
21785 Host Tx Register empty (1=Empty)
21796 Host Tx Interrupt Pending (1=Interrupt)
21807 Unused
2181*/
2182
2183static int
2184wavefront_interrupt_bits (int irq)
2185
2186{
2187 int bits;
2188
2189 switch (irq) {
2190 case 9:
2191 bits = 0x00;
2192 break;
2193 case 5:
2194 bits = 0x08;
2195 break;
2196 case 12:
2197 bits = 0x10;
2198 break;
2199 case 15:
2200 bits = 0x18;
2201 break;
2202
2203 default:
2204 printk (KERN_WARNING LOGNAME "invalid IRQ %d\n", irq);
2205 bits = -1;
2206 }
2207
2208 return bits;
2209}
2210
/* Write "val" to "port" and then sleep for up to "timeout" jiffies
 * waiting for the board to respond with an interrupt.  Callers check
 * dev.irq_ok afterwards to see whether the interrupt arrived (it is
 * set by wavefrontintr()).
 *
 * NOTE(review): this sleeps via interruptible_sleep_on_timeout()
 * while holding a spinlock taken with irqs saved; the author's
 * comment below admits this is only a compile-level placation and
 * gives no real SMP protection -- confirm before relying on it.
 */
static void
wavefront_should_cause_interrupt (int val, int port, int timeout)

{
	unsigned long flags;

	/* this will not help on SMP - but at least it compiles */
	spin_lock_irqsave(&lock, flags);
	dev.irq_ok = 0;
	outb (val,port);
	interruptible_sleep_on_timeout (&dev.interrupt_sleeper, timeout);
	spin_unlock_irqrestore(&lock,flags);
}
2224
2225static int __init wavefront_hw_reset (void)
2226{
2227 int bits;
2228 int hwv[2];
2229 unsigned long irq_mask;
2230 short reported_irq;
2231
2232 /* IRQ already checked in init_module() */
2233
2234 bits = wavefront_interrupt_bits (dev.irq);
2235
2236 printk (KERN_DEBUG LOGNAME "autodetecting WaveFront IRQ\n");
2237
2238 irq_mask = probe_irq_on ();
2239
2240 outb (0x0, dev.control_port);
2241 outb (0x80 | 0x40 | bits, dev.data_port);
2242 wavefront_should_cause_interrupt(0x80|0x40|0x10|0x1,
2243 dev.control_port,
2244 (reset_time*HZ)/100);
2245
2246 reported_irq = probe_irq_off (irq_mask);
2247
2248 if (reported_irq != dev.irq) {
2249 if (reported_irq == 0) {
2250 printk (KERN_ERR LOGNAME
2251 "No unassigned interrupts detected "
2252 "after h/w reset\n");
2253 } else if (reported_irq < 0) {
2254 printk (KERN_ERR LOGNAME
2255 "Multiple unassigned interrupts detected "
2256 "after h/w reset\n");
2257 } else {
2258 printk (KERN_ERR LOGNAME "autodetected IRQ %d not the "
2259 "value provided (%d)\n", reported_irq,
2260 dev.irq);
2261 }
2262 dev.irq = -1;
2263 return 1;
2264 } else {
2265 printk (KERN_INFO LOGNAME "autodetected IRQ at %d\n",
2266 reported_irq);
2267 }
2268
2269 if (request_irq (dev.irq, wavefrontintr,
2270 IRQF_DISABLED|IRQF_SHARED,
2271 "wavefront synth", &dev) < 0) {
2272 printk (KERN_WARNING LOGNAME "IRQ %d not available!\n",
2273 dev.irq);
2274 return 1;
2275 }
2276
2277 /* try reset of port */
2278
2279 outb (0x0, dev.control_port);
2280
2281 /* At this point, the board is in reset, and the H/W initialization
2282 register is accessed at the same address as the data port.
2283
2284 Bit 7 - Enable IRQ Driver
2285 0 - Tri-state the Wave-Board drivers for the PC Bus IRQs
2286 1 - Enable IRQ selected by bits 5:3 to be driven onto the PC Bus.
2287
2288 Bit 6 - MIDI Interface Select
2289
2290 0 - Use the MIDI Input from the 26-pin WaveBlaster
2291 compatible header as the serial MIDI source
2292 1 - Use the MIDI Input from the 9-pin D connector as the
2293 serial MIDI source.
2294
2295 Bits 5:3 - IRQ Selection
2296 0 0 0 - IRQ 2/9
2297 0 0 1 - IRQ 5
2298 0 1 0 - IRQ 12
2299 0 1 1 - IRQ 15
2300 1 0 0 - Reserved
2301 1 0 1 - Reserved
2302 1 1 0 - Reserved
2303 1 1 1 - Reserved
2304
2305 Bits 2:1 - Reserved
2306 Bit 0 - Disable Boot ROM
2307 0 - memory accesses to 03FC30-03FFFFH utilize the internal Boot ROM
2308 1 - memory accesses to 03FC30-03FFFFH are directed to external
2309 storage.
2310
2311 */
2312
2313 /* configure hardware: IRQ, enable interrupts,
2314 plus external 9-pin MIDI interface selected
2315 */
2316
2317 outb (0x80 | 0x40 | bits, dev.data_port);
2318
2319 /* CONTROL REGISTER
2320
2321 0 Host Rx Interrupt Enable (1=Enabled) 0x1
2322 1 Unused 0x2
2323 2 Unused 0x4
2324 3 Unused 0x8
2325 4 Host Tx Interrupt Enable 0x10
2326 5 Mute (0=Mute; 1=Play) 0x20
2327 6 Master Interrupt Enable (1=Enabled) 0x40
2328 7 Master Reset (0=Reset; 1=Run) 0x80
2329
2330 Take us out of reset, mute output, master + TX + RX interrupts on.
2331
2332 We'll get an interrupt presumably to tell us that the TX
2333 register is clear.
2334 */
2335
2336 wavefront_should_cause_interrupt(0x80|0x40|0x10|0x1,
2337 dev.control_port,
2338 (reset_time*HZ)/100);
2339
2340 /* Note: data port is now the data port, not the h/w initialization
2341 port.
2342 */
2343
2344 if (!dev.irq_ok) {
2345 printk (KERN_WARNING LOGNAME
2346 "intr not received after h/w un-reset.\n");
2347 goto gone_bad;
2348 }
2349
2350 dev.interrupts_on = 1;
2351
2352 /* Note: data port is now the data port, not the h/w initialization
2353 port.
2354
2355 At this point, only "HW VERSION" or "DOWNLOAD OS" commands
2356 will work. So, issue one of them, and wait for TX
2357 interrupt. This can take a *long* time after a cold boot,
2358 while the ISC ROM does its RAM test. The SDK says up to 4
2359 seconds - with 12MB of RAM on a Tropez+, it takes a lot
2360 longer than that (~16secs). Note that the card understands
2361 the difference between a warm and a cold boot, so
2362 subsequent ISC2115 reboots (say, caused by module
2363 reloading) will get through this much faster.
2364
2365 XXX Interesting question: why is no RX interrupt received first ?
2366 */
2367
2368 wavefront_should_cause_interrupt(WFC_HARDWARE_VERSION,
2369 dev.data_port, ramcheck_time*HZ);
2370
2371 if (!dev.irq_ok) {
2372 printk (KERN_WARNING LOGNAME
2373 "post-RAM-check interrupt not received.\n");
2374 goto gone_bad;
2375 }
2376
2377 if (!wavefront_wait (STAT_CAN_READ)) {
2378 printk (KERN_WARNING LOGNAME
2379 "no response to HW version cmd.\n");
2380 goto gone_bad;
2381 }
2382
2383 if ((hwv[0] = wavefront_read ()) == -1) {
2384 printk (KERN_WARNING LOGNAME
2385 "board not responding correctly.\n");
2386 goto gone_bad;
2387 }
2388
2389 if (hwv[0] == 0xFF) { /* NAK */
2390
2391 /* Board's RAM test failed. Try to read error code,
2392 and tell us about it either way.
2393 */
2394
2395 if ((hwv[0] = wavefront_read ()) == -1) {
2396 printk (KERN_WARNING LOGNAME "on-board RAM test failed "
2397 "(bad error code).\n");
2398 } else {
2399 printk (KERN_WARNING LOGNAME "on-board RAM test failed "
2400 "(error code: 0x%x).\n",
2401 hwv[0]);
2402 }
2403 goto gone_bad;
2404 }
2405
2406 /* We're OK, just get the next byte of the HW version response */
2407
2408 if ((hwv[1] = wavefront_read ()) == -1) {
2409 printk (KERN_WARNING LOGNAME "incorrect h/w response.\n");
2410 goto gone_bad;
2411 }
2412
2413 printk (KERN_INFO LOGNAME "hardware version %d.%d\n",
2414 hwv[0], hwv[1]);
2415
2416 return 0;
2417
2418
2419 gone_bad:
2420 if (dev.irq >= 0) {
2421 free_irq (dev.irq, &dev);
2422 dev.irq = -1;
2423 }
2424 return (1);
2425}
2426
2427static int __init detect_wavefront (int irq, int io_base)
2428{
2429 unsigned char rbuf[4], wbuf[4];
2430
2431 /* TB docs say the device takes up 8 ports, but we know that
2432 if there is an FX device present (i.e. a Tropez+) it really
2433 consumes 16.
2434 */
2435
2436 if (!request_region (io_base, 16, "wavfront")) {
2437 printk (KERN_ERR LOGNAME "IO address range 0x%x - 0x%x "
2438 "already in use - ignored\n", dev.base,
2439 dev.base+15);
2440 return -1;
2441 }
2442
2443 dev.irq = irq;
2444 dev.base = io_base;
2445 dev.israw = 0;
2446 dev.debug = debug_default;
2447 dev.interrupts_on = 0;
2448 dev.irq_cnt = 0;
2449 dev.rom_samples_rdonly = 1; /* XXX default lock on ROM sample slots */
2450
2451 if (wavefront_cmd (WFC_FIRMWARE_VERSION, rbuf, wbuf) == 0) {
2452
2453 dev.fw_version[0] = rbuf[0];
2454 dev.fw_version[1] = rbuf[1];
2455 printk (KERN_INFO LOGNAME
2456 "firmware %d.%d already loaded.\n",
2457 rbuf[0], rbuf[1]);
2458
2459 /* check that a command actually works */
2460
2461 if (wavefront_cmd (WFC_HARDWARE_VERSION,
2462 rbuf, wbuf) == 0) {
2463 dev.hw_version[0] = rbuf[0];
2464 dev.hw_version[1] = rbuf[1];
2465 } else {
2466 printk (KERN_WARNING LOGNAME "not raw, but no "
2467 "hardware version!\n");
2468 release_region (io_base, 16);
2469 return 0;
2470 }
2471
2472 if (!wf_raw) {
2473 /* will re-acquire region in install_wavefront() */
2474 release_region (io_base, 16);
2475 return 1;
2476 } else {
2477 printk (KERN_INFO LOGNAME
2478 "reloading firmware anyway.\n");
2479 dev.israw = 1;
2480 }
2481
2482 } else {
2483
2484 dev.israw = 1;
2485 printk (KERN_INFO LOGNAME
2486 "no response to firmware probe, assume raw.\n");
2487
2488 }
2489
2490 init_waitqueue_head (&dev.interrupt_sleeper);
2491
2492 if (wavefront_hw_reset ()) {
2493 printk (KERN_WARNING LOGNAME "hardware reset failed\n");
2494 release_region (io_base, 16);
2495 return 0;
2496 }
2497
2498 /* Check for FX device, present only on Tropez+ */
2499
2500 dev.has_fx = (detect_wffx () == 0);
2501
2502 /* will re-acquire region in install_wavefront() */
2503 release_region (io_base, 16);
2504 return 1;
2505}
2506
2507#include "os.h"
2508#include <linux/fs.h>
2509#include <linux/mm.h>
2510#include <linux/slab.h>
2511#include <asm/uaccess.h>
2512
2513
2514static int
2515wavefront_download_firmware (char *path)
2516
2517{
2518 unsigned char section[WF_SECTION_MAX];
2519 char section_length; /* yes, just a char; max value is WF_SECTION_MAX */
2520 int section_cnt_downloaded = 0;
2521 int fd;
2522 int c;
2523 int i;
2524 mm_segment_t fs;
2525
2526 /* This tries to be a bit cleverer than the stuff Alan Cox did for
2527 the generic sound firmware, in that it actually knows
2528 something about the structure of the Motorola firmware. In
2529 particular, it uses a version that has been stripped of the
2530 20K of useless header information, and had section lengths
2531 added, making it possible to load the entire OS without any
2532 [kv]malloc() activity, since the longest entity we ever read is
2533 42 bytes (well, WF_SECTION_MAX) long.
2534 */
2535
2536 fs = get_fs();
2537 set_fs (get_ds());
2538
2539 if ((fd = sys_open (path, 0, 0)) < 0) {
2540 printk (KERN_WARNING LOGNAME "Unable to load \"%s\".\n",
2541 path);
2542 return 1;
2543 }
2544
2545 while (1) {
2546 int x;
2547
2548 if ((x = sys_read (fd, &section_length, sizeof (section_length))) !=
2549 sizeof (section_length)) {
2550 printk (KERN_ERR LOGNAME "firmware read error.\n");
2551 goto failure;
2552 }
2553
2554 if (section_length == 0) {
2555 break;
2556 }
2557
2558 if (sys_read (fd, section, section_length) != section_length) {
2559 printk (KERN_ERR LOGNAME "firmware section "
2560 "read error.\n");
2561 goto failure;
2562 }
2563
2564 /* Send command */
2565
2566 if (wavefront_write (WFC_DOWNLOAD_OS)) {
2567 goto failure;
2568 }
2569
2570 for (i = 0; i < section_length; i++) {
2571 if (wavefront_write (section[i])) {
2572 goto failure;
2573 }
2574 }
2575
2576 /* get ACK */
2577
2578 if (wavefront_wait (STAT_CAN_READ)) {
2579
2580 if ((c = inb (dev.data_port)) != WF_ACK) {
2581
2582 printk (KERN_ERR LOGNAME "download "
2583 "of section #%d not "
2584 "acknowledged, ack = 0x%x\n",
2585 section_cnt_downloaded + 1, c);
2586 goto failure;
2587
2588 }
2589
2590 } else {
2591 printk (KERN_ERR LOGNAME "time out for firmware ACK.\n");
2592 goto failure;
2593 }
2594
2595 }
2596
2597 sys_close (fd);
2598 set_fs (fs);
2599 return 0;
2600
2601 failure:
2602 sys_close (fd);
2603 set_fs (fs);
2604 printk (KERN_ERR "\nWaveFront: firmware download failed!!!\n");
2605 return 1;
2606}
2607
/* Configure the board's MIDI side: detect and install the MPU-401
 * style interface, route external MIDI into the synth, hook our OSS
 * patch loader into the MIDI converter, and (best-effort) enable
 * virtual MIDI mode.  Returns 0 on success (possibly without the
 * external MIDI device), -1 if the MIDI interface itself could not
 * be set up.
 */
static int __init wavefront_config_midi (void)
{
	unsigned char rbuf[4], wbuf[4];

	if (detect_wf_mpu (dev.irq, dev.base) < 0) {
		printk (KERN_WARNING LOGNAME
			"could not find working MIDI device\n");
		return -1;
	}

	if ((dev.mididev = install_wf_mpu ()) < 0) {
		printk (KERN_WARNING LOGNAME
			"MIDI interfaces not configured\n");
		return -1;
	}

	/* Route external MIDI to WaveFront synth (by default) */

	if (wavefront_cmd (WFC_MISYNTH_ON, rbuf, wbuf)) {
		printk (KERN_WARNING LOGNAME
			"cannot enable MIDI-IN to synth routing.\n");
		/* XXX error ? */
	}


#if OSS_SUPPORT_LEVEL & OSS_SUPPORT_SEQ
	/* Get the regular MIDI patch loading function, so we can
	   use it if we ever get handed a SYSEX patch. This is
	   unlikely, because its so damn slow, but we may as well
	   leave this functionality from maui.c behind, since it
	   could be useful for sequencer applications that can
	   only use MIDI to do patch loading.
	*/

	/* Save the converter's own loader, then interpose ours
	   (wavefront_oss_load_patch falls back to midi_load_patch
	   for SYSEX data). */
	if (midi_devs[dev.mididev]->converter != NULL) {
		midi_load_patch = midi_devs[dev.mididev]->converter->load_patch;
		midi_devs[dev.mididev]->converter->load_patch =
		    &wavefront_oss_load_patch;
	}

#endif /* OSS_SUPPORT_SEQ */

	/* Turn on Virtual MIDI, but first *always* turn it off,
	   since otherwise consectutive reloads of the driver will
	   never cause the hardware to generate the initial "internal" or
	   "external" source bytes in the MIDI data stream. This
	   is pretty important, since the internal hardware generally will
	   be used to generate none or very little MIDI output, and
	   thus the only source of MIDI data is actually external. Without
	   the switch bytes, the driver will think it all comes from
	   the internal interface. Duh.
	*/

	if (wavefront_cmd (WFC_VMIDI_OFF, rbuf, wbuf)) {
		printk (KERN_WARNING LOGNAME
			"virtual MIDI mode not disabled\n");
		return 0; /* We're OK, but missing the external MIDI dev */
	}

	if ((dev.ext_mididev = virtual_midi_enable ()) < 0) {
		printk (KERN_WARNING LOGNAME "no virtual MIDI access.\n");
	} else {
		if (wavefront_cmd (WFC_VMIDI_ON, rbuf, wbuf)) {
			printk (KERN_WARNING LOGNAME
				"cannot enable virtual MIDI mode.\n");
			virtual_midi_disable ();
		}
	}

	return 0;
}
2679
/* Bring the synth up to a working state: optionally run the hardware
 * reset, download firmware when the board is "raw", wait for the OS
 * to start, initialize the FX processor (Tropez+), query free sample
 * memory, switch to MPU emulation mode and set the voice count.
 *
 * "atboot" skips the initial hardware reset (it was already done in
 * the detect path).  Returns 0 on success; on failure the board is
 * put back into reset and the IRQ freed.
 */
static int __init wavefront_do_reset (int atboot)
{
	char voices[1];

	if (!atboot && wavefront_hw_reset ()) {
		printk (KERN_WARNING LOGNAME "hw reset failed.\n");
		goto gone_bad;
	}

	if (dev.israw) {
		if (wavefront_download_firmware (ospath)) {
			goto gone_bad;
		}

		dev.israw = 0;

		/* Wait for the OS to get running. The protocol for
		   this is non-obvious, and was determined by
		   using port-IO tracing in DOSemu and some
		   experimentation here.

		   Rather than using timed waits, use interrupts creatively.
		*/

		wavefront_should_cause_interrupt (WFC_NOOP,
						  dev.data_port,
						  (osrun_time*HZ));

		if (!dev.irq_ok) {
			printk (KERN_WARNING LOGNAME
				"no post-OS interrupt.\n");
			goto gone_bad;
		}

		/* Now, do it again ! */

		wavefront_should_cause_interrupt (WFC_NOOP,
						  dev.data_port, (10*HZ));

		if (!dev.irq_ok) {
			printk (KERN_WARNING LOGNAME
				"no post-OS interrupt(2).\n");
			goto gone_bad;
		}

		/* OK, no (RX/TX) interrupts any more, but leave mute
		   in effect.
		*/

		outb (0x80|0x40, dev.control_port);

		/* No need for the IRQ anymore */

		free_irq (dev.irq, &dev);

	}

	if (dev.has_fx && fx_raw) {
		wffx_init ();
	}

	/* SETUPSND.EXE asks for sample memory config here, but since i
	   have no idea how to interpret the result, we'll forget
	   about it.
	*/

	if ((dev.freemem = wavefront_freemem ()) < 0) {
		goto gone_bad;
	}

	printk (KERN_INFO LOGNAME "available DRAM %dk\n", dev.freemem / 1024);

	/* 0xf0 1: switch the board into MPU emulation mode and read
	   back its acknowledgement byte. */
	if (wavefront_write (0xf0) ||
	    wavefront_write (1) ||
	    (wavefront_read () < 0)) {
		dev.debug = 0;
		printk (KERN_WARNING LOGNAME "MPU emulation mode not set.\n");
		goto gone_bad;
	}

	voices[0] = 32;

	if (wavefront_cmd (WFC_SET_NVOICES, NULL, voices)) {
		printk (KERN_WARNING LOGNAME
			"cannot set number of voices to 32.\n");
		goto gone_bad;
	}


	return 0;

 gone_bad:
	/* reset that sucker so that it doesn't bother us. */

	outb (0x0, dev.control_port);
	dev.interrupts_on = 0;
	if (dev.irq >= 0) {
		free_irq (dev.irq, &dev);
	}
	return 1;
}
2781
2782static int __init wavefront_init (int atboot)
2783{
2784 int samples_are_from_rom;
2785
2786 if (dev.israw) {
2787 samples_are_from_rom = 1;
2788 } else {
2789 /* XXX is this always true ? */
2790 samples_are_from_rom = 0;
2791 }
2792
2793 if (dev.israw || fx_raw) {
2794 if (wavefront_do_reset (atboot)) {
2795 return -1;
2796 }
2797 }
2798
2799 wavefront_get_sample_status (samples_are_from_rom);
2800 wavefront_get_program_status ();
2801 wavefront_get_patch_status ();
2802
2803 /* Start normal operation: unreset, master interrupt enabled, no mute
2804 */
2805
2806 outb (0x80|0x40|0x20, dev.control_port);
2807
2808 return (0);
2809}
2810
2811static int __init install_wavefront (void)
2812{
2813 if (!request_region (dev.base+2, 6, "wavefront synth"))
2814 return -1;
2815
2816 if (dev.has_fx) {
2817 if (!request_region (dev.base+8, 8, "wavefront fx")) {
2818 release_region (dev.base+2, 6);
2819 return -1;
2820 }
2821 }
2822
2823 if ((dev.synth_dev = register_sound_synth (&wavefront_fops, -1)) < 0) {
2824 printk (KERN_ERR LOGNAME "cannot register raw synth\n");
2825 goto err_out;
2826 }
2827
2828#if OSS_SUPPORT_LEVEL & OSS_SUPPORT_SEQ
2829 if ((dev.oss_dev = sound_alloc_synthdev()) == -1) {
2830 printk (KERN_ERR LOGNAME "Too many sequencers\n");
2831 /* FIXME: leak: should unregister sound synth */
2832 goto err_out;
2833 } else {
2834 synth_devs[dev.oss_dev] = &wavefront_operations;
2835 }
2836#endif /* OSS_SUPPORT_SEQ */
2837
2838 if (wavefront_init (1) < 0) {
2839 printk (KERN_WARNING LOGNAME "initialization failed.\n");
2840
2841#if OSS_SUPPORT_LEVEL & OSS_SUPPORT_SEQ
2842 sound_unload_synthdev (dev.oss_dev);
2843#endif /* OSS_SUPPORT_SEQ */
2844
2845 goto err_out;
2846 }
2847
2848 if (wavefront_config_midi ()) {
2849 printk (KERN_WARNING LOGNAME "could not initialize MIDI.\n");
2850 }
2851
2852 return dev.oss_dev;
2853
2854err_out:
2855 release_region (dev.base+2, 6);
2856 if (dev.has_fx)
2857 release_region (dev.base+8, 8);
2858 return -1;
2859}
2860
/* Module teardown: release the I/O regions and unregister everything
 * install_wavefront() set up, then remove the MPU interface.
 */
static void __exit uninstall_wavefront (void)
{
	/* the first two i/o addresses are freed by the wf_mpu code */
	release_region (dev.base+2, 6);

	if (dev.has_fx) {
		release_region (dev.base+8, 8);
	}

	unregister_sound_synth (dev.synth_dev);

#if OSS_SUPPORT_LEVEL & OSS_SUPPORT_SEQ
	sound_unload_synthdev (dev.oss_dev);
#endif /* OSS_SUPPORT_SEQ */
	uninstall_wf_mpu ();
}
2877
2878/***********************************************************************/
2879/* WaveFront FX control */
2880/***********************************************************************/
2881
2882#include "yss225.h"
2883
2884/* Control bits for the Load Control Register
2885 */
2886
2887#define FX_LSB_TRANSFER 0x01 /* transfer after DSP LSB byte written */
2888#define FX_MSB_TRANSFER 0x02 /* transfer after DSP MSB byte written */
2889#define FX_AUTO_INCR 0x04 /* auto-increment DSP address after transfer */
2890
2891static int
2892wffx_idle (void)
2893
2894{
2895 int i;
2896 unsigned int x = 0x80;
2897
2898 for (i = 0; i < 1000; i++) {
2899 x = inb (dev.fx_status);
2900 if ((x & 0x80) == 0) {
2901 break;
2902 }
2903 }
2904
2905 if (x & 0x80) {
2906 printk (KERN_ERR LOGNAME "FX device never idle.\n");
2907 return 0;
2908 }
2909
2910 return (1);
2911}
2912
2913int __init detect_wffx (void)
2914{
2915 /* This is a crude check, but its the best one I have for now.
2916 Certainly on the Maui and the Tropez, wffx_idle() will
2917 report "never idle", which suggests that this test should
2918 work OK.
2919 */
2920
2921 if (inb (dev.fx_status) & 0x80) {
2922 printk (KERN_INFO LOGNAME "Hmm, probably a Maui or Tropez.\n");
2923 return -1;
2924 }
2925
2926 return 0;
2927}
2928
2929static void
2930wffx_mute (int onoff)
2931
2932{
2933 if (!wffx_idle()) {
2934 return;
2935 }
2936
2937 outb (onoff ? 0x02 : 0x00, dev.fx_op);
2938}
2939
/* Write "cnt" 16-bit words from "data" into FX DSP memory, starting
 * at "addr" (0x00-0x7f) within DSP page "page" (0-7).  A single word
 * uses a plain LSB-triggered transfer; multiple words use the
 * auto-increment mode, polling for idle after each word.  Returns 0
 * on success, -EINVAL for bad page/addr, -EIO if the DSP stopped
 * accepting data mid-transfer.
 *
 * NOTE(review): the single-word path does not poll wffx_idle()
 * before writing, unlike the multi-word path -- confirm whether that
 * is intentional.
 */
static int
wffx_memset (int page,
	     int addr, int cnt, unsigned short *data)
{
	if (page < 0 || page > 7) {
		printk (KERN_ERR LOGNAME "FX memset: "
			"page must be >= 0 and <= 7\n");
		return -(EINVAL);
	}

	if (addr < 0 || addr > 0x7f) {
		printk (KERN_ERR LOGNAME "FX memset: "
			"addr must be >= 0 and <= 7f\n");
		return -(EINVAL);
	}

	if (cnt == 1) {

		/* single word: transfer triggers on the LSB write */
		outb (FX_LSB_TRANSFER, dev.fx_lcr);
		outb (page, dev.fx_dsp_page);
		outb (addr, dev.fx_dsp_addr);
		outb ((data[0] >> 8), dev.fx_dsp_msb);
		outb ((data[0] & 0xff), dev.fx_dsp_lsb);

		printk (KERN_INFO LOGNAME "FX: addr %d:%x set to 0x%x\n",
			page, addr, data[0]);

	} else {
		int i;

		/* multi-word: let the DSP auto-increment the address */
		outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
		outb (page, dev.fx_dsp_page);
		outb (addr, dev.fx_dsp_addr);

		for (i = 0; i < cnt; i++) {
			outb ((data[i] >> 8), dev.fx_dsp_msb);
			outb ((data[i] & 0xff), dev.fx_dsp_lsb);
			if (!wffx_idle ()) {
				break;
			}
		}

		/* loop broke early: DSP stopped taking data */
		if (i != cnt) {
			printk (KERN_WARNING LOGNAME
				"FX memset "
				"(0x%x, 0x%x, %p, %d) incomplete\n",
				page, addr, data, cnt);
			return -(EIO);
		}
	}

	return 0;
}
2993
2994static int
2995wffx_ioctl (wavefront_fx_info *r)
2996
2997{
2998 unsigned short page_data[256];
2999 unsigned short *pd;
3000
3001 switch (r->request) {
3002 case WFFX_MUTE:
3003 wffx_mute (r->data[0]);
3004 return 0;
3005
3006 case WFFX_MEMSET:
3007
3008 if (r->data[2] <= 0) {
3009 printk (KERN_ERR LOGNAME "cannot write "
3010 "<= 0 bytes to FX\n");
3011 return -(EINVAL);
3012 } else if (r->data[2] == 1) {
3013 pd = (unsigned short *) &r->data[3];
3014 } else {
3015 if (r->data[2] > sizeof (page_data)) {
3016 printk (KERN_ERR LOGNAME "cannot write "
3017 "> 255 bytes to FX\n");
3018 return -(EINVAL);
3019 }
3020 if (copy_from_user(page_data,
3021 (unsigned char __user *)r->data[3],
3022 r->data[2]))
3023 return -EFAULT;
3024 pd = page_data;
3025 }
3026
3027 return wffx_memset (r->data[0], /* page */
3028 r->data[1], /* addr */
3029 r->data[2], /* cnt */
3030 pd);
3031
3032 default:
3033 printk (KERN_WARNING LOGNAME
3034 "FX: ioctl %d not yet supported\n",
3035 r->request);
3036 return -(EINVAL);
3037 }
3038}
3039
3040/* YSS225 initialization.
3041
3042 This code was developed using DOSEMU. The Turtle Beach SETUPSND
3043 utility was run with I/O tracing in DOSEMU enabled, and a reconstruction
3044 of the port I/O done, using the Yamaha faxback document as a guide
3045 to add more logic to the code. Its really pretty weird.
3046
3047 There was an alternative approach of just dumping the whole I/O
3048 sequence as a series of port/value pairs and a simple loop
3049 that output it. However, I hope that eventually I'll get more
3050 control over what this code does, and so I tried to stick with
3051 a somewhat "algorithmic" approach.
3052*/
3053
3054static int __init wffx_init (void)
3055{
3056 int i;
3057 int j;
3058
3059 /* Set all bits for all channels on the MOD unit to zero */
3060 /* XXX But why do this twice ? */
3061
3062 for (j = 0; j < 2; j++) {
3063 for (i = 0x10; i <= 0xff; i++) {
3064
3065 if (!wffx_idle ()) {
3066 return (-1);
3067 }
3068
3069 outb (i, dev.fx_mod_addr);
3070 outb (0x0, dev.fx_mod_data);
3071 }
3072 }
3073
3074 if (!wffx_idle()) return (-1);
3075 outb (0x02, dev.fx_op); /* mute on */
3076
3077 if (!wffx_idle()) return (-1);
3078 outb (0x07, dev.fx_dsp_page);
3079 outb (0x44, dev.fx_dsp_addr);
3080 outb (0x00, dev.fx_dsp_msb);
3081 outb (0x00, dev.fx_dsp_lsb);
3082 if (!wffx_idle()) return (-1);
3083 outb (0x07, dev.fx_dsp_page);
3084 outb (0x42, dev.fx_dsp_addr);
3085 outb (0x00, dev.fx_dsp_msb);
3086 outb (0x00, dev.fx_dsp_lsb);
3087 if (!wffx_idle()) return (-1);
3088 outb (0x07, dev.fx_dsp_page);
3089 outb (0x43, dev.fx_dsp_addr);
3090 outb (0x00, dev.fx_dsp_msb);
3091 outb (0x00, dev.fx_dsp_lsb);
3092 if (!wffx_idle()) return (-1);
3093 outb (0x07, dev.fx_dsp_page);
3094 outb (0x7c, dev.fx_dsp_addr);
3095 outb (0x00, dev.fx_dsp_msb);
3096 outb (0x00, dev.fx_dsp_lsb);
3097 if (!wffx_idle()) return (-1);
3098 outb (0x07, dev.fx_dsp_page);
3099 outb (0x7e, dev.fx_dsp_addr);
3100 outb (0x00, dev.fx_dsp_msb);
3101 outb (0x00, dev.fx_dsp_lsb);
3102 if (!wffx_idle()) return (-1);
3103 outb (0x07, dev.fx_dsp_page);
3104 outb (0x46, dev.fx_dsp_addr);
3105 outb (0x00, dev.fx_dsp_msb);
3106 outb (0x00, dev.fx_dsp_lsb);
3107 if (!wffx_idle()) return (-1);
3108 outb (0x07, dev.fx_dsp_page);
3109 outb (0x49, dev.fx_dsp_addr);
3110 outb (0x00, dev.fx_dsp_msb);
3111 outb (0x00, dev.fx_dsp_lsb);
3112 if (!wffx_idle()) return (-1);
3113 outb (0x07, dev.fx_dsp_page);
3114 outb (0x47, dev.fx_dsp_addr);
3115 outb (0x00, dev.fx_dsp_msb);
3116 outb (0x00, dev.fx_dsp_lsb);
3117 if (!wffx_idle()) return (-1);
3118 outb (0x07, dev.fx_dsp_page);
3119 outb (0x4a, dev.fx_dsp_addr);
3120 outb (0x00, dev.fx_dsp_msb);
3121 outb (0x00, dev.fx_dsp_lsb);
3122
3123 /* either because of stupidity by TB's programmers, or because it
3124 actually does something, rezero the MOD page.
3125 */
3126 for (i = 0x10; i <= 0xff; i++) {
3127
3128 if (!wffx_idle ()) {
3129 return (-1);
3130 }
3131
3132 outb (i, dev.fx_mod_addr);
3133 outb (0x0, dev.fx_mod_data);
3134 }
3135 /* load page zero */
3136
3137 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3138 outb (0x00, dev.fx_dsp_page);
3139 outb (0x00, dev.fx_dsp_addr);
3140
3141 for (i = 0; i < sizeof (page_zero); i += 2) {
3142 outb (page_zero[i], dev.fx_dsp_msb);
3143 outb (page_zero[i+1], dev.fx_dsp_lsb);
3144 if (!wffx_idle()) return (-1);
3145 }
3146
3147 /* Now load page one */
3148
3149 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3150 outb (0x01, dev.fx_dsp_page);
3151 outb (0x00, dev.fx_dsp_addr);
3152
3153 for (i = 0; i < sizeof (page_one); i += 2) {
3154 outb (page_one[i], dev.fx_dsp_msb);
3155 outb (page_one[i+1], dev.fx_dsp_lsb);
3156 if (!wffx_idle()) return (-1);
3157 }
3158
3159 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3160 outb (0x02, dev.fx_dsp_page);
3161 outb (0x00, dev.fx_dsp_addr);
3162
3163 for (i = 0; i < sizeof (page_two); i++) {
3164 outb (page_two[i], dev.fx_dsp_lsb);
3165 if (!wffx_idle()) return (-1);
3166 }
3167
3168 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3169 outb (0x03, dev.fx_dsp_page);
3170 outb (0x00, dev.fx_dsp_addr);
3171
3172 for (i = 0; i < sizeof (page_three); i++) {
3173 outb (page_three[i], dev.fx_dsp_lsb);
3174 if (!wffx_idle()) return (-1);
3175 }
3176
3177 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3178 outb (0x04, dev.fx_dsp_page);
3179 outb (0x00, dev.fx_dsp_addr);
3180
3181 for (i = 0; i < sizeof (page_four); i++) {
3182 outb (page_four[i], dev.fx_dsp_lsb);
3183 if (!wffx_idle()) return (-1);
3184 }
3185
3186 /* Load memory area (page six) */
3187
3188 outb (FX_LSB_TRANSFER, dev.fx_lcr);
3189 outb (0x06, dev.fx_dsp_page);
3190
3191 for (i = 0; i < sizeof (page_six); i += 3) {
3192 outb (page_six[i], dev.fx_dsp_addr);
3193 outb (page_six[i+1], dev.fx_dsp_msb);
3194 outb (page_six[i+2], dev.fx_dsp_lsb);
3195 if (!wffx_idle()) return (-1);
3196 }
3197
3198 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3199 outb (0x07, dev.fx_dsp_page);
3200 outb (0x00, dev.fx_dsp_addr);
3201
3202 for (i = 0; i < sizeof (page_seven); i += 2) {
3203 outb (page_seven[i], dev.fx_dsp_msb);
3204 outb (page_seven[i+1], dev.fx_dsp_lsb);
3205 if (!wffx_idle()) return (-1);
3206 }
3207
3208 /* Now setup the MOD area. We do this algorithmically in order to
3209 save a little data space. It could be done in the same fashion
3210 as the "pages".
3211 */
3212
3213 for (i = 0x00; i <= 0x0f; i++) {
3214 outb (0x01, dev.fx_mod_addr);
3215 outb (i, dev.fx_mod_data);
3216 if (!wffx_idle()) return (-1);
3217 outb (0x02, dev.fx_mod_addr);
3218 outb (0x00, dev.fx_mod_data);
3219 if (!wffx_idle()) return (-1);
3220 }
3221
3222 for (i = 0xb0; i <= 0xbf; i++) {
3223 outb (i, dev.fx_mod_addr);
3224 outb (0x20, dev.fx_mod_data);
3225 if (!wffx_idle()) return (-1);
3226 }
3227
3228 for (i = 0xf0; i <= 0xff; i++) {
3229 outb (i, dev.fx_mod_addr);
3230 outb (0x20, dev.fx_mod_data);
3231 if (!wffx_idle()) return (-1);
3232 }
3233
3234 for (i = 0x10; i <= 0x1d; i++) {
3235 outb (i, dev.fx_mod_addr);
3236 outb (0xff, dev.fx_mod_data);
3237 if (!wffx_idle()) return (-1);
3238 }
3239
3240 outb (0x1e, dev.fx_mod_addr);
3241 outb (0x40, dev.fx_mod_data);
3242 if (!wffx_idle()) return (-1);
3243
3244 for (i = 0x1f; i <= 0x2d; i++) {
3245 outb (i, dev.fx_mod_addr);
3246 outb (0xff, dev.fx_mod_data);
3247 if (!wffx_idle()) return (-1);
3248 }
3249
3250 outb (0x2e, dev.fx_mod_addr);
3251 outb (0x00, dev.fx_mod_data);
3252 if (!wffx_idle()) return (-1);
3253
3254 for (i = 0x2f; i <= 0x3e; i++) {
3255 outb (i, dev.fx_mod_addr);
3256 outb (0x00, dev.fx_mod_data);
3257 if (!wffx_idle()) return (-1);
3258 }
3259
3260 outb (0x3f, dev.fx_mod_addr);
3261 outb (0x20, dev.fx_mod_data);
3262 if (!wffx_idle()) return (-1);
3263
3264 for (i = 0x40; i <= 0x4d; i++) {
3265 outb (i, dev.fx_mod_addr);
3266 outb (0x00, dev.fx_mod_data);
3267 if (!wffx_idle()) return (-1);
3268 }
3269
3270 outb (0x4e, dev.fx_mod_addr);
3271 outb (0x0e, dev.fx_mod_data);
3272 if (!wffx_idle()) return (-1);
3273 outb (0x4f, dev.fx_mod_addr);
3274 outb (0x0e, dev.fx_mod_data);
3275 if (!wffx_idle()) return (-1);
3276
3277
3278 for (i = 0x50; i <= 0x6b; i++) {
3279 outb (i, dev.fx_mod_addr);
3280 outb (0x00, dev.fx_mod_data);
3281 if (!wffx_idle()) return (-1);
3282 }
3283
3284 outb (0x6c, dev.fx_mod_addr);
3285 outb (0x40, dev.fx_mod_data);
3286 if (!wffx_idle()) return (-1);
3287
3288 outb (0x6d, dev.fx_mod_addr);
3289 outb (0x00, dev.fx_mod_data);
3290 if (!wffx_idle()) return (-1);
3291
3292 outb (0x6e, dev.fx_mod_addr);
3293 outb (0x40, dev.fx_mod_data);
3294 if (!wffx_idle()) return (-1);
3295
3296 outb (0x6f, dev.fx_mod_addr);
3297 outb (0x40, dev.fx_mod_data);
3298 if (!wffx_idle()) return (-1);
3299
3300 for (i = 0x70; i <= 0x7f; i++) {
3301 outb (i, dev.fx_mod_addr);
3302 outb (0xc0, dev.fx_mod_data);
3303 if (!wffx_idle()) return (-1);
3304 }
3305
3306 for (i = 0x80; i <= 0xaf; i++) {
3307 outb (i, dev.fx_mod_addr);
3308 outb (0x00, dev.fx_mod_data);
3309 if (!wffx_idle()) return (-1);
3310 }
3311
3312 for (i = 0xc0; i <= 0xdd; i++) {
3313 outb (i, dev.fx_mod_addr);
3314 outb (0x00, dev.fx_mod_data);
3315 if (!wffx_idle()) return (-1);
3316 }
3317
3318 outb (0xde, dev.fx_mod_addr);
3319 outb (0x10, dev.fx_mod_data);
3320 if (!wffx_idle()) return (-1);
3321 outb (0xdf, dev.fx_mod_addr);
3322 outb (0x10, dev.fx_mod_data);
3323 if (!wffx_idle()) return (-1);
3324
3325 for (i = 0xe0; i <= 0xef; i++) {
3326 outb (i, dev.fx_mod_addr);
3327 outb (0x00, dev.fx_mod_data);
3328 if (!wffx_idle()) return (-1);
3329 }
3330
3331 for (i = 0x00; i <= 0x0f; i++) {
3332 outb (0x01, dev.fx_mod_addr);
3333 outb (i, dev.fx_mod_data);
3334 outb (0x02, dev.fx_mod_addr);
3335 outb (0x01, dev.fx_mod_data);
3336 if (!wffx_idle()) return (-1);
3337 }
3338
3339 outb (0x02, dev.fx_op); /* mute on */
3340
3341 /* Now set the coefficients and so forth for the programs above */
3342
3343 for (i = 0; i < sizeof (coefficients); i += 4) {
3344 outb (coefficients[i], dev.fx_dsp_page);
3345 outb (coefficients[i+1], dev.fx_dsp_addr);
3346 outb (coefficients[i+2], dev.fx_dsp_msb);
3347 outb (coefficients[i+3], dev.fx_dsp_lsb);
3348 if (!wffx_idle()) return (-1);
3349 }
3350
3351 /* Some settings (?) that are too small to bundle into loops */
3352
3353 if (!wffx_idle()) return (-1);
3354 outb (0x1e, dev.fx_mod_addr);
3355 outb (0x14, dev.fx_mod_data);
3356 if (!wffx_idle()) return (-1);
3357 outb (0xde, dev.fx_mod_addr);
3358 outb (0x20, dev.fx_mod_data);
3359 if (!wffx_idle()) return (-1);
3360 outb (0xdf, dev.fx_mod_addr);
3361 outb (0x20, dev.fx_mod_data);
3362
3363 /* some more coefficients */
3364
3365 if (!wffx_idle()) return (-1);
3366 outb (0x06, dev.fx_dsp_page);
3367 outb (0x78, dev.fx_dsp_addr);
3368 outb (0x00, dev.fx_dsp_msb);
3369 outb (0x40, dev.fx_dsp_lsb);
3370 if (!wffx_idle()) return (-1);
3371 outb (0x07, dev.fx_dsp_page);
3372 outb (0x03, dev.fx_dsp_addr);
3373 outb (0x0f, dev.fx_dsp_msb);
3374 outb (0xff, dev.fx_dsp_lsb);
3375 if (!wffx_idle()) return (-1);
3376 outb (0x07, dev.fx_dsp_page);
3377 outb (0x0b, dev.fx_dsp_addr);
3378 outb (0x0f, dev.fx_dsp_msb);
3379 outb (0xff, dev.fx_dsp_lsb);
3380 if (!wffx_idle()) return (-1);
3381 outb (0x07, dev.fx_dsp_page);
3382 outb (0x02, dev.fx_dsp_addr);
3383 outb (0x00, dev.fx_dsp_msb);
3384 outb (0x00, dev.fx_dsp_lsb);
3385 if (!wffx_idle()) return (-1);
3386 outb (0x07, dev.fx_dsp_page);
3387 outb (0x0a, dev.fx_dsp_addr);
3388 outb (0x00, dev.fx_dsp_msb);
3389 outb (0x00, dev.fx_dsp_lsb);
3390 if (!wffx_idle()) return (-1);
3391 outb (0x07, dev.fx_dsp_page);
3392 outb (0x46, dev.fx_dsp_addr);
3393 outb (0x00, dev.fx_dsp_msb);
3394 outb (0x00, dev.fx_dsp_lsb);
3395 if (!wffx_idle()) return (-1);
3396 outb (0x07, dev.fx_dsp_page);
3397 outb (0x49, dev.fx_dsp_addr);
3398 outb (0x00, dev.fx_dsp_msb);
3399 outb (0x00, dev.fx_dsp_lsb);
3400
3401 /* Now, for some strange reason, lets reload every page
3402 and all the coefficients over again. I have *NO* idea
3403 why this is done. I do know that no sound is produced
 3404 if this phase is omitted.
3405 */
3406
3407 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3408 outb (0x00, dev.fx_dsp_page);
3409 outb (0x10, dev.fx_dsp_addr);
3410
3411 for (i = 0; i < sizeof (page_zero_v2); i += 2) {
3412 outb (page_zero_v2[i], dev.fx_dsp_msb);
3413 outb (page_zero_v2[i+1], dev.fx_dsp_lsb);
3414 if (!wffx_idle()) return (-1);
3415 }
3416
3417 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3418 outb (0x01, dev.fx_dsp_page);
3419 outb (0x10, dev.fx_dsp_addr);
3420
3421 for (i = 0; i < sizeof (page_one_v2); i += 2) {
3422 outb (page_one_v2[i], dev.fx_dsp_msb);
3423 outb (page_one_v2[i+1], dev.fx_dsp_lsb);
3424 if (!wffx_idle()) return (-1);
3425 }
3426
3427 if (!wffx_idle()) return (-1);
3428 if (!wffx_idle()) return (-1);
3429
3430 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3431 outb (0x02, dev.fx_dsp_page);
3432 outb (0x10, dev.fx_dsp_addr);
3433
3434 for (i = 0; i < sizeof (page_two_v2); i++) {
3435 outb (page_two_v2[i], dev.fx_dsp_lsb);
3436 if (!wffx_idle()) return (-1);
3437 }
3438 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3439 outb (0x03, dev.fx_dsp_page);
3440 outb (0x10, dev.fx_dsp_addr);
3441
3442 for (i = 0; i < sizeof (page_three_v2); i++) {
3443 outb (page_three_v2[i], dev.fx_dsp_lsb);
3444 if (!wffx_idle()) return (-1);
3445 }
3446
3447 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3448 outb (0x04, dev.fx_dsp_page);
3449 outb (0x10, dev.fx_dsp_addr);
3450
3451 for (i = 0; i < sizeof (page_four_v2); i++) {
3452 outb (page_four_v2[i], dev.fx_dsp_lsb);
3453 if (!wffx_idle()) return (-1);
3454 }
3455
3456 outb (FX_LSB_TRANSFER, dev.fx_lcr);
3457 outb (0x06, dev.fx_dsp_page);
3458
3459 /* Page six v.2 is algorithmic */
3460
3461 for (i = 0x10; i <= 0x3e; i += 2) {
3462 outb (i, dev.fx_dsp_addr);
3463 outb (0x00, dev.fx_dsp_msb);
3464 outb (0x00, dev.fx_dsp_lsb);
3465 if (!wffx_idle()) return (-1);
3466 }
3467
3468 outb (FX_AUTO_INCR|FX_LSB_TRANSFER, dev.fx_lcr);
3469 outb (0x07, dev.fx_dsp_page);
3470 outb (0x10, dev.fx_dsp_addr);
3471
3472 for (i = 0; i < sizeof (page_seven_v2); i += 2) {
3473 outb (page_seven_v2[i], dev.fx_dsp_msb);
3474 outb (page_seven_v2[i+1], dev.fx_dsp_lsb);
3475 if (!wffx_idle()) return (-1);
3476 }
3477
3478 for (i = 0x00; i < sizeof(mod_v2); i += 2) {
3479 outb (mod_v2[i], dev.fx_mod_addr);
3480 outb (mod_v2[i+1], dev.fx_mod_data);
3481 if (!wffx_idle()) return (-1);
3482 }
3483
3484 for (i = 0; i < sizeof (coefficients2); i += 4) {
3485 outb (coefficients2[i], dev.fx_dsp_page);
3486 outb (coefficients2[i+1], dev.fx_dsp_addr);
3487 outb (coefficients2[i+2], dev.fx_dsp_msb);
3488 outb (coefficients2[i+3], dev.fx_dsp_lsb);
3489 if (!wffx_idle()) return (-1);
3490 }
3491
3492 for (i = 0; i < sizeof (coefficients3); i += 2) {
3493 int x;
3494
3495 outb (0x07, dev.fx_dsp_page);
3496 x = (i % 4) ? 0x4e : 0x4c;
3497 outb (x, dev.fx_dsp_addr);
3498 outb (coefficients3[i], dev.fx_dsp_msb);
3499 outb (coefficients3[i+1], dev.fx_dsp_lsb);
3500 }
3501
3502 outb (0x00, dev.fx_op); /* mute off */
3503 if (!wffx_idle()) return (-1);
3504
3505 return (0);
3506}
3507
/* ISA resource settings; -1 means "not configured".  init_wavfront()
   rejects -1, so both must be supplied as module parameters. */
static int io = -1;	/* base I/O port of the WaveFront (no default) */
static int irq = -1;	/* interrupt line; must be 9, 5, 12 or 15 */

MODULE_AUTHOR ("Paul Barton-Davis <pbd@op.net>");
MODULE_DESCRIPTION ("Turtle Beach WaveFront Linux Driver");
MODULE_LICENSE("GPL");
module_param (io, int, 0);	/* perm 0: not exposed in sysfs */
module_param (irq, int, 0);
3516
3517static int __init init_wavfront (void)
3518{
3519 printk ("Turtle Beach WaveFront Driver\n"
3520 "Copyright (C) by Hannu Solvainen, "
3521 "Paul Barton-Davis 1993-1998.\n");
3522
3523 /* XXX t'would be lovely to ask the CS4232 for these values, eh ? */
3524
3525 if (io == -1 || irq == -1) {
3526 printk (KERN_INFO LOGNAME "irq and io options must be set.\n");
3527 return -EINVAL;
3528 }
3529
3530 if (wavefront_interrupt_bits (irq) < 0) {
3531 printk (KERN_INFO LOGNAME
3532 "IRQ must be 9, 5, 12 or 15 (not %d)\n", irq);
3533 return -ENODEV;
3534 }
3535
3536 if (detect_wavefront (irq, io) < 0) {
3537 return -ENODEV;
3538 }
3539
3540 if (install_wavefront () < 0) {
3541 return -EIO;
3542 }
3543
3544 return 0;
3545}
3546
/* Module exit: tear down everything install_wavefront() set up. */
static void __exit cleanup_wavfront (void)
{
	uninstall_wavefront ();
}

module_init(init_wavfront);
module_exit(cleanup_wavfront);
diff --git a/sound/oss/wf_midi.c b/sound/oss/wf_midi.c
deleted file mode 100644
index 75c0c143a759..000000000000
--- a/sound/oss/wf_midi.c
+++ /dev/null
@@ -1,880 +0,0 @@
1/*
2 * sound/oss/wf_midi.c
3 *
4 * The low level driver for the WaveFront ICS2115 MIDI interface(s)
5 * Note that there is also an MPU-401 emulation (actually, a UART-401
6 * emulation) on the CS4232 on the Tropez Plus. This code has nothing
7 * to do with that interface at all.
8 *
 9 * The interface is essentially just a UART-401, but it has the
10 * interesting property of supporting what Turtle Beach called
11 * "Virtual MIDI" mode. In this mode, there are effectively *two*
12 * MIDI buses accessible via the interface, one that is routed
13 * solely to/from the external WaveFront synthesizer and the other
14 * corresponding to the pin/socket connector used to link external
15 * MIDI devices to the board.
16 *
17 * This driver fully supports this mode, allowing two distinct
18 * midi devices (/dev/midiNN and /dev/midiNN+1) to be used
19 * completely independently, giving 32 channels of MIDI routing,
20 * 16 to the WaveFront synth and 16 to the external MIDI bus.
21 *
22 * Switching between the two is accomplished externally by the driver
23 * using the two otherwise unused MIDI bytes. See the code for more details.
24 *
25 * NOTE: VIRTUAL MIDI MODE IS ON BY DEFAULT (see wavefront.c)
26 *
27 * The main reason to turn off Virtual MIDI mode is when you want to
28 * tightly couple the WaveFront synth with an external MIDI
29 * device. You won't be able to distinguish the source of any MIDI
 30 * data except via SysEx ID, but that's probably OK, since for the most
31 * part, the WaveFront won't be sending any MIDI data at all.
32 *
33 * The main reason to turn on Virtual MIDI Mode is to provide two
34 * completely independent 16-channel MIDI buses, one to the
35 * WaveFront and one to any external MIDI devices. Given the 32
 36 * voice nature of the WaveFront, it's pretty easy to find a use
37 * for all 16 channels driving just that synth.
38 *
39 */
40
41/*
42 * Copyright (C) by Paul Barton-Davis 1998
43 * Some portions of this file are derived from work that is:
44 *
 45 * Copyright (C) by Hannu Savolainen 1993-1996
46 *
47 * USS/Lite for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
48 * Version 2 (June 1991). See the "COPYING" file distributed with this software
49 * for more info.
50 */
51
52#include <linux/init.h>
53#include <linux/interrupt.h>
54#include <linux/spinlock.h>
55#include "sound_config.h"
56
57#include <linux/wavefront.h>
58
59#ifdef MODULE
60
/* Per-interface state.  Two instances exist (see devs[] below): one for
   the physical MIDI connector and one for the "virtual" stream that is
   multiplexed over the same UART (see file header). */
struct wf_mpu_config {
	int base;	/* base I/O port of the UART */
#define	DATAPORT(d)   (d)->base		/* data register */
#define	COMDPORT(d)   (d)->base+1	/* command register (write side) */
#define	STATPORT(d)   (d)->base+1	/* status register (read side) */

	int irq;	/* shared with the physical device */
	int opened;	/* OPEN_* mode flags; 0 == closed */
	int devno;	/* midi_devs[] slot, -1 when unallocated */
	int synthno;	/* synth_devs[] slot when in MODE_SYNTH */
	int mode;
#define MODE_MIDI	1	/* raw MIDI pass-through to inputintr */
#define MODE_SYNTH	2	/* input parsed by wf_mpu_input_scanner() */

	void (*inputintr) (int dev, unsigned char data);	/* rx callback */
	char isvirtual;                /* do virtual I/O stuff */
};
78
/* devs[0] is always the physical connector; devs[1] is the virtual
   stream brought up by virtual_midi_enable(). */
static struct wf_mpu_config devs[2];
static struct wf_mpu_config *phys_dev = &devs[0];
static struct wf_mpu_config *virt_dev = &devs[1];

static void start_uart_mode (void);
static DEFINE_SPINLOCK(lock);	/* guards UART port access + input state */

/* UART status bits (active-low — see input_avail()/output_ready()),
   the MPU ACK byte, and the "enter UART mode" command. */
#define	OUTPUT_READY	0x40
#define	INPUT_AVAIL	0x80
#define	MPU_ACK		0xFE
#define	UART_MODE_ON	0x3F
90
91static inline int wf_mpu_status (void)
92{
93 return inb (STATPORT (phys_dev));
94}
95
96static inline int input_avail (void)
97{
98 return !(wf_mpu_status() & INPUT_AVAIL);
99}
100
101static inline int output_ready (void)
102{
103 return !(wf_mpu_status() & OUTPUT_READY);
104}
105
/* Fetch the next received MIDI byte from the UART data register. */
static inline int read_data (void)
{
	return inb (DATAPORT (phys_dev));
}
110
/* Send one MIDI byte out through the UART data register. */
static inline void write_data (unsigned char byte)
{
	outb (byte, DATAPORT (phys_dev));
}
115
/*
 * States for the input scanner (should be in dev_table.h)
 */

#define MST_SYSMSG	100	/* System message (sysx etc). */
#define MST_MTC		102	/* Midi Time Code (MTC) qframe msg */
#define MST_SONGSEL	103	/* Song select */
#define MST_SONGPOS	104	/* Song position pointer */
#define MST_TIMED	105	/* Leading timing byte rcvd */

/* Buffer space check for the input scanner.  Fix: wrapped in
   do { ... } while (0) so the macro expands to exactly one statement
   and cannot misbind to a surrounding if/else (the old bare
   "if (...) { ... }" form would swallow a following else). */

#define BUFTEST(mi) do { \
	if (mi->m_ptr >= MI_MAX || mi->m_ptr < 0) { \
		printk(KERN_ERR "WF-MPU: Invalid buffer pointer %d/%d, s=%d\n", \
		       mi->m_ptr, mi->m_left, mi->m_state); \
		mi->m_ptr--; \
	} \
} while (0)
131
/* Number of data bytes that follow each channel-voice status byte,
   indexed by (status >> 4) - 8. */
static unsigned char len_tab[] =	/* # of data bytes following a status
					 */
{
	2,			/* 8x = note off */
	2,			/* 9x = note on */
	2,			/* Ax = poly aftertouch */
	2,			/* Bx = control change */
	1,			/* Cx = program change */
	1,			/* Dx = channel aftertouch */
	2,			/* Ex = pitch bend */
	0			/* Fx = system (length varies) */
};
144
/*
 * MIDI input state machine, used when the receiving interface is in
 * MODE_SYNTH.  Consumes one raw byte per call (including MPU-401 style
 * timing/mark bytes), assembles complete channel messages in mi->m_buf
 * and delivers them via do_midi_msg().  Returns MPU_ACK when an ACK
 * byte arrives in the idle state, otherwise 1.
 */
static int
wf_mpu_input_scanner (int devno, int synthdev, unsigned char midic)

{
	struct midi_input_info *mi = &midi_devs[devno]->in_info;

	switch (mi->m_state) {
	case MST_INIT:
		switch (midic) {
		case 0xf8:
			/* Timer overflow */
			break;

		case 0xfc:
			break;

		case 0xfd:
			/* XXX do something useful with this. If there is
			   an external MIDI timer (e.g. a hardware sequencer,
			   a useful timer can be derived ...

			   For now, no timer support.
			*/
			break;

		case 0xfe:
			/* MPU ACK byte: report it to the caller. */
			return MPU_ACK;
			break;

		case 0xf0:
		case 0xf1:
		case 0xf2:
		case 0xf3:
		case 0xf4:
		case 0xf5:
		case 0xf6:
		case 0xf7:
			/* common MIDI ignored while idle */
			break;

		case 0xf9:
			break;

		case 0xff:
			/* MPU-401 convention: 0xff introduces a system msg */
			mi->m_state = MST_SYSMSG;
			break;

		default:
			if (midic <= 0xef) {
				/* Anything below 0xf0 here is a timing byte
				   preceding a (possibly running-status)
				   event. */
				mi->m_state = MST_TIMED;
			}
			else
				printk (KERN_ERR "<MPU: Unknown event %02x> ",
					midic);
		}
		break;

	case MST_TIMED:
	{
		int msg = ((int) (midic & 0xf0) >> 4);

		mi->m_state = MST_DATA;

		if (msg < 8) {	/* Data byte */

			/* Running status: reuse the previous status byte. */
			msg = ((int) (mi->m_prev_status & 0xf0) >> 4);
			msg -= 8;
			mi->m_left = len_tab[msg] - 1;

			mi->m_ptr = 2;
			mi->m_buf[0] = mi->m_prev_status;
			mi->m_buf[1] = midic;

			if (mi->m_left <= 0) {
				/* message complete already */
				mi->m_state = MST_INIT;
				do_midi_msg (synthdev, mi->m_buf, mi->m_ptr);
				mi->m_ptr = 0;
			}
		} else if (msg == 0xf) {	/* MPU MARK */

			mi->m_state = MST_INIT;

			switch (midic) {
			case 0xf8:
				break;

			case 0xf9:
				break;

			case 0xfc:
				break;

			default:
				break;
			}
		} else {
			/* New status byte: remember it for running status
			   and start collecting its data bytes. */
			mi->m_prev_status = midic;
			msg -= 8;
			mi->m_left = len_tab[msg];

			mi->m_ptr = 1;
			mi->m_buf[0] = midic;

			if (mi->m_left <= 0) {
				mi->m_state = MST_INIT;
				do_midi_msg (synthdev, mi->m_buf, mi->m_ptr);
				mi->m_ptr = 0;
			}
		}
	}
	break;

	case MST_SYSMSG:
		switch (midic) {
		case 0xf0:
			mi->m_state = MST_SYSEX;
			break;

		case 0xf1:
			mi->m_state = MST_MTC;
			break;

		case 0xf2:
			mi->m_state = MST_SONGPOS;
			mi->m_ptr = 0;
			break;

		case 0xf3:
			mi->m_state = MST_SONGSEL;
			break;

		case 0xf6:
			mi->m_state = MST_INIT;

			/* NOTE(review): no break here — 0xf6 falls through
			   into the 0xf8 case below.  The net effect is the
			   same (state = MST_INIT), but confirm the
			   fallthrough is intentional. */
			/*
			 *	Real time messages
			 */
		case 0xf8:
			/* midi clock */
			mi->m_state = MST_INIT;
			/* XXX need ext MIDI timer support */
			break;

		case 0xfA:
			mi->m_state = MST_INIT;
			/* XXX need ext MIDI timer support */
			break;

		case 0xFB:
			mi->m_state = MST_INIT;
			/* XXX need ext MIDI timer support */
			break;

		case 0xFC:
			mi->m_state = MST_INIT;
			/* XXX need ext MIDI timer support */
			break;

		case 0xFE:
			/* active sensing */
			mi->m_state = MST_INIT;
			break;

		case 0xff:
			mi->m_state = MST_INIT;
			break;

		default:
			printk (KERN_ERR "unknown MIDI sysmsg %0x\n", midic);
			mi->m_state = MST_INIT;
		}
		break;

	case MST_MTC:
		/* MTC quarter-frame data byte: consumed and discarded. */
		mi->m_state = MST_INIT;
		break;

	case MST_SYSEX:
		/* SysEx payload is discarded; only the terminator matters. */
		if (midic == 0xf7) {
			mi->m_state = MST_INIT;
		} else {
			/* XXX fix me */
		}
		break;

	case MST_SONGPOS:
		BUFTEST (mi);
		mi->m_buf[mi->m_ptr++] = midic;
		if (mi->m_ptr == 2) {
			/* both position bytes collected */
			mi->m_state = MST_INIT;
			mi->m_ptr = 0;
			/* XXX need ext MIDI timer support */
		}
		break;

	case MST_DATA:
		BUFTEST (mi);
		mi->m_buf[mi->m_ptr++] = midic;
		if ((--mi->m_left) <= 0) {
			/* message complete: hand it off */
			mi->m_state = MST_INIT;
			do_midi_msg (synthdev, mi->m_buf, mi->m_ptr);
			mi->m_ptr = 0;
		}
		break;

	default:
		printk (KERN_ERR "Bad state %d ", mi->m_state);
		mi->m_state = MST_INIT;
	}

	return 1;
}
356
/*
 * Shared interrupt handler for the physical UART.  Drains pending
 * input (bounded at ~50 bytes per invocation), demultiplexing bytes
 * between the physical and virtual devices via the WF_*_SWITCH marker
 * bytes when virtual mode is active, then dispatching each byte either
 * to the synth input scanner or to the client's inputintr callback.
 */
static irqreturn_t
wf_mpuintr(int irq, void *dev_id, struct pt_regs *dummy)

{
	struct wf_mpu_config *physical_dev = dev_id;
	/* sticky routing target, preserved across interrupts */
	static struct wf_mpu_config *input_dev;
	struct midi_input_info *mi = &midi_devs[physical_dev->devno]->in_info;
	int n;

	if (!input_avail()) { /* not for us */
		return IRQ_NONE;
	}

	/* already draining input elsewhere; don't re-enter */
	if (mi->m_busy)
		return IRQ_HANDLED;
	spin_lock(&lock);
	mi->m_busy = 1;

	if (!input_dev) {
		input_dev = physical_dev;
	}

	n = 50; /* XXX why ? */

	do {
		unsigned char c = read_data ();

		if (phys_dev->isvirtual) {

			/* Switch bytes are consumed here; they change which
			   device the following bytes are routed to. */
			if (c == WF_EXTERNAL_SWITCH) {
				input_dev = virt_dev;
				continue;
			} else if (c == WF_INTERNAL_SWITCH) {
				input_dev = phys_dev;
				continue;
			} /* else just leave it as it is */

		} else {
			input_dev = phys_dev;
		}

		if (input_dev->mode == MODE_SYNTH) {

			wf_mpu_input_scanner (input_dev->devno,
					      input_dev->synthno, c);

		} else if (input_dev->opened & OPEN_READ) {

			/* raw MIDI mode: hand the byte to the client */
			if (input_dev->inputintr) {
				input_dev->inputintr (input_dev->devno, c);
			}
		}

	} while (input_avail() && n-- > 0);

	mi->m_busy = 0;
	spin_unlock(&lock);
	return IRQ_HANDLED;
}
416
417static int
418wf_mpu_open (int dev, int mode,
419 void (*input) (int dev, unsigned char data),
420 void (*output) (int dev)
421 )
422{
423 struct wf_mpu_config *devc;
424
425 if (dev < 0 || dev >= num_midis || midi_devs[dev]==NULL)
426 return -(ENXIO);
427
428 if (phys_dev->devno == dev) {
429 devc = phys_dev;
430 } else if (phys_dev->isvirtual && virt_dev->devno == dev) {
431 devc = virt_dev;
432 } else {
433 printk (KERN_ERR "WF-MPU: unknown device number %d\n", dev);
434 return -(EINVAL);
435 }
436
437 if (devc->opened) {
438 return -(EBUSY);
439 }
440
441 devc->mode = MODE_MIDI;
442 devc->opened = mode;
443 devc->synthno = 0;
444
445 devc->inputintr = input;
446 return 0;
447}
448
449static void
450wf_mpu_close (int dev)
451{
452 struct wf_mpu_config *devc;
453
454 if (dev < 0 || dev >= num_midis || midi_devs[dev]==NULL)
455 return;
456
457 if (phys_dev->devno == dev) {
458 devc = phys_dev;
459 } else if (phys_dev->isvirtual && virt_dev->devno == dev) {
460 devc = virt_dev;
461 } else {
462 printk (KERN_ERR "WF-MPU: unknown device number %d\n", dev);
463 return;
464 }
465
466 devc->mode = 0;
467 devc->inputintr = NULL;
468 devc->opened = 0;
469}
470
/*
 * Transmit one MIDI byte on device "dev".  In virtual-MIDI mode a
 * WF_INTERNAL_SWITCH/WF_EXTERNAL_SWITCH marker byte is sent first
 * whenever the output target changes.  Busy-waits (bounded) for the
 * UART; returns 1 on success, 0 on timeout or a bad device number.
 */
static int
wf_mpu_out (int dev, unsigned char midi_byte)
{
	int timeout;
	unsigned long flags;
	static int lastoutdev = -1;	/* device the previous byte went to */
	unsigned char switchch;

	if (phys_dev->isvirtual && lastoutdev != dev) {

		/* target changed: select the matching routing byte */
		if (dev == phys_dev->devno) {
			switchch = WF_INTERNAL_SWITCH;
		} else if (dev == virt_dev->devno) {
			switchch = WF_EXTERNAL_SWITCH;
		} else {
			printk (KERN_ERR "WF-MPU: bad device number %d", dev);
			return (0);
		}

		/* XXX fix me */

		/* spin outside the lock first so it isn't held during the
		   potentially long post-reset wait */
		for (timeout = 30000; timeout > 0 && !output_ready ();
		     timeout--);

		spin_lock_irqsave(&lock,flags);

		if (!output_ready ()) {
			printk (KERN_WARNING "WF-MPU: Send switch "
				"byte timeout\n");
			spin_unlock_irqrestore(&lock,flags);
			return 0;
		}

		write_data (switchch);
		spin_unlock_irqrestore(&lock,flags);
	}

	lastoutdev = dev;

	/*
	 * Sometimes it takes about 30000 loops before the output becomes ready
	 * (After reset). Normally it takes just about 10 loops.
	 */

	/* XXX fix me */

	for (timeout = 30000; timeout > 0 && !output_ready (); timeout--);

	spin_lock_irqsave(&lock,flags);
	if (!output_ready ()) {
		spin_unlock_irqrestore(&lock,flags);
		printk (KERN_WARNING "WF-MPU: Send data timeout\n");
		return 0;
	}

	write_data (midi_byte);
	spin_unlock_irqrestore(&lock,flags);

	return 1;
}
531
/* Reads are interrupt-driven; there is nothing to start.  Always 0. */
static inline int wf_mpu_start_read (int dev) {
	(void) dev;	/* unused */
	return 0;
}
535
/* Nothing to tear down when a read finishes.  Always 0. */
static inline int wf_mpu_end_read (int dev) {
	(void) dev;	/* unused */
	return 0;
}
539
/* The ICS2115 UART emulation has no "intelligent" (smart MPU-401) mode,
   so every MIDI ioctl is rejected. */
static int wf_mpu_ioctl (int dev, unsigned cmd, void __user *arg)
{
	printk (KERN_WARNING
		"WF-MPU: Intelligent mode not supported by hardware.\n");
	return -(EINVAL);
}
546
/* No output buffering is done at this level, so nothing is ever
   pending.  Always 0. */
static int wf_mpu_buffer_status (int dev)
{
	(void) dev;	/* unused */
	return 0;
}
551
/* Per-index (0 = physical, 1 = virtual) operation tables, populated
   from the *_proto templates by config_wf_mpu(). */
static struct synth_operations wf_mpu_synth_operations[2];
static struct midi_operations wf_mpu_midi_operations[2];

/* Template midi_operations shared by both interfaces. */
static struct midi_operations wf_mpu_midi_proto =
{
	.owner		= THIS_MODULE,
	.info		= {"WF-MPU MIDI", 0, MIDI_CAP_MPU401, SNDCARD_MPU401},
	.in_info	= {0},	/* in_info */
	.open		= wf_mpu_open,
	.close		= wf_mpu_close,
	.ioctl		= wf_mpu_ioctl,
	.outputc	= wf_mpu_out,
	.start_read	= wf_mpu_start_read,
	.end_read	= wf_mpu_end_read,
	.buffer_status	= wf_mpu_buffer_status,
};

/* Template synth_info record; the name is overwritten per interface
   in config_wf_mpu(). */
static struct synth_info wf_mpu_synth_info_proto =
{"WaveFront MPU-401 interface", 0,
 SYNTH_TYPE_MIDI, MIDI_TYPE_MPU401, 0, 128, 0, 128, SYNTH_CAP_INPUT};

static struct synth_info wf_mpu_synth_info[2];
574
575static int
576wf_mpu_synth_ioctl (int dev, unsigned int cmd, void __user *arg)
577{
578 int midi_dev;
579 int index;
580
581 midi_dev = synth_devs[dev]->midi_dev;
582
583 if (midi_dev < 0 || midi_dev > num_midis || midi_devs[midi_dev]==NULL)
584 return -(ENXIO);
585
586 if (midi_dev == phys_dev->devno) {
587 index = 0;
588 } else if (phys_dev->isvirtual && midi_dev == virt_dev->devno) {
589 index = 1;
590 } else {
591 return -(EINVAL);
592 }
593
594 switch (cmd) {
595
596 case SNDCTL_SYNTH_INFO:
597 if (copy_to_user(arg,
598 &wf_mpu_synth_info[index],
599 sizeof (struct synth_info)))
600 return -EFAULT;
601 return 0;
602
603 case SNDCTL_SYNTH_MEMAVL:
604 return 0x7fffffff;
605
606 default:
607 return -EINVAL;
608 }
609}
610
611static int
612wf_mpu_synth_open (int dev, int mode)
613{
614 int midi_dev;
615 struct wf_mpu_config *devc;
616
617 midi_dev = synth_devs[dev]->midi_dev;
618
619 if (midi_dev < 0 || midi_dev > num_midis || midi_devs[midi_dev]==NULL) {
620 return -(ENXIO);
621 }
622
623 if (phys_dev->devno == midi_dev) {
624 devc = phys_dev;
625 } else if (phys_dev->isvirtual && virt_dev->devno == midi_dev) {
626 devc = virt_dev;
627 } else {
628 printk (KERN_ERR "WF-MPU: unknown device number %d\n", dev);
629 return -(EINVAL);
630 }
631
632 if (devc->opened) {
633 return -(EBUSY);
634 }
635
636 devc->mode = MODE_SYNTH;
637 devc->synthno = dev;
638 devc->opened = mode;
639 devc->inputintr = NULL;
640 return 0;
641}
642
643static void
644wf_mpu_synth_close (int dev)
645{
646 int midi_dev;
647 struct wf_mpu_config *devc;
648
649 midi_dev = synth_devs[dev]->midi_dev;
650
651 if (phys_dev->devno == midi_dev) {
652 devc = phys_dev;
653 } else if (phys_dev->isvirtual && virt_dev->devno == midi_dev) {
654 devc = virt_dev;
655 } else {
656 printk (KERN_ERR "WF-MPU: unknown device number %d\n", dev);
657 return;
658 }
659
660 devc->inputintr = NULL;
661 devc->opened = 0;
662 devc->mode = 0;
663}
664
665#define _MIDI_SYNTH_C_
666#define MIDI_SYNTH_NAME "WaveFront (MIDI)"
667#define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT
668#include "midi_synth.h"
669
/* Synth operations template for the physical interface.  open/close/
   ioctl are implemented locally; all note/controller handling is
   delegated to the generic midi_synth_* glue from midi_synth.h. */
static struct synth_operations wf_mpu_synth_proto =
{
	.owner		= THIS_MODULE,
	.id		= "WaveFront (ICS2115)",
	.info		= NULL, /* info field, filled in during configuration */
	.midi_dev	= 0, /* MIDI dev XXX should this be -1 ? */
	.synth_type	= SYNTH_TYPE_MIDI,
	.synth_subtype	= SAMPLE_TYPE_WAVEFRONT,
	.open		= wf_mpu_synth_open,
	.close		= wf_mpu_synth_close,
	.ioctl		= wf_mpu_synth_ioctl,
	.kill_note	= midi_synth_kill_note,
	.start_note	= midi_synth_start_note,
	.set_instr	= midi_synth_set_instr,
	.reset		= midi_synth_reset,
	.hw_control	= midi_synth_hw_control,
	.load_patch	= midi_synth_load_patch,
	.aftertouch	= midi_synth_aftertouch,
	.controller	= midi_synth_controller,
	.panning	= midi_synth_panning,
	.bender		= midi_synth_bender,
	.setup_voice	= midi_synth_setup_voice,
	.send_sysex	= midi_synth_send_sysex
};
694
695static int
696config_wf_mpu (struct wf_mpu_config *dev)
697
698{
699 int is_external;
700 char *name;
701 int index;
702
703 if (dev == phys_dev) {
704 name = "WaveFront internal MIDI";
705 is_external = 0;
706 index = 0;
707 memcpy ((char *) &wf_mpu_synth_operations[index],
708 (char *) &wf_mpu_synth_proto,
709 sizeof (struct synth_operations));
710 } else {
711 name = "WaveFront external MIDI";
712 is_external = 1;
713 index = 1;
714 /* no synth operations for an external MIDI interface */
715 }
716
717 memcpy ((char *) &wf_mpu_synth_info[dev->devno],
718 (char *) &wf_mpu_synth_info_proto,
719 sizeof (struct synth_info));
720
721 strcpy (wf_mpu_synth_info[index].name, name);
722
723 wf_mpu_synth_operations[index].midi_dev = dev->devno;
724 wf_mpu_synth_operations[index].info = &wf_mpu_synth_info[index];
725
726 memcpy ((char *) &wf_mpu_midi_operations[index],
727 (char *) &wf_mpu_midi_proto,
728 sizeof (struct midi_operations));
729
730 if (is_external) {
731 wf_mpu_midi_operations[index].converter = NULL;
732 } else {
733 wf_mpu_midi_operations[index].converter =
734 &wf_mpu_synth_operations[index];
735 }
736
737 strcpy (wf_mpu_midi_operations[index].info.name, name);
738
739 midi_devs[dev->devno] = &wf_mpu_midi_operations[index];
740 midi_devs[dev->devno]->in_info.m_busy = 0;
741 midi_devs[dev->devno]->in_info.m_state = MST_INIT;
742 midi_devs[dev->devno]->in_info.m_ptr = 0;
743 midi_devs[dev->devno]->in_info.m_left = 0;
744 midi_devs[dev->devno]->in_info.m_prev_status = 0;
745
746 devs[index].opened = 0;
747 devs[index].mode = 0;
748
749 return (0);
750}
751
752int virtual_midi_enable (void)
753
754{
755 if ((virt_dev->devno < 0) &&
756 (virt_dev->devno = sound_alloc_mididev()) == -1) {
757 printk (KERN_ERR
758 "WF-MPU: too many midi devices detected\n");
759 return -1;
760 }
761
762 config_wf_mpu (virt_dev);
763
764 phys_dev->isvirtual = 1;
765 return virt_dev->devno;
766}
767
/* Turn off virtual MIDI mode: close the virtual device and stop
   demultiplexing input.  Always returns 0.  The lock keeps the
   interrupt handler from routing bytes while the mode flips. */
int
virtual_midi_disable (void)

{
	unsigned long flags;

	spin_lock_irqsave(&lock,flags);

	wf_mpu_close (virt_dev->devno);
	/* no synth on virt_dev, so no need to call wf_mpu_synth_close() */
	phys_dev->isvirtual = 0;

	spin_unlock_irqrestore(&lock,flags);

	return 0;
}
784
785int __init detect_wf_mpu (int irq, int io_base)
786{
787 if (!request_region(io_base, 2, "wavefront midi")) {
788 printk (KERN_WARNING "WF-MPU: I/O port %x already in use.\n",
789 io_base);
790 return -1;
791 }
792
793 phys_dev->base = io_base;
794 phys_dev->irq = irq;
795 phys_dev->devno = -1;
796 virt_dev->devno = -1;
797
798 return 0;
799}
800
801int __init install_wf_mpu (void)
802{
803 if ((phys_dev->devno = sound_alloc_mididev()) < 0){
804
805 printk (KERN_ERR "WF-MPU: Too many MIDI devices detected.\n");
806 release_region(phys_dev->base, 2);
807 return -1;
808 }
809
810 phys_dev->isvirtual = 0;
811
812 if (config_wf_mpu (phys_dev)) {
813
814 printk (KERN_WARNING
815 "WF-MPU: configuration for MIDI device %d failed\n",
816 phys_dev->devno);
817 sound_unload_mididev (phys_dev->devno);
818
819 }
820
821 /* OK, now we're configured to handle an interrupt ... */
822
823 if (request_irq (phys_dev->irq, wf_mpuintr, IRQF_DISABLED|IRQF_SHARED,
824 "wavefront midi", phys_dev) < 0) {
825
826 printk (KERN_ERR "WF-MPU: Failed to allocate IRQ%d\n",
827 phys_dev->irq);
828 return -1;
829
830 }
831
832 /* This being a WaveFront (ICS-2115) emulated MPU-401, we have
833 to switch it into UART (dumb) mode, because otherwise, it
834 won't do anything at all.
835 */
836
837 start_uart_mode ();
838
839 return phys_dev->devno;
840}
841
/* Release everything install_wf_mpu()/virtual_midi_enable() acquired.
   NOTE(review): the I/O region is released before the IRQ is freed, so
   the handler could still run briefly against released ports; appears
   harmless for port I/O, but confirm. */
void
uninstall_wf_mpu (void)

{
	release_region (phys_dev->base, 2);
	free_irq (phys_dev->irq, phys_dev);
	sound_unload_mididev (phys_dev->devno);

	/* virtual device exists only if virtual_midi_enable() ran */
	if (virt_dev->devno >= 0) {
		sound_unload_mididev (virt_dev->devno);
	}
}
854
/* Switch the ICS2115 MPU-401 emulation into UART ("dumb") mode; it is
   useless in its default mode.  Busy-waits (bounded) for the command
   port to free up and then for the MPU_ACK reply. */
static void
start_uart_mode (void)

{
	int ok, i;
	unsigned long flags;

	spin_lock_irqsave(&lock,flags);

	/* XXX fix me */

	for (i = 0; i < 30000 && !output_ready (); i++);

	outb (UART_MODE_ON, COMDPORT(phys_dev));

	/* wait for the emulation to acknowledge the mode switch */
	for (ok = 0, i = 50000; i > 0 && !ok; i--) {
		if (input_avail ()) {
			if (read_data () == MPU_ACK) {
				ok = 1;
			}
		}
	}

	spin_unlock_irqrestore(&lock,flags);
}
880#endif
diff --git a/sound/oss/ymfpci.c b/sound/oss/ymfpci.c
deleted file mode 100644
index 4c89af9ea03c..000000000000
--- a/sound/oss/ymfpci.c
+++ /dev/null
@@ -1,2691 +0,0 @@
1/*
2 * Copyright 1999 Jaroslav Kysela <perex@suse.cz>
3 * Copyright 2000 Alan Cox <alan@redhat.com>
4 * Copyright 2001 Kai Germaschewski <kai@tp1.ruhr-uni-bochum.de>
5 * Copyright 2002 Pete Zaitcev <zaitcev@yahoo.com>
6 *
7 * Yamaha YMF7xx driver.
8 *
9 * This code is a result of high-speed collision
10 * between ymfpci.c of ALSA and cs46xx.c of Linux.
11 * -- Pete Zaitcev <zaitcev@yahoo.com>; 2000/09/18
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 * TODO:
28 * - Use P44Slot for 44.1 playback (beware of idle buzzing in P44Slot).
29 * - 96KHz playback for DVD - use pitch of 2.0.
30 * - Retain DMA buffer on close, do not wait the end of frame.
31 * - Resolve XXX tagged questions.
32 * - Cannot play 5133Hz.
33 * - 2001/01/07 Consider if we can remove voice_lock, like so:
 *    : Allocate/deallocate voices in open/close under semaphore.
35 * : We access voices in interrupt, that only for pcms that open.
36 * voice_lock around playback_prepare closes interrupts for insane duration.
37 * - Revisit the way voice_alloc is done - too confusing, overcomplicated.
38 * Should support various channel types, however.
39 * - Remove prog_dmabuf from read/write, leave it in open.
40 * - 2001/01/07 Replace the OPL3 part of CONFIG_SOUND_YMFPCI_LEGACY code with
41 * native synthesizer through a playback slot.
42 * - 2001/11/29 ac97_save_state
43 * Talk to Kai to remove ac97_save_state before it's too late!
44 * - Second AC97
45 * - Restore S/PDIF - Toshibas have it.
46 *
47 * Kai used pci_alloc_consistent for DMA buffer, which sounds a little
48 * unconventional. However, given how small our fragments can be,
49 * a little uncached access is perhaps better than endless flushing.
50 * On i386 and other I/O-coherent architectures pci_alloc_consistent
51 * is entirely harmless.
52 */
53
54#include <linux/module.h>
55#include <linux/init.h>
56#include <linux/interrupt.h>
57#include <linux/ioport.h>
58#include <linux/delay.h>
59#include <linux/pci.h>
60#include <linux/slab.h>
61#include <linux/poll.h>
62#include <linux/soundcard.h>
63#include <linux/ac97_codec.h>
64#include <linux/sound.h>
65
66#include <asm/io.h>
67#include <asm/dma.h>
68#include <asm/uaccess.h>
69
70#ifdef CONFIG_SOUND_YMFPCI_LEGACY
71# include "sound_config.h"
72# include "mpu401.h"
73#endif
74#include "ymfpci.h"
75
76/*
77 * I do not believe in debug levels as I never can guess what
78 * part of the code is going to be problematic in the future.
79 * Don't forget to run your klogd with -c 8.
80 *
81 * Example (do not remove):
82 * #define YMFDBG(fmt, arg...) do{ printk(KERN_DEBUG fmt, ##arg); }while(0)
83 */
84#define YMFDBGW(fmt, arg...) /* */ /* write counts */
85#define YMFDBGI(fmt, arg...) /* */ /* interrupts */
86#define YMFDBGX(fmt, arg...) /* */ /* ioctl */
87
88static int ymf_playback_trigger(ymfpci_t *unit, struct ymf_pcm *ypcm, int cmd);
89static void ymf_capture_trigger(ymfpci_t *unit, struct ymf_pcm *ypcm, int cmd);
90static void ymfpci_voice_free(ymfpci_t *unit, ymfpci_voice_t *pvoice);
91static int ymf_capture_alloc(struct ymf_unit *unit, int *pbank);
92static int ymf_playback_prepare(struct ymf_state *state);
93static int ymf_capture_prepare(struct ymf_state *state);
94static struct ymf_state *ymf_state_alloc(ymfpci_t *unit);
95
96static void ymfpci_aclink_reset(struct pci_dev * pci);
97static void ymfpci_disable_dsp(ymfpci_t *unit);
98static void ymfpci_download_image(ymfpci_t *codec);
99static void ymf_memload(ymfpci_t *unit);
100
101static DEFINE_SPINLOCK(ymf_devs_lock);
102static LIST_HEAD(ymf_devs);
103
104/*
105 * constants
106 */
107
/* PCI IDs of the Yamaha DS-1 family chips this driver binds to. */
static struct pci_device_id ymf_id_tbl[] = {
#define DEV(dev, data) \
	{ PCI_VENDOR_ID_YAMAHA, dev, PCI_ANY_ID, PCI_ANY_ID, 0, 0, \
	  (unsigned long)data }
	DEV (PCI_DEVICE_ID_YAMAHA_724, "YMF724"),
	DEV (PCI_DEVICE_ID_YAMAHA_724F, "YMF724F"),
	DEV (PCI_DEVICE_ID_YAMAHA_740, "YMF740"),
	DEV (PCI_DEVICE_ID_YAMAHA_740C, "YMF740C"),
	DEV (PCI_DEVICE_ID_YAMAHA_744, "YMF744"),
	DEV (PCI_DEVICE_ID_YAMAHA_754, "YMF754"),
#undef DEV
	{ }			/* terminator */
};
MODULE_DEVICE_TABLE(pci, ymf_id_tbl);
122
123/*
124 * common I/O routines
125 */
126
/*
 * MMIO accessors for the chip's register window (reg_area_virt),
 * in 8/16/32-bit widths.  Offsets are the YDSXGR_* register constants.
 */
static inline void ymfpci_writeb(ymfpci_t *codec, u32 offset, u8 val)
{
	writeb(val, codec->reg_area_virt + offset);
}

static inline u16 ymfpci_readw(ymfpci_t *codec, u32 offset)
{
	return readw(codec->reg_area_virt + offset);
}

static inline void ymfpci_writew(ymfpci_t *codec, u32 offset, u16 val)
{
	writew(val, codec->reg_area_virt + offset);
}

static inline u32 ymfpci_readl(ymfpci_t *codec, u32 offset)
{
	return readl(codec->reg_area_virt + offset);
}

static inline void ymfpci_writel(ymfpci_t *codec, u32 offset, u32 val)
{
	writel(val, codec->reg_area_virt + offset);
}
151
/*
 * Wait for the AC97 command interface to go non-busy (bit 15 of the
 * status register clears), with a timeout of roughly 3/4 second.
 * @secondary: poll the secondary codec's status register instead.
 * @sched: if set, sleep one tick between polls instead of spinning
 *         (must not be set in atomic context).
 * Returns 0 when ready, -EBUSY on timeout.
 */
static int ymfpci_codec_ready(ymfpci_t *codec, int secondary, int sched)
{
	signed long end_time;
	u32 reg = secondary ? YDSXGR_SECSTATUSADR : YDSXGR_PRISTATUSADR;

	end_time = jiffies + 3 * (HZ / 4);
	do {
		if ((ymfpci_readw(codec, reg) & 0x8000) == 0)
			return 0;
		if (sched) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(1);
		}
	} while (end_time - (signed long)jiffies >= 0);	/* open-coded time check, wrap-safe */
	printk(KERN_ERR "ymfpci_codec_ready: codec %i is not ready [0x%x]\n",
	       secondary, ymfpci_readw(codec, reg));
	return -EBUSY;
}
170
/*
 * AC97 register write.  Fire-and-forget: the ready-wait result is
 * deliberately ignored, the command is posted regardless.
 */
static void ymfpci_codec_write(struct ac97_codec *dev, u8 reg, u16 val)
{
	ymfpci_t *codec = dev->private_data;
	u32 cmd;

	spin_lock(&codec->ac97_lock);
	/* XXX Do make use of dev->id */
	ymfpci_codec_ready(codec, 0, 0);
	cmd = ((YDSXG_AC97WRITECMD | reg) << 16) | val;
	ymfpci_writel(codec, YDSXGR_AC97CMDDATA, cmd);
	spin_unlock(&codec->ac97_lock);
}
183
/*
 * Issue an AC97 register read and return the data, or ~0 on timeout.
 * Caller must hold ac97_lock.  Early YMF744 silicon (rev < 2) needs a
 * burst of dummy status reads before the data register is valid.
 */
static u16 _ymfpci_codec_read(ymfpci_t *unit, u8 reg)
{
	int i;

	if (ymfpci_codec_ready(unit, 0, 0))
		return ~0;
	ymfpci_writew(unit, YDSXGR_AC97CMDADR, YDSXG_AC97READCMD | reg);
	if (ymfpci_codec_ready(unit, 0, 0))
		return ~0;
	if (unit->pci->device == PCI_DEVICE_ID_YAMAHA_744 && unit->rev < 2) {
		/* hardware erratum workaround: flush stale status reads */
		for (i = 0; i < 600; i++)
			ymfpci_readw(unit, YDSXGR_PRISTATUSDATA);
	}
	return ymfpci_readw(unit, YDSXGR_PRISTATUSDATA);
}
199
/* Locked wrapper around _ymfpci_codec_read() for the ac97 layer. */
static u16 ymfpci_codec_read(struct ac97_codec *dev, u8 reg)
{
	ymfpci_t *unit = dev->private_data;
	u16 ret;

	spin_lock(&unit->ac97_lock);
	ret = _ymfpci_codec_read(unit, reg);
	spin_unlock(&unit->ac97_lock);

	return ret;
}
211
212/*
213 * Misc routines
214 */
215
216/*
 * Calculate the actual sampling rate relative to the base clock (48kHz).
218 */
219static u32 ymfpci_calc_delta(u32 rate)
220{
221 switch (rate) {
222 case 8000: return 0x02aaab00;
223 case 11025: return 0x03accd00;
224 case 16000: return 0x05555500;
225 case 22050: return 0x07599a00;
226 case 32000: return 0x0aaaab00;
227 case 44100: return 0x0eb33300;
228 default: return ((rate << 16) / 48000) << 12;
229 }
230}
231
/* Rate thresholds indexing the LPF coefficient tables below. */
static u32 def_rate[8] = {
	100, 2000, 8000, 11025, 16000, 22050, 32000, 48000
};
235
236static u32 ymfpci_calc_lpfK(u32 rate)
237{
238 u32 i;
239 static u32 val[8] = {
240 0x00570000, 0x06AA0000, 0x18B20000, 0x20930000,
241 0x2B9A0000, 0x35A10000, 0x3EAA0000, 0x40000000
242 };
243
244 if (rate == 44100)
245 return 0x40000000; /* FIXME: What's the right value? */
246 for (i = 0; i < 8; i++)
247 if (rate <= def_rate[i])
248 return val[i];
249 return val[0];
250}
251
252static u32 ymfpci_calc_lpfQ(u32 rate)
253{
254 u32 i;
255 static u32 val[8] = {
256 0x35280000, 0x34A70000, 0x32020000, 0x31770000,
257 0x31390000, 0x31C90000, 0x33D00000, 0x40000000
258 };
259
260 if (rate == 44100)
261 return 0x370A0000;
262 for (i = 0; i < 8; i++)
263 if (rate <= def_rate[i])
264 return val[i];
265 return val[0];
266}
267
/* Samples per hardware frame period at @rate, scaled from 48kHz. */
static u32 ymf_calc_lend(u32 rate)
{
	return (rate * YMF_SAMPF) / 48000;
}
272
273/*
274 * We ever allow only a few formats, but let's be generic, for smaller surprise.
275 */
276static int ymf_pcm_format_width(int format)
277{
278 static int mask16 = AFMT_S16_LE|AFMT_S16_BE|AFMT_U16_LE|AFMT_U16_BE;
279
280 if ((format & (format-1)) != 0) {
281 printk(KERN_ERR "ymfpci: format 0x%x is not a power of 2\n", format);
282 return 8;
283 }
284
285 if (format == AFMT_IMA_ADPCM) return 4;
286 if ((format & mask16) != 0) return 16;
287 return 8;
288}
289
290static void ymf_pcm_update_shift(struct ymf_pcm_format *f)
291{
292 f->shift = 0;
293 if (f->voices == 2)
294 f->shift++;
295 if (ymf_pcm_format_width(f->format) == 16)
296 f->shift++;
297}
298
299/* Are you sure 32K is not too much? See if mpg123 skips on loaded systems. */
300#define DMABUF_DEFAULTORDER (15-PAGE_SHIFT)
301#define DMABUF_MINORDER 1
302
303/*
304 * Allocate DMA buffer
305 */
/*
 * Allocate the coherent DMA buffer for one direction, trying the
 * largest order first and degrading down to DMABUF_MINORDER.
 * Pages are marked PG_reserved so a later remap_pfn_range works.
 * Returns 0 or -ENOMEM; fills in rawbuf/dma_addr/buforder.
 */
static int alloc_dmabuf(ymfpci_t *unit, struct ymf_dmabuf *dmabuf)
{
	void *rawbuf = NULL;
	dma_addr_t dma_addr;
	int order;
	struct page *map, *mapend;

	/* alloc as big a chunk as we can */
	for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--) {
		rawbuf = pci_alloc_consistent(unit->pci, PAGE_SIZE << order, &dma_addr);
		if (rawbuf)
			break;
	}
	if (!rawbuf)
		return -ENOMEM;

#if 0
	printk(KERN_DEBUG "ymfpci: allocated %ld (order = %d) bytes at %p\n",
	    PAGE_SIZE << order, order, rawbuf);
#endif

	dmabuf->ready  = dmabuf->mapped = 0;
	dmabuf->rawbuf = rawbuf;
	dmabuf->dma_addr = dma_addr;
	dmabuf->buforder = order;

	/* now mark the pages as reserved; otherwise remap_pfn_range doesn't do what we want */
	mapend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
	for (map = virt_to_page(rawbuf); map <= mapend; map++)
		set_bit(PG_reserved, &map->flags);

	return 0;
}
339
340/*
341 * Free DMA buffer
342 */
/*
 * Undo alloc_dmabuf(): clear the PG_reserved marks and free the
 * coherent buffer.  Safe to call when nothing was allocated.
 */
static void dealloc_dmabuf(ymfpci_t *unit, struct ymf_dmabuf *dmabuf)
{
	struct page *map, *mapend;

	if (dmabuf->rawbuf) {
		/* undo marking the pages as reserved */
		mapend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
		for (map = virt_to_page(dmabuf->rawbuf); map <= mapend; map++)
			clear_bit(PG_reserved, &map->flags);

		pci_free_consistent(unit->pci, PAGE_SIZE << dmabuf->buforder,
		    dmabuf->rawbuf, dmabuf->dma_addr);
	}
	dmabuf->rawbuf = NULL;
	dmabuf->mapped = dmabuf->ready = 0;
}
359
/*
 * (Re)program the DMA ring for playback (rec == 0) or capture:
 * reset pointers, allocate the buffer on first use, derive the fake
 * OSS fragment geometry, silence-fill, and run the per-direction
 * hardware prepare.  Returns 0 or a negative error.
 */
static int prog_dmabuf(struct ymf_state *state, int rec)
{
	struct ymf_dmabuf *dmabuf;
	int w_16;
	unsigned bufsize;
	unsigned long flags;
	int redzone, redfrags;
	int ret;

	w_16 = ymf_pcm_format_width(state->format.format) == 16;
	dmabuf = rec ? &state->rpcm.dmabuf : &state->wpcm.dmabuf;

	spin_lock_irqsave(&state->unit->reg_lock, flags);
	dmabuf->hwptr = dmabuf->swptr = 0;
	dmabuf->total_bytes = 0;
	dmabuf->count = 0;
	spin_unlock_irqrestore(&state->unit->reg_lock, flags);

	/* allocate DMA buffer if not allocated yet */
	if (!dmabuf->rawbuf)
		if ((ret = alloc_dmabuf(state->unit, dmabuf)))
			return ret;

	/*
	 * Create fake fragment sizes and numbers for OSS ioctls.
	 * Import what Doom might have set with SNDCTL_DSP_SETFRAGMENT.
	 */
	bufsize = PAGE_SIZE << dmabuf->buforder;
	/* By default we give 4 big buffers. */
	dmabuf->fragshift = (dmabuf->buforder + PAGE_SHIFT - 2);
	if (dmabuf->ossfragshift > 3 &&
	    dmabuf->ossfragshift < dmabuf->fragshift) {
		/* If OSS set smaller fragments, give more smaller buffers. */
		dmabuf->fragshift = dmabuf->ossfragshift;
	}
	dmabuf->fragsize = 1 << dmabuf->fragshift;

	dmabuf->numfrag = bufsize >> dmabuf->fragshift;
	dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;

	if (dmabuf->ossmaxfrags >= 2) {
		/* shrink the ring, keeping a 3-frame-period red zone */
		redzone = ymf_calc_lend(state->format.rate);
		redzone <<= state->format.shift;
		redzone *= 3;
		redfrags = (redzone + dmabuf->fragsize-1) >> dmabuf->fragshift;

		if (dmabuf->ossmaxfrags + redfrags < dmabuf->numfrag) {
			dmabuf->numfrag = dmabuf->ossmaxfrags + redfrags;
			dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;
		}
	}

	/* fill with silence: 0x80 is the 8-bit unsigned midpoint */
	memset(dmabuf->rawbuf, w_16 ? 0 : 0x80, dmabuf->dmasize);

	/*
	 * Now set up the ring
	 */

	/* XXX ret = rec? cap_pre(): pbk_pre(); */
	spin_lock_irqsave(&state->unit->voice_lock, flags);
	if (rec) {
		if ((ret = ymf_capture_prepare(state)) != 0) {
			spin_unlock_irqrestore(&state->unit->voice_lock, flags);
			return ret;
		}
	} else {
		if ((ret = ymf_playback_prepare(state)) != 0) {
			spin_unlock_irqrestore(&state->unit->voice_lock, flags);
			return ret;
		}
	}
	spin_unlock_irqrestore(&state->unit->voice_lock, flags);

	/* set the ready flag for the dma buffer (this comment is not stupid) */
	dmabuf->ready = 1;

#if 0
	printk(KERN_DEBUG "prog_dmabuf: rate %d format 0x%x,"
	    " numfrag %d fragsize %d dmasize %d\n",
	       state->format.rate, state->format.format, dmabuf->numfrag,
	       dmabuf->fragsize, dmabuf->dmasize);
#endif

	return 0;
}
445
/* Kick the playback channel into the running state. */
static void ymf_start_dac(struct ymf_state *state)
{
	ymf_playback_trigger(state->unit, &state->wpcm, 1);
}
450
451// static void ymf_start_adc(struct ymf_state *state)
452// {
453// ymf_capture_trigger(state->unit, &state->rpcm, 1);
454// }
455
456/*
457 * Wait until output is drained.
458 * This does not kill the hardware for the sake of ioctls.
459 */
/*
 * Wait until output is drained.
 * This does not kill the hardware for the sake of ioctls.
 *
 * Sleeps (uninterruptibly) until the interrupt handler clears
 * ypcm->running; restarts playback first if data is still queued.
 */
static void ymf_wait_dac(struct ymf_state *state)
{
	struct ymf_unit *unit = state->unit;
	struct ymf_pcm *ypcm = &state->wpcm;
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&ypcm->dmabuf.wait, &waita);

	spin_lock_irqsave(&unit->reg_lock, flags);
	if (ypcm->dmabuf.count != 0 && !ypcm->running) {
		ymf_playback_trigger(unit, ypcm, 1);
	}

#if 0
	if (file->f_flags & O_NONBLOCK) {
		/*
		 * XXX Our mistake is to attach DMA buffer to state
		 * rather than to some per-device structure.
		 * Cannot skip waiting, can only make it shorter.
		 */
	}
#endif

	/* classic sleep loop: drop the lock around each schedule() */
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (ypcm->running) {
		spin_unlock_irqrestore(&unit->reg_lock, flags);
		schedule();
		spin_lock_irqsave(&unit->reg_lock, flags);
		set_current_state(TASK_UNINTERRUPTIBLE);
	}
	spin_unlock_irqrestore(&unit->reg_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ypcm->dmabuf.wait, &waita);

	/*
	 * This function may take up to 4 seconds to reach this point
	 * (32K circular buffer, 8000 Hz). User notices.
	 */
}
501
/* Can just stop, without wait. Or can we? */
/* Stop the capture channel immediately, under the register lock. */
static void ymf_stop_adc(struct ymf_state *state)
{
	struct ymf_unit *unit = state->unit;
	unsigned long flags;

	spin_lock_irqsave(&unit->reg_lock, flags);
	ymf_capture_trigger(unit, &state->rpcm, 0);
	spin_unlock_irqrestore(&unit->reg_lock, flags);
}
512
513/*
514 * Hardware start management
515 */
516
/*
 * Reference-counted hardware enable: the first user turns on the DSP
 * (MODE bits 0-1) and latches the active flip-flop bank.
 * Paired with ymfpci_hw_stop().
 */
static void ymfpci_hw_start(ymfpci_t *unit)
{
	unsigned long flags;

	spin_lock_irqsave(&unit->reg_lock, flags);
	if (unit->start_count++ == 0) {
		ymfpci_writel(unit, YDSXGR_MODE,
		    ymfpci_readl(unit, YDSXGR_MODE) | 3);
		unit->active_bank = ymfpci_readl(unit, YDSXGR_CTRLSELECT) & 1;
	}
	spin_unlock_irqrestore(&unit->reg_lock, flags);
}
529
/*
 * Reference-counted hardware disable: the last user clears the MODE
 * bits and spins (bounded) until the STATUS busy bit drops.
 */
static void ymfpci_hw_stop(ymfpci_t *unit)
{
	unsigned long flags;
	long timeout = 1000;

	spin_lock_irqsave(&unit->reg_lock, flags);
	if (--unit->start_count == 0) {
		ymfpci_writel(unit, YDSXGR_MODE,
		    ymfpci_readl(unit, YDSXGR_MODE) & ~3);
		/* bounded poll for the DSP to quiesce */
		while (timeout-- > 0) {
			if ((ymfpci_readl(unit, YDSXGR_STATUS) & 2) == 0)
				break;
		}
	}
	spin_unlock_irqrestore(&unit->reg_lock, flags);
}
546
547/*
548 * Playback voice management
549 */
550
/*
 * Find a free playback voice (or an even/odd pair when @pair is set),
 * mark it for @type, bump the hardware refcount once per voice, and
 * return the voice(s) through @rvoice.  Caller holds voice_lock.
 * Returns 0 or -EBUSY when no voice is free.
 */
static int voice_alloc(ymfpci_t *codec, ymfpci_voice_type_t type, int pair, ymfpci_voice_t *rvoice[])
{
	ymfpci_voice_t *voice, *voice2;
	int idx;

	for (idx = 0; idx < YDSXG_PLAYBACK_VOICES; idx += pair ? 2 : 1) {
		voice = &codec->voices[idx];
		voice2 = pair ? &codec->voices[idx+1] : NULL;
		if (voice->use || (voice2 && voice2->use))
			continue;
		voice->use = 1;
		if (voice2)
			voice2->use = 1;
		switch (type) {
		case YMFPCI_PCM:
			voice->pcm = 1;
			if (voice2)
				voice2->pcm = 1;
			break;
		case YMFPCI_SYNTH:
			voice->synth = 1;
			break;
		case YMFPCI_MIDI:
			voice->midi = 1;
			break;
		}
		/* one hw_start per voice; ymfpci_voice_free undoes each */
		ymfpci_hw_start(codec);
		rvoice[0] = voice;
		if (voice2) {
			ymfpci_hw_start(codec);
			rvoice[1] = voice2;
		}
		return 0;
	}
	return -EBUSY;	/* Your audio channel is open by someone else. */
}
587
/* Release one voice: drop the hw refcount and clear all use flags. */
static void ymfpci_voice_free(ymfpci_t *unit, ymfpci_voice_t *pvoice)
{
	ymfpci_hw_stop(unit);
	pvoice->use = pvoice->pcm = pvoice->synth = pvoice->midi = 0;
	pvoice->ypcm = NULL;
}
594
595/*
596 */
597
/*
 * Per-voice playback interrupt work: advance hwptr from the chip's
 * bank position, silence-fill consumed ring space, detect underruns
 * (hwptr entering the red zone), account played bytes, and wake
 * writers when the ring is at most half full.  Called from the top
 * half with voice_lock held; takes reg_lock itself.
 */
static void ymf_pcm_interrupt(ymfpci_t *codec, ymfpci_voice_t *voice)
{
	struct ymf_pcm *ypcm;
	int redzone;
	int pos, delta, swptr;
	int played, distance;
	struct ymf_state *state;
	struct ymf_dmabuf *dmabuf;
	char silence;

	if ((ypcm = voice->ypcm) == NULL) {
		return;
	}
	if ((state = ypcm->state) == NULL) {
		ypcm->running = 0;	// lock it
		return;
	}
	dmabuf = &ypcm->dmabuf;
	spin_lock(&codec->reg_lock);
	if (ypcm->running) {
		YMFDBGI("ymfpci: %d, intr bank %d count %d start 0x%x:%x\n",
		   voice->number, codec->active_bank, dmabuf->count,
		   le32_to_cpu(voice->bank[0].start),
		   le32_to_cpu(voice->bank[1].start));
		silence = (ymf_pcm_format_width(state->format.format) == 16) ?
		    0 : 0x80;
		/* We need actual left-hand-side redzone size here. */
		redzone = ymf_calc_lend(state->format.rate);
		redzone <<= (state->format.shift + 1);
		swptr = dmabuf->swptr;

		pos = le32_to_cpu(voice->bank[codec->active_bank].start);
		pos <<= state->format.shift;
		if (pos < 0 || pos >= dmabuf->dmasize) {	/* ucode bug */
			printk(KERN_ERR "ymfpci%d: runaway voice %d: hwptr %d=>%d dmasize %d\n",
			    codec->dev_audio, voice->number,
			    dmabuf->hwptr, pos, dmabuf->dmasize);
			pos = 0;
		}
		/* silence-fill everything the hardware just consumed,
		   handling the wrap around the end of the ring */
		if (pos < dmabuf->hwptr) {
			delta = dmabuf->dmasize - dmabuf->hwptr;
			memset(dmabuf->rawbuf + dmabuf->hwptr, silence, delta);
			delta += pos;
			memset(dmabuf->rawbuf, silence, pos);
		} else {
			delta = pos - dmabuf->hwptr;
			memset(dmabuf->rawbuf + dmabuf->hwptr, silence, delta);
		}
		dmabuf->hwptr = pos;

		if (dmabuf->count == 0) {
			printk(KERN_ERR "ymfpci%d: %d: strain: hwptr %d\n",
			    codec->dev_audio, voice->number, dmabuf->hwptr);
			ymf_playback_trigger(codec, ypcm, 0);
		}

		if (swptr <= pos) {
			distance = pos - swptr;
		} else {
			distance = dmabuf->dmasize - (swptr - pos);
		}
		if (distance < redzone) {
			/*
			 * hwptr inside redzone => DMA ran out of samples.
			 */
			if (delta < dmabuf->count) {
				/*
				 * Lost interrupt or other screwage.
				 */
				printk(KERN_ERR "ymfpci%d: %d: lost: delta %d"
				    " hwptr %d swptr %d distance %d count %d\n",
				    codec->dev_audio, voice->number, delta,
				    dmabuf->hwptr, swptr, distance, dmabuf->count);
			} else {
				/*
				 * Normal end of DMA.
				 */
				YMFDBGI("ymfpci%d: %d: done: delta %d"
				    " hwptr %d swptr %d distance %d count %d\n",
				    codec->dev_audio, voice->number, delta,
				    dmabuf->hwptr, swptr, distance, dmabuf->count);
			}
			played = dmabuf->count;
			if (ypcm->running) {
				ymf_playback_trigger(codec, ypcm, 0);
			}
		} else {
			/*
			 * hwptr is chipping away towards a remote swptr.
			 * Calculate other distance and apply it to count.
			 */
			if (swptr >= pos) {
				distance = swptr - pos;
			} else {
				distance = dmabuf->dmasize - (pos - swptr);
			}
			if (distance < dmabuf->count) {
				played = dmabuf->count - distance;
			} else {
				played = 0;
			}
		}

		dmabuf->total_bytes += played;
		dmabuf->count -= played;
		if (dmabuf->count < dmabuf->dmasize / 2) {
			wake_up(&dmabuf->wait);
		}
	}
	spin_unlock(&codec->reg_lock);
}
709
/*
 * Per-bank capture interrupt work: advance hwptr from the chip's bank
 * position, grow the fill count, bump swptr on overflow so that a red
 * zone always stays free, and wake readers when data is available.
 * Called from the top half with voice_lock held; takes reg_lock.
 */
static void ymf_cap_interrupt(ymfpci_t *unit, struct ymf_capture *cap)
{
	struct ymf_pcm *ypcm;
	int redzone;
	struct ymf_state *state;
	struct ymf_dmabuf *dmabuf;
	int pos, delta;
	int cnt;

	if ((ypcm = cap->ypcm) == NULL) {
		return;
	}
	if ((state = ypcm->state) == NULL) {
		ypcm->running = 0;	// lock it
		return;
	}
	dmabuf = &ypcm->dmabuf;
	spin_lock(&unit->reg_lock);
	if (ypcm->running) {
		redzone = ymf_calc_lend(state->format.rate);
		redzone <<= (state->format.shift + 1);

		pos = le32_to_cpu(cap->bank[unit->active_bank].start);
		// pos <<= state->format.shift;
		if (pos < 0 || pos >= dmabuf->dmasize) {	/* ucode bug */
			printk(KERN_ERR "ymfpci%d: runaway capture %d: hwptr %d=>%d dmasize %d\n",
			    unit->dev_audio, ypcm->capture_bank_number,
			    dmabuf->hwptr, pos, dmabuf->dmasize);
			pos = 0;
		}
		if (pos < dmabuf->hwptr) {
			delta = dmabuf->dmasize - dmabuf->hwptr;
			delta += pos;
		} else {
			delta = pos - dmabuf->hwptr;
		}
		dmabuf->hwptr = pos;

		cnt = dmabuf->count;
		cnt += delta;
		if (cnt + redzone > dmabuf->dmasize) {
			/* Overflow - bump swptr */
			dmabuf->count = dmabuf->dmasize - redzone;
			dmabuf->swptr = dmabuf->hwptr + redzone;
			if (dmabuf->swptr >= dmabuf->dmasize) {
				dmabuf->swptr -= dmabuf->dmasize;
			}
		} else {
			dmabuf->count = cnt;
		}

		dmabuf->total_bytes += delta;
		if (dmabuf->count) {		/* && is_sleeping  XXX */
			wake_up(&dmabuf->wait);
		}
	}
	spin_unlock(&unit->reg_lock);
}
768
/*
 * Start (@cmd != 0) or stop playback by pointing the control-playback
 * slots at the voice bank(s) or zeroing them.  Caller holds reg_lock.
 * Returns -EINVAL when no voice has been allocated yet.
 */
static int ymf_playback_trigger(ymfpci_t *codec, struct ymf_pcm *ypcm, int cmd)
{

	if (ypcm->voices[0] == NULL) {
		return -EINVAL;
	}
	if (cmd != 0) {
		codec->ctrl_playback[ypcm->voices[0]->number + 1] =
		    cpu_to_le32(ypcm->voices[0]->bank_ba);
		if (ypcm->voices[1] != NULL)
			codec->ctrl_playback[ypcm->voices[1]->number + 1] =
			    cpu_to_le32(ypcm->voices[1]->bank_ba);
		ypcm->running = 1;
	} else {
		codec->ctrl_playback[ypcm->voices[0]->number + 1] = 0;
		if (ypcm->voices[1] != NULL)
			codec->ctrl_playback[ypcm->voices[1]->number + 1] = 0;
		ypcm->running = 0;
	}
	return 0;
}
790
791static void ymf_capture_trigger(ymfpci_t *codec, struct ymf_pcm *ypcm, int cmd)
792{
793 u32 tmp;
794
795 if (cmd != 0) {
796 tmp = ymfpci_readl(codec, YDSXGR_MAPOFREC) | (1 << ypcm->capture_bank_number);
797 ymfpci_writel(codec, YDSXGR_MAPOFREC, tmp);
798 ypcm->running = 1;
799 } else {
800 tmp = ymfpci_readl(codec, YDSXGR_MAPOFREC) & ~(1 << ypcm->capture_bank_number);
801 ymfpci_writel(codec, YDSXGR_MAPOFREC, tmp);
802 ypcm->running = 0;
803 }
804}
805
/*
 * Make the pcm own exactly @voices voices (1 = mono, 2 = stereo pair),
 * freeing or reallocating as needed.  Growing to a pair frees a lone
 * voice first because pairs must be even/odd adjacent.
 * Returns 0 or the voice_alloc() error.
 */
static int ymfpci_pcm_voice_alloc(struct ymf_pcm *ypcm, int voices)
{
	struct ymf_unit *unit;
	int err;

	unit = ypcm->state->unit;
	if (ypcm->voices[1] != NULL && voices < 2) {
		ymfpci_voice_free(unit, ypcm->voices[1]);
		ypcm->voices[1] = NULL;
	}
	if (voices == 1 && ypcm->voices[0] != NULL)
		return 0;		/* already allocated */
	if (voices == 2 && ypcm->voices[0] != NULL && ypcm->voices[1] != NULL)
		return 0;		/* already allocated */
	if (voices > 1) {
		if (ypcm->voices[0] != NULL && ypcm->voices[1] == NULL) {
			ymfpci_voice_free(unit, ypcm->voices[0]);
			ypcm->voices[0] = NULL;
		}
		if ((err = voice_alloc(unit, YMFPCI_PCM, 1, ypcm->voices)) < 0)
			return err;
		ypcm->voices[0]->ypcm = ypcm;
		ypcm->voices[1]->ypcm = ypcm;
	} else {
		if ((err = voice_alloc(unit, YMFPCI_PCM, 0, ypcm->voices)) < 0)
			return err;
		ypcm->voices[0]->ypcm = ypcm;
	}
	return 0;
}
836
/*
 * Fill both flip-flop banks of one voice with the playback parameters:
 * sample format, DMA base/end, pitch delta and LPF coefficients, and
 * the gain routing (front left/right, or effect slots for S/PDIF).
 * @end is in bytes and is converted to samples via the stereo/width
 * shifts below.  For a stereo pair, the odd voice plays the right
 * channel (format bit 0 set).
 */
static void ymf_pcm_init_voice(ymfpci_voice_t *voice, int stereo,
    int rate, int w_16, unsigned long addr, unsigned int end, int spdif)
{
	u32 format;
	u32 delta = ymfpci_calc_delta(rate);
	u32 lpfQ = ymfpci_calc_lpfQ(rate);
	u32 lpfK = ymfpci_calc_lpfK(rate);
	ymfpci_playback_bank_t *bank;
	int nbank;

	/*
	 * The gain is a floating point number. According to the manual,
	 * bit 31 indicates a sign bit, bit 30 indicates an integer part,
	 * and bits [29:15] indicate a decimal fraction part. Thus,
	 * for a gain of 1.0 the constant of 0x40000000 is loaded.
	 */
	unsigned default_gain = cpu_to_le32(0x40000000);

	format = (stereo ? 0x00010000 : 0) | (w_16 ? 0 : 0x80000000);
	if (stereo)
		end >>= 1;
	if (w_16)
		end >>= 1;
	for (nbank = 0; nbank < 2; nbank++) {
		bank = &voice->bank[nbank];
		bank->format = cpu_to_le32(format);
		bank->loop_default = 0;	/* 0-loops forever, otherwise count */
		bank->base = cpu_to_le32(addr);
		bank->loop_start = 0;
		bank->loop_end = cpu_to_le32(end);
		bank->loop_frac = 0;
		bank->eg_gain_end = default_gain;
		bank->lpfQ = cpu_to_le32(lpfQ);
		bank->status = 0;
		bank->num_of_frames = 0;
		bank->loop_count = 0;
		bank->start = 0;
		bank->start_frac = 0;
		bank->delta =
		bank->delta_end = cpu_to_le32(delta);
		bank->lpfK =
		bank->lpfK_end = cpu_to_le32(lpfK);
		bank->eg_gain = default_gain;
		bank->lpfD1 =
		bank->lpfD2 = 0;

		/* mute everything first, then route below */
		bank->left_gain =
		bank->right_gain =
		bank->left_gain_end =
		bank->right_gain_end =
		bank->eff1_gain =
		bank->eff2_gain =
		bank->eff3_gain =
		bank->eff1_gain_end =
		bank->eff2_gain_end =
		bank->eff3_gain_end = 0;

		if (!stereo) {
			if (!spdif) {
				bank->left_gain = 
				bank->right_gain =
				bank->left_gain_end =
				bank->right_gain_end = default_gain;
			} else {
				bank->eff2_gain =
				bank->eff2_gain_end =
				bank->eff3_gain =
				bank->eff3_gain_end = default_gain;
			}
		} else {
			if (!spdif) {
				if ((voice->number & 1) == 0) {
					bank->left_gain =
					bank->left_gain_end = default_gain;
				} else {
					bank->format |= cpu_to_le32(1);
					bank->right_gain =
					bank->right_gain_end = default_gain;
				}
			} else {
				if ((voice->number & 1) == 0) {
					bank->eff2_gain =
					bank->eff2_gain_end = default_gain;
				} else {
					bank->format |= cpu_to_le32(1);
					bank->eff3_gain =
					bank->eff3_gain_end = default_gain;
				}
			}
		}
	}
}
929
930/*
931 * XXX Capture channel allocation is entirely fake at the moment.
932 * We use only one channel and mark it busy as required.
933 */
934static int ymf_capture_alloc(struct ymf_unit *unit, int *pbank)
935{
936 struct ymf_capture *cap;
937 int cbank;
938
939 cbank = 1; /* Only ADC slot is used for now. */
940 cap = &unit->capture[cbank];
941 if (cap->use)
942 return -EBUSY;
943 cap->use = 1;
944 *pbank = cbank;
945 return 0;
946}
947
/*
 * Prepare playback: grab the needed voice(s) and program their banks
 * from the current format and DMA buffer.  Caller holds voice_lock.
 * Returns 0 or the allocation error.
 */
static int ymf_playback_prepare(struct ymf_state *state)
{
	struct ymf_pcm *ypcm = &state->wpcm;
	int err, nvoice;

	if ((err = ymfpci_pcm_voice_alloc(ypcm, state->format.voices)) < 0) {
		/* Somebody started 32 mpg123's in parallel? */
		printk(KERN_INFO "ymfpci%d: cannot allocate voice\n",
		    state->unit->dev_audio);
		return err;
	}

	for (nvoice = 0; nvoice < state->format.voices; nvoice++) {
		ymf_pcm_init_voice(ypcm->voices[nvoice],
		    state->format.voices == 2, state->format.rate,
		    ymf_pcm_format_width(state->format.format) == 16,
		    ypcm->dmabuf.dma_addr, ypcm->dmabuf.dmasize,
		    ypcm->spdif);
	}
	return 0;
}
969
/*
 * Prepare capture: claim a capture bank on first use, program the
 * slot's rate/format registers, and point both flip-flop banks at the
 * DMA buffer.  Caller holds voice_lock.  Returns 0 or -EBUSY.
 */
static int ymf_capture_prepare(struct ymf_state *state)
{
	ymfpci_t *unit = state->unit;
	struct ymf_pcm *ypcm = &state->rpcm;
	ymfpci_capture_bank_t * bank;
	/* XXX This is confusing, gotta rename one of them banks... */
	int nbank;		/* flip-flop bank */
	int cbank;		/* input [super-]bank */
	struct ymf_capture *cap;
	u32 rate, format;

	if (ypcm->capture_bank_number == -1) {
		if (ymf_capture_alloc(unit, &cbank) != 0)
			return -EBUSY;

		ypcm->capture_bank_number = cbank;

		cap = &unit->capture[cbank];
		cap->bank = unit->bank_capture[cbank][0];
		cap->ypcm = ypcm;
		ymfpci_hw_start(unit);
	}

	// ypcm->frag_size = snd_pcm_lib_transfer_fragment(substream);
	// frag_size is replaced with nonfragged byte-aligned rolling buffer
	rate = ((48000 * 4096) / state->format.rate) - 1;
	format = 0;
	if (state->format.voices == 2)
		format |= 2;
	if (ymf_pcm_format_width(state->format.format) == 8)
		format |= 1;
	switch (ypcm->capture_bank_number) {
	case 0:
		ymfpci_writel(unit, YDSXGR_RECFORMAT, format);
		ymfpci_writel(unit, YDSXGR_RECSLOTSR, rate);
		break;
	case 1:
		ymfpci_writel(unit, YDSXGR_ADCFORMAT, format);
		ymfpci_writel(unit, YDSXGR_ADCSLOTSR, rate);
		break;
	}
	for (nbank = 0; nbank < 2; nbank++) {
		bank = unit->bank_capture[ypcm->capture_bank_number][nbank];
		bank->base = cpu_to_le32(ypcm->dmabuf.dma_addr);
		// bank->loop_end = ypcm->dmabuf.dmasize >> state->format.shift;
		bank->loop_end = cpu_to_le32(ypcm->dmabuf.dmasize);
		bank->start = 0;
		bank->num_of_loops = 0;
	}
#if 0 /* s/pdif */
	if (state->digital.dig_valid)
		/*state->digital.type == SND_PCM_DIG_AES_IEC958*/
		ymfpci_writew(codec, YDSXGR_SPDIFOUTSTATUS,
		    state->digital.dig_status[0] | (state->digital.dig_status[1] << 8));
#endif
	return 0;
}
1027
/*
 * Top-half interrupt handler.  On a frame interrupt (STATUS bit 31):
 * latch the new active bank, service every in-use playback voice and
 * capture bank, ack the status bit, and re-arm interrupts via MODE
 * bit 1.  A timer interrupt (INTFLAG bit 0) is just acknowledged.
 */
static irqreturn_t ymf_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	ymfpci_t *codec = dev_id;
	u32 status, nvoice, mode;
	struct ymf_voice *voice;
	struct ymf_capture *cap;

	status = ymfpci_readl(codec, YDSXGR_STATUS);
	if (status & 0x80000000) {
		codec->active_bank = ymfpci_readl(codec, YDSXGR_CTRLSELECT) & 1;
		spin_lock(&codec->voice_lock);
		for (nvoice = 0; nvoice < YDSXG_PLAYBACK_VOICES; nvoice++) {
			voice = &codec->voices[nvoice];
			if (voice->use)
				ymf_pcm_interrupt(codec, voice);
		}
		for (nvoice = 0; nvoice < YDSXG_CAPTURE_VOICES; nvoice++) {
			cap = &codec->capture[nvoice];
			if (cap->use)
				ymf_cap_interrupt(codec, cap);
		}
		spin_unlock(&codec->voice_lock);
		spin_lock(&codec->reg_lock);
		ymfpci_writel(codec, YDSXGR_STATUS, 0x80000000);
		mode = ymfpci_readl(codec, YDSXGR_MODE) | 2;
		ymfpci_writel(codec, YDSXGR_MODE, mode);
		spin_unlock(&codec->reg_lock);
	}

	status = ymfpci_readl(codec, YDSXGR_INTFLAG);
	if (status & 1) {
		/* timer handler */
		ymfpci_writel(codec, YDSXGR_INTFLAG, ~0);
	}
	return IRQ_HANDLED;
}
1064
/*
 * Release a pcm's hardware resources: the voice pair for playback, or
 * the capture bank (plus one hw refcount) for capture.
 */
static void ymf_pcm_free_substream(struct ymf_pcm *ypcm)
{
	unsigned long flags;
	struct ymf_unit *unit;

	unit = ypcm->state->unit;

	if (ypcm->type == PLAYBACK_VOICE) {
		spin_lock_irqsave(&unit->voice_lock, flags);
		if (ypcm->voices[1])
			ymfpci_voice_free(unit, ypcm->voices[1]);
		if (ypcm->voices[0])
			ymfpci_voice_free(unit, ypcm->voices[0]);
		spin_unlock_irqrestore(&unit->voice_lock, flags);
	} else {
		if (ypcm->capture_bank_number != -1) {
			unit->capture[ypcm->capture_bank_number].use = 0;
			ypcm->capture_bank_number = -1;
			ymfpci_hw_stop(unit);
		}
	}
}
1087
1088static struct ymf_state *ymf_state_alloc(ymfpci_t *unit)
1089{
1090 struct ymf_pcm *ypcm;
1091 struct ymf_state *state;
1092
1093 if ((state = kmalloc(sizeof(struct ymf_state), GFP_KERNEL)) == NULL) {
1094 goto out0;
1095 }
1096 memset(state, 0, sizeof(struct ymf_state));
1097
1098 ypcm = &state->wpcm;
1099 ypcm->state = state;
1100 ypcm->type = PLAYBACK_VOICE;
1101 ypcm->capture_bank_number = -1;
1102 init_waitqueue_head(&ypcm->dmabuf.wait);
1103
1104 ypcm = &state->rpcm;
1105 ypcm->state = state;
1106 ypcm->type = CAPTURE_AC97;
1107 ypcm->capture_bank_number = -1;
1108 init_waitqueue_head(&ypcm->dmabuf.wait);
1109
1110 state->unit = unit;
1111
1112 state->format.format = AFMT_U8;
1113 state->format.rate = 8000;
1114 state->format.voices = 1;
1115 ymf_pcm_update_shift(&state->format);
1116
1117 return state;
1118
1119out0:
1120 return NULL;
1121}
1122
/* AES/IEC958 channel status bits */
/*
 * NOTE(review): these appear to duplicate ALSA's IEC958 channel-status
 * definitions — verify against the IEC 60958 spec / ALSA headers
 * before changing any value.
 */
#define SND_PCM_AES0_PROFESSIONAL (1<<0) /* 0 = consumer, 1 = professional */
#define SND_PCM_AES0_NONAUDIO (1<<1) /* 0 = audio, 1 = non-audio */
#define SND_PCM_AES0_PRO_EMPHASIS (7<<2) /* mask - emphasis */
#define SND_PCM_AES0_PRO_EMPHASIS_NOTID (0<<2) /* emphasis not indicated */
#define SND_PCM_AES0_PRO_EMPHASIS_NONE (1<<2) /* none emphasis */
#define SND_PCM_AES0_PRO_EMPHASIS_5015 (3<<2) /* 50/15us emphasis */
#define SND_PCM_AES0_PRO_EMPHASIS_CCITT (7<<2) /* CCITT J.17 emphasis */
#define SND_PCM_AES0_PRO_FREQ_UNLOCKED (1<<5) /* source sample frequency: 0 = locked, 1 = unlocked */
#define SND_PCM_AES0_PRO_FS (3<<6) /* mask - sample frequency */
#define SND_PCM_AES0_PRO_FS_NOTID (0<<6) /* fs not indicated */
#define SND_PCM_AES0_PRO_FS_44100 (1<<6) /* 44.1kHz */
#define SND_PCM_AES0_PRO_FS_48000 (2<<6) /* 48kHz */
#define SND_PCM_AES0_PRO_FS_32000 (3<<6) /* 32kHz */
#define SND_PCM_AES0_CON_NOT_COPYRIGHT (1<<2) /* 0 = copyright, 1 = not copyright */
#define SND_PCM_AES0_CON_EMPHASIS (7<<3) /* mask - emphasis */
#define SND_PCM_AES0_CON_EMPHASIS_NONE (0<<3) /* none emphasis */
#define SND_PCM_AES0_CON_EMPHASIS_5015 (1<<3) /* 50/15us emphasis */
#define SND_PCM_AES0_CON_MODE (3<<6) /* mask - mode */
#define SND_PCM_AES1_PRO_MODE (15<<0) /* mask - channel mode */
#define SND_PCM_AES1_PRO_MODE_NOTID (0<<0) /* not indicated */
#define SND_PCM_AES1_PRO_MODE_STEREOPHONIC (2<<0) /* stereophonic - ch A is left */
#define SND_PCM_AES1_PRO_MODE_SINGLE (4<<0) /* single channel */
#define SND_PCM_AES1_PRO_MODE_TWO (8<<0) /* two channels */
#define SND_PCM_AES1_PRO_MODE_PRIMARY (12<<0) /* primary/secondary */
#define SND_PCM_AES1_PRO_MODE_BYTE3 (15<<0) /* vector to byte 3 */
#define SND_PCM_AES1_PRO_USERBITS (15<<4) /* mask - user bits */
#define SND_PCM_AES1_PRO_USERBITS_NOTID (0<<4) /* not indicated */
#define SND_PCM_AES1_PRO_USERBITS_192 (8<<4) /* 192-bit structure */
#define SND_PCM_AES1_PRO_USERBITS_UDEF (12<<4) /* user defined application */
#define SND_PCM_AES1_CON_CATEGORY 0x7f
#define SND_PCM_AES1_CON_GENERAL 0x00
#define SND_PCM_AES1_CON_EXPERIMENTAL 0x40
#define SND_PCM_AES1_CON_SOLIDMEM_MASK 0x0f
#define SND_PCM_AES1_CON_SOLIDMEM_ID 0x08
#define SND_PCM_AES1_CON_BROADCAST1_MASK 0x07
#define SND_PCM_AES1_CON_BROADCAST1_ID 0x04
#define SND_PCM_AES1_CON_DIGDIGCONV_MASK 0x07
#define SND_PCM_AES1_CON_DIGDIGCONV_ID 0x02
#define SND_PCM_AES1_CON_ADC_COPYRIGHT_MASK 0x1f
#define SND_PCM_AES1_CON_ADC_COPYRIGHT_ID 0x06
#define SND_PCM_AES1_CON_ADC_MASK 0x1f
#define SND_PCM_AES1_CON_ADC_ID 0x16
#define SND_PCM_AES1_CON_BROADCAST2_MASK 0x0f
#define SND_PCM_AES1_CON_BROADCAST2_ID 0x0e
#define SND_PCM_AES1_CON_LASEROPT_MASK 0x07
#define SND_PCM_AES1_CON_LASEROPT_ID 0x01
#define SND_PCM_AES1_CON_MUSICAL_MASK 0x07
#define SND_PCM_AES1_CON_MUSICAL_ID 0x05
#define SND_PCM_AES1_CON_MAGNETIC_MASK 0x07
#define SND_PCM_AES1_CON_MAGNETIC_ID 0x03
#define SND_PCM_AES1_CON_IEC908_CD (SND_PCM_AES1_CON_LASEROPT_ID|0x00)
#define SND_PCM_AES1_CON_NON_IEC908_CD (SND_PCM_AES1_CON_LASEROPT_ID|0x08)
#define SND_PCM_AES1_CON_PCM_CODER (SND_PCM_AES1_CON_DIGDIGCONV_ID|0x00)
#define SND_PCM_AES1_CON_SAMPLER (SND_PCM_AES1_CON_DIGDIGCONV_ID|0x20)
#define SND_PCM_AES1_CON_MIXER (SND_PCM_AES1_CON_DIGDIGCONV_ID|0x10)
#define SND_PCM_AES1_CON_RATE_CONVERTER (SND_PCM_AES1_CON_DIGDIGCONV_ID|0x18)
#define SND_PCM_AES1_CON_SYNTHESIZER (SND_PCM_AES1_CON_MUSICAL_ID|0x00)
#define SND_PCM_AES1_CON_MICROPHONE (SND_PCM_AES1_CON_MUSICAL_ID|0x08)
#define SND_PCM_AES1_CON_DAT (SND_PCM_AES1_CON_MAGNETIC_ID|0x00)
#define SND_PCM_AES1_CON_VCR (SND_PCM_AES1_CON_MAGNETIC_ID|0x08)
#define SND_PCM_AES1_CON_ORIGINAL (1<<7) /* this bits depends on the category code */
#define SND_PCM_AES2_PRO_SBITS (7<<0) /* mask - sample bits */
#define SND_PCM_AES2_PRO_SBITS_20 (2<<0) /* 20-bit - coordination */
#define SND_PCM_AES2_PRO_SBITS_24 (4<<0) /* 24-bit - main audio */
#define SND_PCM_AES2_PRO_SBITS_UDEF (6<<0) /* user defined application */
#define SND_PCM_AES2_PRO_WORDLEN (7<<3) /* mask - source word length */
#define SND_PCM_AES2_PRO_WORDLEN_NOTID (0<<3) /* not indicated */
#define SND_PCM_AES2_PRO_WORDLEN_22_18 (2<<3) /* 22-bit or 18-bit */
#define SND_PCM_AES2_PRO_WORDLEN_23_19 (4<<3) /* 23-bit or 19-bit */
#define SND_PCM_AES2_PRO_WORDLEN_24_20 (5<<3) /* 24-bit or 20-bit */
#define SND_PCM_AES2_PRO_WORDLEN_20_16 (6<<3) /* 20-bit or 16-bit */
#define SND_PCM_AES2_CON_SOURCE (15<<0) /* mask - source number */
#define SND_PCM_AES2_CON_SOURCE_UNSPEC (0<<0) /* unspecified */
#define SND_PCM_AES2_CON_CHANNEL (15<<4) /* mask - channel number */
#define SND_PCM_AES2_CON_CHANNEL_UNSPEC (0<<4) /* unspecified */
#define SND_PCM_AES3_CON_FS (15<<0) /* mask - sample frequency */
#define SND_PCM_AES3_CON_FS_44100 (0<<0) /* 44.1kHz */
#define SND_PCM_AES3_CON_FS_48000 (2<<0) /* 48kHz */
#define SND_PCM_AES3_CON_FS_32000 (3<<0) /* 32kHz */
#define SND_PCM_AES3_CON_CLOCK (3<<4) /* mask - clock accuracy */
#define SND_PCM_AES3_CON_CLOCK_1000PPM (0<<4) /* 1000 ppm */
#define SND_PCM_AES3_CON_CLOCK_50PPM (1<<4) /* 50 ppm */
#define SND_PCM_AES3_CON_CLOCK_VARIABLE (2<<4) /* variable pitch */
1207
1208/*
1209 * User interface
1210 */
1211
1212/*
1213 * in this loop, dmabuf.count signifies the amount of data that is
1214 * waiting to be copied to the user's buffer. it is filled by the dma
1215 * machine and drained by this loop.
1216 */
/*
 * read(2) for the capture channel.
 *
 * dmabuf->count is the number of recorded bytes waiting to be copied
 * out: the DMA engine fills it, this loop drains it.  When the buffer
 * is empty the capture engine is (re)started and the caller blocks
 * (unless O_NONBLOCK) with a timeout derived from buffer size and
 * sample rate.  A suspended unit makes us sleep and retry.
 */
static ssize_t
ymf_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	struct ymf_state *state = (struct ymf_state *)file->private_data;
	struct ymf_dmabuf *dmabuf = &state->rpcm.dmabuf;
	struct ymf_unit *unit = state->unit;
	DECLARE_WAITQUEUE(waita, current);
	ssize_t ret;
	unsigned long flags;
	unsigned int swptr;
	int cnt;			/* This many to go in this revolution */

	if (dmabuf->mapped)
		return -ENXIO;
	if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
		return ret;
	ret = 0;

	add_wait_queue(&dmabuf->wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);
	while (count > 0) {
		spin_lock_irqsave(&unit->reg_lock, flags);
		if (unit->suspended) {
			/* Device suspended: sleep until resume wakes us, then retry. */
			spin_unlock_irqrestore(&unit->reg_lock, flags);
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
			if (signal_pending(current)) {
				if (!ret) ret = -EAGAIN;
				break;
			}
			continue;
		}
		swptr = dmabuf->swptr;
		/* Contiguous run from swptr to the end of the ring. */
		cnt = dmabuf->dmasize - swptr;
		if (dmabuf->count < cnt)
			cnt = dmabuf->count;
		spin_unlock_irqrestore(&unit->reg_lock, flags);

		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			unsigned long tmo;
			/* buffer is empty, start the dma machine and wait for data to be
			   recorded */
			spin_lock_irqsave(&state->unit->reg_lock, flags);
			if (!state->rpcm.running) {
				ymf_capture_trigger(state->unit, &state->rpcm, 1);
			}
			spin_unlock_irqrestore(&state->unit->reg_lock, flags);
			if (file->f_flags & O_NONBLOCK) {
				if (!ret) ret = -EAGAIN;
				break;
			}
			/* This isn't strictly right for the 810 but it'll do */
			tmo = (dmabuf->dmasize * HZ) / (state->format.rate * 2);
			tmo >>= state->format.shift;
			/* There are two situations when sleep_on_timeout returns, one is when
			   the interrupt is serviced correctly and the process is waked up by
			   ISR ON TIME. Another is when timeout is expired, which means that
			   either interrupt is NOT serviced correctly (pending interrupt) or it
			   is TOO LATE for the process to be scheduled to run (scheduler latency)
			   which results in a (potential) buffer overrun. And worse, there is
			   NOTHING we can do to prevent it. */
			tmo = schedule_timeout(tmo);
			spin_lock_irqsave(&state->unit->reg_lock, flags);
			set_current_state(TASK_INTERRUPTIBLE);
			if (tmo == 0 && dmabuf->count == 0) {
				printk(KERN_ERR "ymfpci%d: recording schedule timeout, "
				    "dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
				    state->unit->dev_audio,
				    dmabuf->dmasize, dmabuf->fragsize, dmabuf->count,
				    dmabuf->hwptr, dmabuf->swptr);
			}
			spin_unlock_irqrestore(&state->unit->reg_lock, flags);
			if (signal_pending(current)) {
				if (!ret) ret = -ERESTARTSYS;
				break;
			}
			continue;
		}

		if (copy_to_user(buffer, dmabuf->rawbuf + swptr, cnt)) {
			if (!ret) ret = -EFAULT;
			break;
		}

		swptr = (swptr + cnt) % dmabuf->dmasize;

		spin_lock_irqsave(&unit->reg_lock, flags);
		if (unit->suspended) {
			/* Suspended while we copied: drop the bookkeeping, retry later. */
			spin_unlock_irqrestore(&unit->reg_lock, flags);
			continue;
		}

		dmabuf->swptr = swptr;
		dmabuf->count -= cnt;
		/* reg_lock is deliberately kept held through the trigger check below. */
		// spin_unlock_irqrestore(&unit->reg_lock, flags);

		count -= cnt;
		buffer += cnt;
		ret += cnt;
		// spin_lock_irqsave(&unit->reg_lock, flags);
		if (!state->rpcm.running) {
			ymf_capture_trigger(unit, &state->rpcm, 1);
		}
		spin_unlock_irqrestore(&unit->reg_lock, flags);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&dmabuf->wait, &waita);

	return ret;
}
1329
/*
 * write(2) for the playback channel.
 *
 * Data is appended at dmabuf->swptr and drained by the DMA engine.
 * A "red zone" of three frame periods is kept free so a restart after
 * a pause cannot collide with the advancing hwptr (rationale in the
 * comment below).  Playback auto-starts once ~50ms of data is queued,
 * for applications that never call SNDCTL_DSP_SYNC.
 */
static ssize_t
ymf_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct ymf_state *state = (struct ymf_state *)file->private_data;
	struct ymf_dmabuf *dmabuf = &state->wpcm.dmabuf;
	struct ymf_unit *unit = state->unit;
	DECLARE_WAITQUEUE(waita, current);
	ssize_t ret;
	unsigned long flags;
	unsigned int swptr;
	int cnt;			/* This many to go in this revolution */
	int redzone;
	int delay;

	YMFDBGW("ymf_write: count %d\n", count);

	if (dmabuf->mapped)
		return -ENXIO;
	if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
		return ret;
	ret = 0;

	/*
	 * Alan's cs46xx works without a red zone - marvel of ingenuity.
	 * We are not so brilliant... Red zone does two things:
	 *  1. allows for safe start after a pause as we have no way
	 *     to know what the actual, relentlessly advancing, hwptr is.
	 *  2. makes computations in ymf_pcm_interrupt simpler.
	 */
	redzone = ymf_calc_lend(state->format.rate) << state->format.shift;
	redzone *= 3;	/* 2 redzone + 1 possible uncertainty reserve. */

	add_wait_queue(&dmabuf->wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);
	while (count > 0) {
		spin_lock_irqsave(&unit->reg_lock, flags);
		if (unit->suspended) {
			/* Device suspended: sleep until resume wakes us, then retry. */
			spin_unlock_irqrestore(&unit->reg_lock, flags);
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
			if (signal_pending(current)) {
				if (!ret) ret = -EAGAIN;
				break;
			}
			continue;
		}
		if (dmabuf->count < 0) {
			/* Should not happen with this hardware; clamp and complain. */
			printk(KERN_ERR
			   "ymf_write: count %d, was legal in cs46xx\n",
			    dmabuf->count);
			dmabuf->count = 0;
		}
		if (dmabuf->count == 0) {
			/* Empty buffer: restart writing at (or safely past) hwptr. */
			swptr = dmabuf->hwptr;
			if (state->wpcm.running) {
				/*
				 * Add uncertainty reserve.
				 */
				cnt = ymf_calc_lend(state->format.rate);
				cnt <<= state->format.shift;
				if ((swptr += cnt) >= dmabuf->dmasize) {
					swptr -= dmabuf->dmasize;
				}
			}
			dmabuf->swptr = swptr;
		} else {
			/*
			 * XXX This is not right if dmabuf->count is small -
			 * about 2*x frame size or less. We cannot count on
			 * on appending and not causing an artefact.
			 * Should use a variation of the count==0 case above.
			 */
			swptr = dmabuf->swptr;
		}
		cnt = dmabuf->dmasize - swptr;
		if (dmabuf->count + cnt > dmabuf->dmasize - redzone)
			cnt = (dmabuf->dmasize - redzone) - dmabuf->count;
		spin_unlock_irqrestore(&unit->reg_lock, flags);

		if (cnt > count)
			cnt = count;
		if (cnt <= 0) {
			YMFDBGW("ymf_write: full, count %d swptr %d\n",
			   dmabuf->count, dmabuf->swptr);
			/*
			 * buffer is full, start the dma machine and
			 * wait for data to be played
			 */
			spin_lock_irqsave(&unit->reg_lock, flags);
			if (!state->wpcm.running) {
				ymf_playback_trigger(unit, &state->wpcm, 1);
			}
			spin_unlock_irqrestore(&unit->reg_lock, flags);
			if (file->f_flags & O_NONBLOCK) {
				if (!ret) ret = -EAGAIN;
				break;
			}
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
			if (signal_pending(current)) {
				if (!ret) ret = -ERESTARTSYS;
				break;
			}
			continue;
		}
		if (copy_from_user(dmabuf->rawbuf + swptr, buffer, cnt)) {
			if (!ret) ret = -EFAULT;
			break;
		}

		if ((swptr += cnt) >= dmabuf->dmasize) {
			swptr -= dmabuf->dmasize;
		}

		spin_lock_irqsave(&unit->reg_lock, flags);
		if (unit->suspended) {
			/* Suspended while we copied: drop the bookkeeping, retry later. */
			spin_unlock_irqrestore(&unit->reg_lock, flags);
			continue;
		}
		dmabuf->swptr = swptr;
		dmabuf->count += cnt;

		/*
		 * Start here is a bad idea - may cause startup click
		 * in /bin/play when dmabuf is not full yet.
		 * However, some broken applications do not make
		 * any use of SNDCTL_DSP_SYNC (Doom is the worst).
		 * One frame is about 5.3ms, Doom write size is 46ms.
		 */
		delay = state->format.rate / 20;	/* 50ms */
		delay <<= state->format.shift;
		if (dmabuf->count >= delay && !state->wpcm.running) {
			ymf_playback_trigger(unit, &state->wpcm, 1);
		}

		spin_unlock_irqrestore(&unit->reg_lock, flags);

		count -= cnt;
		buffer += cnt;
		ret += cnt;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&dmabuf->wait, &waita);

	YMFDBGW("ymf_write: ret %d dmabuf.count %d\n", ret, dmabuf->count);
	return ret;
}
1478
1479static unsigned int ymf_poll(struct file *file, struct poll_table_struct *wait)
1480{
1481 struct ymf_state *state = (struct ymf_state *)file->private_data;
1482 struct ymf_dmabuf *dmabuf;
1483 int redzone;
1484 unsigned long flags;
1485 unsigned int mask = 0;
1486
1487 if (file->f_mode & FMODE_WRITE)
1488 poll_wait(file, &state->wpcm.dmabuf.wait, wait);
1489 if (file->f_mode & FMODE_READ)
1490 poll_wait(file, &state->rpcm.dmabuf.wait, wait);
1491
1492 spin_lock_irqsave(&state->unit->reg_lock, flags);
1493 if (file->f_mode & FMODE_READ) {
1494 dmabuf = &state->rpcm.dmabuf;
1495 if (dmabuf->count >= (signed)dmabuf->fragsize)
1496 mask |= POLLIN | POLLRDNORM;
1497 }
1498 if (file->f_mode & FMODE_WRITE) {
1499 redzone = ymf_calc_lend(state->format.rate);
1500 redzone <<= state->format.shift;
1501 redzone *= 3;
1502
1503 dmabuf = &state->wpcm.dmabuf;
1504 if (dmabuf->mapped) {
1505 if (dmabuf->count >= (signed)dmabuf->fragsize)
1506 mask |= POLLOUT | POLLWRNORM;
1507 } else {
1508 /*
1509 * Don't select unless a full fragment is available.
1510 * Otherwise artsd does GETOSPACE, sees 0, and loops.
1511 */
1512 if (dmabuf->count + redzone + dmabuf->fragsize
1513 <= dmabuf->dmasize)
1514 mask |= POLLOUT | POLLWRNORM;
1515 }
1516 }
1517 spin_unlock_irqrestore(&state->unit->reg_lock, flags);
1518
1519 return mask;
1520}
1521
1522static int ymf_mmap(struct file *file, struct vm_area_struct *vma)
1523{
1524 struct ymf_state *state = (struct ymf_state *)file->private_data;
1525 struct ymf_dmabuf *dmabuf = &state->wpcm.dmabuf;
1526 int ret;
1527 unsigned long size;
1528
1529 if (vma->vm_flags & VM_WRITE) {
1530 if ((ret = prog_dmabuf(state, 0)) != 0)
1531 return ret;
1532 } else if (vma->vm_flags & VM_READ) {
1533 if ((ret = prog_dmabuf(state, 1)) != 0)
1534 return ret;
1535 } else
1536 return -EINVAL;
1537
1538 if (vma->vm_pgoff != 0)
1539 return -EINVAL;
1540 size = vma->vm_end - vma->vm_start;
1541 if (size > (PAGE_SIZE << dmabuf->buforder))
1542 return -EINVAL;
1543 if (remap_pfn_range(vma, vma->vm_start,
1544 virt_to_phys(dmabuf->rawbuf) >> PAGE_SHIFT,
1545 size, vma->vm_page_prot))
1546 return -EAGAIN;
1547 dmabuf->mapped = 1;
1548
1549/* P3 */ printk(KERN_INFO "ymfpci: using memory mapped sound, untested!\n");
1550 return 0;
1551}
1552
/*
 * ioctl(2) for the DSP device: the OSS /dev/dsp control interface
 * (sample rate, channel count, sample format, buffer space queries,
 * DMA pointer queries).  Commands that change the stream format stop
 * the affected direction first and mark its dmabuf not-ready so it is
 * reprogrammed on next use.  Unsupported/unknown commands: -ENOTTY.
 */
static int ymf_ioctl(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct ymf_state *state = (struct ymf_state *)file->private_data;
	struct ymf_dmabuf *dmabuf;
	unsigned long flags;
	audio_buf_info abinfo;
	count_info cinfo;
	int redzone;
	int val;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	switch (cmd) {
	case OSS_GETVERSION:
		YMFDBGX("ymf_ioctl: cmd 0x%x(GETVER) arg 0x%lx\n", cmd, arg);
		return put_user(SOUND_VERSION, p);

	case SNDCTL_DSP_RESET:
		YMFDBGX("ymf_ioctl: cmd 0x%x(RESET)\n", cmd);
		if (file->f_mode & FMODE_WRITE) {
			ymf_wait_dac(state);
			dmabuf = &state->wpcm.dmabuf;
			spin_lock_irqsave(&state->unit->reg_lock, flags);
			dmabuf->ready = 0;
			dmabuf->swptr = dmabuf->hwptr;
			dmabuf->count = dmabuf->total_bytes = 0;
			spin_unlock_irqrestore(&state->unit->reg_lock, flags);
		}
		if (file->f_mode & FMODE_READ) {
			ymf_stop_adc(state);
			dmabuf = &state->rpcm.dmabuf;
			spin_lock_irqsave(&state->unit->reg_lock, flags);
			dmabuf->ready = 0;
			dmabuf->swptr = dmabuf->hwptr;
			dmabuf->count = dmabuf->total_bytes = 0;
			spin_unlock_irqrestore(&state->unit->reg_lock, flags);
		}
		return 0;

	case SNDCTL_DSP_SYNC:
		YMFDBGX("ymf_ioctl: cmd 0x%x(SYNC)\n", cmd);
		if (file->f_mode & FMODE_WRITE) {
			dmabuf = &state->wpcm.dmabuf;
			if (file->f_flags & O_NONBLOCK) {
				/* Non-blocking sync: just kick the DAC if data is queued. */
				spin_lock_irqsave(&state->unit->reg_lock, flags);
				if (dmabuf->count != 0 && !state->wpcm.running) {
					ymf_start_dac(state);
				}
				spin_unlock_irqrestore(&state->unit->reg_lock, flags);
			} else {
				ymf_wait_dac(state);
			}
		}
		/* XXX What does this do for reading? dmabuf->count=0; ? */
		return 0;

	case SNDCTL_DSP_SPEED: /* set sample rate */
		if (get_user(val, p))
			return -EFAULT;
		YMFDBGX("ymf_ioctl: cmd 0x%x(SPEED) sp %d\n", cmd, val);
		/* Out-of-range rates are silently ignored; current rate is returned. */
		if (val >= 8000 && val <= 48000) {
			if (file->f_mode & FMODE_WRITE) {
				ymf_wait_dac(state);
				dmabuf = &state->wpcm.dmabuf;
				spin_lock_irqsave(&state->unit->reg_lock, flags);
				dmabuf->ready = 0;
				state->format.rate = val;
				ymf_pcm_update_shift(&state->format);
				spin_unlock_irqrestore(&state->unit->reg_lock, flags);
			}
			if (file->f_mode & FMODE_READ) {
				ymf_stop_adc(state);
				dmabuf = &state->rpcm.dmabuf;
				spin_lock_irqsave(&state->unit->reg_lock, flags);
				dmabuf->ready = 0;
				state->format.rate = val;
				ymf_pcm_update_shift(&state->format);
				spin_unlock_irqrestore(&state->unit->reg_lock, flags);
			}
		}
		return put_user(state->format.rate, p);

	/*
	 * OSS manual does not mention SNDCTL_DSP_STEREO at all.
	 * All channels are mono and if you want stereo, you
	 * play into two channels with SNDCTL_DSP_CHANNELS.
	 * However, mpg123 calls it. I wonder, why Michael Hipp used it.
	 */
	case SNDCTL_DSP_STEREO: /* set stereo or mono channel */
		if (get_user(val, p))
			return -EFAULT;
		YMFDBGX("ymf_ioctl: cmd 0x%x(STEREO) st %d\n", cmd, val);
		if (file->f_mode & FMODE_WRITE) {
			ymf_wait_dac(state);
			dmabuf = &state->wpcm.dmabuf;
			spin_lock_irqsave(&state->unit->reg_lock, flags);
			dmabuf->ready = 0;
			state->format.voices = val ? 2 : 1;
			ymf_pcm_update_shift(&state->format);
			spin_unlock_irqrestore(&state->unit->reg_lock, flags);
		}
		if (file->f_mode & FMODE_READ) {
			ymf_stop_adc(state);
			dmabuf = &state->rpcm.dmabuf;
			spin_lock_irqsave(&state->unit->reg_lock, flags);
			dmabuf->ready = 0;
			state->format.voices = val ? 2 : 1;
			ymf_pcm_update_shift(&state->format);
			spin_unlock_irqrestore(&state->unit->reg_lock, flags);
		}
		return 0;

	case SNDCTL_DSP_GETBLKSIZE:
		YMFDBGX("ymf_ioctl: cmd 0x%x(GETBLK)\n", cmd);
		if (file->f_mode & FMODE_WRITE) {
			if ((val = prog_dmabuf(state, 0)))
				return val;
			val = state->wpcm.dmabuf.fragsize;
			YMFDBGX("ymf_ioctl: GETBLK w %d\n", val);
			return put_user(val, p);
		}
		if (file->f_mode & FMODE_READ) {
			if ((val = prog_dmabuf(state, 1)))
				return val;
			val = state->rpcm.dmabuf.fragsize;
			YMFDBGX("ymf_ioctl: GETBLK r %d\n", val);
			return put_user(val, p);
		}
		return -EINVAL;

	case SNDCTL_DSP_GETFMTS: /* Returns a mask of supported sample format*/
		YMFDBGX("ymf_ioctl: cmd 0x%x(GETFMTS)\n", cmd);
		return put_user(AFMT_S16_LE|AFMT_U8, p);

	case SNDCTL_DSP_SETFMT: /* Select sample format */
		if (get_user(val, p))
			return -EFAULT;
		YMFDBGX("ymf_ioctl: cmd 0x%x(SETFMT) fmt %d\n", cmd, val);
		/* Unsupported formats are ignored; current format is returned. */
		if (val == AFMT_S16_LE || val == AFMT_U8) {
			if (file->f_mode & FMODE_WRITE) {
				ymf_wait_dac(state);
				dmabuf = &state->wpcm.dmabuf;
				spin_lock_irqsave(&state->unit->reg_lock, flags);
				dmabuf->ready = 0;
				state->format.format = val;
				ymf_pcm_update_shift(&state->format);
				spin_unlock_irqrestore(&state->unit->reg_lock, flags);
			}
			if (file->f_mode & FMODE_READ) {
				ymf_stop_adc(state);
				dmabuf = &state->rpcm.dmabuf;
				spin_lock_irqsave(&state->unit->reg_lock, flags);
				dmabuf->ready = 0;
				state->format.format = val;
				ymf_pcm_update_shift(&state->format);
				spin_unlock_irqrestore(&state->unit->reg_lock, flags);
			}
		}
		return put_user(state->format.format, p);

	case SNDCTL_DSP_CHANNELS:
		if (get_user(val, p))
			return -EFAULT;
		YMFDBGX("ymf_ioctl: cmd 0x%x(CHAN) ch %d\n", cmd, val);
		/* Only 1 or 2 channels are accepted; 0 is a pure query. */
		if (val != 0) {
			if (file->f_mode & FMODE_WRITE) {
				ymf_wait_dac(state);
				if (val == 1 || val == 2) {
					spin_lock_irqsave(&state->unit->reg_lock, flags);
					dmabuf = &state->wpcm.dmabuf;
					dmabuf->ready = 0;
					state->format.voices = val;
					ymf_pcm_update_shift(&state->format);
					spin_unlock_irqrestore(&state->unit->reg_lock, flags);
				}
			}
			if (file->f_mode & FMODE_READ) {
				ymf_stop_adc(state);
				if (val == 1 || val == 2) {
					spin_lock_irqsave(&state->unit->reg_lock, flags);
					dmabuf = &state->rpcm.dmabuf;
					dmabuf->ready = 0;
					state->format.voices = val;
					ymf_pcm_update_shift(&state->format);
					spin_unlock_irqrestore(&state->unit->reg_lock, flags);
				}
			}
		}
		return put_user(state->format.voices, p);

	case SNDCTL_DSP_POST:
		YMFDBGX("ymf_ioctl: cmd 0x%x(POST)\n", cmd);
		/*
		 * Quoting OSS PG:
		 *  The ioctl SNDCTL_DSP_POST is a lightweight version of
		 *  SNDCTL_DSP_SYNC. It just tells to the driver that there
		 *  is likely to be a pause in the output. This makes it
		 *  possible for the device to handle the pause more
		 *  intelligently. This ioctl doesn't block the application.
		 *
		 * The paragraph above is a clumsy way to say "flush ioctl".
		 * This ioctl is used by mpg123.
		 */
		spin_lock_irqsave(&state->unit->reg_lock, flags);
		if (state->wpcm.dmabuf.count != 0 && !state->wpcm.running) {
			ymf_start_dac(state);
		}
		spin_unlock_irqrestore(&state->unit->reg_lock, flags);
		return 0;

	case SNDCTL_DSP_SETFRAGMENT:
		if (get_user(val, p))
			return -EFAULT;
		YMFDBGX("ymf_ioctl: cmd 0x%x(SETFRAG) fr 0x%04x:%04x(%d:%d)\n",
		    cmd,
		    (val >> 16) & 0xFFFF, val & 0xFFFF,
		    (val >> 16) & 0xFFFF, val & 0xFFFF);
		/* NOTE(review): only the playback dmabuf is configured here;
		   the capture side is untouched — confirm that is intended. */
		dmabuf = &state->wpcm.dmabuf;
		dmabuf->ossfragshift = val & 0xffff;
		dmabuf->ossmaxfrags = (val >> 16) & 0xffff;
		/* Clamp fragment shift to the supported 16..32768 byte range. */
		if (dmabuf->ossfragshift < 4)
			dmabuf->ossfragshift = 4;
		if (dmabuf->ossfragshift > 15)
			dmabuf->ossfragshift = 15;
		return 0;

	case SNDCTL_DSP_GETOSPACE:
		YMFDBGX("ymf_ioctl: cmd 0x%x(GETOSPACE)\n", cmd);
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		dmabuf = &state->wpcm.dmabuf;
		if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
			return val;
		/* Report free space minus the red-zone reserve (see ymf_write). */
		redzone = ymf_calc_lend(state->format.rate);
		redzone <<= state->format.shift;
		redzone *= 3;
		spin_lock_irqsave(&state->unit->reg_lock, flags);
		abinfo.fragsize = dmabuf->fragsize;
		abinfo.bytes = dmabuf->dmasize - dmabuf->count - redzone;
		abinfo.fragstotal = dmabuf->numfrag;
		abinfo.fragments = abinfo.bytes >> dmabuf->fragshift;
		spin_unlock_irqrestore(&state->unit->reg_lock, flags);
		return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_GETISPACE:
		YMFDBGX("ymf_ioctl: cmd 0x%x(GETISPACE)\n", cmd);
		if (!(file->f_mode & FMODE_READ))
			return -EINVAL;
		dmabuf = &state->rpcm.dmabuf;
		if (!dmabuf->ready && (val = prog_dmabuf(state, 1)) != 0)
			return val;
		spin_lock_irqsave(&state->unit->reg_lock, flags);
		abinfo.fragsize = dmabuf->fragsize;
		abinfo.bytes = dmabuf->count;
		abinfo.fragstotal = dmabuf->numfrag;
		abinfo.fragments = abinfo.bytes >> dmabuf->fragshift;
		spin_unlock_irqrestore(&state->unit->reg_lock, flags);
		return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_NONBLOCK:
		YMFDBGX("ymf_ioctl: cmd 0x%x(NONBLOCK)\n", cmd);
		file->f_flags |= O_NONBLOCK;
		return 0;

	case SNDCTL_DSP_GETCAPS:
		YMFDBGX("ymf_ioctl: cmd 0x%x(GETCAPS)\n", cmd);
		/* return put_user(DSP_CAP_REALTIME|DSP_CAP_TRIGGER|DSP_CAP_MMAP,
		    p); */
		return put_user(0, p);

	case SNDCTL_DSP_GETIPTR:
		YMFDBGX("ymf_ioctl: cmd 0x%x(GETIPTR)\n", cmd);
		if (!(file->f_mode & FMODE_READ))
			return -EINVAL;
		dmabuf = &state->rpcm.dmabuf;
		spin_lock_irqsave(&state->unit->reg_lock, flags);
		cinfo.bytes = dmabuf->total_bytes;
		cinfo.blocks = dmabuf->count >> dmabuf->fragshift;
		cinfo.ptr = dmabuf->hwptr;
		spin_unlock_irqrestore(&state->unit->reg_lock, flags);
		YMFDBGX("ymf_ioctl: GETIPTR ptr %d bytes %d\n",
		    cinfo.ptr, cinfo.bytes);
		return copy_to_user(argp, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_GETOPTR:
		YMFDBGX("ymf_ioctl: cmd 0x%x(GETOPTR)\n", cmd);
		if (!(file->f_mode & FMODE_WRITE))
			return -EINVAL;
		dmabuf = &state->wpcm.dmabuf;
		spin_lock_irqsave(&state->unit->reg_lock, flags);
		cinfo.bytes = dmabuf->total_bytes;
		cinfo.blocks = dmabuf->count >> dmabuf->fragshift;
		cinfo.ptr = dmabuf->hwptr;
		spin_unlock_irqrestore(&state->unit->reg_lock, flags);
		YMFDBGX("ymf_ioctl: GETOPTR ptr %d bytes %d\n",
		    cinfo.ptr, cinfo.bytes);
		return copy_to_user(argp, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;

	case SNDCTL_DSP_SETDUPLEX:
		YMFDBGX("ymf_ioctl: cmd 0x%x(SETDUPLEX)\n", cmd);
		return 0;		/* Always duplex */

	case SOUND_PCM_READ_RATE:
		YMFDBGX("ymf_ioctl: cmd 0x%x(READ_RATE)\n", cmd);
		return put_user(state->format.rate, p);

	case SOUND_PCM_READ_CHANNELS:
		YMFDBGX("ymf_ioctl: cmd 0x%x(READ_CH)\n", cmd);
		return put_user(state->format.voices, p);

	case SOUND_PCM_READ_BITS:
		YMFDBGX("ymf_ioctl: cmd 0x%x(READ_BITS)\n", cmd);
		return put_user(AFMT_S16_LE, p);

	case SNDCTL_DSP_MAPINBUF:
	case SNDCTL_DSP_MAPOUTBUF:
	case SNDCTL_DSP_SETSYNCRO:
	case SOUND_PCM_WRITE_FILTER:
	case SOUND_PCM_READ_FILTER:
		YMFDBGX("ymf_ioctl: cmd 0x%x unsupported\n", cmd);
		return -ENOTTY;

	default:
		/*
		 * Some programs mix up audio devices and ioctls
		 * or perhaps they expect "universal" ioctls,
		 * for instance we get SNDCTL_TMR_CONTINUE here.
		 * (mpg123 -g 100 ends here too - to be fixed.)
		 */
		YMFDBGX("ymf_ioctl: cmd 0x%x unknown\n", cmd);
		break;
	}
	return -ENOTTY;
}
1888
/*
 * open(2)
 * We use upper part of the minor to distinguish between soundcards.
 * Channels are opened with a clone open.
 */
static int ymf_open(struct inode *inode, struct file *file)
{
	struct list_head *list;
	ymfpci_t *unit = NULL;
	int minor;
	struct ymf_state *state;
	int err;

	minor = iminor(inode);
	if ((minor & 0x0F) == 3) {	/* /dev/dspN */
		;
	} else {
		return -ENXIO;
	}

	unit = NULL;	/* gcc warns */
	spin_lock(&ymf_devs_lock);
	list_for_each(list, &ymf_devs) {
		unit = list_entry(list, ymfpci_t, ymf_devs);
		/* Match the card whose dev_audio shares the minor's upper bits. */
		if (((unit->dev_audio ^ minor) & ~0x0F) == 0)
			break;
	}
	spin_unlock(&ymf_devs_lock);
	if (unit == NULL)
		return -ENODEV;

	mutex_lock(&unit->open_mutex);

	if ((state = ymf_state_alloc(unit)) == NULL) {
		mutex_unlock(&unit->open_mutex);
		return -ENOMEM;
	}
	list_add_tail(&state->chain, &unit->states);

	file->private_data = state;

	/*
	 * ymf_read and ymf_write that we borrowed from cs46xx
	 * allocate buffers with prog_dmabuf(). We call prog_dmabuf
	 * here so that in case of DMA memory exhaustion open
	 * fails rather than write.
	 *
	 * XXX prog_dmabuf allocates voice. Should allocate explicitly, above.
	 */
	if (file->f_mode & FMODE_WRITE) {
		if (!state->wpcm.dmabuf.ready) {
			if ((err = prog_dmabuf(state, 0)) != 0) {
				goto out_nodma;
			}
		}
	}
	if (file->f_mode & FMODE_READ) {
		if (!state->rpcm.dmabuf.ready) {
			if ((err = prog_dmabuf(state, 1)) != 0) {
				goto out_nodma;
			}
		}
	}

#if 0 /* test if interrupts work */
	ymfpci_writew(unit, YDSXGR_TIMERCOUNT, 0xfffe);	/* ~ 680ms */
	ymfpci_writeb(unit, YDSXGR_TIMERCTRL,
	    (YDSXGR_TIMERCTRL_TEN|YDSXGR_TIMERCTRL_TIEN));
#endif
	mutex_unlock(&unit->open_mutex);

	return nonseekable_open(inode, file);

out_nodma:
	/*
	 * XXX Broken custom: "goto out_xxx" in other place is
	 * a nestable exception, but here it is not nestable due to semaphore.
	 * XXX Doubtful technique of self-describing objects....
	 */
	/* Undo everything done above: buffers, substreams, state list entry. */
	dealloc_dmabuf(unit, &state->wpcm.dmabuf);
	dealloc_dmabuf(unit, &state->rpcm.dmabuf);
	ymf_pcm_free_substream(&state->wpcm);
	ymf_pcm_free_substream(&state->rpcm);

	list_del(&state->chain);
	kfree(state);

	mutex_unlock(&unit->open_mutex);
	return err;
}
1979
/*
 * close(2): drain playback, stop capture, then free DMA buffers,
 * hardware voices/banks, and the per-open state (under open_mutex).
 */
static int ymf_release(struct inode *inode, struct file *file)
{
	struct ymf_state *state = (struct ymf_state *)file->private_data;
	ymfpci_t *unit = state->unit;

#if 0 /* test if interrupts work */
	ymfpci_writeb(unit, YDSXGR_TIMERCTRL, 0);
#endif

	mutex_lock(&unit->open_mutex);

	/*
	 * XXX Solve the case of O_NONBLOCK close - don't deallocate here.
	 * Deallocate when unloading the driver and we can wait.
	 */
	ymf_wait_dac(state);
	ymf_stop_adc(state);		/* fortunately, it's immediate */
	dealloc_dmabuf(unit, &state->wpcm.dmabuf);
	dealloc_dmabuf(unit, &state->rpcm.dmabuf);
	ymf_pcm_free_substream(&state->wpcm);
	ymf_pcm_free_substream(&state->rpcm);

	list_del(&state->chain);
	file->private_data = NULL;	/* Can you tell I programmed Solaris */
	kfree(state);

	mutex_unlock(&unit->open_mutex);

	return 0;
}
2010
2011/*
2012 * Mixer operations are based on cs46xx.
2013 */
2014static int ymf_open_mixdev(struct inode *inode, struct file *file)
2015{
2016 int minor = iminor(inode);
2017 struct list_head *list;
2018 ymfpci_t *unit;
2019 int i;
2020
2021 spin_lock(&ymf_devs_lock);
2022 list_for_each(list, &ymf_devs) {
2023 unit = list_entry(list, ymfpci_t, ymf_devs);
2024 for (i = 0; i < NR_AC97; i++) {
2025 if (unit->ac97_codec[i] != NULL &&
2026 unit->ac97_codec[i]->dev_mixer == minor) {
2027 spin_unlock(&ymf_devs_lock);
2028 goto match;
2029 }
2030 }
2031 }
2032 spin_unlock(&ymf_devs_lock);
2033 return -ENODEV;
2034
2035 match:
2036 file->private_data = unit->ac97_codec[i];
2037
2038 return nonseekable_open(inode, file);
2039}
2040
2041static int ymf_ioctl_mixdev(struct inode *inode, struct file *file,
2042 unsigned int cmd, unsigned long arg)
2043{
2044 struct ac97_codec *codec = (struct ac97_codec *)file->private_data;
2045
2046 return codec->mixer_ioctl(codec, cmd, arg);
2047}
2048
/* Mixer close: nothing to tear down — mixer state lives with the unit. */
static int ymf_release_mixdev(struct inode *inode, struct file *file)
{
	return 0;
}
2053
/* File operations for the PCM device (/dev/dsp). */
static /*const*/ struct file_operations ymf_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= ymf_read,
	.write		= ymf_write,
	.poll		= ymf_poll,
	.ioctl		= ymf_ioctl,
	.mmap		= ymf_mmap,
	.open		= ymf_open,
	.release	= ymf_release,
};
2065
/* File operations for the mixer device (/dev/mixer). */
static /*const*/ struct file_operations ymf_mixer_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.ioctl		= ymf_ioctl_mixdev,
	.open		= ymf_open_mixdev,
	.release	= ymf_release_mixdev,
};
2073
2074/*
2075 */
2076
2077static int ymf_suspend(struct pci_dev *pcidev, pm_message_t unused)
2078{
2079 struct ymf_unit *unit = pci_get_drvdata(pcidev);
2080 unsigned long flags;
2081 struct ymf_dmabuf *dmabuf;
2082 struct list_head *p;
2083 struct ymf_state *state;
2084 struct ac97_codec *codec;
2085 int i;
2086
2087 spin_lock_irqsave(&unit->reg_lock, flags);
2088
2089 unit->suspended = 1;
2090
2091 for (i = 0; i < NR_AC97; i++) {
2092 if ((codec = unit->ac97_codec[i]) != NULL)
2093 ac97_save_state(codec);
2094 }
2095
2096 list_for_each(p, &unit->states) {
2097 state = list_entry(p, struct ymf_state, chain);
2098
2099 dmabuf = &state->wpcm.dmabuf;
2100 dmabuf->hwptr = dmabuf->swptr = 0;
2101 dmabuf->total_bytes = 0;
2102 dmabuf->count = 0;
2103
2104 dmabuf = &state->rpcm.dmabuf;
2105 dmabuf->hwptr = dmabuf->swptr = 0;
2106 dmabuf->total_bytes = 0;
2107 dmabuf->count = 0;
2108 }
2109
2110 ymfpci_writel(unit, YDSXGR_NATIVEDACOUTVOL, 0);
2111 ymfpci_disable_dsp(unit);
2112
2113 spin_unlock_irqrestore(&unit->reg_lock, flags);
2114
2115 return 0;
2116}
2117
/*
 * Power-management resume: bring the chip back from scratch (AC-link
 * reset, DSP image download, on-chip memory layout reload), restore
 * AC97 codec registers, then wake readers/writers that blocked while
 * unit->suspended was set.  Sequence order mirrors ymf_probe_one.
 */
static int ymf_resume(struct pci_dev *pcidev)
{
	struct ymf_unit *unit = pci_get_drvdata(pcidev);
	unsigned long flags;
	struct list_head *p;
	struct ymf_state *state;
	struct ac97_codec *codec;
	int i;

	ymfpci_aclink_reset(unit->pci);
	ymfpci_codec_ready(unit, 0, 1);		/* prints diag if not ready. */

#ifdef CONFIG_SOUND_YMFPCI_LEGACY
	/* XXX At this time the legacy registers are probably deprogrammed. */
#endif

	ymfpci_download_image(unit);

	ymf_memload(unit);

	spin_lock_irqsave(&unit->reg_lock, flags);

	/* Restart the DSP only if streams were running at suspend time. */
	if (unit->start_count) {
		ymfpci_writel(unit, YDSXGR_MODE, 3);
		unit->active_bank = ymfpci_readl(unit, YDSXGR_CTRLSELECT) & 1;
	}

	for (i = 0; i < NR_AC97; i++) {
		if ((codec = unit->ac97_codec[i]) != NULL)
			ac97_restore_state(codec);
	}

	/* Clear the flag before waking so sleepers see the new state. */
	unit->suspended = 0;
	list_for_each(p, &unit->states) {
		state = list_entry(p, struct ymf_state, chain);
		wake_up(&state->wpcm.dmabuf.wait);
		wake_up(&state->rpcm.dmabuf.wait);
	}

	spin_unlock_irqrestore(&unit->reg_lock, flags);
	return 0;
}
2160
2161/*
2162 * initialization routines
2163 */
2164
2165#ifdef CONFIG_SOUND_YMFPCI_LEGACY
2166
2167static int ymfpci_setup_legacy(ymfpci_t *unit, struct pci_dev *pcidev)
2168{
2169 int v;
2170 int mpuio = -1, oplio = -1;
2171
2172 switch (unit->iomidi) {
2173 case 0x330:
2174 mpuio = 0;
2175 break;
2176 case 0x300:
2177 mpuio = 1;
2178 break;
2179 case 0x332:
2180 mpuio = 2;
2181 break;
2182 case 0x334:
2183 mpuio = 3;
2184 break;
2185 default: ;
2186 }
2187
2188 switch (unit->iosynth) {
2189 case 0x388:
2190 oplio = 0;
2191 break;
2192 case 0x398:
2193 oplio = 1;
2194 break;
2195 case 0x3a0:
2196 oplio = 2;
2197 break;
2198 case 0x3a8:
2199 oplio = 3;
2200 break;
2201 default: ;
2202 }
2203
2204 if (mpuio >= 0 || oplio >= 0) {
2205 /* 0x0020: 1 - 10 bits of I/O address decoded, 0 - 16 bits. */
2206 v = 0x001e;
2207 pci_write_config_word(pcidev, PCIR_LEGCTRL, v);
2208
2209 switch (pcidev->device) {
2210 case PCI_DEVICE_ID_YAMAHA_724:
2211 case PCI_DEVICE_ID_YAMAHA_740:
2212 case PCI_DEVICE_ID_YAMAHA_724F:
2213 case PCI_DEVICE_ID_YAMAHA_740C:
2214 v = 0x8800;
2215 if (mpuio >= 0) { v |= mpuio<<4; }
2216 if (oplio >= 0) { v |= oplio; }
2217 pci_write_config_word(pcidev, PCIR_ELEGCTRL, v);
2218 break;
2219
2220 case PCI_DEVICE_ID_YAMAHA_744:
2221 case PCI_DEVICE_ID_YAMAHA_754:
2222 v = 0x8800;
2223 pci_write_config_word(pcidev, PCIR_ELEGCTRL, v);
2224 if (oplio >= 0) {
2225 pci_write_config_word(pcidev, PCIR_OPLADR, unit->iosynth);
2226 }
2227 if (mpuio >= 0) {
2228 pci_write_config_word(pcidev, PCIR_MPUADR, unit->iomidi);
2229 }
2230 break;
2231
2232 default:
2233 printk(KERN_ERR "ymfpci: Unknown device ID: 0x%x\n",
2234 pcidev->device);
2235 return -EINVAL;
2236 }
2237 }
2238
2239 return 0;
2240}
2241#endif /* CONFIG_SOUND_YMFPCI_LEGACY */
2242
/*
 * Pulse the AC-link reset bits in PCI config space and clear both
 * power-control registers.  The low/high/low toggle sequence is what
 * actually resets the link — do not reorder these writes.
 */
static void ymfpci_aclink_reset(struct pci_dev * pci)
{
	u8 cmd;

	/*
	 * In the 744, 754 only 0x01 exists, 0x02 is undefined.
	 * It does not seem to hurt to trip both regardless of revision.
	 */
	pci_read_config_byte(pci, PCIR_DSXGCTRL, &cmd);
	pci_write_config_byte(pci, PCIR_DSXGCTRL, cmd & 0xfc);
	pci_write_config_byte(pci, PCIR_DSXGCTRL, cmd | 0x03);
	pci_write_config_byte(pci, PCIR_DSXGCTRL, cmd & 0xfc);

	pci_write_config_word(pci, PCIR_DSXPWRCTRL1, 0);
	pci_write_config_word(pci, PCIR_DSXPWRCTRL2, 0);
}
2259
/* Start the on-chip DSP by setting the run bit in YDSXGR_CONFIG. */
static void ymfpci_enable_dsp(ymfpci_t *codec)
{
	ymfpci_writel(codec, YDSXGR_CONFIG, 0x00000001);
}
2264
2265static void ymfpci_disable_dsp(ymfpci_t *codec)
2266{
2267 u32 val;
2268 int timeout = 1000;
2269
2270 val = ymfpci_readl(codec, YDSXGR_CONFIG);
2271 if (val)
2272 ymfpci_writel(codec, YDSXGR_CONFIG, 0x00000000);
2273 while (timeout-- > 0) {
2274 val = ymfpci_readl(codec, YDSXGR_STATUS);
2275 if ((val & 0x00000002) == 0)
2276 break;
2277 }
2278}
2279
2280#include "ymfpci_image.h"
2281
/*
 * Quiesce the chip and download the DSP and control microcode images
 * (DspInst, CntrlInst/CntrlInst1E from ymfpci_image.h), then restart
 * the DSP.  The register write sequence is a fixed hardware init
 * protocol — do not reorder.
 */
static void ymfpci_download_image(ymfpci_t *codec)
{
	int i, ver_1e;
	u16 ctrl;

	/* Mute, stop the DSP, and zero all base/map registers first. */
	ymfpci_writel(codec, YDSXGR_NATIVEDACOUTVOL, 0x00000000);
	ymfpci_disable_dsp(codec);
	ymfpci_writel(codec, YDSXGR_MODE, 0x00010000);
	ymfpci_writel(codec, YDSXGR_MODE, 0x00000000);
	ymfpci_writel(codec, YDSXGR_MAPOFREC, 0x00000000);
	ymfpci_writel(codec, YDSXGR_MAPOFEFFECT, 0x00000000);
	ymfpci_writel(codec, YDSXGR_PLAYCTRLBASE, 0x00000000);
	ymfpci_writel(codec, YDSXGR_RECCTRLBASE, 0x00000000);
	ymfpci_writel(codec, YDSXGR_EFFCTRLBASE, 0x00000000);
	ctrl = ymfpci_readw(codec, YDSXGR_GLOBALCTRL);
	ymfpci_writew(codec, YDSXGR_GLOBALCTRL, ctrl & ~0x0007);

	/* setup DSP instruction code */
	for (i = 0; i < YDSXG_DSPLENGTH / 4; i++)
		ymfpci_writel(codec, YDSXGR_DSPINSTRAM + (i << 2), DspInst[i]);

	/* Newer silicon revisions take a different control image. */
	switch (codec->pci->device) {
	case PCI_DEVICE_ID_YAMAHA_724F:
	case PCI_DEVICE_ID_YAMAHA_740C:
	case PCI_DEVICE_ID_YAMAHA_744:
	case PCI_DEVICE_ID_YAMAHA_754:
		ver_1e = 1;
		break;
	default:
		ver_1e = 0;
	}

	if (ver_1e) {
		/* setup control instruction code */
		for (i = 0; i < YDSXG_CTRLLENGTH / 4; i++)
			ymfpci_writel(codec, YDSXGR_CTRLINSTRAM + (i << 2), CntrlInst1E[i]);
	} else {
		for (i = 0; i < YDSXG_CTRLLENGTH / 4; i++)
			ymfpci_writel(codec, YDSXGR_CTRLINSTRAM + (i << 2), CntrlInst[i]);
	}

	ymfpci_enable_dsp(codec);

	/* 0.02s sounds not too bad, we may do schedule_timeout() later. */
	mdelay(20); /* seems we need some delay after downloading image.. */
}
2328
/*
 * Allocate one coherent DMA area holding, in order: the playback
 * control table, the per-voice playback banks, the capture banks, the
 * effect banks, and the DSP work area.  Each section is aligned to a
 * 256-byte boundary (hardware requirement — hence all the 0xff
 * rounding).  Returns 0 or -ENOMEM.
 */
static int ymfpci_memalloc(ymfpci_t *codec)
{
	unsigned int playback_ctrl_size;
	unsigned int bank_size_playback;
	unsigned int bank_size_capture;
	unsigned int bank_size_effect;
	unsigned int size;
	unsigned int off;
	char *ptr;
	dma_addr_t pba;
	int voice, bank;

	/* Bank sizes are reported by the chip (in 32-bit words). */
	playback_ctrl_size = 4 + 4 * YDSXG_PLAYBACK_VOICES;
	bank_size_playback = ymfpci_readl(codec, YDSXGR_PLAYCTRLSIZE) << 2;
	bank_size_capture = ymfpci_readl(codec, YDSXGR_RECCTRLSIZE) << 2;
	bank_size_effect = ymfpci_readl(codec, YDSXGR_EFFCTRLSIZE) << 2;
	codec->work_size = YDSXG_DEFAULT_WORK_SIZE;

	/* Each section rounded up to a 256-byte multiple; 2 banks/voice. */
	size = ((playback_ctrl_size + 0x00ff) & ~0x00ff) +
	    ((bank_size_playback * 2 * YDSXG_PLAYBACK_VOICES + 0xff) & ~0xff) +
	    ((bank_size_capture * 2 * YDSXG_CAPTURE_VOICES + 0xff) & ~0xff) +
	    ((bank_size_effect * 2 * YDSXG_EFFECT_VOICES + 0xff) & ~0xff) +
	    codec->work_size;

	/* Over-allocate by 0xff so we can realign the start ourselves. */
	ptr = pci_alloc_consistent(codec->pci, size + 0xff, &pba);
	if (ptr == NULL)
		return -ENOMEM;
	codec->dma_area_va = ptr;
	codec->dma_area_ba = pba;
	codec->dma_area_size = size + 0xff;

	/* Align both the virtual and bus address to 256 bytes in lockstep. */
	off = (unsigned long)ptr & 0xff;
	if (off) {
		ptr += 0x100 - off;
		pba += 0x100 - off;
	}

	/*
	 * Hardware requires only ptr[playback_ctrl_size] zeroed,
	 * but in our judgement it is a wrong kind of savings, so clear it all.
	 */
	memset(ptr, 0, size);

	/* Section 1: playback control table (first word = voice count). */
	codec->ctrl_playback = (u32 *)ptr;
	codec->ctrl_playback_ba = pba;
	codec->ctrl_playback[0] = cpu_to_le32(YDSXG_PLAYBACK_VOICES);
	ptr += (playback_ctrl_size + 0x00ff) & ~0x00ff;
	pba += (playback_ctrl_size + 0x00ff) & ~0x00ff;

	/* Section 2: two playback banks per voice, contiguous. */
	off = 0;
	for (voice = 0; voice < YDSXG_PLAYBACK_VOICES; voice++) {
		codec->voices[voice].number = voice;
		codec->voices[voice].bank =
		    (ymfpci_playback_bank_t *) (ptr + off);
		codec->voices[voice].bank_ba = pba + off;
		off += 2 * bank_size_playback;	/* 2 banks */
	}
	off = (off + 0xff) & ~0xff;
	ptr += off;
	pba += off;

	/* Section 3: capture banks (2 per capture voice). */
	off = 0;
	codec->bank_base_capture = pba;
	for (voice = 0; voice < YDSXG_CAPTURE_VOICES; voice++)
		for (bank = 0; bank < 2; bank++) {
			codec->bank_capture[voice][bank] =
			    (ymfpci_capture_bank_t *) (ptr + off);
			off += bank_size_capture;
		}
	off = (off + 0xff) & ~0xff;
	ptr += off;
	pba += off;

	/* Section 4: effect banks (2 per effect voice). */
	off = 0;
	codec->bank_base_effect = pba;
	for (voice = 0; voice < YDSXG_EFFECT_VOICES; voice++)
		for (bank = 0; bank < 2; bank++) {
			codec->bank_effect[voice][bank] =
			    (ymfpci_effect_bank_t *) (ptr + off);
			off += bank_size_effect;
		}
	off = (off + 0xff) & ~0xff;
	ptr += off;
	pba += off;

	/* Section 5: DSP work area (bus address only; CPU never touches it). */
	codec->work_base = pba;

	return 0;
}
2418
/*
 * Detach the chip from the DMA area (zero all base/size registers so
 * it cannot DMA into freed memory), then release the coherent buffer
 * allocated by ymfpci_memalloc.
 */
static void ymfpci_memfree(ymfpci_t *codec)
{
	ymfpci_writel(codec, YDSXGR_PLAYCTRLBASE, 0);
	ymfpci_writel(codec, YDSXGR_RECCTRLBASE, 0);
	ymfpci_writel(codec, YDSXGR_EFFCTRLBASE, 0);
	ymfpci_writel(codec, YDSXGR_WORKBASE, 0);
	ymfpci_writel(codec, YDSXGR_WORKSIZE, 0);
	pci_free_consistent(codec->pci,
	    codec->dma_area_size, codec->dma_area_va, codec->dma_area_ba);
}
2429
/*
 * Point the chip at the DMA layout built by ymfpci_memalloc and set
 * initial S/PDIF and volume registers.  Called from probe and resume.
 */
static void ymf_memload(ymfpci_t *unit)
{

	ymfpci_writel(unit, YDSXGR_PLAYCTRLBASE, unit->ctrl_playback_ba);
	ymfpci_writel(unit, YDSXGR_RECCTRLBASE, unit->bank_base_capture);
	ymfpci_writel(unit, YDSXGR_EFFCTRLBASE, unit->bank_base_effect);
	ymfpci_writel(unit, YDSXGR_WORKBASE, unit->work_base);
	ymfpci_writel(unit, YDSXGR_WORKSIZE, unit->work_size >> 2);	/* in words */

	/* S/PDIF output initialization */
	ymfpci_writew(unit, YDSXGR_SPDIFOUTCTRL, 0);
	ymfpci_writew(unit, YDSXGR_SPDIFOUTSTATUS,
	    SND_PCM_AES0_CON_EMPHASIS_NONE |
	    (SND_PCM_AES1_CON_ORIGINAL << 8) |
	    (SND_PCM_AES1_CON_PCM_CODER << 8));

	/* S/PDIF input initialization */
	ymfpci_writew(unit, YDSXGR_SPDIFINCTRL, 0);

	/* move this volume setup to mixer */
	ymfpci_writel(unit, YDSXGR_NATIVEDACOUTVOL, 0x3fff3fff);
	ymfpci_writel(unit, YDSXGR_BUF441OUTVOL, 0);
	ymfpci_writel(unit, YDSXGR_NATIVEADCINVOL, 0x3fff3fff);
	ymfpci_writel(unit, YDSXGR_NATIVEDACINVOL, 0x3fff3fff);
}
2455
2456static int ymf_ac97_init(ymfpci_t *unit, int num_ac97)
2457{
2458 struct ac97_codec *codec;
2459 u16 eid;
2460
2461 if ((codec = ac97_alloc_codec()) == NULL)
2462 return -ENOMEM;
2463
2464 /* initialize some basic codec information, other fields will be filled
2465 in ac97_probe_codec */
2466 codec->private_data = unit;
2467 codec->id = num_ac97;
2468
2469 codec->codec_read = ymfpci_codec_read;
2470 codec->codec_write = ymfpci_codec_write;
2471
2472 if (ac97_probe_codec(codec) == 0) {
2473 printk(KERN_ERR "ymfpci: ac97_probe_codec failed\n");
2474 goto out_kfree;
2475 }
2476
2477 eid = ymfpci_codec_read(codec, AC97_EXTENDED_ID);
2478 if (eid==0xFFFF) {
2479 printk(KERN_WARNING "ymfpci: no codec attached ?\n");
2480 goto out_kfree;
2481 }
2482
2483 unit->ac97_features = eid;
2484
2485 if ((codec->dev_mixer = register_sound_mixer(&ymf_mixer_fops, -1)) < 0) {
2486 printk(KERN_ERR "ymfpci: couldn't register mixer!\n");
2487 goto out_kfree;
2488 }
2489
2490 unit->ac97_codec[num_ac97] = codec;
2491
2492 return 0;
2493 out_kfree:
2494 ac97_release_codec(codec);
2495 return -ENODEV;
2496}
2497
#ifdef CONFIG_SOUND_YMFPCI_LEGACY
# ifdef MODULE
/* As a module, legacy MPU401/OPL3 ports default to 0 (disabled). */
static int mpu_io;
static int synth_io;
module_param(mpu_io, int, 0);
module_param(synth_io, int, 0);
# else
/* Built in: enable legacy ports at the conventional addresses. */
static int mpu_io 	= 0x330;
static int synth_io 	= 0x388;
# endif
/* Legacy resources can be decoded by only one card; first probe wins. */
static int assigned;
#endif /* CONFIG_SOUND_YMFPCI_LEGACY */
2510
2511static int __devinit ymf_probe_one(struct pci_dev *pcidev, const struct pci_device_id *ent)
2512{
2513 u16 ctrl;
2514 unsigned long base;
2515 ymfpci_t *codec;
2516
2517 int err;
2518
2519 if ((err = pci_enable_device(pcidev)) != 0) {
2520 printk(KERN_ERR "ymfpci: pci_enable_device failed\n");
2521 return err;
2522 }
2523 base = pci_resource_start(pcidev, 0);
2524
2525 if ((codec = kmalloc(sizeof(ymfpci_t), GFP_KERNEL)) == NULL) {
2526 printk(KERN_ERR "ymfpci: no core\n");
2527 return -ENOMEM;
2528 }
2529 memset(codec, 0, sizeof(*codec));
2530
2531 spin_lock_init(&codec->reg_lock);
2532 spin_lock_init(&codec->voice_lock);
2533 spin_lock_init(&codec->ac97_lock);
2534 mutex_init(&codec->open_mutex);
2535 INIT_LIST_HEAD(&codec->states);
2536 codec->pci = pcidev;
2537
2538 pci_read_config_byte(pcidev, PCI_REVISION_ID, &codec->rev);
2539
2540 if (request_mem_region(base, 0x8000, "ymfpci") == NULL) {
2541 printk(KERN_ERR "ymfpci: unable to request mem region\n");
2542 goto out_free;
2543 }
2544
2545 if ((codec->reg_area_virt = ioremap(base, 0x8000)) == NULL) {
2546 printk(KERN_ERR "ymfpci: unable to map registers\n");
2547 goto out_release_region;
2548 }
2549
2550 pci_set_master(pcidev);
2551
2552 printk(KERN_INFO "ymfpci: %s at 0x%lx IRQ %d\n",
2553 (char *)ent->driver_data, base, pcidev->irq);
2554
2555 ymfpci_aclink_reset(pcidev);
2556 if (ymfpci_codec_ready(codec, 0, 1) < 0)
2557 goto out_unmap;
2558
2559#ifdef CONFIG_SOUND_YMFPCI_LEGACY
2560 if (assigned == 0) {
2561 codec->iomidi = mpu_io;
2562 codec->iosynth = synth_io;
2563 if (ymfpci_setup_legacy(codec, pcidev) < 0)
2564 goto out_unmap;
2565 assigned = 1;
2566 }
2567#endif
2568
2569 ymfpci_download_image(codec);
2570
2571 if (ymfpci_memalloc(codec) < 0)
2572 goto out_disable_dsp;
2573 ymf_memload(codec);
2574
2575 if (request_irq(pcidev->irq, ymf_interrupt, IRQF_SHARED, "ymfpci", codec) != 0) {
2576 printk(KERN_ERR "ymfpci: unable to request IRQ %d\n",
2577 pcidev->irq);
2578 goto out_memfree;
2579 }
2580
2581 /* register /dev/dsp */
2582 if ((codec->dev_audio = register_sound_dsp(&ymf_fops, -1)) < 0) {
2583 printk(KERN_ERR "ymfpci: unable to register dsp\n");
2584 goto out_free_irq;
2585 }
2586
2587 /*
2588 * Poke just the primary for the moment.
2589 */
2590 if ((err = ymf_ac97_init(codec, 0)) != 0)
2591 goto out_unregister_sound_dsp;
2592
2593#ifdef CONFIG_SOUND_YMFPCI_LEGACY
2594 codec->opl3_data.name = "ymfpci";
2595 codec->mpu_data.name = "ymfpci";
2596
2597 codec->opl3_data.io_base = codec->iosynth;
2598 codec->opl3_data.irq = -1;
2599
2600 codec->mpu_data.io_base = codec->iomidi;
2601 codec->mpu_data.irq = -1; /* May be different from our PCI IRQ. */
2602
2603 if (codec->iomidi) {
2604 if (!probe_uart401(&codec->mpu_data, THIS_MODULE)) {
2605 codec->iomidi = 0; /* XXX kludge */
2606 }
2607 }
2608#endif /* CONFIG_SOUND_YMFPCI_LEGACY */
2609
2610 /* put it into driver list */
2611 spin_lock(&ymf_devs_lock);
2612 list_add_tail(&codec->ymf_devs, &ymf_devs);
2613 spin_unlock(&ymf_devs_lock);
2614 pci_set_drvdata(pcidev, codec);
2615
2616 return 0;
2617
2618 out_unregister_sound_dsp:
2619 unregister_sound_dsp(codec->dev_audio);
2620 out_free_irq:
2621 free_irq(pcidev->irq, codec);
2622 out_memfree:
2623 ymfpci_memfree(codec);
2624 out_disable_dsp:
2625 ymfpci_disable_dsp(codec);
2626 ctrl = ymfpci_readw(codec, YDSXGR_GLOBALCTRL);
2627 ymfpci_writew(codec, YDSXGR_GLOBALCTRL, ctrl & ~0x0007);
2628 ymfpci_writel(codec, YDSXGR_STATUS, ~0);
2629 out_unmap:
2630 iounmap(codec->reg_area_virt);
2631 out_release_region:
2632 release_mem_region(pci_resource_start(pcidev, 0), 0x8000);
2633 out_free:
2634 if (codec->ac97_codec[0])
2635 ac97_release_codec(codec->ac97_codec[0]);
2636 return -ENODEV;
2637}
2638
/*
 * PCI remove: undo ymf_probe_one in reverse order — unlink from the
 * device list, release mixer/codec/dsp devices, free the IRQ and DMA
 * area, quiesce the chip, unmap registers, release the region, and
 * finally unload the legacy MPU401 if it was probed.
 */
static void __devexit ymf_remove_one(struct pci_dev *pcidev)
{
	__u16 ctrl;
	ymfpci_t *codec = pci_get_drvdata(pcidev);

	/* remove from list of devices */
	spin_lock(&ymf_devs_lock);
	list_del(&codec->ymf_devs);
	spin_unlock(&ymf_devs_lock);

	unregister_sound_mixer(codec->ac97_codec[0]->dev_mixer);
	ac97_release_codec(codec->ac97_codec[0]);
	unregister_sound_dsp(codec->dev_audio);
	free_irq(pcidev->irq, codec);
	ymfpci_memfree(codec);
	ymfpci_writel(codec, YDSXGR_STATUS, ~0);	/* ack any pending irq */
	ymfpci_disable_dsp(codec);
	ctrl = ymfpci_readw(codec, YDSXGR_GLOBALCTRL);
	ymfpci_writew(codec, YDSXGR_GLOBALCTRL, ctrl & ~0x0007);
	iounmap(codec->reg_area_virt);
	release_mem_region(pci_resource_start(pcidev, 0), 0x8000);
#ifdef CONFIG_SOUND_YMFPCI_LEGACY
	if (codec->iomidi) {
		unload_uart401(&codec->mpu_data);
	}
#endif /* CONFIG_SOUND_YMFPCI_LEGACY */
	/* NOTE(review): codec itself appears to be leaked here too — no
	 * kfree(codec); confirm against the rest of the driver. */
}
2666
MODULE_AUTHOR("Jaroslav Kysela");
MODULE_DESCRIPTION("Yamaha YMF7xx PCI Audio");
MODULE_LICENSE("GPL");

/* PCI driver glue: binds the probe/remove/PM callbacks above to the
 * device IDs in ymf_id_tbl. */
static struct pci_driver ymfpci_driver = {
	.name		= "ymfpci",
	.id_table	= ymf_id_tbl,
	.probe		= ymf_probe_one,
	.remove 	= __devexit_p(ymf_remove_one),
	.suspend	= ymf_suspend,
	.resume		= ymf_resume
};
2679
/* Module entry point: register the PCI driver (probe runs per device). */
static int __init ymf_init_module(void)
{
	return pci_register_driver(&ymfpci_driver);
}

/* Module exit point: unregister; ymf_remove_one runs for each device. */
static void __exit ymf_cleanup_module (void)
{
	pci_unregister_driver(&ymfpci_driver);
}

module_init(ymf_init_module);
module_exit(ymf_cleanup_module);
diff --git a/sound/oss/ymfpci.h b/sound/oss/ymfpci.h
deleted file mode 100644
index 75a751fb9966..000000000000
--- a/sound/oss/ymfpci.h
+++ /dev/null
@@ -1,360 +0,0 @@
#ifndef __YMFPCI_H
#define __YMFPCI_H

/*
 * Copyright (c) by Jaroslav Kysela <perex@suse.cz>
 * Definitions for Yamaha YMF724/740/744/754 chips
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
#include <linux/mutex.h>

/*
 * Direct registers (offsets into the memory-mapped register window).
 * Volume registers come in triples: XXXVOL is the 32-bit pair register,
 * XXXVOLL/XXXVOLR address the left/right 16-bit halves.
 */

/* #define YMFREG(codec, reg)		(codec->port + YDSXGR_##reg) */

#define	YDSXGR_INTFLAG			0x0004
#define	YDSXGR_ACTIVITY			0x0006
#define	YDSXGR_GLOBALCTRL		0x0008
#define	YDSXGR_ZVCTRL			0x000A
#define	YDSXGR_TIMERCTRL		0x0010
#define	 YDSXGR_TIMERCTRL_TEN		 0x0001
#define	 YDSXGR_TIMERCTRL_TIEN		 0x0002
#define	YDSXGR_TIMERCOUNT		0x0012
#define	YDSXGR_SPDIFOUTCTRL		0x0018
#define	YDSXGR_SPDIFOUTSTATUS		0x001C
#define	YDSXGR_EEPROMCTRL		0x0020
#define	YDSXGR_SPDIFINCTRL		0x0034
#define	YDSXGR_SPDIFINSTATUS		0x0038
#define	YDSXGR_DSPPROGRAMDL		0x0048
#define	YDSXGR_DLCNTRL			0x004C
#define	YDSXGR_GPIOININTFLAG		0x0050
#define	YDSXGR_GPIOININTENABLE		0x0052
#define	YDSXGR_GPIOINSTATUS		0x0054
#define	YDSXGR_GPIOOUTCTRL		0x0056
#define	YDSXGR_GPIOFUNCENABLE		0x0058
#define	YDSXGR_GPIOTYPECONFIG		0x005A
#define	YDSXGR_AC97CMDDATA		0x0060
#define	YDSXGR_AC97CMDADR		0x0062
#define	YDSXGR_PRISTATUSDATA		0x0064
#define	YDSXGR_PRISTATUSADR		0x0066
#define	YDSXGR_SECSTATUSDATA		0x0068
#define	YDSXGR_SECSTATUSADR		0x006A
#define	YDSXGR_SECCONFIG		0x0070
#define	YDSXGR_LEGACYOUTVOL		0x0080
#define	YDSXGR_LEGACYOUTVOLL		0x0080
#define	YDSXGR_LEGACYOUTVOLR		0x0082
#define	YDSXGR_NATIVEDACOUTVOL		0x0084
#define	YDSXGR_NATIVEDACOUTVOLL		0x0084
#define	YDSXGR_NATIVEDACOUTVOLR		0x0086
#define	YDSXGR_SPDIFOUTVOL		0x0088
#define	YDSXGR_SPDIFOUTVOLL		0x0088
#define	YDSXGR_SPDIFOUTVOLR		0x008A
#define	YDSXGR_AC3OUTVOL		0x008C
#define	YDSXGR_AC3OUTVOLL		0x008C
#define	YDSXGR_AC3OUTVOLR		0x008E
#define	YDSXGR_PRIADCOUTVOL		0x0090
#define	YDSXGR_PRIADCOUTVOLL		0x0090
#define	YDSXGR_PRIADCOUTVOLR		0x0092
#define	YDSXGR_LEGACYLOOPVOL		0x0094
#define	YDSXGR_LEGACYLOOPVOLL		0x0094
#define	YDSXGR_LEGACYLOOPVOLR		0x0096
#define	YDSXGR_NATIVEDACLOOPVOL		0x0098
#define	YDSXGR_NATIVEDACLOOPVOLL	0x0098
#define	YDSXGR_NATIVEDACLOOPVOLR	0x009A
#define	YDSXGR_SPDIFLOOPVOL		0x009C
/* NOTE(review): VOLL below breaks the VOL/VOLL(+0)/VOLR(+2) pattern of
 * every other volume triple (expected 0x009C); neither half is used by
 * this driver — confirm against the YMF7xx datasheet before relying on it. */
#define	YDSXGR_SPDIFLOOPVOLL		0x009E
#define	YDSXGR_SPDIFLOOPVOLR		0x009E
#define	YDSXGR_AC3LOOPVOL		0x00A0
#define	YDSXGR_AC3LOOPVOLL		0x00A0
#define	YDSXGR_AC3LOOPVOLR		0x00A2
#define	YDSXGR_PRIADCLOOPVOL		0x00A4
#define	YDSXGR_PRIADCLOOPVOLL		0x00A4
#define	YDSXGR_PRIADCLOOPVOLR		0x00A6
#define	YDSXGR_NATIVEADCINVOL		0x00A8
#define	YDSXGR_NATIVEADCINVOLL		0x00A8
#define	YDSXGR_NATIVEADCINVOLR		0x00AA
#define	YDSXGR_NATIVEDACINVOL		0x00AC
#define	YDSXGR_NATIVEDACINVOLL		0x00AC
#define	YDSXGR_NATIVEDACINVOLR		0x00AE
#define	YDSXGR_BUF441OUTVOL		0x00B0
#define	YDSXGR_BUF441OUTVOLL		0x00B0
#define	YDSXGR_BUF441OUTVOLR		0x00B2
#define	YDSXGR_BUF441LOOPVOL		0x00B4
#define	YDSXGR_BUF441LOOPVOLL		0x00B4
#define	YDSXGR_BUF441LOOPVOLR		0x00B6
#define	YDSXGR_SPDIFOUTVOL2		0x00B8
#define	YDSXGR_SPDIFOUTVOL2L		0x00B8
#define	YDSXGR_SPDIFOUTVOL2R		0x00BA
#define	YDSXGR_SPDIFLOOPVOL2		0x00BC
#define	YDSXGR_SPDIFLOOPVOL2L		0x00BC
#define	YDSXGR_SPDIFLOOPVOL2R		0x00BE
#define	YDSXGR_ADCSLOTSR		0x00C0
#define	YDSXGR_RECSLOTSR		0x00C4
#define	YDSXGR_ADCFORMAT		0x00C8
#define	YDSXGR_RECFORMAT		0x00CC
#define	YDSXGR_P44SLOTSR		0x00D0
#define	YDSXGR_STATUS			0x0100
#define	YDSXGR_CTRLSELECT		0x0104
#define	YDSXGR_MODE			0x0108
#define	YDSXGR_SAMPLECOUNT		0x010C
#define	YDSXGR_NUMOFSAMPLES		0x0110
#define	YDSXGR_CONFIG			0x0114
#define	YDSXGR_PLAYCTRLSIZE		0x0140
#define	YDSXGR_RECCTRLSIZE		0x0144
#define	YDSXGR_EFFCTRLSIZE		0x0148
#define	YDSXGR_WORKSIZE			0x014C
#define	YDSXGR_MAPOFREC			0x0150
#define	YDSXGR_MAPOFEFFECT		0x0154
#define	YDSXGR_PLAYCTRLBASE		0x0158
#define	YDSXGR_RECCTRLBASE		0x015C
#define	YDSXGR_EFFCTRLBASE		0x0160
#define	YDSXGR_WORKBASE			0x0164
#define	YDSXGR_DSPINSTRAM		0x1000
#define	YDSXGR_CTRLINSTRAM		0x4000

/* AC97 command register direction bit. */
#define	YDSXG_AC97READCMD		0x8000
#define	YDSXG_AC97WRITECMD		0x0000

/* PCI configuration-space registers (legacy decode, link control). */
#define	PCIR_LEGCTRL			0x40
#define	PCIR_ELEGCTRL			0x42
#define	PCIR_DSXGCTRL			0x48
#define	PCIR_DSXPWRCTRL1		0x4a
#define	PCIR_DSXPWRCTRL2		0x4e
#define	PCIR_OPLADR			0x60
#define	PCIR_SBADR			0x62
#define	PCIR_MPUADR			0x64

/* Microcode image lengths in bytes (see ymfpci_image.h). */
#define YDSXG_DSPLENGTH			0x0080
#define YDSXG_CTRLLENGTH		0x3000

#define YDSXG_DEFAULT_WORK_SIZE		0x0400

#define YDSXG_PLAYBACK_VOICES		64
#define YDSXG_CAPTURE_VOICES		2
#define YDSXG_EFFECT_VOICES		5

/* maximum number of AC97 codecs connected, AC97 2.0 defined 4 */
#define NR_AC97		2

#define YMF_SAMPF			256	/* Samples per frame @48000 */
157
/*
 * The slot/voice control bank (2 of these per voice).
 * This is a hardware-defined memory layout read by the DSP via DMA —
 * field order and widths must not change.
 */

typedef struct stru_ymfpci_playback_bank {
	u32 format;
	u32 loop_default;
	u32 base;			/* 32-bit address */
	u32 loop_start;			/* 32-bit offset */
	u32 loop_end;			/* 32-bit offset */
	u32 loop_frac;			/* 8-bit fraction - loop_start */
	u32 delta_end;			/* pitch delta end */
	u32 lpfK_end;
	u32 eg_gain_end;
	u32 left_gain_end;
	u32 right_gain_end;
	u32 eff1_gain_end;
	u32 eff2_gain_end;
	u32 eff3_gain_end;
	u32 lpfQ;
	u32 status;		/* P3: Always 0 for some reason. */
	u32 num_of_frames;
	u32 loop_count;
	u32 start;		/* P3: J. reads this to know where chip is. */
	u32 start_frac;
	u32 delta;
	u32 lpfK;
	u32 eg_gain;
	u32 left_gain;
	u32 right_gain;
	u32 eff1_gain;
	u32 eff2_gain;
	u32 eff3_gain;
	u32 lpfD1;
	u32 lpfD2;
} ymfpci_playback_bank_t;
194
/* Capture voice bank — hardware-defined layout, do not reorder. */
typedef struct stru_ymfpci_capture_bank {
	u32 base;			/* 32-bit address (aligned at 4) */
	u32 loop_end;			/* size in BYTES (aligned at 4) */
	u32 start;			/* 32-bit offset */
	u32 num_of_loops;		/* counter */
} ymfpci_capture_bank_t;
201
/* Effect voice bank — hardware-defined layout, do not reorder. */
typedef struct stru_ymfpci_effect_bank {
	u32 base;			/* 32-bit address */
	u32 loop_end;			/* 32-bit offset */
	u32 start;			/* 32-bit offset */
	u32 temp;
} ymfpci_effect_bank_t;
208
typedef struct ymf_voice ymfpci_voice_t;
/*
 * Throughout the code Yaroslav names YMF unit pointer "codec"
 * even though it does not correspond to any codec. Must be historic.
 * We replace it with "unit" over time.
 * AC97 parts use "codec" to denote a codec, naturally.
 */
typedef struct ymf_unit ymfpci_t;

/* What a hardware voice is currently being used for. */
typedef enum {
	YMFPCI_PCM,
	YMFPCI_SYNTH,
	YMFPCI_MIDI
} ymfpci_voice_type_t;
223
/* One of the 64 hardware playback voices and its control bank. */
struct ymf_voice {
	// ymfpci_t *codec;
	int number;				/* hardware voice index */
	char use, pcm, synth, midi;	// bool	   /* allocation flags */
	ymfpci_playback_bank_t *bank;		/* CPU view of the bank pair */
	struct ymf_pcm *ypcm;			/* owning pcm, if any */
	dma_addr_t bank_ba;			/* bus address of the bank pair */
};
232
/* One hardware capture slot and its control bank. */
struct ymf_capture {
	// struct ymf_unit *unit;
	int use;				/* in-use flag */
	ymfpci_capture_bank_t *bank;		/* CPU view of the bank */
	struct ymf_pcm *ypcm;			/* owning pcm, if any */
};
239
/* Per-card state: register mapping, DMA layout, voices, AC97 codecs. */
struct ymf_unit {
	u8 rev;				/* PCI revision */
	void __iomem *reg_area_virt;	/* 0x8000-byte register window */

	/* Coherent DMA area covering control tables, banks and work area
	 * (laid out by ymfpci_memalloc). */
	void *dma_area_va;
	dma_addr_t dma_area_ba;
	unsigned int dma_area_size;

	dma_addr_t bank_base_capture;	/* bus addresses of the sections */
	dma_addr_t bank_base_effect;
	dma_addr_t work_base;
	unsigned int work_size;

	u32 *ctrl_playback;		/* playback control table (word 0 = voice count) */
	dma_addr_t ctrl_playback_ba;
	ymfpci_playback_bank_t *bank_playback[YDSXG_PLAYBACK_VOICES][2];
	ymfpci_capture_bank_t *bank_capture[YDSXG_CAPTURE_VOICES][2];
	ymfpci_effect_bank_t *bank_effect[YDSXG_EFFECT_VOICES][2];

	int start_count;		/* running-stream refcount */
	int suspended;			/* set during PM suspend */

	u32 active_bank;		/* which of the double banks the chip reads */
	struct ymf_voice voices[YDSXG_PLAYBACK_VOICES];
	struct ymf_capture capture[YDSXG_CAPTURE_VOICES];

	struct ac97_codec *ac97_codec[NR_AC97];
	u16 ac97_features;		/* AC97_EXTENDED_ID of the primary codec */

	struct pci_dev *pci;

#ifdef CONFIG_SOUND_YMFPCI_LEGACY
	/* legacy hardware resources */
	unsigned int iosynth, iomidi;
	struct address_info opl3_data, mpu_data;
#endif

	spinlock_t reg_lock;		/* protects register access and stream state */
	spinlock_t voice_lock;		/* protects voice allocation */
	spinlock_t ac97_lock;		/* serializes AC97 register access */

	/* soundcore stuff */
	int dev_audio;			/* /dev/dsp minor */
	struct mutex open_mutex;	/* serializes open/release */

	struct list_head ymf_devs;	/* link in global ymf_devs list */
	struct list_head states;	/* List of states for this unit */
};
287
/* Ring buffer of DMA-able memory for one direction of one stream. */
struct ymf_dmabuf {
	dma_addr_t dma_addr;
	void *rawbuf;
	unsigned buforder;		/* log2 pages, for freeing */

	/* OSS buffer management stuff */
	unsigned numfrag;
	unsigned fragshift;

	/* our buffer acts like a circular ring */
	unsigned hwptr;		/* where dma last started */
	unsigned swptr;		/* where driver last clear/filled */
	int count;		/* fill count */
	unsigned total_bytes;	/* total bytes dmaed by hardware */

	wait_queue_head_t wait;	/* put process on wait queue when no more space in buffer */

	/* redundant, but makes calculations easier */
	unsigned fragsize;
	unsigned dmasize;	/* Total rawbuf[] size */

	/* OSS stuff */
	unsigned mapped:1;	/* user mmap()ed the buffer */
	unsigned ready:1;	/* prog_dmabuf has run */
	unsigned ossfragshift;
	int ossmaxfrags;
	unsigned subdivision;
};
316
/* Current sample format of a stream, as set through OSS ioctls. */
struct ymf_pcm_format {
	int format;			/* OSS format */
	int rate;			/* rate in Hz */
	int voices;			/* number of voices */
	int shift;			/* redundant, computed from the above */
};
323
/* Role of a ymf_pcm: playback, one of the capture paths, or an effect. */
typedef enum {
	PLAYBACK_VOICE,
	CAPTURE_REC,
	CAPTURE_AC97,
	EFFECT_DRY_LEFT,
	EFFECT_DRY_RIGHT,
	EFFECT_EFF1,
	EFFECT_EFF2,
	EFFECT_EFF3
} ymfpci_pcm_type_t;
334
/* This is variant record, but we hate unions. Little waste on pointers []. */
struct ymf_pcm {
	ymfpci_pcm_type_t type;		/* selects which fields below apply */
	struct ymf_state *state;	/* backpointer to the open instance */

	ymfpci_voice_t *voices[2];	/* playback: mono uses [0] only */
	int capture_bank_number;	/* capture: index into unit->capture[] */

	struct ymf_dmabuf dmabuf;
	int running;
	int spdif;			/* route playback to S/PDIF too */
};
347
/*
 * "Software" or virtual channel, an instance of opened /dev/dsp.
 * It may have two physical channels (pcms) for duplex operations.
 */

struct ymf_state {
	struct list_head chain;		/* link in unit->states */
	struct ymf_unit *unit;	/* backpointer */
	struct ymf_pcm rpcm, wpcm;	/* read (capture) and write (playback) */
	struct ymf_pcm_format format;
};
359
360#endif /* __YMFPCI_H */
diff --git a/sound/oss/ymfpci_image.h b/sound/oss/ymfpci_image.h
deleted file mode 100644
index 112f2fff6c8e..000000000000
--- a/sound/oss/ymfpci_image.h
+++ /dev/null
@@ -1,1565 +0,0 @@
1#ifndef _HWMCODE_
2#define _HWMCODE_
3
/*
 * Opaque DSP program image — presumably downloaded to the YMF7xx
 * on-chip DSP during device initialization (confirm in the driver's
 * download routine). The words are raw firmware: do not edit by hand.
 * The array length is fixed by YDSXG_DSPLENGTH.
 */
4static u32 DspInst[YDSXG_DSPLENGTH / 4] = {
5 0x00000081, 0x000001a4, 0x0000000a, 0x0000002f,
6 0x00080253, 0x01800317, 0x0000407b, 0x0000843f,
7 0x0001483c, 0x0001943c, 0x0005d83c, 0x00001c3c,
8 0x0000c07b, 0x00050c3f, 0x0121503c, 0x00000000,
9 0x00000000, 0x00000000, 0x00000000, 0x00000000,
10 0x00000000, 0x00000000, 0x00000000, 0x00000000,
11 0x00000000, 0x00000000, 0x00000000, 0x00000000,
12 0x00000000, 0x00000000, 0x00000000, 0x00000000
13};
14
15static u32 CntrlInst[YDSXG_CTRLLENGTH / 4] = {
16 0x000007, 0x240007, 0x0C0007, 0x1C0007,
17 0x060007, 0x700002, 0x000020, 0x030040,
18 0x007104, 0x004286, 0x030040, 0x000F0D,
19 0x000810, 0x20043A, 0x000282, 0x00020D,
20 0x000810, 0x20043A, 0x001282, 0x200E82,
21 0x001A82, 0x032D0D, 0x000810, 0x10043A,
22 0x02D38D, 0x000810, 0x18043A, 0x00010D,
23 0x020015, 0x0000FD, 0x000020, 0x038860,
24 0x039060, 0x038060, 0x038040, 0x038040,
25 0x038040, 0x018040, 0x000A7D, 0x038040,
26 0x038040, 0x018040, 0x200402, 0x000882,
27 0x08001A, 0x000904, 0x015986, 0x000007,
28 0x260007, 0x000007, 0x000007, 0x018A06,
29 0x000007, 0x030C8D, 0x000810, 0x18043A,
30 0x260007, 0x00087D, 0x018042, 0x00160A,
31 0x04A206, 0x000007, 0x00218D, 0x000810,
32 0x08043A, 0x21C206, 0x000007, 0x0007FD,
33 0x018042, 0x08000A, 0x000904, 0x029386,
34 0x000195, 0x090D04, 0x000007, 0x000820,
35 0x0000F5, 0x000B7D, 0x01F060, 0x0000FD,
36 0x032206, 0x018040, 0x000A7D, 0x038042,
37 0x13804A, 0x18000A, 0x001820, 0x059060,
38 0x058860, 0x018040, 0x0000FD, 0x018042,
39 0x70000A, 0x000115, 0x071144, 0x032386,
40 0x030000, 0x007020, 0x034A06, 0x018040,
41 0x00348D, 0x000810, 0x08043A, 0x21EA06,
42 0x000007, 0x02D38D, 0x000810, 0x18043A,
43 0x018206, 0x000007, 0x240007, 0x000F8D,
44 0x000810, 0x00163A, 0x002402, 0x005C02,
45 0x0028FD, 0x000020, 0x018040, 0x08000D,
46 0x000815, 0x510984, 0x000007, 0x00004D,
47 0x000E5D, 0x000E02, 0x00418D, 0x000810,
48 0x08043A, 0x2C8A06, 0x000007, 0x00008D,
49 0x000924, 0x000F02, 0x00458D, 0x000810,
50 0x08043A, 0x2C8A06, 0x000007, 0x00387D,
51 0x018042, 0x08000A, 0x001015, 0x010984,
52 0x018386, 0x000007, 0x01AA06, 0x000007,
53 0x0008FD, 0x018042, 0x18000A, 0x001904,
54 0x218086, 0x280007, 0x001810, 0x28043A,
55 0x280C02, 0x00000D, 0x000810, 0x28143A,
56 0x08808D, 0x000820, 0x0002FD, 0x018040,
57 0x200007, 0x00020D, 0x189904, 0x000007,
58 0x00402D, 0x0000BD, 0x0002FD, 0x018042,
59 0x08000A, 0x000904, 0x055A86, 0x000007,
60 0x000100, 0x000A20, 0x00047D, 0x018040,
61 0x018042, 0x20000A, 0x003015, 0x012144,
62 0x034986, 0x000007, 0x002104, 0x034986,
63 0x000007, 0x000F8D, 0x000810, 0x280C3A,
64 0x023944, 0x06C986, 0x000007, 0x001810,
65 0x28043A, 0x08810D, 0x000820, 0x0002FD,
66 0x018040, 0x200007, 0x002810, 0x78003A,
67 0x00688D, 0x000810, 0x08043A, 0x288A06,
68 0x000007, 0x00400D, 0x001015, 0x189904,
69 0x292904, 0x393904, 0x000007, 0x060206,
70 0x000007, 0x0004F5, 0x00007D, 0x000020,
71 0x00008D, 0x010860, 0x018040, 0x00047D,
72 0x038042, 0x21804A, 0x18000A, 0x021944,
73 0x215886, 0x000007, 0x004075, 0x71F104,
74 0x000007, 0x010042, 0x28000A, 0x002904,
75 0x212086, 0x000007, 0x003C0D, 0x30A904,
76 0x000007, 0x00077D, 0x018042, 0x08000A,
77 0x000904, 0x07DA86, 0x00057D, 0x002820,
78 0x03B060, 0x07F206, 0x018040, 0x003020,
79 0x03A860, 0x018040, 0x0002FD, 0x018042,
80 0x08000A, 0x000904, 0x07FA86, 0x000007,
81 0x00057D, 0x018042, 0x28040A, 0x000E8D,
82 0x000810, 0x280C3A, 0x00000D, 0x000810,
83 0x28143A, 0x09000D, 0x000820, 0x0002FD,
84 0x018040, 0x200007, 0x003DFD, 0x000020,
85 0x018040, 0x00107D, 0x008D8D, 0x000810,
86 0x08043A, 0x288A06, 0x000007, 0x000815,
87 0x08001A, 0x010984, 0x095186, 0x00137D,
88 0x200500, 0x280F20, 0x338F60, 0x3B8F60,
89 0x438F60, 0x4B8F60, 0x538F60, 0x5B8F60,
90 0x038A60, 0x018040, 0x007FBD, 0x383DC4,
91 0x000007, 0x001A7D, 0x001375, 0x018042,
92 0x09004A, 0x10000A, 0x0B8D04, 0x139504,
93 0x000007, 0x000820, 0x019060, 0x001104,
94 0x212086, 0x010040, 0x0017FD, 0x018042,
95 0x08000A, 0x000904, 0x212286, 0x000007,
96 0x00197D, 0x038042, 0x09804A, 0x10000A,
97 0x000924, 0x001664, 0x0011FD, 0x038042,
98 0x2B804A, 0x19804A, 0x00008D, 0x218944,
99 0x000007, 0x002244, 0x0AE186, 0x000007,
100 0x001A64, 0x002A24, 0x00197D, 0x080102,
101 0x100122, 0x000820, 0x039060, 0x018040,
102 0x003DFD, 0x00008D, 0x000820, 0x018040,
103 0x001375, 0x001A7D, 0x010042, 0x09804A,
104 0x10000A, 0x00021D, 0x0189E4, 0x2992E4,
105 0x309144, 0x000007, 0x00060D, 0x000A15,
106 0x000C1D, 0x001025, 0x00A9E4, 0x012BE4,
107 0x000464, 0x01B3E4, 0x0232E4, 0x000464,
108 0x000464, 0x000464, 0x000464, 0x00040D,
109 0x08B1C4, 0x000007, 0x000820, 0x000BF5,
110 0x030040, 0x00197D, 0x038042, 0x09804A,
111 0x000A24, 0x08000A, 0x080E64, 0x000007,
112 0x100122, 0x000820, 0x031060, 0x010040,
113 0x0064AC, 0x00027D, 0x000020, 0x018040,
114 0x00107D, 0x018042, 0x0011FD, 0x3B804A,
115 0x09804A, 0x20000A, 0x000095, 0x1A1144,
116 0x00A144, 0x0D2086, 0x00040D, 0x00B984,
117 0x0D2186, 0x0018FD, 0x018042, 0x0010FD,
118 0x09804A, 0x28000A, 0x000095, 0x010924,
119 0x002A64, 0x0D1186, 0x000007, 0x002904,
120 0x0D2286, 0x000007, 0x0D2A06, 0x080002,
121 0x00008D, 0x00387D, 0x000820, 0x018040,
122 0x00127D, 0x018042, 0x10000A, 0x003904,
123 0x0DD186, 0x00080D, 0x7FFFB5, 0x00B984,
124 0x0DA186, 0x000025, 0x0E7A06, 0x00002D,
125 0x000015, 0x00082D, 0x02C78D, 0x000820,
126 0x0EC206, 0x00000D, 0x7F8035, 0x00B984,
127 0x0E7186, 0x400025, 0x00008D, 0x110944,
128 0x000007, 0x00018D, 0x109504, 0x000007,
129 0x009164, 0x000424, 0x000424, 0x000424,
130 0x100102, 0x280002, 0x02C68D, 0x000820,
131 0x0EC206, 0x00018D, 0x00042D, 0x00008D,
132 0x109504, 0x000007, 0x00020D, 0x109184,
133 0x000007, 0x02C70D, 0x000820, 0x00008D,
134 0x0038FD, 0x018040, 0x003BFD, 0x001020,
135 0x03A860, 0x000815, 0x313184, 0x212184,
136 0x000007, 0x03B060, 0x03A060, 0x018040,
137 0x0022FD, 0x000095, 0x010924, 0x000424,
138 0x000424, 0x001264, 0x100102, 0x000820,
139 0x039060, 0x018040, 0x001924, 0x00FB8D,
140 0x00397D, 0x000820, 0x058040, 0x038042,
141 0x09844A, 0x000606, 0x08040A, 0x000424,
142 0x000424, 0x00117D, 0x018042, 0x08000A,
143 0x000A24, 0x280502, 0x280C02, 0x09800D,
144 0x000820, 0x0002FD, 0x018040, 0x200007,
145 0x0022FD, 0x018042, 0x08000A, 0x000095,
146 0x280DC4, 0x011924, 0x00197D, 0x018042,
147 0x0011FD, 0x09804A, 0x10000A, 0x0000B5,
148 0x113144, 0x0A8D04, 0x000007, 0x080A44,
149 0x129504, 0x000007, 0x0023FD, 0x001020,
150 0x038040, 0x101244, 0x000007, 0x000820,
151 0x039060, 0x018040, 0x0002FD, 0x018042,
152 0x08000A, 0x000904, 0x10FA86, 0x000007,
153 0x003BFD, 0x000100, 0x000A10, 0x0B807A,
154 0x13804A, 0x090984, 0x000007, 0x000095,
155 0x013D04, 0x118086, 0x10000A, 0x100002,
156 0x090984, 0x000007, 0x038042, 0x11804A,
157 0x090D04, 0x000007, 0x10000A, 0x090D84,
158 0x000007, 0x00257D, 0x000820, 0x018040,
159 0x00010D, 0x000810, 0x28143A, 0x00127D,
160 0x018042, 0x20000A, 0x00197D, 0x018042,
161 0x00117D, 0x31804A, 0x10000A, 0x003124,
162 0x01280D, 0x00397D, 0x000820, 0x058040,
163 0x038042, 0x09844A, 0x000606, 0x08040A,
164 0x300102, 0x003124, 0x000424, 0x000424,
165 0x001224, 0x280502, 0x001A4C, 0x130186,
166 0x700002, 0x00002D, 0x030000, 0x00387D,
167 0x018042, 0x10000A, 0x132A06, 0x002124,
168 0x0000AD, 0x100002, 0x00010D, 0x000924,
169 0x006B24, 0x01368D, 0x00397D, 0x000820,
170 0x058040, 0x038042, 0x09844A, 0x000606,
171 0x08040A, 0x003264, 0x00008D, 0x000A24,
172 0x001020, 0x00227D, 0x018040, 0x013C0D,
173 0x000810, 0x08043A, 0x29D206, 0x000007,
174 0x002820, 0x00207D, 0x018040, 0x00117D,
175 0x038042, 0x13804A, 0x33800A, 0x00387D,
176 0x018042, 0x08000A, 0x000904, 0x163A86,
177 0x000007, 0x00008D, 0x030964, 0x01478D,
178 0x00397D, 0x000820, 0x058040, 0x038042,
179 0x09844A, 0x000606, 0x08040A, 0x380102,
180 0x000424, 0x000424, 0x001224, 0x0002FD,
181 0x018042, 0x08000A, 0x000904, 0x14A286,
182 0x000007, 0x280502, 0x001A4C, 0x163986,
183 0x000007, 0x032164, 0x00632C, 0x003DFD,
184 0x018042, 0x08000A, 0x000095, 0x090904,
185 0x000007, 0x000820, 0x001A4C, 0x156186,
186 0x018040, 0x030000, 0x157A06, 0x002124,
187 0x00010D, 0x000924, 0x006B24, 0x015B8D,
188 0x00397D, 0x000820, 0x058040, 0x038042,
189 0x09844A, 0x000606, 0x08040A, 0x003A64,
190 0x000095, 0x001224, 0x0002FD, 0x018042,
191 0x08000A, 0x000904, 0x15DA86, 0x000007,
192 0x01628D, 0x000810, 0x08043A, 0x29D206,
193 0x000007, 0x14D206, 0x000007, 0x007020,
194 0x08010A, 0x10012A, 0x0020FD, 0x038860,
195 0x039060, 0x018040, 0x00227D, 0x018042,
196 0x003DFD, 0x08000A, 0x31844A, 0x000904,
197 0x16D886, 0x18008B, 0x00008D, 0x189904,
198 0x00312C, 0x17AA06, 0x000007, 0x00324C,
199 0x173386, 0x000007, 0x001904, 0x173086,
200 0x000007, 0x000095, 0x199144, 0x00222C,
201 0x003124, 0x00636C, 0x000E3D, 0x001375,
202 0x000BFD, 0x010042, 0x09804A, 0x10000A,
203 0x038AEC, 0x0393EC, 0x00224C, 0x17A986,
204 0x000007, 0x00008D, 0x189904, 0x00226C,
205 0x00322C, 0x30050A, 0x301DAB, 0x002083,
206 0x0018FD, 0x018042, 0x08000A, 0x018924,
207 0x300502, 0x001083, 0x001875, 0x010042,
208 0x10000A, 0x00008D, 0x010924, 0x001375,
209 0x330542, 0x330CCB, 0x332CCB, 0x3334CB,
210 0x333CCB, 0x3344CB, 0x334CCB, 0x3354CB,
211 0x305C8B, 0x006083, 0x0002F5, 0x010042,
212 0x08000A, 0x000904, 0x187A86, 0x000007,
213 0x001E2D, 0x0005FD, 0x018042, 0x08000A,
214 0x028924, 0x280502, 0x00060D, 0x000810,
215 0x280C3A, 0x00008D, 0x000810, 0x28143A,
216 0x0A808D, 0x000820, 0x0002F5, 0x010040,
217 0x220007, 0x001275, 0x030042, 0x21004A,
218 0x00008D, 0x1A0944, 0x000007, 0x01980D,
219 0x000810, 0x08043A, 0x2B2206, 0x000007,
220 0x0001F5, 0x030042, 0x0D004A, 0x10000A,
221 0x089144, 0x000007, 0x000820, 0x010040,
222 0x0025F5, 0x0A3144, 0x000007, 0x000820,
223 0x032860, 0x030040, 0x00217D, 0x038042,
224 0x0B804A, 0x10000A, 0x000820, 0x031060,
225 0x030040, 0x00008D, 0x000124, 0x00012C,
226 0x000E64, 0x001A64, 0x00636C, 0x08010A,
227 0x10012A, 0x000820, 0x031060, 0x030040,
228 0x0020FD, 0x018042, 0x08000A, 0x00227D,
229 0x018042, 0x10000A, 0x000820, 0x031060,
230 0x030040, 0x00197D, 0x018042, 0x08000A,
231 0x0022FD, 0x038042, 0x10000A, 0x000820,
232 0x031060, 0x030040, 0x090D04, 0x000007,
233 0x000820, 0x030040, 0x038042, 0x0B804A,
234 0x10000A, 0x000820, 0x031060, 0x030040,
235 0x038042, 0x13804A, 0x19804A, 0x110D04,
236 0x198D04, 0x000007, 0x08000A, 0x001020,
237 0x031860, 0x030860, 0x030040, 0x00008D,
238 0x0B0944, 0x000007, 0x000820, 0x010040,
239 0x0005F5, 0x030042, 0x08000A, 0x000820,
240 0x010040, 0x0000F5, 0x010042, 0x08000A,
241 0x000904, 0x1C6086, 0x001E75, 0x030042,
242 0x01044A, 0x000C0A, 0x1C7206, 0x000007,
243 0x000402, 0x000C02, 0x00177D, 0x001AF5,
244 0x018042, 0x03144A, 0x031C4A, 0x03244A,
245 0x032C4A, 0x03344A, 0x033C4A, 0x03444A,
246 0x004C0A, 0x00043D, 0x0013F5, 0x001AFD,
247 0x030042, 0x0B004A, 0x1B804A, 0x13804A,
248 0x20000A, 0x089144, 0x19A144, 0x0389E4,
249 0x0399EC, 0x005502, 0x005D0A, 0x030042,
250 0x0B004A, 0x1B804A, 0x13804A, 0x20000A,
251 0x089144, 0x19A144, 0x0389E4, 0x0399EC,
252 0x006502, 0x006D0A, 0x030042, 0x0B004A,
253 0x19004A, 0x2B804A, 0x13804A, 0x21804A,
254 0x30000A, 0x089144, 0x19A144, 0x2AB144,
255 0x0389E4, 0x0399EC, 0x007502, 0x007D0A,
256 0x03A9E4, 0x000702, 0x00107D, 0x000415,
257 0x018042, 0x08000A, 0x0109E4, 0x000F02,
258 0x002AF5, 0x0019FD, 0x010042, 0x09804A,
259 0x10000A, 0x000934, 0x001674, 0x0029F5,
260 0x010042, 0x10000A, 0x00917C, 0x002075,
261 0x010042, 0x08000A, 0x000904, 0x1ED286,
262 0x0026F5, 0x0027F5, 0x030042, 0x09004A,
263 0x10000A, 0x000A3C, 0x00167C, 0x001A75,
264 0x000BFD, 0x010042, 0x51804A, 0x48000A,
265 0x160007, 0x001075, 0x010042, 0x282C0A,
266 0x281D12, 0x282512, 0x001F32, 0x1E0007,
267 0x0E0007, 0x001975, 0x010042, 0x002DF5,
268 0x0D004A, 0x10000A, 0x009144, 0x1FB286,
269 0x010042, 0x28340A, 0x000E5D, 0x00008D,
270 0x000375, 0x000820, 0x010040, 0x05D2F4,
271 0x54D104, 0x00735C, 0x205386, 0x000007,
272 0x0C0007, 0x080007, 0x0A0007, 0x02040D,
273 0x000810, 0x08043A, 0x332206, 0x000007,
274 0x205A06, 0x000007, 0x080007, 0x002275,
275 0x010042, 0x20000A, 0x002104, 0x212086,
276 0x001E2D, 0x0002F5, 0x010042, 0x08000A,
277 0x000904, 0x209286, 0x000007, 0x002010,
278 0x30043A, 0x00057D, 0x0180C3, 0x08000A,
279 0x028924, 0x280502, 0x280C02, 0x0A810D,
280 0x000820, 0x0002F5, 0x010040, 0x220007,
281 0x0004FD, 0x018042, 0x70000A, 0x030000,
282 0x007020, 0x06FA06, 0x018040, 0x02180D,
283 0x000810, 0x08043A, 0x2B2206, 0x000007,
284 0x0002FD, 0x018042, 0x08000A, 0x000904,
285 0x218A86, 0x000007, 0x01F206, 0x000007,
286 0x000875, 0x0009FD, 0x00010D, 0x220A06,
287 0x000295, 0x000B75, 0x00097D, 0x00000D,
288 0x000515, 0x010042, 0x18000A, 0x001904,
289 0x287886, 0x0006F5, 0x001020, 0x010040,
290 0x0004F5, 0x000820, 0x010040, 0x000775,
291 0x010042, 0x09804A, 0x10000A, 0x001124,
292 0x000904, 0x22BA86, 0x000815, 0x080102,
293 0x101204, 0x22DA06, 0x000575, 0x081204,
294 0x000007, 0x100102, 0x000575, 0x000425,
295 0x021124, 0x100102, 0x000820, 0x031060,
296 0x010040, 0x001924, 0x287886, 0x00008D,
297 0x000464, 0x009D04, 0x278886, 0x180102,
298 0x000575, 0x010042, 0x28040A, 0x00018D,
299 0x000924, 0x280D02, 0x00000D, 0x000924,
300 0x281502, 0x10000D, 0x000820, 0x0002F5,
301 0x010040, 0x200007, 0x001175, 0x0002FD,
302 0x018042, 0x08000A, 0x000904, 0x23C286,
303 0x000007, 0x000100, 0x080B20, 0x130B60,
304 0x1B0B60, 0x030A60, 0x010040, 0x050042,
305 0x3D004A, 0x35004A, 0x2D004A, 0x20000A,
306 0x0006F5, 0x010042, 0x28140A, 0x0004F5,
307 0x010042, 0x08000A, 0x000315, 0x010D04,
308 0x24CA86, 0x004015, 0x000095, 0x010D04,
309 0x24B886, 0x100022, 0x10002A, 0x24E206,
310 0x000007, 0x333104, 0x2AA904, 0x000007,
311 0x032124, 0x280502, 0x001124, 0x000424,
312 0x000424, 0x003224, 0x00292C, 0x00636C,
313 0x25F386, 0x000007, 0x02B164, 0x000464,
314 0x000464, 0x00008D, 0x000A64, 0x280D02,
315 0x10008D, 0x000820, 0x0002F5, 0x010040,
316 0x220007, 0x00008D, 0x38B904, 0x000007,
317 0x03296C, 0x30010A, 0x0002F5, 0x010042,
318 0x08000A, 0x000904, 0x25BA86, 0x000007,
319 0x02312C, 0x28050A, 0x00008D, 0x01096C,
320 0x280D0A, 0x10010D, 0x000820, 0x0002F5,
321 0x010040, 0x220007, 0x001124, 0x000424,
322 0x000424, 0x003224, 0x300102, 0x032944,
323 0x267A86, 0x000007, 0x300002, 0x0004F5,
324 0x010042, 0x08000A, 0x000315, 0x010D04,
325 0x26C086, 0x003124, 0x000464, 0x300102,
326 0x0002F5, 0x010042, 0x08000A, 0x000904,
327 0x26CA86, 0x000007, 0x003124, 0x300502,
328 0x003924, 0x300583, 0x000883, 0x0005F5,
329 0x010042, 0x28040A, 0x00008D, 0x008124,
330 0x280D02, 0x00008D, 0x008124, 0x281502,
331 0x10018D, 0x000820, 0x0002F5, 0x010040,
332 0x220007, 0x001025, 0x000575, 0x030042,
333 0x09004A, 0x10000A, 0x0A0904, 0x121104,
334 0x000007, 0x001020, 0x050860, 0x050040,
335 0x0006FD, 0x018042, 0x09004A, 0x10000A,
336 0x0000A5, 0x0A0904, 0x121104, 0x000007,
337 0x000820, 0x019060, 0x010040, 0x0002F5,
338 0x010042, 0x08000A, 0x000904, 0x284286,
339 0x000007, 0x230A06, 0x000007, 0x000606,
340 0x000007, 0x0002F5, 0x010042, 0x08000A,
341 0x000904, 0x289286, 0x000007, 0x000100,
342 0x080B20, 0x138B60, 0x1B8B60, 0x238B60,
343 0x2B8B60, 0x338B60, 0x3B8B60, 0x438B60,
344 0x4B8B60, 0x538B60, 0x5B8B60, 0x638B60,
345 0x6B8B60, 0x738B60, 0x7B8B60, 0x038F60,
346 0x0B8F60, 0x138F60, 0x1B8F60, 0x238F60,
347 0x2B8F60, 0x338F60, 0x3B8F60, 0x438F60,
348 0x4B8F60, 0x538F60, 0x5B8F60, 0x638F60,
349 0x6B8F60, 0x738F60, 0x7B8F60, 0x038A60,
350 0x000606, 0x018040, 0x00008D, 0x000A64,
351 0x280D02, 0x000A24, 0x00027D, 0x018042,
352 0x10000A, 0x001224, 0x0003FD, 0x018042,
353 0x08000A, 0x000904, 0x2A8286, 0x000007,
354 0x00018D, 0x000A24, 0x000464, 0x000464,
355 0x080102, 0x000924, 0x000424, 0x000424,
356 0x100102, 0x02000D, 0x009144, 0x2AD986,
357 0x000007, 0x0001FD, 0x018042, 0x08000A,
358 0x000A44, 0x2ABB86, 0x018042, 0x0A000D,
359 0x000820, 0x0002FD, 0x018040, 0x200007,
360 0x00027D, 0x001020, 0x000606, 0x018040,
361 0x0002F5, 0x010042, 0x08000A, 0x000904,
362 0x2B2A86, 0x000007, 0x00037D, 0x018042,
363 0x08000A, 0x000904, 0x2B5A86, 0x000007,
364 0x000075, 0x002E7D, 0x010042, 0x0B804A,
365 0x000020, 0x000904, 0x000686, 0x010040,
366 0x31844A, 0x30048B, 0x000883, 0x00008D,
367 0x000810, 0x28143A, 0x00008D, 0x000810,
368 0x280C3A, 0x000675, 0x010042, 0x08000A,
369 0x003815, 0x010924, 0x280502, 0x0B000D,
370 0x000820, 0x0002F5, 0x010040, 0x000606,
371 0x220007, 0x000464, 0x000464, 0x000606,
372 0x000007, 0x000134, 0x007F8D, 0x00093C,
373 0x281D12, 0x282512, 0x001F32, 0x0E0007,
374 0x00010D, 0x00037D, 0x000820, 0x018040,
375 0x05D2F4, 0x000007, 0x080007, 0x00037D,
376 0x018042, 0x08000A, 0x000904, 0x2D0286,
377 0x000007, 0x000606, 0x000007, 0x000007,
378 0x000012, 0x100007, 0x320007, 0x600007,
379 0x100080, 0x48001A, 0x004904, 0x2D6186,
380 0x000007, 0x001210, 0x58003A, 0x000145,
381 0x5C5D04, 0x000007, 0x000080, 0x48001A,
382 0x004904, 0x2DB186, 0x000007, 0x001210,
383 0x50003A, 0x005904, 0x2E0886, 0x000045,
384 0x0000C5, 0x7FFFF5, 0x7FFF7D, 0x07D524,
385 0x004224, 0x500102, 0x200502, 0x000082,
386 0x40001A, 0x004104, 0x2E3986, 0x000007,
387 0x003865, 0x40001A, 0x004020, 0x00104D,
388 0x04C184, 0x301B86, 0x000040, 0x040007,
389 0x000165, 0x000145, 0x004020, 0x000040,
390 0x000765, 0x080080, 0x40001A, 0x004104,
391 0x2EC986, 0x000007, 0x001210, 0x40003A,
392 0x004104, 0x2F2286, 0x00004D, 0x0000CD,
393 0x004810, 0x20043A, 0x000882, 0x40001A,
394 0x004104, 0x2F3186, 0x000007, 0x004820,
395 0x005904, 0x300886, 0x000040, 0x0007E5,
396 0x200480, 0x2816A0, 0x3216E0, 0x3A16E0,
397 0x4216E0, 0x021260, 0x000040, 0x000032,
398 0x400075, 0x00007D, 0x07D574, 0x200512,
399 0x000082, 0x40001A, 0x004104, 0x2FE186,
400 0x000007, 0x037206, 0x640007, 0x060007,
401 0x0000E5, 0x000020, 0x000040, 0x000A65,
402 0x000020, 0x020040, 0x020040, 0x000040,
403 0x000165, 0x000042, 0x70000A, 0x007104,
404 0x30A286, 0x000007, 0x018206, 0x640007,
405 0x050000, 0x007020, 0x000040, 0x037206,
406 0x640007, 0x000007, 0x00306D, 0x028860,
407 0x029060, 0x08000A, 0x028860, 0x008040,
408 0x100012, 0x00100D, 0x009184, 0x314186,
409 0x000E0D, 0x009184, 0x325186, 0x000007,
410 0x300007, 0x001020, 0x003B6D, 0x008040,
411 0x000080, 0x08001A, 0x000904, 0x316186,
412 0x000007, 0x001220, 0x000DED, 0x008040,
413 0x008042, 0x10000A, 0x40000D, 0x109544,
414 0x000007, 0x001020, 0x000DED, 0x008040,
415 0x008042, 0x20040A, 0x000082, 0x08001A,
416 0x000904, 0x31F186, 0x000007, 0x003B6D,
417 0x008042, 0x08000A, 0x000E15, 0x010984,
418 0x329B86, 0x600007, 0x08001A, 0x000C15,
419 0x010984, 0x328386, 0x000020, 0x1A0007,
420 0x0002ED, 0x008040, 0x620007, 0x00306D,
421 0x028042, 0x0A804A, 0x000820, 0x0A804A,
422 0x000606, 0x10804A, 0x000007, 0x282512,
423 0x001F32, 0x05D2F4, 0x54D104, 0x00735C,
424 0x000786, 0x000007, 0x0C0007, 0x0A0007,
425 0x1C0007, 0x003465, 0x020040, 0x004820,
426 0x025060, 0x40000A, 0x024060, 0x000040,
427 0x454944, 0x000007, 0x004020, 0x003AE5,
428 0x000040, 0x0028E5, 0x000042, 0x48000A,
429 0x004904, 0x386886, 0x002C65, 0x000042,
430 0x40000A, 0x0000D5, 0x454104, 0x000007,
431 0x000655, 0x054504, 0x34F286, 0x0001D5,
432 0x054504, 0x34F086, 0x002B65, 0x000042,
433 0x003AE5, 0x50004A, 0x40000A, 0x45C3D4,
434 0x000007, 0x454504, 0x000007, 0x0000CD,
435 0x444944, 0x000007, 0x454504, 0x000007,
436 0x00014D, 0x554944, 0x000007, 0x045144,
437 0x34E986, 0x002C65, 0x000042, 0x48000A,
438 0x4CD104, 0x000007, 0x04C144, 0x34F386,
439 0x000007, 0x160007, 0x002CE5, 0x040042,
440 0x40000A, 0x004020, 0x000040, 0x002965,
441 0x000042, 0x40000A, 0x004104, 0x356086,
442 0x000007, 0x002402, 0x36A206, 0x005C02,
443 0x0025E5, 0x000042, 0x40000A, 0x004274,
444 0x002AE5, 0x000042, 0x40000A, 0x004274,
445 0x500112, 0x0029E5, 0x000042, 0x40000A,
446 0x004234, 0x454104, 0x000007, 0x004020,
447 0x000040, 0x003EE5, 0x000020, 0x000040,
448 0x002DE5, 0x400152, 0x50000A, 0x045144,
449 0x364A86, 0x0000C5, 0x003EE5, 0x004020,
450 0x000040, 0x002BE5, 0x000042, 0x40000A,
451 0x404254, 0x000007, 0x002AE5, 0x004020,
452 0x000040, 0x500132, 0x040134, 0x005674,
453 0x0029E5, 0x020042, 0x42000A, 0x000042,
454 0x50000A, 0x05417C, 0x0028E5, 0x000042,
455 0x48000A, 0x0000C5, 0x4CC144, 0x371086,
456 0x0026E5, 0x0027E5, 0x020042, 0x40004A,
457 0x50000A, 0x00423C, 0x00567C, 0x0028E5,
458 0x004820, 0x000040, 0x281D12, 0x282512,
459 0x001F72, 0x002965, 0x000042, 0x40000A,
460 0x004104, 0x37AA86, 0x0E0007, 0x160007,
461 0x1E0007, 0x003EE5, 0x000042, 0x40000A,
462 0x004104, 0x37E886, 0x002D65, 0x000042,
463 0x28340A, 0x003465, 0x020042, 0x42004A,
464 0x004020, 0x4A004A, 0x50004A, 0x05D2F4,
465 0x54D104, 0x00735C, 0x385186, 0x000007,
466 0x000606, 0x080007, 0x0C0007, 0x080007,
467 0x0A0007, 0x0001E5, 0x020045, 0x004020,
468 0x000060, 0x000365, 0x000040, 0x002E65,
469 0x001A20, 0x0A1A60, 0x000040, 0x003465,
470 0x020042, 0x42004A, 0x004020, 0x4A004A,
471 0x000606, 0x50004A, 0x000000, 0x000000,
472 0x000000, 0x000000, 0x000000, 0x000000,
473 0x000000, 0x000000, 0x000000, 0x000000,
474 0x000000, 0x000000, 0x000000, 0x000000,
475 0x000000, 0x000000, 0x000000, 0x000000,
476 0x000000, 0x000000, 0x000000, 0x000000,
477 0x000000, 0x000000, 0x000000, 0x000000,
478 0x000000, 0x000000, 0x000000, 0x000000,
479 0x000000, 0x000000, 0x000000, 0x000000,
480 0x000000, 0x000000, 0x000000, 0x000000,
481 0x000000, 0x000000, 0x000000, 0x000000,
482 0x000000, 0x000000, 0x000000, 0x000000,
483 0x000000, 0x000000, 0x000000, 0x000000,
484 0x000000, 0x000000, 0x000000, 0x000000,
485 0x000000, 0x000000, 0x000000, 0x000000,
486 0x000000, 0x000000, 0x000000, 0x000000,
487 0x000000, 0x000000, 0x000000, 0x000000,
488 0x000000, 0x000000, 0x000000, 0x000000,
489 0x000000, 0x000000, 0x000000, 0x000000,
490 0x000000, 0x000000, 0x000000, 0x000000,
491 0x000000, 0x000000, 0x000000, 0x000000,
492 0x000000, 0x000000, 0x000000, 0x000000,
493 0x000000, 0x000000, 0x000000, 0x000000,
494 0x000000, 0x000000, 0x000000, 0x000000,
495 0x000000, 0x000000, 0x000000, 0x000000,
496 0x000000, 0x000000, 0x000000, 0x000000,
497 0x000000, 0x000000, 0x000000, 0x000000,
498 0x000000, 0x000000, 0x000000, 0x000000,
499 0x000000, 0x000000, 0x000000, 0x000000,
500 0x000000, 0x000000, 0x000000, 0x000000,
501 0x000000, 0x000000, 0x000000, 0x000000,
502 0x000000, 0x000000, 0x000000, 0x000000,
503 0x000000, 0x000000, 0x000000, 0x000000,
504 0x000000, 0x000000, 0x000000, 0x000000,
505 0x000000, 0x000000, 0x000000, 0x000000,
506 0x000000, 0x000000, 0x000000, 0x000000,
507 0x000000, 0x000000, 0x000000, 0x000000,
508 0x000000, 0x000000, 0x000000, 0x000000,
509 0x000000, 0x000000, 0x000000, 0x000000,
510 0x000000, 0x000000, 0x000000, 0x000000,
511 0x000000, 0x000000, 0x000000, 0x000000,
512 0x000000, 0x000000, 0x000000, 0x000000,
513 0x000000, 0x000000, 0x000000, 0x000000,
514 0x000000, 0x000000, 0x000000, 0x000000,
515 0x000000, 0x000000, 0x000000, 0x000000,
516 0x000000, 0x000000, 0x000000, 0x000000,
517 0x000000, 0x000000, 0x000000, 0x000000,
518 0x000000, 0x000000, 0x000000, 0x000000,
519 0x000000, 0x000000, 0x000000, 0x000000,
520 0x000000, 0x000000, 0x000000, 0x000000,
521 0x000000, 0x000000, 0x000000, 0x000000,
522 0x000000, 0x000000, 0x000000, 0x000000,
523 0x000000, 0x000000, 0x000000, 0x000000,
524 0x000000, 0x000000, 0x000000, 0x000000,
525 0x000000, 0x000000, 0x000000, 0x000000,
526 0x000000, 0x000000, 0x000000, 0x000000,
527 0x000000, 0x000000, 0x000000, 0x000000,
528 0x000000, 0x000000, 0x000000, 0x000000,
529 0x000000, 0x000000, 0x000000, 0x000000,
530 0x000000, 0x000000, 0x000000, 0x000000,
531 0x000000, 0x000000, 0x000000, 0x000000,
532 0x000000, 0x000000, 0x000000, 0x000000,
533 0x000000, 0x000000, 0x000000, 0x000000,
534 0x000000, 0x000000, 0x000000, 0x000000,
535 0x000000, 0x000000, 0x000000, 0x000000,
536 0x000000, 0x000000, 0x000000, 0x000000,
537 0x000000, 0x000000, 0x000000, 0x000000,
538 0x000000, 0x000000, 0x000000, 0x000000,
539 0x000000, 0x000000, 0x000000, 0x000000,
540 0x000000, 0x000000, 0x000000, 0x000000,
541 0x000000, 0x000000, 0x000000, 0x000000,
542 0x000000, 0x000000, 0x000000, 0x000000,
543 0x000000, 0x000000, 0x000000, 0x000000,
544 0x000000, 0x000000, 0x000000, 0x000000,
545 0x000000, 0x000000, 0x000000, 0x000000,
546 0x000000, 0x000000, 0x000000, 0x000000,
547 0x000000, 0x000000, 0x000000, 0x000000,
548 0x000000, 0x000000, 0x000000, 0x000000,
549 0x000000, 0x000000, 0x000000, 0x000000,
550 0x000000, 0x000000, 0x000000, 0x000000,
551 0x000000, 0x000000, 0x000000, 0x000000,
552 0x000000, 0x000000, 0x000000, 0x000000,
553 0x000000, 0x000000, 0x000000, 0x000000,
554 0x000000, 0x000000, 0x000000, 0x000000,
555 0x000000, 0x000000, 0x000000, 0x000000,
556 0x000000, 0x000000, 0x000000, 0x000000,
557 0x000000, 0x000000, 0x000000, 0x000000,
558 0x000000, 0x000000, 0x000000, 0x000000,
559 0x000000, 0x000000, 0x000000, 0x000000,
560 0x000000, 0x000000, 0x000000, 0x000000,
561 0x000000, 0x000000, 0x000000, 0x000000,
562 0x000000, 0x000000, 0x000000, 0x000000,
563 0x000000, 0x000000, 0x000000, 0x000000,
564 0x000000, 0x000000, 0x000000, 0x000000,
565 0x000000, 0x000000, 0x000000, 0x000000,
566 0x000000, 0x000000, 0x000000, 0x000000,
567 0x000000, 0x000000, 0x000000, 0x000000,
568 0x000000, 0x000000, 0x000000, 0x000000,
569 0x000000, 0x000000, 0x000000, 0x000000,
570 0x000000, 0x000000, 0x000000, 0x000000,
571 0x000000, 0x000000, 0x000000, 0x000000,
572 0x000000, 0x000000, 0x000000, 0x000000,
573 0x000000, 0x000000, 0x000000, 0x000000,
574 0x000000, 0x000000, 0x000000, 0x000000,
575 0x000000, 0x000000, 0x000000, 0x000000,
576 0x000000, 0x000000, 0x000000, 0x000000,
577 0x000000, 0x000000, 0x000000, 0x000000,
578 0x000000, 0x000000, 0x000000, 0x000000,
579 0x000000, 0x000000, 0x000000, 0x000000,
580 0x000000, 0x000000, 0x000000, 0x000000,
581 0x000000, 0x000000, 0x000000, 0x000000,
582 0x000000, 0x000000, 0x000000, 0x000000,
583 0x000000, 0x000000, 0x000000, 0x000000,
584 0x000000, 0x000000, 0x000000, 0x000000,
585 0x000000, 0x000000, 0x000000, 0x000000,
586 0x000000, 0x000000, 0x000000, 0x000000,
587 0x000000, 0x000000, 0x000000, 0x000000,
588 0x000000, 0x000000, 0x000000, 0x000000,
589 0x000000, 0x000000, 0x000000, 0x000000,
590 0x000000, 0x000000, 0x000000, 0x000000,
591 0x000000, 0x000000, 0x000000, 0x000000,
592 0x000000, 0x000000, 0x000000, 0x000000,
593 0x000000, 0x000000, 0x000000, 0x000000,
594 0x000000, 0x000000, 0x000000, 0x000000,
595 0x000000, 0x000000, 0x000000, 0x000000,
596 0x000000, 0x000000, 0x000000, 0x000000,
597 0x000000, 0x000000, 0x000000, 0x000000,
598 0x000000, 0x000000, 0x000000, 0x000000,
599 0x000000, 0x000000, 0x000000, 0x000000,
600 0x000000, 0x000000, 0x000000, 0x000000,
601 0x000000, 0x000000, 0x000000, 0x000000,
602 0x000000, 0x000000, 0x000000, 0x000000,
603 0x000000, 0x000000, 0x000000, 0x000000,
604 0x000000, 0x000000, 0x000000, 0x000000,
605 0x000000, 0x000000, 0x000000, 0x000000,
606 0x000000, 0x000000, 0x000000, 0x000000,
607 0x000000, 0x000000, 0x000000, 0x000000,
608 0x000000, 0x000000, 0x000000, 0x000000,
609 0x000000, 0x000000, 0x000000, 0x000000,
610 0x000000, 0x000000, 0x000000, 0x000000,
611 0x000000, 0x000000, 0x000000, 0x000000,
612 0x000000, 0x000000, 0x000000, 0x000000,
613 0x000000, 0x000000, 0x000000, 0x000000,
614 0x000000, 0x000000, 0x000000, 0x000000,
615 0x000000, 0x000000, 0x000000, 0x000000,
616 0x000000, 0x000000, 0x000000, 0x000000,
617 0x000000, 0x000000, 0x000000, 0x000000,
618 0x000000, 0x000000, 0x000000, 0x000000,
619 0x000000, 0x000000, 0x000000, 0x000000,
620 0x000000, 0x000000, 0x000000, 0x000000,
621 0x000000, 0x000000, 0x000000, 0x000000,
622 0x000000, 0x000000, 0x000000, 0x000000,
623 0x000000, 0x000000, 0x000000, 0x000000,
624 0x000000, 0x000000, 0x000000, 0x000000,
625 0x000000, 0x000000, 0x000000, 0x000000,
626 0x000000, 0x000000, 0x000000, 0x000000,
627 0x000000, 0x000000, 0x000000, 0x000000,
628 0x000000, 0x000000, 0x000000, 0x000000,
629 0x000000, 0x000000, 0x000000, 0x000000,
630 0x000000, 0x000000, 0x000000, 0x000000,
631 0x000000, 0x000000, 0x000000, 0x000000,
632 0x000000, 0x000000, 0x000000, 0x000000,
633 0x000000, 0x000000, 0x000000, 0x000000,
634 0x000000, 0x000000, 0x000000, 0x000000,
635 0x000000, 0x000000, 0x000000, 0x000000,
636 0x000000, 0x000000, 0x000000, 0x000000,
637 0x000000, 0x000000, 0x000000, 0x000000,
638 0x000000, 0x000000, 0x000000, 0x000000,
639 0x000000, 0x000000, 0x000000, 0x000000,
640 0x000000, 0x000000, 0x000000, 0x000000,
641 0x000000, 0x000000, 0x000000, 0x000000,
642 0x000000, 0x000000, 0x000000, 0x000000,
643 0x000000, 0x000000, 0x000000, 0x000000,
644 0x000000, 0x000000, 0x000000, 0x000000,
645 0x000000, 0x000000, 0x000000, 0x000000,
646 0x000000, 0x000000, 0x000000, 0x000000,
647 0x000000, 0x000000, 0x000000, 0x000000,
648 0x000000, 0x000000, 0x000000, 0x000000,
649 0x000000, 0x000000, 0x000000, 0x000000,
650 0x000000, 0x000000, 0x000000, 0x000000,
651 0x000000, 0x000000, 0x000000, 0x000000,
652 0x000000, 0x000000, 0x000000, 0x000000,
653 0x000000, 0x000000, 0x000000, 0x000000,
654 0x000000, 0x000000, 0x000000, 0x000000,
655 0x000000, 0x000000, 0x000000, 0x000000,
656 0x000000, 0x000000, 0x000000, 0x000000,
657 0x000000, 0x000000, 0x000000, 0x000000,
658 0x000000, 0x000000, 0x000000, 0x000000,
659 0x000000, 0x000000, 0x000000, 0x000000,
660 0x000000, 0x000000, 0x000000, 0x000000,
661 0x000000, 0x000000, 0x000000, 0x000000,
662 0x000000, 0x000000, 0x000000, 0x000000,
663 0x000000, 0x000000, 0x000000, 0x000000,
664 0x000000, 0x000000, 0x000000, 0x000000,
665 0x000000, 0x000000, 0x000000, 0x000000,
666 0x000000, 0x000000, 0x000000, 0x000000,
667 0x000000, 0x000000, 0x000000, 0x000000,
668 0x000000, 0x000000, 0x000000, 0x000000,
669 0x000000, 0x000000, 0x000000, 0x000000,
670 0x000000, 0x000000, 0x000000, 0x000000,
671 0x000000, 0x000000, 0x000000, 0x000000,
672 0x000000, 0x000000, 0x000000, 0x000000,
673 0x000000, 0x000000, 0x000000, 0x000000,
674 0x000000, 0x000000, 0x000000, 0x000000,
675 0x000000, 0x000000, 0x000000, 0x000000,
676 0x000000, 0x000000, 0x000000, 0x000000,
677 0x000000, 0x000000, 0x000000, 0x000000,
678 0x000000, 0x000000, 0x000000, 0x000000,
679 0x000000, 0x000000, 0x000000, 0x000000,
680 0x000000, 0x000000, 0x000000, 0x000000,
681 0x000000, 0x000000, 0x000000, 0x000000,
682 0x000000, 0x000000, 0x000000, 0x000000,
683 0x000000, 0x000000, 0x000000, 0x000000,
684 0x000000, 0x000000, 0x000000, 0x000000,
685 0x000000, 0x000000, 0x000000, 0x000000,
686 0x000000, 0x000000, 0x000000, 0x000000,
687 0x000000, 0x000000, 0x000000, 0x000000,
688 0x000000, 0x000000, 0x000000, 0x000000,
689 0x000000, 0x000000, 0x000000, 0x000000,
690 0x000000, 0x000000, 0x000000, 0x000000,
691 0x000000, 0x000000, 0x000000, 0x000000,
692 0x000000, 0x000000, 0x000000, 0x000000,
693 0x000000, 0x000000, 0x000000, 0x000000,
694 0x000000, 0x000000, 0x000000, 0x000000,
695 0x000000, 0x000000, 0x000000, 0x000000,
696 0x000000, 0x000000, 0x000000, 0x000000,
697 0x000000, 0x000000, 0x000000, 0x000000,
698 0x000000, 0x000000, 0x000000, 0x000000,
699 0x000000, 0x000000, 0x000000, 0x000000,
700 0x000000, 0x000000, 0x000000, 0x000000,
701 0x000000, 0x000000, 0x000000, 0x000000,
702 0x000000, 0x000000, 0x000000, 0x000000,
703 0x000000, 0x000000, 0x000000, 0x000000,
704 0x000000, 0x000000, 0x000000, 0x000000,
705 0x000000, 0x000000, 0x000000, 0x000000,
706 0x000000, 0x000000, 0x000000, 0x000000,
707 0x000000, 0x000000, 0x000000, 0x000000,
708 0x000000, 0x000000, 0x000000, 0x000000,
709 0x000000, 0x000000, 0x000000, 0x000000,
710 0x000000, 0x000000, 0x000000, 0x000000,
711 0x000000, 0x000000, 0x000000, 0x000000,
712 0x000000, 0x000000, 0x000000, 0x000000,
713 0x000000, 0x000000, 0x000000, 0x000000,
714 0x000000, 0x000000, 0x000000, 0x000000,
715 0x000000, 0x000000, 0x000000, 0x000000,
716 0x000000, 0x000000, 0x000000, 0x000000,
717 0x000000, 0x000000, 0x000000, 0x000000,
718 0x000000, 0x000000, 0x000000, 0x000000,
719 0x000000, 0x000000, 0x000000, 0x000000,
720 0x000000, 0x000000, 0x000000, 0x000000,
721 0x000000, 0x000000, 0x000000, 0x000000,
722 0x000000, 0x000000, 0x000000, 0x000000,
723 0x000000, 0x000000, 0x000000, 0x000000,
724 0x000000, 0x000000, 0x000000, 0x000000,
725 0x000000, 0x000000, 0x000000, 0x000000,
726 0x000000, 0x000000, 0x000000, 0x000000,
727 0x000000, 0x000000, 0x000000, 0x000000,
728 0x000000, 0x000000, 0x000000, 0x000000,
729 0x000000, 0x000000, 0x000000, 0x000000,
730 0x000000, 0x000000, 0x000000, 0x000000,
731 0x000000, 0x000000, 0x000000, 0x000000,
732 0x000000, 0x000000, 0x000000, 0x000000,
733 0x000000, 0x000000, 0x000000, 0x000000,
734 0x000000, 0x000000, 0x000000, 0x000000,
735 0x000000, 0x000000, 0x000000, 0x000000,
736 0x000000, 0x000000, 0x000000, 0x000000,
737 0x000000, 0x000000, 0x000000, 0x000000,
738 0x000000, 0x000000, 0x000000, 0x000000,
739 0x000000, 0x000000, 0x000000, 0x000000,
740 0x000000, 0x000000, 0x000000, 0x000000,
741 0x000000, 0x000000, 0x000000, 0x000000,
742 0x000000, 0x000000, 0x000000, 0x000000,
743 0x000000, 0x000000, 0x000000, 0x000000,
744 0x000000, 0x000000, 0x000000, 0x000000,
745 0x000000, 0x000000, 0x000000, 0x000000,
746 0x000000, 0x000000, 0x000000, 0x000000,
747 0x000000, 0x000000, 0x000000, 0x000000,
748 0x000000, 0x000000, 0x000000, 0x000000,
749 0x000000, 0x000000, 0x000000, 0x000000,
750 0x000000, 0x000000, 0x000000, 0x000000,
751 0x000000, 0x000000, 0x000000, 0x000000,
752 0x000000, 0x000000, 0x000000, 0x000000,
753 0x000000, 0x000000, 0x000000, 0x000000,
754 0x000000, 0x000000, 0x000000, 0x000000,
755 0x000000, 0x000000, 0x000000, 0x000000,
756 0x000000, 0x000000, 0x000000, 0x000000,
757 0x000000, 0x000000, 0x000000, 0x000000,
758 0x000000, 0x000000, 0x000000, 0x000000,
759 0x000000, 0x000000, 0x000000, 0x000000,
760 0x000000, 0x000000, 0x000000, 0x000000,
761 0x000000, 0x000000, 0x000000, 0x000000,
762 0x000000, 0x000000, 0x000000, 0x000000,
763 0x000000, 0x000000, 0x000000, 0x000000,
764 0x000000, 0x000000, 0x000000, 0x000000,
765 0x000000, 0x000000, 0x000000, 0x000000,
766 0x000000, 0x000000, 0x000000, 0x000000,
767 0x000000, 0x000000, 0x000000, 0x000000,
768 0x000000, 0x000000, 0x000000, 0x000000,
769 0x000000, 0x000000, 0x000000, 0x000000,
770 0x000000, 0x000000, 0x000000, 0x000000,
771 0x000000, 0x000000, 0x000000, 0x000000,
772 0x000000, 0x000000, 0x000000, 0x000000,
773 0x000000, 0x000000, 0x000000, 0x000000,
774 0x000000, 0x000000, 0x000000, 0x000000,
775 0x000000, 0x000000, 0x000000, 0x000000,
776 0x000000, 0x000000, 0x000000, 0x000000,
777 0x000000, 0x000000, 0x000000, 0x000000,
778 0x000000, 0x000000, 0x000000, 0x000000,
779 0x000000, 0x000000, 0x000000, 0x000000,
780 0x000000, 0x000000, 0x000000, 0x000000,
781 0x000000, 0x000000, 0x000000, 0x000000,
782 0x000000, 0x000000, 0x000000, 0x000000,
783 0x000000, 0x000000, 0x000000, 0x000000
784};
785
786// --------------------------------------------
787// DS-1E Controller InstructionRAM Code
788// 1999/06/21
789// Buf441 slot is Enabled.
790// --------------------------------------------
 791 // 04/09 created
 792 // 04/12 stop noise fix
 793 // 06/21 WorkingOff timing
794static u32 CntrlInst1E[YDSXG_CTRLLENGTH / 4] = {
795 0x000007, 0x240007, 0x0C0007, 0x1C0007,
796 0x060007, 0x700002, 0x000020, 0x030040,
797 0x007104, 0x004286, 0x030040, 0x000F0D,
798 0x000810, 0x20043A, 0x000282, 0x00020D,
799 0x000810, 0x20043A, 0x001282, 0x200E82,
800 0x00800D, 0x000810, 0x20043A, 0x001A82,
801 0x03460D, 0x000810, 0x10043A, 0x02EC0D,
802 0x000810, 0x18043A, 0x00010D, 0x020015,
803 0x0000FD, 0x000020, 0x038860, 0x039060,
804 0x038060, 0x038040, 0x038040, 0x038040,
805 0x018040, 0x000A7D, 0x038040, 0x038040,
806 0x018040, 0x200402, 0x000882, 0x08001A,
807 0x000904, 0x017186, 0x000007, 0x260007,
808 0x400007, 0x000007, 0x03258D, 0x000810,
809 0x18043A, 0x260007, 0x284402, 0x00087D,
810 0x018042, 0x00160A, 0x05A206, 0x000007,
811 0x440007, 0x00230D, 0x000810, 0x08043A,
812 0x22FA06, 0x000007, 0x0007FD, 0x018042,
813 0x08000A, 0x000904, 0x02AB86, 0x000195,
814 0x090D04, 0x000007, 0x000820, 0x0000F5,
815 0x000B7D, 0x01F060, 0x0000FD, 0x033A06,
816 0x018040, 0x000A7D, 0x038042, 0x13804A,
817 0x18000A, 0x001820, 0x059060, 0x058860,
818 0x018040, 0x0000FD, 0x018042, 0x70000A,
819 0x000115, 0x071144, 0x033B86, 0x030000,
820 0x007020, 0x036206, 0x018040, 0x00360D,
821 0x000810, 0x08043A, 0x232206, 0x000007,
822 0x02EC0D, 0x000810, 0x18043A, 0x019A06,
823 0x000007, 0x240007, 0x000F8D, 0x000810,
824 0x00163A, 0x002402, 0x005C02, 0x0028FD,
825 0x000020, 0x018040, 0x08000D, 0x000815,
826 0x510984, 0x000007, 0x00004D, 0x000E5D,
827 0x000E02, 0x00430D, 0x000810, 0x08043A,
828 0x2E1206, 0x000007, 0x00008D, 0x000924,
829 0x000F02, 0x00470D, 0x000810, 0x08043A,
830 0x2E1206, 0x000007, 0x480480, 0x001210,
831 0x28043A, 0x00778D, 0x000810, 0x280C3A,
832 0x00068D, 0x000810, 0x28143A, 0x284402,
833 0x03258D, 0x000810, 0x18043A, 0x07FF8D,
834 0x000820, 0x0002FD, 0x018040, 0x260007,
835 0x200007, 0x0002FD, 0x018042, 0x08000A,
836 0x000904, 0x051286, 0x000007, 0x240007,
837 0x02EC0D, 0x000810, 0x18043A, 0x00387D,
838 0x018042, 0x08000A, 0x001015, 0x010984,
839 0x019B86, 0x000007, 0x01B206, 0x000007,
840 0x0008FD, 0x018042, 0x18000A, 0x001904,
841 0x22B886, 0x280007, 0x001810, 0x28043A,
842 0x280C02, 0x00000D, 0x000810, 0x28143A,
843 0x08808D, 0x000820, 0x0002FD, 0x018040,
844 0x200007, 0x00020D, 0x189904, 0x000007,
845 0x00402D, 0x0000BD, 0x0002FD, 0x018042,
846 0x08000A, 0x000904, 0x065A86, 0x000007,
847 0x000100, 0x000A20, 0x00047D, 0x018040,
848 0x018042, 0x20000A, 0x003015, 0x012144,
849 0x036186, 0x000007, 0x002104, 0x036186,
850 0x000007, 0x000F8D, 0x000810, 0x280C3A,
851 0x023944, 0x07C986, 0x000007, 0x001810,
852 0x28043A, 0x08810D, 0x000820, 0x0002FD,
853 0x018040, 0x200007, 0x002810, 0x78003A,
854 0x00788D, 0x000810, 0x08043A, 0x2A1206,
855 0x000007, 0x00400D, 0x001015, 0x189904,
856 0x292904, 0x393904, 0x000007, 0x070206,
857 0x000007, 0x0004F5, 0x00007D, 0x000020,
858 0x00008D, 0x010860, 0x018040, 0x00047D,
859 0x038042, 0x21804A, 0x18000A, 0x021944,
860 0x229086, 0x000007, 0x004075, 0x71F104,
861 0x000007, 0x010042, 0x28000A, 0x002904,
862 0x225886, 0x000007, 0x003C0D, 0x30A904,
863 0x000007, 0x00077D, 0x018042, 0x08000A,
864 0x000904, 0x08DA86, 0x00057D, 0x002820,
865 0x03B060, 0x08F206, 0x018040, 0x003020,
866 0x03A860, 0x018040, 0x0002FD, 0x018042,
867 0x08000A, 0x000904, 0x08FA86, 0x000007,
868 0x00057D, 0x018042, 0x28040A, 0x000E8D,
869 0x000810, 0x280C3A, 0x00000D, 0x000810,
870 0x28143A, 0x09000D, 0x000820, 0x0002FD,
871 0x018040, 0x200007, 0x003DFD, 0x000020,
872 0x018040, 0x00107D, 0x009D8D, 0x000810,
873 0x08043A, 0x2A1206, 0x000007, 0x000815,
874 0x08001A, 0x010984, 0x0A5186, 0x00137D,
875 0x200500, 0x280F20, 0x338F60, 0x3B8F60,
876 0x438F60, 0x4B8F60, 0x538F60, 0x5B8F60,
877 0x038A60, 0x018040, 0x00107D, 0x018042,
878 0x08000A, 0x000215, 0x010984, 0x3A8186,
879 0x000007, 0x007FBD, 0x383DC4, 0x000007,
880 0x001A7D, 0x001375, 0x018042, 0x09004A,
881 0x10000A, 0x0B8D04, 0x139504, 0x000007,
882 0x000820, 0x019060, 0x001104, 0x225886,
883 0x010040, 0x0017FD, 0x018042, 0x08000A,
884 0x000904, 0x225A86, 0x000007, 0x00197D,
885 0x038042, 0x09804A, 0x10000A, 0x000924,
886 0x001664, 0x0011FD, 0x038042, 0x2B804A,
887 0x19804A, 0x00008D, 0x218944, 0x000007,
888 0x002244, 0x0C1986, 0x000007, 0x001A64,
889 0x002A24, 0x00197D, 0x080102, 0x100122,
890 0x000820, 0x039060, 0x018040, 0x003DFD,
891 0x00008D, 0x000820, 0x018040, 0x001375,
892 0x001A7D, 0x010042, 0x09804A, 0x10000A,
893 0x00021D, 0x0189E4, 0x2992E4, 0x309144,
894 0x000007, 0x00060D, 0x000A15, 0x000C1D,
895 0x001025, 0x00A9E4, 0x012BE4, 0x000464,
896 0x01B3E4, 0x0232E4, 0x000464, 0x000464,
897 0x000464, 0x000464, 0x00040D, 0x08B1C4,
898 0x000007, 0x000820, 0x000BF5, 0x030040,
899 0x00197D, 0x038042, 0x09804A, 0x000A24,
900 0x08000A, 0x080E64, 0x000007, 0x100122,
901 0x000820, 0x031060, 0x010040, 0x0064AC,
902 0x00027D, 0x000020, 0x018040, 0x00107D,
903 0x018042, 0x0011FD, 0x3B804A, 0x09804A,
904 0x20000A, 0x000095, 0x1A1144, 0x00A144,
905 0x0E5886, 0x00040D, 0x00B984, 0x0E5986,
906 0x0018FD, 0x018042, 0x0010FD, 0x09804A,
907 0x28000A, 0x000095, 0x010924, 0x002A64,
908 0x0E4986, 0x000007, 0x002904, 0x0E5A86,
909 0x000007, 0x0E6206, 0x080002, 0x00008D,
910 0x00387D, 0x000820, 0x018040, 0x00127D,
911 0x018042, 0x10000A, 0x003904, 0x0F0986,
912 0x00080D, 0x7FFFB5, 0x00B984, 0x0ED986,
913 0x000025, 0x0FB206, 0x00002D, 0x000015,
914 0x00082D, 0x02E00D, 0x000820, 0x0FFA06,
915 0x00000D, 0x7F8035, 0x00B984, 0x0FA986,
916 0x400025, 0x00008D, 0x110944, 0x000007,
917 0x00018D, 0x109504, 0x000007, 0x009164,
918 0x000424, 0x000424, 0x000424, 0x100102,
919 0x280002, 0x02DF0D, 0x000820, 0x0FFA06,
920 0x00018D, 0x00042D, 0x00008D, 0x109504,
921 0x000007, 0x00020D, 0x109184, 0x000007,
922 0x02DF8D, 0x000820, 0x00008D, 0x0038FD,
923 0x018040, 0x003BFD, 0x001020, 0x03A860,
924 0x000815, 0x313184, 0x212184, 0x000007,
925 0x03B060, 0x03A060, 0x018040, 0x0022FD,
926 0x000095, 0x010924, 0x000424, 0x000424,
927 0x001264, 0x100102, 0x000820, 0x039060,
928 0x018040, 0x001924, 0x010F0D, 0x00397D,
929 0x000820, 0x058040, 0x038042, 0x09844A,
930 0x000606, 0x08040A, 0x000424, 0x000424,
931 0x00117D, 0x018042, 0x08000A, 0x000A24,
932 0x280502, 0x280C02, 0x09800D, 0x000820,
933 0x0002FD, 0x018040, 0x200007, 0x0022FD,
934 0x018042, 0x08000A, 0x000095, 0x280DC4,
935 0x011924, 0x00197D, 0x018042, 0x0011FD,
936 0x09804A, 0x10000A, 0x0000B5, 0x113144,
937 0x0A8D04, 0x000007, 0x080A44, 0x129504,
938 0x000007, 0x0023FD, 0x001020, 0x038040,
939 0x101244, 0x000007, 0x000820, 0x039060,
940 0x018040, 0x0002FD, 0x018042, 0x08000A,
941 0x000904, 0x123286, 0x000007, 0x003BFD,
942 0x000100, 0x000A10, 0x0B807A, 0x13804A,
943 0x090984, 0x000007, 0x000095, 0x013D04,
944 0x12B886, 0x10000A, 0x100002, 0x090984,
945 0x000007, 0x038042, 0x11804A, 0x090D04,
946 0x000007, 0x10000A, 0x090D84, 0x000007,
947 0x00257D, 0x000820, 0x018040, 0x00010D,
948 0x000810, 0x28143A, 0x00127D, 0x018042,
949 0x20000A, 0x00197D, 0x018042, 0x00117D,
950 0x31804A, 0x10000A, 0x003124, 0x013B8D,
951 0x00397D, 0x000820, 0x058040, 0x038042,
952 0x09844A, 0x000606, 0x08040A, 0x300102,
953 0x003124, 0x000424, 0x000424, 0x001224,
954 0x280502, 0x001A4C, 0x143986, 0x700002,
955 0x00002D, 0x030000, 0x00387D, 0x018042,
956 0x10000A, 0x146206, 0x002124, 0x0000AD,
957 0x100002, 0x00010D, 0x000924, 0x006B24,
958 0x014A0D, 0x00397D, 0x000820, 0x058040,
959 0x038042, 0x09844A, 0x000606, 0x08040A,
960 0x003264, 0x00008D, 0x000A24, 0x001020,
961 0x00227D, 0x018040, 0x014F8D, 0x000810,
962 0x08043A, 0x2B5A06, 0x000007, 0x002820,
963 0x00207D, 0x018040, 0x00117D, 0x038042,
964 0x13804A, 0x33800A, 0x00387D, 0x018042,
965 0x08000A, 0x000904, 0x177286, 0x000007,
966 0x00008D, 0x030964, 0x015B0D, 0x00397D,
967 0x000820, 0x058040, 0x038042, 0x09844A,
968 0x000606, 0x08040A, 0x380102, 0x000424,
969 0x000424, 0x001224, 0x0002FD, 0x018042,
970 0x08000A, 0x000904, 0x15DA86, 0x000007,
971 0x280502, 0x001A4C, 0x177186, 0x000007,
972 0x032164, 0x00632C, 0x003DFD, 0x018042,
973 0x08000A, 0x000095, 0x090904, 0x000007,
974 0x000820, 0x001A4C, 0x169986, 0x018040,
975 0x030000, 0x16B206, 0x002124, 0x00010D,
976 0x000924, 0x006B24, 0x016F0D, 0x00397D,
977 0x000820, 0x058040, 0x038042, 0x09844A,
978 0x000606, 0x08040A, 0x003A64, 0x000095,
979 0x001224, 0x0002FD, 0x018042, 0x08000A,
980 0x000904, 0x171286, 0x000007, 0x01760D,
981 0x000810, 0x08043A, 0x2B5A06, 0x000007,
982 0x160A06, 0x000007, 0x007020, 0x08010A,
983 0x10012A, 0x0020FD, 0x038860, 0x039060,
984 0x018040, 0x00227D, 0x018042, 0x003DFD,
985 0x08000A, 0x31844A, 0x000904, 0x181086,
986 0x18008B, 0x00008D, 0x189904, 0x00312C,
987 0x18E206, 0x000007, 0x00324C, 0x186B86,
988 0x000007, 0x001904, 0x186886, 0x000007,
989 0x000095, 0x199144, 0x00222C, 0x003124,
990 0x00636C, 0x000E3D, 0x001375, 0x000BFD,
991 0x010042, 0x09804A, 0x10000A, 0x038AEC,
992 0x0393EC, 0x00224C, 0x18E186, 0x000007,
993 0x00008D, 0x189904, 0x00226C, 0x00322C,
994 0x30050A, 0x301DAB, 0x002083, 0x0018FD,
995 0x018042, 0x08000A, 0x018924, 0x300502,
996 0x001083, 0x001875, 0x010042, 0x10000A,
997 0x00008D, 0x010924, 0x001375, 0x330542,
998 0x330CCB, 0x332CCB, 0x3334CB, 0x333CCB,
999 0x3344CB, 0x334CCB, 0x3354CB, 0x305C8B,
1000 0x006083, 0x0002F5, 0x010042, 0x08000A,
1001 0x000904, 0x19B286, 0x000007, 0x001E2D,
1002 0x0005FD, 0x018042, 0x08000A, 0x028924,
1003 0x280502, 0x00060D, 0x000810, 0x280C3A,
1004 0x00008D, 0x000810, 0x28143A, 0x0A808D,
1005 0x000820, 0x0002F5, 0x010040, 0x220007,
1006 0x001275, 0x030042, 0x21004A, 0x00008D,
1007 0x1A0944, 0x000007, 0x01AB8D, 0x000810,
1008 0x08043A, 0x2CAA06, 0x000007, 0x0001F5,
1009 0x030042, 0x0D004A, 0x10000A, 0x089144,
1010 0x000007, 0x000820, 0x010040, 0x0025F5,
1011 0x0A3144, 0x000007, 0x000820, 0x032860,
1012 0x030040, 0x00217D, 0x038042, 0x0B804A,
1013 0x10000A, 0x000820, 0x031060, 0x030040,
1014 0x00008D, 0x000124, 0x00012C, 0x000E64,
1015 0x001A64, 0x00636C, 0x08010A, 0x10012A,
1016 0x000820, 0x031060, 0x030040, 0x0020FD,
1017 0x018042, 0x08000A, 0x00227D, 0x018042,
1018 0x10000A, 0x000820, 0x031060, 0x030040,
1019 0x00197D, 0x018042, 0x08000A, 0x0022FD,
1020 0x038042, 0x10000A, 0x000820, 0x031060,
1021 0x030040, 0x090D04, 0x000007, 0x000820,
1022 0x030040, 0x038042, 0x0B804A, 0x10000A,
1023 0x000820, 0x031060, 0x030040, 0x038042,
1024 0x13804A, 0x19804A, 0x110D04, 0x198D04,
1025 0x000007, 0x08000A, 0x001020, 0x031860,
1026 0x030860, 0x030040, 0x00008D, 0x0B0944,
1027 0x000007, 0x000820, 0x010040, 0x0005F5,
1028 0x030042, 0x08000A, 0x000820, 0x010040,
1029 0x0000F5, 0x010042, 0x08000A, 0x000904,
1030 0x1D9886, 0x001E75, 0x030042, 0x01044A,
1031 0x000C0A, 0x1DAA06, 0x000007, 0x000402,
1032 0x000C02, 0x00177D, 0x001AF5, 0x018042,
1033 0x03144A, 0x031C4A, 0x03244A, 0x032C4A,
1034 0x03344A, 0x033C4A, 0x03444A, 0x004C0A,
1035 0x00043D, 0x0013F5, 0x001AFD, 0x030042,
1036 0x0B004A, 0x1B804A, 0x13804A, 0x20000A,
1037 0x089144, 0x19A144, 0x0389E4, 0x0399EC,
1038 0x005502, 0x005D0A, 0x030042, 0x0B004A,
1039 0x1B804A, 0x13804A, 0x20000A, 0x089144,
1040 0x19A144, 0x0389E4, 0x0399EC, 0x006502,
1041 0x006D0A, 0x030042, 0x0B004A, 0x19004A,
1042 0x2B804A, 0x13804A, 0x21804A, 0x30000A,
1043 0x089144, 0x19A144, 0x2AB144, 0x0389E4,
1044 0x0399EC, 0x007502, 0x007D0A, 0x03A9E4,
1045 0x000702, 0x00107D, 0x000415, 0x018042,
1046 0x08000A, 0x0109E4, 0x000F02, 0x002AF5,
1047 0x0019FD, 0x010042, 0x09804A, 0x10000A,
1048 0x000934, 0x001674, 0x0029F5, 0x010042,
1049 0x10000A, 0x00917C, 0x002075, 0x010042,
1050 0x08000A, 0x000904, 0x200A86, 0x0026F5,
1051 0x0027F5, 0x030042, 0x09004A, 0x10000A,
1052 0x000A3C, 0x00167C, 0x001A75, 0x000BFD,
1053 0x010042, 0x51804A, 0x48000A, 0x160007,
1054 0x001075, 0x010042, 0x282C0A, 0x281D12,
1055 0x282512, 0x001F32, 0x1E0007, 0x0E0007,
1056 0x001975, 0x010042, 0x002DF5, 0x0D004A,
1057 0x10000A, 0x009144, 0x20EA86, 0x010042,
1058 0x28340A, 0x000E5D, 0x00008D, 0x000375,
1059 0x000820, 0x010040, 0x05D2F4, 0x54D104,
1060 0x00735C, 0x218B86, 0x000007, 0x0C0007,
1061 0x080007, 0x0A0007, 0x02178D, 0x000810,
1062 0x08043A, 0x34B206, 0x000007, 0x219206,
1063 0x000007, 0x080007, 0x002275, 0x010042,
1064 0x20000A, 0x002104, 0x225886, 0x001E2D,
1065 0x0002F5, 0x010042, 0x08000A, 0x000904,
1066 0x21CA86, 0x000007, 0x002010, 0x30043A,
1067 0x00057D, 0x0180C3, 0x08000A, 0x028924,
1068 0x280502, 0x280C02, 0x0A810D, 0x000820,
1069 0x0002F5, 0x010040, 0x220007, 0x0004FD,
1070 0x018042, 0x70000A, 0x030000, 0x007020,
1071 0x07FA06, 0x018040, 0x022B8D, 0x000810,
1072 0x08043A, 0x2CAA06, 0x000007, 0x0002FD,
1073 0x018042, 0x08000A, 0x000904, 0x22C286,
1074 0x000007, 0x020206, 0x000007, 0x000875,
1075 0x0009FD, 0x00010D, 0x234206, 0x000295,
1076 0x000B75, 0x00097D, 0x00000D, 0x000515,
1077 0x010042, 0x18000A, 0x001904, 0x2A0086,
1078 0x0006F5, 0x001020, 0x010040, 0x0004F5,
1079 0x000820, 0x010040, 0x000775, 0x010042,
1080 0x09804A, 0x10000A, 0x001124, 0x000904,
1081 0x23F286, 0x000815, 0x080102, 0x101204,
1082 0x241206, 0x000575, 0x081204, 0x000007,
1083 0x100102, 0x000575, 0x000425, 0x021124,
1084 0x100102, 0x000820, 0x031060, 0x010040,
1085 0x001924, 0x2A0086, 0x00008D, 0x000464,
1086 0x009D04, 0x291086, 0x180102, 0x000575,
1087 0x010042, 0x28040A, 0x00018D, 0x000924,
1088 0x280D02, 0x00000D, 0x000924, 0x281502,
1089 0x10000D, 0x000820, 0x0002F5, 0x010040,
1090 0x200007, 0x001175, 0x0002FD, 0x018042,
1091 0x08000A, 0x000904, 0x24FA86, 0x000007,
1092 0x000100, 0x080B20, 0x130B60, 0x1B0B60,
1093 0x030A60, 0x010040, 0x050042, 0x3D004A,
1094 0x35004A, 0x2D004A, 0x20000A, 0x0006F5,
1095 0x010042, 0x28140A, 0x0004F5, 0x010042,
1096 0x08000A, 0x000315, 0x010D04, 0x260286,
1097 0x004015, 0x000095, 0x010D04, 0x25F086,
1098 0x100022, 0x10002A, 0x261A06, 0x000007,
1099 0x333104, 0x2AA904, 0x000007, 0x032124,
1100 0x280502, 0x284402, 0x001124, 0x400102,
1101 0x000424, 0x000424, 0x003224, 0x00292C,
1102 0x00636C, 0x277386, 0x000007, 0x02B164,
1103 0x000464, 0x000464, 0x00008D, 0x000A64,
1104 0x280D02, 0x10008D, 0x000820, 0x0002F5,
1105 0x010040, 0x220007, 0x00008D, 0x38B904,
1106 0x000007, 0x03296C, 0x30010A, 0x0002F5,
1107 0x010042, 0x08000A, 0x000904, 0x270286,
1108 0x000007, 0x00212C, 0x28050A, 0x00316C,
1109 0x00046C, 0x00046C, 0x28450A, 0x001124,
1110 0x006B64, 0x100102, 0x00008D, 0x01096C,
1111 0x280D0A, 0x10010D, 0x000820, 0x0002F5,
1112 0x010040, 0x220007, 0x004124, 0x000424,
1113 0x000424, 0x003224, 0x300102, 0x032944,
1114 0x27FA86, 0x000007, 0x300002, 0x0004F5,
1115 0x010042, 0x08000A, 0x000315, 0x010D04,
1116 0x284086, 0x003124, 0x000464, 0x300102,
1117 0x0002F5, 0x010042, 0x08000A, 0x000904,
1118 0x284A86, 0x000007, 0x284402, 0x003124,
1119 0x300502, 0x003924, 0x300583, 0x000883,
1120 0x0005F5, 0x010042, 0x28040A, 0x00008D,
1121 0x008124, 0x280D02, 0x00008D, 0x008124,
1122 0x281502, 0x10018D, 0x000820, 0x0002F5,
1123 0x010040, 0x220007, 0x001025, 0x000575,
1124 0x030042, 0x09004A, 0x10000A, 0x0A0904,
1125 0x121104, 0x000007, 0x001020, 0x050860,
1126 0x050040, 0x0006FD, 0x018042, 0x09004A,
1127 0x10000A, 0x0000A5, 0x0A0904, 0x121104,
1128 0x000007, 0x000820, 0x019060, 0x010040,
1129 0x0002F5, 0x010042, 0x08000A, 0x000904,
1130 0x29CA86, 0x000007, 0x244206, 0x000007,
1131 0x000606, 0x000007, 0x0002F5, 0x010042,
1132 0x08000A, 0x000904, 0x2A1A86, 0x000007,
1133 0x000100, 0x080B20, 0x138B60, 0x1B8B60,
1134 0x238B60, 0x2B8B60, 0x338B60, 0x3B8B60,
1135 0x438B60, 0x4B8B60, 0x538B60, 0x5B8B60,
1136 0x638B60, 0x6B8B60, 0x738B60, 0x7B8B60,
1137 0x038F60, 0x0B8F60, 0x138F60, 0x1B8F60,
1138 0x238F60, 0x2B8F60, 0x338F60, 0x3B8F60,
1139 0x438F60, 0x4B8F60, 0x538F60, 0x5B8F60,
1140 0x638F60, 0x6B8F60, 0x738F60, 0x7B8F60,
1141 0x038A60, 0x000606, 0x018040, 0x00008D,
1142 0x000A64, 0x280D02, 0x000A24, 0x00027D,
1143 0x018042, 0x10000A, 0x001224, 0x0003FD,
1144 0x018042, 0x08000A, 0x000904, 0x2C0A86,
1145 0x000007, 0x00018D, 0x000A24, 0x000464,
1146 0x000464, 0x080102, 0x000924, 0x000424,
1147 0x000424, 0x100102, 0x02000D, 0x009144,
1148 0x2C6186, 0x000007, 0x0001FD, 0x018042,
1149 0x08000A, 0x000A44, 0x2C4386, 0x018042,
1150 0x0A000D, 0x000820, 0x0002FD, 0x018040,
1151 0x200007, 0x00027D, 0x001020, 0x000606,
1152 0x018040, 0x0002F5, 0x010042, 0x08000A,
1153 0x000904, 0x2CB286, 0x000007, 0x00037D,
1154 0x018042, 0x08000A, 0x000904, 0x2CE286,
1155 0x000007, 0x000075, 0x002E7D, 0x010042,
1156 0x0B804A, 0x000020, 0x000904, 0x000686,
1157 0x010040, 0x31844A, 0x30048B, 0x000883,
1158 0x00008D, 0x000810, 0x28143A, 0x00008D,
1159 0x000810, 0x280C3A, 0x000675, 0x010042,
1160 0x08000A, 0x003815, 0x010924, 0x280502,
1161 0x0B000D, 0x000820, 0x0002F5, 0x010040,
1162 0x000606, 0x220007, 0x000464, 0x000464,
1163 0x000606, 0x000007, 0x000134, 0x007F8D,
1164 0x00093C, 0x281D12, 0x282512, 0x001F32,
1165 0x0E0007, 0x00010D, 0x00037D, 0x000820,
1166 0x018040, 0x05D2F4, 0x000007, 0x080007,
1167 0x00037D, 0x018042, 0x08000A, 0x000904,
1168 0x2E8A86, 0x000007, 0x000606, 0x000007,
1169 0x000007, 0x000012, 0x100007, 0x320007,
1170 0x600007, 0x460007, 0x100080, 0x48001A,
1171 0x004904, 0x2EF186, 0x000007, 0x001210,
1172 0x58003A, 0x000145, 0x5C5D04, 0x000007,
1173 0x000080, 0x48001A, 0x004904, 0x2F4186,
1174 0x000007, 0x001210, 0x50003A, 0x005904,
1175 0x2F9886, 0x000045, 0x0000C5, 0x7FFFF5,
1176 0x7FFF7D, 0x07D524, 0x004224, 0x500102,
1177 0x200502, 0x000082, 0x40001A, 0x004104,
1178 0x2FC986, 0x000007, 0x003865, 0x40001A,
1179 0x004020, 0x00104D, 0x04C184, 0x31AB86,
1180 0x000040, 0x040007, 0x000165, 0x000145,
1181 0x004020, 0x000040, 0x000765, 0x080080,
1182 0x40001A, 0x004104, 0x305986, 0x000007,
1183 0x001210, 0x40003A, 0x004104, 0x30B286,
1184 0x00004D, 0x0000CD, 0x004810, 0x20043A,
1185 0x000882, 0x40001A, 0x004104, 0x30C186,
1186 0x000007, 0x004820, 0x005904, 0x319886,
1187 0x000040, 0x0007E5, 0x200480, 0x2816A0,
1188 0x3216E0, 0x3A16E0, 0x4216E0, 0x021260,
1189 0x000040, 0x000032, 0x400075, 0x00007D,
1190 0x07D574, 0x200512, 0x000082, 0x40001A,
1191 0x004104, 0x317186, 0x000007, 0x038A06,
1192 0x640007, 0x0000E5, 0x000020, 0x000040,
1193 0x000A65, 0x000020, 0x020040, 0x020040,
1194 0x000040, 0x000165, 0x000042, 0x70000A,
1195 0x007104, 0x323286, 0x000007, 0x060007,
1196 0x019A06, 0x640007, 0x050000, 0x007020,
1197 0x000040, 0x038A06, 0x640007, 0x000007,
1198 0x00306D, 0x028860, 0x029060, 0x08000A,
1199 0x028860, 0x008040, 0x100012, 0x00100D,
1200 0x009184, 0x32D186, 0x000E0D, 0x009184,
1201 0x33E186, 0x000007, 0x300007, 0x001020,
1202 0x003B6D, 0x008040, 0x000080, 0x08001A,
1203 0x000904, 0x32F186, 0x000007, 0x001220,
1204 0x000DED, 0x008040, 0x008042, 0x10000A,
1205 0x40000D, 0x109544, 0x000007, 0x001020,
1206 0x000DED, 0x008040, 0x008042, 0x20040A,
1207 0x000082, 0x08001A, 0x000904, 0x338186,
1208 0x000007, 0x003B6D, 0x008042, 0x08000A,
1209 0x000E15, 0x010984, 0x342B86, 0x600007,
1210 0x08001A, 0x000C15, 0x010984, 0x341386,
1211 0x000020, 0x1A0007, 0x0002ED, 0x008040,
1212 0x620007, 0x00306D, 0x028042, 0x0A804A,
1213 0x000820, 0x0A804A, 0x000606, 0x10804A,
1214 0x000007, 0x282512, 0x001F32, 0x05D2F4,
1215 0x54D104, 0x00735C, 0x000786, 0x000007,
1216 0x0C0007, 0x0A0007, 0x1C0007, 0x003465,
1217 0x020040, 0x004820, 0x025060, 0x40000A,
1218 0x024060, 0x000040, 0x454944, 0x000007,
1219 0x004020, 0x003AE5, 0x000040, 0x0028E5,
1220 0x000042, 0x48000A, 0x004904, 0x39F886,
1221 0x002C65, 0x000042, 0x40000A, 0x0000D5,
1222 0x454104, 0x000007, 0x000655, 0x054504,
1223 0x368286, 0x0001D5, 0x054504, 0x368086,
1224 0x002B65, 0x000042, 0x003AE5, 0x50004A,
1225 0x40000A, 0x45C3D4, 0x000007, 0x454504,
1226 0x000007, 0x0000CD, 0x444944, 0x000007,
1227 0x454504, 0x000007, 0x00014D, 0x554944,
1228 0x000007, 0x045144, 0x367986, 0x002C65,
1229 0x000042, 0x48000A, 0x4CD104, 0x000007,
1230 0x04C144, 0x368386, 0x000007, 0x160007,
1231 0x002CE5, 0x040042, 0x40000A, 0x004020,
1232 0x000040, 0x002965, 0x000042, 0x40000A,
1233 0x004104, 0x36F086, 0x000007, 0x002402,
1234 0x383206, 0x005C02, 0x0025E5, 0x000042,
1235 0x40000A, 0x004274, 0x002AE5, 0x000042,
1236 0x40000A, 0x004274, 0x500112, 0x0029E5,
1237 0x000042, 0x40000A, 0x004234, 0x454104,
1238 0x000007, 0x004020, 0x000040, 0x003EE5,
1239 0x000020, 0x000040, 0x002DE5, 0x400152,
1240 0x50000A, 0x045144, 0x37DA86, 0x0000C5,
1241 0x003EE5, 0x004020, 0x000040, 0x002BE5,
1242 0x000042, 0x40000A, 0x404254, 0x000007,
1243 0x002AE5, 0x004020, 0x000040, 0x500132,
1244 0x040134, 0x005674, 0x0029E5, 0x020042,
1245 0x42000A, 0x000042, 0x50000A, 0x05417C,
1246 0x0028E5, 0x000042, 0x48000A, 0x0000C5,
1247 0x4CC144, 0x38A086, 0x0026E5, 0x0027E5,
1248 0x020042, 0x40004A, 0x50000A, 0x00423C,
1249 0x00567C, 0x0028E5, 0x004820, 0x000040,
1250 0x281D12, 0x282512, 0x001F72, 0x002965,
1251 0x000042, 0x40000A, 0x004104, 0x393A86,
1252 0x0E0007, 0x160007, 0x1E0007, 0x003EE5,
1253 0x000042, 0x40000A, 0x004104, 0x397886,
1254 0x002D65, 0x000042, 0x28340A, 0x003465,
1255 0x020042, 0x42004A, 0x004020, 0x4A004A,
1256 0x50004A, 0x05D2F4, 0x54D104, 0x00735C,
1257 0x39E186, 0x000007, 0x000606, 0x080007,
1258 0x0C0007, 0x080007, 0x0A0007, 0x0001E5,
1259 0x020045, 0x004020, 0x000060, 0x000365,
1260 0x000040, 0x002E65, 0x001A20, 0x0A1A60,
1261 0x000040, 0x003465, 0x020042, 0x42004A,
1262 0x004020, 0x4A004A, 0x000606, 0x50004A,
1263 0x0017FD, 0x018042, 0x08000A, 0x000904,
1264 0x225A86, 0x000007, 0x00107D, 0x018042,
1265 0x0011FD, 0x33804A, 0x19804A, 0x20000A,
1266 0x000095, 0x2A1144, 0x01A144, 0x3B9086,
1267 0x00040D, 0x00B184, 0x3B9186, 0x0018FD,
1268 0x018042, 0x0010FD, 0x09804A, 0x38000A,
1269 0x000095, 0x010924, 0x003A64, 0x3B8186,
1270 0x000007, 0x003904, 0x3B9286, 0x000007,
1271 0x3B9A06, 0x00000D, 0x00008D, 0x000820,
1272 0x00387D, 0x018040, 0x700002, 0x00117D,
1273 0x018042, 0x00197D, 0x29804A, 0x30000A,
1274 0x380002, 0x003124, 0x000424, 0x000424,
1275 0x002A24, 0x280502, 0x00068D, 0x000810,
1276 0x28143A, 0x00750D, 0x00B124, 0x002264,
1277 0x3D0386, 0x284402, 0x000810, 0x280C3A,
1278 0x0B800D, 0x000820, 0x0002FD, 0x018040,
1279 0x200007, 0x00758D, 0x00B124, 0x100102,
1280 0x012144, 0x3E4986, 0x001810, 0x10003A,
1281 0x00387D, 0x018042, 0x08000A, 0x000904,
1282 0x3E4886, 0x030000, 0x3E4A06, 0x0000BD,
1283 0x00008D, 0x023164, 0x000A64, 0x280D02,
1284 0x0B808D, 0x000820, 0x0002FD, 0x018040,
1285 0x200007, 0x00387D, 0x018042, 0x08000A,
1286 0x000904, 0x3E3286, 0x030000, 0x0002FD,
1287 0x018042, 0x08000A, 0x000904, 0x3D8286,
1288 0x000007, 0x002810, 0x28043A, 0x00750D,
1289 0x030924, 0x002264, 0x280D02, 0x02316C,
1290 0x28450A, 0x0B810D, 0x000820, 0x0002FD,
1291 0x018040, 0x200007, 0x00008D, 0x000A24,
1292 0x3E4A06, 0x100102, 0x001810, 0x10003A,
1293 0x0000BD, 0x003810, 0x30043A, 0x00187D,
1294 0x018042, 0x0018FD, 0x09804A, 0x20000A,
1295 0x0000AD, 0x028924, 0x07212C, 0x001010,
1296 0x300583, 0x300D8B, 0x3014BB, 0x301C83,
1297 0x002083, 0x00137D, 0x038042, 0x33844A,
1298 0x33ACCB, 0x33B4CB, 0x33BCCB, 0x33C4CB,
1299 0x33CCCB, 0x33D4CB, 0x305C8B, 0x006083,
1300 0x001E0D, 0x0005FD, 0x018042, 0x20000A,
1301 0x020924, 0x00068D, 0x00A96C, 0x00009D,
1302 0x0002FD, 0x018042, 0x08000A, 0x000904,
1303 0x3F6A86, 0x000007, 0x280502, 0x280D0A,
1304 0x284402, 0x001810, 0x28143A, 0x0C008D,
1305 0x000820, 0x0002FD, 0x018040, 0x220007,
1306 0x003904, 0x225886, 0x001E0D, 0x00057D,
1307 0x018042, 0x20000A, 0x020924, 0x0000A5,
1308 0x0002FD, 0x018042, 0x08000A, 0x000904,
1309 0x402A86, 0x000007, 0x280502, 0x280C02,
1310 0x002010, 0x28143A, 0x0C010D, 0x000820,
1311 0x0002FD, 0x018040, 0x225A06, 0x220007,
1312 0x000000, 0x000000, 0x000000, 0x000000,
1313 0x000000, 0x000000, 0x000000, 0x000000,
1314 0x000000, 0x000000, 0x000000, 0x000000,
1315 0x000000, 0x000000, 0x000000, 0x000000,
1316 0x000000, 0x000000, 0x000000, 0x000000,
1317 0x000000, 0x000000, 0x000000, 0x000000,
1318 0x000000, 0x000000, 0x000000, 0x000000,
1319 0x000000, 0x000000, 0x000000, 0x000000,
1320 0x000000, 0x000000, 0x000000, 0x000000,
1321 0x000000, 0x000000, 0x000000, 0x000000,
1322 0x000000, 0x000000, 0x000000, 0x000000,
1323 0x000000, 0x000000, 0x000000, 0x000000,
1324 0x000000, 0x000000, 0x000000, 0x000000,
1325 0x000000, 0x000000, 0x000000, 0x000000,
1326 0x000000, 0x000000, 0x000000, 0x000000,
1327 0x000000, 0x000000, 0x000000, 0x000000,
1328 0x000000, 0x000000, 0x000000, 0x000000,
1329 0x000000, 0x000000, 0x000000, 0x000000,
1330 0x000000, 0x000000, 0x000000, 0x000000,
1331 0x000000, 0x000000, 0x000000, 0x000000,
1332 0x000000, 0x000000, 0x000000, 0x000000,
1333 0x000000, 0x000000, 0x000000, 0x000000,
1334 0x000000, 0x000000, 0x000000, 0x000000,
1335 0x000000, 0x000000, 0x000000, 0x000000,
1336 0x000000, 0x000000, 0x000000, 0x000000,
1337 0x000000, 0x000000, 0x000000, 0x000000,
1338 0x000000, 0x000000, 0x000000, 0x000000,
1339 0x000000, 0x000000, 0x000000, 0x000000,
1340 0x000000, 0x000000, 0x000000, 0x000000,
1341 0x000000, 0x000000, 0x000000, 0x000000,
1342 0x000000, 0x000000, 0x000000, 0x000000,
1343 0x000000, 0x000000, 0x000000, 0x000000,
1344 0x000000, 0x000000, 0x000000, 0x000000,
1345 0x000000, 0x000000, 0x000000, 0x000000,
1346 0x000000, 0x000000, 0x000000, 0x000000,
1347 0x000000, 0x000000, 0x000000, 0x000000,
1348 0x000000, 0x000000, 0x000000, 0x000000,
1349 0x000000, 0x000000, 0x000000, 0x000000,
1350 0x000000, 0x000000, 0x000000, 0x000000,
1351 0x000000, 0x000000, 0x000000, 0x000000,
1352 0x000000, 0x000000, 0x000000, 0x000000,
1353 0x000000, 0x000000, 0x000000, 0x000000,
1354 0x000000, 0x000000, 0x000000, 0x000000,
1355 0x000000, 0x000000, 0x000000, 0x000000,
1356 0x000000, 0x000000, 0x000000, 0x000000,
1357 0x000000, 0x000000, 0x000000, 0x000000,
1358 0x000000, 0x000000, 0x000000, 0x000000,
1359 0x000000, 0x000000, 0x000000, 0x000000,
1360 0x000000, 0x000000, 0x000000, 0x000000,
1361 0x000000, 0x000000, 0x000000, 0x000000,
1362 0x000000, 0x000000, 0x000000, 0x000000,
1363 0x000000, 0x000000, 0x000000, 0x000000,
1364 0x000000, 0x000000, 0x000000, 0x000000,
1365 0x000000, 0x000000, 0x000000, 0x000000,
1366 0x000000, 0x000000, 0x000000, 0x000000,
1367 0x000000, 0x000000, 0x000000, 0x000000,
1368 0x000000, 0x000000, 0x000000, 0x000000,
1369 0x000000, 0x000000, 0x000000, 0x000000,
1370 0x000000, 0x000000, 0x000000, 0x000000,
1371 0x000000, 0x000000, 0x000000, 0x000000,
1372 0x000000, 0x000000, 0x000000, 0x000000,
1373 0x000000, 0x000000, 0x000000, 0x000000,
1374 0x000000, 0x000000, 0x000000, 0x000000,
1375 0x000000, 0x000000, 0x000000, 0x000000,
1376 0x000000, 0x000000, 0x000000, 0x000000,
1377 0x000000, 0x000000, 0x000000, 0x000000,
1378 0x000000, 0x000000, 0x000000, 0x000000,
1379 0x000000, 0x000000, 0x000000, 0x000000,
1380 0x000000, 0x000000, 0x000000, 0x000000,
1381 0x000000, 0x000000, 0x000000, 0x000000,
1382 0x000000, 0x000000, 0x000000, 0x000000,
1383 0x000000, 0x000000, 0x000000, 0x000000,
1384 0x000000, 0x000000, 0x000000, 0x000000,
1385 0x000000, 0x000000, 0x000000, 0x000000,
1386 0x000000, 0x000000, 0x000000, 0x000000,
1387 0x000000, 0x000000, 0x000000, 0x000000,
1388 0x000000, 0x000000, 0x000000, 0x000000,
1389 0x000000, 0x000000, 0x000000, 0x000000,
1390 0x000000, 0x000000, 0x000000, 0x000000,
1391 0x000000, 0x000000, 0x000000, 0x000000,
1392 0x000000, 0x000000, 0x000000, 0x000000,
1393 0x000000, 0x000000, 0x000000, 0x000000,
1394 0x000000, 0x000000, 0x000000, 0x000000,
1395 0x000000, 0x000000, 0x000000, 0x000000,
1396 0x000000, 0x000000, 0x000000, 0x000000,
1397 0x000000, 0x000000, 0x000000, 0x000000,
1398 0x000000, 0x000000, 0x000000, 0x000000,
1399 0x000000, 0x000000, 0x000000, 0x000000,
1400 0x000000, 0x000000, 0x000000, 0x000000,
1401 0x000000, 0x000000, 0x000000, 0x000000,
1402 0x000000, 0x000000, 0x000000, 0x000000,
1403 0x000000, 0x000000, 0x000000, 0x000000,
1404 0x000000, 0x000000, 0x000000, 0x000000,
1405 0x000000, 0x000000, 0x000000, 0x000000,
1406 0x000000, 0x000000, 0x000000, 0x000000,
1407 0x000000, 0x000000, 0x000000, 0x000000,
1408 0x000000, 0x000000, 0x000000, 0x000000,
1409 0x000000, 0x000000, 0x000000, 0x000000,
1410 0x000000, 0x000000, 0x000000, 0x000000,
1411 0x000000, 0x000000, 0x000000, 0x000000,
1412 0x000000, 0x000000, 0x000000, 0x000000,
1413 0x000000, 0x000000, 0x000000, 0x000000,
1414 0x000000, 0x000000, 0x000000, 0x000000,
1415 0x000000, 0x000000, 0x000000, 0x000000,
1416 0x000000, 0x000000, 0x000000, 0x000000,
1417 0x000000, 0x000000, 0x000000, 0x000000,
1418 0x000000, 0x000000, 0x000000, 0x000000,
1419 0x000000, 0x000000, 0x000000, 0x000000,
1420 0x000000, 0x000000, 0x000000, 0x000000,
1421 0x000000, 0x000000, 0x000000, 0x000000,
1422 0x000000, 0x000000, 0x000000, 0x000000,
1423 0x000000, 0x000000, 0x000000, 0x000000,
1424 0x000000, 0x000000, 0x000000, 0x000000,
1425 0x000000, 0x000000, 0x000000, 0x000000,
1426 0x000000, 0x000000, 0x000000, 0x000000,
1427 0x000000, 0x000000, 0x000000, 0x000000,
1428 0x000000, 0x000000, 0x000000, 0x000000,
1429 0x000000, 0x000000, 0x000000, 0x000000,
1430 0x000000, 0x000000, 0x000000, 0x000000,
1431 0x000000, 0x000000, 0x000000, 0x000000,
1432 0x000000, 0x000000, 0x000000, 0x000000,
1433 0x000000, 0x000000, 0x000000, 0x000000,
1434 0x000000, 0x000000, 0x000000, 0x000000,
1435 0x000000, 0x000000, 0x000000, 0x000000,
1436 0x000000, 0x000000, 0x000000, 0x000000,
1437 0x000000, 0x000000, 0x000000, 0x000000,
1438 0x000000, 0x000000, 0x000000, 0x000000,
1439 0x000000, 0x000000, 0x000000, 0x000000,
1440 0x000000, 0x000000, 0x000000, 0x000000,
1441 0x000000, 0x000000, 0x000000, 0x000000,
1442 0x000000, 0x000000, 0x000000, 0x000000,
1443 0x000000, 0x000000, 0x000000, 0x000000,
1444 0x000000, 0x000000, 0x000000, 0x000000,
1445 0x000000, 0x000000, 0x000000, 0x000000,
1446 0x000000, 0x000000, 0x000000, 0x000000,
1447 0x000000, 0x000000, 0x000000, 0x000000,
1448 0x000000, 0x000000, 0x000000, 0x000000,
1449 0x000000, 0x000000, 0x000000, 0x000000,
1450 0x000000, 0x000000, 0x000000, 0x000000,
1451 0x000000, 0x000000, 0x000000, 0x000000,
1452 0x000000, 0x000000, 0x000000, 0x000000,
1453 0x000000, 0x000000, 0x000000, 0x000000,
1454 0x000000, 0x000000, 0x000000, 0x000000,
1455 0x000000, 0x000000, 0x000000, 0x000000,
1456 0x000000, 0x000000, 0x000000, 0x000000,
1457 0x000000, 0x000000, 0x000000, 0x000000,
1458 0x000000, 0x000000, 0x000000, 0x000000,
1459 0x000000, 0x000000, 0x000000, 0x000000,
1460 0x000000, 0x000000, 0x000000, 0x000000,
1461 0x000000, 0x000000, 0x000000, 0x000000,
1462 0x000000, 0x000000, 0x000000, 0x000000,
1463 0x000000, 0x000000, 0x000000, 0x000000,
1464 0x000000, 0x000000, 0x000000, 0x000000,
1465 0x000000, 0x000000, 0x000000, 0x000000,
1466 0x000000, 0x000000, 0x000000, 0x000000,
1467 0x000000, 0x000000, 0x000000, 0x000000,
1468 0x000000, 0x000000, 0x000000, 0x000000,
1469 0x000000, 0x000000, 0x000000, 0x000000,
1470 0x000000, 0x000000, 0x000000, 0x000000,
1471 0x000000, 0x000000, 0x000000, 0x000000,
1472 0x000000, 0x000000, 0x000000, 0x000000,
1473 0x000000, 0x000000, 0x000000, 0x000000,
1474 0x000000, 0x000000, 0x000000, 0x000000,
1475 0x000000, 0x000000, 0x000000, 0x000000,
1476 0x000000, 0x000000, 0x000000, 0x000000,
1477 0x000000, 0x000000, 0x000000, 0x000000,
1478 0x000000, 0x000000, 0x000000, 0x000000,
1479 0x000000, 0x000000, 0x000000, 0x000000,
1480 0x000000, 0x000000, 0x000000, 0x000000,
1481 0x000000, 0x000000, 0x000000, 0x000000,
1482 0x000000, 0x000000, 0x000000, 0x000000,
1483 0x000000, 0x000000, 0x000000, 0x000000,
1484 0x000000, 0x000000, 0x000000, 0x000000,
1485 0x000000, 0x000000, 0x000000, 0x000000,
1486 0x000000, 0x000000, 0x000000, 0x000000,
1487 0x000000, 0x000000, 0x000000, 0x000000,
1488 0x000000, 0x000000, 0x000000, 0x000000,
1489 0x000000, 0x000000, 0x000000, 0x000000,
1490 0x000000, 0x000000, 0x000000, 0x000000,
1491 0x000000, 0x000000, 0x000000, 0x000000,
1492 0x000000, 0x000000, 0x000000, 0x000000,
1493 0x000000, 0x000000, 0x000000, 0x000000,
1494 0x000000, 0x000000, 0x000000, 0x000000,
1495 0x000000, 0x000000, 0x000000, 0x000000,
1496 0x000000, 0x000000, 0x000000, 0x000000,
1497 0x000000, 0x000000, 0x000000, 0x000000,
1498 0x000000, 0x000000, 0x000000, 0x000000,
1499 0x000000, 0x000000, 0x000000, 0x000000,
1500 0x000000, 0x000000, 0x000000, 0x000000,
1501 0x000000, 0x000000, 0x000000, 0x000000,
1502 0x000000, 0x000000, 0x000000, 0x000000,
1503 0x000000, 0x000000, 0x000000, 0x000000,
1504 0x000000, 0x000000, 0x000000, 0x000000,
1505 0x000000, 0x000000, 0x000000, 0x000000,
1506 0x000000, 0x000000, 0x000000, 0x000000,
1507 0x000000, 0x000000, 0x000000, 0x000000,
1508 0x000000, 0x000000, 0x000000, 0x000000,
1509 0x000000, 0x000000, 0x000000, 0x000000,
1510 0x000000, 0x000000, 0x000000, 0x000000,
1511 0x000000, 0x000000, 0x000000, 0x000000,
1512 0x000000, 0x000000, 0x000000, 0x000000,
1513 0x000000, 0x000000, 0x000000, 0x000000,
1514 0x000000, 0x000000, 0x000000, 0x000000,
1515 0x000000, 0x000000, 0x000000, 0x000000,
1516 0x000000, 0x000000, 0x000000, 0x000000,
1517 0x000000, 0x000000, 0x000000, 0x000000,
1518 0x000000, 0x000000, 0x000000, 0x000000,
1519 0x000000, 0x000000, 0x000000, 0x000000,
1520 0x000000, 0x000000, 0x000000, 0x000000,
1521 0x000000, 0x000000, 0x000000, 0x000000,
1522 0x000000, 0x000000, 0x000000, 0x000000,
1523 0x000000, 0x000000, 0x000000, 0x000000,
1524 0x000000, 0x000000, 0x000000, 0x000000,
1525 0x000000, 0x000000, 0x000000, 0x000000,
1526 0x000000, 0x000000, 0x000000, 0x000000,
1527 0x000000, 0x000000, 0x000000, 0x000000,
1528 0x000000, 0x000000, 0x000000, 0x000000,
1529 0x000000, 0x000000, 0x000000, 0x000000,
1530 0x000000, 0x000000, 0x000000, 0x000000,
1531 0x000000, 0x000000, 0x000000, 0x000000,
1532 0x000000, 0x000000, 0x000000, 0x000000,
1533 0x000000, 0x000000, 0x000000, 0x000000,
1534 0x000000, 0x000000, 0x000000, 0x000000,
1535 0x000000, 0x000000, 0x000000, 0x000000,
1536 0x000000, 0x000000, 0x000000, 0x000000,
1537 0x000000, 0x000000, 0x000000, 0x000000,
1538 0x000000, 0x000000, 0x000000, 0x000000,
1539 0x000000, 0x000000, 0x000000, 0x000000,
1540 0x000000, 0x000000, 0x000000, 0x000000,
1541 0x000000, 0x000000, 0x000000, 0x000000,
1542 0x000000, 0x000000, 0x000000, 0x000000,
1543 0x000000, 0x000000, 0x000000, 0x000000,
1544 0x000000, 0x000000, 0x000000, 0x000000,
1545 0x000000, 0x000000, 0x000000, 0x000000,
1546 0x000000, 0x000000, 0x000000, 0x000000,
1547 0x000000, 0x000000, 0x000000, 0x000000,
1548 0x000000, 0x000000, 0x000000, 0x000000,
1549 0x000000, 0x000000, 0x000000, 0x000000,
1550 0x000000, 0x000000, 0x000000, 0x000000,
1551 0x000000, 0x000000, 0x000000, 0x000000,
1552 0x000000, 0x000000, 0x000000, 0x000000,
1553 0x000000, 0x000000, 0x000000, 0x000000,
1554 0x000000, 0x000000, 0x000000, 0x000000,
1555 0x000000, 0x000000, 0x000000, 0x000000,
1556 0x000000, 0x000000, 0x000000, 0x000000,
1557 0x000000, 0x000000, 0x000000, 0x000000,
1558 0x000000, 0x000000, 0x000000, 0x000000,
1559 0x000000, 0x000000, 0x000000, 0x000000,
1560 0x000000, 0x000000, 0x000000, 0x000000,
1561 0x000000, 0x000000, 0x000000, 0x000000,
1562 0x000000, 0x000000, 0x000000, 0x000000
1563};
1564
1565#endif //_HWMCODE_
diff --git a/sound/oss/yss225.c b/sound/oss/yss225.c
deleted file mode 100644
index e700400576d8..000000000000
--- a/sound/oss/yss225.c
+++ /dev/null
@@ -1,319 +0,0 @@
1#include <linux/init.h>
2
3unsigned char page_zero[] __initdata = {
40x01, 0x7c, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf5, 0x00,
50x11, 0x00, 0x20, 0x00, 0x32, 0x00, 0x40, 0x00, 0x13, 0x00, 0x00,
60x00, 0x14, 0x02, 0x76, 0x00, 0x60, 0x00, 0x80, 0x02, 0x00, 0x00,
70x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
80x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
90x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
100x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
110x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
120x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
130x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
140x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
150x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x19,
160x01, 0x1a, 0x01, 0x20, 0x01, 0x40, 0x01, 0x17, 0x00, 0x00, 0x01,
170x80, 0x01, 0x20, 0x00, 0x10, 0x01, 0xa0, 0x03, 0xd1, 0x00, 0x00,
180x01, 0xf2, 0x02, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0xf4, 0x02,
190xe0, 0x00, 0x15, 0x00, 0x00, 0x00, 0x16, 0x00, 0x00, 0x00, 0x17,
200x00, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x50, 0x00, 0x00, 0x00,
210x40, 0x00, 0x00, 0x00, 0x71, 0x02, 0x00, 0x00, 0x60, 0x00, 0x00,
220x00, 0x92, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb3, 0x02,
230x00, 0x00, 0xa0, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00, 0x40,
240x00, 0x80, 0x00, 0xf5, 0x00, 0x20, 0x00, 0x70, 0x00, 0xa0, 0x02,
250x11, 0x00, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
260x02, 0x00, 0x00, 0x20, 0x00, 0x10, 0x00, 0x17, 0x00, 0x1b, 0x00,
270x1d, 0x02, 0xdf
28};
29
30unsigned char page_one[] __initdata = {
310x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x19, 0x00,
320x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xd8, 0x00, 0x00,
330x02, 0x20, 0x00, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x01,
340xc0, 0x01, 0xfa, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
350x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
370x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
380x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
390x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
400x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
410x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
420x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x40, 0x02, 0x60,
430x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xc0, 0x02, 0x80, 0x00,
440x00, 0x02, 0xfb, 0x02, 0xa0, 0x00, 0x00, 0x00, 0x1b, 0x02, 0xd7,
450x00, 0x00, 0x02, 0xf7, 0x03, 0x20, 0x03, 0x00, 0x00, 0x00, 0x00,
460x1c, 0x03, 0x3c, 0x00, 0x00, 0x03, 0x3f, 0x00, 0x00, 0x03, 0xc0,
470x00, 0x00, 0x03, 0xdf, 0x00, 0x00, 0x00, 0x00, 0x03, 0x5d, 0x00,
480x00, 0x03, 0xc0, 0x00, 0x00, 0x03, 0x7d, 0x00, 0x00, 0x03, 0xc0,
490x00, 0x00, 0x03, 0x9e, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x03,
500xbe, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
510x00, 0x00, 0x00, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
520xdb, 0x00, 0x00, 0x02, 0xdb, 0x00, 0x00, 0x02, 0xe0, 0x00, 0x00,
530x02, 0xfb, 0x00, 0x00, 0x02, 0xc0, 0x02, 0x40, 0x02, 0xfb, 0x02,
540x60, 0x00, 0x1b
55};
56
57unsigned char page_two[] __initdata = {
580xc4, 0x00, 0x44, 0x07, 0x44, 0x00, 0x40, 0x25, 0x01, 0x06, 0xc4,
590x07, 0x40, 0x25, 0x01, 0x00, 0x46, 0x46, 0x00, 0x00, 0x00, 0x00,
600x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
610x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
620x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
630x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0x07,
640x05, 0x05, 0x05, 0x04, 0x07, 0x05, 0x04, 0x07, 0x05, 0x44, 0x46,
650x44, 0x46, 0x46, 0x07, 0x05, 0x44, 0x46, 0x05, 0x46, 0x05, 0x46,
660x05, 0x46, 0x05, 0x44, 0x46, 0x05, 0x07, 0x44, 0x46, 0x05, 0x07,
670x44, 0x46, 0x05, 0x07, 0x44, 0x46, 0x05, 0x07, 0x44, 0x05, 0x05,
680x05, 0x44, 0x05, 0x05, 0x05, 0x46, 0x05, 0x46, 0x05, 0x46, 0x05,
690x46, 0x05, 0x46, 0x07, 0x46, 0x07, 0x44
70};
71
72unsigned char page_three[] __initdata = {
730x07, 0x40, 0x00, 0x00, 0x00, 0x47, 0x00, 0x40, 0x00, 0x40, 0x06,
740x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
750x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
760x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
770x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
780x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x80,
790xc0, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x40, 0x00, 0x40, 0x00,
800x60, 0x00, 0x70, 0x00, 0x40, 0x00, 0x40, 0x00, 0x42, 0x00, 0x40,
810x00, 0x02, 0x00, 0x40, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
820x40, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00,
830x00, 0x42, 0x00, 0x40, 0x00, 0x42, 0x00, 0x02, 0x00, 0x02, 0x00,
840x02, 0x00, 0x42, 0x00, 0xc0, 0x00, 0x40
85};
86
87unsigned char page_four[] __initdata = {
880x63, 0x03, 0x26, 0x02, 0x2c, 0x00, 0x24, 0x00, 0x2e, 0x02, 0x02,
890x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
900x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
910x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
920x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
930x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
940x20, 0x00, 0x60, 0x00, 0x20, 0x00, 0x20, 0x00, 0x20, 0x00, 0x20,
950x00, 0x20, 0x00, 0x20, 0x00, 0x20, 0x00, 0x20, 0x00, 0x60, 0x00,
960x20, 0x00, 0x60, 0x00, 0x20, 0x00, 0x60, 0x00, 0x20, 0x00, 0x60,
970x00, 0x20, 0x00, 0x60, 0x00, 0x20, 0x00, 0x60, 0x00, 0x20, 0x00,
980x20, 0x00, 0x22, 0x02, 0x22, 0x02, 0x20, 0x00, 0x60, 0x00, 0x22,
990x02, 0x62, 0x02, 0x20, 0x01, 0x21, 0x01
100};
101
102unsigned char page_six[] __initdata = {
1030x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x00, 0x06, 0x00,
1040x00, 0x08, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x0e,
1050x00, 0x00, 0x10, 0x00, 0x00, 0x12, 0x00, 0x00, 0x14, 0x00, 0x00,
1060x16, 0x00, 0x00, 0x18, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x1c, 0x00,
1070x00, 0x1e, 0x00, 0x00, 0x20, 0x00, 0x00, 0x22, 0x00, 0x00, 0x24,
1080x00, 0x00, 0x26, 0x00, 0x00, 0x28, 0x00, 0x00, 0x2a, 0x00, 0x00,
1090x2c, 0x00, 0x00, 0x2e, 0x00, 0x00, 0x30, 0x00, 0x00, 0x32, 0x00,
1100x00, 0x34, 0x00, 0x00, 0x36, 0x00, 0x00, 0x38, 0x00, 0x00, 0x3a,
1110x00, 0x00, 0x3c, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x40, 0x00, 0x00,
1120x42, 0x03, 0x00, 0x44, 0x01, 0x00, 0x46, 0x0a, 0x21, 0x48, 0x0d,
1130x23, 0x4a, 0x23, 0x1b, 0x4c, 0x37, 0x8f, 0x4e, 0x45, 0x77, 0x50,
1140x52, 0xe2, 0x52, 0x1c, 0x92, 0x54, 0x1c, 0x52, 0x56, 0x07, 0x00,
1150x58, 0x2f, 0xc6, 0x5a, 0x0b, 0x00, 0x5c, 0x30, 0x06, 0x5e, 0x17,
1160x00, 0x60, 0x3d, 0xda, 0x62, 0x29, 0x00, 0x64, 0x3e, 0x41, 0x66,
1170x39, 0x00, 0x68, 0x4c, 0x48, 0x6a, 0x49, 0x00, 0x6c, 0x4c, 0x6c,
1180x6e, 0x11, 0xd2, 0x70, 0x16, 0x0c, 0x72, 0x00, 0x00, 0x74, 0x00,
1190x80, 0x76, 0x0f, 0x00, 0x78, 0x00, 0x80, 0x7a, 0x13, 0x00, 0x7c,
1200x80, 0x00, 0x7e, 0x80, 0x80
121};
122
123unsigned char page_seven[] __initdata = {
1240x0f, 0xff, 0x00, 0x00, 0x08, 0x00, 0x08, 0x00, 0x02, 0x00, 0x00,
1250x00, 0x00, 0x00, 0x0f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00,
1260x08, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x0f,
1270xff, 0x0f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1280x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1290x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1300x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1310x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1320x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1330x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1340x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1350x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1360x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1370x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0xff,
1380x0f, 0xff, 0x0f, 0xff, 0x02, 0xe9, 0x06, 0x8c, 0x06, 0x8c, 0x0f,
1390xff, 0x1a, 0x75, 0x0d, 0x8b, 0x04, 0xe9, 0x0b, 0x16, 0x1a, 0x38,
1400x0d, 0xc8, 0x04, 0x6f, 0x0b, 0x91, 0x0f, 0xff, 0x06, 0x40, 0x06,
1410x40, 0x02, 0x8f, 0x0f, 0xff, 0x06, 0x62, 0x06, 0x62, 0x02, 0x7b,
1420x0f, 0xff, 0x06, 0x97, 0x06, 0x97, 0x02, 0x52, 0x0f, 0xff, 0x06,
1430xf6, 0x06, 0xf6, 0x02, 0x19, 0x05, 0x55, 0x05, 0x55, 0x05, 0x55,
1440x05, 0x55, 0x05, 0x55, 0x05, 0x55, 0x05, 0x55, 0x05, 0x55, 0x14,
1450xda, 0x0d, 0x93, 0x04, 0xda, 0x05, 0x93, 0x14, 0xda, 0x0d, 0x93,
1460x04, 0xda, 0x05, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1470x00, 0x02, 0x00
148};
149
150unsigned char page_zero_v2[] __initdata = {
1510x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1520x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1530x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1540x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1550x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1560x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1570x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1580x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1590x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
160};
161
162unsigned char page_one_v2[] __initdata = {
1630x01, 0xc0, 0x01, 0xfa, 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00,
1640x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1650x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1660x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1670x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1680x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1690x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1700x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1710x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
172};
173
174unsigned char page_two_v2[] __initdata = {
1750x46, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1760x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1770x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1780x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1790x00, 0x00, 0x00, 0x00
180};
181unsigned char page_three_v2[] __initdata = {
1820x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1830x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1840x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1850x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1860x00, 0x00, 0x00, 0x00
187};
188unsigned char page_four_v2[] __initdata = {
1890x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1900x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1910x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1920x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1930x00, 0x00, 0x00, 0x00
194};
195
196unsigned char page_seven_v2[] __initdata = {
1970x0f, 0xff, 0x0f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1980x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1990x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2000x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2010x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2020x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2030x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2040x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2050x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
206};
207unsigned char mod_v2[] __initdata = {
2080x01, 0x00, 0x02, 0x00, 0x01, 0x01, 0x02, 0x00, 0x01, 0x02, 0x02,
2090x00, 0x01, 0x03, 0x02, 0x00, 0x01, 0x04, 0x02, 0x00, 0x01, 0x05,
2100x02, 0x00, 0x01, 0x06, 0x02, 0x00, 0x01, 0x07, 0x02, 0x00, 0xb0,
2110x20, 0xb1, 0x20, 0xb2, 0x20, 0xb3, 0x20, 0xb4, 0x20, 0xb5, 0x20,
2120xb6, 0x20, 0xb7, 0x20, 0xf0, 0x20, 0xf1, 0x20, 0xf2, 0x20, 0xf3,
2130x20, 0xf4, 0x20, 0xf5, 0x20, 0xf6, 0x20, 0xf7, 0x20, 0x10, 0xff,
2140x11, 0xff, 0x12, 0xff, 0x13, 0xff, 0x14, 0xff, 0x15, 0xff, 0x16,
2150xff, 0x17, 0xff, 0x20, 0xff, 0x21, 0xff, 0x22, 0xff, 0x23, 0xff,
2160x24, 0xff, 0x25, 0xff, 0x26, 0xff, 0x27, 0xff, 0x30, 0x00, 0x31,
2170x00, 0x32, 0x00, 0x33, 0x00, 0x34, 0x00, 0x35, 0x00, 0x36, 0x00,
2180x37, 0x00, 0x40, 0x00, 0x41, 0x00, 0x42, 0x00, 0x43, 0x00, 0x44,
2190x00, 0x45, 0x00, 0x46, 0x00, 0x47, 0x00, 0x50, 0x00, 0x51, 0x00,
2200x52, 0x00, 0x53, 0x00, 0x54, 0x00, 0x55, 0x00, 0x56, 0x00, 0x57,
2210x00, 0x60, 0x00, 0x61, 0x00, 0x62, 0x00, 0x63, 0x00, 0x64, 0x00,
2220x65, 0x00, 0x66, 0x00, 0x67, 0x00, 0x70, 0xc0, 0x71, 0xc0, 0x72,
2230xc0, 0x73, 0xc0, 0x74, 0xc0, 0x75, 0xc0, 0x76, 0xc0, 0x77, 0xc0,
2240x80, 0x00, 0x81, 0x00, 0x82, 0x00, 0x83, 0x00, 0x84, 0x00, 0x85,
2250x00, 0x86, 0x00, 0x87, 0x00, 0x90, 0x00, 0x91, 0x00, 0x92, 0x00,
2260x93, 0x00, 0x94, 0x00, 0x95, 0x00, 0x96, 0x00, 0x97, 0x00, 0xa0,
2270x00, 0xa1, 0x00, 0xa2, 0x00, 0xa3, 0x00, 0xa4, 0x00, 0xa5, 0x00,
2280xa6, 0x00, 0xa7, 0x00, 0xc0, 0x00, 0xc1, 0x00, 0xc2, 0x00, 0xc3,
2290x00, 0xc4, 0x00, 0xc5, 0x00, 0xc6, 0x00, 0xc7, 0x00, 0xd0, 0x00,
2300xd1, 0x00, 0xd2, 0x00, 0xd3, 0x00, 0xd4, 0x00, 0xd5, 0x00, 0xd6,
2310x00, 0xd7, 0x00, 0xe0, 0x00, 0xe1, 0x00, 0xe2, 0x00, 0xe3, 0x00,
2320xe4, 0x00, 0xe5, 0x00, 0xe6, 0x00, 0xe7, 0x00, 0x01, 0x00, 0x02,
2330x01, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x02, 0x01, 0x01, 0x03,
2340x02, 0x01, 0x01, 0x04, 0x02, 0x01, 0x01, 0x05, 0x02, 0x01, 0x01,
2350x06, 0x02, 0x01, 0x01, 0x07, 0x02, 0x01
236};
237unsigned char coefficients[] __initdata = {
2380x07, 0x46, 0x00, 0x00, 0x07, 0x49, 0x00, 0x00, 0x00, 0x4b, 0x03,
2390x11, 0x00, 0x4d, 0x01, 0x32, 0x07, 0x46, 0x00, 0x00, 0x07, 0x49,
2400x00, 0x00, 0x07, 0x40, 0x00, 0x00, 0x07, 0x41, 0x00, 0x00, 0x01,
2410x40, 0x02, 0x40, 0x01, 0x41, 0x02, 0x60, 0x07, 0x40, 0x00, 0x00,
2420x07, 0x41, 0x00, 0x00, 0x07, 0x47, 0x00, 0x00, 0x07, 0x4a, 0x00,
2430x00, 0x00, 0x47, 0x01, 0x00, 0x00, 0x4a, 0x01, 0x20, 0x07, 0x47,
2440x00, 0x00, 0x07, 0x4a, 0x00, 0x00, 0x07, 0x7c, 0x00, 0x00, 0x07,
2450x7e, 0x00, 0x00, 0x00, 0x00, 0x01, 0x1c, 0x07, 0x7c, 0x00, 0x00,
2460x07, 0x7e, 0x00, 0x00, 0x07, 0x44, 0x00, 0x00, 0x00, 0x44, 0x01,
2470x00, 0x07, 0x44, 0x00, 0x00, 0x07, 0x42, 0x00, 0x00, 0x07, 0x43,
2480x00, 0x00, 0x00, 0x42, 0x01, 0x1a, 0x00, 0x43, 0x01, 0x20, 0x07,
2490x42, 0x00, 0x00, 0x07, 0x43, 0x00, 0x00, 0x07, 0x40, 0x00, 0x00,
2500x07, 0x41, 0x00, 0x00, 0x01, 0x40, 0x02, 0x40, 0x01, 0x41, 0x02,
2510x60, 0x07, 0x40, 0x00, 0x00, 0x07, 0x41, 0x00, 0x00, 0x07, 0x44,
2520x0f, 0xff, 0x07, 0x42, 0x00, 0x00, 0x07, 0x43, 0x00, 0x00, 0x07,
2530x40, 0x00, 0x00, 0x07, 0x41, 0x00, 0x00, 0x07, 0x51, 0x06, 0x40,
2540x07, 0x50, 0x06, 0x40, 0x07, 0x4f, 0x03, 0x81, 0x07, 0x53, 0x1a,
2550x76, 0x07, 0x54, 0x0d, 0x8b, 0x07, 0x55, 0x04, 0xe9, 0x07, 0x56,
2560x0b, 0x17, 0x07, 0x57, 0x1a, 0x38, 0x07, 0x58, 0x0d, 0xc9, 0x07,
2570x59, 0x04, 0x6f, 0x07, 0x5a, 0x0b, 0x91, 0x07, 0x73, 0x14, 0xda,
2580x07, 0x74, 0x0d, 0x93, 0x07, 0x75, 0x04, 0xd9, 0x07, 0x76, 0x05,
2590x93, 0x07, 0x77, 0x14, 0xda, 0x07, 0x78, 0x0d, 0x93, 0x07, 0x79,
2600x04, 0xd9, 0x07, 0x7a, 0x05, 0x93, 0x07, 0x5e, 0x03, 0x68, 0x07,
2610x5c, 0x04, 0x31, 0x07, 0x5d, 0x04, 0x31, 0x07, 0x62, 0x03, 0x52,
2620x07, 0x60, 0x04, 0x76, 0x07, 0x61, 0x04, 0x76, 0x07, 0x66, 0x03,
2630x2e, 0x07, 0x64, 0x04, 0xda, 0x07, 0x65, 0x04, 0xda, 0x07, 0x6a,
2640x02, 0xf6, 0x07, 0x68, 0x05, 0x62, 0x07, 0x69, 0x05, 0x62, 0x06,
2650x46, 0x0a, 0x22, 0x06, 0x48, 0x0d, 0x24, 0x06, 0x6e, 0x11, 0xd3,
2660x06, 0x70, 0x15, 0xcb, 0x06, 0x52, 0x20, 0x93, 0x06, 0x54, 0x20,
2670x54, 0x06, 0x4a, 0x27, 0x1d, 0x06, 0x58, 0x2f, 0xc8, 0x06, 0x5c,
2680x30, 0x07, 0x06, 0x4c, 0x37, 0x90, 0x06, 0x60, 0x3d, 0xdb, 0x06,
2690x64, 0x3e, 0x42, 0x06, 0x4e, 0x45, 0x78, 0x06, 0x68, 0x4c, 0x48,
2700x06, 0x6c, 0x4c, 0x6c, 0x06, 0x50, 0x52, 0xe2, 0x06, 0x42, 0x02,
2710xba
272};
273unsigned char coefficients2[] __initdata = {
2740x07, 0x46, 0x00, 0x00, 0x07, 0x49, 0x00, 0x00, 0x07, 0x45, 0x0f,
2750xff, 0x07, 0x48, 0x0f, 0xff, 0x07, 0x7b, 0x04, 0xcc, 0x07, 0x7d,
2760x04, 0xcc, 0x07, 0x7c, 0x00, 0x00, 0x07, 0x7e, 0x00, 0x00, 0x07,
2770x46, 0x00, 0x00, 0x07, 0x49, 0x00, 0x00, 0x07, 0x47, 0x00, 0x00,
2780x07, 0x4a, 0x00, 0x00, 0x07, 0x4c, 0x00, 0x00, 0x07, 0x4e, 0x00, 0x00
279};
280unsigned char coefficients3[] __initdata = {
2810x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x00, 0x28, 0x00, 0x51, 0x00,
2820x51, 0x00, 0x7a, 0x00, 0x7a, 0x00, 0xa3, 0x00, 0xa3, 0x00, 0xcc,
2830x00, 0xcc, 0x00, 0xf5, 0x00, 0xf5, 0x01, 0x1e, 0x01, 0x1e, 0x01,
2840x47, 0x01, 0x47, 0x01, 0x70, 0x01, 0x70, 0x01, 0x99, 0x01, 0x99,
2850x01, 0xc2, 0x01, 0xc2, 0x01, 0xeb, 0x01, 0xeb, 0x02, 0x14, 0x02,
2860x14, 0x02, 0x3d, 0x02, 0x3d, 0x02, 0x66, 0x02, 0x66, 0x02, 0x8f,
2870x02, 0x8f, 0x02, 0xb8, 0x02, 0xb8, 0x02, 0xe1, 0x02, 0xe1, 0x03,
2880x0a, 0x03, 0x0a, 0x03, 0x33, 0x03, 0x33, 0x03, 0x5c, 0x03, 0x5c,
2890x03, 0x85, 0x03, 0x85, 0x03, 0xae, 0x03, 0xae, 0x03, 0xd7, 0x03,
2900xd7, 0x04, 0x00, 0x04, 0x00, 0x04, 0x28, 0x04, 0x28, 0x04, 0x51,
2910x04, 0x51, 0x04, 0x7a, 0x04, 0x7a, 0x04, 0xa3, 0x04, 0xa3, 0x04,
2920xcc, 0x04, 0xcc, 0x04, 0xf5, 0x04, 0xf5, 0x05, 0x1e, 0x05, 0x1e,
2930x05, 0x47, 0x05, 0x47, 0x05, 0x70, 0x05, 0x70, 0x05, 0x99, 0x05,
2940x99, 0x05, 0xc2, 0x05, 0xc2, 0x05, 0xeb, 0x05, 0xeb, 0x06, 0x14,
2950x06, 0x14, 0x06, 0x3d, 0x06, 0x3d, 0x06, 0x66, 0x06, 0x66, 0x06,
2960x8f, 0x06, 0x8f, 0x06, 0xb8, 0x06, 0xb8, 0x06, 0xe1, 0x06, 0xe1,
2970x07, 0x0a, 0x07, 0x0a, 0x07, 0x33, 0x07, 0x33, 0x07, 0x5c, 0x07,
2980x5c, 0x07, 0x85, 0x07, 0x85, 0x07, 0xae, 0x07, 0xae, 0x07, 0xd7,
2990x07, 0xd7, 0x08, 0x00, 0x08, 0x00, 0x08, 0x28, 0x08, 0x28, 0x08,
3000x51, 0x08, 0x51, 0x08, 0x7a, 0x08, 0x7a, 0x08, 0xa3, 0x08, 0xa3,
3010x08, 0xcc, 0x08, 0xcc, 0x08, 0xf5, 0x08, 0xf5, 0x09, 0x1e, 0x09,
3020x1e, 0x09, 0x47, 0x09, 0x47, 0x09, 0x70, 0x09, 0x70, 0x09, 0x99,
3030x09, 0x99, 0x09, 0xc2, 0x09, 0xc2, 0x09, 0xeb, 0x09, 0xeb, 0x0a,
3040x14, 0x0a, 0x14, 0x0a, 0x3d, 0x0a, 0x3d, 0x0a, 0x66, 0x0a, 0x66,
3050x0a, 0x8f, 0x0a, 0x8f, 0x0a, 0xb8, 0x0a, 0xb8, 0x0a, 0xe1, 0x0a,
3060xe1, 0x0b, 0x0a, 0x0b, 0x0a, 0x0b, 0x33, 0x0b, 0x33, 0x0b, 0x5c,
3070x0b, 0x5c, 0x0b, 0x85, 0x0b, 0x85, 0x0b, 0xae, 0x0b, 0xae, 0x0b,
3080xd7, 0x0b, 0xd7, 0x0c, 0x00, 0x0c, 0x00, 0x0c, 0x28, 0x0c, 0x28,
3090x0c, 0x51, 0x0c, 0x51, 0x0c, 0x7a, 0x0c, 0x7a, 0x0c, 0xa3, 0x0c,
3100xa3, 0x0c, 0xcc, 0x0c, 0xcc, 0x0c, 0xf5, 0x0c, 0xf5, 0x0d, 0x1e,
3110x0d, 0x1e, 0x0d, 0x47, 0x0d, 0x47, 0x0d, 0x70, 0x0d, 0x70, 0x0d,
3120x99, 0x0d, 0x99, 0x0d, 0xc2, 0x0d, 0xc2, 0x0d, 0xeb, 0x0d, 0xeb,
3130x0e, 0x14, 0x0e, 0x14, 0x0e, 0x3d, 0x0e, 0x3d, 0x0e, 0x66, 0x0e,
3140x66, 0x0e, 0x8f, 0x0e, 0x8f, 0x0e, 0xb8, 0x0e, 0xb8, 0x0e, 0xe1,
3150x0e, 0xe1, 0x0f, 0x0a, 0x0f, 0x0a, 0x0f, 0x33, 0x0f, 0x33, 0x0f,
3160x5c, 0x0f, 0x5c, 0x0f, 0x85, 0x0f, 0x85, 0x0f, 0xae, 0x0f, 0xae,
3170x0f, 0xd7, 0x0f, 0xd7, 0x0f, 0xff, 0x0f, 0xff
318};
319
diff --git a/sound/oss/yss225.h b/sound/oss/yss225.h
deleted file mode 100644
index 56d8b6b5e432..000000000000
--- a/sound/oss/yss225.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef __yss255_h__
2#define __yss255_h__
3
4extern unsigned char page_zero[256];
5extern unsigned char page_one[256];
6extern unsigned char page_two[128];
7extern unsigned char page_three[128];
8extern unsigned char page_four[128];
9extern unsigned char page_six[192];
10extern unsigned char page_seven[256];
11extern unsigned char page_zero_v2[96];
12extern unsigned char page_one_v2[96];
13extern unsigned char page_two_v2[48];
14extern unsigned char page_three_v2[48];
15extern unsigned char page_four_v2[48];
16extern unsigned char page_seven_v2[96];
17extern unsigned char mod_v2[304];
18extern unsigned char coefficients[364];
19extern unsigned char coefficients2[56];
20extern unsigned char coefficients3[404];
21
22
23#endif /* __ys225_h__ */
24
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 0b0a016ca6d6..5322c50c9617 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -366,25 +366,6 @@ int register_sound_dsp(const struct file_operations *fops, int dev)
366EXPORT_SYMBOL(register_sound_dsp); 366EXPORT_SYMBOL(register_sound_dsp);
367 367
368/** 368/**
369 * register_sound_synth - register a synth device
370 * @fops: File operations for the driver
371 * @dev: Unit number to allocate
372 *
373 * Allocate a synth device. Unit is the number of the synth device requested.
374 * Pass -1 to request the next free synth unit. On success the allocated
375 * number is returned, on failure a negative error code is returned.
376 */
377
378
379int register_sound_synth(const struct file_operations *fops, int dev)
380{
381 return sound_insert_unit(&chains[9], fops, dev, 9, 137,
382 "synth", S_IRUSR | S_IWUSR, NULL);
383}
384
385EXPORT_SYMBOL(register_sound_synth);
386
387/**
388 * unregister_sound_special - unregister a special sound device 369 * unregister_sound_special - unregister a special sound device
389 * @unit: unit number to allocate 370 * @unit: unit number to allocate
390 * 371 *
@@ -449,21 +430,6 @@ void unregister_sound_dsp(int unit)
449 430
450EXPORT_SYMBOL(unregister_sound_dsp); 431EXPORT_SYMBOL(unregister_sound_dsp);
451 432
452/**
453 * unregister_sound_synth - unregister a synth device
454 * @unit: unit number to allocate
455 *
456 * Release a sound device that was allocated with register_sound_synth().
457 * The unit passed is the return value from the register function.
458 */
459
460void unregister_sound_synth(int unit)
461{
462 return sound_remove_unit(&chains[9], unit);
463}
464
465EXPORT_SYMBOL(unregister_sound_synth);
466
467/* 433/*
468 * Now our file operations 434 * Now our file operations
469 */ 435 */