Diffstat (limited to 'drivers')
 drivers/char/ser_a2232.c | 52
 drivers/char/vme_scc.c | 59
 drivers/crypto/talitos.c | 49
 drivers/firewire/Kconfig | 9
 drivers/firewire/fw-card.c | 2
 drivers/firewire/fw-cdev.c | 6
 drivers/firewire/fw-ohci.c | 37
 drivers/firewire/fw-topology.c | 2
 drivers/firewire/fw-transaction.c | 79
 drivers/firmware/memmap.c | 6
 drivers/infiniband/core/ucm.c | 10
 drivers/infiniband/core/ucma.c | 11
 drivers/infiniband/hw/mlx4/cq.c | 1
 drivers/infiniband/hw/mlx4/main.c | 1
 drivers/infiniband/hw/mlx4/mlx4_ib.h | 1
 drivers/infiniband/hw/mlx4/mr.c | 1
 drivers/infiniband/hw/mlx4/qp.c | 1
 drivers/infiniband/hw/mlx4/srq.c | 1
 drivers/infiniband/hw/mlx4/user.h | 1
 drivers/infiniband/hw/nes/nes.c | 4
 drivers/infiniband/hw/nes/nes_cm.c | 2034
 drivers/infiniband/hw/nes/nes_cm.h | 23
 drivers/infiniband/hw/nes/nes_hw.c | 9
 drivers/infiniband/hw/nes/nes_verbs.c | 15
 drivers/infiniband/ulp/ipoib/Kconfig | 22
 drivers/isdn/Kconfig | 4
 drivers/isdn/Makefile | 1
 drivers/isdn/hardware/Makefile | 1
 drivers/isdn/hardware/mISDN/Kconfig | 26
 drivers/isdn/hardware/mISDN/Makefile | 7
 drivers/isdn/hardware/mISDN/hfc_multi.h | 1204
 drivers/isdn/hardware/mISDN/hfc_pci.h | 228
 drivers/isdn/hardware/mISDN/hfcmulti.c | 5320
 drivers/isdn/hardware/mISDN/hfcpci.c | 2256
 drivers/isdn/mISDN/Kconfig | 44
 drivers/isdn/mISDN/Makefile | 13
 drivers/isdn/mISDN/core.c | 244
 drivers/isdn/mISDN/core.h | 77
 drivers/isdn/mISDN/dsp.h | 263
 drivers/isdn/mISDN/dsp_audio.c | 434
 drivers/isdn/mISDN/dsp_biquad.h | 65
 drivers/isdn/mISDN/dsp_blowfish.c | 672
 drivers/isdn/mISDN/dsp_cmx.c | 1886
 drivers/isdn/mISDN/dsp_core.c | 1191
 drivers/isdn/mISDN/dsp_dtmf.c | 303
 drivers/isdn/mISDN/dsp_ecdis.h | 110
 drivers/isdn/mISDN/dsp_hwec.c | 138
 drivers/isdn/mISDN/dsp_hwec.h | 10
 drivers/isdn/mISDN/dsp_pipeline.c | 348
 drivers/isdn/mISDN/dsp_tones.c | 551
 drivers/isdn/mISDN/fsm.c | 183
 drivers/isdn/mISDN/fsm.h | 67
 drivers/isdn/mISDN/hwchannel.c | 365
 drivers/isdn/mISDN/l1oip.h | 91
 drivers/isdn/mISDN/l1oip_codec.c | 374
 drivers/isdn/mISDN/l1oip_core.c | 1518
 drivers/isdn/mISDN/layer1.c | 403
 drivers/isdn/mISDN/layer1.h | 26
 drivers/isdn/mISDN/layer2.c | 2216
 drivers/isdn/mISDN/layer2.h | 140
 drivers/isdn/mISDN/socket.c | 781
 drivers/isdn/mISDN/stack.c | 674
 drivers/isdn/mISDN/tei.c | 1340
 drivers/isdn/mISDN/timerdev.c | 301
 drivers/md/dm-mpath.c | 13
 drivers/message/fusion/mptbase.c | 24
 drivers/message/fusion/mptctl.c | 4
 drivers/message/fusion/mptfc.c | 8
 drivers/message/fusion/mptlan.c | 26
 drivers/message/fusion/mptsas.c | 54
 drivers/message/fusion/mptscsih.c | 4
 drivers/misc/Kconfig | 2
 drivers/misc/atmel-ssc.c | 1
 drivers/mmc/core/Makefile | 1
 drivers/mmc/core/bus.c | 8
 drivers/mmc/core/core.h | 7
 drivers/mmc/core/debugfs.c | 225
 drivers/mmc/core/host.c | 8
 drivers/mmc/host/atmel-mci-regs.h | 2
 drivers/mmc/host/atmel-mci.c | 206
 drivers/mmc/host/imxmmc.c | 50
 drivers/mmc/host/mmc_spi.c | 3
 drivers/mtd/Kconfig | 2
 drivers/mtd/afs.c | 2
 drivers/mtd/chips/cfi_cmdset_0001.c | 17
 drivers/mtd/chips/cfi_cmdset_0002.c | 3
 drivers/mtd/chips/cfi_cmdset_0020.c | 2
 drivers/mtd/chips/cfi_probe.c | 1
 drivers/mtd/chips/cfi_util.c | 3
 drivers/mtd/chips/chipreg.c | 2
 drivers/mtd/chips/gen_probe.c | 5
 drivers/mtd/chips/jedec_probe.c | 133
 drivers/mtd/chips/map_absent.c | 1
 drivers/mtd/chips/map_ram.c | 1
 drivers/mtd/chips/map_rom.c | 1
 drivers/mtd/cmdlinepart.c | 4
 drivers/mtd/devices/Kconfig | 1
 drivers/mtd/devices/Makefile | 1
 drivers/mtd/devices/block2mtd.c | 6
 drivers/mtd/devices/doc2000.c | 2
 drivers/mtd/devices/doc2001.c | 2
 drivers/mtd/devices/doc2001plus.c | 2
 drivers/mtd/devices/docecc.c | 2
 drivers/mtd/devices/docprobe.c | 5
 drivers/mtd/devices/lart.c | 2
 drivers/mtd/devices/m25p80.c | 22
 drivers/mtd/devices/ms02-nv.c | 2
 drivers/mtd/devices/ms02-nv.h | 2
 drivers/mtd/devices/mtd_dataflash.c | 135
 drivers/mtd/devices/mtdram.c | 1
 drivers/mtd/devices/phram.c | 2
 drivers/mtd/devices/pmc551.c | 2
 drivers/mtd/devices/slram.c | 2
 drivers/mtd/ftl.c | 3
 drivers/mtd/inftlcore.c | 5
 drivers/mtd/inftlmount.c | 4
 drivers/mtd/maps/Kconfig | 30
 drivers/mtd/maps/Makefile | 3
 drivers/mtd/maps/amd76xrom.c | 1
 drivers/mtd/maps/autcpu12-nvram.c | 2
 drivers/mtd/maps/bast-flash.c | 226
 drivers/mtd/maps/bfin-async-flash.c | 219
 drivers/mtd/maps/cdb89712.c | 1
 drivers/mtd/maps/ceiva.c | 1
 drivers/mtd/maps/cfi_flagadm.c | 2
 drivers/mtd/maps/dbox2-flash.c | 2
 drivers/mtd/maps/dc21285.c | 2
 drivers/mtd/maps/dilnetpc.c | 2
 drivers/mtd/maps/dmv182.c | 2
 drivers/mtd/maps/ebony.c | 2
 drivers/mtd/maps/edb7312.c | 2
 drivers/mtd/maps/fortunet.c | 1
 drivers/mtd/maps/h720x-flash.c | 2
 drivers/mtd/maps/ichxrom.c | 1
 drivers/mtd/maps/impa7.c | 2
 drivers/mtd/maps/integrator-flash.c | 2
 drivers/mtd/maps/ipaq-flash.c | 2
 drivers/mtd/maps/ixp2000.c | 2
 drivers/mtd/maps/ixp4xx.c | 2
 drivers/mtd/maps/l440gx.c | 2
 drivers/mtd/maps/map_funcs.c | 2
 drivers/mtd/maps/mbx860.c | 2
 drivers/mtd/maps/netsc520.c | 2
 drivers/mtd/maps/nettel.c | 2
 drivers/mtd/maps/octagon-5066.c | 1
 drivers/mtd/maps/omap-toto-flash.c | 2
 drivers/mtd/maps/pci.c | 2
 drivers/mtd/maps/pcmciamtd.c | 5
 drivers/mtd/maps/physmap.c | 24
 drivers/mtd/maps/plat-ram.c | 2
 drivers/mtd/maps/redwood.c | 2
 drivers/mtd/maps/rpxlite.c | 2
 drivers/mtd/maps/sa1100-flash.c | 2
 drivers/mtd/maps/sbc8240.c | 3
 drivers/mtd/maps/sbc_gxx.c | 2
 drivers/mtd/maps/sc520cdp.c | 2
 drivers/mtd/maps/scb2_flash.c | 1
 drivers/mtd/maps/scx200_docflash.c | 2
 drivers/mtd/maps/sharpsl-flash.c | 2
 drivers/mtd/maps/solutionengine.c | 2
 drivers/mtd/maps/sun_uflash.c | 2
 drivers/mtd/maps/tqm8xxl.c | 2
 drivers/mtd/maps/ts5500_flash.c | 2
 drivers/mtd/maps/tsunami_flash.c | 1
 drivers/mtd/maps/uclinux.c | 2
 drivers/mtd/maps/vmax301.c | 1
 drivers/mtd/maps/walnut.c | 2
 drivers/mtd/maps/wr_sbc82xx_flash.c | 2
 drivers/mtd/mtd_blkdevs.c | 34
 drivers/mtd/mtdblock.c | 2
 drivers/mtd/mtdblock_ro.c | 2
 drivers/mtd/mtdchar.c | 24
 drivers/mtd/mtdconcat.c | 2
 drivers/mtd/mtdcore.c | 14
 drivers/mtd/mtdpart.c | 448
 drivers/mtd/nand/Kconfig | 28
 drivers/mtd/nand/Makefile | 3
 drivers/mtd/nand/atmel_nand.c (renamed from drivers/mtd/nand/at91_nand.c) | 278
 drivers/mtd/nand/atmel_nand_ecc.h | 36
 drivers/mtd/nand/au1550nd.c | 4
 drivers/mtd/nand/autcpu12.c | 2
 drivers/mtd/nand/cafe_nand.c | 6
 drivers/mtd/nand/diskonchip.c | 4
 drivers/mtd/nand/edb7312.c | 2
 drivers/mtd/nand/excite_nandflash.c | 2
 drivers/mtd/nand/fsl_elbc_nand.c | 63
 drivers/mtd/nand/h1910.c | 2
 drivers/mtd/nand/nand_base.c | 87
 drivers/mtd/nand/nand_bbt.c | 2
 drivers/mtd/nand/nand_ecc.c | 2
 drivers/mtd/nand/nand_ids.c | 2
 drivers/mtd/nand/nandsim.c | 41
 drivers/mtd/nand/ppchameleonevb.c | 2
 drivers/mtd/nand/rtc_from4.c | 2
 drivers/mtd/nand/s3c2410.c | 168
 drivers/mtd/nand/sharpsl.c | 2
 drivers/mtd/nand/spia.c | 2
 drivers/mtd/nand/toto.c | 2
 drivers/mtd/nand/ts7250.c | 2
 drivers/mtd/nftlcore.c | 5
 drivers/mtd/nftlmount.c | 4
 drivers/mtd/onenand/onenand_base.c | 54
 drivers/mtd/redboot.c | 2
 drivers/mtd/rfd_ftl.c | 2
 drivers/net/bnx2x_main.c | 10
 drivers/net/mlx4/alloc.c | 1
 drivers/net/mlx4/catas.c | 1
 drivers/net/mlx4/cmd.c | 2
 drivers/net/mlx4/cq.c | 2
 drivers/net/mlx4/eq.c | 2
 drivers/net/mlx4/fw.c | 2
 drivers/net/mlx4/fw.h | 2
 drivers/net/mlx4/icm.c | 2
 drivers/net/mlx4/icm.h | 2
 drivers/net/mlx4/intf.c | 1
 drivers/net/mlx4/main.c | 2
 drivers/net/mlx4/mcg.c | 1
 drivers/net/mlx4/mlx4.h | 2
 drivers/net/mlx4/mr.c | 2
 drivers/net/mlx4/qp.c | 2
 drivers/net/mlx4/reset.c | 1
 drivers/net/mlx4/srq.c | 1
 drivers/net/ppp_generic.c | 6
 drivers/s390/kvm/Makefile | 2
 drivers/s390/net/qeth_core_main.c | 14
 drivers/s390/net/qeth_l2_main.c | 26
 drivers/s390/net/qeth_l3_main.c | 30
 drivers/scsi/3w-9xxx.c | 40
 drivers/scsi/3w-9xxx.h | 9
 drivers/scsi/Kconfig | 1
 drivers/scsi/Makefile | 2
 drivers/scsi/advansys.c | 2
 drivers/scsi/aha152x.c | 12
 drivers/scsi/aic94xx/aic94xx.h | 4
 drivers/scsi/aic94xx/aic94xx_hwi.c | 2
 drivers/scsi/aic94xx/aic94xx_scb.c | 46
 drivers/scsi/aic94xx/aic94xx_task.c | 2
 drivers/scsi/aic94xx/aic94xx_tmf.c | 18
 drivers/scsi/arm/fas216.c | 4
 drivers/scsi/ch.c | 1
 drivers/scsi/device_handler/Kconfig | 8
 drivers/scsi/device_handler/Makefile | 1
 drivers/scsi/device_handler/scsi_dh.c | 446
 drivers/scsi/device_handler/scsi_dh_alua.c | 802
 drivers/scsi/device_handler/scsi_dh_emc.c | 644
 drivers/scsi/device_handler/scsi_dh_hp_sw.c | 348
 drivers/scsi/device_handler/scsi_dh_rdac.c | 262
 drivers/scsi/ibmvscsi/ibmvfc.c | 204
 drivers/scsi/ibmvscsi/ibmvfc.h | 44
 drivers/scsi/ibmvscsi/ibmvstgt.c | 2
 drivers/scsi/imm.c | 2
 drivers/scsi/ipr.h | 6
 drivers/scsi/libsas/sas_ata.c | 16
 drivers/scsi/libsas/sas_expander.c | 12
 drivers/scsi/libsas/sas_port.c | 4
 drivers/scsi/libsas/sas_scsi_host.c | 30
 drivers/scsi/libsrp.c | 2
 drivers/scsi/lpfc/lpfc_init.c | 4
 drivers/scsi/lpfc/lpfc_scsi.c | 2
 drivers/scsi/lpfc/lpfc_sli.c | 6
 drivers/scsi/megaraid/mega_common.h | 2
 drivers/scsi/megaraid/megaraid_mbox.c | 16
 drivers/scsi/megaraid/megaraid_mm.c | 4
 drivers/scsi/nsp32.c | 4
 drivers/scsi/nsp32_debug.c | 2
 drivers/scsi/pcmcia/nsp_cs.c | 4
 drivers/scsi/pcmcia/nsp_debug.c | 2
 drivers/scsi/ppa.c | 2
 drivers/scsi/qla1280.c | 12
 drivers/scsi/qla2xxx/qla_attr.c | 118
 drivers/scsi/qla2xxx/qla_dbg.c | 2
 drivers/scsi/qla2xxx/qla_def.h | 12
 drivers/scsi/qla2xxx/qla_gbl.h | 5
 drivers/scsi/qla2xxx/qla_gs.c | 6
 drivers/scsi/qla2xxx/qla_init.c | 133
 drivers/scsi/qla2xxx/qla_iocb.c | 14
 drivers/scsi/qla2xxx/qla_isr.c | 4
 drivers/scsi/qla2xxx/qla_mbx.c | 9
 drivers/scsi/qla2xxx/qla_mid.c | 16
 drivers/scsi/qla2xxx/qla_os.c | 94
 drivers/scsi/qla2xxx/qla_sup.c | 48
 drivers/scsi/qla2xxx/qla_version.h | 2
 drivers/scsi/qla4xxx/ql4_os.c | 4
 drivers/scsi/scsi.c | 55
 drivers/scsi/scsi_debug.c | 12
 drivers/scsi/scsi_devinfo.c | 6
 drivers/scsi/scsi_error.c | 34
 drivers/scsi/scsi_lib.c | 55
 drivers/scsi/scsi_netlink.c | 8
 drivers/scsi/scsi_priv.h | 1
 drivers/scsi/scsi_proc.c | 4
 drivers/scsi/scsi_scan.c | 13
 drivers/scsi/scsi_sysfs.c | 4
 drivers/scsi/scsi_tgt_priv.h | 2
 drivers/scsi/scsi_transport_fc.c | 12
 drivers/scsi/scsi_transport_sas.c | 4
 drivers/scsi/sd.c | 291
 drivers/scsi/sd.h | 54
 drivers/scsi/sd_dif.c | 538
 drivers/scsi/st.c | 11
 drivers/scsi/stex.c | 2
 drivers/scsi/sym53c8xx_2/sym_hipd.c | 2
 drivers/scsi/tmscsim.c | 8
 drivers/scsi/wd7000.c | 8
 drivers/scsi/zalon.c | 8
 drivers/usb/serial/ipaq.c | 10
 306 files changed, 30825 insertions(+), 3201 deletions(-)
diff --git a/drivers/char/ser_a2232.c b/drivers/char/ser_a2232.c
index 4ba3aec9e1cd..7b0c35207d9b 100644
--- a/drivers/char/ser_a2232.c
+++ b/drivers/char/ser_a2232.c
@@ -192,7 +192,7 @@ static inline void a2232_receive_char(struct a2232_port *port, int ch, int err)
192 Maybe one could implement a more efficient version by not only 192 Maybe one could implement a more efficient version by not only
193 transferring one character at a time. 193 transferring one character at a time.
194*/ 194*/
195 struct tty_struct *tty = port->gs.tty; 195 struct tty_struct *tty = port->gs.port.tty;
196 196
197#if 0 197#if 0
198 switch(err) { 198 switch(err) {
@@ -226,7 +226,7 @@ static void a2232_disable_tx_interrupts(void *ptr)
226 226
227 /* Does this here really have to be? */ 227 /* Does this here really have to be? */
228 local_irq_save(flags); 228 local_irq_save(flags);
229 port->gs.flags &= ~GS_TX_INTEN; 229 port->gs.port.flags &= ~GS_TX_INTEN;
230 local_irq_restore(flags); 230 local_irq_restore(flags);
231} 231}
232 232
@@ -242,7 +242,7 @@ static void a2232_enable_tx_interrupts(void *ptr)
242 242
243 /* Does this here really have to be? */ 243 /* Does this here really have to be? */
244 local_irq_save(flags); 244 local_irq_save(flags);
245 port->gs.flags |= GS_TX_INTEN; 245 port->gs.port.flags |= GS_TX_INTEN;
246 local_irq_restore(flags); 246 local_irq_restore(flags);
247} 247}
248 248
@@ -276,9 +276,9 @@ static void a2232_shutdown_port(void *ptr)
276 276
277 local_irq_save(flags); 277 local_irq_save(flags);
278 278
279 port->gs.flags &= ~GS_ACTIVE; 279 port->gs.port.flags &= ~GS_ACTIVE;
280 280
281 if (port->gs.tty && port->gs.tty->termios->c_cflag & HUPCL) { 281 if (port->gs.port.tty && port->gs.port.tty->termios->c_cflag & HUPCL) {
282 /* Set DTR and RTS to Low, flush output. 282 /* Set DTR and RTS to Low, flush output.
283 The NetBSD driver "msc.c" does it this way. */ 283 The NetBSD driver "msc.c" does it this way. */
284 stat->Command = ( (stat->Command & ~A2232CMD_CMask) | 284 stat->Command = ( (stat->Command & ~A2232CMD_CMask) |
@@ -309,7 +309,7 @@ static int a2232_set_real_termios(void *ptr)
309 volatile struct a2232status *status; 309 volatile struct a2232status *status;
310 volatile struct a2232memory *mem; 310 volatile struct a2232memory *mem;
311 311
312 if (!port->gs.tty || !port->gs.tty->termios) return 0; 312 if (!port->gs.port.tty || !port->gs.port.tty->termios) return 0;
313 313
314 status = a2232stat(port->which_a2232, port->which_port_on_a2232); 314 status = a2232stat(port->which_a2232, port->which_port_on_a2232);
315 mem = a2232mem(port->which_a2232); 315 mem = a2232mem(port->which_a2232);
@@ -345,7 +345,7 @@ static int a2232_set_real_termios(void *ptr)
345 } 345 }
346 a2232_param |= rate; 346 a2232_param |= rate;
347 347
348 cflag = port->gs.tty->termios->c_cflag; 348 cflag = port->gs.port.tty->termios->c_cflag;
349 349
350 // get character size 350 // get character size
351 chsize = cflag & CSIZE; 351 chsize = cflag & CSIZE;
@@ -382,7 +382,7 @@ static int a2232_set_real_termios(void *ptr)
382 the conventional way of inserting START/STOP characters 382 the conventional way of inserting START/STOP characters
383 by hand in throttle()/unthrottle(). 383 by hand in throttle()/unthrottle().
384 */ 384 */
385 softflow = !!( port->gs.tty->termios->c_iflag & IXOFF ); 385 softflow = !!( port->gs.port.tty->termios->c_iflag & IXOFF );
386 386
387 // get Parity (Enabled/Disabled? If Enabled, Odd or Even?) 387 // get Parity (Enabled/Disabled? If Enabled, Odd or Even?)
388 parity = cflag & (PARENB | PARODD); 388 parity = cflag & (PARENB | PARODD);
@@ -400,9 +400,9 @@ static int a2232_set_real_termios(void *ptr)
400 /* Hmm. Maybe an own a2232_port structure 400 /* Hmm. Maybe an own a2232_port structure
401 member would be cleaner? */ 401 member would be cleaner? */
402 if (cflag & CLOCAL) 402 if (cflag & CLOCAL)
403 port->gs.flags &= ~ASYNC_CHECK_CD; 403 port->gs.port.flags &= ~ASYNC_CHECK_CD;
404 else 404 else
405 port->gs.flags |= ASYNC_CHECK_CD; 405 port->gs.port.flags |= ASYNC_CHECK_CD;
406 406
407 407
408 /* Now we have all parameters and can go to set them: */ 408 /* Now we have all parameters and can go to set them: */
@@ -482,18 +482,18 @@ static int a2232_open(struct tty_struct * tty, struct file * filp)
482 port = &a2232_ports[line]; 482 port = &a2232_ports[line];
483 483
484 tty->driver_data = port; 484 tty->driver_data = port;
485 port->gs.tty = tty; 485 port->gs.port.tty = tty;
486 port->gs.count++; 486 port->gs.port.count++;
487 retval = gs_init_port(&port->gs); 487 retval = gs_init_port(&port->gs);
488 if (retval) { 488 if (retval) {
489 port->gs.count--; 489 port->gs.port.count--;
490 return retval; 490 return retval;
491 } 491 }
492 port->gs.flags |= GS_ACTIVE; 492 port->gs.port.flags |= GS_ACTIVE;
493 retval = gs_block_til_ready(port, filp); 493 retval = gs_block_til_ready(port, filp);
494 494
495 if (retval) { 495 if (retval) {
496 port->gs.count--; 496 port->gs.port.count--;
497 return retval; 497 return retval;
498 } 498 }
499 499
@@ -522,7 +522,7 @@ int ch, err, n, p;
522 for (p = 0; p < NUMLINES; p++){ /* for every port on this board */ 522 for (p = 0; p < NUMLINES; p++){ /* for every port on this board */
523 err = 0; 523 err = 0;
524 port = &a2232_ports[n*NUMLINES+p]; 524 port = &a2232_ports[n*NUMLINES+p];
525 if ( port->gs.flags & GS_ACTIVE ){ /* if the port is used */ 525 if ( port->gs.port.flags & GS_ACTIVE ){ /* if the port is used */
526 526
527 status = a2232stat(n,p); 527 status = a2232stat(n,p);
528 528
@@ -577,8 +577,8 @@ int ch, err, n, p;
577 obuf = mem->OutBuf[p]; 577 obuf = mem->OutBuf[p];
578 bufpos = status->OutHead; 578 bufpos = status->OutHead;
579 while ( (port->gs.xmit_cnt > 0) && 579 while ( (port->gs.xmit_cnt > 0) &&
580 (!port->gs.tty->stopped) && 580 (!port->gs.port.tty->stopped) &&
581 (!port->gs.tty->hw_stopped) ){ /* While there are chars to transmit */ 581 (!port->gs.port.tty->hw_stopped) ){ /* While there are chars to transmit */
582 if (((bufpos+1) & A2232_IOBUFLENMASK) != status->OutTail) { /* If the A2232 buffer is not full */ 582 if (((bufpos+1) & A2232_IOBUFLENMASK) != status->OutTail) { /* If the A2232 buffer is not full */
583 ch = port->gs.xmit_buf[port->gs.xmit_tail]; /* get the next char to transmit */ 583 ch = port->gs.xmit_buf[port->gs.xmit_tail]; /* get the next char to transmit */
584 port->gs.xmit_tail = (port->gs.xmit_tail+1) & (SERIAL_XMIT_SIZE-1); /* modulo-addition for the gs.xmit_buf ring-buffer */ 584 port->gs.xmit_tail = (port->gs.xmit_tail+1) & (SERIAL_XMIT_SIZE-1); /* modulo-addition for the gs.xmit_buf ring-buffer */
@@ -592,8 +592,8 @@ int ch, err, n, p;
592 status->OutHead = bufpos; 592 status->OutHead = bufpos;
593 593
594 /* WakeUp if output buffer runs low */ 594 /* WakeUp if output buffer runs low */
595 if ((port->gs.xmit_cnt <= port->gs.wakeup_chars) && port->gs.tty) { 595 if ((port->gs.xmit_cnt <= port->gs.wakeup_chars) && port->gs.port.tty) {
596 tty_wakeup(port->gs.tty); 596 tty_wakeup(port->gs.port.tty);
597 } 597 }
598 } // if the port is used 598 } // if the port is used
599 } // for every port on the board 599 } // for every port on the board
@@ -613,16 +613,16 @@ int ch, err, n, p;
613 struct a2232_port *port = &a2232_ports[n*7+p]; 613 struct a2232_port *port = &a2232_ports[n*7+p];
614 port->cd_status = !(ncd & 1); /* ncd&1 <=> CD is now off */ 614 port->cd_status = !(ncd & 1); /* ncd&1 <=> CD is now off */
615 615
616 if (!(port->gs.flags & ASYNC_CHECK_CD)) 616 if (!(port->gs.port.flags & ASYNC_CHECK_CD))
617 ; /* Don't report DCD changes */ 617 ; /* Don't report DCD changes */
618 else if (port->cd_status) { // if DCD on: DCD went UP! 618 else if (port->cd_status) { // if DCD on: DCD went UP!
619 619
620 /* Are we blocking in open?*/ 620 /* Are we blocking in open?*/
621 wake_up_interruptible(&port->gs.open_wait); 621 wake_up_interruptible(&port->gs.port.open_wait);
622 } 622 }
623 else { // if DCD off: DCD went DOWN! 623 else { // if DCD off: DCD went DOWN!
624 if (port->gs.tty) 624 if (port->gs.port.tty)
625 tty_hangup (port->gs.tty); 625 tty_hangup (port->gs.port.tty);
626 } 626 }
627 627
628 } // if CD changed for this port 628 } // if CD changed for this port
@@ -655,8 +655,8 @@ static void a2232_init_portstructs(void)
655#ifdef NEW_WRITE_LOCKING 655#ifdef NEW_WRITE_LOCKING
656 mutex_init(&(port->gs.port_write_mutex)); 656 mutex_init(&(port->gs.port_write_mutex));
657#endif 657#endif
658 init_waitqueue_head(&port->gs.open_wait); 658 init_waitqueue_head(&port->gs.port.open_wait);
659 init_waitqueue_head(&port->gs.close_wait); 659 init_waitqueue_head(&port->gs.port.close_wait);
660 } 660 }
661} 661}
662 662
diff --git a/drivers/char/vme_scc.c b/drivers/char/vme_scc.c
index 69c5afe97f19..1718b3c481db 100644
--- a/drivers/char/vme_scc.c
+++ b/drivers/char/vme_scc.c
@@ -183,8 +183,8 @@ static void scc_init_portstructs(void)
183#ifdef NEW_WRITE_LOCKING 183#ifdef NEW_WRITE_LOCKING
184 port->gs.port_write_mutex = MUTEX; 184 port->gs.port_write_mutex = MUTEX;
185#endif 185#endif
186 init_waitqueue_head(&port->gs.open_wait); 186 init_waitqueue_head(&port->gs.port.open_wait);
187 init_waitqueue_head(&port->gs.close_wait); 187 init_waitqueue_head(&port->gs.port.close_wait);
188 } 188 }
189} 189}
190 190
@@ -422,7 +422,7 @@ static irqreturn_t scc_rx_int(int irq, void *data)
422{ 422{
423 unsigned char ch; 423 unsigned char ch;
424 struct scc_port *port = data; 424 struct scc_port *port = data;
425 struct tty_struct *tty = port->gs.tty; 425 struct tty_struct *tty = port->gs.port.tty;
426 SCC_ACCESS_INIT(port); 426 SCC_ACCESS_INIT(port);
427 427
428 ch = SCCread_NB(RX_DATA_REG); 428 ch = SCCread_NB(RX_DATA_REG);
@@ -453,7 +453,7 @@ static irqreturn_t scc_rx_int(int irq, void *data)
453static irqreturn_t scc_spcond_int(int irq, void *data) 453static irqreturn_t scc_spcond_int(int irq, void *data)
454{ 454{
455 struct scc_port *port = data; 455 struct scc_port *port = data;
456 struct tty_struct *tty = port->gs.tty; 456 struct tty_struct *tty = port->gs.port.tty;
457 unsigned char stat, ch, err; 457 unsigned char stat, ch, err;
458 int int_pending_mask = port->channel == CHANNEL_A ? 458 int int_pending_mask = port->channel == CHANNEL_A ?
459 IPR_A_RX : IPR_B_RX; 459 IPR_A_RX : IPR_B_RX;
@@ -500,7 +500,7 @@ static irqreturn_t scc_tx_int(int irq, void *data)
500 struct scc_port *port = data; 500 struct scc_port *port = data;
501 SCC_ACCESS_INIT(port); 501 SCC_ACCESS_INIT(port);
502 502
503 if (!port->gs.tty) { 503 if (!port->gs.port.tty) {
504 printk(KERN_WARNING "scc_tx_int with NULL tty!\n"); 504 printk(KERN_WARNING "scc_tx_int with NULL tty!\n");
505 SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0); 505 SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
506 SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET); 506 SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET);
@@ -512,8 +512,9 @@ static irqreturn_t scc_tx_int(int irq, void *data)
512 SCCwrite(TX_DATA_REG, port->x_char); 512 SCCwrite(TX_DATA_REG, port->x_char);
513 port->x_char = 0; 513 port->x_char = 0;
514 } 514 }
515 else if ((port->gs.xmit_cnt <= 0) || port->gs.tty->stopped || 515 else if ((port->gs.xmit_cnt <= 0) ||
516 port->gs.tty->hw_stopped) 516 port->gs.port.tty->stopped ||
517 port->gs.port.tty->hw_stopped)
517 break; 518 break;
518 else { 519 else {
519 SCCwrite(TX_DATA_REG, port->gs.xmit_buf[port->gs.xmit_tail++]); 520 SCCwrite(TX_DATA_REG, port->gs.xmit_buf[port->gs.xmit_tail++]);
@@ -522,15 +523,15 @@ static irqreturn_t scc_tx_int(int irq, void *data)
522 break; 523 break;
523 } 524 }
524 } 525 }
525 if ((port->gs.xmit_cnt <= 0) || port->gs.tty->stopped || 526 if ((port->gs.xmit_cnt <= 0) || port->gs.port.tty->stopped ||
526 port->gs.tty->hw_stopped) { 527 port->gs.port.tty->hw_stopped) {
527 /* disable tx interrupts */ 528 /* disable tx interrupts */
528 SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0); 529 SCCmod (INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
529 SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET); /* disable tx_int on next tx underrun? */ 530 SCCwrite(COMMAND_REG, CR_TX_PENDING_RESET); /* disable tx_int on next tx underrun? */
530 port->gs.flags &= ~GS_TX_INTEN; 531 port->gs.port.flags &= ~GS_TX_INTEN;
531 } 532 }
532 if (port->gs.tty && port->gs.xmit_cnt <= port->gs.wakeup_chars) 533 if (port->gs.port.tty && port->gs.xmit_cnt <= port->gs.wakeup_chars)
533 tty_wakeup(port->gs.tty); 534 tty_wakeup(port->gs.port.tty);
534 535
535 SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET); 536 SCCwrite_NB(COMMAND_REG, CR_HIGHEST_IUS_RESET);
536 return IRQ_HANDLED; 537 return IRQ_HANDLED;
@@ -550,14 +551,14 @@ static irqreturn_t scc_stat_int(int irq, void *data)
550 551
551 if (changed & SR_DCD) { 552 if (changed & SR_DCD) {
552 port->c_dcd = !!(sr & SR_DCD); 553 port->c_dcd = !!(sr & SR_DCD);
553 if (!(port->gs.flags & ASYNC_CHECK_CD)) 554 if (!(port->gs.port.flags & ASYNC_CHECK_CD))
554 ; /* Don't report DCD changes */ 555 ; /* Don't report DCD changes */
555 else if (port->c_dcd) { 556 else if (port->c_dcd) {
556 wake_up_interruptible(&port->gs.open_wait); 557 wake_up_interruptible(&port->gs.port.open_wait);
557 } 558 }
558 else { 559 else {
559 if (port->gs.tty) 560 if (port->gs.port.tty)
560 tty_hangup (port->gs.tty); 561 tty_hangup (port->gs.port.tty);
561 } 562 }
562 } 563 }
563 SCCwrite(COMMAND_REG, CR_EXTSTAT_RESET); 564 SCCwrite(COMMAND_REG, CR_EXTSTAT_RESET);
@@ -578,7 +579,7 @@ static void scc_disable_tx_interrupts(void *ptr)
578 579
579 local_irq_save(flags); 580 local_irq_save(flags);
580 SCCmod(INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0); 581 SCCmod(INT_AND_DMA_REG, ~IDR_TX_INT_ENAB, 0);
581 port->gs.flags &= ~GS_TX_INTEN; 582 port->gs.port.flags &= ~GS_TX_INTEN;
582 local_irq_restore(flags); 583 local_irq_restore(flags);
583} 584}
584 585
@@ -636,8 +637,8 @@ static void scc_shutdown_port(void *ptr)
636{ 637{
637 struct scc_port *port = ptr; 638 struct scc_port *port = ptr;
638 639
639 port->gs.flags &= ~ GS_ACTIVE; 640 port->gs.port.flags &= ~ GS_ACTIVE;
640 if (port->gs.tty && port->gs.tty->termios->c_cflag & HUPCL) { 641 if (port->gs.port.tty && port->gs.port.tty->termios->c_cflag & HUPCL) {
641 scc_setsignals (port, 0, 0); 642 scc_setsignals (port, 0, 0);
642 } 643 }
643} 644}
@@ -652,14 +653,14 @@ static int scc_set_real_termios (void *ptr)
652 struct scc_port *port = ptr; 653 struct scc_port *port = ptr;
653 SCC_ACCESS_INIT(port); 654 SCC_ACCESS_INIT(port);
654 655
655 if (!port->gs.tty || !port->gs.tty->termios) return 0; 656 if (!port->gs.port.tty || !port->gs.port.tty->termios) return 0;
656 657
657 channel = port->channel; 658 channel = port->channel;
658 659
659 if (channel == CHANNEL_A) 660 if (channel == CHANNEL_A)
660 return 0; /* Settings controlled by boot PROM */ 661 return 0; /* Settings controlled by boot PROM */
661 662
662 cflag = port->gs.tty->termios->c_cflag; 663 cflag = port->gs.port.tty->termios->c_cflag;
663 baud = port->gs.baud; 664 baud = port->gs.baud;
664 chsize = (cflag & CSIZE) >> 4; 665 chsize = (cflag & CSIZE) >> 4;
665 666
@@ -678,9 +679,9 @@ static int scc_set_real_termios (void *ptr)
678 } 679 }
679 680
680 if (cflag & CLOCAL) 681 if (cflag & CLOCAL)
681 port->gs.flags &= ~ASYNC_CHECK_CD; 682 port->gs.port.flags &= ~ASYNC_CHECK_CD;
682 else 683 else
683 port->gs.flags |= ASYNC_CHECK_CD; 684 port->gs.port.flags |= ASYNC_CHECK_CD;
684 685
685#ifdef CONFIG_MVME147_SCC 686#ifdef CONFIG_MVME147_SCC
686 if (MACH_IS_MVME147) 687 if (MACH_IS_MVME147)
@@ -856,7 +857,7 @@ static int scc_open (struct tty_struct * tty, struct file * filp)
856 { COMMAND_REG, CR_EXTSTAT_RESET }, 857 { COMMAND_REG, CR_EXTSTAT_RESET },
857 }; 858 };
858#endif 859#endif
859 if (!(port->gs.flags & ASYNC_INITIALIZED)) { 860 if (!(port->gs.port.flags & ASYNC_INITIALIZED)) {
860 local_irq_save(flags); 861 local_irq_save(flags);
861#if defined(CONFIG_MVME147_SCC) || defined(CONFIG_MVME162_SCC) 862#if defined(CONFIG_MVME147_SCC) || defined(CONFIG_MVME162_SCC)
862 if (MACH_IS_MVME147 || MACH_IS_MVME16x) { 863 if (MACH_IS_MVME147 || MACH_IS_MVME16x) {
@@ -880,18 +881,18 @@ static int scc_open (struct tty_struct * tty, struct file * filp)
880 } 881 }
881 882
882 tty->driver_data = port; 883 tty->driver_data = port;
883 port->gs.tty = tty; 884 port->gs.port.tty = tty;
884 port->gs.count++; 885 port->gs.port.count++;
885 retval = gs_init_port(&port->gs); 886 retval = gs_init_port(&port->gs);
886 if (retval) { 887 if (retval) {
887 port->gs.count--; 888 port->gs.port.count--;
888 return retval; 889 return retval;
889 } 890 }
890 port->gs.flags |= GS_ACTIVE; 891 port->gs.port.flags |= GS_ACTIVE;
891 retval = gs_block_til_ready(port, filp); 892 retval = gs_block_til_ready(port, filp);
892 893
893 if (retval) { 894 if (retval) {
894 port->gs.count--; 895 port->gs.port.count--;
895 return retval; 896 return retval;
896 } 897 }
897 898
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b11943dadefd..681c15f42083 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -99,6 +99,9 @@ struct talitos_private {
99 /* next channel to be assigned next incoming descriptor */ 99 /* next channel to be assigned next incoming descriptor */
100 atomic_t last_chan; 100 atomic_t last_chan;
101 101
102 /* per-channel number of requests pending in channel h/w fifo */
103 atomic_t *submit_count;
104
102 /* per-channel request fifo */ 105 /* per-channel request fifo */
103 struct talitos_request **fifo; 106 struct talitos_request **fifo;
104 107
@@ -263,15 +266,15 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
263 266
264 spin_lock_irqsave(&priv->head_lock[ch], flags); 267 spin_lock_irqsave(&priv->head_lock[ch], flags);
265 268
266 head = priv->head[ch]; 269 if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
267 request = &priv->fifo[ch][head]; 270 /* h/w fifo is full */
268
269 if (request->desc) {
270 /* request queue is full */
271 spin_unlock_irqrestore(&priv->head_lock[ch], flags); 271 spin_unlock_irqrestore(&priv->head_lock[ch], flags);
272 return -EAGAIN; 272 return -EAGAIN;
273 } 273 }
274 274
275 head = priv->head[ch];
276 request = &priv->fifo[ch][head];
277
275 /* map descriptor and save caller data */ 278 /* map descriptor and save caller data */
276 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc), 279 request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
277 DMA_BIDIRECTIONAL); 280 DMA_BIDIRECTIONAL);
@@ -335,6 +338,9 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
335 priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1); 338 priv->tail[ch] = (tail + 1) & (priv->fifo_len - 1);
336 339
337 spin_unlock_irqrestore(&priv->tail_lock[ch], flags); 340 spin_unlock_irqrestore(&priv->tail_lock[ch], flags);
341
342 atomic_dec(&priv->submit_count[ch]);
343
338 saved_req.callback(dev, saved_req.desc, saved_req.context, 344 saved_req.callback(dev, saved_req.desc, saved_req.context,
339 status); 345 status);
340 /* channel may resume processing in single desc error case */ 346 /* channel may resume processing in single desc error case */
@@ -842,7 +848,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
842 848
843 /* adjust (decrease) last one (or two) entry's len to cryptlen */ 849 /* adjust (decrease) last one (or two) entry's len to cryptlen */
844 link_tbl_ptr--; 850 link_tbl_ptr--;
845 while (link_tbl_ptr->len <= (-cryptlen)) { 851 while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
846 /* Empty this entry, and move to previous one */ 852 /* Empty this entry, and move to previous one */
847 cryptlen += be16_to_cpu(link_tbl_ptr->len); 853 cryptlen += be16_to_cpu(link_tbl_ptr->len);
848 link_tbl_ptr->len = 0; 854 link_tbl_ptr->len = 0;
@@ -874,7 +880,7 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
874 unsigned int cryptlen = areq->cryptlen; 880 unsigned int cryptlen = areq->cryptlen;
875 unsigned int authsize = ctx->authsize; 881 unsigned int authsize = ctx->authsize;
876 unsigned int ivsize; 882 unsigned int ivsize;
877 int sg_count; 883 int sg_count, ret;
878 884
879 /* hmac key */ 885 /* hmac key */
880 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key, 886 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
@@ -978,7 +984,12 @@ static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
978 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0, 984 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
979 DMA_FROM_DEVICE); 985 DMA_FROM_DEVICE);
980 986
981 return talitos_submit(dev, desc, callback, areq); 987 ret = talitos_submit(dev, desc, callback, areq);
988 if (ret != -EINPROGRESS) {
989 ipsec_esp_unmap(dev, edesc, areq);
990 kfree(edesc);
991 }
992 return ret;
982} 993}
983 994
984 995
@@ -1009,6 +1020,8 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
1009 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1020 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1010 struct ipsec_esp_edesc *edesc; 1021 struct ipsec_esp_edesc *edesc;
1011 int src_nents, dst_nents, alloc_len, dma_len; 1022 int src_nents, dst_nents, alloc_len, dma_len;
1023 gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1024 GFP_ATOMIC;
1012 1025
1013 if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) { 1026 if (areq->cryptlen + ctx->authsize > TALITOS_MAX_DATA_LEN) {
1014 dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n"); 1027 dev_err(ctx->dev, "cryptlen exceeds h/w max limit\n");
@@ -1022,7 +1035,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
1022 dst_nents = src_nents; 1035 dst_nents = src_nents;
1023 } else { 1036 } else {
1024 dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize); 1037 dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize);
1025 dst_nents = (dst_nents == 1) ? 0 : src_nents; 1038 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1026 } 1039 }
1027 1040
1028 /* 1041 /*
@@ -1040,7 +1053,7 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
1040 alloc_len += icv_stashing ? ctx->authsize : 0; 1053 alloc_len += icv_stashing ? ctx->authsize : 0;
1041 } 1054 }
1042 1055
1043 edesc = kmalloc(alloc_len, GFP_DMA); 1056 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1044 if (!edesc) { 1057 if (!edesc) {
1045 dev_err(ctx->dev, "could not allocate edescriptor\n"); 1058 dev_err(ctx->dev, "could not allocate edescriptor\n");
1046 return ERR_PTR(-ENOMEM); 1059 return ERR_PTR(-ENOMEM);
@@ -1337,6 +1350,7 @@ static int __devexit talitos_remove(struct of_device *ofdev)
1337 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) 1350 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
1338 talitos_unregister_rng(dev); 1351 talitos_unregister_rng(dev);
1339 1352
1353 kfree(priv->submit_count);
1340 kfree(priv->tail); 1354 kfree(priv->tail);
1341 kfree(priv->head); 1355 kfree(priv->head);
1342 1356
@@ -1466,9 +1480,6 @@ static int talitos_probe(struct of_device *ofdev,
1466 goto err_out; 1480 goto err_out;
1467 } 1481 }
1468 1482
1469 of_node_put(np);
1470 np = NULL;
1471
1472 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1483 priv->head_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
1473 GFP_KERNEL); 1484 GFP_KERNEL);
1474 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels, 1485 priv->tail_lock = kmalloc(sizeof(spinlock_t) * priv->num_channels,
@@ -1504,6 +1515,16 @@ static int talitos_probe(struct of_device *ofdev,
1504 } 1515 }
1505 } 1516 }
1506 1517
1518 priv->submit_count = kmalloc(sizeof(atomic_t) * priv->num_channels,
1519 GFP_KERNEL);
1520 if (!priv->submit_count) {
1521 dev_err(dev, "failed to allocate fifo submit count space\n");
1522 err = -ENOMEM;
1523 goto err_out;
1524 }
1525 for (i = 0; i < priv->num_channels; i++)
1526 atomic_set(&priv->submit_count[i], -priv->chfifo_len);
1527
1507 priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); 1528 priv->head = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1508 priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL); 1529 priv->tail = kzalloc(sizeof(int) * priv->num_channels, GFP_KERNEL);
1509 if (!priv->head || !priv->tail) { 1530 if (!priv->head || !priv->tail) {
@@ -1559,8 +1580,6 @@ static int talitos_probe(struct of_device *ofdev,
1559 1580
1560err_out: 1581err_out:
1561 talitos_remove(ofdev); 1582 talitos_remove(ofdev);
1562 if (np)
1563 of_node_put(np);
1564 1583
1565 return err; 1584 return err;
1566} 1585}
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 76f26710fc16..fa6d6abefd4d 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -16,8 +16,13 @@ config FIREWIRE
16 enable the new stack. 16 enable the new stack.
17 17
18 To compile this driver as a module, say M here: the module will be 18 To compile this driver as a module, say M here: the module will be
19 called firewire-core. It functionally replaces ieee1394, raw1394, 19 called firewire-core.
20 and video1394. 20
21 This module functionally replaces ieee1394, raw1394, and video1394.
22 To access it from application programs, you generally need at least
23 libraw1394 version 2. IIDC/DCAM applications also need libdc1394
24 version 2. No libraries are required to access storage devices
25 through the firewire-sbp2 driver.
21 26
22config FIREWIRE_OHCI 27config FIREWIRE_OHCI
23 tristate "OHCI-1394 controllers" 28 tristate "OHCI-1394 controllers"
diff --git a/drivers/firewire/fw-card.c b/drivers/firewire/fw-card.c
index da873d795aad..bbd73a406e53 100644
--- a/drivers/firewire/fw-card.c
+++ b/drivers/firewire/fw-card.c
@@ -539,7 +539,7 @@ fw_core_remove_card(struct fw_card *card)
539 wait_for_completion(&card->done); 539 wait_for_completion(&card->done);
540 540
541 cancel_delayed_work_sync(&card->work); 541 cancel_delayed_work_sync(&card->work);
542 fw_flush_transactions(card); 542 WARN_ON(!list_empty(&card->transaction_list));
543 del_timer_sync(&card->flush_timer); 543 del_timer_sync(&card->flush_timer);
544} 544}
545EXPORT_SYMBOL(fw_core_remove_card); 545EXPORT_SYMBOL(fw_core_remove_card);
diff --git a/drivers/firewire/fw-cdev.c b/drivers/firewire/fw-cdev.c
index c639915fc3cb..bc81d6fcd2fd 100644
--- a/drivers/firewire/fw-cdev.c
+++ b/drivers/firewire/fw-cdev.c
@@ -382,9 +382,9 @@ complete_transaction(struct fw_card *card, int rcode,
382 382
383 response->response.type = FW_CDEV_EVENT_RESPONSE; 383 response->response.type = FW_CDEV_EVENT_RESPONSE;
384 response->response.rcode = rcode; 384 response->response.rcode = rcode;
385 queue_event(client, &response->event, 385 queue_event(client, &response->event, &response->response,
386 &response->response, sizeof(response->response), 386 sizeof(response->response) + response->response.length,
387 response->response.data, response->response.length); 387 NULL, 0);
388} 388}
389 389
390static int ioctl_send_request(struct client *client, void *buffer) 390static int ioctl_send_request(struct client *client, void *buffer)
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 566672e0bcff..251416f2148f 100644
--- a/drivers/firewire/fw-ohci.c
+++ b/drivers/firewire/fw-ohci.c
@@ -171,7 +171,6 @@ struct iso_context {
171struct fw_ohci { 171struct fw_ohci {
172 struct fw_card card; 172 struct fw_card card;
173 173
174 u32 version;
175 __iomem char *registers; 174 __iomem char *registers;
176 dma_addr_t self_id_bus; 175 dma_addr_t self_id_bus;
177 __le32 *self_id_cpu; 176 __le32 *self_id_cpu;
@@ -180,6 +179,8 @@ struct fw_ohci {
180 int generation; 179 int generation;
181 int request_generation; /* for timestamping incoming requests */ 180 int request_generation; /* for timestamping incoming requests */
182 u32 bus_seconds; 181 u32 bus_seconds;
182
183 bool use_dualbuffer;
183 bool old_uninorth; 184 bool old_uninorth;
184 bool bus_reset_packet_quirk; 185 bool bus_reset_packet_quirk;
185 186
@@ -1885,7 +1886,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
1885 } else { 1886 } else {
1886 mask = &ohci->ir_context_mask; 1887 mask = &ohci->ir_context_mask;
1887 list = ohci->ir_context_list; 1888 list = ohci->ir_context_list;
1888 if (ohci->version >= OHCI_VERSION_1_1) 1889 if (ohci->use_dualbuffer)
1889 callback = handle_ir_dualbuffer_packet; 1890 callback = handle_ir_dualbuffer_packet;
1890 else 1891 else
1891 callback = handle_ir_packet_per_buffer; 1892 callback = handle_ir_packet_per_buffer;
@@ -1949,7 +1950,7 @@ static int ohci_start_iso(struct fw_iso_context *base,
1949 } else { 1950 } else {
1950 index = ctx - ohci->ir_context_list; 1951 index = ctx - ohci->ir_context_list;
1951 control = IR_CONTEXT_ISOCH_HEADER; 1952 control = IR_CONTEXT_ISOCH_HEADER;
1952 if (ohci->version >= OHCI_VERSION_1_1) 1953 if (ohci->use_dualbuffer)
1953 control |= IR_CONTEXT_DUAL_BUFFER_MODE; 1954 control |= IR_CONTEXT_DUAL_BUFFER_MODE;
1954 match = (tags << 28) | (sync << 8) | ctx->base.channel; 1955 match = (tags << 28) | (sync << 8) | ctx->base.channel;
1955 if (cycle >= 0) { 1956 if (cycle >= 0) {
@@ -2279,7 +2280,7 @@ ohci_queue_iso(struct fw_iso_context *base,
2279 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 2280 spin_lock_irqsave(&ctx->context.ohci->lock, flags);
2280 if (base->type == FW_ISO_CONTEXT_TRANSMIT) 2281 if (base->type == FW_ISO_CONTEXT_TRANSMIT)
2281 retval = ohci_queue_iso_transmit(base, packet, buffer, payload); 2282 retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
2282 else if (ctx->context.ohci->version >= OHCI_VERSION_1_1) 2283 else if (ctx->context.ohci->use_dualbuffer)
2283 retval = ohci_queue_iso_receive_dualbuffer(base, packet, 2284 retval = ohci_queue_iso_receive_dualbuffer(base, packet,
2284 buffer, payload); 2285 buffer, payload);
2285 else 2286 else
@@ -2341,7 +2342,7 @@ static int __devinit
2341pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) 2342pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2342{ 2343{
2343 struct fw_ohci *ohci; 2344 struct fw_ohci *ohci;
2344 u32 bus_options, max_receive, link_speed; 2345 u32 bus_options, max_receive, link_speed, version;
2345 u64 guid; 2346 u64 guid;
2346 int err; 2347 int err;
2347 size_t size; 2348 size_t size;
@@ -2366,12 +2367,6 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2366 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); 2367 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
2367 pci_set_drvdata(dev, ohci); 2368 pci_set_drvdata(dev, ohci);
2368 2369
2369#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
2370 ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
2371 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
2372#endif
2373 ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
2374
2375 spin_lock_init(&ohci->lock); 2370 spin_lock_init(&ohci->lock);
2376 2371
2377 tasklet_init(&ohci->bus_reset_tasklet, 2372 tasklet_init(&ohci->bus_reset_tasklet,
@@ -2390,6 +2385,23 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2390 goto fail_iomem; 2385 goto fail_iomem;
2391 } 2386 }
2392 2387
2388 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2389 ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
2390
2391/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
2392#if !defined(CONFIG_X86_32)
2393 /* dual-buffer mode is broken with descriptor addresses above 2G */
2394 if (dev->vendor == PCI_VENDOR_ID_TI &&
2395 dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
2396 ohci->use_dualbuffer = false;
2397#endif
2398
2399#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
2400 ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
2401 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
2402#endif
2403 ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
2404
2393 ar_context_init(&ohci->ar_request_ctx, ohci, 2405 ar_context_init(&ohci->ar_request_ctx, ohci,
2394 OHCI1394_AsReqRcvContextControlSet); 2406 OHCI1394_AsReqRcvContextControlSet);
2395 2407
@@ -2441,9 +2453,8 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
2441 if (err < 0) 2453 if (err < 0)
2442 goto fail_self_id; 2454 goto fail_self_id;
2443 2455
2444 ohci->version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2445 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", 2456 fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
2446 dev->dev.bus_id, ohci->version >> 16, ohci->version & 0xff); 2457 dev->dev.bus_id, version >> 16, version & 0xff);
2447 return 0; 2458 return 0;
2448 2459
2449 fail_self_id: 2460 fail_self_id:
diff --git a/drivers/firewire/fw-topology.c b/drivers/firewire/fw-topology.c
index 213b0ff8f3d6..c1b81077c4a8 100644
--- a/drivers/firewire/fw-topology.c
+++ b/drivers/firewire/fw-topology.c
@@ -510,8 +510,6 @@ fw_core_handle_bus_reset(struct fw_card *card,
510 struct fw_node *local_node; 510 struct fw_node *local_node;
511 unsigned long flags; 511 unsigned long flags;
512 512
513 fw_flush_transactions(card);
514
515 spin_lock_irqsave(&card->lock, flags); 513 spin_lock_irqsave(&card->lock, flags);
516 514
517 /* 515 /*
diff --git a/drivers/firewire/fw-transaction.c b/drivers/firewire/fw-transaction.c
index 40db80752272..e5d1a0b64fcf 100644
--- a/drivers/firewire/fw-transaction.c
+++ b/drivers/firewire/fw-transaction.c
@@ -22,6 +22,7 @@
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/kref.h> 23#include <linux/kref.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/mutex.h>
25#include <linux/init.h> 26#include <linux/init.h>
26#include <linux/interrupt.h> 27#include <linux/interrupt.h>
27#include <linux/pci.h> 28#include <linux/pci.h>
@@ -151,7 +152,7 @@ transmit_complete_callback(struct fw_packet *packet,
151 152
152static void 153static void
153fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, 154fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
154 int node_id, int source_id, int generation, int speed, 155 int destination_id, int source_id, int generation, int speed,
155 unsigned long long offset, void *payload, size_t length) 156 unsigned long long offset, void *payload, size_t length)
156{ 157{
157 int ext_tcode; 158 int ext_tcode;
@@ -166,7 +167,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
166 HEADER_RETRY(RETRY_X) | 167 HEADER_RETRY(RETRY_X) |
167 HEADER_TLABEL(tlabel) | 168 HEADER_TLABEL(tlabel) |
168 HEADER_TCODE(tcode) | 169 HEADER_TCODE(tcode) |
169 HEADER_DESTINATION(node_id); 170 HEADER_DESTINATION(destination_id);
170 packet->header[1] = 171 packet->header[1] =
171 HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id); 172 HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
172 packet->header[2] = 173 packet->header[2] =
@@ -252,7 +253,7 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
252 fw_transaction_callback_t callback, void *callback_data) 253 fw_transaction_callback_t callback, void *callback_data)
253{ 254{
254 unsigned long flags; 255 unsigned long flags;
255 int tlabel, source; 256 int tlabel;
256 257
257 /* 258 /*
258 * Bump the flush timer up 100ms first of all so we 259 * Bump the flush timer up 100ms first of all so we
@@ -268,7 +269,6 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
268 269
269 spin_lock_irqsave(&card->lock, flags); 270 spin_lock_irqsave(&card->lock, flags);
270 271
271 source = card->node_id;
272 tlabel = card->current_tlabel; 272 tlabel = card->current_tlabel;
273 if (card->tlabel_mask & (1 << tlabel)) { 273 if (card->tlabel_mask & (1 << tlabel)) {
274 spin_unlock_irqrestore(&card->lock, flags); 274 spin_unlock_irqrestore(&card->lock, flags);
@@ -279,77 +279,58 @@ fw_send_request(struct fw_card *card, struct fw_transaction *t,
279 card->current_tlabel = (card->current_tlabel + 1) & 0x1f; 279 card->current_tlabel = (card->current_tlabel + 1) & 0x1f;
280 card->tlabel_mask |= (1 << tlabel); 280 card->tlabel_mask |= (1 << tlabel);
281 281
282 list_add_tail(&t->link, &card->transaction_list);
283
284 spin_unlock_irqrestore(&card->lock, flags);
285
286 /* Initialize rest of transaction, fill out packet and send it. */
287 t->node_id = node_id; 282 t->node_id = node_id;
288 t->tlabel = tlabel; 283 t->tlabel = tlabel;
289 t->callback = callback; 284 t->callback = callback;
290 t->callback_data = callback_data; 285 t->callback_data = callback_data;
291 286
292 fw_fill_request(&t->packet, tcode, t->tlabel, 287 fw_fill_request(&t->packet, tcode, t->tlabel, node_id, card->node_id,
293 node_id, source, generation, 288 generation, speed, offset, payload, length);
294 speed, offset, payload, length);
295 t->packet.callback = transmit_complete_callback; 289 t->packet.callback = transmit_complete_callback;
296 290
291 list_add_tail(&t->link, &card->transaction_list);
292
293 spin_unlock_irqrestore(&card->lock, flags);
294
297 card->driver->send_request(card, &t->packet); 295 card->driver->send_request(card, &t->packet);
298} 296}
299EXPORT_SYMBOL(fw_send_request); 297EXPORT_SYMBOL(fw_send_request);
300 298
301struct fw_phy_packet { 299static DEFINE_MUTEX(phy_config_mutex);
302 struct fw_packet packet; 300static DECLARE_COMPLETION(phy_config_done);
303 struct completion done;
304 struct kref kref;
305};
306
307static void phy_packet_release(struct kref *kref)
308{
309 struct fw_phy_packet *p =
310 container_of(kref, struct fw_phy_packet, kref);
311 kfree(p);
312}
313 301
314static void transmit_phy_packet_callback(struct fw_packet *packet, 302static void transmit_phy_packet_callback(struct fw_packet *packet,
315 struct fw_card *card, int status) 303 struct fw_card *card, int status)
316{ 304{
317 struct fw_phy_packet *p = 305 complete(&phy_config_done);
318 container_of(packet, struct fw_phy_packet, packet);
319
320 complete(&p->done);
321 kref_put(&p->kref, phy_packet_release);
322} 306}
323 307
308static struct fw_packet phy_config_packet = {
309 .header_length = 8,
310 .payload_length = 0,
311 .speed = SCODE_100,
312 .callback = transmit_phy_packet_callback,
313};
314
324void fw_send_phy_config(struct fw_card *card, 315void fw_send_phy_config(struct fw_card *card,
325 int node_id, int generation, int gap_count) 316 int node_id, int generation, int gap_count)
326{ 317{
327 struct fw_phy_packet *p;
328 long timeout = DIV_ROUND_UP(HZ, 10); 318 long timeout = DIV_ROUND_UP(HZ, 10);
329 u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) | 319 u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
330 PHY_CONFIG_ROOT_ID(node_id) | 320 PHY_CONFIG_ROOT_ID(node_id) |
331 PHY_CONFIG_GAP_COUNT(gap_count); 321 PHY_CONFIG_GAP_COUNT(gap_count);
332 322
333 p = kmalloc(sizeof(*p), GFP_KERNEL); 323 mutex_lock(&phy_config_mutex);
334 if (p == NULL) 324
335 return; 325 phy_config_packet.header[0] = data;
326 phy_config_packet.header[1] = ~data;
327 phy_config_packet.generation = generation;
328 INIT_COMPLETION(phy_config_done);
329
330 card->driver->send_request(card, &phy_config_packet);
331 wait_for_completion_timeout(&phy_config_done, timeout);
336 332
337 p->packet.header[0] = data; 333 mutex_unlock(&phy_config_mutex);
338 p->packet.header[1] = ~data;
339 p->packet.header_length = 8;
340 p->packet.payload_length = 0;
341 p->packet.speed = SCODE_100;
342 p->packet.generation = generation;
343 p->packet.callback = transmit_phy_packet_callback;
344 init_completion(&p->done);
345 kref_set(&p->kref, 2);
346
347 card->driver->send_request(card, &p->packet);
348 timeout = wait_for_completion_timeout(&p->done, timeout);
349 kref_put(&p->kref, phy_packet_release);
350
351 /* will leak p if the callback is never executed */
352 WARN_ON(timeout == 0);
353} 334}
354 335
355void fw_flush_transactions(struct fw_card *card) 336void fw_flush_transactions(struct fw_card *card)
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index e23399c7f773..001622eb86f9 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -153,12 +153,14 @@ int __init firmware_map_add_early(resource_size_t start, resource_size_t end,
153 153
154static ssize_t start_show(struct firmware_map_entry *entry, char *buf) 154static ssize_t start_show(struct firmware_map_entry *entry, char *buf)
155{ 155{
156 return snprintf(buf, PAGE_SIZE, "0x%llx\n", entry->start); 156 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
157 (unsigned long long)entry->start);
157} 158}
158 159
159static ssize_t end_show(struct firmware_map_entry *entry, char *buf) 160static ssize_t end_show(struct firmware_map_entry *entry, char *buf)
160{ 161{
161 return snprintf(buf, PAGE_SIZE, "0x%llx\n", entry->end); 162 return snprintf(buf, PAGE_SIZE, "0x%llx\n",
163 (unsigned long long)entry->end);
162} 164}
163 165
164static ssize_t type_show(struct firmware_map_entry *entry, char *buf) 166static ssize_t type_show(struct firmware_map_entry *entry, char *buf)
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 9494005d1c9a..e603736682bf 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -43,7 +43,6 @@
43#include <linux/cdev.h> 43#include <linux/cdev.h>
44#include <linux/idr.h> 44#include <linux/idr.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/smp_lock.h>
47 46
48#include <asm/uaccess.h> 47#include <asm/uaccess.h>
49 48
@@ -1154,11 +1153,18 @@ static unsigned int ib_ucm_poll(struct file *filp,
1154 return mask; 1153 return mask;
1155} 1154}
1156 1155
1156/*
1157 * ib_ucm_open() does not need the BKL:
1158 *
1159 * - no global state is referred to;
1160 * - there is no ioctl method to race against;
1161 * - no further module initialization is required for open to work
1162 * after the device is registered.
1163 */
1157static int ib_ucm_open(struct inode *inode, struct file *filp) 1164static int ib_ucm_open(struct inode *inode, struct file *filp)
1158{ 1165{
1159 struct ib_ucm_file *file; 1166 struct ib_ucm_file *file;
1160 1167
1161 cycle_kernel_lock();
1162 file = kmalloc(sizeof(*file), GFP_KERNEL); 1168 file = kmalloc(sizeof(*file), GFP_KERNEL);
1163 if (!file) 1169 if (!file)
1164 return -ENOMEM; 1170 return -ENOMEM;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 195f97302fe5..b41dd26bbfa1 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -38,7 +38,6 @@
38#include <linux/in.h> 38#include <linux/in.h>
39#include <linux/in6.h> 39#include <linux/in6.h>
40#include <linux/miscdevice.h> 40#include <linux/miscdevice.h>
41#include <linux/smp_lock.h>
42 41
43#include <rdma/rdma_user_cm.h> 42#include <rdma/rdma_user_cm.h>
44#include <rdma/ib_marshall.h> 43#include <rdma/ib_marshall.h>
@@ -1149,6 +1148,14 @@ static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
1149 return mask; 1148 return mask;
1150} 1149}
1151 1150
1151/*
1152 * ucma_open() does not need the BKL:
1153 *
1154 * - no global state is referred to;
1155 * - there is no ioctl method to race against;
1156 * - no further module initialization is required for open to work
1157 * after the device is registered.
1158 */
1152static int ucma_open(struct inode *inode, struct file *filp) 1159static int ucma_open(struct inode *inode, struct file *filp)
1153{ 1160{
1154 struct ucma_file *file; 1161 struct ucma_file *file;
@@ -1157,7 +1164,6 @@ static int ucma_open(struct inode *inode, struct file *filp)
1157 if (!file) 1164 if (!file)
1158 return -ENOMEM; 1165 return -ENOMEM;
1159 1166
1160 lock_kernel();
1161 INIT_LIST_HEAD(&file->event_list); 1167 INIT_LIST_HEAD(&file->event_list);
1162 INIT_LIST_HEAD(&file->ctx_list); 1168 INIT_LIST_HEAD(&file->ctx_list);
1163 init_waitqueue_head(&file->poll_wait); 1169 init_waitqueue_head(&file->poll_wait);
@@ -1165,7 +1171,6 @@ static int ucma_open(struct inode *inode, struct file *filp)
1165 1171
1166 filp->private_data = file; 1172 filp->private_data = file;
1167 file->filp = filp; 1173 file->filp = filp;
1168 unlock_kernel();
1169 return 0; 1174 return 0;
1170} 1175}
1171 1176
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 0b191a4842ce..a1464574bfdd 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 38d6907ab521..a3c2851c0545 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index d26a91317d4d..6e2b0dc21b61 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index db2086faa4ed..a4cdb465cd1d 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 02a99bc4442e..f7bc7dd8578a 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 12d6bc6f8007..d42565258fb7 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/mlx4/user.h b/drivers/infiniband/hw/mlx4/user.h
index e2d11be4525c..13beedeeef9f 100644
--- a/drivers/infiniband/hw/mlx4/user.h
+++ b/drivers/infiniband/hw/mlx4/user.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index d2884e778098..b0cab64e5e3d 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -276,6 +276,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
 	}
 	nes_free_resource(nesadapter, nesadapter->allocated_qps, nesqp->hwqp.qp_id);
 
+	nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL;
 	kfree(nesqp->allocated_buffer);
 
 }
@@ -289,7 +290,6 @@ void nes_rem_ref(struct ib_qp *ibqp)
 	struct nes_qp *nesqp;
 	struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
 	struct nes_device *nesdev = nesvnic->nesdev;
-	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	struct nes_hw_cqp_wqe *cqp_wqe;
 	struct nes_cqp_request *cqp_request;
 	u32 opcode;
@@ -303,8 +303,6 @@ void nes_rem_ref(struct ib_qp *ibqp)
 	}
 
 	if (atomic_dec_and_test(&nesqp->refcount)) {
-		nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL;
-
 		/* Destroy the QP */
 		cqp_request = nes_get_cqp_request(nesdev);
 		if (cqp_request == NULL) {
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6aa531d5276d..9f0b964b2c99 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -74,36 +74,59 @@ atomic_t cm_nodes_destroyed;
 atomic_t cm_accel_dropped_pkts;
 atomic_t cm_resets_recvd;
 
-static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
+static inline int mini_cm_accelerated(struct nes_cm_core *,
+	struct nes_cm_node *);
 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *,
 	struct nes_vnic *, struct nes_cm_info *);
-static int add_ref_cm_node(struct nes_cm_node *);
-static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
 static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
-static struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
-	void *, u32, void *, u32, u8);
-static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node);
-
 static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *,
-	struct nes_vnic *,
-	struct ietf_mpa_frame *,
-	struct nes_cm_info *);
+	struct nes_vnic *, u16, void *, struct nes_cm_info *);
+static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
 static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *,
 	struct nes_cm_node *);
 static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *,
 	struct nes_cm_node *);
-static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
-static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *,
-	struct sk_buff *);
+static void mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *,
+	struct sk_buff *);
 static int mini_cm_dealloc_core(struct nes_cm_core *);
 static int mini_cm_get(struct nes_cm_core *);
 static int mini_cm_set(struct nes_cm_core *, u32, u32);
+
+static struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
+	void *, u32, void *, u32, u8);
+static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node);
+static int add_ref_cm_node(struct nes_cm_node *);
+static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
+
 static int nes_cm_disconn_true(struct nes_qp *);
 static int nes_cm_post_event(struct nes_cm_event *event);
 static int nes_disconnect(struct nes_qp *nesqp, int abrupt);
 static void nes_disconnect_worker(struct work_struct *work);
-static int send_ack(struct nes_cm_node *cm_node);
+
+static int send_mpa_request(struct nes_cm_node *, struct sk_buff *);
+static int send_syn(struct nes_cm_node *, u32, struct sk_buff *);
+static int send_reset(struct nes_cm_node *, struct sk_buff *);
+static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb);
 static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb);
+static void process_packet(struct nes_cm_node *, struct sk_buff *,
+	struct nes_cm_core *);
+
+static void active_open_err(struct nes_cm_node *, struct sk_buff *, int);
+static void passive_open_err(struct nes_cm_node *, struct sk_buff *, int);
+static void cleanup_retrans_entry(struct nes_cm_node *);
+static void handle_rcv_mpa(struct nes_cm_node *, struct sk_buff *,
+	enum nes_cm_event_type);
+static void free_retrans_entry(struct nes_cm_node *cm_node);
+static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
+	struct sk_buff *skb, int optionsize, int passive);
+
+/* CM event handler functions */
+static void cm_event_connected(struct nes_cm_event *);
+static void cm_event_connect_error(struct nes_cm_event *);
+static void cm_event_reset(struct nes_cm_event *);
+static void cm_event_mpa_req(struct nes_cm_event *);
+
+static void print_core(struct nes_cm_core *core);
 
 /* External CM API Interface */
 /* instance of function pointers for client API */
@@ -158,11 +181,11 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
 	event->cm_info.loc_port = cm_node->loc_port;
 	event->cm_info.cm_id = cm_node->cm_id;
 
-	nes_debug(NES_DBG_CM, "Created event=%p, type=%u, dst_addr=%08x[%x],"
-		" src_addr=%08x[%x]\n",
-		event, type,
-		event->cm_info.loc_addr, event->cm_info.loc_port,
-		event->cm_info.rem_addr, event->cm_info.rem_port);
+	nes_debug(NES_DBG_CM, "cm_node=%p Created event=%p, type=%u, "
+		"dst_addr=%08x[%x], src_addr=%08x[%x]\n",
+		cm_node, event, type, event->cm_info.loc_addr,
+		event->cm_info.loc_port, event->cm_info.rem_addr,
+		event->cm_info.rem_port);
 
 	nes_cm_post_event(event);
 	return event;
@@ -172,14 +195,11 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
 /**
  * send_mpa_request
  */
-static int send_mpa_request(struct nes_cm_node *cm_node)
+static int send_mpa_request(struct nes_cm_node *cm_node, struct sk_buff *skb)
 {
-	struct sk_buff *skb;
 	int ret;
-
-	skb = get_free_pkt(cm_node);
 	if (!skb) {
-		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+		nes_debug(NES_DBG_CM, "skb set to NULL\n");
 		return -1;
 	}
 
@@ -188,9 +208,8 @@ static int send_mpa_request(struct nes_cm_node *cm_node)
 		cm_node->mpa_frame_size, SET_ACK);
 
 	ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
-	if (ret < 0) {
+	if (ret < 0)
 		return ret;
-	}
 
 	return 0;
 }
@@ -229,46 +248,12 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len)
 
 
 /**
- * handle_exception_pkt - process an exception packet.
- * We have been in a TSA state, and we have now received SW
- * TCP/IP traffic should be a FIN request or IP pkt with options
- */
-static int handle_exception_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb)
-{
-	int ret = 0;
-	struct tcphdr *tcph = tcp_hdr(skb);
-
-	/* first check to see if this a FIN pkt */
-	if (tcph->fin) {
-		/* we need to ACK the FIN request */
-		send_ack(cm_node);
-
-		/* check which side we are (client/server) and set next state accordingly */
-		if (cm_node->tcp_cntxt.client)
-			cm_node->state = NES_CM_STATE_CLOSING;
-		else {
-			/* we are the server side */
-			cm_node->state = NES_CM_STATE_CLOSE_WAIT;
-			/* since this is a self contained CM we don't wait for */
-			/* an APP to close us, just send final FIN immediately */
-			ret = send_fin(cm_node, NULL);
-			cm_node->state = NES_CM_STATE_LAST_ACK;
-		}
-	} else {
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-
-/**
  * form_cm_frame - get a free packet and build empty frame Use
  * node info to build.
  */
-static struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
-	void *options, u32 optionsize, void *data,
-	u32 datasize, u8 flags)
+static struct sk_buff *form_cm_frame(struct sk_buff *skb,
+	struct nes_cm_node *cm_node, void *options, u32 optionsize,
+	void *data, u32 datasize, u8 flags)
 {
 	struct tcphdr *tcph;
 	struct iphdr *iph;
@@ -332,10 +317,12 @@ static struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm
 		cm_node->tcp_cntxt.loc_seq_num++;
 		tcph->syn = 1;
 	} else
-		cm_node->tcp_cntxt.loc_seq_num += datasize;	/* data (no headers) */
+		cm_node->tcp_cntxt.loc_seq_num += datasize;
 
-	if (flags & SET_FIN)
+	if (flags & SET_FIN) {
+		cm_node->tcp_cntxt.loc_seq_num++;
 		tcph->fin = 1;
+	}
 
 	if (flags & SET_RST)
 		tcph->rst = 1;
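The SET_FIN change above now advances loc_seq_num when a FIN is sent, for the same reason a SYN does: a FIN occupies one unit of TCP sequence space, so the peer's ACK must be expected one past the last payload byte. A tiny standalone sketch with made-up numbers (nothing here is the driver's code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical values illustrating the arithmetic only. */
	uint32_t loc_seq_num = 1000;	/* next sequence number to send */
	uint32_t datasize = 0;		/* a bare FIN carries no payload */

	loc_seq_num += datasize;	/* payload bytes, if any */
	loc_seq_num += 1;		/* the FIN flag consumes one number */

	printf("expect the peer to ACK sequence %u\n", loc_seq_num);
	return 0;
}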
@@ -389,7 +376,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 	int close_when_complete)
 {
 	unsigned long flags;
-	struct nes_cm_core *cm_core;
+	struct nes_cm_core *cm_core = cm_node->cm_core;
 	struct nes_timer_entry *new_send;
 	int ret = 0;
 	u32 was_timer_set;
@@ -411,7 +398,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 	new_send->close_when_complete = close_when_complete;
 
 	if (type == NES_TIMER_TYPE_CLOSE) {
-		new_send->timetosend += (HZ/2);	/* TODO: decide on the correct value here */
+		new_send->timetosend += (HZ/10);
 		spin_lock_irqsave(&cm_node->recv_list_lock, flags);
 		list_add_tail(&new_send->list, &cm_node->recv_list);
 		spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
@@ -420,36 +407,28 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 	if (type == NES_TIMER_TYPE_SEND) {
 		new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
 		atomic_inc(&new_send->skb->users);
+		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+		cm_node->send_entry = new_send;
+		add_ref_cm_node(cm_node);
+		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+		new_send->timetosend = jiffies + NES_RETRY_TIMEOUT;
 
 		ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
 		if (ret != NETDEV_TX_OK) {
-			nes_debug(NES_DBG_CM, "Error sending packet %p (jiffies = %lu)\n",
-				new_send, jiffies);
+			nes_debug(NES_DBG_CM, "Error sending packet %p "
+				"(jiffies = %lu)\n", new_send, jiffies);
 			atomic_dec(&new_send->skb->users);
 			new_send->timetosend = jiffies;
 		} else {
 			cm_packets_sent++;
 			if (!send_retrans) {
+				cleanup_retrans_entry(cm_node);
 				if (close_when_complete)
-					rem_ref_cm_node(cm_node->cm_core, cm_node);
-				dev_kfree_skb_any(new_send->skb);
-				kfree(new_send);
+					rem_ref_cm_node(cm_core, cm_node);
 				return ret;
 			}
-			new_send->timetosend = jiffies + NES_RETRY_TIMEOUT;
 		}
-		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
-		list_add_tail(&new_send->list, &cm_node->retrans_list);
-		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-	}
-	if (type == NES_TIMER_TYPE_RECV) {
-		new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
-		new_send->timetosend = jiffies;
-		spin_lock_irqsave(&cm_node->recv_list_lock, flags);
-		list_add_tail(&new_send->list, &cm_node->recv_list);
-		spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
 	}
-	cm_core = cm_node->cm_core;
 
 	was_timer_set = timer_pending(&cm_core->tcp_timer);
 
@@ -476,23 +455,27 @@ static void nes_cm_timer_tick(unsigned long pass)
 	struct list_head *list_node, *list_node_temp;
 	struct nes_cm_core *cm_core = g_cm_core;
 	struct nes_qp *nesqp;
-	struct sk_buff *skb;
 	u32 settimer = 0;
 	int ret = NETDEV_TX_OK;
-	int node_done;
+	enum nes_cm_node_state last_state;
 
 	spin_lock_irqsave(&cm_core->ht_lock, flags);
 
-	list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
+	list_for_each_safe(list_node, list_core_temp,
+			&cm_core->connected_nodes) {
 		cm_node = container_of(list_node, struct nes_cm_node, list);
 		add_ref_cm_node(cm_node);
 		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
 		spin_lock_irqsave(&cm_node->recv_list_lock, flags);
-		list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
-			recv_entry = container_of(list_core, struct nes_timer_entry, list);
-			if ((time_after(recv_entry->timetosend, jiffies)) &&
-					(recv_entry->type == NES_TIMER_TYPE_CLOSE)) {
-				if (nexttimeout > recv_entry->timetosend || !settimer) {
+		list_for_each_safe(list_core, list_node_temp,
+				&cm_node->recv_list) {
+			recv_entry = container_of(list_core,
+					struct nes_timer_entry, list);
+			if (!recv_entry)
+				break;
+			if (time_after(recv_entry->timetosend, jiffies)) {
+				if (nexttimeout > recv_entry->timetosend ||
+						!settimer) {
 					nexttimeout = recv_entry->timetosend;
 					settimer = 1;
 				}
@@ -501,157 +484,143 @@ static void nes_cm_timer_tick(unsigned long pass)
 			list_del(&recv_entry->list);
 			cm_id = cm_node->cm_id;
 			spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
-			if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
-				nesqp = (struct nes_qp *)recv_entry->skb;
-				spin_lock_irqsave(&nesqp->lock, qplockflags);
-				if (nesqp->cm_id) {
-					nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d: "
-						"****** HIT A NES_TIMER_TYPE_CLOSE"
-						" with something to do!!! ******\n",
-						nesqp->hwqp.qp_id, cm_id,
-						atomic_read(&nesqp->refcount));
-					nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
-					nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
-					nesqp->ibqp_state = IB_QPS_ERR;
-					spin_unlock_irqrestore(&nesqp->lock, qplockflags);
-					nes_cm_disconn(nesqp);
-				} else {
-					spin_unlock_irqrestore(&nesqp->lock, qplockflags);
-					nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d:"
-						" ****** HIT A NES_TIMER_TYPE_CLOSE"
-						" with nothing to do!!! ******\n",
-						nesqp->hwqp.qp_id, cm_id,
-						atomic_read(&nesqp->refcount));
-					nes_rem_ref(&nesqp->ibqp);
-				}
-				if (cm_id)
-					cm_id->rem_ref(cm_id);
+			nesqp = (struct nes_qp *)recv_entry->skb;
+			spin_lock_irqsave(&nesqp->lock, qplockflags);
+			if (nesqp->cm_id) {
+				nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
+					"refcount = %d: HIT A "
+					"NES_TIMER_TYPE_CLOSE with something "
+					"to do!!!\n", nesqp->hwqp.qp_id, cm_id,
+					atomic_read(&nesqp->refcount));
+				nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+				nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+				nesqp->ibqp_state = IB_QPS_ERR;
+				spin_unlock_irqrestore(&nesqp->lock,
+					qplockflags);
+				nes_cm_disconn(nesqp);
+			} else {
+				spin_unlock_irqrestore(&nesqp->lock,
+					qplockflags);
+				nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, "
+					"refcount = %d: HIT A "
+					"NES_TIMER_TYPE_CLOSE with nothing "
+					"to do!!!\n", nesqp->hwqp.qp_id, cm_id,
+					atomic_read(&nesqp->refcount));
 			}
+			if (cm_id)
+				cm_id->rem_ref(cm_id);
+
 			kfree(recv_entry);
 			spin_lock_irqsave(&cm_node->recv_list_lock, flags);
 		}
 		spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
 
 		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
-		node_done = 0;
-		list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
-			if (node_done) {
-				break;
-			}
-			send_entry = container_of(list_core, struct nes_timer_entry, list);
+		do {
+			send_entry = cm_node->send_entry;
+			if (!send_entry)
+				continue;
 			if (time_after(send_entry->timetosend, jiffies)) {
 				if (cm_node->state != NES_CM_STATE_TSA) {
-					if ((nexttimeout > send_entry->timetosend) || !settimer) {
-						nexttimeout = send_entry->timetosend;
+					if ((nexttimeout >
+						send_entry->timetosend) ||
+						!settimer) {
+						nexttimeout =
+							send_entry->timetosend;
 						settimer = 1;
+						continue;
 					}
-					node_done = 1;
-					continue;
 				} else {
-					list_del(&send_entry->list);
-					skb = send_entry->skb;
-					spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-					dev_kfree_skb_any(skb);
-					kfree(send_entry);
-					spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+					free_retrans_entry(cm_node);
 					continue;
 				}
 			}
-			if (send_entry->type == NES_TIMER_NODE_CLEANUP) {
-				list_del(&send_entry->list);
-				spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-				kfree(send_entry);
-				spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
-				continue;
-			}
-			if ((send_entry->seq_num < cm_node->tcp_cntxt.rem_ack_num) ||
-					(cm_node->state == NES_CM_STATE_TSA) ||
-					(cm_node->state == NES_CM_STATE_CLOSED)) {
-				skb = send_entry->skb;
-				list_del(&send_entry->list);
-				spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-				kfree(send_entry);
-				dev_kfree_skb_any(skb);
-				spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+
+			if ((cm_node->state == NES_CM_STATE_TSA) ||
+				(cm_node->state == NES_CM_STATE_CLOSED)) {
+				free_retrans_entry(cm_node);
 				continue;
 			}
 
-			if (!send_entry->retranscount || !send_entry->retrycount) {
+			if (!send_entry->retranscount ||
+				!send_entry->retrycount) {
 				cm_packets_dropped++;
-				skb = send_entry->skb;
-				list_del(&send_entry->list);
-				spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-				dev_kfree_skb_any(skb);
-				kfree(send_entry);
-				if (cm_node->state == NES_CM_STATE_SYN_RCVD) {
-					/* this node never even generated an indication up to the cm */
+				last_state = cm_node->state;
+				cm_node->state = NES_CM_STATE_CLOSED;
+				free_retrans_entry(cm_node);
+				spin_unlock_irqrestore(
+					&cm_node->retrans_list_lock, flags);
+				if (last_state == NES_CM_STATE_SYN_RCVD)
 					rem_ref_cm_node(cm_core, cm_node);
-				} else {
-					cm_node->state = NES_CM_STATE_CLOSED;
-					create_event(cm_node, NES_CM_EVENT_ABORTED);
-				}
-				spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+				else
+					create_event(cm_node,
+						NES_CM_EVENT_ABORTED);
+				spin_lock_irqsave(&cm_node->retrans_list_lock,
+					flags);
 				continue;
 			}
-			/* this seems like the correct place, but leave send entry unprotected */
-			/* spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); */
 			atomic_inc(&send_entry->skb->users);
 			cm_packets_retrans++;
-			nes_debug(NES_DBG_CM, "Retransmitting send_entry %p for node %p,"
-				" jiffies = %lu, time to send = %lu, retranscount = %u, "
-				"send_entry->seq_num = 0x%08X, cm_node->tcp_cntxt.rem_ack_num = 0x%08X\n",
-				send_entry, cm_node, jiffies, send_entry->timetosend, send_entry->retranscount,
-				send_entry->seq_num, cm_node->tcp_cntxt.rem_ack_num);
-
-			spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+			nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
+				"for node %p, jiffies = %lu, time to send = "
+				"%lu, retranscount = %u, send_entry->seq_num = "
+				"0x%08X, cm_node->tcp_cntxt.rem_ack_num = "
+				"0x%08X\n", send_entry, cm_node, jiffies,
+				send_entry->timetosend,
+				send_entry->retranscount,
+				send_entry->seq_num,
+				cm_node->tcp_cntxt.rem_ack_num);
+
+			spin_unlock_irqrestore(&cm_node->retrans_list_lock,
+				flags);
 			ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev);
+			spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
 			if (ret != NETDEV_TX_OK) {
+				nes_debug(NES_DBG_CM, "rexmit failed for "
+					"node=%p\n", cm_node);
 				cm_packets_bounced++;
 				atomic_dec(&send_entry->skb->users);
 				send_entry->retrycount--;
 				nexttimeout = jiffies + NES_SHORT_TIME;
 				settimer = 1;
-				node_done = 1;
-				spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
 				continue;
 			} else {
 				cm_packets_sent++;
 			}
-			spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
-			list_del(&send_entry->list);
-			nes_debug(NES_DBG_CM, "Packet Sent: retrans count = %u, retry count = %u.\n",
-				send_entry->retranscount, send_entry->retrycount);
+			nes_debug(NES_DBG_CM, "Packet Sent: retrans count = "
+				"%u, retry count = %u.\n",
+				send_entry->retranscount,
+				send_entry->retrycount);
 			if (send_entry->send_retrans) {
 				send_entry->retranscount--;
-				send_entry->timetosend = jiffies + NES_RETRY_TIMEOUT;
-				if (nexttimeout > send_entry->timetosend || !settimer) {
+				send_entry->timetosend = jiffies +
+					NES_RETRY_TIMEOUT;
+				if (nexttimeout > send_entry->timetosend ||
+					!settimer) {
 					nexttimeout = send_entry->timetosend;
 					settimer = 1;
 				}
-				list_add(&send_entry->list, &cm_node->retrans_list);
-				continue;
 			} else {
 				int close_when_complete;
-				skb = send_entry->skb;
-				close_when_complete = send_entry->close_when_complete;
-				spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-				if (close_when_complete) {
-					BUG_ON(atomic_read(&cm_node->ref_count) == 1);
-					rem_ref_cm_node(cm_core, cm_node);
-				}
-				dev_kfree_skb_any(skb);
-				kfree(send_entry);
-				spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
-				continue;
+				close_when_complete =
+					send_entry->close_when_complete;
+				nes_debug(NES_DBG_CM, "cm_node=%p state=%d\n",
+					cm_node, cm_node->state);
+				free_retrans_entry(cm_node);
+				if (close_when_complete)
+					rem_ref_cm_node(cm_node->cm_core,
+						cm_node);
 			}
-		}
-		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-
-		rem_ref_cm_node(cm_core, cm_node);
+		} while (0);
 
+		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+		rem_ref_cm_node(cm_node->cm_core, cm_node);
 		spin_lock_irqsave(&cm_core->ht_lock, flags);
-		if (ret != NETDEV_TX_OK)
+		if (ret != NETDEV_TX_OK) {
+			nes_debug(NES_DBG_CM, "rexmit failed for cm_node=%p\n",
+				cm_node);
 			break;
+		}
 	}
 	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
 
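The timer-tick rework above replaces the per-node retrans_list walk with a single cm_node->send_entry slot, so at most one control packet per node is ever pending retransmission. The following standalone C sketch models only that bookkeeping idea; the types and the free_retrans_entry/tick names are hypothetical simplifications, not the driver's definitions:

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical, simplified model of "one pending send per node". */
struct timer_entry {
	unsigned long timetosend;
	int retranscount;
};

struct node {
	struct timer_entry *send_entry;	/* at most one in flight */
};

/* Mirrors the idea of free_retrans_entry(): drop the single pending entry. */
static void free_retrans_entry(struct node *n)
{
	free(n->send_entry);
	n->send_entry = NULL;
}

/* Mirrors the tick: nothing queued means nothing to retransmit. */
static int tick(struct node *n, unsigned long now)
{
	struct timer_entry *e = n->send_entry;

	if (!e)
		return 0;			/* no pending packet */
	if (e->timetosend > now)
		return 1;			/* not due yet, rearm timer */
	if (!e->retranscount--) {
		free_retrans_entry(n);		/* give up on this packet */
		return 0;
	}
	e->timetosend = now + 10;		/* retransmit and back off */
	return 1;
}

int main(void)
{
	struct node n = { NULL };

	return tick(&n, 0);
}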
@@ -667,14 +636,14 @@ static void nes_cm_timer_tick(unsigned long pass)
 /**
  * send_syn
  */
-static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
+static int send_syn(struct nes_cm_node *cm_node, u32 sendack,
+	struct sk_buff *skb)
 {
 	int ret;
 	int flags = SET_SYN;
-	struct sk_buff *skb;
 	char optionsbuffer[sizeof(struct option_mss) +
-		sizeof(struct option_windowscale) +
-		sizeof(struct option_base) + 1];
+		sizeof(struct option_windowscale) + sizeof(struct option_base) +
+		TCP_OPTIONS_PADDING];
 
 	int optionssize = 0;
 	/* Sending MSS option */
@@ -695,8 +664,7 @@ static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
 	options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
 	optionssize += sizeof(struct option_windowscale);
 
-	if (sendack && !(NES_DRV_OPT_SUPRESS_OPTION_BC & nes_drv_opt)
-		) {
+	if (sendack && !(NES_DRV_OPT_SUPRESS_OPTION_BC & nes_drv_opt)) {
 		options = (union all_known_options *)&optionsbuffer[optionssize];
 		options->as_base.optionnum = OPTION_NUMBER_WRITE0;
 		options->as_base.length = sizeof(struct option_base);
@@ -714,7 +682,8 @@ static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
 	options->as_end = OPTION_NUMBER_END;
 	optionssize += 1;
 
-	skb = get_free_pkt(cm_node);
+	if (!skb)
+		skb = get_free_pkt(cm_node);
 	if (!skb) {
 		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
 		return -1;
@@ -733,18 +702,18 @@ static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
 /**
  * send_reset
  */
-static int send_reset(struct nes_cm_node *cm_node)
+static int send_reset(struct nes_cm_node *cm_node, struct sk_buff *skb)
 {
 	int ret;
-	struct sk_buff *skb = get_free_pkt(cm_node);
 	int flags = SET_RST | SET_ACK;
 
+	if (!skb)
+		skb = get_free_pkt(cm_node);
 	if (!skb) {
 		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
 		return -1;
 	}
 
-	add_ref_cm_node(cm_node);
 	form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags);
 	ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 1);
 
@@ -755,10 +724,12 @@ static int send_reset(struct nes_cm_node *cm_node)
 /**
  * send_ack
  */
-static int send_ack(struct nes_cm_node *cm_node)
+static int send_ack(struct nes_cm_node *cm_node, struct sk_buff *skb)
 {
 	int ret;
-	struct sk_buff *skb = get_free_pkt(cm_node);
+
+	if (!skb)
+		skb = get_free_pkt(cm_node);
 
 	if (!skb) {
 		nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
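The send_syn/send_reset/send_ack hunks above all switch to the same convention: the caller may hand in an skb (typically the buffer of the packet just received), and a fresh one is allocated only when none was passed. A hedged, standalone sketch of that pattern — struct buf, get_free_buf and send_reply are hypothetical stand-ins, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical buffer type standing in for struct sk_buff. */
struct buf {
	size_t len;
};

static struct buf *get_free_buf(void)
{
	return calloc(1, sizeof(struct buf));
}

/* Reuse the caller's buffer when one is provided; otherwise allocate. */
static int send_reply(struct buf *b)
{
	if (!b)
		b = get_free_buf();
	if (!b) {
		fprintf(stderr, "Failed to get a free buffer\n");
		return -1;
	}
	/* ... build and transmit the frame here ... */
	free(b);		/* the helper consumes the buffer */
	return 0;
}

int main(void)
{
	return send_reply(NULL);
}

Reusing the received buffer avoids an extra allocation on the reply path and keeps ownership simple: whichever send helper is called consumes the skb.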
@@ -922,7 +893,8 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node
 	if (!cm_node || !cm_core)
 		return -EINVAL;
 
-	nes_debug(NES_DBG_CM, "Adding Node to Active Connection HT\n");
+	nes_debug(NES_DBG_CM, "Adding Node %p to Active Connection HT\n",
+		cm_node);
 
 	/* first, make an index into our hash table */
 	hashkey = make_hashkey(cm_node->loc_port, cm_node->loc_addr,
@@ -946,10 +918,35 @@ static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node
  * mini_cm_dec_refcnt_listen
  */
 static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
 	struct nes_cm_listener *listener, int free_hanging_nodes)
 {
 	int ret = 1;
 	unsigned long flags;
+	struct list_head *list_pos = NULL;
+	struct list_head *list_temp = NULL;
+	struct nes_cm_node *cm_node = NULL;
+
+	nes_debug(NES_DBG_CM, "attempting listener= %p free_nodes= %d, "
+		"refcnt=%d\n", listener, free_hanging_nodes,
+		atomic_read(&listener->ref_count));
+	/* free non-accelerated child nodes for this listener */
+	if (free_hanging_nodes) {
+		spin_lock_irqsave(&cm_core->ht_lock, flags);
+		list_for_each_safe(list_pos, list_temp,
+			&g_cm_core->connected_nodes) {
+			cm_node = container_of(list_pos, struct nes_cm_node,
+				list);
+			if ((cm_node->listener == listener) &&
+				(!cm_node->accelerated)) {
+				cleanup_retrans_entry(cm_node);
+				spin_unlock_irqrestore(&cm_core->ht_lock,
+					flags);
+				send_reset(cm_node, NULL);
+				spin_lock_irqsave(&cm_core->ht_lock, flags);
+			}
+		}
+		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+	}
 	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
 	if (!atomic_dec_return(&listener->ref_count)) {
 		list_del(&listener->list);
@@ -1067,18 +1064,18 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
 	cm_node->loc_port = cm_info->loc_port;
 	cm_node->rem_port = cm_info->rem_port;
 	cm_node->send_write0 = send_first;
-	nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT ":%x, rem = " NIPQUAD_FMT ":%x\n",
-		HIPQUAD(cm_node->loc_addr), cm_node->loc_port,
-		HIPQUAD(cm_node->rem_addr), cm_node->rem_port);
+	nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT
+		":%x, rem = " NIPQUAD_FMT ":%x\n",
+		HIPQUAD(cm_node->loc_addr), cm_node->loc_port,
+		HIPQUAD(cm_node->rem_addr), cm_node->rem_port);
 	cm_node->listener = listener;
 	cm_node->netdev = nesvnic->netdev;
 	cm_node->cm_id = cm_info->cm_id;
 	memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN);
 
-	nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n",
-		cm_node->listener, cm_node->cm_id);
+	nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n", cm_node->listener,
+		cm_node->cm_id);
 
-	INIT_LIST_HEAD(&cm_node->retrans_list);
 	spin_lock_init(&cm_node->retrans_list_lock);
 	INIT_LIST_HEAD(&cm_node->recv_list);
 	spin_lock_init(&cm_node->recv_list_lock);
@@ -1142,10 +1139,9 @@ static int add_ref_cm_node(struct nes_cm_node *cm_node)
  * rem_ref_cm_node - destroy an instance of a cm node
  */
 static int rem_ref_cm_node(struct nes_cm_core *cm_core,
 	struct nes_cm_node *cm_node)
 {
 	unsigned long flags, qplockflags;
-	struct nes_timer_entry *send_entry;
 	struct nes_timer_entry *recv_entry;
 	struct iw_cm_id *cm_id;
 	struct list_head *list_core, *list_node_temp;
@@ -1169,48 +1165,33 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
 		atomic_dec(&cm_node->listener->pend_accepts_cnt);
 		BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
 	}
-
-	spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
-	list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
-		send_entry = container_of(list_core, struct nes_timer_entry, list);
-		list_del(&send_entry->list);
-		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-		dev_kfree_skb_any(send_entry->skb);
-		kfree(send_entry);
-		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
-		continue;
-	}
-	spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
-
+	BUG_ON(cm_node->send_entry);
 	spin_lock_irqsave(&cm_node->recv_list_lock, flags);
 	list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
-		recv_entry = container_of(list_core, struct nes_timer_entry, list);
+		recv_entry = container_of(list_core, struct nes_timer_entry,
+			list);
 		list_del(&recv_entry->list);
 		cm_id = cm_node->cm_id;
 		spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
-		if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
-			nesqp = (struct nes_qp *)recv_entry->skb;
-			spin_lock_irqsave(&nesqp->lock, qplockflags);
-			if (nesqp->cm_id) {
-				nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
-					" with something to do!!! ******\n",
-					nesqp->hwqp.qp_id, cm_id);
-				nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
-				nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
-				nesqp->ibqp_state = IB_QPS_ERR;
-				spin_unlock_irqrestore(&nesqp->lock, qplockflags);
-				nes_cm_disconn(nesqp);
-			} else {
-				spin_unlock_irqrestore(&nesqp->lock, qplockflags);
-				nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
-					" with nothing to do!!! ******\n",
-					nesqp->hwqp.qp_id, cm_id);
-				nes_rem_ref(&nesqp->ibqp);
-			}
-			cm_id->rem_ref(cm_id);
-		} else if (recv_entry->type == NES_TIMER_TYPE_RECV) {
-			dev_kfree_skb_any(recv_entry->skb);
+		nesqp = (struct nes_qp *)recv_entry->skb;
+		spin_lock_irqsave(&nesqp->lock, qplockflags);
+		if (nesqp->cm_id) {
+			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A "
+				"NES_TIMER_TYPE_CLOSE with something to do!\n",
+				nesqp->hwqp.qp_id, cm_id);
+			nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+			nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+			nesqp->ibqp_state = IB_QPS_ERR;
+			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+			nes_cm_disconn(nesqp);
+		} else {
+			spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+			nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: HIT A "
+				"NES_TIMER_TYPE_CLOSE with nothing to do!\n",
+				nesqp->hwqp.qp_id, cm_id);
 		}
+		cm_id->rem_ref(cm_id);
+
 		kfree(recv_entry);
 		spin_lock_irqsave(&cm_node->recv_list_lock, flags);
 	}
@@ -1221,23 +1202,31 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
 	} else {
 		if (cm_node->apbvt_set && cm_node->nesvnic) {
 			nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
-				PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
-				NES_MANAGE_APBVT_DEL);
+				PCI_FUNC(
+				cm_node->nesvnic->nesdev->pcidev->devfn),
+				NES_MANAGE_APBVT_DEL);
 		}
 	}
 
-	kfree(cm_node);
 	atomic_dec(&cm_core->node_cnt);
 	atomic_inc(&cm_nodes_destroyed);
+	nesqp = cm_node->nesqp;
+	if (nesqp) {
+		nesqp->cm_node = NULL;
+		nes_rem_ref(&nesqp->ibqp);
+		cm_node->nesqp = NULL;
+	}
 
+	cm_node->freed = 1;
+	kfree(cm_node);
 	return 0;
 }
 
-
 /**
  * process_options
  */
-static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 optionsize, u32 syn_packet)
+static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
+	u32 optionsize, u32 syn_packet)
 {
 	u32 tmp;
 	u32 offset = 0;
@@ -1247,35 +1236,37 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 opti
 	while (offset < optionsize) {
 		all_options = (union all_known_options *)(optionsloc + offset);
 		switch (all_options->as_base.optionnum) {
 		case OPTION_NUMBER_END:
 			offset = optionsize;
 			break;
 		case OPTION_NUMBER_NONE:
 			offset += 1;
 			continue;
 		case OPTION_NUMBER_MSS:
-			nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n",
-				__func__,
+			nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d "
+				"Size: %d\n", __func__,
 				all_options->as_mss.length, offset, optionsize);
 			got_mss_option = 1;
 			if (all_options->as_mss.length != 4) {
 				return 1;
 			} else {
 				tmp = ntohs(all_options->as_mss.mss);
-				if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
+				if (tmp > 0 && tmp <
+					cm_node->tcp_cntxt.mss)
 					cm_node->tcp_cntxt.mss = tmp;
 			}
 			break;
 		case OPTION_NUMBER_WINDOW_SCALE:
-			cm_node->tcp_cntxt.snd_wscale = all_options->as_windowscale.shiftcount;
+			cm_node->tcp_cntxt.snd_wscale =
+				all_options->as_windowscale.shiftcount;
 			break;
 		case OPTION_NUMBER_WRITE0:
 			cm_node->send_write0 = 1;
 			break;
 		default:
 			nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n",
 				all_options->as_base.optionnum);
 			break;
 		}
 		offset += all_options->as_base.length;
 	}
@@ -1284,300 +1275,491 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 opti
1284 return 0; 1275 return 0;
1285} 1276}
1286 1277
1278static void drop_packet(struct sk_buff *skb)
1279{
1280 atomic_inc(&cm_accel_dropped_pkts);
1281 dev_kfree_skb_any(skb);
1282}
1287 1283
1288/** 1284static void handle_fin_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1289 * process_packet 1285 struct tcphdr *tcph)
1290 */
1291static int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
1292 struct nes_cm_core *cm_core)
1293{ 1286{
1294 int optionsize; 1287 atomic_inc(&cm_resets_recvd);
1295 int datasize; 1288 nes_debug(NES_DBG_CM, "Received FIN, cm_node = %p, state = %u. "
1296 int ret = 0; 1289 "refcnt=%d\n", cm_node, cm_node->state,
1297 struct tcphdr *tcph = tcp_hdr(skb); 1290 atomic_read(&cm_node->ref_count));
1298 u32 inc_sequence; 1291 cm_node->tcp_cntxt.rcv_nxt++;
1299 if (cm_node->state == NES_CM_STATE_SYN_SENT && tcph->syn) { 1292 cleanup_retrans_entry(cm_node);
1300 inc_sequence = ntohl(tcph->seq); 1293 switch (cm_node->state) {
1301 cm_node->tcp_cntxt.rcv_nxt = inc_sequence; 1294 case NES_CM_STATE_SYN_RCVD:
1295 case NES_CM_STATE_SYN_SENT:
1296 case NES_CM_STATE_ESTABLISHED:
1297 case NES_CM_STATE_MPAREQ_SENT:
1298 cm_node->state = NES_CM_STATE_LAST_ACK;
1299 send_fin(cm_node, skb);
1300 break;
1301 case NES_CM_STATE_FIN_WAIT1:
1302 cm_node->state = NES_CM_STATE_CLOSING;
1303 send_ack(cm_node, skb);
1304 break;
1305 case NES_CM_STATE_FIN_WAIT2:
1306 cm_node->state = NES_CM_STATE_TIME_WAIT;
1307 send_ack(cm_node, skb);
1308 cm_node->state = NES_CM_STATE_CLOSED;
1309 break;
1310 case NES_CM_STATE_TSA:
1311 default:
1312 nes_debug(NES_DBG_CM, "Error Rcvd FIN for node-%p state = %d\n",
1313 cm_node, cm_node->state);
1314 drop_packet(skb);
1315 break;
1302 } 1316 }
1317}
1303 1318
1304 if ((!tcph) || (cm_node->state == NES_CM_STATE_TSA)) {
1305 BUG_ON(!tcph);
1306 atomic_inc(&cm_accel_dropped_pkts);
1307 return -1;
1308 }
1309 1319
1310 if (tcph->rst) { 1320static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1311 atomic_inc(&cm_resets_recvd); 1321 struct tcphdr *tcph)
1312 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u. refcnt=%d\n", 1322{
1313 cm_node, cm_node->state, atomic_read(&cm_node->ref_count));
1314 switch (cm_node->state) {
1315 case NES_CM_STATE_LISTENING:
1316 rem_ref_cm_node(cm_core, cm_node);
1317 break;
1318 case NES_CM_STATE_TSA:
1319 case NES_CM_STATE_CLOSED:
1320 break;
1321 case NES_CM_STATE_SYN_RCVD:
1322 nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
1323 " remote 0x%08X:%04X, node state = %u\n",
1324 cm_node->loc_addr, cm_node->loc_port,
1325 cm_node->rem_addr, cm_node->rem_port,
1326 cm_node->state);
1327 rem_ref_cm_node(cm_core, cm_node);
1328 break;
1329 case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
1330 case NES_CM_STATE_ESTABLISHED:
1331 case NES_CM_STATE_MPAREQ_SENT:
1332 default:
1333 nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
1334 " remote 0x%08X:%04X, node state = %u refcnt=%d\n",
1335 cm_node->loc_addr, cm_node->loc_port,
1336 cm_node->rem_addr, cm_node->rem_port,
1337 cm_node->state, atomic_read(&cm_node->ref_count));
1338 /* create event */
1339 cm_node->state = NES_CM_STATE_CLOSED;
1340 1323
1341 create_event(cm_node, NES_CM_EVENT_ABORTED); 1324 int reset = 0; /* whether to send reset in case of err.. */
1342 break; 1325 atomic_inc(&cm_resets_recvd);
1326 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
1327 " refcnt=%d\n", cm_node, cm_node->state,
1328 atomic_read(&cm_node->ref_count));
1329 cleanup_retrans_entry(cm_node);
1330 switch (cm_node->state) {
1331 case NES_CM_STATE_SYN_SENT:
1332 case NES_CM_STATE_MPAREQ_SENT:
1333 nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
1334 "listener=%p state=%d\n", __func__, __LINE__, cm_node,
1335 cm_node->listener, cm_node->state);
1336 active_open_err(cm_node, skb, reset);
1337 break;
1338 /* For PASSIVE open states, remove the cm_node event */
1339 case NES_CM_STATE_ESTABLISHED:
1340 case NES_CM_STATE_SYN_RCVD:
1341 case NES_CM_STATE_LISTENING:
1342 nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
1343 passive_open_err(cm_node, skb, reset);
1344 break;
1345 case NES_CM_STATE_TSA:
1346 default:
1347 break;
1348 }
1349}
1343 1350
1351static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb,
1352 enum nes_cm_event_type type)
1353{
1354
1355 int ret;
1356 int datasize = skb->len;
1357 u8 *dataloc = skb->data;
1358 ret = parse_mpa(cm_node, dataloc, datasize);
1359 if (ret < 0) {
1360 nes_debug(NES_DBG_CM, "didn't like MPA Request\n");
1361 if (type == NES_CM_EVENT_CONNECTED) {
1362 nes_debug(NES_DBG_CM, "%s[%u] create abort for "
1363 "cm_node=%p listener=%p state=%d\n", __func__,
1364 __LINE__, cm_node, cm_node->listener,
1365 cm_node->state);
1366 active_open_err(cm_node, skb, 1);
1367 } else {
1368 passive_open_err(cm_node, skb, 1);
1344 } 1369 }
1345 return -1; 1370 } else {
1371 cleanup_retrans_entry(cm_node);
1372 dev_kfree_skb_any(skb);
1373 if (type == NES_CM_EVENT_CONNECTED)
1374 cm_node->state = NES_CM_STATE_TSA;
1375 create_event(cm_node, type);
1376
1377 }
1378 return ;
1379}
1380
1381static void indicate_pkt_err(struct nes_cm_node *cm_node, struct sk_buff *skb)
1382{
1383 switch (cm_node->state) {
1384 case NES_CM_STATE_SYN_SENT:
1385 case NES_CM_STATE_MPAREQ_SENT:
1386 nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
1387 "listener=%p state=%d\n", __func__, __LINE__, cm_node,
1388 cm_node->listener, cm_node->state);
1389 active_open_err(cm_node, skb, 1);
1390 break;
1391 case NES_CM_STATE_ESTABLISHED:
1392 case NES_CM_STATE_SYN_RCVD:
1393 passive_open_err(cm_node, skb, 1);
1394 break;
1395 case NES_CM_STATE_TSA:
1396 default:
1397 drop_packet(skb);
1346 } 1398 }
1399}
1400
1401static int check_syn(struct nes_cm_node *cm_node, struct tcphdr *tcph,
1402 struct sk_buff *skb)
1403{
1404 int err;
1405
1406 err = ((ntohl(tcph->ack_seq) == cm_node->tcp_cntxt.loc_seq_num))? 0 : 1;
1407 if (err)
1408 active_open_err(cm_node, skb, 1);
1409
1410 return err;
1411}
1412
1413static int check_seq(struct nes_cm_node *cm_node, struct tcphdr *tcph,
1414 struct sk_buff *skb)
1415{
1416 int err = 0;
1417 u32 seq;
1418 u32 ack_seq;
1419 u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
1420 u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
1421 u32 rcv_wnd;
1422 seq = ntohl(tcph->seq);
1423 ack_seq = ntohl(tcph->ack_seq);
1424 rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
1425 if (ack_seq != loc_seq_num)
1426 err = 1;
1427 else if ((seq + rcv_wnd) < rcv_nxt)
1428 err = 1;
1429 if (err) {
1430 nes_debug(NES_DBG_CM, "%s[%u] create abort for cm_node=%p "
1431 "listener=%p state=%d\n", __func__, __LINE__, cm_node,
1432 cm_node->listener, cm_node->state);
1433 indicate_pkt_err(cm_node, skb);
1434 nes_debug(NES_DBG_CM, "seq ERROR cm_node =%p seq=0x%08X "
1435 "rcv_nxt=0x%08X rcv_wnd=0x%x\n", cm_node, seq, rcv_nxt,
1436 rcv_wnd);
1437 }
1438 return err;
1439}
1440
1441/*
1442 * handle_syn_pkt() is for Passive node. The syn packet is received when a node
1443 * is created with a listener or it may comein as rexmitted packet which in
1444 * that case will be just dropped.
1445 */
1446
1447static void handle_syn_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1448 struct tcphdr *tcph)
1449{
1450 int ret;
1451 u32 inc_sequence;
1452 int optionsize;
1347 1453
1348 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); 1454 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
1455 skb_pull(skb, tcph->doff << 2);
1456 inc_sequence = ntohl(tcph->seq);
1349 1457
1350 skb_pull(skb, ip_hdr(skb)->ihl << 2); 1458 switch (cm_node->state) {
1459 case NES_CM_STATE_SYN_SENT:
1460 case NES_CM_STATE_MPAREQ_SENT:
1461 /* Rcvd syn on active open connection*/
1462 active_open_err(cm_node, skb, 1);
1463 break;
1464 case NES_CM_STATE_LISTENING:
1465 /* Passive OPEN */
1466 cm_node->accept_pend = 1;
1467 atomic_inc(&cm_node->listener->pend_accepts_cnt);
1468 if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
1469 cm_node->listener->backlog) {
1470 nes_debug(NES_DBG_CM, "drop syn due to backlog "
1471 "pressure \n");
1472 cm_backlog_drops++;
1473 passive_open_err(cm_node, skb, 0);
1474 break;
1475 }
1476 ret = handle_tcp_options(cm_node, tcph, skb, optionsize,
1477 1);
1478 if (ret) {
1479 passive_open_err(cm_node, skb, 0);
1480 /* drop pkt */
1481 break;
1482 }
1483 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
1484 BUG_ON(cm_node->send_entry);
1485 cm_node->state = NES_CM_STATE_SYN_RCVD;
1486 send_syn(cm_node, 1, skb);
1487 break;
1488 case NES_CM_STATE_TSA:
1489 case NES_CM_STATE_ESTABLISHED:
1490 case NES_CM_STATE_FIN_WAIT1:
1491 case NES_CM_STATE_FIN_WAIT2:
1492 case NES_CM_STATE_MPAREQ_RCVD:
1493 case NES_CM_STATE_LAST_ACK:
1494 case NES_CM_STATE_CLOSING:
1495 case NES_CM_STATE_UNKNOWN:
1496 case NES_CM_STATE_CLOSED:
1497 default:
1498 drop_packet(skb);
1499 break;
1500 }
1501}
1502
1503static void handle_synack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1504 struct tcphdr *tcph)
1505{
1506
1507 int ret;
1508 u32 inc_sequence;
1509 int optionsize;
1510
1511 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
1351 skb_pull(skb, tcph->doff << 2); 1512 skb_pull(skb, tcph->doff << 2);
1513 inc_sequence = ntohl(tcph->seq);
1514 switch (cm_node->state) {
1515 case NES_CM_STATE_SYN_SENT:
1516 /* active open */
1517 if (check_syn(cm_node, tcph, skb))
1518 return;
1519 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
1520 /* setup options */
1521 ret = handle_tcp_options(cm_node, tcph, skb, optionsize, 0);
1522 if (ret) {
1523 nes_debug(NES_DBG_CM, "cm_node=%p tcp_options failed\n",
1524 cm_node);
1525 break;
1526 }
1527 cleanup_retrans_entry(cm_node);
1528 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
1529 send_mpa_request(cm_node, skb);
1530 cm_node->state = NES_CM_STATE_MPAREQ_SENT;
1531 break;
1532 case NES_CM_STATE_MPAREQ_RCVD:
1533 /* passive open, so should not be here */
1534 passive_open_err(cm_node, skb, 1);
1535 break;
1536 case NES_CM_STATE_ESTABLISHED:
1537 case NES_CM_STATE_FIN_WAIT1:
1538 case NES_CM_STATE_FIN_WAIT2:
1539 case NES_CM_STATE_LAST_ACK:
1540 case NES_CM_STATE_TSA:
1541 case NES_CM_STATE_CLOSING:
1542 case NES_CM_STATE_UNKNOWN:
1543 case NES_CM_STATE_CLOSED:
1544 case NES_CM_STATE_MPAREQ_SENT:
1545 default:
1546 drop_packet(skb);
1547 break;
1548 }
1549}
1352 1550
1353 datasize = skb->len; 1551static void handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
1552 struct tcphdr *tcph)
1553{
1554 int datasize = 0;
1555 u32 inc_sequence;
1556 u32 rem_seq_ack;
1557 u32 rem_seq;
1558 if (check_seq(cm_node, tcph, skb))
1559 return;
1560
1561 skb_pull(skb, tcph->doff << 2);
1354 inc_sequence = ntohl(tcph->seq); 1562 inc_sequence = ntohl(tcph->seq);
1355 nes_debug(NES_DBG_CM, "datasize = %u, sequence = 0x%08X, ack_seq = 0x%08X," 1563 rem_seq = ntohl(tcph->seq);
1356 " rcv_nxt = 0x%08X Flags: %s %s.\n", 1564 rem_seq_ack = ntohl(tcph->ack_seq);
1357 datasize, inc_sequence, ntohl(tcph->ack_seq), 1565 datasize = skb->len;
1358 cm_node->tcp_cntxt.rcv_nxt, (tcph->syn ? "SYN":""), 1566
1359 (tcph->ack ? "ACK":"")); 1567 switch (cm_node->state) {
1360 1568 case NES_CM_STATE_SYN_RCVD:
1361 if (!tcph->syn && (inc_sequence != cm_node->tcp_cntxt.rcv_nxt) 1569 /* Passive OPEN */
1362 ) { 1570 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
1363 nes_debug(NES_DBG_CM, "dropping packet, datasize = %u, sequence = 0x%08X," 1571 cm_node->state = NES_CM_STATE_ESTABLISHED;
1364 " ack_seq = 0x%08X, rcv_nxt = 0x%08X Flags: %s.\n", 1572 if (datasize) {
1365 datasize, inc_sequence, ntohl(tcph->ack_seq), 1573 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
1366 cm_node->tcp_cntxt.rcv_nxt, (tcph->ack ? "ACK":"")); 1574 cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
1367 if (cm_node->state == NES_CM_STATE_LISTENING) { 1575 handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_MPA_REQ);
1368 rem_ref_cm_node(cm_core, cm_node); 1576 } else { /* rcvd ACK only */
1577 dev_kfree_skb_any(skb);
1578 cleanup_retrans_entry(cm_node);
1579 }
1580 break;
1581 case NES_CM_STATE_ESTABLISHED:
1582 /* Passive OPEN */
1583 /* We expect only an MPA frame to be received here */
1584 if (datasize) {
1585 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
1586 cm_node->state = NES_CM_STATE_MPAREQ_RCVD;
1587 handle_rcv_mpa(cm_node, skb,
1588 NES_CM_EVENT_MPA_REQ);
1589 } else
1590 drop_packet(skb);
1591 break;
1592 case NES_CM_STATE_MPAREQ_SENT:
1593 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
1594 if (datasize) {
1595 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
1596 handle_rcv_mpa(cm_node, skb, NES_CM_EVENT_CONNECTED);
1597 } else { /* Could be just an ack pkt.. */
1598 cleanup_retrans_entry(cm_node);
1599 dev_kfree_skb_any(skb);
1369 } 1600 }
1370 return -1; 1601 break;
1602 case NES_CM_STATE_FIN_WAIT1:
1603 case NES_CM_STATE_SYN_SENT:
1604 case NES_CM_STATE_FIN_WAIT2:
1605 case NES_CM_STATE_TSA:
1606 case NES_CM_STATE_CLOSED:
1607 case NES_CM_STATE_MPAREQ_RCVD:
1608 case NES_CM_STATE_LAST_ACK:
1609 case NES_CM_STATE_CLOSING:
1610 case NES_CM_STATE_UNKNOWN:
1611 default:
1612 drop_packet(skb);
1613 break;
1371 } 1614 }
1615}
1372 1616
1373 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
1374 1617
1375 1618
1619static int handle_tcp_options(struct nes_cm_node *cm_node, struct tcphdr *tcph,
1620 struct sk_buff *skb, int optionsize, int passive)
1621{
1622 u8 *optionsloc = (u8 *)&tcph[1];
1376 if (optionsize) { 1623 if (optionsize) {
1377 u8 *optionsloc = (u8 *)&tcph[1]; 1624 if (process_options(cm_node, optionsloc, optionsize,
1378 if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) { 1625 (u32)tcph->syn)) {
1379 nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __func__, cm_node); 1626 nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n",
1380 send_reset(cm_node); 1627 __func__, cm_node);
1381 if (cm_node->state != NES_CM_STATE_SYN_SENT) 1628 if (passive)
1382 rem_ref_cm_node(cm_core, cm_node); 1629 passive_open_err(cm_node, skb, 0);
1383 return 0; 1630 else
1631 active_open_err(cm_node, skb, 0);
1632 return 1;
1384 } 1633 }
1385 } else if (tcph->syn) 1634 }
1386 cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
1387 1635
1388 cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) << 1636 cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
1389 cm_node->tcp_cntxt.snd_wscale; 1637 cm_node->tcp_cntxt.snd_wscale;
1390 1638
1391 if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) { 1639 if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
1392 cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd; 1640 cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
1393 } 1641 return 0;
1642}
1394 1643
1395 if (tcph->ack) { 1644/*
1396 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); 1645 * active_open_err() sends a reset() if the flag is set.
1397 switch (cm_node->state) { 1646 * It also generates an ABORT event.
1398 case NES_CM_STATE_SYN_RCVD: 1647 */
1399 case NES_CM_STATE_SYN_SENT:
1400 /* read and stash current sequence number */
1401 if (cm_node->tcp_cntxt.rem_ack_num != cm_node->tcp_cntxt.loc_seq_num) {
1402 nes_debug(NES_DBG_CM, "ERROR - cm_node->tcp_cntxt.rem_ack_num !="
1403 " cm_node->tcp_cntxt.loc_seq_num\n");
1404 send_reset(cm_node);
1405 return 0;
1406 }
1407 if (cm_node->state == NES_CM_STATE_SYN_SENT)
1408 cm_node->state = NES_CM_STATE_ONE_SIDE_ESTABLISHED;
1409 else {
1410 cm_node->state = NES_CM_STATE_ESTABLISHED;
1411 }
1412 break;
1413 case NES_CM_STATE_LAST_ACK:
1414 cm_node->state = NES_CM_STATE_CLOSED;
1415 break;
1416 case NES_CM_STATE_FIN_WAIT1:
1417 cm_node->state = NES_CM_STATE_FIN_WAIT2;
1418 break;
1419 case NES_CM_STATE_CLOSING:
1420 cm_node->state = NES_CM_STATE_TIME_WAIT;
1421 /* need to schedule this to happen in 2MSL timeouts */
1422 cm_node->state = NES_CM_STATE_CLOSED;
1423 break;
1424 case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
1425 case NES_CM_STATE_ESTABLISHED:
1426 case NES_CM_STATE_MPAREQ_SENT:
1427 case NES_CM_STATE_CLOSE_WAIT:
1428 case NES_CM_STATE_TIME_WAIT:
1429 case NES_CM_STATE_CLOSED:
1430 break;
1431 case NES_CM_STATE_LISTENING:
1432 nes_debug(NES_DBG_CM, "Received an ACK on a listening port (SYN %d)\n", tcph->syn);
1433 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
1434 send_reset(cm_node);
1435 /* send_reset bumps refcount, this should have been a new node */
1436 rem_ref_cm_node(cm_core, cm_node);
1437 return -1;
1438 break;
1439 case NES_CM_STATE_TSA:
1440 nes_debug(NES_DBG_CM, "Received a packet with the ack bit set while in TSA state\n");
1441 break;
1442 case NES_CM_STATE_UNKNOWN:
1443 case NES_CM_STATE_INITED:
1444 case NES_CM_STATE_ACCEPTING:
1445 case NES_CM_STATE_FIN_WAIT2:
1446 default:
1447 nes_debug(NES_DBG_CM, "Received ack from unknown state: %x\n",
1448 cm_node->state);
1449 send_reset(cm_node);
1450 break;
1451 }
1452 }
1453 1648
1454 if (tcph->syn) { 1649static void active_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
1455 if (cm_node->state == NES_CM_STATE_LISTENING) { 1650 int reset)
1456 /* do not exceed backlog */ 1651{
1457 atomic_inc(&cm_node->listener->pend_accepts_cnt); 1652 cleanup_retrans_entry(cm_node);
1458 if (atomic_read(&cm_node->listener->pend_accepts_cnt) > 1653 if (reset) {
1459 cm_node->listener->backlog) { 1654 nes_debug(NES_DBG_CM, "ERROR active err called for cm_node=%p, "
1460 nes_debug(NES_DBG_CM, "drop syn due to backlog pressure \n"); 1655 "state=%d\n", cm_node, cm_node->state);
1461 cm_backlog_drops++; 1656 add_ref_cm_node(cm_node);
1462 atomic_dec(&cm_node->listener->pend_accepts_cnt); 1657 send_reset(cm_node, skb);
1463 rem_ref_cm_node(cm_core, cm_node); 1658 } else
1464 return 0; 1659 dev_kfree_skb_any(skb);
1465 }
1466 cm_node->accept_pend = 1;
1467 1660
1468 } 1661 cm_node->state = NES_CM_STATE_CLOSED;
1469 if (datasize == 0) 1662 create_event(cm_node, NES_CM_EVENT_ABORTED);
1470 cm_node->tcp_cntxt.rcv_nxt ++; 1663}
1471 1664
1472 if (cm_node->state == NES_CM_STATE_LISTENING) { 1665/*
1473 cm_node->state = NES_CM_STATE_SYN_RCVD; 1666 * passive_open_err() will either do a reset() or will free up the skb and
1474 send_syn(cm_node, 1); 1667 * remove the cm_node.
1475 } 1668 */
1476 if (cm_node->state == NES_CM_STATE_ONE_SIDE_ESTABLISHED) {
1477 cm_node->state = NES_CM_STATE_ESTABLISHED;
1478 /* send final handshake ACK */
1479 ret = send_ack(cm_node);
1480 if (ret < 0)
1481 return ret;
1482 1669
1483 cm_node->state = NES_CM_STATE_MPAREQ_SENT; 1670static void passive_open_err(struct nes_cm_node *cm_node, struct sk_buff *skb,
1484 ret = send_mpa_request(cm_node); 1671 int reset)
1485 if (ret < 0) 1672{
1486 return ret; 1673 cleanup_retrans_entry(cm_node);
1487 } 1674 cm_node->state = NES_CM_STATE_CLOSED;
1675 if (reset) {
1676 nes_debug(NES_DBG_CM, "passive_open_err sending RST for "
1677 "cm_node=%p state =%d\n", cm_node, cm_node->state);
1678 send_reset(cm_node, skb);
1679 } else {
1680 dev_kfree_skb_any(skb);
1681 rem_ref_cm_node(cm_node->cm_core, cm_node);
1488 } 1682 }
1683}
1489 1684
1490 if (tcph->fin) { 1685/*
1491 cm_node->tcp_cntxt.rcv_nxt++; 1686 * free_retrans_entry() assumes that the retrans_list_lock has
1492 switch (cm_node->state) { 1687 * been acquired before calling.
1493 case NES_CM_STATE_SYN_RCVD: 1688 */
1494 case NES_CM_STATE_SYN_SENT: 1689static void free_retrans_entry(struct nes_cm_node *cm_node)
1495 case NES_CM_STATE_ONE_SIDE_ESTABLISHED: 1690{
1496 case NES_CM_STATE_ESTABLISHED: 1691 struct nes_timer_entry *send_entry;
1497 case NES_CM_STATE_ACCEPTING: 1692 send_entry = cm_node->send_entry;
1498 case NES_CM_STATE_MPAREQ_SENT: 1693 if (send_entry) {
1499 cm_node->state = NES_CM_STATE_CLOSE_WAIT; 1694 cm_node->send_entry = NULL;
1500 cm_node->state = NES_CM_STATE_LAST_ACK; 1695 dev_kfree_skb_any(send_entry->skb);
1501 ret = send_fin(cm_node, NULL); 1696 kfree(send_entry);
1502 break; 1697 rem_ref_cm_node(cm_node->cm_core, cm_node);
1503 case NES_CM_STATE_FIN_WAIT1:
1504 cm_node->state = NES_CM_STATE_CLOSING;
1505 ret = send_ack(cm_node);
1506 break;
1507 case NES_CM_STATE_FIN_WAIT2:
1508 cm_node->state = NES_CM_STATE_TIME_WAIT;
1509 cm_node->tcp_cntxt.loc_seq_num ++;
1510 ret = send_ack(cm_node);
1511 /* need to schedule this to happen in 2MSL timeouts */
1512 cm_node->state = NES_CM_STATE_CLOSED;
1513 break;
1514 case NES_CM_STATE_CLOSE_WAIT:
1515 case NES_CM_STATE_LAST_ACK:
1516 case NES_CM_STATE_CLOSING:
1517 case NES_CM_STATE_TSA:
1518 default:
1519 nes_debug(NES_DBG_CM, "Received a fin while in %x state\n",
1520 cm_node->state);
1521 ret = -EINVAL;
1522 break;
1523 }
1524 } 1698 }
1699}
1525 1700
1526 if (datasize) { 1701static void cleanup_retrans_entry(struct nes_cm_node *cm_node)
1527 u8 *dataloc = skb->data; 1702{
1528 /* figure out what state we are in and handle transition to next state */ 1703 unsigned long flags;
1529 switch (cm_node->state) {
1530 case NES_CM_STATE_LISTENING:
1531 case NES_CM_STATE_SYN_RCVD:
1532 case NES_CM_STATE_SYN_SENT:
1533 case NES_CM_STATE_FIN_WAIT1:
1534 case NES_CM_STATE_FIN_WAIT2:
1535 case NES_CM_STATE_CLOSE_WAIT:
1536 case NES_CM_STATE_LAST_ACK:
1537 case NES_CM_STATE_CLOSING:
1538 break;
1539 case NES_CM_STATE_MPAREQ_SENT:
1540 /* recv the mpa res frame, ret=frame len (incl priv data) */
1541 ret = parse_mpa(cm_node, dataloc, datasize);
1542 if (ret < 0)
1543 break;
1544 /* set the req frame payload len in skb */
1545 /* we are done handling this state, set node to a TSA state */
1546 cm_node->state = NES_CM_STATE_TSA;
1547 send_ack(cm_node);
1548 create_event(cm_node, NES_CM_EVENT_CONNECTED);
1549 break;
1550
1551 case NES_CM_STATE_ESTABLISHED:
1552 /* we are expecting an MPA req frame */
1553 ret = parse_mpa(cm_node, dataloc, datasize);
1554 if (ret < 0) {
1555 break;
1556 }
1557 cm_node->state = NES_CM_STATE_TSA;
1558 send_ack(cm_node);
1559 /* we got a valid MPA request, create an event */
1560 create_event(cm_node, NES_CM_EVENT_MPA_REQ);
1561 break;
1562 case NES_CM_STATE_TSA:
1563 handle_exception_pkt(cm_node, skb);
1564 break;
1565 case NES_CM_STATE_UNKNOWN:
1566 case NES_CM_STATE_INITED:
1567 default:
1568 ret = -1;
1569 }
1570 }
1571 1704
1572 return ret; 1705 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
1706 free_retrans_entry(cm_node);
1707 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1573} 1708}
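Aside: the pair above follows a common split — free_retrans_entry() must run with retrans_list_lock already held, and cleanup_retrans_entry() is the lock-taking wrapper around it. A reduced userspace sketch of the same shape (pthread mutex in place of the irq-safe spinlock, hypothetical demo_* names; not the driver's code):

#include <pthread.h>
#include <stdlib.h>

struct demo_node {
	pthread_mutex_t retrans_lock;
	void *send_entry;	/* pending retransmit buffer, if any */
};

/* Caller must hold retrans_lock, mirroring free_retrans_entry(). */
static void demo_free_retrans_entry(struct demo_node *n)
{
	void *entry = n->send_entry;

	if (entry) {
		n->send_entry = NULL;
		free(entry);	/* stands in for freeing the skb + timer entry */
	}
}

/* Lock-taking wrapper, mirroring cleanup_retrans_entry(). */
static void demo_cleanup_retrans_entry(struct demo_node *n)
{
	pthread_mutex_lock(&n->retrans_lock);
	demo_free_retrans_entry(n);
	pthread_mutex_unlock(&n->retrans_lock);
}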
1574 1709
1710/**
1711 * process_packet
1712 * Dispatch the skb to the matching handler; the handler consumes or frees it.
1713 */
1714static void process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
1715 struct nes_cm_core *cm_core)
1716{
1717 enum nes_tcpip_pkt_type pkt_type = NES_PKT_TYPE_UNKNOWN;
1718 struct tcphdr *tcph = tcp_hdr(skb);
1719 skb_pull(skb, ip_hdr(skb)->ihl << 2);
1720
1721 nes_debug(NES_DBG_CM, "process_packet: cm_node=%p state =%d syn=%d "
1722 "ack=%d rst=%d fin=%d\n", cm_node, cm_node->state, tcph->syn,
1723 tcph->ack, tcph->rst, tcph->fin);
1724
1725 if (tcph->rst)
1726 pkt_type = NES_PKT_TYPE_RST;
1727 else if (tcph->syn) {
1728 pkt_type = NES_PKT_TYPE_SYN;
1729 if (tcph->ack)
1730 pkt_type = NES_PKT_TYPE_SYNACK;
1731 } else if (tcph->fin)
1732 pkt_type = NES_PKT_TYPE_FIN;
1733 else if (tcph->ack)
1734 pkt_type = NES_PKT_TYPE_ACK;
1735
1736 switch (pkt_type) {
1737 case NES_PKT_TYPE_SYN:
1738 handle_syn_pkt(cm_node, skb, tcph);
1739 break;
1740 case NES_PKT_TYPE_SYNACK:
1741 handle_synack_pkt(cm_node, skb, tcph);
1742 break;
1743 case NES_PKT_TYPE_ACK:
1744 handle_ack_pkt(cm_node, skb, tcph);
1745 break;
1746 case NES_PKT_TYPE_RST:
1747 handle_rst_pkt(cm_node, skb, tcph);
1748 break;
1749 case NES_PKT_TYPE_FIN:
1750 handle_fin_pkt(cm_node, skb, tcph);
1751 break;
1752 default:
1753 drop_packet(skb);
1754 break;
1755 }
1756}
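Aside: process_packet() above reduces the TCP flag bits to one packet type before dispatching, with RST taking priority, SYN+ACK beating a plain SYN, and FIN checked before a bare ACK. The same precedence restated as a standalone helper (hypothetical demo_* names; illustrative only, not the driver's enum):

#include <stdbool.h>

enum demo_pkt_type {
	DEMO_PKT_UNKNOWN,
	DEMO_PKT_RST,
	DEMO_PKT_SYN,
	DEMO_PKT_SYNACK,
	DEMO_PKT_FIN,
	DEMO_PKT_ACK,
};

/* Same ordering as the if/else chain in process_packet(). */
static enum demo_pkt_type demo_classify(bool syn, bool ack, bool rst, bool fin)
{
	if (rst)
		return DEMO_PKT_RST;
	if (syn)
		return ack ? DEMO_PKT_SYNACK : DEMO_PKT_SYN;
	if (fin)
		return DEMO_PKT_FIN;
	if (ack)
		return DEMO_PKT_ACK;
	return DEMO_PKT_UNKNOWN;
}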
1575 1757
1576/** 1758/**
1577 * mini_cm_listen - create a listen node with params 1759 * mini_cm_listen - create a listen node with params
1578 */ 1760 */
1579static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, 1761static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
1580 struct nes_vnic *nesvnic, struct nes_cm_info *cm_info) 1762 struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
1581{ 1763{
1582 struct nes_cm_listener *listener; 1764 struct nes_cm_listener *listener;
1583 unsigned long flags; 1765 unsigned long flags;
@@ -1644,37 +1826,36 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
1644/** 1826/**
1645 * mini_cm_connect - make a connection node with params 1827 * mini_cm_connect - make a connection node with params
1646 */ 1828 */
1647static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, 1829struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1648 struct nes_vnic *nesvnic, 1830 struct nes_vnic *nesvnic, u16 private_data_len,
1649 struct ietf_mpa_frame *mpa_frame, 1831 void *private_data, struct nes_cm_info *cm_info)
1650 struct nes_cm_info *cm_info)
1651{ 1832{
1652 int ret = 0; 1833 int ret = 0;
1653 struct nes_cm_node *cm_node; 1834 struct nes_cm_node *cm_node;
1654 struct nes_cm_listener *loopbackremotelistener; 1835 struct nes_cm_listener *loopbackremotelistener;
1655 struct nes_cm_node *loopbackremotenode; 1836 struct nes_cm_node *loopbackremotenode;
1656 struct nes_cm_info loopback_cm_info; 1837 struct nes_cm_info loopback_cm_info;
1657 1838 u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) + private_data_len;
1658 u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) + 1839 struct ietf_mpa_frame *mpa_frame = NULL;
1659 ntohs(mpa_frame->priv_data_len);
1660
1661 cm_info->loc_addr = htonl(cm_info->loc_addr);
1662 cm_info->rem_addr = htonl(cm_info->rem_addr);
1663 cm_info->loc_port = htons(cm_info->loc_port);
1664 cm_info->rem_port = htons(cm_info->rem_port);
1665 1840
1666 /* create a CM connection node */ 1841 /* create a CM connection node */
1667 cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL); 1842 cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL);
1668 if (!cm_node) 1843 if (!cm_node)
1669 return NULL; 1844 return NULL;
1845 mpa_frame = &cm_node->mpa_frame;
1846 strcpy(mpa_frame->key, IEFT_MPA_KEY_REQ);
1847 mpa_frame->flags = IETF_MPA_FLAGS_CRC;
1848 mpa_frame->rev = IETF_MPA_VERSION;
1849 mpa_frame->priv_data_len = htons(private_data_len);
1670 1850
1671 /* set our node side to client (active) side */ 1851 /* set our node side to client (active) side */
1672 cm_node->tcp_cntxt.client = 1; 1852 cm_node->tcp_cntxt.client = 1;
1673 cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; 1853 cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
1674 1854
1675 if (cm_info->loc_addr == cm_info->rem_addr) { 1855 if (cm_info->loc_addr == cm_info->rem_addr) {
1676 loopbackremotelistener = find_listener(cm_core, cm_node->rem_addr, 1856 loopbackremotelistener = find_listener(cm_core,
1677 cm_node->rem_port, NES_CM_LISTENER_ACTIVE_STATE); 1857 ntohl(nesvnic->local_ipaddr), cm_node->rem_port,
1858 NES_CM_LISTENER_ACTIVE_STATE);
1678 if (loopbackremotelistener == NULL) { 1859 if (loopbackremotelistener == NULL) {
1679 create_event(cm_node, NES_CM_EVENT_ABORTED); 1860 create_event(cm_node, NES_CM_EVENT_ABORTED);
1680 } else { 1861 } else {
@@ -1683,26 +1864,35 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1683 loopback_cm_info.loc_port = cm_info->rem_port; 1864 loopback_cm_info.loc_port = cm_info->rem_port;
1684 loopback_cm_info.rem_port = cm_info->loc_port; 1865 loopback_cm_info.rem_port = cm_info->loc_port;
1685 loopback_cm_info.cm_id = loopbackremotelistener->cm_id; 1866 loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
1686 loopbackremotenode = make_cm_node(cm_core, nesvnic, &loopback_cm_info, 1867 loopbackremotenode = make_cm_node(cm_core, nesvnic,
1687 loopbackremotelistener); 1868 &loopback_cm_info, loopbackremotelistener);
1688 loopbackremotenode->loopbackpartner = cm_node; 1869 loopbackremotenode->loopbackpartner = cm_node;
1689 loopbackremotenode->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; 1870 loopbackremotenode->tcp_cntxt.rcv_wscale =
1871 NES_CM_DEFAULT_RCV_WND_SCALE;
1690 cm_node->loopbackpartner = loopbackremotenode; 1872 cm_node->loopbackpartner = loopbackremotenode;
1691 memcpy(loopbackremotenode->mpa_frame_buf, &mpa_frame->priv_data, 1873 memcpy(loopbackremotenode->mpa_frame_buf, private_data,
1692 mpa_frame_size); 1874 private_data_len);
1693 loopbackremotenode->mpa_frame_size = mpa_frame_size - 1875 loopbackremotenode->mpa_frame_size = private_data_len;
1694 sizeof(struct ietf_mpa_frame);
1695 1876
1696 /* we are done handling this state, set node to a TSA state */ 1877 /* we are done handling this state. */
1878 /* set node to a TSA state */
1697 cm_node->state = NES_CM_STATE_TSA; 1879 cm_node->state = NES_CM_STATE_TSA;
1698 cm_node->tcp_cntxt.rcv_nxt = loopbackremotenode->tcp_cntxt.loc_seq_num; 1880 cm_node->tcp_cntxt.rcv_nxt =
1699 loopbackremotenode->tcp_cntxt.rcv_nxt = cm_node->tcp_cntxt.loc_seq_num; 1881 loopbackremotenode->tcp_cntxt.loc_seq_num;
1700 cm_node->tcp_cntxt.max_snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd; 1882 loopbackremotenode->tcp_cntxt.rcv_nxt =
1701 loopbackremotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd; 1883 cm_node->tcp_cntxt.loc_seq_num;
1702 cm_node->tcp_cntxt.snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd; 1884 cm_node->tcp_cntxt.max_snd_wnd =
1703 loopbackremotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd; 1885 loopbackremotenode->tcp_cntxt.rcv_wnd;
1704 cm_node->tcp_cntxt.snd_wscale = loopbackremotenode->tcp_cntxt.rcv_wscale; 1886 loopbackremotenode->tcp_cntxt.max_snd_wnd =
1705 loopbackremotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale; 1887 cm_node->tcp_cntxt.rcv_wnd;
1888 cm_node->tcp_cntxt.snd_wnd =
1889 loopbackremotenode->tcp_cntxt.rcv_wnd;
1890 loopbackremotenode->tcp_cntxt.snd_wnd =
1891 cm_node->tcp_cntxt.rcv_wnd;
1892 cm_node->tcp_cntxt.snd_wscale =
1893 loopbackremotenode->tcp_cntxt.rcv_wscale;
1894 loopbackremotenode->tcp_cntxt.snd_wscale =
1895 cm_node->tcp_cntxt.rcv_wscale;
1706 1896
1707 create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ); 1897 create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ);
1708 } 1898 }
@@ -1712,16 +1902,29 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1712 /* set our node side to client (active) side */ 1902 /* set our node side to client (active) side */
1713 cm_node->tcp_cntxt.client = 1; 1903 cm_node->tcp_cntxt.client = 1;
1714 /* init our MPA frame ptr */ 1904 /* init our MPA frame ptr */
1715 memcpy(&cm_node->mpa_frame, mpa_frame, mpa_frame_size); 1905 memcpy(mpa_frame->priv_data, private_data, private_data_len);
1906
1716 cm_node->mpa_frame_size = mpa_frame_size; 1907 cm_node->mpa_frame_size = mpa_frame_size;
1717 1908
1718 /* send a syn and goto syn sent state */ 1909 /* send a syn and goto syn sent state */
1719 cm_node->state = NES_CM_STATE_SYN_SENT; 1910 cm_node->state = NES_CM_STATE_SYN_SENT;
1720 ret = send_syn(cm_node, 0); 1911 ret = send_syn(cm_node, 0, NULL);
1912
1913 if (ret) {
1914 /* error in sending the syn free up the cm_node struct */
1915 nes_debug(NES_DBG_CM, "Api - connect() FAILED: dest "
1916 "addr=0x%08X, port=0x%04x, cm_node=%p, cm_id = %p.\n",
1917 cm_node->rem_addr, cm_node->rem_port, cm_node,
1918 cm_node->cm_id);
1919 rem_ref_cm_node(cm_node->cm_core, cm_node);
1920 cm_node = NULL;
1921 }
1721 1922
1722 nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X, port=0x%04x," 1923 if (cm_node)
1723 " cm_node=%p, cm_id = %p.\n", 1924 nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X,"
1724 cm_node->rem_addr, cm_node->rem_port, cm_node, cm_node->cm_id); 1925 "port=0x%04x, cm_node=%p, cm_id = %p.\n",
1926 cm_node->rem_addr, cm_node->rem_port, cm_node,
1927 cm_node->cm_id);
1725 1928
1726 return cm_node; 1929 return cm_node;
1727} 1930}
@@ -1731,8 +1934,8 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1731 * mini_cm_accept - accept a connection 1934 * mini_cm_accept - accept a connection
1732 * This function is never called 1935 * This function is never called
1733 */ 1936 */
1734static int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame, 1937static int mini_cm_accept(struct nes_cm_core *cm_core,
1735 struct nes_cm_node *cm_node) 1938 struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node)
1736{ 1939{
1737 return 0; 1940 return 0;
1738} 1941}
@@ -1742,32 +1945,26 @@ static int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mp
1742 * mini_cm_reject - reject and teardown a connection 1945 * mini_cm_reject - reject and teardown a connection
1743 */ 1946 */
1744static int mini_cm_reject(struct nes_cm_core *cm_core, 1947static int mini_cm_reject(struct nes_cm_core *cm_core,
1745 struct ietf_mpa_frame *mpa_frame, 1948 struct ietf_mpa_frame *mpa_frame, struct nes_cm_node *cm_node)
1746 struct nes_cm_node *cm_node)
1747{ 1949{
1748 int ret = 0; 1950 int ret = 0;
1749 struct sk_buff *skb;
1750 u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) +
1751 ntohs(mpa_frame->priv_data_len);
1752 1951
1753 skb = get_free_pkt(cm_node); 1952 nes_debug(NES_DBG_CM, "%s cm_node=%p type=%d state=%d\n",
1754 if (!skb) { 1953 __func__, cm_node, cm_node->tcp_cntxt.client, cm_node->state);
1755 nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
1756 return -1;
1757 }
1758
1759 /* send an MPA Request frame */
1760 form_cm_frame(skb, cm_node, NULL, 0, mpa_frame, mpa_frame_size, SET_ACK | SET_FIN);
1761 ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
1762 1954
1955 if (cm_node->tcp_cntxt.client)
1956 return ret;
1957 cleanup_retrans_entry(cm_node);
1763 cm_node->state = NES_CM_STATE_CLOSED; 1958 cm_node->state = NES_CM_STATE_CLOSED;
1764 ret = send_fin(cm_node, NULL); 1959 ret = send_fin(cm_node, NULL);
1765 1960
1766 if (ret < 0) { 1961 if (cm_node->accept_pend) {
1767 printk(KERN_INFO PFX "failed to send MPA Reply (reject)\n"); 1962 BUG_ON(!cm_node->listener);
1768 return ret; 1963 atomic_dec(&cm_node->listener->pend_accepts_cnt);
1964 BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
1769 } 1965 }
1770 1966
1967 ret = send_reset(cm_node, NULL);
1771 return ret; 1968 return ret;
1772} 1969}
1773 1970
@@ -1783,35 +1980,39 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
1783 return -EINVAL; 1980 return -EINVAL;
1784 1981
1785 switch (cm_node->state) { 1982 switch (cm_node->state) {
1786 /* if passed in node is null, create a reference key node for node search */ 1983 case NES_CM_STATE_SYN_RCVD:
1787 /* check if we found an owner node for this pkt */ 1984 case NES_CM_STATE_SYN_SENT:
1788 case NES_CM_STATE_SYN_RCVD: 1985 case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
1789 case NES_CM_STATE_SYN_SENT: 1986 case NES_CM_STATE_ESTABLISHED:
1790 case NES_CM_STATE_ONE_SIDE_ESTABLISHED: 1987 case NES_CM_STATE_ACCEPTING:
1791 case NES_CM_STATE_ESTABLISHED: 1988 case NES_CM_STATE_MPAREQ_SENT:
1792 case NES_CM_STATE_ACCEPTING: 1989 case NES_CM_STATE_MPAREQ_RCVD:
1793 case NES_CM_STATE_MPAREQ_SENT: 1990 cleanup_retrans_entry(cm_node);
1794 cm_node->state = NES_CM_STATE_FIN_WAIT1; 1991 send_reset(cm_node, NULL);
1795 send_fin(cm_node, NULL); 1992 break;
1796 break; 1993 case NES_CM_STATE_CLOSE_WAIT:
1797 case NES_CM_STATE_CLOSE_WAIT: 1994 cm_node->state = NES_CM_STATE_LAST_ACK;
1798 cm_node->state = NES_CM_STATE_LAST_ACK; 1995 send_fin(cm_node, NULL);
1799 send_fin(cm_node, NULL); 1996 break;
1800 break; 1997 case NES_CM_STATE_FIN_WAIT1:
1801 case NES_CM_STATE_FIN_WAIT1: 1998 case NES_CM_STATE_FIN_WAIT2:
1802 case NES_CM_STATE_FIN_WAIT2: 1999 case NES_CM_STATE_LAST_ACK:
1803 case NES_CM_STATE_LAST_ACK: 2000 case NES_CM_STATE_TIME_WAIT:
1804 case NES_CM_STATE_TIME_WAIT: 2001 case NES_CM_STATE_CLOSING:
1805 case NES_CM_STATE_CLOSING: 2002 ret = -1;
1806 ret = -1; 2003 break;
1807 break; 2004 case NES_CM_STATE_LISTENING:
1808 case NES_CM_STATE_LISTENING: 2005 case NES_CM_STATE_UNKNOWN:
1809 case NES_CM_STATE_UNKNOWN: 2006 case NES_CM_STATE_INITED:
1810 case NES_CM_STATE_INITED: 2007 case NES_CM_STATE_CLOSED:
1811 case NES_CM_STATE_CLOSED: 2008 ret = rem_ref_cm_node(cm_core, cm_node);
1812 case NES_CM_STATE_TSA: 2009 break;
1813 ret = rem_ref_cm_node(cm_core, cm_node); 2010 case NES_CM_STATE_TSA:
1814 break; 2011 if (cm_node->send_entry)
2012 printk(KERN_ERR "ERROR Close got called from STATE_TSA "
2013 "send_entry=%p\n", cm_node->send_entry);
2014 ret = rem_ref_cm_node(cm_core, cm_node);
2015 break;
1815 } 2016 }
1816 cm_node->cm_id = NULL; 2017 cm_node->cm_id = NULL;
1817 return ret; 2018 return ret;
@@ -1822,25 +2023,30 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
1822 * recv_pkt - recv an ETHERNET packet, and process it through CM 2023 * recv_pkt - recv an ETHERNET packet, and process it through CM
1823 * node state machine 2024 * node state machine
1824 */ 2025 */
1825static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic, 2026static void mini_cm_recv_pkt(struct nes_cm_core *cm_core,
1826 struct sk_buff *skb) 2027 struct nes_vnic *nesvnic, struct sk_buff *skb)
1827{ 2028{
1828 struct nes_cm_node *cm_node = NULL; 2029 struct nes_cm_node *cm_node = NULL;
1829 struct nes_cm_listener *listener = NULL; 2030 struct nes_cm_listener *listener = NULL;
1830 struct iphdr *iph; 2031 struct iphdr *iph;
1831 struct tcphdr *tcph; 2032 struct tcphdr *tcph;
1832 struct nes_cm_info nfo; 2033 struct nes_cm_info nfo;
1833 int ret = 0;
1834 2034
1835 if (!skb || skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) { 2035 if (!skb)
1836 ret = -EINVAL; 2036 return;
1837 goto out; 2037 if (skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) {
2038 dev_kfree_skb_any(skb);
2039 return;
1838 } 2040 }
1839 2041
1840 iph = (struct iphdr *)skb->data; 2042 iph = (struct iphdr *)skb->data;
1841 tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr)); 2043 tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr));
1842 skb_reset_network_header(skb); 2044 skb_reset_network_header(skb);
1843 skb_set_transport_header(skb, sizeof(*tcph)); 2045 skb_set_transport_header(skb, sizeof(*tcph));
2046 if (!tcph) {
2047 dev_kfree_skb_any(skb);
2048 return;
2049 }
1844 skb->len = ntohs(iph->tot_len); 2050 skb->len = ntohs(iph->tot_len);
1845 2051
1846 nfo.loc_addr = ntohl(iph->daddr); 2052 nfo.loc_addr = ntohl(iph->daddr);
@@ -1853,61 +2059,60 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvni
1853 NIPQUAD(iph->daddr), tcph->dest, 2059 NIPQUAD(iph->daddr), tcph->dest,
1854 NIPQUAD(iph->saddr), tcph->source); 2060 NIPQUAD(iph->saddr), tcph->source);
1855 2061
1856 /* note: this call is going to increment cm_node ref count */ 2062 do {
1857 cm_node = find_node(cm_core, 2063 cm_node = find_node(cm_core,
1858 nfo.rem_port, nfo.rem_addr, 2064 nfo.rem_port, nfo.rem_addr,
1859 nfo.loc_port, nfo.loc_addr); 2065 nfo.loc_port, nfo.loc_addr);
1860 2066
1861 if (!cm_node) {
1862 listener = find_listener(cm_core, nfo.loc_addr, nfo.loc_port,
1863 NES_CM_LISTENER_ACTIVE_STATE);
1864 if (listener) {
1865 nfo.cm_id = listener->cm_id;
1866 nfo.conn_type = listener->conn_type;
1867 } else {
1868 nfo.cm_id = NULL;
1869 nfo.conn_type = 0;
1870 }
1871
1872 cm_node = make_cm_node(cm_core, nesvnic, &nfo, listener);
1873 if (!cm_node) { 2067 if (!cm_node) {
1874 nes_debug(NES_DBG_CM, "Unable to allocate node\n"); 2068 /* Only type of packet accepted are for */
2069 /* the PASSIVE open (syn only) */
2070 if ((!tcph->syn) || (tcph->ack)) {
2071 cm_packets_dropped++;
2072 break;
2073 }
2074 listener = find_listener(cm_core, nfo.loc_addr,
2075 nfo.loc_port,
2076 NES_CM_LISTENER_ACTIVE_STATE);
1875 if (listener) { 2077 if (listener) {
1876 nes_debug(NES_DBG_CM, "unable to allocate node and decrementing listener refcount\n"); 2078 nfo.cm_id = listener->cm_id;
2079 nfo.conn_type = listener->conn_type;
2080 } else {
2081 nes_debug(NES_DBG_CM, "Unable to find listener "
2082 "for the pkt\n");
2083 cm_packets_dropped++;
2084 dev_kfree_skb_any(skb);
2085 break;
2086 }
2087
2088 cm_node = make_cm_node(cm_core, nesvnic, &nfo,
2089 listener);
2090 if (!cm_node) {
2091 nes_debug(NES_DBG_CM, "Unable to allocate "
2092 "node\n");
2093 cm_packets_dropped++;
1877 atomic_dec(&listener->ref_count); 2094 atomic_dec(&listener->ref_count);
2095 dev_kfree_skb_any(skb);
2096 break;
1878 } 2097 }
1879 ret = -1; 2098 if (!tcph->rst && !tcph->fin) {
1880 goto out; 2099 cm_node->state = NES_CM_STATE_LISTENING;
1881 } 2100 } else {
1882 if (!listener) { 2101 cm_packets_dropped++;
1883 nes_debug(NES_DBG_CM, "Packet found for unknown port %x refcnt=%d\n", 2102 rem_ref_cm_node(cm_core, cm_node);
1884 nfo.loc_port, atomic_read(&cm_node->ref_count)); 2103 dev_kfree_skb_any(skb);
1885 if (!tcph->rst) { 2104 break;
1886 nes_debug(NES_DBG_CM, "Packet found for unknown port=%d"
1887 " rem_port=%d refcnt=%d\n",
1888 nfo.loc_port, nfo.rem_port, atomic_read(&cm_node->ref_count));
1889
1890 cm_node->tcp_cntxt.rcv_nxt = ntohl(tcph->seq);
1891 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
1892 send_reset(cm_node);
1893 } 2105 }
2106 add_ref_cm_node(cm_node);
2107 } else if (cm_node->state == NES_CM_STATE_TSA) {
1894 rem_ref_cm_node(cm_core, cm_node); 2108 rem_ref_cm_node(cm_core, cm_node);
1895 ret = -1; 2109 atomic_inc(&cm_accel_dropped_pkts);
1896 goto out; 2110 dev_kfree_skb_any(skb);
2111 break;
1897 } 2112 }
1898 add_ref_cm_node(cm_node); 2113 process_packet(cm_node, skb, cm_core);
1899 cm_node->state = NES_CM_STATE_LISTENING; 2114 rem_ref_cm_node(cm_core, cm_node);
1900 } 2115 } while (0);
1901
1902 nes_debug(NES_DBG_CM, "Processing Packet for node %p, data = (%p):\n",
1903 cm_node, skb->data);
1904 process_packet(cm_node, skb, cm_core);
1905
1906 rem_ref_cm_node(cm_core, cm_node);
1907 out:
1908 if (skb)
1909 dev_kfree_skb_any(skb);
1910 return ret;
1911} 2116}
1912 2117
1913 2118
@@ -2107,15 +2312,12 @@ int nes_cm_disconn(struct nes_qp *nesqp)
2107 if (nesqp->disconn_pending == 0) { 2312 if (nesqp->disconn_pending == 0) {
2108 nesqp->disconn_pending++; 2313 nesqp->disconn_pending++;
2109 spin_unlock_irqrestore(&nesqp->lock, flags); 2314 spin_unlock_irqrestore(&nesqp->lock, flags);
2110 /* nes_add_ref(&nesqp->ibqp); */
2111 /* init our disconnect work element, to */ 2315 /* init our disconnect work element, to */
2112 INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker); 2316 INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
2113 2317
2114 queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work); 2318 queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
2115 } else { 2319 } else
2116 spin_unlock_irqrestore(&nesqp->lock, flags); 2320 spin_unlock_irqrestore(&nesqp->lock, flags);
2117 nes_rem_ref(&nesqp->ibqp);
2118 }
2119 2321
2120 return 0; 2322 return 0;
2121} 2323}
@@ -2161,7 +2363,6 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2161 nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n", 2363 nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n",
2162 nesqp->hwqp.qp_id); 2364 nesqp->hwqp.qp_id);
2163 spin_unlock_irqrestore(&nesqp->lock, flags); 2365 spin_unlock_irqrestore(&nesqp->lock, flags);
2164 nes_rem_ref(&nesqp->ibqp);
2165 return -1; 2366 return -1;
2166 } 2367 }
2167 2368
@@ -2182,30 +2383,31 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2182 atomic_inc(&cm_disconnects); 2383 atomic_inc(&cm_disconnects);
2183 cm_event.event = IW_CM_EVENT_DISCONNECT; 2384 cm_event.event = IW_CM_EVENT_DISCONNECT;
2184 if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) { 2385 if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
2185 issued_disconnect_reset = 1;
2186 cm_event.status = IW_CM_EVENT_STATUS_RESET; 2386 cm_event.status = IW_CM_EVENT_STATUS_RESET;
2187 nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event (status reset) for " 2387 nes_debug(NES_DBG_CM, "Generating a CM "
2188 " QP%u, cm_id = %p. \n", 2388 "Disconnect Event (status reset) for "
2189 nesqp->hwqp.qp_id, cm_id); 2389 "QP%u, cm_id = %p. \n",
2190 } else { 2390 nesqp->hwqp.qp_id, cm_id);
2391 } else
2191 cm_event.status = IW_CM_EVENT_STATUS_OK; 2392 cm_event.status = IW_CM_EVENT_STATUS_OK;
2192 }
2193 2393
2194 cm_event.local_addr = cm_id->local_addr; 2394 cm_event.local_addr = cm_id->local_addr;
2195 cm_event.remote_addr = cm_id->remote_addr; 2395 cm_event.remote_addr = cm_id->remote_addr;
2196 cm_event.private_data = NULL; 2396 cm_event.private_data = NULL;
2197 cm_event.private_data_len = 0; 2397 cm_event.private_data_len = 0;
2198 2398
2199 nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event for " 2399 nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event"
2200 " QP%u, SQ Head = %u, SQ Tail = %u. cm_id = %p, refcount = %u.\n", 2400 " for QP%u, SQ Head = %u, SQ Tail = %u. "
2201 nesqp->hwqp.qp_id, 2401 "cm_id = %p, refcount = %u.\n",
2202 nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail, cm_id, 2402 nesqp->hwqp.qp_id, nesqp->hwqp.sq_head,
2203 atomic_read(&nesqp->refcount)); 2403 nesqp->hwqp.sq_tail, cm_id,
2404 atomic_read(&nesqp->refcount));
2204 2405
2205 spin_unlock_irqrestore(&nesqp->lock, flags); 2406 spin_unlock_irqrestore(&nesqp->lock, flags);
2206 ret = cm_id->event_handler(cm_id, &cm_event); 2407 ret = cm_id->event_handler(cm_id, &cm_event);
2207 if (ret) 2408 if (ret)
2208 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); 2409 nes_debug(NES_DBG_CM, "OFA CM event_handler "
2410 "returned, ret=%d\n", ret);
2209 spin_lock_irqsave(&nesqp->lock, flags); 2411 spin_lock_irqsave(&nesqp->lock, flags);
2210 } 2412 }
2211 2413
@@ -2247,31 +2449,24 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
2247 if (nesqp->flush_issued == 0) { 2449 if (nesqp->flush_issued == 0) {
2248 nesqp->flush_issued = 1; 2450 nesqp->flush_issued = 1;
2249 spin_unlock_irqrestore(&nesqp->lock, flags); 2451 spin_unlock_irqrestore(&nesqp->lock, flags);
2250 flush_wqes(nesvnic->nesdev, nesqp, NES_CQP_FLUSH_RQ, 1); 2452 flush_wqes(nesvnic->nesdev, nesqp,
2251 } else { 2453 NES_CQP_FLUSH_RQ, 1);
2454 } else
2252 spin_unlock_irqrestore(&nesqp->lock, flags); 2455 spin_unlock_irqrestore(&nesqp->lock, flags);
2253 }
2254
2255 /* This reference is from either ModifyQP or the AE processing,
2256 there is still a race here with modifyqp */
2257 nes_rem_ref(&nesqp->ibqp);
2258
2259 } else { 2456 } else {
2260 cm_id = nesqp->cm_id; 2457 cm_id = nesqp->cm_id;
2261 spin_unlock_irqrestore(&nesqp->lock, flags); 2458 spin_unlock_irqrestore(&nesqp->lock, flags);
2262 /* check to see if the inbound reset beat the outbound reset */ 2459 /* check to see if the inbound reset beat the outbound reset */
2263 if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) { 2460 if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
2264 nes_debug(NES_DBG_CM, "QP%u: Decing refcount due to inbound reset" 2461 nes_debug(NES_DBG_CM, "QP%u: Decing refcount "
2265 " beating the outbound reset.\n", 2462 "due to inbound reset beating the "
2266 nesqp->hwqp.qp_id); 2463 "outbound reset.\n", nesqp->hwqp.qp_id);
2267 nes_rem_ref(&nesqp->ibqp);
2268 } 2464 }
2269 } 2465 }
2270 } else { 2466 } else {
2271 nesqp->disconn_pending = 0; 2467 nesqp->disconn_pending = 0;
2272 spin_unlock_irqrestore(&nesqp->lock, flags); 2468 spin_unlock_irqrestore(&nesqp->lock, flags);
2273 } 2469 }
2274 nes_rem_ref(&nesqp->ibqp);
2275 2470
2276 return 0; 2471 return 0;
2277} 2472}
@@ -2349,71 +2544,82 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2349 nesdev = nesvnic->nesdev; 2544 nesdev = nesvnic->nesdev;
2350 adapter = nesdev->nesadapter; 2545 adapter = nesdev->nesadapter;
2351 2546
2352 nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
2353 nesvnic, nesvnic->netdev, nesvnic->netdev->name);
2354
2355 /* since this is from a listen, we were able to put node handle into cm_id */
2356 cm_node = (struct nes_cm_node *)cm_id->provider_data; 2547 cm_node = (struct nes_cm_node *)cm_id->provider_data;
2548 nes_debug(NES_DBG_CM, "nes_accept: cm_node= %p nesvnic=%p, netdev=%p,"
2549 "%s\n", cm_node, nesvnic, nesvnic->netdev,
2550 nesvnic->netdev->name);
2357 2551
2358 /* associate the node with the QP */ 2552 /* associate the node with the QP */
2359 nesqp->cm_node = (void *)cm_node; 2553 nesqp->cm_node = (void *)cm_node;
2554 cm_node->nesqp = nesqp;
2555 nes_add_ref(&nesqp->ibqp);
2360 2556
2361 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu\n", 2557 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
2362 nesqp->hwqp.qp_id, cm_node, jiffies); 2558 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
2363 atomic_inc(&cm_accepts); 2559 atomic_inc(&cm_accepts);
2364 2560
2365 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n", 2561 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
2366 atomic_read(&nesvnic->netdev->refcnt)); 2562 atomic_read(&nesvnic->netdev->refcnt));
2367 2563
2368 /* allocate the ietf frame and space for private data */ 2564 /* allocate the ietf frame and space for private data */
2369 nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev, 2565 nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
2370 sizeof(struct ietf_mpa_frame) + conn_param->private_data_len, 2566 sizeof(struct ietf_mpa_frame) + conn_param->private_data_len,
2371 &nesqp->ietf_frame_pbase); 2567 &nesqp->ietf_frame_pbase);
2372
2373 if (!nesqp->ietf_frame) {
2374 nes_debug(NES_DBG_CM, "Unable to allocate memory for private data\n");
2375 return -ENOMEM;
2376 }
2377 2568
2569 if (!nesqp->ietf_frame) {
2570 nes_debug(NES_DBG_CM, "Unable to allocate memory for private "
2571 "data\n");
2572 return -ENOMEM;
2573 }
2378 2574
2379 /* setup the MPA frame */
2380 nesqp->private_data_len = conn_param->private_data_len;
2381 memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
2382 2575
2383 memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data, 2576 /* setup the MPA frame */
2384 conn_param->private_data_len); 2577 nesqp->private_data_len = conn_param->private_data_len;
2578 memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
2385 2579
2386 nesqp->ietf_frame->priv_data_len = cpu_to_be16(conn_param->private_data_len); 2580 memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
2387 nesqp->ietf_frame->rev = mpa_version; 2581 conn_param->private_data_len);
2388 nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
2389 2582
2390 /* setup our first outgoing iWarp send WQE (the IETF frame response) */ 2583 nesqp->ietf_frame->priv_data_len =
2391 wqe = &nesqp->hwqp.sq_vbase[0]; 2584 cpu_to_be16(conn_param->private_data_len);
2585 nesqp->ietf_frame->rev = mpa_version;
2586 nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
2392 2587
2393 if (cm_id->remote_addr.sin_addr.s_addr != cm_id->local_addr.sin_addr.s_addr) { 2588 /* setup our first outgoing iWarp send WQE (the IETF frame response) */
2394 u64temp = (unsigned long)nesqp; 2589 wqe = &nesqp->hwqp.sq_vbase[0];
2395 u64temp |= NES_SW_CONTEXT_ALIGN>>1; 2590
2396 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, 2591 if (cm_id->remote_addr.sin_addr.s_addr !=
2397 u64temp); 2592 cm_id->local_addr.sin_addr.s_addr) {
2398 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = 2593 u64temp = (unsigned long)nesqp;
2399 cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING | NES_IWARP_SQ_WQE_WRPDU); 2594 u64temp |= NES_SW_CONTEXT_ALIGN>>1;
2400 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 2595 set_wqe_64bit_value(wqe->wqe_words,
2401 cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame)); 2596 NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
2402 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 2597 u64temp);
2403 cpu_to_le32((u32)nesqp->ietf_frame_pbase); 2598 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
2404 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 2599 cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING |
2405 cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32)); 2600 NES_IWARP_SQ_WQE_WRPDU);
2406 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 2601 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
2407 cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame)); 2602 cpu_to_le32(conn_param->private_data_len +
2408 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; 2603 sizeof(struct ietf_mpa_frame));
2409 2604 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] =
2410 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32( 2605 cpu_to_le32((u32)nesqp->ietf_frame_pbase);
2411 NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | NES_QPCONTEXT_ORDIRD_WRPDU); 2606 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] =
2412 } else { 2607 cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
2413 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | 2608 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
2414 NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM)); 2609 cpu_to_le32(conn_param->private_data_len +
2415 } 2610 sizeof(struct ietf_mpa_frame));
2416 nesqp->skip_lsmm = 1; 2611 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
2612
2613 nesqp->nesqp_context->ird_ord_sizes |=
2614 cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
2615 NES_QPCONTEXT_ORDIRD_WRPDU);
2616 } else {
2617 nesqp->nesqp_context->ird_ord_sizes |=
2618 cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
2619 NES_QPCONTEXT_ORDIRD_WRPDU |
2620 NES_QPCONTEXT_ORDIRD_ALSMM));
2621 }
2622 nesqp->skip_lsmm = 1;
2417 2623
2418 2624
2419 /* Cache the cm_id in the qp */ 2625 /* Cache the cm_id in the qp */
@@ -2424,55 +2630,75 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2424 cm_id->provider_data = nesqp; 2630 cm_id->provider_data = nesqp;
2425 nesqp->active_conn = 0; 2631 nesqp->active_conn = 0;
2426 2632
2633 if (cm_node->state == NES_CM_STATE_TSA)
2634 nes_debug(NES_DBG_CM, "Already state = TSA for cm_node=%p\n",
2635 cm_node);
2636
2427 nes_cm_init_tsa_conn(nesqp, cm_node); 2637 nes_cm_init_tsa_conn(nesqp, cm_node);
2428 2638
2429 nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port)); 2639 nesqp->nesqp_context->tcpPorts[0] =
2430 nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port)); 2640 cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
2431 nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr)); 2641 nesqp->nesqp_context->tcpPorts[1] =
2642 cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
2643
2644 if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
2645 nesqp->nesqp_context->ip0 =
2646 cpu_to_le32(ntohl(nesvnic->local_ipaddr));
2647 else
2648 nesqp->nesqp_context->ip0 =
2649 cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
2432 2650
2433 nesqp->nesqp_context->misc2 |= cpu_to_le32( 2651 nesqp->nesqp_context->misc2 |= cpu_to_le32(
2434 (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT); 2652 (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
2653 NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
2435 2654
2436 nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32( 2655 nesqp->nesqp_context->arp_index_vlan |=
2437 nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0), NULL, 2656 cpu_to_le32(nes_arp_table(nesdev,
2657 le32_to_cpu(nesqp->nesqp_context->ip0), NULL,
2438 NES_ARP_RESOLVE) << 16); 2658 NES_ARP_RESOLVE) << 16);
2439 2659
2440 nesqp->nesqp_context->ts_val_delta = cpu_to_le32( 2660 nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
2441 jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW)); 2661 jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
2442 2662
2443 nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id); 2663 nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
2444 2664
2445 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32( 2665 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
2446 ((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT)); 2666 ((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT));
2447 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord); 2667 nesqp->nesqp_context->ird_ord_sizes |=
2668 cpu_to_le32((u32)conn_param->ord);
2448 2669
2449 memset(&nes_quad, 0, sizeof(nes_quad)); 2670 memset(&nes_quad, 0, sizeof(nes_quad));
2450 nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); 2671 nes_quad.DstIpAdrIndex =
2451 nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr; 2672 cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
2452 nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port; 2673 if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
2453 nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; 2674 nes_quad.SrcIpadr = nesvnic->local_ipaddr;
2675 else
2676 nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
2677 nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
2678 nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
2454 2679
2455 /* Produce hash key */ 2680 /* Produce hash key */
2456 crc_value = get_crc_value(&nes_quad); 2681 crc_value = get_crc_value(&nes_quad);
2457 nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff); 2682 nesqp->hte_index = cpu_to_be32(crc_value ^ 0xffffffff);
2458 nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n", 2683 nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n",
2459 nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask); 2684 nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
2460 2685
2461 nesqp->hte_index &= adapter->hte_index_mask; 2686 nesqp->hte_index &= adapter->hte_index_mask;
2462 nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index); 2687 nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
2463 2688
2464 cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); 2689 cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
2465 2690
2466 nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X," 2691 nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = "
2467 " rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + private data length=%zu.\n", 2692 "0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + "
2468 nesqp->hwqp.qp_id, 2693 "private data length=%zu.\n", nesqp->hwqp.qp_id,
2469 ntohl(cm_id->remote_addr.sin_addr.s_addr), 2694 ntohl(cm_id->remote_addr.sin_addr.s_addr),
2470 ntohs(cm_id->remote_addr.sin_port), 2695 ntohs(cm_id->remote_addr.sin_port),
2471 ntohl(cm_id->local_addr.sin_addr.s_addr), 2696 ntohl(cm_id->local_addr.sin_addr.s_addr),
2472 ntohs(cm_id->local_addr.sin_port), 2697 ntohs(cm_id->local_addr.sin_port),
2473 le32_to_cpu(nesqp->nesqp_context->rcv_nxt), 2698 le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
2474 le32_to_cpu(nesqp->nesqp_context->snd_nxt), 2699 le32_to_cpu(nesqp->nesqp_context->snd_nxt),
2475 conn_param->private_data_len+sizeof(struct ietf_mpa_frame)); 2700 conn_param->private_data_len +
2701 sizeof(struct ietf_mpa_frame));
2476 2702
2477 attr.qp_state = IB_QPS_RTS; 2703 attr.qp_state = IB_QPS_RTS;
2478 nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL); 2704 nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
@@ -2489,15 +2715,16 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2489 cm_event.private_data_len = 0; 2715 cm_event.private_data_len = 0;
2490 ret = cm_id->event_handler(cm_id, &cm_event); 2716 ret = cm_id->event_handler(cm_id, &cm_event);
2491 if (cm_node->loopbackpartner) { 2717 if (cm_node->loopbackpartner) {
2492 cm_node->loopbackpartner->mpa_frame_size = nesqp->private_data_len; 2718 cm_node->loopbackpartner->mpa_frame_size =
2719 nesqp->private_data_len;
2493 /* copy entire MPA frame to our cm_node's frame */ 2720 /* copy entire MPA frame to our cm_node's frame */
2494 memcpy(cm_node->loopbackpartner->mpa_frame_buf, nesqp->ietf_frame->priv_data, 2721 memcpy(cm_node->loopbackpartner->mpa_frame_buf,
2495 nesqp->private_data_len); 2722 nesqp->ietf_frame->priv_data, nesqp->private_data_len);
2496 create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED); 2723 create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED);
2497 } 2724 }
2498 if (ret) 2725 if (ret)
2499 printk("%s[%u] OFA CM event_handler returned, ret=%d\n", 2726 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
2500 __func__, __LINE__, ret); 2727 "ret=%d\n", __func__, __LINE__, ret);
2501 2728
2502 return 0; 2729 return 0;
2503} 2730}
@@ -2555,74 +2782,61 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2555 if (!nesdev) 2782 if (!nesdev)
2556 return -EINVAL; 2783 return -EINVAL;
2557 2784
2558 atomic_inc(&cm_connects); 2785 nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = "
2559 2786 "0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
2560 nesqp->ietf_frame = kzalloc(sizeof(struct ietf_mpa_frame) + 2787 ntohl(nesvnic->local_ipaddr),
2561 conn_param->private_data_len, GFP_KERNEL); 2788 ntohl(cm_id->remote_addr.sin_addr.s_addr),
2562 if (!nesqp->ietf_frame) 2789 ntohs(cm_id->remote_addr.sin_port),
2563 return -ENOMEM; 2790 ntohl(cm_id->local_addr.sin_addr.s_addr),
2791 ntohs(cm_id->local_addr.sin_port));
2564 2792
2565 /* set qp as having an active connection */ 2793 atomic_inc(&cm_connects);
2566 nesqp->active_conn = 1; 2794 nesqp->active_conn = 1;
2567 2795
2568 nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X.\n",
2569 nesqp->hwqp.qp_id,
2570 ntohl(cm_id->remote_addr.sin_addr.s_addr),
2571 ntohs(cm_id->remote_addr.sin_port),
2572 ntohl(cm_id->local_addr.sin_addr.s_addr),
2573 ntohs(cm_id->local_addr.sin_port));
2574
2575 /* cache the cm_id in the qp */ 2796 /* cache the cm_id in the qp */
2576 nesqp->cm_id = cm_id; 2797 nesqp->cm_id = cm_id;
2577 2798
2578 cm_id->provider_data = nesqp; 2799 cm_id->provider_data = nesqp;
2579 2800
2580 /* copy the private data */
2581 if (conn_param->private_data_len) {
2582 memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
2583 conn_param->private_data_len);
2584 }
2585
2586 nesqp->private_data_len = conn_param->private_data_len; 2801 nesqp->private_data_len = conn_param->private_data_len;
2587 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord); 2802 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
2588 nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord); 2803 nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
2589 nes_debug(NES_DBG_CM, "mpa private data len =%u\n", conn_param->private_data_len); 2804 nes_debug(NES_DBG_CM, "mpa private data len =%u\n",
2590 2805 conn_param->private_data_len);
2591 strcpy(&nesqp->ietf_frame->key[0], IEFT_MPA_KEY_REQ);
2592 nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
2593 nesqp->ietf_frame->rev = IETF_MPA_VERSION;
2594 nesqp->ietf_frame->priv_data_len = htons(conn_param->private_data_len);
2595 2806
2596 if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr) 2807 if (cm_id->local_addr.sin_addr.s_addr !=
2808 cm_id->remote_addr.sin_addr.s_addr)
2597 nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), 2809 nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
2598 PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); 2810 PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
2599 2811
2600 /* set up the connection params for the node */ 2812 /* set up the connection params for the node */
2601 cm_info.loc_addr = (cm_id->local_addr.sin_addr.s_addr); 2813 cm_info.loc_addr = htonl(cm_id->local_addr.sin_addr.s_addr);
2602 cm_info.loc_port = (cm_id->local_addr.sin_port); 2814 cm_info.loc_port = htons(cm_id->local_addr.sin_port);
2603 cm_info.rem_addr = (cm_id->remote_addr.sin_addr.s_addr); 2815 cm_info.rem_addr = htonl(cm_id->remote_addr.sin_addr.s_addr);
2604 cm_info.rem_port = (cm_id->remote_addr.sin_port); 2816 cm_info.rem_port = htons(cm_id->remote_addr.sin_port);
2605 cm_info.cm_id = cm_id; 2817 cm_info.cm_id = cm_id;
2606 cm_info.conn_type = NES_CM_IWARP_CONN_TYPE; 2818 cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
2607 2819
2608 cm_id->add_ref(cm_id); 2820 cm_id->add_ref(cm_id);
2609 nes_add_ref(&nesqp->ibqp);
2610 2821
2611 /* create a connect CM node connection */ 2822 /* create a connect CM node connection */
2612 cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, nesqp->ietf_frame, &cm_info); 2823 cm_node = g_cm_core->api->connect(g_cm_core, nesvnic,
2824 conn_param->private_data_len, (void *)conn_param->private_data,
2825 &cm_info);
2613 if (!cm_node) { 2826 if (!cm_node) {
2614 if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr) 2827 if (cm_id->local_addr.sin_addr.s_addr !=
2828 cm_id->remote_addr.sin_addr.s_addr)
2615 nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), 2829 nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
2616 PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); 2830 PCI_FUNC(nesdev->pcidev->devfn),
2617 nes_rem_ref(&nesqp->ibqp); 2831 NES_MANAGE_APBVT_DEL);
2618 kfree(nesqp->ietf_frame); 2832
2619 nesqp->ietf_frame = NULL;
2620 cm_id->rem_ref(cm_id); 2833 cm_id->rem_ref(cm_id);
2621 return -ENOMEM; 2834 return -ENOMEM;
2622 } 2835 }
2623 2836
2624 cm_node->apbvt_set = 1; 2837 cm_node->apbvt_set = 1;
2625 nesqp->cm_node = cm_node; 2838 nesqp->cm_node = cm_node;
2839 cm_node->nesqp = nesqp;
2626 2840
2627 return 0; 2841 return 0;
2628} 2842}
@@ -2664,7 +2878,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
2664 2878
2665 cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info); 2879 cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
2666 if (!cm_node) { 2880 if (!cm_node) {
2667 printk("%s[%u] Error returned from listen API call\n", 2881 printk(KERN_ERR "%s[%u] Error returned from listen API call\n",
2668 __func__, __LINE__); 2882 __func__, __LINE__);
2669 return -ENOMEM; 2883 return -ENOMEM;
2670 } 2884 }
@@ -2672,10 +2886,13 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
2672 cm_id->provider_data = cm_node; 2886 cm_id->provider_data = cm_node;
2673 2887
2674 if (!cm_node->reused_node) { 2888 if (!cm_node->reused_node) {
2675 err = nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port), 2889 err = nes_manage_apbvt(nesvnic,
2676 PCI_FUNC(nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); 2890 ntohs(cm_id->local_addr.sin_port),
2891 PCI_FUNC(nesvnic->nesdev->pcidev->devfn),
2892 NES_MANAGE_APBVT_ADD);
2677 if (err) { 2893 if (err) {
2678 printk("nes_manage_apbvt call returned %d.\n", err); 2894 printk(KERN_ERR "nes_manage_apbvt call returned %d.\n",
2895 err);
2679 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); 2896 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
2680 return err; 2897 return err;
2681 } 2898 }
@@ -2795,53 +3012,70 @@ static void cm_event_connected(struct nes_cm_event *event)
2795 nes_cm_init_tsa_conn(nesqp, cm_node); 3012 nes_cm_init_tsa_conn(nesqp, cm_node);
2796 3013
2797 /* set the QP tsa context */ 3014 /* set the QP tsa context */
2798 nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port)); 3015 nesqp->nesqp_context->tcpPorts[0] =
2799 nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port)); 3016 cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
2800 nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr)); 3017 nesqp->nesqp_context->tcpPorts[1] =
3018 cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
3019 if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
3020 nesqp->nesqp_context->ip0 =
3021 cpu_to_le32(ntohl(nesvnic->local_ipaddr));
3022 else
3023 nesqp->nesqp_context->ip0 =
3024 cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
2801 3025
2802 nesqp->nesqp_context->misc2 |= cpu_to_le32( 3026 nesqp->nesqp_context->misc2 |= cpu_to_le32(
2803 (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT); 3027 (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
3028 NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
2804 nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32( 3029 nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
2805 nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0), 3030 nes_arp_table(nesdev,
3031 le32_to_cpu(nesqp->nesqp_context->ip0),
2806 NULL, NES_ARP_RESOLVE) << 16); 3032 NULL, NES_ARP_RESOLVE) << 16);
2807 nesqp->nesqp_context->ts_val_delta = cpu_to_le32( 3033 nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
2808 jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW)); 3034 jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
2809 nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id); 3035 nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
2810 nesqp->nesqp_context->ird_ord_sizes |= 3036 nesqp->nesqp_context->ird_ord_sizes |=
2811 cpu_to_le32((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT); 3037 cpu_to_le32((u32)1 <<
3038 NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
2812 3039
2813 /* Adjust tail for not having a LSMM */ 3040 /* Adjust tail for not having a LSMM */
2814 nesqp->hwqp.sq_tail = 1; 3041 nesqp->hwqp.sq_tail = 1;
2815 3042
2816#if defined(NES_SEND_FIRST_WRITE) 3043#if defined(NES_SEND_FIRST_WRITE)
2817 if (cm_node->send_write0) { 3044 if (cm_node->send_write0) {
2818 nes_debug(NES_DBG_CM, "Sending first write.\n"); 3045 nes_debug(NES_DBG_CM, "Sending first write.\n");
2819 wqe = &nesqp->hwqp.sq_vbase[0]; 3046 wqe = &nesqp->hwqp.sq_vbase[0];
2820 u64temp = (unsigned long)nesqp; 3047 u64temp = (unsigned long)nesqp;
2821 u64temp |= NES_SW_CONTEXT_ALIGN>>1; 3048 u64temp |= NES_SW_CONTEXT_ALIGN>>1;
2822 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, 3049 set_wqe_64bit_value(wqe->wqe_words,
2823 u64temp); 3050 NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp);
2824 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(NES_IWARP_SQ_OP_RDMAW); 3051 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
2825 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0; 3052 cpu_to_le32(NES_IWARP_SQ_OP_RDMAW);
2826 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0; 3053 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;
2827 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0; 3054 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0;
2828 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0; 3055 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0;
2829 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0; 3056 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
2830 3057 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
2831 /* use the reserved spot on the WQ for the extra first WQE */ 3058
2832 nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | 3059 /* use the reserved spot on the WQ for the extra first WQE */
2833 NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM)); 3060 nesqp->nesqp_context->ird_ord_sizes &=
2834 nesqp->skip_lsmm = 1; 3061 cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
2835 nesqp->hwqp.sq_tail = 0; 3062 NES_QPCONTEXT_ORDIRD_WRPDU |
2836 nes_write32(nesdev->regs + NES_WQE_ALLOC, 3063 NES_QPCONTEXT_ORDIRD_ALSMM));
2837 (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id); 3064 nesqp->skip_lsmm = 1;
2838 } 3065 nesqp->hwqp.sq_tail = 0;
3066 nes_write32(nesdev->regs + NES_WQE_ALLOC,
3067 (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
3068 }
2839#endif 3069#endif
2840 3070
2841 memset(&nes_quad, 0, sizeof(nes_quad)); 3071 memset(&nes_quad, 0, sizeof(nes_quad));
2842 3072
2843 nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); 3073 nes_quad.DstIpAdrIndex =
2844 nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr; 3074 cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
3075 if (ipv4_is_loopback(cm_id->remote_addr.sin_addr.s_addr))
3076 nes_quad.SrcIpadr = nesvnic->local_ipaddr;
3077 else
3078 nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
2845 nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port; 3079 nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
2846 nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port; 3080 nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
2847 3081
@@ -2858,10 +3092,6 @@ static void cm_event_connected(struct nes_cm_event *event)
2858 nesqp->private_data_len = (u8) cm_node->mpa_frame_size; 3092 nesqp->private_data_len = (u8) cm_node->mpa_frame_size;
2859 cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node); 3093 cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
2860 3094
2861 /* modify QP state to rts */
2862 attr.qp_state = IB_QPS_RTS;
2863 nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
2864
2865 /* notify OF layer we successfully created the requested connection */ 3095 /* notify OF layer we successfully created the requested connection */
2866 cm_event.event = IW_CM_EVENT_CONNECT_REPLY; 3096 cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
2867 cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; 3097 cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
@@ -2870,20 +3100,21 @@ static void cm_event_connected(struct nes_cm_event *event)
2870 cm_event.local_addr.sin_port = cm_id->local_addr.sin_port; 3100 cm_event.local_addr.sin_port = cm_id->local_addr.sin_port;
2871 cm_event.remote_addr = cm_id->remote_addr; 3101 cm_event.remote_addr = cm_id->remote_addr;
2872 3102
2873 cm_event.private_data = (void *)event->cm_node->mpa_frame_buf; 3103 cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
2874 cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size; 3104 cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size;
2875 3105
2876 cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr; 3106 cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr;
2877 ret = cm_id->event_handler(cm_id, &cm_event); 3107 ret = cm_id->event_handler(cm_id, &cm_event);
2878 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); 3108 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
2879 3109
2880 if (ret) 3110 if (ret)
2881 printk("%s[%u] OFA CM event_handler returned, ret=%d\n", 3111 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
2882 __func__, __LINE__, ret); 3112 "ret=%d\n", __func__, __LINE__, ret);
2883 nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n", 3113 attr.qp_state = IB_QPS_RTS;
2884 nesqp->hwqp.qp_id, jiffies ); 3114 nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
2885 3115
2886 nes_rem_ref(&nesqp->ibqp); 3116 nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = "
3117 "%lu\n", nesqp->hwqp.qp_id, jiffies);
2887 3118
2888 return; 3119 return;
2889} 3120}
@@ -2927,17 +3158,19 @@ static void cm_event_connect_error(struct nes_cm_event *event)
2927 cm_event.private_data = NULL; 3158 cm_event.private_data = NULL;
2928 cm_event.private_data_len = 0; 3159 cm_event.private_data_len = 0;
2929 3160
2930 nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, remove_addr=%08x\n", 3161 nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, "
2931 cm_event.local_addr.sin_addr.s_addr, cm_event.remote_addr.sin_addr.s_addr); 3162 "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr,
3163 cm_event.remote_addr.sin_addr.s_addr);
2932 3164
2933 ret = cm_id->event_handler(cm_id, &cm_event); 3165 ret = cm_id->event_handler(cm_id, &cm_event);
2934 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); 3166 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
2935 if (ret) 3167 if (ret)
2936 printk("%s[%u] OFA CM event_handler returned, ret=%d\n", 3168 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, "
2937 __func__, __LINE__, ret); 3169 "ret=%d\n", __func__, __LINE__, ret);
2938 nes_rem_ref(&nesqp->ibqp); 3170 nes_rem_ref(&nesqp->ibqp);
2939 cm_id->rem_ref(cm_id); 3171 cm_id->rem_ref(cm_id);
2940 3172
3173 rem_ref_cm_node(event->cm_node->cm_core, event->cm_node);
2941 return; 3174 return;
2942} 3175}
2943 3176
@@ -3040,7 +3273,8 @@ static int nes_cm_post_event(struct nes_cm_event *event)
3040 add_ref_cm_node(event->cm_node); 3273 add_ref_cm_node(event->cm_node);
3041 event->cm_info.cm_id->add_ref(event->cm_info.cm_id); 3274 event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
3042 INIT_WORK(&event->event_work, nes_cm_event_handler); 3275 INIT_WORK(&event->event_work, nes_cm_event_handler);
3043 nes_debug(NES_DBG_CM, "queue_work, event=%p\n", event); 3276 nes_debug(NES_DBG_CM, "cm_node=%p queue_work, event=%p\n",
3277 event->cm_node, event);
3044 3278
3045 queue_work(event->cm_node->cm_core->event_wq, &event->event_work); 3279 queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
3046 3280
@@ -3056,46 +3290,48 @@ static int nes_cm_post_event(struct nes_cm_event *event)
3056 */ 3290 */
3057static void nes_cm_event_handler(struct work_struct *work) 3291static void nes_cm_event_handler(struct work_struct *work)
3058{ 3292{
3059 struct nes_cm_event *event = container_of(work, struct nes_cm_event, event_work); 3293 struct nes_cm_event *event = container_of(work, struct nes_cm_event,
3294 event_work);
3060 struct nes_cm_core *cm_core; 3295 struct nes_cm_core *cm_core;
3061 3296
3062 if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) { 3297 if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core))
3063 return; 3298 return;
3064 } 3299
3065 cm_core = event->cm_node->cm_core; 3300 cm_core = event->cm_node->cm_core;
3066 nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n", 3301 nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n",
3067 event, event->type, atomic_read(&cm_core->events_posted)); 3302 event, event->type, atomic_read(&cm_core->events_posted));
3068 3303
3069 switch (event->type) { 3304 switch (event->type) {
3070 case NES_CM_EVENT_MPA_REQ: 3305 case NES_CM_EVENT_MPA_REQ:
3071 cm_event_mpa_req(event); 3306 cm_event_mpa_req(event);
3072 nes_debug(NES_DBG_CM, "CM Event: MPA REQUEST\n"); 3307 nes_debug(NES_DBG_CM, "cm_node=%p CM Event: MPA REQUEST\n",
3073 break; 3308 event->cm_node);
3074 case NES_CM_EVENT_RESET: 3309 break;
3075 nes_debug(NES_DBG_CM, "CM Event: RESET\n"); 3310 case NES_CM_EVENT_RESET:
3076 cm_event_reset(event); 3311 nes_debug(NES_DBG_CM, "cm_node = %p CM Event: RESET\n",
3077 break; 3312 event->cm_node);
3078 case NES_CM_EVENT_CONNECTED: 3313 cm_event_reset(event);
3079 if ((!event->cm_node->cm_id) || 3314 break;
3080 (event->cm_node->state != NES_CM_STATE_TSA)) { 3315 case NES_CM_EVENT_CONNECTED:
3081 break; 3316 if ((!event->cm_node->cm_id) ||
3082 } 3317 (event->cm_node->state != NES_CM_STATE_TSA))
3083 cm_event_connected(event);
3084 nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
3085 break; 3318 break;
3086 case NES_CM_EVENT_ABORTED: 3319 cm_event_connected(event);
3087 if ((!event->cm_node->cm_id) || (event->cm_node->state == NES_CM_STATE_TSA)) { 3320 nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
3088 break; 3321 break;
3089 } 3322 case NES_CM_EVENT_ABORTED:
3090 cm_event_connect_error(event); 3323 if ((!event->cm_node->cm_id) ||
3091 nes_debug(NES_DBG_CM, "CM Event: ABORTED\n"); 3324 (event->cm_node->state == NES_CM_STATE_TSA))
3092 break;
3093 case NES_CM_EVENT_DROPPED_PKT:
3094 nes_debug(NES_DBG_CM, "CM Event: DROPPED PKT\n");
3095 break;
3096 default:
3097 nes_debug(NES_DBG_CM, "CM Event: UNKNOWN EVENT TYPE\n");
3098 break; 3325 break;
3326 cm_event_connect_error(event);
3327 nes_debug(NES_DBG_CM, "CM Event: ABORTED\n");
3328 break;
3329 case NES_CM_EVENT_DROPPED_PKT:
3330 nes_debug(NES_DBG_CM, "CM Event: DROPPED PKT\n");
3331 break;
3332 default:
3333 nes_debug(NES_DBG_CM, "CM Event: UNKNOWN EVENT TYPE\n");
3334 break;
3099 } 3335 }
3100 3336
3101 atomic_dec(&cm_core->events_posted); 3337 atomic_dec(&cm_core->events_posted);
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index 7717cb2ab500..367b3d290140 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -83,6 +83,8 @@ enum nes_timer_type {
83#define SET_FIN 4 83#define SET_FIN 4
84#define SET_RST 8 84#define SET_RST 8
85 85
86#define TCP_OPTIONS_PADDING 3
87
86struct option_base { 88struct option_base {
87 u8 optionnum; 89 u8 optionnum;
88 u8 length; 90 u8 length;
@@ -177,6 +179,7 @@ enum nes_cm_node_state {
177 NES_CM_STATE_ESTABLISHED, 179 NES_CM_STATE_ESTABLISHED,
178 NES_CM_STATE_ACCEPTING, 180 NES_CM_STATE_ACCEPTING,
179 NES_CM_STATE_MPAREQ_SENT, 181 NES_CM_STATE_MPAREQ_SENT,
182 NES_CM_STATE_MPAREQ_RCVD,
180 NES_CM_STATE_TSA, 183 NES_CM_STATE_TSA,
181 NES_CM_STATE_FIN_WAIT1, 184 NES_CM_STATE_FIN_WAIT1,
182 NES_CM_STATE_FIN_WAIT2, 185 NES_CM_STATE_FIN_WAIT2,
@@ -187,6 +190,16 @@ enum nes_cm_node_state {
187 NES_CM_STATE_CLOSED 190 NES_CM_STATE_CLOSED
188}; 191};
189 192
193enum nes_tcpip_pkt_type {
194 NES_PKT_TYPE_UNKNOWN,
195 NES_PKT_TYPE_SYN,
196 NES_PKT_TYPE_SYNACK,
197 NES_PKT_TYPE_ACK,
198 NES_PKT_TYPE_FIN,
199 NES_PKT_TYPE_RST
200};
201
202
190/* type of nes connection */ 203/* type of nes connection */
191enum nes_cm_conn_type { 204enum nes_cm_conn_type {
192 NES_CM_IWARP_CONN_TYPE, 205 NES_CM_IWARP_CONN_TYPE,
@@ -257,7 +270,9 @@ struct nes_cm_node {
257 struct net_device *netdev; 270 struct net_device *netdev;
258 271
259 struct nes_cm_node *loopbackpartner; 272 struct nes_cm_node *loopbackpartner;
260 struct list_head retrans_list; 273
274 struct nes_timer_entry *send_entry;
275
261 spinlock_t retrans_list_lock; 276 spinlock_t retrans_list_lock;
262 struct list_head recv_list; 277 struct list_head recv_list;
263 spinlock_t recv_list_lock; 278 spinlock_t recv_list_lock;
@@ -276,6 +291,8 @@ struct nes_cm_node {
276 struct nes_vnic *nesvnic; 291 struct nes_vnic *nesvnic;
277 int apbvt_set; 292 int apbvt_set;
278 int accept_pend; 293 int accept_pend;
294 int freed;
295 struct nes_qp *nesqp;
279}; 296};
280 297
281/* structure for client or CM to fill when making CM api calls. */ 298/* structure for client or CM to fill when making CM api calls. */
@@ -366,14 +383,14 @@ struct nes_cm_ops {
366 struct nes_cm_info *); 383 struct nes_cm_info *);
367 int (*stop_listener)(struct nes_cm_core *, struct nes_cm_listener *); 384 int (*stop_listener)(struct nes_cm_core *, struct nes_cm_listener *);
368 struct nes_cm_node * (*connect)(struct nes_cm_core *, 385 struct nes_cm_node * (*connect)(struct nes_cm_core *,
369 struct nes_vnic *, struct ietf_mpa_frame *, 386 struct nes_vnic *, u16, void *,
370 struct nes_cm_info *); 387 struct nes_cm_info *);
371 int (*close)(struct nes_cm_core *, struct nes_cm_node *); 388 int (*close)(struct nes_cm_core *, struct nes_cm_node *);
372 int (*accept)(struct nes_cm_core *, struct ietf_mpa_frame *, 389 int (*accept)(struct nes_cm_core *, struct ietf_mpa_frame *,
373 struct nes_cm_node *); 390 struct nes_cm_node *);
374 int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *, 391 int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *,
375 struct nes_cm_node *); 392 struct nes_cm_node *);
376 int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *, 393 void (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *,
377 struct sk_buff *); 394 struct sk_buff *);
378 int (*destroy_cm_core)(struct nes_cm_core *); 395 int (*destroy_cm_core)(struct nes_cm_core *);
379 int (*get)(struct nes_cm_core *); 396 int (*get)(struct nes_cm_core *);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 85f26d19a32b..1513d4066f1b 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2814,7 +2814,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2814 nesqp = *((struct nes_qp **)&context); 2814 nesqp = *((struct nes_qp **)&context);
2815 if (atomic_inc_return(&nesqp->close_timer_started) == 1) { 2815 if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
2816 nesqp->cm_id->add_ref(nesqp->cm_id); 2816 nesqp->cm_id->add_ref(nesqp->cm_id);
2817 nes_add_ref(&nesqp->ibqp);
2818 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp, 2817 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
2819 NES_TIMER_TYPE_CLOSE, 1, 0); 2818 NES_TIMER_TYPE_CLOSE, 1, 0);
2820 nes_debug(NES_DBG_AEQ, "QP%u Not decrementing QP refcount (%d)," 2819 nes_debug(NES_DBG_AEQ, "QP%u Not decrementing QP refcount (%d),"
@@ -2838,7 +2837,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2838 if (async_event_id == NES_AEQE_AEID_RESET_SENT) { 2837 if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
2839 tcp_state = NES_AEQE_TCP_STATE_CLOSED; 2838 tcp_state = NES_AEQE_TCP_STATE_CLOSED;
2840 } 2839 }
2841 nes_add_ref(&nesqp->ibqp);
2842 spin_lock_irqsave(&nesqp->lock, flags); 2840 spin_lock_irqsave(&nesqp->lock, flags);
2843 nesqp->hw_iwarp_state = iwarp_state; 2841 nesqp->hw_iwarp_state = iwarp_state;
2844 nesqp->hw_tcp_state = tcp_state; 2842 nesqp->hw_tcp_state = tcp_state;
@@ -2876,7 +2874,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2876 } 2874 }
2877 spin_unlock_irqrestore(&nesqp->lock, flags); 2875 spin_unlock_irqrestore(&nesqp->lock, flags);
2878 if (next_iwarp_state) { 2876 if (next_iwarp_state) {
2879 nes_add_ref(&nesqp->ibqp);
2880 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X," 2877 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
2881 " also added another reference\n", 2878 " also added another reference\n",
2882 nesqp->hwqp.qp_id, next_iwarp_state); 2879 nesqp->hwqp.qp_id, next_iwarp_state);
@@ -2888,7 +2885,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2888 /* FIN Received but ib state not RTS, 2885 /* FIN Received but ib state not RTS,
2889 close complete will be on its way */ 2886 close complete will be on its way */
2890 spin_unlock_irqrestore(&nesqp->lock, flags); 2887 spin_unlock_irqrestore(&nesqp->lock, flags);
2891 nes_rem_ref(&nesqp->ibqp);
2892 return; 2888 return;
2893 } 2889 }
2894 spin_unlock_irqrestore(&nesqp->lock, flags); 2890 spin_unlock_irqrestore(&nesqp->lock, flags);
@@ -2922,7 +2918,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2922 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || 2918 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
2923 ((nesqp->ibqp_state == IB_QPS_RTS)&& 2919 ((nesqp->ibqp_state == IB_QPS_RTS)&&
2924 (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { 2920 (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2925 nes_add_ref(&nesqp->ibqp);
2926 nes_cm_disconn(nesqp); 2921 nes_cm_disconn(nesqp);
2927 } else { 2922 } else {
2928 nesqp->in_disconnect = 0; 2923 nesqp->in_disconnect = 0;
@@ -2931,7 +2926,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
2931 break; 2926 break;
2932 case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES: 2927 case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
2933 nesqp = *((struct nes_qp **)&context); 2928 nesqp = *((struct nes_qp **)&context);
2934 nes_add_ref(&nesqp->ibqp);
2935 spin_lock_irqsave(&nesqp->lock, flags); 2929 spin_lock_irqsave(&nesqp->lock, flags);
2936 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR; 2930 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
2937 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; 2931 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
@@ -3042,7 +3036,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3042 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); 3036 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3043 } 3037 }
3044 /* tell cm to disconnect, cm will queue work to thread */ 3038 /* tell cm to disconnect, cm will queue work to thread */
3045 nes_add_ref(&nesqp->ibqp);
3046 nes_cm_disconn(nesqp); 3039 nes_cm_disconn(nesqp);
3047 break; 3040 break;
3048 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: 3041 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
@@ -3062,7 +3055,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3062 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); 3055 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3063 } 3056 }
3064 /* tell cm to disconnect, cm will queue work to thread */ 3057 /* tell cm to disconnect, cm will queue work to thread */
3065 nes_add_ref(&nesqp->ibqp);
3066 nes_cm_disconn(nesqp); 3058 nes_cm_disconn(nesqp);
3067 break; 3059 break;
3068 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR: 3060 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
@@ -3082,7 +3074,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3082 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context); 3074 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
3083 } 3075 }
3084 /* tell cm to disconnect, cm will queue work to thread */ 3076 /* tell cm to disconnect, cm will queue work to thread */
3085 nes_add_ref(&nesqp->ibqp);
3086 nes_cm_disconn(nesqp); 3077 nes_cm_disconn(nesqp);
3087 break; 3078 break;
3088 /* TODO: additional AEs need to be here */ 3079 /* TODO: additional AEs need to be here */
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index e3939d13484e..d79942e84979 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -2867,7 +2867,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2867 nesqp->hwqp.qp_id, attr->qp_state, nesqp->ibqp_state, 2867 nesqp->hwqp.qp_id, attr->qp_state, nesqp->ibqp_state,
2868 nesqp->iwarp_state, atomic_read(&nesqp->refcount)); 2868 nesqp->iwarp_state, atomic_read(&nesqp->refcount));
2869 2869
2870 nes_add_ref(&nesqp->ibqp);
2871 spin_lock_irqsave(&nesqp->lock, qplockflags); 2870 spin_lock_irqsave(&nesqp->lock, qplockflags);
2872 2871
2873 nes_debug(NES_DBG_MOD_QP, "QP%u: hw_iwarp_state=0x%X, hw_tcp_state=0x%X," 2872 nes_debug(NES_DBG_MOD_QP, "QP%u: hw_iwarp_state=0x%X, hw_tcp_state=0x%X,"
@@ -2882,7 +2881,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2882 nesqp->hwqp.qp_id); 2881 nesqp->hwqp.qp_id);
2883 if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_IDLE) { 2882 if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_IDLE) {
2884 spin_unlock_irqrestore(&nesqp->lock, qplockflags); 2883 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
2885 nes_rem_ref(&nesqp->ibqp);
2886 return -EINVAL; 2884 return -EINVAL;
2887 } 2885 }
2888 next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE; 2886 next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
@@ -2893,7 +2891,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2893 nesqp->hwqp.qp_id); 2891 nesqp->hwqp.qp_id);
2894 if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_IDLE) { 2892 if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_IDLE) {
2895 spin_unlock_irqrestore(&nesqp->lock, qplockflags); 2893 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
2896 nes_rem_ref(&nesqp->ibqp);
2897 return -EINVAL; 2894 return -EINVAL;
2898 } 2895 }
2899 next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE; 2896 next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
@@ -2904,14 +2901,12 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2904 nesqp->hwqp.qp_id); 2901 nesqp->hwqp.qp_id);
2905 if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_RTS) { 2902 if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_RTS) {
2906 spin_unlock_irqrestore(&nesqp->lock, qplockflags); 2903 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
2907 nes_rem_ref(&nesqp->ibqp);
2908 return -EINVAL; 2904 return -EINVAL;
2909 } 2905 }
2910 if (nesqp->cm_id == NULL) { 2906 if (nesqp->cm_id == NULL) {
2911 nes_debug(NES_DBG_MOD_QP, "QP%u: Failing attempt to move QP to RTS without a CM_ID. \n", 2907 nes_debug(NES_DBG_MOD_QP, "QP%u: Failing attempt to move QP to RTS without a CM_ID. \n",
2912 nesqp->hwqp.qp_id ); 2908 nesqp->hwqp.qp_id );
2913 spin_unlock_irqrestore(&nesqp->lock, qplockflags); 2909 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
2914 nes_rem_ref(&nesqp->ibqp);
2915 return -EINVAL; 2910 return -EINVAL;
2916 } 2911 }
2917 next_iwarp_state = NES_CQP_QP_IWARP_STATE_RTS; 2912 next_iwarp_state = NES_CQP_QP_IWARP_STATE_RTS;
@@ -2929,7 +2924,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2929 nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail); 2924 nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail);
2930 if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_CLOSING) { 2925 if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
2931 spin_unlock_irqrestore(&nesqp->lock, qplockflags); 2926 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
2932 nes_rem_ref(&nesqp->ibqp);
2933 return 0; 2927 return 0;
2934 } else { 2928 } else {
2935 if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_CLOSING) { 2929 if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
@@ -2937,7 +2931,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2937 " ignored due to current iWARP state\n", 2931 " ignored due to current iWARP state\n",
2938 nesqp->hwqp.qp_id); 2932 nesqp->hwqp.qp_id);
2939 spin_unlock_irqrestore(&nesqp->lock, qplockflags); 2933 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
2940 nes_rem_ref(&nesqp->ibqp);
2941 return -EINVAL; 2934 return -EINVAL;
2942 } 2935 }
2943 if (nesqp->hw_iwarp_state != NES_AEQE_IWARP_STATE_RTS) { 2936 if (nesqp->hw_iwarp_state != NES_AEQE_IWARP_STATE_RTS) {
@@ -2969,7 +2962,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2969 nesqp->hwqp.qp_id); 2962 nesqp->hwqp.qp_id);
2970 if (nesqp->iwarp_state>=(u32)NES_CQP_QP_IWARP_STATE_TERMINATE) { 2963 if (nesqp->iwarp_state>=(u32)NES_CQP_QP_IWARP_STATE_TERMINATE) {
2971 spin_unlock_irqrestore(&nesqp->lock, qplockflags); 2964 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
2972 nes_rem_ref(&nesqp->ibqp);
2973 return -EINVAL; 2965 return -EINVAL;
2974 } 2966 }
2975 /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */ 2967 /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
@@ -2982,7 +2974,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2982 case IB_QPS_RESET: 2974 case IB_QPS_RESET:
2983 if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_ERROR) { 2975 if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_ERROR) {
2984 spin_unlock_irqrestore(&nesqp->lock, qplockflags); 2976 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
2985 nes_rem_ref(&nesqp->ibqp);
2986 return -EINVAL; 2977 return -EINVAL;
2987 } 2978 }
2988 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n", 2979 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
@@ -3008,7 +2999,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3008 break; 2999 break;
3009 default: 3000 default:
3010 spin_unlock_irqrestore(&nesqp->lock, qplockflags); 3001 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3011 nes_rem_ref(&nesqp->ibqp);
3012 return -EINVAL; 3002 return -EINVAL;
3013 break; 3003 break;
3014 } 3004 }
@@ -3088,7 +3078,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3088 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), 3078 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3089 original_last_aeq, nesqp->last_aeq); 3079 original_last_aeq, nesqp->last_aeq);
3090 /* this one is for the cm_disconnect thread */ 3080 /* this one is for the cm_disconnect thread */
3091 nes_add_ref(&nesqp->ibqp);
3092 spin_lock_irqsave(&nesqp->lock, qplockflags); 3081 spin_lock_irqsave(&nesqp->lock, qplockflags);
3093 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED; 3082 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3094 nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT; 3083 nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
@@ -3097,14 +3086,12 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3097 } else { 3086 } else {
3098 nes_debug(NES_DBG_MOD_QP, "QP%u No fake disconnect, QP refcount=%d\n", 3087 nes_debug(NES_DBG_MOD_QP, "QP%u No fake disconnect, QP refcount=%d\n",
3099 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount)); 3088 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
3100 nes_rem_ref(&nesqp->ibqp);
3101 } 3089 }
3102 } else { 3090 } else {
3103 spin_lock_irqsave(&nesqp->lock, qplockflags); 3091 spin_lock_irqsave(&nesqp->lock, qplockflags);
3104 if (nesqp->cm_id) { 3092 if (nesqp->cm_id) {
3105 /* These two are for the timer thread */ 3093 /* These two are for the timer thread */
3106 if (atomic_inc_return(&nesqp->close_timer_started) == 1) { 3094 if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
3107 nes_add_ref(&nesqp->ibqp);
3108 nesqp->cm_id->add_ref(nesqp->cm_id); 3095 nesqp->cm_id->add_ref(nesqp->cm_id);
3109 nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d)," 3096 nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d),"
3110 " need ae to finish up, original_last_aeq = 0x%04X." 3097 " need ae to finish up, original_last_aeq = 0x%04X."
@@ -3128,14 +3115,12 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3128 " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n", 3115 " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
3129 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), 3116 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3130 original_last_aeq, nesqp->last_aeq); 3117 original_last_aeq, nesqp->last_aeq);
3131 nes_rem_ref(&nesqp->ibqp);
3132 } 3118 }
3133 } else { 3119 } else {
3134 nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up," 3120 nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up,"
3135 " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n", 3121 " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
3136 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount), 3122 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3137 original_last_aeq, nesqp->last_aeq); 3123 original_last_aeq, nesqp->last_aeq);
3138 nes_rem_ref(&nesqp->ibqp);
3139 } 3124 }
3140 3125
3141 err = 0; 3126 err = 0;
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 691525cf394a..9d9a9dc51f18 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -11,16 +11,17 @@ config INFINIBAND_IPOIB
11 11
12config INFINIBAND_IPOIB_CM 12config INFINIBAND_IPOIB_CM
13 bool "IP-over-InfiniBand Connected Mode support" 13 bool "IP-over-InfiniBand Connected Mode support"
14 depends on INFINIBAND_IPOIB && EXPERIMENTAL 14 depends on INFINIBAND_IPOIB
15 default n 15 default n
16 ---help--- 16 ---help---
17 This option enables experimental support for IPoIB connected mode. 17 This option enables support for IPoIB connected mode. After
18 After enabling this option, you need to switch to connected mode through 18 enabling this option, you need to switch to connected mode
19 /sys/class/net/ibXXX/mode to actually create connections, and then increase 19 through /sys/class/net/ibXXX/mode to actually create
20 the interface MTU with e.g. ifconfig ib0 mtu 65520. 20 connections, and then increase the interface MTU with
21 e.g. ifconfig ib0 mtu 65520.
21 22
22 WARNING: Enabling connected mode will trigger some 23 WARNING: Enabling connected mode will trigger some packet
23 packet drops for multicast and UD mode traffic from this interface, 24 drops for multicast and UD mode traffic from this interface,
24 unless you limit mtu for these destinations to 2044. 25 unless you limit mtu for these destinations to 2044.
25 26
26config INFINIBAND_IPOIB_DEBUG 27config INFINIBAND_IPOIB_DEBUG
@@ -33,9 +34,10 @@ config INFINIBAND_IPOIB_DEBUG
33 debug_level and mcast_debug_level module parameters (which 34 debug_level and mcast_debug_level module parameters (which
34 can also be set after the driver is loaded through sysfs). 35 can also be set after the driver is loaded through sysfs).
35 36
36 This option also creates an "ipoib_debugfs," which can be 37 This option also creates a directory tree under ipoib/ in
37 mounted to expose debugging information about IB multicast 38 debugfs, which contains files that expose debugging
38 groups used by the IPoIB driver. 39 information about IB multicast groups used by the IPoIB
40 driver.
39 41
40config INFINIBAND_IPOIB_DEBUG_DATA 42config INFINIBAND_IPOIB_DEBUG_DATA
41 bool "IP-over-InfiniBand data path debugging" 43 bool "IP-over-InfiniBand data path debugging"
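The updated INFINIBAND_IPOIB_CM help text boils down to two runtime steps: select connected mode through the per-interface sysfs mode file, then raise the interface MTU. Below is only a minimal userspace sketch of those steps in C, assuming an interface named ib0 and the sysfs path quoted in the help text; it is illustrative and not part of the patch.

/* sketch: switch ib0 to IPoIB connected mode and raise its MTU */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int main(void)
{
	struct ifreq ifr;
	int fd;

	/* step 1: write "connected" to /sys/class/net/ib0/mode */
	fd = open("/sys/class/net/ib0/mode", O_WRONLY);
	if (fd < 0) {
		perror("open mode");
		return 1;
	}
	if (write(fd, "connected\n", 10) != 10)
		perror("write mode");
	close(fd);

	/* step 2: set the MTU to 65520, as the help text suggests */
	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "ib0", IFNAMSIZ - 1);
	ifr.ifr_mtu = 65520;
	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)
		perror("SIOCSIFMTU");
	close(fd);
	return 0;
}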
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 66f946aa30b3..3d113c6e4a70 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menuconfig ISDN 5menuconfig ISDN
6 tristate "ISDN support" 6 bool "ISDN support"
7 depends on NET 7 depends on NET
8 depends on !S390 8 depends on !S390
9 ---help--- 9 ---help---
@@ -21,6 +21,8 @@ menuconfig ISDN
21 21
22if ISDN 22if ISDN
23 23
24source "drivers/isdn/mISDN/Kconfig"
25
24menuconfig ISDN_I4L 26menuconfig ISDN_I4L
25 tristate "Old ISDN4Linux (deprecated)" 27 tristate "Old ISDN4Linux (deprecated)"
26 ---help--- 28 ---help---
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index 988142c30a6d..8380a4568d11 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_ISDN_I4L) += i4l/ 5obj-$(CONFIG_ISDN_I4L) += i4l/
6obj-$(CONFIG_ISDN_CAPI) += capi/ 6obj-$(CONFIG_ISDN_CAPI) += capi/
7obj-$(CONFIG_MISDN) += mISDN/
7obj-$(CONFIG_ISDN_CAPI) += hardware/ 8obj-$(CONFIG_ISDN_CAPI) += hardware/
8obj-$(CONFIG_ISDN_DIVERSION) += divert/ 9obj-$(CONFIG_ISDN_DIVERSION) += divert/
9obj-$(CONFIG_ISDN_DRV_HISAX) += hisax/ 10obj-$(CONFIG_ISDN_DRV_HISAX) += hisax/
diff --git a/drivers/isdn/hardware/Makefile b/drivers/isdn/hardware/Makefile
index 11c8a183948c..a5d8fce4c4c4 100644
--- a/drivers/isdn/hardware/Makefile
+++ b/drivers/isdn/hardware/Makefile
@@ -4,3 +4,4 @@
4 4
5obj-$(CONFIG_CAPI_AVM) += avm/ 5obj-$(CONFIG_CAPI_AVM) += avm/
6obj-$(CONFIG_CAPI_EICON) += eicon/ 6obj-$(CONFIG_CAPI_EICON) += eicon/
7obj-$(CONFIG_MISDN) += mISDN/
diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig
new file mode 100644
index 000000000000..9cd5f5f62280
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/Kconfig
@@ -0,0 +1,26 @@
1#
2# Hardware for mISDN
3#
4comment "mISDN hardware drivers"
5
6config MISDN_HFCPCI
7 tristate "Support for HFC PCI cards"
8 depends on MISDN
9 depends on PCI
10 depends on VIRT_TO_BUS
11 help
12 Enable support for cards with Cologne Chip AG's
13 HFC PCI chip.
14
15config MISDN_HFCMULTI
16 tristate "Support for HFC multiport cards (HFC-4S/8S/E1)"
17 depends on PCI
18 depends on MISDN
19 help
20 Enable support for cards with Cologne Chip AG's HFC multiport
21 chip. There are three types of chips that are quite similar,
22 but the interface is different:
23 * HFC-4S (4 S/T interfaces on one chip)
24 * HFC-8S (8 S/T interfaces on one chip)
25 * HFC-E1 (E1 interface for 2Mbit ISDN)
26
diff --git a/drivers/isdn/hardware/mISDN/Makefile b/drivers/isdn/hardware/mISDN/Makefile
new file mode 100644
index 000000000000..1e7ca5332ad7
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the modular ISDN hardware drivers
3#
4#
5
6obj-$(CONFIG_MISDN_HFCPCI) += hfcpci.o
7obj-$(CONFIG_MISDN_HFCMULTI) += hfcmulti.o
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi.h b/drivers/isdn/hardware/mISDN/hfc_multi.h
new file mode 100644
index 000000000000..a33d87afc843
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/hfc_multi.h
@@ -0,0 +1,1204 @@
1/*
 2 * see notice in hfcmulti.c
3 */
4
5extern void ztdummy_extern_interrupt(void);
6extern void ztdummy_register_interrupt(void);
7extern int ztdummy_unregister_interrupt(void);
8
9#define DEBUG_HFCMULTI_FIFO 0x00010000
10#define DEBUG_HFCMULTI_CRC 0x00020000
11#define DEBUG_HFCMULTI_INIT 0x00040000
12#define DEBUG_HFCMULTI_PLXSD 0x00080000
13#define DEBUG_HFCMULTI_MODE 0x00100000
14#define DEBUG_HFCMULTI_MSG 0x00200000
15#define DEBUG_HFCMULTI_STATE 0x00400000
16#define DEBUG_HFCMULTI_SYNC 0x01000000
17#define DEBUG_HFCMULTI_DTMF 0x02000000
18#define DEBUG_HFCMULTI_LOCK 0x80000000
19
20#define PCI_ENA_REGIO 0x01
21#define PCI_ENA_MEMIO 0x02
22
23/*
24 * NOTE: some registers are assigned multiple times due to different modes
 25 * also registers are assigned differently for HFC-4s/8s and HFC-E1
26 */
27
28/*
29#define MAX_FRAME_SIZE 2048
30*/
31
32struct hfc_chan {
33 struct dchannel *dch; /* link if channel is a D-channel */
34 struct bchannel *bch; /* link if channel is a B-channel */
35 int port; /* the interface port this */
36 /* channel is associated with */
37 int nt_timer; /* -1 if off, 0 if elapsed, >0 if running */
38 int los, ais, slip_tx, slip_rx, rdi; /* current alarms */
39 int jitter;
40 u_long cfg; /* port configuration */
41 int sync; /* sync state (used by E1) */
42 u_int protocol; /* current protocol */
43 int slot_tx; /* current pcm slot */
44 int bank_tx; /* current pcm bank */
45 int slot_rx;
46 int bank_rx;
47 int conf; /* conference setting of TX slot */
48 int txpending; /* if there is currently data in */
49 /* the FIFO 0=no, 1=yes, 2=splloop */
50 int rx_off; /* set to turn fifo receive off */
 51 int coeff_count; /* current coeff block */
52 s32 *coeff; /* memory pointer to 8 coeff blocks */
53};
54
55
56struct hfcm_hw {
57 u_char r_ctrl;
58 u_char r_irq_ctrl;
59 u_char r_cirm;
60 u_char r_ram_sz;
61 u_char r_pcm_md0;
62 u_char r_irqmsk_misc;
63 u_char r_dtmf;
64 u_char r_st_sync;
65 u_char r_sci_msk;
66 u_char r_tx0, r_tx1;
67 u_char a_st_ctrl0[8];
68 timer_t timer;
69};
70
71
72/* for each stack these flags are used (cfg) */
73#define HFC_CFG_NONCAP_TX 1 /* S/T TX interface has less capacity */
74#define HFC_CFG_DIS_ECHANNEL 2 /* disable E-channel processing */
75#define HFC_CFG_REG_ECHANNEL 3 /* register E-channel */
76#define HFC_CFG_OPTICAL 4 /* the E1 interface is optical */
77#define HFC_CFG_REPORT_LOS 5 /* the card should report loss of signal */
78#define HFC_CFG_REPORT_AIS 6 /* the card should report alarm ind. sign. */
79#define HFC_CFG_REPORT_SLIP 7 /* the card should report bit slips */
80#define HFC_CFG_REPORT_RDI 8 /* the card should report remote alarm */
81#define HFC_CFG_DTMF 9 /* enable DTMF-detection */
82#define HFC_CFG_CRC4 10 /* disable CRC-4 Multiframe mode, */
83 /* use double frame instead. */
84
85#define HFC_CHIP_EXRAM_128 0 /* external ram 128k */
 86#define HFC_CHIP_EXRAM_512 1 /* external ram 512k */
87#define HFC_CHIP_REVISION0 2 /* old fifo handling */
88#define HFC_CHIP_PCM_SLAVE 3 /* PCM is slave */
89#define HFC_CHIP_PCM_MASTER 4 /* PCM is master */
90#define HFC_CHIP_RX_SYNC 5 /* disable pll sync for pcm */
91#define HFC_CHIP_DTMF 6 /* DTMF decoding is enabled */
92#define HFC_CHIP_ULAW 7 /* ULAW mode */
93#define HFC_CHIP_CLOCK2 8 /* double clock mode */
94#define HFC_CHIP_E1CLOCK_GET 9 /* always get clock from E1 interface */
95#define HFC_CHIP_E1CLOCK_PUT 10 /* always put clock from E1 interface */
96#define HFC_CHIP_WATCHDOG 11 /* whether we should send signals */
97 /* to the watchdog */
98#define HFC_CHIP_B410P 12 /* whether we have a b410p with echocan in */
99 /* hw */
100#define HFC_CHIP_PLXSD 13 /* whether we have a Speech-Design PLX */
101
102#define HFC_IO_MODE_PCIMEM 0x00 /* normal memory mapped IO */
103#define HFC_IO_MODE_REGIO 0x01 /* PCI io access */
104#define HFC_IO_MODE_PLXSD 0x02 /* access HFC via PLX9030 */
105
106/* table entry in the PCI devices list */
107struct hm_map {
108 char *vendor_name;
109 char *card_name;
110 int type;
111 int ports;
112 int clock2;
113 int leds;
114 int opticalsupport;
115 int dip_type;
116 int io_mode;
117};
118
119struct hfc_multi {
120 struct list_head list;
121 struct hm_map *mtyp;
122 int id;
123 int pcm; /* id of pcm bus */
124 int type;
125 int ports;
126
127 u_int irq; /* irq used by card */
128 u_int irqcnt;
129 struct pci_dev *pci_dev;
130 int io_mode; /* selects mode */
131#ifdef HFC_REGISTER_DEBUG
132 void (*HFC_outb)(struct hfc_multi *hc, u_char reg,
133 u_char val, const char *function, int line);
134 void (*HFC_outb_nodebug)(struct hfc_multi *hc, u_char reg,
135 u_char val, const char *function, int line);
136 u_char (*HFC_inb)(struct hfc_multi *hc, u_char reg,
137 const char *function, int line);
138 u_char (*HFC_inb_nodebug)(struct hfc_multi *hc, u_char reg,
139 const char *function, int line);
140 u_short (*HFC_inw)(struct hfc_multi *hc, u_char reg,
141 const char *function, int line);
142 u_short (*HFC_inw_nodebug)(struct hfc_multi *hc, u_char reg,
143 const char *function, int line);
144 void (*HFC_wait)(struct hfc_multi *hc,
145 const char *function, int line);
146 void (*HFC_wait_nodebug)(struct hfc_multi *hc,
147 const char *function, int line);
148#else
149 void (*HFC_outb)(struct hfc_multi *hc, u_char reg,
150 u_char val);
151 void (*HFC_outb_nodebug)(struct hfc_multi *hc, u_char reg,
152 u_char val);
153 u_char (*HFC_inb)(struct hfc_multi *hc, u_char reg);
154 u_char (*HFC_inb_nodebug)(struct hfc_multi *hc, u_char reg);
155 u_short (*HFC_inw)(struct hfc_multi *hc, u_char reg);
156 u_short (*HFC_inw_nodebug)(struct hfc_multi *hc, u_char reg);
157 void (*HFC_wait)(struct hfc_multi *hc);
158 void (*HFC_wait_nodebug)(struct hfc_multi *hc);
159#endif
160 void (*read_fifo)(struct hfc_multi *hc, u_char *data,
161 int len);
162 void (*write_fifo)(struct hfc_multi *hc, u_char *data,
163 int len);
164 u_long pci_origmembase, plx_origmembase, dsp_origmembase;
165 u_char *pci_membase; /* PCI memory (MUST BE BYTE POINTER) */
166 u_char *plx_membase; /* PLX memory */
167 u_char *dsp_membase; /* DSP on PLX */
168 u_long pci_iobase; /* PCI IO */
169 struct hfcm_hw hw; /* remember data of write-only-registers */
170
171 u_long chip; /* chip configuration */
172 int masterclk; /* port that provides master clock -1=off */
173 int dtmf; /* flag that dtmf is currently in process */
174 int Flen; /* F-buffer size */
175 int Zlen; /* Z-buffer size (must be int for calculation)*/
176 int max_trans; /* maximum transparent fifo fill */
177 int Zmin; /* Z-buffer offset */
178 int DTMFbase; /* base address of DTMF coefficients */
179
180 u_int slots; /* number of PCM slots */
181 u_int leds; /* type of leds */
182 u_int ledcount; /* used to animate leds */
183 u_long ledstate; /* save last state of leds */
 184 int opticalsupport; /* whether the E1 board has */
 185 /* an optical interface */
186 int dslot; /* channel # of d-channel (E1) default 16 */
187
188 u_long wdcount; /* every 500 ms we need to */
189 /* send the watchdog a signal */
190 u_char wdbyte; /* watchdog toggle byte */
191 u_int activity[8]; /* if there is any action on this */
192 /* port (will be cleared after */
193 /* showing led-states) */
194 int e1_state; /* keep track of last state */
195 int e1_getclock; /* if sync is retrieved from interface */
196 int syncronized; /* keep track of existing sync interface */
197 int e1_resync; /* resync jobs */
198
199 spinlock_t lock; /* the lock */
200
201 /*
 202 * the channel index is counted from 0, regardless of where the channel
 203 * is located on the hfc-channel.
 204 * the bch->channel is equivalent to the hfc-channel
205 */
206 struct hfc_chan chan[32];
207 u_char created[8]; /* what port is created */
208 signed char slot_owner[256]; /* owner channel of slot */
209};
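The HFC_CFG_* and HFC_CHIP_* constants defined earlier in this header are consecutive bit numbers rather than masks, and the words they describe (the u_long cfg member of struct hfc_chan and the u_long chip member of struct hfc_multi above) are sized for the kernel bitops from <linux/bitops.h>. A minimal illustrative sketch of that usage follows; hc and ch are hypothetical variables and the snippet is not taken from hfcmulti.c.

/* illustrative only: hc and ch are placeholders, not driver code */
static void example_set_flags(struct hfc_multi *hc, int ch)
{
	/* chip-wide properties are kept as bit numbers in hc->chip */
	if (!test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip))
		set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);

	/* per-channel/port options are kept as bit numbers in cfg */
	set_bit(HFC_CFG_REPORT_LOS, &hc->chan[ch].cfg);
	set_bit(HFC_CFG_OPTICAL, &hc->chan[ch].cfg);
}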
210
211/* PLX GPIOs */
212#define PLX_GPIO4_DIR_BIT 13
213#define PLX_GPIO4_BIT 14
214#define PLX_GPIO5_DIR_BIT 16
215#define PLX_GPIO5_BIT 17
216#define PLX_GPIO6_DIR_BIT 19
217#define PLX_GPIO6_BIT 20
218#define PLX_GPIO7_DIR_BIT 22
219#define PLX_GPIO7_BIT 23
220#define PLX_GPIO8_DIR_BIT 25
221#define PLX_GPIO8_BIT 26
222
223#define PLX_GPIO4 (1 << PLX_GPIO4_BIT)
224#define PLX_GPIO5 (1 << PLX_GPIO5_BIT)
225#define PLX_GPIO6 (1 << PLX_GPIO6_BIT)
226#define PLX_GPIO7 (1 << PLX_GPIO7_BIT)
227#define PLX_GPIO8 (1 << PLX_GPIO8_BIT)
228
229#define PLX_GPIO4_DIR (1 << PLX_GPIO4_DIR_BIT)
230#define PLX_GPIO5_DIR (1 << PLX_GPIO5_DIR_BIT)
231#define PLX_GPIO6_DIR (1 << PLX_GPIO6_DIR_BIT)
232#define PLX_GPIO7_DIR (1 << PLX_GPIO7_DIR_BIT)
233#define PLX_GPIO8_DIR (1 << PLX_GPIO8_DIR_BIT)
234
235#define PLX_TERM_ON PLX_GPIO7
236#define PLX_SLAVE_EN_N PLX_GPIO5
237#define PLX_MASTER_EN PLX_GPIO6
238#define PLX_SYNC_O_EN PLX_GPIO4
239#define PLX_DSP_RES_N PLX_GPIO8
240/* GPIO4..8 Enable & Set to OUT, SLAVE_EN_N = 1 */
241#define PLX_GPIOC_INIT (PLX_GPIO4_DIR | PLX_GPIO5_DIR | PLX_GPIO6_DIR \
242 | PLX_GPIO7_DIR | PLX_GPIO8_DIR | PLX_SLAVE_EN_N)
243
244/* PLX Interrupt Control/STATUS */
245#define PLX_INTCSR_LINTI1_ENABLE 0x01
246#define PLX_INTCSR_LINTI1_STATUS 0x04
247#define PLX_INTCSR_LINTI2_ENABLE 0x08
248#define PLX_INTCSR_LINTI2_STATUS 0x20
249#define PLX_INTCSR_PCIINT_ENABLE 0x40
250
251/* PLX Registers */
252#define PLX_INTCSR 0x4c
253#define PLX_CNTRL 0x50
254#define PLX_GPIOC 0x54
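PLX_GPIOC_INIT above composes the direction bits for GPIO4..8 together with an inactive (high) SLAVE_EN_N level, and PLX_GPIOC is the offset of the GPIO control register in the PLX local register space. The following hedged sketch shows how such a value could be applied, assuming the region has already been ioremap()ed and that 32-bit readl()/writel() accesses are appropriate; it is not code from hfcmulti.c.

/* illustrative only: plx_membase is assumed to map the PLX registers */
static void example_plx_gpio_init(void __iomem *plx_membase)
{
	u32 gpioc;

	/* make GPIO4..8 outputs and leave SLAVE_EN_N deasserted */
	gpioc = readl(plx_membase + PLX_GPIOC);
	gpioc |= PLX_GPIOC_INIT;
	writel(gpioc, plx_membase + PLX_GPIOC);
}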
255
256
257/*
258 * REGISTER SETTING FOR HFC-4S/8S AND HFC-E1
259 */
260
261/* write only registers */
262#define R_CIRM 0x00
263#define R_CTRL 0x01
264#define R_BRG_PCM_CFG 0x02
265#define R_RAM_ADDR0 0x08
266#define R_RAM_ADDR1 0x09
267#define R_RAM_ADDR2 0x0A
268#define R_FIRST_FIFO 0x0B
269#define R_RAM_SZ 0x0C
270#define R_FIFO_MD 0x0D
271#define R_INC_RES_FIFO 0x0E
272#define R_FSM_IDX 0x0F
273#define R_FIFO 0x0F
274#define R_SLOT 0x10
275#define R_IRQMSK_MISC 0x11
276#define R_SCI_MSK 0x12
277#define R_IRQ_CTRL 0x13
278#define R_PCM_MD0 0x14
279#define R_PCM_MD1 0x15
280#define R_PCM_MD2 0x15
281#define R_SH0H 0x15
282#define R_SH1H 0x15
283#define R_SH0L 0x15
284#define R_SH1L 0x15
285#define R_SL_SEL0 0x15
286#define R_SL_SEL1 0x15
287#define R_SL_SEL2 0x15
288#define R_SL_SEL3 0x15
289#define R_SL_SEL4 0x15
290#define R_SL_SEL5 0x15
291#define R_SL_SEL6 0x15
292#define R_SL_SEL7 0x15
293#define R_ST_SEL 0x16
294#define R_ST_SYNC 0x17
295#define R_CONF_EN 0x18
296#define R_TI_WD 0x1A
297#define R_BERT_WD_MD 0x1B
298#define R_DTMF 0x1C
299#define R_DTMF_N 0x1D
300#define R_E1_WR_STA 0x20
301#define R_E1_RD_STA 0x20
302#define R_LOS0 0x22
303#define R_LOS1 0x23
304#define R_RX0 0x24
305#define R_RX_FR0 0x25
306#define R_RX_FR1 0x26
307#define R_TX0 0x28
308#define R_TX1 0x29
309#define R_TX_FR0 0x2C
310
311#define R_TX_FR1 0x2D
312#define R_TX_FR2 0x2E
313#define R_JATT_ATT 0x2F /* undocumented */
314#define A_ST_RD_STATE 0x30
315#define A_ST_WR_STATE 0x30
316#define R_RX_OFF 0x30
317#define A_ST_CTRL0 0x31
318#define R_SYNC_OUT 0x31
319#define A_ST_CTRL1 0x32
320#define A_ST_CTRL2 0x33
321#define A_ST_SQ_WR 0x34
322#define R_TX_OFF 0x34
323#define R_SYNC_CTRL 0x35
324#define A_ST_CLK_DLY 0x37
325#define R_PWM0 0x38
326#define R_PWM1 0x39
327#define A_ST_B1_TX 0x3C
328#define A_ST_B2_TX 0x3D
329#define A_ST_D_TX 0x3E
330#define R_GPIO_OUT0 0x40
331#define R_GPIO_OUT1 0x41
332#define R_GPIO_EN0 0x42
333#define R_GPIO_EN1 0x43
334#define R_GPIO_SEL 0x44
335#define R_BRG_CTRL 0x45
336#define R_PWM_MD 0x46
337#define R_BRG_MD 0x47
338#define R_BRG_TIM0 0x48
339#define R_BRG_TIM1 0x49
340#define R_BRG_TIM2 0x4A
341#define R_BRG_TIM3 0x4B
342#define R_BRG_TIM_SEL01 0x4C
343#define R_BRG_TIM_SEL23 0x4D
344#define R_BRG_TIM_SEL45 0x4E
345#define R_BRG_TIM_SEL67 0x4F
346#define A_SL_CFG 0xD0
347#define A_CONF 0xD1
348#define A_CH_MSK 0xF4
349#define A_CON_HDLC 0xFA
350#define A_SUBCH_CFG 0xFB
351#define A_CHANNEL 0xFC
352#define A_FIFO_SEQ 0xFD
353#define A_IRQ_MSK 0xFF
354
355/* read only registers */
356#define A_Z12 0x04
357#define A_Z1L 0x04
358#define A_Z1 0x04
359#define A_Z1H 0x05
360#define A_Z2L 0x06
361#define A_Z2 0x06
362#define A_Z2H 0x07
363#define A_F1 0x0C
364#define A_F12 0x0C
365#define A_F2 0x0D
366#define R_IRQ_OVIEW 0x10
367#define R_IRQ_MISC 0x11
368#define R_IRQ_STATECH 0x12
369#define R_CONF_OFLOW 0x14
370#define R_RAM_USE 0x15
371#define R_CHIP_ID 0x16
372#define R_BERT_STA 0x17
373#define R_F0_CNTL 0x18
374#define R_F0_CNTH 0x19
375#define R_BERT_EC 0x1A
376#define R_BERT_ECL 0x1A
377#define R_BERT_ECH 0x1B
378#define R_STATUS 0x1C
379#define R_CHIP_RV 0x1F
380#define R_STATE 0x20
381#define R_SYNC_STA 0x24
382#define R_RX_SL0_0 0x25
383#define R_RX_SL0_1 0x26
384#define R_RX_SL0_2 0x27
385#define R_JATT_DIR 0x2b /* undocumented */
386#define R_SLIP 0x2c
387#define A_ST_RD_STA 0x30
388#define R_FAS_EC 0x30
389#define R_FAS_ECL 0x30
390#define R_FAS_ECH 0x31
391#define R_VIO_EC 0x32
392#define R_VIO_ECL 0x32
393#define R_VIO_ECH 0x33
394#define A_ST_SQ_RD 0x34
395#define R_CRC_EC 0x34
396#define R_CRC_ECL 0x34
397#define R_CRC_ECH 0x35
398#define R_E_EC 0x36
399#define R_E_ECL 0x36
400#define R_E_ECH 0x37
401#define R_SA6_SA13_EC 0x38
402#define R_SA6_SA13_ECL 0x38
403#define R_SA6_SA13_ECH 0x39
404#define R_SA6_SA23_EC 0x3A
405#define R_SA6_SA23_ECL 0x3A
406#define R_SA6_SA23_ECH 0x3B
407#define A_ST_B1_RX 0x3C
408#define A_ST_B2_RX 0x3D
409#define A_ST_D_RX 0x3E
410#define A_ST_E_RX 0x3F
411#define R_GPIO_IN0 0x40
412#define R_GPIO_IN1 0x41
413#define R_GPI_IN0 0x44
414#define R_GPI_IN1 0x45
415#define R_GPI_IN2 0x46
416#define R_GPI_IN3 0x47
417#define R_INT_DATA 0x88
418#define R_IRQ_FIFO_BL0 0xC8
419#define R_IRQ_FIFO_BL1 0xC9
420#define R_IRQ_FIFO_BL2 0xCA
421#define R_IRQ_FIFO_BL3 0xCB
422#define R_IRQ_FIFO_BL4 0xCC
423#define R_IRQ_FIFO_BL5 0xCD
424#define R_IRQ_FIFO_BL6 0xCE
425#define R_IRQ_FIFO_BL7 0xCF
426
427/* read and write registers */
428#define A_FIFO_DATA0 0x80
429#define A_FIFO_DATA1 0x80
430#define A_FIFO_DATA2 0x80
431#define A_FIFO_DATA0_NOINC 0x84
432#define A_FIFO_DATA1_NOINC 0x84
433#define A_FIFO_DATA2_NOINC 0x84
434#define R_RAM_DATA 0xC0
435
436
437/*
438 * BIT SETTING FOR HFC-4S/8S AND HFC-E1
439 */
440
441/* chapter 2: universal bus interface */
442/* R_CIRM */
443#define V_IRQ_SEL 0x01
444#define V_SRES 0x08
445#define V_HFCRES 0x10
446#define V_PCMRES 0x20
447#define V_STRES 0x40
448#define V_ETRES 0x40
449#define V_RLD_EPR 0x80
450/* R_CTRL */
451#define V_FIFO_LPRIO 0x02
452#define V_SLOW_RD 0x04
453#define V_EXT_RAM 0x08
454#define V_CLK_OFF 0x20
455#define V_ST_CLK 0x40
456/* R_RAM_ADDR0 */
457#define V_RAM_ADDR2 0x01
458#define V_ADDR_RES 0x40
459#define V_ADDR_INC 0x80
460/* R_RAM_SZ */
461#define V_RAM_SZ 0x01
462#define V_PWM0_16KHZ 0x10
463#define V_PWM1_16KHZ 0x20
464#define V_FZ_MD 0x80
465/* R_CHIP_ID */
466#define V_PNP_IRQ 0x01
467#define V_CHIP_ID 0x10
468
469/* chapter 3: data flow */
470/* R_FIRST_FIFO */
471#define V_FIRST_FIRO_DIR 0x01
472#define V_FIRST_FIFO_NUM 0x02
473/* R_FIFO_MD */
474#define V_FIFO_MD 0x01
475#define V_CSM_MD 0x04
476#define V_FSM_MD 0x08
477#define V_FIFO_SZ 0x10
478/* R_FIFO */
479#define V_FIFO_DIR 0x01
480#define V_FIFO_NUM 0x02
481#define V_REV 0x80
482/* R_SLOT */
483#define V_SL_DIR 0x01
484#define V_SL_NUM 0x02
485/* A_SL_CFG */
486#define V_CH_DIR 0x01
487#define V_CH_SEL 0x02
488#define V_ROUTING 0x40
489/* A_CON_HDLC */
490#define V_IFF 0x01
491#define V_HDLC_TRP 0x02
492#define V_TRP_IRQ 0x04
493#define V_DATA_FLOW 0x20
494/* A_SUBCH_CFG */
495#define V_BIT_CNT 0x01
496#define V_START_BIT 0x08
497#define V_LOOP_FIFO 0x40
498#define V_INV_DATA 0x80
499/* A_CHANNEL */
500#define V_CH_DIR0 0x01
501#define V_CH_NUM0 0x02
502/* A_FIFO_SEQ */
503#define V_NEXT_FIFO_DIR 0x01
504#define V_NEXT_FIFO_NUM 0x02
505#define V_SEQ_END 0x40
506
507/* chapter 4: FIFO handling and HDLC controller */
508/* R_INC_RES_FIFO */
509#define V_INC_F 0x01
510#define V_RES_F 0x02
511#define V_RES_LOST 0x04
512
513/* chapter 5: S/T interface */
514/* R_SCI_MSK */
515#define V_SCI_MSK_ST0 0x01
516#define V_SCI_MSK_ST1 0x02
517#define V_SCI_MSK_ST2 0x04
518#define V_SCI_MSK_ST3 0x08
519#define V_SCI_MSK_ST4 0x10
520#define V_SCI_MSK_ST5 0x20
521#define V_SCI_MSK_ST6 0x40
522#define V_SCI_MSK_ST7 0x80
523/* R_ST_SEL */
524#define V_ST_SEL 0x01
525#define V_MULT_ST 0x08
526/* R_ST_SYNC */
527#define V_SYNC_SEL 0x01
528#define V_AUTO_SYNC 0x08
529/* A_ST_WR_STA */
530#define V_ST_SET_STA 0x01
531#define V_ST_LD_STA 0x10
532#define V_ST_ACT 0x20
533#define V_SET_G2_G3 0x80
534/* A_ST_CTRL0 */
535#define V_B1_EN 0x01
536#define V_B2_EN 0x02
537#define V_ST_MD 0x04
538#define V_D_PRIO 0x08
539#define V_SQ_EN 0x10
540#define V_96KHZ 0x20
541#define V_TX_LI 0x40
542#define V_ST_STOP 0x80
543/* A_ST_CTRL1 */
544#define V_G2_G3_EN 0x01
545#define V_D_HI 0x04
546#define V_E_IGNO 0x08
547#define V_E_LO 0x10
548#define V_B12_SWAP 0x80
549/* A_ST_CTRL2 */
550#define V_B1_RX_EN 0x01
551#define V_B2_RX_EN 0x02
552#define V_ST_TRIS 0x40
553/* A_ST_CLK_DLY */
554#define V_ST_CK_DLY 0x01
555#define V_ST_SMPL 0x10
556/* A_ST_D_TX */
557#define V_ST_D_TX 0x40
558/* R_IRQ_STATECH */
559#define V_SCI_ST0 0x01
560#define V_SCI_ST1 0x02
561#define V_SCI_ST2 0x04
562#define V_SCI_ST3 0x08
563#define V_SCI_ST4 0x10
564#define V_SCI_ST5 0x20
565#define V_SCI_ST6 0x40
566#define V_SCI_ST7 0x80
567/* A_ST_RD_STA */
568#define V_ST_STA 0x01
569#define V_FR_SYNC_ST 0x10
570#define V_TI2_EXP 0x20
571#define V_INFO0 0x40
572#define V_G2_G3 0x80
573/* A_ST_SQ_RD */
574#define V_ST_SQ 0x01
575#define V_MF_RX_RDY 0x10
576#define V_MF_TX_RDY 0x80
577/* A_ST_D_RX */
578#define V_ST_D_RX 0x40
579/* A_ST_E_RX */
580#define V_ST_E_RX 0x40
581
582/* chapter 5: E1 interface */
583/* R_E1_WR_STA */
584/* R_E1_RD_STA */
585#define V_E1_SET_STA 0x01
586#define V_E1_LD_STA 0x10
587/* R_RX0 */
588#define V_RX_CODE 0x01
589#define V_RX_FBAUD 0x04
590#define V_RX_CMI 0x08
591#define V_RX_INV_CMI 0x10
592#define V_RX_INV_CLK 0x20
593#define V_RX_INV_DATA 0x40
594#define V_AIS_ITU 0x80
595/* R_RX_FR0 */
596#define V_NO_INSYNC 0x01
597#define V_AUTO_RESYNC 0x02
598#define V_AUTO_RECO 0x04
599#define V_SWORD_COND 0x08
600#define V_SYNC_LOSS 0x10
601#define V_XCRC_SYNC 0x20
602#define V_MF_RESYNC 0x40
603#define V_RESYNC 0x80
604/* R_RX_FR1 */
605#define V_RX_MF 0x01
606#define V_RX_MF_SYNC 0x02
607#define V_RX_SL0_RAM 0x04
608#define V_ERR_SIM 0x20
609#define V_RES_NMF 0x40
610/* R_TX0 */
611#define V_TX_CODE 0x01
612#define V_TX_FBAUD 0x04
613#define V_TX_CMI_CODE 0x08
614#define V_TX_INV_CMI_CODE 0x10
615#define V_TX_INV_CLK 0x20
616#define V_TX_INV_DATA 0x40
617#define V_OUT_EN 0x80
618/* R_TX1 */
619#define V_INV_CLK 0x01
620#define V_EXCHG_DATA_LI 0x02
621#define V_AIS_OUT 0x04
622#define V_ATX 0x20
623#define V_NTRI 0x40
624#define V_AUTO_ERR_RES 0x80
625/* R_TX_FR0 */
626#define V_TRP_FAS 0x01
627#define V_TRP_NFAS 0x02
628#define V_TRP_RAL 0x04
629#define V_TRP_SA 0x08
630/* R_TX_FR1 */
631#define V_TX_FAS 0x01
632#define V_TX_NFAS 0x02
633#define V_TX_RAL 0x04
634#define V_TX_SA 0x08
635/* R_TX_FR2 */
636#define V_TX_MF 0x01
637#define V_TRP_SL0 0x02
638#define V_TX_SL0_RAM 0x04
639#define V_TX_E 0x10
640#define V_NEG_E 0x20
641#define V_XS12_ON 0x40
642#define V_XS15_ON 0x80
643/* R_RX_OFF */
644#define V_RX_SZ 0x01
645#define V_RX_INIT 0x04
646/* R_SYNC_OUT */
647#define V_SYNC_E1_RX 0x01
648#define V_IPATS0 0x20
649#define V_IPATS1 0x40
650#define V_IPATS2 0x80
651/* R_TX_OFF */
652#define V_TX_SZ 0x01
653#define V_TX_INIT 0x04
654/* R_SYNC_CTRL */
655#define V_EXT_CLK_SYNC 0x01
656#define V_SYNC_OFFS 0x02
657#define V_PCM_SYNC 0x04
658#define V_NEG_CLK 0x08
659#define V_HCLK 0x10
660/*
661#define V_JATT_AUTO_DEL 0x20
662#define V_JATT_AUTO 0x40
663*/
664#define V_JATT_OFF 0x80
665/* R_STATE */
666#define V_E1_STA 0x01
667#define V_ALT_FR_RX 0x40
668#define V_ALT_FR_TX 0x80
669/* R_SYNC_STA */
670#define V_RX_STA 0x01
671#define V_FR_SYNC_E1 0x04
672#define V_SIG_LOS 0x08
673#define V_MFA_STA 0x10
674#define V_AIS 0x40
675#define V_NO_MF_SYNC 0x80
676/* R_RX_SL0_0 */
677#define V_SI_FAS 0x01
678#define V_SI_NFAS 0x02
679#define V_A 0x04
680#define V_CRC_OK 0x08
681#define V_TX_E1 0x10
682#define V_TX_E2 0x20
683#define V_RX_E1 0x40
684#define V_RX_E2 0x80
685/* R_SLIP */
686#define V_SLIP_RX 0x01
687#define V_FOSLIP_RX 0x08
688#define V_SLIP_TX 0x10
689#define V_FOSLIP_TX 0x80
690
691/* chapter 6: PCM interface */
692/* R_PCM_MD0 */
693#define V_PCM_MD 0x01
694#define V_C4_POL 0x02
695#define V_F0_NEG 0x04
696#define V_F0_LEN 0x08
697#define V_PCM_ADDR 0x10
698/* R_SL_SEL0 */
699#define V_SL_SEL0 0x01
700#define V_SH_SEL0 0x80
701/* R_SL_SEL1 */
702#define V_SL_SEL1 0x01
703#define V_SH_SEL1 0x80
704/* R_SL_SEL2 */
705#define V_SL_SEL2 0x01
706#define V_SH_SEL2 0x80
707/* R_SL_SEL3 */
708#define V_SL_SEL3 0x01
709#define V_SH_SEL3 0x80
710/* R_SL_SEL4 */
711#define V_SL_SEL4 0x01
712#define V_SH_SEL4 0x80
713/* R_SL_SEL5 */
714#define V_SL_SEL5 0x01
715#define V_SH_SEL5 0x80
716/* R_SL_SEL6 */
717#define V_SL_SEL6 0x01
718#define V_SH_SEL6 0x80
719/* R_SL_SEL7 */
720#define V_SL_SEL7 0x01
721#define V_SH_SEL7 0x80
722/* R_PCM_MD1 */
723#define V_ODEC_CON 0x01
724#define V_PLL_ADJ 0x04
725#define V_PCM_DR 0x10
726#define V_PCM_LOOP 0x40
727/* R_PCM_MD2 */
728#define V_SYNC_PLL 0x02
729#define V_SYNC_SRC 0x04
730#define V_SYNC_OUT 0x08
731#define V_ICR_FR_TIME 0x40
732#define V_EN_PLL 0x80
733
734/* chapter 7: pulse width modulation */
735/* R_PWM_MD */
736#define V_EXT_IRQ_EN 0x08
737#define V_PWM0_MD 0x10
738#define V_PWM1_MD 0x40
739
740/* chapter 8: multiparty audio conferences */
741/* R_CONF_EN */
742#define V_CONF_EN 0x01
743#define V_ULAW 0x80
744/* A_CONF */
745#define V_CONF_NUM 0x01
746#define V_NOISE_SUPPR 0x08
747#define V_ATT_LEV 0x20
748#define V_CONF_SL 0x80
749/* R_CONF_OFLOW */
750#define V_CONF_OFLOW0 0x01
751#define V_CONF_OFLOW1 0x02
752#define V_CONF_OFLOW2 0x04
753#define V_CONF_OFLOW3 0x08
754#define V_CONF_OFLOW4 0x10
755#define V_CONF_OFLOW5 0x20
756#define V_CONF_OFLOW6 0x40
757#define V_CONF_OFLOW7 0x80
758
759/* chapter 9: DTMF controller */
760/* R_DTMF0 */
761#define V_DTMF_EN 0x01
762#define V_HARM_SEL 0x02
763#define V_DTMF_RX_CH 0x04
764#define V_DTMF_STOP 0x08
765#define V_CHBL_SEL 0x10
766#define V_RST_DTMF 0x40
767#define V_ULAW_SEL 0x80
768
769/* chapter 10: BERT */
770/* R_BERT_WD_MD */
771#define V_PAT_SEQ 0x01
772#define V_BERT_ERR 0x08
773#define V_AUTO_WD_RES 0x20
774#define V_WD_RES 0x80
775/* R_BERT_STA */
776#define V_BERT_SYNC_SRC 0x01
777#define V_BERT_SYNC 0x10
778#define V_BERT_INV_DATA 0x20
779
780/* chapter 11: auxiliary interface */
781/* R_BRG_PCM_CFG */
782#define V_BRG_EN 0x01
783#define V_BRG_MD 0x02
784#define V_PCM_CLK 0x20
785#define V_ADDR_WRDLY 0x40
786/* R_BRG_CTRL */
787#define V_BRG_CS 0x01
788#define V_BRG_ADDR 0x08
789#define V_BRG_CS_SRC 0x80
790/* R_BRG_MD */
791#define V_BRG_MD0 0x01
792#define V_BRG_MD1 0x02
793#define V_BRG_MD2 0x04
794#define V_BRG_MD3 0x08
795#define V_BRG_MD4 0x10
796#define V_BRG_MD5 0x20
797#define V_BRG_MD6 0x40
798#define V_BRG_MD7 0x80
799/* R_BRG_TIM0 */
800#define V_BRG_TIM0_IDLE 0x01
801#define V_BRG_TIM0_CLK 0x10
802/* R_BRG_TIM1 */
803#define V_BRG_TIM1_IDLE 0x01
804#define V_BRG_TIM1_CLK 0x10
805/* R_BRG_TIM2 */
806#define V_BRG_TIM2_IDLE 0x01
807#define V_BRG_TIM2_CLK 0x10
808/* R_BRG_TIM3 */
809#define V_BRG_TIM3_IDLE 0x01
810#define V_BRG_TIM3_CLK 0x10
811/* R_BRG_TIM_SEL01 */
812#define V_BRG_WR_SEL0 0x01
813#define V_BRG_RD_SEL0 0x04
814#define V_BRG_WR_SEL1 0x10
815#define V_BRG_RD_SEL1 0x40
816/* R_BRG_TIM_SEL23 */
817#define V_BRG_WR_SEL2 0x01
818#define V_BRG_RD_SEL2 0x04
819#define V_BRG_WR_SEL3 0x10
820#define V_BRG_RD_SEL3 0x40
821/* R_BRG_TIM_SEL45 */
822#define V_BRG_WR_SEL4 0x01
823#define V_BRG_RD_SEL4 0x04
824#define V_BRG_WR_SEL5 0x10
825#define V_BRG_RD_SEL5 0x40
826/* R_BRG_TIM_SEL67 */
827#define V_BRG_WR_SEL6 0x01
828#define V_BRG_RD_SEL6 0x04
829#define V_BRG_WR_SEL7 0x10
830#define V_BRG_RD_SEL7 0x40
831
832/* chapter 12: clock, reset, interrupt, timer and watchdog */
833/* R_IRQMSK_MISC */
834#define V_STA_IRQMSK 0x01
835#define V_TI_IRQMSK 0x02
836#define V_PROC_IRQMSK 0x04
837#define V_DTMF_IRQMSK 0x08
838#define V_IRQ1S_MSK 0x10
839#define V_SA6_IRQMSK 0x20
840#define V_RX_EOMF_MSK 0x40
841#define V_TX_EOMF_MSK 0x80
842/* R_IRQ_CTRL */
843#define V_FIFO_IRQ 0x01
844#define V_GLOB_IRQ_EN 0x08
845#define V_IRQ_POL 0x10
846/* R_TI_WD */
847#define V_EV_TS 0x01
848#define V_WD_TS 0x10
849/* A_IRQ_MSK */
850#define V_IRQ 0x01
851#define V_BERT_EN 0x02
852#define V_MIX_IRQ 0x04
853/* R_IRQ_OVIEW */
854#define V_IRQ_FIFO_BL0 0x01
855#define V_IRQ_FIFO_BL1 0x02
856#define V_IRQ_FIFO_BL2 0x04
857#define V_IRQ_FIFO_BL3 0x08
858#define V_IRQ_FIFO_BL4 0x10
859#define V_IRQ_FIFO_BL5 0x20
860#define V_IRQ_FIFO_BL6 0x40
861#define V_IRQ_FIFO_BL7 0x80
862/* R_IRQ_MISC */
863#define V_STA_IRQ 0x01
864#define V_TI_IRQ 0x02
865#define V_IRQ_PROC 0x04
866#define V_DTMF_IRQ 0x08
867#define V_IRQ1S 0x10
868#define V_SA6_IRQ 0x20
869#define V_RX_EOMF 0x40
870#define V_TX_EOMF 0x80
871/* R_STATUS */
872#define V_BUSY 0x01
873#define V_PROC 0x02
874#define V_DTMF_STA 0x04
875#define V_LOST_STA 0x08
876#define V_SYNC_IN 0x10
877#define V_EXT_IRQSTA 0x20
878#define V_MISC_IRQSTA 0x40
879#define V_FR_IRQSTA 0x80
880/* R_IRQ_FIFO_BL0 */
881#define V_IRQ_FIFO0_TX 0x01
882#define V_IRQ_FIFO0_RX 0x02
883#define V_IRQ_FIFO1_TX 0x04
884#define V_IRQ_FIFO1_RX 0x08
885#define V_IRQ_FIFO2_TX 0x10
886#define V_IRQ_FIFO2_RX 0x20
887#define V_IRQ_FIFO3_TX 0x40
888#define V_IRQ_FIFO3_RX 0x80
889/* R_IRQ_FIFO_BL1 */
890#define V_IRQ_FIFO4_TX 0x01
891#define V_IRQ_FIFO4_RX 0x02
892#define V_IRQ_FIFO5_TX 0x04
893#define V_IRQ_FIFO5_RX 0x08
894#define V_IRQ_FIFO6_TX 0x10
895#define V_IRQ_FIFO6_RX 0x20
896#define V_IRQ_FIFO7_TX 0x40
897#define V_IRQ_FIFO7_RX 0x80
898/* R_IRQ_FIFO_BL2 */
899#define V_IRQ_FIFO8_TX 0x01
900#define V_IRQ_FIFO8_RX 0x02
901#define V_IRQ_FIFO9_TX 0x04
902#define V_IRQ_FIFO9_RX 0x08
903#define V_IRQ_FIFO10_TX 0x10
904#define V_IRQ_FIFO10_RX 0x20
905#define V_IRQ_FIFO11_TX 0x40
906#define V_IRQ_FIFO11_RX 0x80
907/* R_IRQ_FIFO_BL3 */
908#define V_IRQ_FIFO12_TX 0x01
909#define V_IRQ_FIFO12_RX 0x02
910#define V_IRQ_FIFO13_TX 0x04
911#define V_IRQ_FIFO13_RX 0x08
912#define V_IRQ_FIFO14_TX 0x10
913#define V_IRQ_FIFO14_RX 0x20
914#define V_IRQ_FIFO15_TX 0x40
915#define V_IRQ_FIFO15_RX 0x80
916/* R_IRQ_FIFO_BL4 */
917#define V_IRQ_FIFO16_TX 0x01
918#define V_IRQ_FIFO16_RX 0x02
919#define V_IRQ_FIFO17_TX 0x04
920#define V_IRQ_FIFO17_RX 0x08
921#define V_IRQ_FIFO18_TX 0x10
922#define V_IRQ_FIFO18_RX 0x20
923#define V_IRQ_FIFO19_TX 0x40
924#define V_IRQ_FIFO19_RX 0x80
925/* R_IRQ_FIFO_BL5 */
926#define V_IRQ_FIFO20_TX 0x01
927#define V_IRQ_FIFO20_RX 0x02
928#define V_IRQ_FIFO21_TX 0x04
929#define V_IRQ_FIFO21_RX 0x08
930#define V_IRQ_FIFO22_TX 0x10
931#define V_IRQ_FIFO22_RX 0x20
932#define V_IRQ_FIFO23_TX 0x40
933#define V_IRQ_FIFO23_RX 0x80
934/* R_IRQ_FIFO_BL6 */
935#define V_IRQ_FIFO24_TX 0x01
936#define V_IRQ_FIFO24_RX 0x02
937#define V_IRQ_FIFO25_TX 0x04
938#define V_IRQ_FIFO25_RX 0x08
939#define V_IRQ_FIFO26_TX 0x10
940#define V_IRQ_FIFO26_RX 0x20
941#define V_IRQ_FIFO27_TX 0x40
942#define V_IRQ_FIFO27_RX 0x80
943/* R_IRQ_FIFO_BL7 */
944#define V_IRQ_FIFO28_TX 0x01
945#define V_IRQ_FIFO28_RX 0x02
946#define V_IRQ_FIFO29_TX 0x04
947#define V_IRQ_FIFO29_RX 0x08
948#define V_IRQ_FIFO30_TX 0x10
949#define V_IRQ_FIFO30_RX 0x20
950#define V_IRQ_FIFO31_TX 0x40
951#define V_IRQ_FIFO31_RX 0x80
952
953/* chapter 13: general purpose I/O pins (GPIO) and input pins (GPI) */
954/* R_GPIO_OUT0 */
955#define V_GPIO_OUT0 0x01
956#define V_GPIO_OUT1 0x02
957#define V_GPIO_OUT2 0x04
958#define V_GPIO_OUT3 0x08
959#define V_GPIO_OUT4 0x10
960#define V_GPIO_OUT5 0x20
961#define V_GPIO_OUT6 0x40
962#define V_GPIO_OUT7 0x80
963/* R_GPIO_OUT1 */
964#define V_GPIO_OUT8 0x01
965#define V_GPIO_OUT9 0x02
966#define V_GPIO_OUT10 0x04
967#define V_GPIO_OUT11 0x08
968#define V_GPIO_OUT12 0x10
969#define V_GPIO_OUT13 0x20
970#define V_GPIO_OUT14 0x40
971#define V_GPIO_OUT15 0x80
972/* R_GPIO_EN0 */
973#define V_GPIO_EN0 0x01
974#define V_GPIO_EN1 0x02
975#define V_GPIO_EN2 0x04
976#define V_GPIO_EN3 0x08
977#define V_GPIO_EN4 0x10
978#define V_GPIO_EN5 0x20
979#define V_GPIO_EN6 0x40
980#define V_GPIO_EN7 0x80
981/* R_GPIO_EN1 */
982#define V_GPIO_EN8 0x01
983#define V_GPIO_EN9 0x02
984#define V_GPIO_EN10 0x04
985#define V_GPIO_EN11 0x08
986#define V_GPIO_EN12 0x10
987#define V_GPIO_EN13 0x20
988#define V_GPIO_EN14 0x40
989#define V_GPIO_EN15 0x80
990/* R_GPIO_SEL */
991#define V_GPIO_SEL0 0x01
992#define V_GPIO_SEL1 0x02
993#define V_GPIO_SEL2 0x04
994#define V_GPIO_SEL3 0x08
995#define V_GPIO_SEL4 0x10
996#define V_GPIO_SEL5 0x20
997#define V_GPIO_SEL6 0x40
998#define V_GPIO_SEL7 0x80
999/* R_GPIO_IN0 */
1000#define V_GPIO_IN0 0x01
1001#define V_GPIO_IN1 0x02
1002#define V_GPIO_IN2 0x04
1003#define V_GPIO_IN3 0x08
1004#define V_GPIO_IN4 0x10
1005#define V_GPIO_IN5 0x20
1006#define V_GPIO_IN6 0x40
1007#define V_GPIO_IN7 0x80
1008/* R_GPIO_IN1 */
1009#define V_GPIO_IN8 0x01
1010#define V_GPIO_IN9 0x02
1011#define V_GPIO_IN10 0x04
1012#define V_GPIO_IN11 0x08
1013#define V_GPIO_IN12 0x10
1014#define V_GPIO_IN13 0x20
1015#define V_GPIO_IN14 0x40
1016#define V_GPIO_IN15 0x80
1017/* R_GPI_IN0 */
1018#define V_GPI_IN0 0x01
1019#define V_GPI_IN1 0x02
1020#define V_GPI_IN2 0x04
1021#define V_GPI_IN3 0x08
1022#define V_GPI_IN4 0x10
1023#define V_GPI_IN5 0x20
1024#define V_GPI_IN6 0x40
1025#define V_GPI_IN7 0x80
1026/* R_GPI_IN1 */
1027#define V_GPI_IN8 0x01
1028#define V_GPI_IN9 0x02
1029#define V_GPI_IN10 0x04
1030#define V_GPI_IN11 0x08
1031#define V_GPI_IN12 0x10
1032#define V_GPI_IN13 0x20
1033#define V_GPI_IN14 0x40
1034#define V_GPI_IN15 0x80
1035/* R_GPI_IN2 */
1036#define V_GPI_IN16 0x01
1037#define V_GPI_IN17 0x02
1038#define V_GPI_IN18 0x04
1039#define V_GPI_IN19 0x08
1040#define V_GPI_IN20 0x10
1041#define V_GPI_IN21 0x20
1042#define V_GPI_IN22 0x40
1043#define V_GPI_IN23 0x80
1044/* R_GPI_IN3 */
1045#define V_GPI_IN24 0x01
1046#define V_GPI_IN25 0x02
1047#define V_GPI_IN26 0x04
1048#define V_GPI_IN27 0x08
1049#define V_GPI_IN28 0x10
1050#define V_GPI_IN29 0x20
1051#define V_GPI_IN30 0x40
1052#define V_GPI_IN31 0x80
1053
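The V_* constants in the chapters above are single-bit values within the R_*/A_* registers; a register write is normally composed by OR-ing the relevant bits together. A minimal sketch (not part of the driver, helper name invented here) using the R_IRQ_CTRL bits from chapter 12:

	/* illustration only: compose an R_IRQ_CTRL value from its V_* bits */
	static inline unsigned char example_irq_ctrl_value(void)
	{
		return V_FIFO_IRQ | V_GLOB_IRQ_EN;	/* 0x01 | 0x08 = 0x09 */
	}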
1054/* map of all registers, used for debugging */
1055
1056#ifdef HFC_REGISTER_DEBUG
1057struct hfc_register_names {
1058 char *name;
1059 u_char reg;
1060} hfc_register_names[] = {
1061 /* write registers */
1062 {"R_CIRM", 0x00},
1063 {"R_CTRL", 0x01},
1064 {"R_BRG_PCM_CFG ", 0x02},
1065 {"R_RAM_ADDR0", 0x08},
1066 {"R_RAM_ADDR1", 0x09},
1067 {"R_RAM_ADDR2", 0x0A},
1068 {"R_FIRST_FIFO", 0x0B},
1069 {"R_RAM_SZ", 0x0C},
1070 {"R_FIFO_MD", 0x0D},
1071 {"R_INC_RES_FIFO", 0x0E},
1072 {"R_FIFO / R_FSM_IDX", 0x0F},
1073 {"R_SLOT", 0x10},
1074 {"R_IRQMSK_MISC", 0x11},
1075 {"R_SCI_MSK", 0x12},
1076 {"R_IRQ_CTRL", 0x13},
1077 {"R_PCM_MD0", 0x14},
1078 {"R_0x15", 0x15},
1079 {"R_ST_SEL", 0x16},
1080 {"R_ST_SYNC", 0x17},
1081 {"R_CONF_EN", 0x18},
1082 {"R_TI_WD", 0x1A},
1083 {"R_BERT_WD_MD", 0x1B},
1084 {"R_DTMF", 0x1C},
1085 {"R_DTMF_N", 0x1D},
1086 {"R_E1_XX_STA", 0x20},
1087 {"R_LOS0", 0x22},
1088 {"R_LOS1", 0x23},
1089 {"R_RX0", 0x24},
1090 {"R_RX_FR0", 0x25},
1091 {"R_RX_FR1", 0x26},
1092 {"R_TX0", 0x28},
1093 {"R_TX1", 0x29},
1094 {"R_TX_FR0", 0x2C},
1095 {"R_TX_FR1", 0x2D},
1096 {"R_TX_FR2", 0x2E},
1097 {"R_JATT_ATT", 0x2F},
1098 {"A_ST_xx_STA/R_RX_OFF", 0x30},
1099 {"A_ST_CTRL0/R_SYNC_OUT", 0x31},
1100 {"A_ST_CTRL1", 0x32},
1101 {"A_ST_CTRL2", 0x33},
1102 {"A_ST_SQ_WR", 0x34},
1103 {"R_TX_OFF", 0x34},
1104 {"R_SYNC_CTRL", 0x35},
1105 {"A_ST_CLK_DLY", 0x37},
1106 {"R_PWM0", 0x38},
1107 {"R_PWM1", 0x39},
1108 {"A_ST_B1_TX", 0x3C},
1109 {"A_ST_B2_TX", 0x3D},
1110 {"A_ST_D_TX", 0x3E},
1111 {"R_GPIO_OUT0", 0x40},
1112 {"R_GPIO_OUT1", 0x41},
1113 {"R_GPIO_EN0", 0x42},
1114 {"R_GPIO_EN1", 0x43},
1115 {"R_GPIO_SEL", 0x44},
1116 {"R_BRG_CTRL", 0x45},
1117 {"R_PWM_MD", 0x46},
1118 {"R_BRG_MD", 0x47},
1119 {"R_BRG_TIM0", 0x48},
1120 {"R_BRG_TIM1", 0x49},
1121 {"R_BRG_TIM2", 0x4A},
1122 {"R_BRG_TIM3", 0x4B},
1123 {"R_BRG_TIM_SEL01", 0x4C},
1124 {"R_BRG_TIM_SEL23", 0x4D},
1125 {"R_BRG_TIM_SEL45", 0x4E},
1126 {"R_BRG_TIM_SEL67", 0x4F},
1127 {"A_FIFO_DATA0-2", 0x80},
1128 {"A_FIFO_DATA0-2_NOINC", 0x84},
1129 {"R_RAM_DATA", 0xC0},
1130 {"A_SL_CFG", 0xD0},
1131 {"A_CONF", 0xD1},
1132 {"A_CH_MSK", 0xF4},
1133 {"A_CON_HDLC", 0xFA},
1134 {"A_SUBCH_CFG", 0xFB},
1135 {"A_CHANNEL", 0xFC},
1136 {"A_FIFO_SEQ", 0xFD},
1137 {"A_IRQ_MSK", 0xFF},
1138 {NULL, 0},
1139
1140 /* read registers */
1141 {"A_Z1", 0x04},
1142 {"A_Z1H", 0x05},
1143 {"A_Z2", 0x06},
1144 {"A_Z2H", 0x07},
1145 {"A_F1", 0x0C},
1146 {"A_F2", 0x0D},
1147 {"R_IRQ_OVIEW", 0x10},
1148 {"R_IRQ_MISC", 0x11},
1149 {"R_IRQ_STATECH", 0x12},
1150 {"R_CONF_OFLOW", 0x14},
1151 {"R_RAM_USE", 0x15},
1152 {"R_CHIP_ID", 0x16},
1153 {"R_BERT_STA", 0x17},
1154 {"R_F0_CNTL", 0x18},
1155 {"R_F0_CNTH", 0x19},
1156 {"R_BERT_ECL", 0x1A},
1157 {"R_BERT_ECH", 0x1B},
1158 {"R_STATUS", 0x1C},
1159 {"R_CHIP_RV", 0x1F},
1160 {"R_STATE", 0x20},
1161 {"R_SYNC_STA", 0x24},
1162 {"R_RX_SL0_0", 0x25},
1163 {"R_RX_SL0_1", 0x26},
1164 {"R_RX_SL0_2", 0x27},
1165 {"R_JATT_DIR", 0x2b},
1166 {"R_SLIP", 0x2c},
1167 {"A_ST_RD_STA", 0x30},
1168 {"R_FAS_ECL", 0x30},
1169 {"R_FAS_ECH", 0x31},
1170 {"R_VIO_ECL", 0x32},
1171 {"R_VIO_ECH", 0x33},
1172 {"R_CRC_ECL / A_ST_SQ_RD", 0x34},
1173 {"R_CRC_ECH", 0x35},
1174 {"R_E_ECL", 0x36},
1175 {"R_E_ECH", 0x37},
1176 {"R_SA6_SA13_ECL", 0x38},
1177 {"R_SA6_SA13_ECH", 0x39},
1178 {"R_SA6_SA23_ECL", 0x3A},
1179 {"R_SA6_SA23_ECH", 0x3B},
1180 {"A_ST_B1_RX", 0x3C},
1181 {"A_ST_B2_RX", 0x3D},
1182 {"A_ST_D_RX", 0x3E},
1183 {"A_ST_E_RX", 0x3F},
1184 {"R_GPIO_IN0", 0x40},
1185 {"R_GPIO_IN1", 0x41},
1186 {"R_GPI_IN0", 0x44},
1187 {"R_GPI_IN1", 0x45},
1188 {"R_GPI_IN2", 0x46},
1189 {"R_GPI_IN3", 0x47},
1190 {"A_FIFO_DATA0-2", 0x80},
1191 {"A_FIFO_DATA0-2_NOINC", 0x84},
1192 {"R_INT_DATA", 0x88},
1193 {"R_RAM_DATA", 0xC0},
1194 {"R_IRQ_FIFO_BL0", 0xC8},
1195 {"R_IRQ_FIFO_BL1", 0xC9},
1196 {"R_IRQ_FIFO_BL2", 0xCA},
1197 {"R_IRQ_FIFO_BL3", 0xCB},
1198 {"R_IRQ_FIFO_BL4", 0xCC},
1199 {"R_IRQ_FIFO_BL5", 0xCD},
1200 {"R_IRQ_FIFO_BL6", 0xCE},
1201 {"R_IRQ_FIFO_BL7", 0xCF},
1202};
1203#endif /* HFC_REGISTER_DEBUG */
1204
diff --git a/drivers/isdn/hardware/mISDN/hfc_pci.h b/drivers/isdn/hardware/mISDN/hfc_pci.h
new file mode 100644
index 000000000000..fd2c9be6d849
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/hfc_pci.h
@@ -0,0 +1,228 @@
1/*
2 * specific defines for CCD's HFC 2BDS0 PCI chips
3 *
4 * Author Werner Cornelius (werner@isdn4linux.de)
5 *
6 * Copyright 1999 by Werner Cornelius (werner@isdn4linux.de)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 */
23
24/*
25 * thresholds for transparent B-channel mode
26 * change mask and threshold simultaneously
27 */
28#define HFCPCI_BTRANS_THRESHOLD 128
29#define HFCPCI_BTRANS_MAX 256
30#define HFCPCI_BTRANS_THRESMASK 0x00
31
32/* defines for PCI config */
33#define PCI_ENA_MEMIO 0x02
34#define PCI_ENA_MASTER 0x04
35
36/* GCI/IOM bus monitor registers */
37#define HCFPCI_C_I 0x08
38#define HFCPCI_TRxR 0x0C
39#define HFCPCI_MON1_D 0x28
40#define HFCPCI_MON2_D 0x2C
41
42/* GCI/IOM bus timeslot registers */
43#define HFCPCI_B1_SSL 0x80
44#define HFCPCI_B2_SSL 0x84
45#define HFCPCI_AUX1_SSL 0x88
46#define HFCPCI_AUX2_SSL 0x8C
47#define HFCPCI_B1_RSL 0x90
48#define HFCPCI_B2_RSL 0x94
49#define HFCPCI_AUX1_RSL 0x98
50#define HFCPCI_AUX2_RSL 0x9C
51
52/* GCI/IOM bus data registers */
53#define HFCPCI_B1_D 0xA0
54#define HFCPCI_B2_D 0xA4
55#define HFCPCI_AUX1_D 0xA8
56#define HFCPCI_AUX2_D 0xAC
57
58/* GCI/IOM bus configuration registers */
59#define HFCPCI_MST_EMOD 0xB4
60#define HFCPCI_MST_MODE 0xB8
61#define HFCPCI_CONNECT 0xBC
62
63
64/* Interrupt and status registers */
65#define HFCPCI_FIFO_EN 0x44
66#define HFCPCI_TRM 0x48
67#define HFCPCI_B_MODE 0x4C
68#define HFCPCI_CHIP_ID 0x58
69#define HFCPCI_CIRM 0x60
70#define HFCPCI_CTMT 0x64
71#define HFCPCI_INT_M1 0x68
72#define HFCPCI_INT_M2 0x6C
73#define HFCPCI_INT_S1 0x78
74#define HFCPCI_INT_S2 0x7C
75#define HFCPCI_STATUS 0x70
76
77/* S/T section registers */
78#define HFCPCI_STATES 0xC0
79#define HFCPCI_SCTRL 0xC4
80#define HFCPCI_SCTRL_E 0xC8
81#define HFCPCI_SCTRL_R 0xCC
82#define HFCPCI_SQ 0xD0
83#define HFCPCI_CLKDEL 0xDC
84#define HFCPCI_B1_REC 0xF0
85#define HFCPCI_B1_SEND 0xF0
86#define HFCPCI_B2_REC 0xF4
87#define HFCPCI_B2_SEND 0xF4
88#define HFCPCI_D_REC 0xF8
89#define HFCPCI_D_SEND 0xF8
90#define HFCPCI_E_REC 0xFC
91
92
93/* bits in status register (READ) */
94#define HFCPCI_PCI_PROC 0x02
95#define HFCPCI_NBUSY 0x04
96#define HFCPCI_TIMER_ELAP 0x10
97#define HFCPCI_STATINT 0x20
98#define HFCPCI_FRAMEINT 0x40
99#define HFCPCI_ANYINT 0x80
100
101/* bits in CTMT (Write) */
102#define HFCPCI_CLTIMER 0x80
103#define HFCPCI_TIM3_125 0x04
104#define HFCPCI_TIM25 0x10
105#define HFCPCI_TIM50 0x14
106#define HFCPCI_TIM400 0x18
107#define HFCPCI_TIM800 0x1C
108#define HFCPCI_AUTO_TIMER 0x20
109#define HFCPCI_TRANSB2 0x02
110#define HFCPCI_TRANSB1 0x01
111
112/* bits in CIRM (Write) */
113#define HFCPCI_AUX_MSK 0x07
114#define HFCPCI_RESET 0x08
115#define HFCPCI_B1_REV 0x40
116#define HFCPCI_B2_REV 0x80
117
118/* bits in INT_M1 and INT_S1 */
119#define HFCPCI_INTS_B1TRANS 0x01
120#define HFCPCI_INTS_B2TRANS 0x02
121#define HFCPCI_INTS_DTRANS 0x04
122#define HFCPCI_INTS_B1REC 0x08
123#define HFCPCI_INTS_B2REC 0x10
124#define HFCPCI_INTS_DREC 0x20
125#define HFCPCI_INTS_L1STATE 0x40
126#define HFCPCI_INTS_TIMER 0x80
127
128/* bits in INT_M2 */
129#define HFCPCI_PROC_TRANS 0x01
130#define HFCPCI_GCI_I_CHG 0x02
131#define HFCPCI_GCI_MON_REC 0x04
132#define HFCPCI_IRQ_ENABLE 0x08
133#define HFCPCI_PMESEL 0x80
134
135/* bits in STATES */
136#define HFCPCI_STATE_MSK 0x0F
137#define HFCPCI_LOAD_STATE 0x10
138#define HFCPCI_ACTIVATE 0x20
139#define HFCPCI_DO_ACTION 0x40
140#define HFCPCI_NT_G2_G3 0x80
141
142/* bits in HFCD_MST_MODE */
143#define HFCPCI_MASTER 0x01
144#define HFCPCI_SLAVE 0x00
145#define HFCPCI_F0IO_POSITIV 0x02
146#define HFCPCI_F0_NEGATIV 0x04
147#define HFCPCI_F0_2C4 0x08
148/* remaining bits are for codecs control */
149
150/* bits in HFCD_SCTRL */
151#define SCTRL_B1_ENA 0x01
152#define SCTRL_B2_ENA 0x02
153#define SCTRL_MODE_TE 0x00
154#define SCTRL_MODE_NT 0x04
155#define SCTRL_LOW_PRIO 0x08
156#define SCTRL_SQ_ENA 0x10
157#define SCTRL_TEST 0x20
158#define SCTRL_NONE_CAP 0x40
159#define SCTRL_PWR_DOWN 0x80
160
161/* bits in SCTRL_E */
162#define HFCPCI_AUTO_AWAKE 0x01
163#define HFCPCI_DBIT_1 0x04
164#define HFCPCI_IGNORE_COL 0x08
165#define HFCPCI_CHG_B1_B2 0x80
166
167/* bits in FIFO_EN register */
168#define HFCPCI_FIFOEN_B1 0x03
169#define HFCPCI_FIFOEN_B2 0x0C
170#define HFCPCI_FIFOEN_DTX 0x10
171#define HFCPCI_FIFOEN_B1TX 0x01
172#define HFCPCI_FIFOEN_B1RX 0x02
173#define HFCPCI_FIFOEN_B2TX 0x04
174#define HFCPCI_FIFOEN_B2RX 0x08
175
176
177/* definitions of fifo memory area */
178#define MAX_D_FRAMES 15
179#define MAX_B_FRAMES 31
180#define B_SUB_VAL 0x200
181#define B_FIFO_SIZE (0x2000 - B_SUB_VAL)
182#define D_FIFO_SIZE 512
183#define D_FREG_MASK 0xF
184
185struct zt {
186 unsigned short z1; /* Z1 pointer 16 Bit */
187 unsigned short z2; /* Z2 pointer 16 Bit */
188};
189
190struct dfifo {
191 u_char data[D_FIFO_SIZE]; /* FIFO data space */
192 u_char fill1[0x20A0-D_FIFO_SIZE]; /* reserved, do not use */
193 u_char f1, f2; /* f pointers */
194 u_char fill2[0x20C0-0x20A2]; /* reserved, do not use */
195 /* mask index with D_FREG_MASK for access */
196 struct zt za[MAX_D_FRAMES+1];
197 u_char fill3[0x4000-0x2100]; /* align 16K */
198};
199
200struct bzfifo {
201 struct zt za[MAX_B_FRAMES+1]; /* only range 0x0..0x1F allowed */
202 u_char f1, f2; /* f pointers */
203 u_char fill[0x2100-0x2082]; /* alignment */
204};
205
206
207union fifo_area {
208 struct {
209 struct dfifo d_tx; /* D-send channel */
210 struct dfifo d_rx; /* D-receive channel */
211 } d_chan;
212 struct {
213 u_char fill1[0x200];
214 u_char txdat_b1[B_FIFO_SIZE];
215 struct bzfifo txbz_b1;
216 struct bzfifo txbz_b2;
217 u_char txdat_b2[B_FIFO_SIZE];
218 u_char fill2[D_FIFO_SIZE];
219 u_char rxdat_b1[B_FIFO_SIZE];
220 struct bzfifo rxbz_b1;
221 struct bzfifo rxbz_b2;
222 u_char rxdat_b2[B_FIFO_SIZE];
223 } b_chans;
224 u_char fill[32768];
225};
226
227#define Write_hfc(a, b, c) (writeb(c, (a->hw.pci_io)+b))
228#define Read_hfc(a, b) (readb((a->hw.pci_io)+b))
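Write_hfc() and Read_hfc() above wrap byte-wide MMIO access through the mapped PCI window (a->hw.pci_io). A minimal usage sketch follows; the struct hfc_pci layout, the helper name and the 10 us settle time are assumptions for illustration, only the macros and register defines come from this header:

	/* sketch only: soft-reset the chip and read back its id */
	static void example_hfcpci_access(struct hfc_pci *hc)
	{
		unsigned char id;

		Write_hfc(hc, HFCPCI_CIRM, HFCPCI_RESET);	/* assert soft reset */
		udelay(10);					/* assumed settle time */
		Write_hfc(hc, HFCPCI_CIRM, 0);			/* release soft reset */

		id = Read_hfc(hc, HFCPCI_CHIP_ID);		/* read back chip id */
		printk(KERN_DEBUG "hfcpci: chip id 0x%02x\n", id);
	}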
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
new file mode 100644
index 000000000000..2649ea55a9e8
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -0,0 +1,5320 @@
1/*
2 * hfcmulti.c low level driver for hfc-4s/hfc-8s/hfc-e1 based cards
3 *
4 * Author Andreas Eversberg (jolly@eversberg.eu)
5 * ported to mqueue mechanism:
6 * Peter Sprenger (sprengermoving-bytes.de)
7 *
8 * inspired by existing hfc-pci driver:
9 * Copyright 1999 by Werner Cornelius (werner@isdn-development.de)
10 * Copyright 2008 by Karsten Keil (kkeil@suse.de)
11 * Copyright 2008 by Andreas Eversberg (jolly@eversberg.eu)
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2, or (at your option)
16 * any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 *
28 * Thanks to Cologne Chip AG for this great controller!
29 */
30
31/*
32 * module parameters:
33 * type:
34 * By default (0), the card is automatically detected.
35 * Or use the following combinations:
36 * Bit 0-7 = 0x00001 = HFC-E1 (1 port)
37 * or Bit 0-7 = 0x00004 = HFC-4S (4 ports)
38 * or Bit 0-7 = 0x00008 = HFC-8S (8 ports)
39 * Bit 8 = 0x00100 = uLaw (instead of aLaw)
40 * Bit 9 = 0x00200 = Disable DTMF detect on all B-channels via hardware
41 * Bit 10 = spare
42 *	Bit 11    = 0x00800 = Force PCM bus into slave mode. (otherwise auto)
43 * or   Bit 12    = 0x01000 = Force PCM bus into master mode. (otherwise auto)
44 * Bit 13 = spare
45 * Bit 14 = 0x04000 = Use external ram (128K)
46 * Bit 15 = 0x08000 = Use external ram (512K)
47 * Bit 16 = 0x10000 = Use 64 timeslots instead of 32
48 * or Bit 17 = 0x20000 = Use 128 timeslots instead of anything else
49 * Bit 18 = spare
50 * Bit 19 = 0x80000 = Send the Watchdog a Signal (Dual E1 with Watchdog)
51 * (all other bits are reserved and shall be 0)
52 * example: 0x20204 one HFC-4S with dtmf detection and 128 timeslots on PCM
53 * bus (PCM master)
54 *
55 * port: (optional or required for all ports on all installed cards)
56 * HFC-4S/HFC-8S only bits:
57 * Bit 0 = 0x001 = Use master clock for this S/T interface
58 *		(only once per chip).
59 * Bit 1 = 0x002 = transmitter line setup (non capacitive mode)
60 * Don't use this unless you know what you are doing!
61 * Bit 2 = 0x004 = Disable E-channel. (No E-channel processing)
62 * example: 0x0001,0x0000,0x0000,0x0000 one HFC-4S with master clock
63 * received from port 1
64 *
65 * HFC-E1 only bits:
66 * Bit 0 = 0x0001 = interface: 0=copper, 1=optical
67 * Bit 1 = 0x0002 = reserved (later for 32 B-channels transparent mode)
68 * Bit 2 = 0x0004 = Report LOS
69 * Bit 3 = 0x0008 = Report AIS
70 * Bit 4 = 0x0010 = Report SLIP
71 * Bit 5 = 0x0020 = Report RDI
72 * Bit 8 = 0x0100 = Turn off CRC-4 Multiframe Mode, use double frame
73 * mode instead.
74 * Bit 9 = 0x0200 = Force get clock from interface, even in NT mode.
75 * or Bit 10 = 0x0400 = Force put clock to interface, even in TE mode.
76 * Bit 11 = 0x0800 = Use direct RX clock for PCM sync rather than PLL.
77 * (E1 only)
78 * Bit 12-13 = 0xX000 = elastic jitter buffer (1-3), Set both bits to 0
79 * for default.
80 * (all other bits are reserved and shall be 0)
81 *
82 * debug:
83 *	NOTE: only one debug value must be given for all cards.
84 *	Enables debugging (see hfc_multi.h for debug options).
85 *
86 * poll:
87 * NOTE: only one poll value must be given for all cards
88 * Give the number of samples for each fifo process.
89 *	By default 128 is used. Decrease to reduce delay, increase to
90 *	reduce CPU load. If unsure, don't mess with it!
91 *	Valid values are 8, 16, 32, 64, 128, 256.
92 *
93 * pcm:
94 * NOTE: only one pcm value must be given for every card.
95 * The PCM bus id tells the mISDNdsp module about the connected PCM bus.
96 * By default (0), the PCM bus id is 100 for the card that is PCM master.
97 * If multiple cards are PCM master (because they are not interconnected),
98 *	each PCM master card is assigned an increasing PCM id.
99 *	All PCM busses with the same ID are expected to be interconnected and
100 *	to share common time slots.
101 *	Only one chip on the PCM bus must be master, the others slaves.
102 *	-1 disables PCM bus support for this card entirely.
103 * Omit this value, if all cards are interconnected or none is connected.
104 * If unsure, don't give this parameter.
105 *
106 * dslot:
107 *	NOTE: only one dslot value must be given for every card.
108 *	A value must also be given for non-E1 cards to keep the per-card
109 *	order (the value itself is only used by E1 cards). If omitted, the
110 *	E1 card has its D-channel on time slot 16, which is the default.
110 * If 1..15 or 17..31, an alternate time slot is used for D-channel.
111 * In this case, the application must be able to handle this.
112 * If -1 is given, the D-channel is disabled and all 31 slots can be used
113 * for B-channel. (only for specific applications)
114 * If you don't know how to use it, you don't need it!
115 *
116 * iomode:
117 * NOTE: only one mode value must be given for every card.
118 * -> See hfc_multi.h for HFC_IO_MODE_* values
119 * By default, the IO mode is pci memory IO (MEMIO).
120 *	Some cards require a specific IO mode, so it cannot be changed.
121 *	It may be useful to set the IO mode to register IO (REGIO) to solve
122 * PCI bridge problems.
123 * If unsure, don't give this parameter.
124 *
125 * clockdelay_nt:
126 * NOTE: only one clockdelay_nt value must be given once for all cards.
127 * Give the value of the clock control register (A_ST_CLK_DLY)
128 * of the S/T interfaces in NT mode.
129 * This register is needed for the TBR3 certification, so don't change it.
130 *
131 * clockdelay_te:
132 * NOTE: only one clockdelay_te value must be given once
133 * Give the value of the clock control register (A_ST_CLK_DLY)
134 * of the S/T interfaces in TE mode.
135 * This register is needed for the TBR3 certification, so don't change it.
136 */
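As an illustration of the type bit layout documented above (not part of the driver, macro names invented here), the example value 0x20204 decomposes into the listed bits as follows:

	/* illustration only */
	#define EX_TYPE_HFC4S	0x00004		/* bits 0-7: HFC-4S      */
	#define EX_TYPE_BIT9	0x00200		/* bit 9                 */
	#define EX_TYPE_TS128	0x20000		/* bit 17: 128 timeslots */
	/* EX_TYPE_HFC4S | EX_TYPE_BIT9 | EX_TYPE_TS128 == 0x20204 */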
137
138/*
139 * debug register access (never use this, it will flood your system log)
140 * #define HFC_REGISTER_DEBUG
141 */
142
143static const char *hfcmulti_revision = "2.00";
144
145#include <linux/module.h>
146#include <linux/pci.h>
147#include <linux/delay.h>
148#include <linux/mISDNhw.h>
149#include <linux/mISDNdsp.h>
150
151/*
152#define IRQCOUNT_DEBUG
153#define IRQ_DEBUG
154*/
155
156#include "hfc_multi.h"
157#ifdef ECHOPREP
158#include "gaintab.h"
159#endif
160
161#define MAX_CARDS 8
162#define MAX_PORTS (8 * MAX_CARDS)
163
164static LIST_HEAD(HFClist);
165static spinlock_t HFClock; /* global hfc list lock */
166
167static void ph_state_change(struct dchannel *);
168static void (*hfc_interrupt)(void);
169static void (*register_interrupt)(void);
170static int (*unregister_interrupt)(void);
171static int interrupt_registered;
172
173static struct hfc_multi *syncmaster;
174int plxsd_master; /* if we have a master card (yet) */
175static spinlock_t plx_lock; /* may not acquire other lock inside */
176EXPORT_SYMBOL(plx_lock);
177
178#define TYP_E1 1
179#define TYP_4S 4
180#define TYP_8S 8
181
182static int poll_timer = 6; /* default = 128 samples = 16ms */
183/* number of POLL_TIMER interrupts for G2 timeout (ca 1s) */
184static int nt_t1_count[] = { 3840, 1920, 960, 480, 240, 120, 60, 30 };
185#define CLKDEL_TE 0x0f /* CLKDEL in TE mode */
186#define CLKDEL_NT 0x6c /* CLKDEL in NT mode
187 (0x60 MUST be included!) */
188static u_char silence = 0xff; /* silence by LAW */
189
190#define DIP_4S 0x1 /* DIP Switches for Beronet 1S/2S/4S cards */
191#define DIP_8S 0x2 /* DIP Switches for Beronet 8S+ cards */
192#define DIP_E1 0x3 /* DIP Switches for Beronet E1 cards */
193
194/*
195 * module stuff
196 */
197
198static uint type[MAX_CARDS];
199static uint pcm[MAX_CARDS];
200static uint dslot[MAX_CARDS];
201static uint iomode[MAX_CARDS];
202static uint port[MAX_PORTS];
203static uint debug;
204static uint poll;
205static uint timer;
206static uint clockdelay_te = CLKDEL_TE;
207static uint clockdelay_nt = CLKDEL_NT;
208
209static int HFC_cnt, Port_cnt, PCM_cnt = 99;
210
211MODULE_AUTHOR("Andreas Eversberg");
212MODULE_LICENSE("GPL");
213module_param(debug, uint, S_IRUGO | S_IWUSR);
214module_param(poll, uint, S_IRUGO | S_IWUSR);
215module_param(timer, uint, S_IRUGO | S_IWUSR);
216module_param(clockdelay_te, uint, S_IRUGO | S_IWUSR);
217module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR);
218module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
219module_param_array(pcm, uint, NULL, S_IRUGO | S_IWUSR);
220module_param_array(dslot, uint, NULL, S_IRUGO | S_IWUSR);
221module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR);
222module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
223
224#ifdef HFC_REGISTER_DEBUG
225#define HFC_outb(hc, reg, val) \
226 (hc->HFC_outb(hc, reg, val, __func__, __LINE__))
227#define HFC_outb_nodebug(hc, reg, val) \
228 (hc->HFC_outb_nodebug(hc, reg, val, __func__, __LINE__))
229#define HFC_inb(hc, reg) \
230 (hc->HFC_inb(hc, reg, __func__, __LINE__))
231#define HFC_inb_nodebug(hc, reg) \
232 (hc->HFC_inb_nodebug(hc, reg, __func__, __LINE__))
233#define HFC_inw(hc, reg) \
234 (hc->HFC_inw(hc, reg, __func__, __LINE__))
235#define HFC_inw_nodebug(hc, reg) \
236 (hc->HFC_inw_nodebug(hc, reg, __func__, __LINE__))
237#define HFC_wait(hc) \
238 (hc->HFC_wait(hc, __func__, __LINE__))
239#define HFC_wait_nodebug(hc) \
240 (hc->HFC_wait_nodebug(hc, __func__, __LINE__))
241#else
242#define HFC_outb(hc, reg, val) (hc->HFC_outb(hc, reg, val))
243#define HFC_outb_nodebug(hc, reg, val) (hc->HFC_outb_nodebug(hc, reg, val))
244#define HFC_inb(hc, reg) (hc->HFC_inb(hc, reg))
245#define HFC_inb_nodebug(hc, reg) (hc->HFC_inb_nodebug(hc, reg))
246#define HFC_inw(hc, reg) (hc->HFC_inw(hc, reg))
247#define HFC_inw_nodebug(hc, reg) (hc->HFC_inw_nodebug(hc, reg))
248#define HFC_wait(hc) (hc->HFC_wait(hc))
249#define HFC_wait_nodebug(hc) (hc->HFC_wait_nodebug(hc))
250#endif
251
252/* HFC_IO_MODE_PCIMEM */
253static void
254#ifdef HFC_REGISTER_DEBUG
255HFC_outb_pcimem(struct hfc_multi *hc, u_char reg, u_char val,
256 const char *function, int line)
257#else
258HFC_outb_pcimem(struct hfc_multi *hc, u_char reg, u_char val)
259#endif
260{
261 writeb(val, (hc->pci_membase)+reg);
262}
263static u_char
264#ifdef HFC_REGISTER_DEBUG
265HFC_inb_pcimem(struct hfc_multi *hc, u_char reg, const char *function, int line)
266#else
267HFC_inb_pcimem(struct hfc_multi *hc, u_char reg)
268#endif
269{
270 return readb((hc->pci_membase)+reg);
271}
272static u_short
273#ifdef HFC_REGISTER_DEBUG
274HFC_inw_pcimem(struct hfc_multi *hc, u_char reg, const char *function, int line)
275#else
276HFC_inw_pcimem(struct hfc_multi *hc, u_char reg)
277#endif
278{
279 return readw((hc->pci_membase)+reg);
280}
281static void
282#ifdef HFC_REGISTER_DEBUG
283HFC_wait_pcimem(struct hfc_multi *hc, const char *function, int line)
284#else
285HFC_wait_pcimem(struct hfc_multi *hc)
286#endif
287{
288 while (readb((hc->pci_membase)+R_STATUS) & V_BUSY);
289}
290
291/* HFC_IO_MODE_REGIO */
292static void
293#ifdef HFC_REGISTER_DEBUG
294HFC_outb_regio(struct hfc_multi *hc, u_char reg, u_char val,
295 const char *function, int line)
296#else
297HFC_outb_regio(struct hfc_multi *hc, u_char reg, u_char val)
298#endif
299{
300 outb(reg, (hc->pci_iobase)+4);
301 outb(val, hc->pci_iobase);
302}
303static u_char
304#ifdef HFC_REGISTER_DEBUG
305HFC_inb_regio(struct hfc_multi *hc, u_char reg, const char *function, int line)
306#else
307HFC_inb_regio(struct hfc_multi *hc, u_char reg)
308#endif
309{
310 outb(reg, (hc->pci_iobase)+4);
311 return inb(hc->pci_iobase);
312}
313static u_short
314#ifdef HFC_REGISTER_DEBUG
315HFC_inw_regio(struct hfc_multi *hc, u_char reg, const char *function, int line)
316#else
317HFC_inw_regio(struct hfc_multi *hc, u_char reg)
318#endif
319{
320 outb(reg, (hc->pci_iobase)+4);
321 return inw(hc->pci_iobase);
322}
323static void
324#ifdef HFC_REGISTER_DEBUG
325HFC_wait_regio(struct hfc_multi *hc, const char *function, int line)
326#else
327HFC_wait_regio(struct hfc_multi *hc)
328#endif
329{
330 outb(R_STATUS, (hc->pci_iobase)+4);
331 while (inb(hc->pci_iobase) & V_BUSY);
332}
333
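The HFC_outb()/HFC_inb()/HFC_inw()/HFC_wait() macros defined above dispatch through per-card function pointers, so either the PCIMEM or the REGIO implementations can be selected per card. A sketch of how such a binding presumably looks (field names follow the calls made by the macros; the helper itself is invented here):

	/* sketch only: bind the PCIMEM accessors to one card instance */
	static void example_bind_pcimem(struct hfc_multi *hc)
	{
		hc->HFC_outb = HFC_outb_pcimem;
		hc->HFC_inb  = HFC_inb_pcimem;
		hc->HFC_inw  = HFC_inw_pcimem;
		hc->HFC_wait = HFC_wait_pcimem;
	}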
334#ifdef HFC_REGISTER_DEBUG
335static void
336HFC_outb_debug(struct hfc_multi *hc, u_char reg, u_char val,
337 const char *function, int line)
338{
339 char regname[256] = "", bits[9] = "xxxxxxxx";
340 int i;
341
342 i = -1;
343 while (hfc_register_names[++i].name) {
344 if (hfc_register_names[i].reg == reg)
345 strcat(regname, hfc_register_names[i].name);
346 }
347 if (regname[0] == '\0')
348 strcpy(regname, "register");
349
350 bits[7] = '0'+(!!(val&1));
351 bits[6] = '0'+(!!(val&2));
352 bits[5] = '0'+(!!(val&4));
353 bits[4] = '0'+(!!(val&8));
354 bits[3] = '0'+(!!(val&16));
355 bits[2] = '0'+(!!(val&32));
356 bits[1] = '0'+(!!(val&64));
357 bits[0] = '0'+(!!(val&128));
358 printk(KERN_DEBUG
359 "HFC_outb(chip %d, %02x=%s, 0x%02x=%s); in %s() line %d\n",
360 hc->id, reg, regname, val, bits, function, line);
361 HFC_outb_nodebug(hc, reg, val);
362}
363static u_char
364HFC_inb_debug(struct hfc_multi *hc, u_char reg, const char *function, int line)
365{
366 char regname[256] = "", bits[9] = "xxxxxxxx";
367 u_char val = HFC_inb_nodebug(hc, reg);
368 int i;
369
370 i = 0;
371	while (hfc_register_names[i].name)
372		i++;
373 while (hfc_register_names[++i].name) {
374 if (hfc_register_names[i].reg == reg)
375 strcat(regname, hfc_register_names[i].name);
376 }
377 if (regname[0] == '\0')
378 strcpy(regname, "register");
379
380 bits[7] = '0'+(!!(val&1));
381 bits[6] = '0'+(!!(val&2));
382 bits[5] = '0'+(!!(val&4));
383 bits[4] = '0'+(!!(val&8));
384 bits[3] = '0'+(!!(val&16));
385 bits[2] = '0'+(!!(val&32));
386 bits[1] = '0'+(!!(val&64));
387 bits[0] = '0'+(!!(val&128));
388 printk(KERN_DEBUG
389 "HFC_inb(chip %d, %02x=%s) = 0x%02x=%s; in %s() line %d\n",
390 hc->id, reg, regname, val, bits, function, line);
391 return val;
392}
393static u_short
394HFC_inw_debug(struct hfc_multi *hc, u_char reg, const char *function, int line)
395{
396 char regname[256] = "";
397 u_short val = HFC_inw_nodebug(hc, reg);
398 int i;
399
400 i = 0;
401	while (hfc_register_names[i].name)
402		i++;
403 while (hfc_register_names[++i].name) {
404 if (hfc_register_names[i].reg == reg)
405 strcat(regname, hfc_register_names[i].name);
406 }
407 if (regname[0] == '\0')
408 strcpy(regname, "register");
409
410 printk(KERN_DEBUG
411 "HFC_inw(chip %d, %02x=%s) = 0x%04x; in %s() line %d\n",
412 hc->id, reg, regname, val, function, line);
413 return val;
414}
415static void
416HFC_wait_debug(struct hfc_multi *hc, const char *function, int line)
417{
418 printk(KERN_DEBUG "HFC_wait(chip %d); in %s() line %d\n",
419 hc->id, function, line);
420 HFC_wait_nodebug(hc);
421}
422#endif
423
424/* write fifo data (REGIO) */
425void
426write_fifo_regio(struct hfc_multi *hc, u_char *data, int len)
427{
428 outb(A_FIFO_DATA0, (hc->pci_iobase)+4);
429 while (len>>2) {
430 outl(*(u32 *)data, hc->pci_iobase);
431 data += 4;
432 len -= 4;
433 }
434 while (len>>1) {
435 outw(*(u16 *)data, hc->pci_iobase);
436 data += 2;
437 len -= 2;
438 }
439 while (len) {
440 outb(*data, hc->pci_iobase);
441 data++;
442 len--;
443 }
444}
445/* write fifo data (PCIMEM) */
446void
447write_fifo_pcimem(struct hfc_multi *hc, u_char *data, int len)
448{
449 while (len>>2) {
450 writel(*(u32 *)data, (hc->pci_membase)+A_FIFO_DATA0);
451 data += 4;
452 len -= 4;
453 }
454 while (len>>1) {
455 writew(*(u16 *)data, (hc->pci_membase)+A_FIFO_DATA0);
456 data += 2;
457 len -= 2;
458 }
459 while (len) {
460 writeb(*data, (hc->pci_membase)+A_FIFO_DATA0);
461 data++;
462 len--;
463 }
464}
465/* read fifo data (REGIO) */
466void
467read_fifo_regio(struct hfc_multi *hc, u_char *data, int len)
468{
469 outb(A_FIFO_DATA0, (hc->pci_iobase)+4);
470 while (len>>2) {
471 *(u32 *)data = inl(hc->pci_iobase);
472 data += 4;
473 len -= 4;
474 }
475 while (len>>1) {
476 *(u16 *)data = inw(hc->pci_iobase);
477 data += 2;
478 len -= 2;
479 }
480 while (len) {
481 *data = inb(hc->pci_iobase);
482 data++;
483 len--;
484 }
485}
486
487/* read fifo data (PCIMEM) */
488void
489read_fifo_pcimem(struct hfc_multi *hc, u_char *data, int len)
490{
491 while (len>>2) {
492 *(u32 *)data =
493 readl((hc->pci_membase)+A_FIFO_DATA0);
494 data += 4;
495 len -= 4;
496 }
497 while (len>>1) {
498 *(u16 *)data =
499 readw((hc->pci_membase)+A_FIFO_DATA0);
500 data += 2;
501 len -= 2;
502 }
503 while (len) {
504 *data = readb((hc->pci_membase)+A_FIFO_DATA0);
505 data++;
506 len--;
507 }
508}
509
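The four FIFO helpers above split a transfer into 32-bit, 16-bit and single-byte bus accesses. A short worked example (illustrative only, not driver code):

	/* illustration only: an 11 byte transfer becomes 2 dword accesses,
	 * 1 word access and 1 byte access with the loops used above */
	static void example_fifo_split(void)
	{
		int len = 11;

		printk(KERN_DEBUG "dwords=%d words=%d bytes=%d\n",
		       len / 4, (len % 4) / 2, len % 2);	/* 2, 1, 1 */
	}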
510
511static void
512enable_hwirq(struct hfc_multi *hc)
513{
514 hc->hw.r_irq_ctrl |= V_GLOB_IRQ_EN;
515 HFC_outb(hc, R_IRQ_CTRL, hc->hw.r_irq_ctrl);
516}
517
518static void
519disable_hwirq(struct hfc_multi *hc)
520{
521 hc->hw.r_irq_ctrl &= ~((u_char)V_GLOB_IRQ_EN);
522 HFC_outb(hc, R_IRQ_CTRL, hc->hw.r_irq_ctrl);
523}
524
525#define NUM_EC 2
526#define MAX_TDM_CHAN 32
527
528
529inline void
530enablepcibridge(struct hfc_multi *c)
531{
532 HFC_outb(c, R_BRG_PCM_CFG, (0x0 << 6) | 0x3); /* was _io before */
533}
534
535inline void
536disablepcibridge(struct hfc_multi *c)
537{
538 HFC_outb(c, R_BRG_PCM_CFG, (0x0 << 6) | 0x2); /* was _io before */
539}
540
541inline unsigned char
542readpcibridge(struct hfc_multi *hc, unsigned char address)
543{
544 unsigned short cipv;
545 unsigned char data;
546
547 if (!hc->pci_iobase)
548 return 0;
549
550 /* slow down a PCI read access by 1 PCI clock cycle */
551 HFC_outb(hc, R_CTRL, 0x4); /*was _io before*/
552
553 if (address == 0)
554 cipv = 0x4000;
555 else
556 cipv = 0x5800;
557
558 /* select local bridge port address by writing to CIP port */
559 /* data = HFC_inb(c, cipv); * was _io before */
560 outw(cipv, hc->pci_iobase + 4);
561 data = inb(hc->pci_iobase);
562
563 /* restore R_CTRL for normal PCI read cycle speed */
564 HFC_outb(hc, R_CTRL, 0x0); /* was _io before */
565
566 return data;
567}
568
569inline void
570writepcibridge(struct hfc_multi *hc, unsigned char address, unsigned char data)
571{
572 unsigned short cipv;
573 unsigned int datav;
574
575 if (!hc->pci_iobase)
576 return;
577
578 if (address == 0)
579 cipv = 0x4000;
580 else
581 cipv = 0x5800;
582
583 /* select local bridge port address by writing to CIP port */
584 outw(cipv, hc->pci_iobase + 4);
585 /* define a 32 bit dword with 4 identical bytes for write sequence */
586 datav = data | ((__u32) data << 8) | ((__u32) data << 16) |
587 ((__u32) data << 24);
588
589 /*
590 * write this 32 bit dword to the bridge data port
591 * this will initiate a write sequence of up to 4 writes to the same
592 * address on the local bus interface the number of write accesses
593 * is undefined but >=1 and depends on the next PCI transaction
594 * during write sequence on the local bus
595 */
596 outl(datav, hc->pci_iobase);
597}
598
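As the comment in writepcibridge() explains, the byte to be written is replicated into all four byte lanes of a 32-bit word, so the one to four identical local-bus writes triggered by the bridge all carry the same value. Worked example (illustrative only):

	/* illustration only: 0xA5 is replicated to 0xA5A5A5A5 */
	static unsigned int example_replicate(unsigned char data)
	{
		return data | ((__u32)data << 8) | ((__u32)data << 16) |
		       ((__u32)data << 24);
	}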
599inline void
600cpld_set_reg(struct hfc_multi *hc, unsigned char reg)
601{
602 /* Do data pin read low byte */
603 HFC_outb(hc, R_GPIO_OUT1, reg);
604}
605
606inline void
607cpld_write_reg(struct hfc_multi *hc, unsigned char reg, unsigned char val)
608{
609 cpld_set_reg(hc, reg);
610
611 enablepcibridge(hc);
612 writepcibridge(hc, 1, val);
613 disablepcibridge(hc);
614
615 return;
616}
617
618inline unsigned char
619cpld_read_reg(struct hfc_multi *hc, unsigned char reg)
620{
621 unsigned char bytein;
622
623 cpld_set_reg(hc, reg);
624
625 /* Do data pin read low byte */
626 HFC_outb(hc, R_GPIO_OUT1, reg);
627
628 enablepcibridge(hc);
629 bytein = readpcibridge(hc, 1);
630 disablepcibridge(hc);
631
632 return bytein;
633}
634
635inline void
636vpm_write_address(struct hfc_multi *hc, unsigned short addr)
637{
638 cpld_write_reg(hc, 0, 0xff & addr);
639 cpld_write_reg(hc, 1, 0x01 & (addr >> 8));
640}
641
642inline unsigned short
643vpm_read_address(struct hfc_multi *c)
644{
645 unsigned short addr;
646 unsigned short highbit;
647
648 addr = cpld_read_reg(c, 0);
649 highbit = cpld_read_reg(c, 1);
650
651 addr = addr | (highbit << 8);
652
653 return addr & 0x1ff;
654}
655
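vpm_write_address() and vpm_read_address() above split a 9-bit echo canceller address across two CPLD registers: the low byte goes to register 0 and the ninth bit to register 1. A short worked example (illustrative only):

	/* illustration only: 0x1a3 is written as low=0xa3, high=0x01 and
	 * read back as (0xa3 | (0x01 << 8)) & 0x1ff == 0x1a3 */
	static void example_vpm_addr_split(void)
	{
		unsigned short addr = 0x1a3;

		printk(KERN_DEBUG "low=0x%02x high=0x%02x\n",
		       0xff & addr, 0x01 & (addr >> 8));
	}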
656inline unsigned char
657vpm_in(struct hfc_multi *c, int which, unsigned short addr)
658{
659 unsigned char res;
660
661 vpm_write_address(c, addr);
662
663 if (!which)
664 cpld_set_reg(c, 2);
665 else
666 cpld_set_reg(c, 3);
667
668 enablepcibridge(c);
669 res = readpcibridge(c, 1);
670 disablepcibridge(c);
671
672 cpld_set_reg(c, 0);
673
674 return res;
675}
676
677inline void
678vpm_out(struct hfc_multi *c, int which, unsigned short addr,
679 unsigned char data)
680{
681 vpm_write_address(c, addr);
682
683 enablepcibridge(c);
684
685 if (!which)
686 cpld_set_reg(c, 2);
687 else
688 cpld_set_reg(c, 3);
689
690 writepcibridge(c, 1, data);
691
692 cpld_set_reg(c, 0);
693
694 disablepcibridge(c);
695
696 {
697 unsigned char regin;
698 regin = vpm_in(c, which, addr);
699 if (regin != data)
700 printk(KERN_DEBUG "Wrote 0x%x to register 0x%x but got back "
701 "0x%x\n", data, addr, regin);
702 }
703
704}
705
706
707void
708vpm_init(struct hfc_multi *wc)
709{
710 unsigned char reg;
711 unsigned int mask;
712 unsigned int i, x, y;
713 unsigned int ver;
714
715 for (x = 0; x < NUM_EC; x++) {
716 /* Setup GPIO's */
717 if (!x) {
718 ver = vpm_in(wc, x, 0x1a0);
719 printk(KERN_DEBUG "VPM: Chip %d: ver %02x\n", x, ver);
720 }
721
722 for (y = 0; y < 4; y++) {
723 vpm_out(wc, x, 0x1a8 + y, 0x00); /* GPIO out */
724 vpm_out(wc, x, 0x1ac + y, 0x00); /* GPIO dir */
725 vpm_out(wc, x, 0x1b0 + y, 0x00); /* GPIO sel */
726 }
727
728 /* Setup TDM path - sets fsync and tdm_clk as inputs */
729 reg = vpm_in(wc, x, 0x1a3); /* misc_con */
730 vpm_out(wc, x, 0x1a3, reg & ~2);
731
732 /* Setup Echo length (256 taps) */
733 vpm_out(wc, x, 0x022, 1);
734 vpm_out(wc, x, 0x023, 0xff);
735
736 /* Setup timeslots */
737 vpm_out(wc, x, 0x02f, 0x00);
738 mask = 0x02020202 << (x * 4);
739
740 /* Setup the tdm channel masks for all chips */
741 for (i = 0; i < 4; i++)
742 vpm_out(wc, x, 0x33 - i, (mask >> (i << 3)) & 0xff);
743
744 /* Setup convergence rate */
745 printk(KERN_DEBUG "VPM: A-law mode\n");
746 reg = 0x00 | 0x10 | 0x01;
747 vpm_out(wc, x, 0x20, reg);
748 printk(KERN_DEBUG "VPM reg 0x20 is %x\n", reg);
749 /*vpm_out(wc, x, 0x20, (0x00 | 0x08 | 0x20 | 0x10)); */
750
751 vpm_out(wc, x, 0x24, 0x02);
752 reg = vpm_in(wc, x, 0x24);
753 printk(KERN_DEBUG "NLP Thresh is set to %d (0x%x)\n", reg, reg);
754
755 /* Initialize echo cans */
756 for (i = 0; i < MAX_TDM_CHAN; i++) {
757 if (mask & (0x00000001 << i))
758 vpm_out(wc, x, i, 0x00);
759 }
760
761 /*
762		 * The ARM architecture (at least) disallows a udelay() of
763		 * more than 2 ms; longer values give a fake "__bad_udelay"
764		 * reference at link time.
765		 * Long delays in kernel code are a bad idea anyway, so for
766		 * now work around it by using 5 x 2 ms instead of 1 x 10 ms.
767 */
768
769 udelay(2000);
770 udelay(2000);
771 udelay(2000);
772 udelay(2000);
773 udelay(2000);
774
775 /* Put in bypass mode */
776 for (i = 0; i < MAX_TDM_CHAN; i++) {
777 if (mask & (0x00000001 << i))
778 vpm_out(wc, x, i, 0x01);
779 }
780
781 /* Enable bypass */
782 for (i = 0; i < MAX_TDM_CHAN; i++) {
783 if (mask & (0x00000001 << i))
784 vpm_out(wc, x, 0x78 + i, 0x01);
785 }
786
787 }
788}
789
790void
791vpm_check(struct hfc_multi *hctmp)
792{
793 unsigned char gpi2;
794
795 gpi2 = HFC_inb(hctmp, R_GPI_IN2);
796
797 if ((gpi2 & 0x3) != 0x3)
798 printk(KERN_DEBUG "Got interrupt 0x%x from VPM!\n", gpi2);
799}
800
801
802/*
803 * Interface to enable/disable the HW Echocan
804 *
805 * these functions are called within a spin_lock_irqsave on
806 * the channel instance lock, so we are not disturbed by irqs
807 *
808 * we can later easily change the interface to make other
809 * things configurable, for now we configure the taps
810 *
811 */
812
813void
814vpm_echocan_on(struct hfc_multi *hc, int ch, int taps)
815{
816 unsigned int timeslot;
817 unsigned int unit;
818 struct bchannel *bch = hc->chan[ch].bch;
819#ifdef TXADJ
820 int txadj = -4;
821 struct sk_buff *skb;
822#endif
823 if (hc->chan[ch].protocol != ISDN_P_B_RAW)
824 return;
825
826 if (!bch)
827 return;
828
829#ifdef TXADJ
830 skb = _alloc_mISDN_skb(PH_CONTROL_IND, HFC_VOL_CHANGE_TX,
831 sizeof(int), &txadj, GFP_ATOMIC);
832 if (skb)
833 recv_Bchannel_skb(bch, skb);
834#endif
835
836 timeslot = ((ch/4)*8) + ((ch%4)*4) + 1;
837 unit = ch % 4;
838
839 printk(KERN_NOTICE "vpm_echocan_on called taps [%d] on timeslot %d\n",
840 taps, timeslot);
841
842 vpm_out(hc, unit, timeslot, 0x7e);
843}
844
845void
846vpm_echocan_off(struct hfc_multi *hc, int ch)
847{
848 unsigned int timeslot;
849 unsigned int unit;
850 struct bchannel *bch = hc->chan[ch].bch;
851#ifdef TXADJ
852 int txadj = 0;
853 struct sk_buff *skb;
854#endif
855
856 if (hc->chan[ch].protocol != ISDN_P_B_RAW)
857 return;
858
859 if (!bch)
860 return;
861
862#ifdef TXADJ
863 skb = _alloc_mISDN_skb(PH_CONTROL_IND, HFC_VOL_CHANGE_TX,
864 sizeof(int), &txadj, GFP_ATOMIC);
865 if (skb)
866 recv_Bchannel_skb(bch, skb);
867#endif
868
869 timeslot = ((ch/4)*8) + ((ch%4)*4) + 1;
870 unit = ch % 4;
871
872 printk(KERN_NOTICE "vpm_echocan_off called on timeslot %d\n",
873 timeslot);
874 /* FILLME */
875 vpm_out(hc, unit, timeslot, 0x01);
876}
877
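The echo canceller hooks above map a B-channel index to a VPM unit and TDM timeslot via unit = ch % 4 and timeslot = ((ch/4)*8) + ((ch%4)*4) + 1. A short worked example (illustrative only):

	/* illustration only: ch 0 -> unit 0, timeslot 1;
	 * ch 5 -> unit 1, timeslot (1*8) + (1*4) + 1 = 13 */
	static void example_vpm_mapping(int ch)
	{
		printk(KERN_DEBUG "ch=%d unit=%d timeslot=%d\n",
		       ch, ch % 4, ((ch / 4) * 8) + ((ch % 4) * 4) + 1);
	}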
878
879/*
880 * Speech Design resync feature
881 * NOTE: This is sometimes called outside the interrupt handler.
882 * We must lock irqsave, so no other interrupt (from another card) can occur!
883 * Also, multiple interrupts may nest, so each access (lists, card) must be locked!
884 */
885static inline void
886hfcmulti_resync(struct hfc_multi *locked, struct hfc_multi *newmaster, int rm)
887{
888	struct hfc_multi *hc, *next, *pcmmaster = NULL;
889 u_int *plx_acc_32, pv;
890 u_long flags;
891
892 spin_lock_irqsave(&HFClock, flags);
893 spin_lock(&plx_lock); /* must be locked inside other locks */
894
895 if (debug & DEBUG_HFCMULTI_PLXSD)
896 printk(KERN_DEBUG "%s: RESYNC(syncmaster=0x%p)\n",
897 __func__, syncmaster);
898
899 /* select new master */
900 if (newmaster) {
901 if (debug & DEBUG_HFCMULTI_PLXSD)
902 printk(KERN_DEBUG "using provided controller\n");
903 } else {
904 list_for_each_entry_safe(hc, next, &HFClist, list) {
905 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
906 if (hc->syncronized) {
907 newmaster = hc;
908 break;
909 }
910 }
911 }
912 }
913
914 /* Disable sync of all cards */
915 list_for_each_entry_safe(hc, next, &HFClist, list) {
916 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
917 plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
918 pv = readl(plx_acc_32);
919 pv &= ~PLX_SYNC_O_EN;
920 writel(pv, plx_acc_32);
921 if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip)) {
922 pcmmaster = hc;
923 if (hc->type == 1) {
924 if (debug & DEBUG_HFCMULTI_PLXSD)
925 printk(KERN_DEBUG
926 "Schedule SYNC_I\n");
927 hc->e1_resync |= 1; /* get SYNC_I */
928 }
929 }
930 }
931 }
932
933 if (newmaster) {
934 hc = newmaster;
935 if (debug & DEBUG_HFCMULTI_PLXSD)
936		printk(KERN_DEBUG "id=%d (0x%p) = synchronized with "
937 "interface.\n", hc->id, hc);
938 /* Enable new sync master */
939 plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
940 pv = readl(plx_acc_32);
941 pv |= PLX_SYNC_O_EN;
942 writel(pv, plx_acc_32);
943 /* switch to jatt PLL, if not disabled by RX_SYNC */
944 if (hc->type == 1 && !test_bit(HFC_CHIP_RX_SYNC, &hc->chip)) {
945 if (debug & DEBUG_HFCMULTI_PLXSD)
946 printk(KERN_DEBUG "Schedule jatt PLL\n");
947 hc->e1_resync |= 2; /* switch to jatt */
948 }
949 } else {
950 if (pcmmaster) {
951 hc = pcmmaster;
952 if (debug & DEBUG_HFCMULTI_PLXSD)
953 printk(KERN_DEBUG
954				"id=%d (0x%p) = PCM master synchronized "
955 "with QUARTZ\n", hc->id, hc);
956 if (hc->type == 1) {
957 /* Use the crystal clock for the PCM
958 master card */
959 if (debug & DEBUG_HFCMULTI_PLXSD)
960 printk(KERN_DEBUG
961 "Schedule QUARTZ for HFC-E1\n");
962 hc->e1_resync |= 4; /* switch quartz */
963 } else {
964 if (debug & DEBUG_HFCMULTI_PLXSD)
965 printk(KERN_DEBUG
966 "QUARTZ is automatically "
967 "enabled by HFC-%dS\n", hc->type);
968 }
969 plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
970 pv = readl(plx_acc_32);
971 pv |= PLX_SYNC_O_EN;
972 writel(pv, plx_acc_32);
973 } else
974 if (!rm)
975 printk(KERN_ERR "%s no pcm master, this MUST "
976 "not happen!\n", __func__);
977 }
978 syncmaster = newmaster;
979
980 spin_unlock(&plx_lock);
981 spin_unlock_irqrestore(&HFClock, flags);
982}
983
984/* This must be called AND hc must be locked irqsave!!! */
985inline void
986plxsd_checksync(struct hfc_multi *hc, int rm)
987{
988 if (hc->syncronized) {
989 if (syncmaster == NULL) {
990 if (debug & DEBUG_HFCMULTI_PLXSD)
991 printk(KERN_WARNING "%s: GOT sync on card %d"
992 " (id=%d)\n", __func__, hc->id + 1,
993 hc->id);
994 hfcmulti_resync(hc, hc, rm);
995 }
996 } else {
997 if (syncmaster == hc) {
998 if (debug & DEBUG_HFCMULTI_PLXSD)
999 printk(KERN_WARNING "%s: LOST sync on card %d"
1000 " (id=%d)\n", __func__, hc->id + 1,
1001 hc->id);
1002 hfcmulti_resync(hc, NULL, rm);
1003 }
1004 }
1005}
1006
1007
1008/*
1009 * free hardware resources used by driver
1010 */
1011static void
1012release_io_hfcmulti(struct hfc_multi *hc)
1013{
1014 u_int *plx_acc_32, pv;
1015 u_long plx_flags;
1016
1017 if (debug & DEBUG_HFCMULTI_INIT)
1018 printk(KERN_DEBUG "%s: entered\n", __func__);
1019
1020 /* soft reset also masks all interrupts */
1021 hc->hw.r_cirm |= V_SRES;
1022 HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
1023 udelay(1000);
1024 hc->hw.r_cirm &= ~V_SRES;
1025 HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
1026 udelay(1000); /* instead of 'wait' that may cause locking */
1027
1028 /* release Speech Design card, if PLX was initialized */
1029 if (test_bit(HFC_CHIP_PLXSD, &hc->chip) && hc->plx_membase) {
1030 if (debug & DEBUG_HFCMULTI_PLXSD)
1031 printk(KERN_DEBUG "%s: release PLXSD card %d\n",
1032 __func__, hc->id + 1);
1033 spin_lock_irqsave(&plx_lock, plx_flags);
1034 plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
1035 writel(PLX_GPIOC_INIT, plx_acc_32);
1036 pv = readl(plx_acc_32);
1037 /* Termination off */
1038 pv &= ~PLX_TERM_ON;
1039 /* Disconnect the PCM */
1040 pv |= PLX_SLAVE_EN_N;
1041 pv &= ~PLX_MASTER_EN;
1042 pv &= ~PLX_SYNC_O_EN;
1043 /* Put the DSP in Reset */
1044 pv &= ~PLX_DSP_RES_N;
1045 writel(pv, plx_acc_32);
1046 if (debug & DEBUG_HFCMULTI_INIT)
1047 printk(KERN_WARNING "%s: PCM off: PLX_GPIO=%x\n",
1048 __func__, pv);
1049 spin_unlock_irqrestore(&plx_lock, plx_flags);
1050 }
1051
1052 /* disable memory mapped ports / io ports */
1053 test_and_clear_bit(HFC_CHIP_PLXSD, &hc->chip); /* prevent resync */
1054 pci_write_config_word(hc->pci_dev, PCI_COMMAND, 0);
1055 if (hc->pci_membase)
1056 iounmap((void *)hc->pci_membase);
1057 if (hc->plx_membase)
1058 iounmap((void *)hc->plx_membase);
1059 if (hc->pci_iobase)
1060 release_region(hc->pci_iobase, 8);
1061
1062 if (hc->pci_dev) {
1063 pci_disable_device(hc->pci_dev);
1064 pci_set_drvdata(hc->pci_dev, NULL);
1065 }
1066 if (debug & DEBUG_HFCMULTI_INIT)
1067 printk(KERN_DEBUG "%s: done\n", __func__);
1068}
1069
1070/*
1071 * function called to reset the HFC chip. A complete software reset of chip
1072 * and fifos is done. All configuration of the chip is done.
1073 */
1074
1075static int
1076init_chip(struct hfc_multi *hc)
1077{
1078 u_long flags, val, val2 = 0, rev;
1079 int i, err = 0;
1080 u_char r_conf_en, rval;
1081 u_int *plx_acc_32, pv;
1082 u_long plx_flags, hfc_flags;
1083 int plx_count;
1084 struct hfc_multi *pos, *next, *plx_last_hc;
1085
1086 spin_lock_irqsave(&hc->lock, flags);
1087 /* reset all registers */
1088 memset(&hc->hw, 0, sizeof(struct hfcm_hw));
1089
1090 /* revision check */
1091 if (debug & DEBUG_HFCMULTI_INIT)
1092 printk(KERN_DEBUG "%s: entered\n", __func__);
1093 val = HFC_inb(hc, R_CHIP_ID)>>4;
1094 if (val != 0x8 && val != 0xc && val != 0xe) {
1095 printk(KERN_INFO "HFC_multi: unknown CHIP_ID:%x\n", (u_int)val);
1096 err = -EIO;
1097 goto out;
1098 }
1099 rev = HFC_inb(hc, R_CHIP_RV);
1100 printk(KERN_INFO
1101 "HFC_multi: detected HFC with chip ID=0x%lx revision=%ld%s\n",
1102 val, rev, (rev == 0) ? " (old FIFO handling)" : "");
1103 if (rev == 0) {
1104 test_and_set_bit(HFC_CHIP_REVISION0, &hc->chip);
1105 printk(KERN_WARNING
1106 "HFC_multi: NOTE: Your chip is revision 0, "
1107 "ask Cologne Chip for update. Newer chips "
1108 "have a better FIFO handling. Old chips "
1109 "still work but may have slightly lower "
1110 "HDLC transmit performance.\n");
1111 }
1112 if (rev > 1) {
1113 printk(KERN_WARNING "HFC_multi: WARNING: This driver doesn't "
1114 "consider chip revision = %ld. The chip / "
1115 "bridge may not work.\n", rev);
1116 }
1117
1118 /* set s-ram size */
1119 hc->Flen = 0x10;
1120 hc->Zmin = 0x80;
1121 hc->Zlen = 384;
1122 hc->DTMFbase = 0x1000;
1123 if (test_bit(HFC_CHIP_EXRAM_128, &hc->chip)) {
1124 if (debug & DEBUG_HFCMULTI_INIT)
1125			printk(KERN_DEBUG "%s: changing to 128K external RAM\n",
1126 __func__);
1127 hc->hw.r_ctrl |= V_EXT_RAM;
1128 hc->hw.r_ram_sz = 1;
1129 hc->Flen = 0x20;
1130 hc->Zmin = 0xc0;
1131 hc->Zlen = 1856;
1132 hc->DTMFbase = 0x2000;
1133 }
1134 if (test_bit(HFC_CHIP_EXRAM_512, &hc->chip)) {
1135 if (debug & DEBUG_HFCMULTI_INIT)
1136			printk(KERN_DEBUG "%s: changing to 512K external RAM\n",
1137 __func__);
1138 hc->hw.r_ctrl |= V_EXT_RAM;
1139 hc->hw.r_ram_sz = 2;
1140 hc->Flen = 0x20;
1141 hc->Zmin = 0xc0;
1142 hc->Zlen = 8000;
1143 hc->DTMFbase = 0x2000;
1144 }
1145 hc->max_trans = poll << 1;
1146 if (hc->max_trans > hc->Zlen)
1147 hc->max_trans = hc->Zlen;
1148
1149 /* Speech Design PLX bridge */
1150 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
1151 if (debug & DEBUG_HFCMULTI_PLXSD)
1152 printk(KERN_DEBUG "%s: initializing PLXSD card %d\n",
1153 __func__, hc->id + 1);
1154 spin_lock_irqsave(&plx_lock, plx_flags);
1155 plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
1156 writel(PLX_GPIOC_INIT, plx_acc_32);
1157 pv = readl(plx_acc_32);
1158 /* The first and the last cards are terminating the PCM bus */
1159 pv |= PLX_TERM_ON; /* hc is currently the last */
1160 /* Disconnect the PCM */
1161 pv |= PLX_SLAVE_EN_N;
1162 pv &= ~PLX_MASTER_EN;
1163 pv &= ~PLX_SYNC_O_EN;
1164 /* Put the DSP in Reset */
1165 pv &= ~PLX_DSP_RES_N;
1166 writel(pv, plx_acc_32);
1167 spin_unlock_irqrestore(&plx_lock, plx_flags);
1168 if (debug & DEBUG_HFCMULTI_INIT)
1169 printk(KERN_WARNING "%s: slave/term: PLX_GPIO=%x\n",
1170 __func__, pv);
1171 /*
1172 * If we are the 3rd PLXSD card or higher, we must turn
1173 * termination of last PLXSD card off.
1174 */
1175 spin_lock_irqsave(&HFClock, hfc_flags);
1176 plx_count = 0;
1177 plx_last_hc = NULL;
1178 list_for_each_entry_safe(pos, next, &HFClist, list) {
1179 if (test_bit(HFC_CHIP_PLXSD, &pos->chip)) {
1180 plx_count++;
1181 if (pos != hc)
1182 plx_last_hc = pos;
1183 }
1184 }
1185 if (plx_count >= 3) {
1186 if (debug & DEBUG_HFCMULTI_PLXSD)
1187 printk(KERN_DEBUG "%s: card %d is between, so "
1188 "we disable termination\n",
1189 __func__, plx_last_hc->id + 1);
1190 spin_lock_irqsave(&plx_lock, plx_flags);
1191 plx_acc_32 = (u_int *)(plx_last_hc->plx_membase
1192 + PLX_GPIOC);
1193 pv = readl(plx_acc_32);
1194 pv &= ~PLX_TERM_ON;
1195 writel(pv, plx_acc_32);
1196 spin_unlock_irqrestore(&plx_lock, plx_flags);
1197 if (debug & DEBUG_HFCMULTI_INIT)
1198 printk(KERN_WARNING "%s: term off: PLX_GPIO=%x\n",
1199 __func__, pv);
1200 }
1201 spin_unlock_irqrestore(&HFClock, hfc_flags);
1202 hc->hw.r_pcm_md0 = V_F0_LEN; /* shift clock for DSP */
1203 }
1204
1205 /* we only want the real Z2 read-pointer for revision > 0 */
1206 if (!test_bit(HFC_CHIP_REVISION0, &hc->chip))
1207 hc->hw.r_ram_sz |= V_FZ_MD;
1208
1209 /* select pcm mode */
1210 if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
1211 if (debug & DEBUG_HFCMULTI_INIT)
1212 printk(KERN_DEBUG "%s: setting PCM into slave mode\n",
1213 __func__);
1214 } else
1215 if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip) && !plxsd_master) {
1216 if (debug & DEBUG_HFCMULTI_INIT)
1217 printk(KERN_DEBUG "%s: setting PCM into master mode\n",
1218 __func__);
1219 hc->hw.r_pcm_md0 |= V_PCM_MD;
1220 } else {
1221 if (debug & DEBUG_HFCMULTI_INIT)
1222 printk(KERN_DEBUG "%s: performing PCM auto detect\n",
1223 __func__);
1224 }
1225
1226 /* soft reset */
1227 HFC_outb(hc, R_CTRL, hc->hw.r_ctrl);
1228 HFC_outb(hc, R_RAM_SZ, hc->hw.r_ram_sz);
1229 HFC_outb(hc, R_FIFO_MD, 0);
1230 hc->hw.r_cirm = V_SRES | V_HFCRES | V_PCMRES | V_STRES | V_RLD_EPR;
1231 HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
1232 udelay(100);
1233 hc->hw.r_cirm = 0;
1234 HFC_outb(hc, R_CIRM, hc->hw.r_cirm);
1235 udelay(100);
1236 HFC_outb(hc, R_RAM_SZ, hc->hw.r_ram_sz);
1237
1238 /* Speech Design PLX bridge pcm and sync mode */
1239 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
1240 spin_lock_irqsave(&plx_lock, plx_flags);
1241 plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
1242 pv = readl(plx_acc_32);
1243 /* Connect PCM */
1244 if (hc->hw.r_pcm_md0 & V_PCM_MD) {
1245 pv |= PLX_MASTER_EN | PLX_SLAVE_EN_N;
1246 pv |= PLX_SYNC_O_EN;
1247 if (debug & DEBUG_HFCMULTI_INIT)
1248 printk(KERN_WARNING "%s: master: PLX_GPIO=%x\n",
1249 __func__, pv);
1250 } else {
1251 pv &= ~(PLX_MASTER_EN | PLX_SLAVE_EN_N);
1252 pv &= ~PLX_SYNC_O_EN;
1253 if (debug & DEBUG_HFCMULTI_INIT)
1254 printk(KERN_WARNING "%s: slave: PLX_GPIO=%x\n",
1255 __func__, pv);
1256 }
1257 writel(pv, plx_acc_32);
1258 spin_unlock_irqrestore(&plx_lock, plx_flags);
1259 }
1260
1261 /* PCM setup */
1262 HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0x90);
1263 if (hc->slots == 32)
1264 HFC_outb(hc, R_PCM_MD1, 0x00);
1265 if (hc->slots == 64)
1266 HFC_outb(hc, R_PCM_MD1, 0x10);
1267 if (hc->slots == 128)
1268 HFC_outb(hc, R_PCM_MD1, 0x20);
1269 HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0xa0);
1270 if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
1271 HFC_outb(hc, R_PCM_MD2, V_SYNC_SRC); /* sync via SYNC_I / O */
1272 else
1273 HFC_outb(hc, R_PCM_MD2, 0x00); /* sync from interface */
1274 HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0x00);
1275 for (i = 0; i < 256; i++) {
1276 HFC_outb_nodebug(hc, R_SLOT, i);
1277 HFC_outb_nodebug(hc, A_SL_CFG, 0);
1278 HFC_outb_nodebug(hc, A_CONF, 0);
1279 hc->slot_owner[i] = -1;
1280 }
1281
1282 /* set clock speed */
1283 if (test_bit(HFC_CHIP_CLOCK2, &hc->chip)) {
1284 if (debug & DEBUG_HFCMULTI_INIT)
1285 printk(KERN_DEBUG
1286 "%s: setting double clock\n", __func__);
1287 HFC_outb(hc, R_BRG_PCM_CFG, V_PCM_CLK);
1288 }
1289
1290 /* B410P GPIO */
1291 if (test_bit(HFC_CHIP_B410P, &hc->chip)) {
1292 printk(KERN_NOTICE "Setting GPIOs\n");
1293 HFC_outb(hc, R_GPIO_SEL, 0x30);
1294 HFC_outb(hc, R_GPIO_EN1, 0x3);
1295 udelay(1000);
1296 printk(KERN_NOTICE "calling vpm_init\n");
1297 vpm_init(hc);
1298 }
1299
1300 /* check if R_F0_CNT counts (8 kHz frame count) */
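	/*
	 * R_F0_CNT increments once per 125us PCM frame (8 kHz), so the
	 * ~10ms sleep below should advance it by roughly 80. The test
	 * "val2 >= val + 8" only demands about 1ms worth of frames, a
	 * deliberately conservative threshold for deciding whether a PCM
	 * clock is present at all.
	 */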
1301 val = HFC_inb(hc, R_F0_CNTL);
1302 val += HFC_inb(hc, R_F0_CNTH) << 8;
1303 if (debug & DEBUG_HFCMULTI_INIT)
1304 printk(KERN_DEBUG
1305 "HFC_multi F0_CNT %ld after reset\n", val);
1306 spin_unlock_irqrestore(&hc->lock, flags);
1307 set_current_state(TASK_UNINTERRUPTIBLE);
1308 schedule_timeout((HZ/100)?:1); /* Timeout minimum 10ms */
1309 spin_lock_irqsave(&hc->lock, flags);
1310 val2 = HFC_inb(hc, R_F0_CNTL);
1311 val2 += HFC_inb(hc, R_F0_CNTH) << 8;
1312 if (debug & DEBUG_HFCMULTI_INIT)
1313 printk(KERN_DEBUG
1314 "HFC_multi F0_CNT %ld after 10 ms (1st try)\n",
1315 val2);
1316 if (val2 >= val+8) { /* 1 ms */
1317 /* it counts, so we keep the pcm mode */
1318 if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip))
1319 printk(KERN_INFO "controller is PCM bus MASTER\n");
1320 else
1321 if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip))
1322 printk(KERN_INFO "controller is PCM bus SLAVE\n");
1323 else {
1324 test_and_set_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
1325 printk(KERN_INFO "controller is PCM bus SLAVE "
1326 "(auto detected)\n");
1327 }
1328 } else {
1329 /* does not count */
1330 if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip)) {
1331controller_fail:
1332 printk(KERN_ERR "HFC_multi ERROR, getting no 125us "
1333			    "pulse. The controller seems to have failed.\n");
1334 err = -EIO;
1335 goto out;
1336 }
1337 if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
1338 printk(KERN_INFO "controller is PCM bus SLAVE "
1339 "(ignoring missing PCM clock)\n");
1340 } else {
1341 /* only one pcm master */
1342 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)
1343 && plxsd_master) {
1344 printk(KERN_ERR "HFC_multi ERROR, no clock "
1345				    "found on another Speech Design card. "
1346				    "Please make sure the PCM cable is connected.\n");
1347 err = -EIO;
1348 goto out;
1349 }
1350 /* retry with master clock */
1351 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
1352 spin_lock_irqsave(&plx_lock, plx_flags);
1353 plx_acc_32 = (u_int *)(hc->plx_membase +
1354 PLX_GPIOC);
1355 pv = readl(plx_acc_32);
1356 pv |= PLX_MASTER_EN | PLX_SLAVE_EN_N;
1357 pv |= PLX_SYNC_O_EN;
1358 writel(pv, plx_acc_32);
1359 spin_unlock_irqrestore(&plx_lock, plx_flags);
1360 if (debug & DEBUG_HFCMULTI_INIT)
1361 printk(KERN_WARNING "%s: master: PLX_GPIO"
1362 "=%x\n", __func__, pv);
1363 }
1364 hc->hw.r_pcm_md0 |= V_PCM_MD;
1365 HFC_outb(hc, R_PCM_MD0, hc->hw.r_pcm_md0 | 0x00);
1366 spin_unlock_irqrestore(&hc->lock, flags);
1367 set_current_state(TASK_UNINTERRUPTIBLE);
1368 schedule_timeout((HZ/100)?:1); /* Timeout min. 10ms */
1369 spin_lock_irqsave(&hc->lock, flags);
1370 val2 = HFC_inb(hc, R_F0_CNTL);
1371 val2 += HFC_inb(hc, R_F0_CNTH) << 8;
1372 if (debug & DEBUG_HFCMULTI_INIT)
1373 printk(KERN_DEBUG "HFC_multi F0_CNT %ld after "
1374 "10 ms (2nd try)\n", val2);
1375 if (val2 >= val+8) { /* 1 ms */
1376 test_and_set_bit(HFC_CHIP_PCM_MASTER,
1377 &hc->chip);
1378 printk(KERN_INFO "controller is PCM bus MASTER "
1379 "(auto detected)\n");
1380 } else
1381 goto controller_fail;
1382 }
1383 }
1384
1385 /* Release the DSP Reset */
1386 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
1387 if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip))
1388 plxsd_master = 1;
1389 spin_lock_irqsave(&plx_lock, plx_flags);
1390 plx_acc_32 = (u_int *)(hc->plx_membase+PLX_GPIOC);
1391 pv = readl(plx_acc_32);
1392 pv |= PLX_DSP_RES_N;
1393 writel(pv, plx_acc_32);
1394 spin_unlock_irqrestore(&plx_lock, plx_flags);
1395 if (debug & DEBUG_HFCMULTI_INIT)
1396 printk(KERN_WARNING "%s: reset off: PLX_GPIO=%x\n",
1397 __func__, pv);
1398 }
1399
1400 /* pcm id */
1401 if (hc->pcm)
1402 printk(KERN_INFO "controller has given PCM BUS ID %d\n",
1403 hc->pcm);
1404 else {
1405 if (test_bit(HFC_CHIP_PCM_MASTER, &hc->chip)
1406 || test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
1407 PCM_cnt++; /* SD has proprietary bridging */
1408 }
1409 hc->pcm = PCM_cnt;
1410 printk(KERN_INFO "controller has PCM BUS ID %d "
1411 "(auto selected)\n", hc->pcm);
1412 }
1413
1414 /* set up timer */
1415 HFC_outb(hc, R_TI_WD, poll_timer);
1416 hc->hw.r_irqmsk_misc |= V_TI_IRQMSK;
1417
1418 /*
1419 * set up 125us interrupt, only if function pointer is available
1420 * and module parameter timer is set
1421 */
1422 if (timer && hfc_interrupt && register_interrupt) {
1423 /* only one chip should use this interrupt */
1424 timer = 0;
1425 interrupt_registered = 1;
1426 hc->hw.r_irqmsk_misc |= V_PROC_IRQMSK;
1427 /* deactivate other interrupts in ztdummy */
1428 register_interrupt();
1429 }
1430
1431 /* set E1 state machine IRQ */
1432 if (hc->type == 1)
1433 hc->hw.r_irqmsk_misc |= V_STA_IRQMSK;
1434
1435 /* set DTMF detection */
1436 if (test_bit(HFC_CHIP_DTMF, &hc->chip)) {
1437 if (debug & DEBUG_HFCMULTI_INIT)
1438 printk(KERN_DEBUG "%s: enabling DTMF detection "
1439			    "for all B-channels\n", __func__);
1440 hc->hw.r_dtmf = V_DTMF_EN | V_DTMF_STOP;
1441 if (test_bit(HFC_CHIP_ULAW, &hc->chip))
1442 hc->hw.r_dtmf |= V_ULAW_SEL;
1443 HFC_outb(hc, R_DTMF_N, 102 - 1);
1444 hc->hw.r_irqmsk_misc |= V_DTMF_IRQMSK;
1445 }
1446
1447 /* conference engine */
1448 if (test_bit(HFC_CHIP_ULAW, &hc->chip))
1449 r_conf_en = V_CONF_EN | V_ULAW;
1450 else
1451 r_conf_en = V_CONF_EN;
1452 HFC_outb(hc, R_CONF_EN, r_conf_en);
1453
1454 /* setting leds */
1455 switch (hc->leds) {
1456 case 1: /* HFC-E1 OEM */
1457 if (test_bit(HFC_CHIP_WATCHDOG, &hc->chip))
1458 HFC_outb(hc, R_GPIO_SEL, 0x32);
1459 else
1460 HFC_outb(hc, R_GPIO_SEL, 0x30);
1461
1462 HFC_outb(hc, R_GPIO_EN1, 0x0f);
1463 HFC_outb(hc, R_GPIO_OUT1, 0x00);
1464
1465 HFC_outb(hc, R_GPIO_EN0, V_GPIO_EN2 | V_GPIO_EN3);
1466 break;
1467
1468 case 2: /* HFC-4S OEM */
1469 case 3:
1470 HFC_outb(hc, R_GPIO_SEL, 0xf0);
1471 HFC_outb(hc, R_GPIO_EN1, 0xff);
1472 HFC_outb(hc, R_GPIO_OUT1, 0x00);
1473 break;
1474 }
1475
1476 /* set master clock */
1477 if (hc->masterclk >= 0) {
1478 if (debug & DEBUG_HFCMULTI_INIT)
1479 printk(KERN_DEBUG "%s: setting ST master clock "
1480 "to port %d (0..%d)\n",
1481 __func__, hc->masterclk, hc->ports-1);
1482 hc->hw.r_st_sync = hc->masterclk | V_AUTO_SYNC;
1483 HFC_outb(hc, R_ST_SYNC, hc->hw.r_st_sync);
1484 }
1485
1486 /* setting misc irq */
1487 HFC_outb(hc, R_IRQMSK_MISC, hc->hw.r_irqmsk_misc);
1488 if (debug & DEBUG_HFCMULTI_INIT)
1489 printk(KERN_DEBUG "r_irqmsk_misc.2: 0x%x\n",
1490 hc->hw.r_irqmsk_misc);
1491
1492 /* RAM access test */
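	/*
	 * The loops below write the pattern (i * 3) & 0xff to the first 256
	 * RAM bytes and read it back. The read path does a dummy read of
	 * R_RAM_DATA and then fetches the value from R_INT_DATA, which is
	 * apparently how delayed RAM reads are retrieved on this chip.
	 */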
1493 HFC_outb(hc, R_RAM_ADDR0, 0);
1494 HFC_outb(hc, R_RAM_ADDR1, 0);
1495 HFC_outb(hc, R_RAM_ADDR2, 0);
1496 for (i = 0; i < 256; i++) {
1497 HFC_outb_nodebug(hc, R_RAM_ADDR0, i);
1498 HFC_outb_nodebug(hc, R_RAM_DATA, ((i*3)&0xff));
1499 }
1500 for (i = 0; i < 256; i++) {
1501 HFC_outb_nodebug(hc, R_RAM_ADDR0, i);
1502 HFC_inb_nodebug(hc, R_RAM_DATA);
1503 rval = HFC_inb_nodebug(hc, R_INT_DATA);
1504 if (rval != ((i * 3) & 0xff)) {
1505 printk(KERN_DEBUG
1506 "addr:%x val:%x should:%x\n", i, rval,
1507 (i * 3) & 0xff);
1508 err++;
1509 }
1510 }
1511 if (err) {
1512 printk(KERN_DEBUG "aborting - %d RAM access errors\n", err);
1513 err = -EIO;
1514 goto out;
1515 }
1516
1517 if (debug & DEBUG_HFCMULTI_INIT)
1518 printk(KERN_DEBUG "%s: done\n", __func__);
1519out:
1520 spin_unlock_irqrestore(&hc->lock, flags);
1521 return err;
1522}
1523
1524
1525/*
1526 * control the watchdog
1527 */
1528static void
1529hfcmulti_watchdog(struct hfc_multi *hc)
1530{
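	/*
	 * Called from the timer interrupt path; roughly every tenth call the
	 * watchdog GPIO line is toggled between V_GPIO_OUT2 and V_GPIO_OUT3,
	 * which presumably keeps the external watchdog circuit retriggered.
	 */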
1531 hc->wdcount++;
1532
1533 if (hc->wdcount > 10) {
1534 hc->wdcount = 0;
1535 hc->wdbyte = hc->wdbyte == V_GPIO_OUT2 ?
1536 V_GPIO_OUT3 : V_GPIO_OUT2;
1537
1538 /* printk("Sending Watchdog Kill %x\n",hc->wdbyte); */
1539 HFC_outb(hc, R_GPIO_EN0, V_GPIO_EN2 | V_GPIO_EN3);
1540 HFC_outb(hc, R_GPIO_OUT0, hc->wdbyte);
1541 }
1542}
1543
1544
1545
1546/*
1547 * output leds
1548 */
1549static void
1550hfcmulti_leds(struct hfc_multi *hc)
1551{
1552 unsigned long lled;
1553 unsigned long leddw;
1554 int i, state, active, leds;
1555 struct dchannel *dch;
1556 int led[4];
1557
1558 hc->ledcount += poll;
1559 if (hc->ledcount > 4096) {
1560 hc->ledcount -= 4096;
1561 hc->ledstate = 0xAFFEAFFE;
1562 }
1563
1564 switch (hc->leds) {
1565 case 1: /* HFC-E1 OEM */
1566 /* 2 red blinking: NT mode deactivate
1567 * 2 red steady: TE mode deactivate
1568 * left green: L1 active
1569 * left red: frame sync, but no L1
1570 * right green: L2 active
1571 */
1572 if (hc->chan[hc->dslot].sync != 2) { /* no frame sync */
1573 if (hc->chan[hc->dslot].dch->dev.D.protocol
1574 != ISDN_P_NT_E1) {
1575 led[0] = 1;
1576 led[1] = 1;
1577 } else if (hc->ledcount>>11) {
1578 led[0] = 1;
1579 led[1] = 1;
1580 } else {
1581 led[0] = 0;
1582 led[1] = 0;
1583 }
1584 led[2] = 0;
1585 led[3] = 0;
1586 } else { /* with frame sync */
1587 /* TODO make it work */
1588 led[0] = 0;
1589 led[1] = 0;
1590 led[2] = 0;
1591 led[3] = 1;
1592 }
1593 leds = (led[0] | (led[1]<<2) | (led[2]<<1) | (led[3]<<3))^0xF;
1594 /* leds are inverted */
1595 if (leds != (int)hc->ledstate) {
1596 HFC_outb_nodebug(hc, R_GPIO_OUT1, leds);
1597 hc->ledstate = leds;
1598 }
1599 break;
1600
1601 case 2: /* HFC-4S OEM */
1602 /* red blinking = PH_DEACTIVATE NT Mode
1603 * red steady = PH_DEACTIVATE TE Mode
1604 * green steady = PH_ACTIVATE
1605 */
1606 for (i = 0; i < 4; i++) {
1607 state = 0;
1608 active = -1;
1609 dch = hc->chan[(i << 2) | 2].dch;
1610 if (dch) {
1611 state = dch->state;
1612 if (dch->dev.D.protocol == ISDN_P_NT_S0)
1613 active = 3;
1614 else
1615 active = 7;
1616 }
1617 if (state) {
1618 if (state == active) {
1619 led[i] = 1; /* led green */
1620 } else
1621 if (dch->dev.D.protocol == ISDN_P_TE_S0)
1622 /* TE mode: led red */
1623 led[i] = 2;
1624 else
1625 if (hc->ledcount>>11)
1626 /* led red */
1627 led[i] = 2;
1628 else
1629 /* led off */
1630 led[i] = 0;
1631 } else
1632 led[i] = 0; /* led off */
1633 }
1634 if (test_bit(HFC_CHIP_B410P, &hc->chip)) {
1635 leds = 0;
1636 for (i = 0; i < 4; i++) {
1637 if (led[i] == 1) {
1638 /*green*/
1639 leds |= (0x2 << (i * 2));
1640 } else if (led[i] == 2) {
1641 /*red*/
1642 leds |= (0x1 << (i * 2));
1643 }
1644 }
1645 if (leds != (int)hc->ledstate) {
1646 vpm_out(hc, 0, 0x1a8 + 3, leds);
1647 hc->ledstate = leds;
1648 }
1649 } else {
1650 leds = ((led[3] > 0) << 0) | ((led[1] > 0) << 1) |
1651 ((led[0] > 0) << 2) | ((led[2] > 0) << 3) |
1652 ((led[3] & 1) << 4) | ((led[1] & 1) << 5) |
1653 ((led[0] & 1) << 6) | ((led[2] & 1) << 7);
1654 if (leds != (int)hc->ledstate) {
1655 HFC_outb_nodebug(hc, R_GPIO_EN1, leds & 0x0F);
1656 HFC_outb_nodebug(hc, R_GPIO_OUT1, leds >> 4);
1657 hc->ledstate = leds;
1658 }
1659 }
1660 break;
1661
1662 case 3: /* HFC 1S/2S Beronet */
1663 /* red blinking = PH_DEACTIVATE NT Mode
1664 * red steady = PH_DEACTIVATE TE Mode
1665 * green steady = PH_ACTIVATE
1666 */
1667 for (i = 0; i < 2; i++) {
1668 state = 0;
1669 active = -1;
1670 dch = hc->chan[(i << 2) | 2].dch;
1671 if (dch) {
1672 state = dch->state;
1673 if (dch->dev.D.protocol == ISDN_P_NT_S0)
1674 active = 3;
1675 else
1676 active = 7;
1677 }
1678 if (state) {
1679 if (state == active) {
1680 led[i] = 1; /* led green */
1681 } else
1682 if (dch->dev.D.protocol == ISDN_P_TE_S0)
1683 /* TE mode: led red */
1684 led[i] = 2;
1685 else
1686 if (hc->ledcount >> 11)
1687 /* led red */
1688 led[i] = 2;
1689 else
1690 /* led off */
1691 led[i] = 0;
1692 } else
1693 led[i] = 0; /* led off */
1694 }
1695
1696
1697 leds = (led[0] > 0) | ((led[1] > 0)<<1) | ((led[0]&1)<<2)
1698 | ((led[1]&1)<<3);
1699 if (leds != (int)hc->ledstate) {
1700 HFC_outb_nodebug(hc, R_GPIO_EN1,
1701 ((led[0] > 0) << 2) | ((led[1] > 0) << 3));
1702 HFC_outb_nodebug(hc, R_GPIO_OUT1,
1703 ((led[0] & 1) << 2) | ((led[1] & 1) << 3));
1704 hc->ledstate = leds;
1705 }
1706 break;
1707 case 8: /* HFC 8S+ Beronet */
1708 lled = 0;
1709
1710 for (i = 0; i < 8; i++) {
1711 state = 0;
1712 active = -1;
1713 dch = hc->chan[(i << 2) | 2].dch;
1714 if (dch) {
1715 state = dch->state;
1716 if (dch->dev.D.protocol == ISDN_P_NT_S0)
1717 active = 3;
1718 else
1719 active = 7;
1720 }
1721 if (state) {
1722 if (state == active) {
1723 lled |= 0 << i;
1724 } else
1725 if (hc->ledcount >> 11)
1726 lled |= 0 << i;
1727 else
1728 lled |= 1 << i;
1729 } else
1730 lled |= 1 << i;
1731 }
1732 leddw = lled << 24 | lled << 16 | lled << 8 | lled;
1733 if (leddw != hc->ledstate) {
1734 /* HFC_outb(hc, R_BRG_PCM_CFG, 1);
1735			   HFC_outb(hc, R_BRG_PCM_CFG, (0x0 << 6) | 0x3); */
1736 /* was _io before */
1737 HFC_outb_nodebug(hc, R_BRG_PCM_CFG, 1 | V_PCM_CLK);
1738 outw(0x4000, hc->pci_iobase + 4);
1739 outl(leddw, hc->pci_iobase);
1740 HFC_outb_nodebug(hc, R_BRG_PCM_CFG, V_PCM_CLK);
1741 hc->ledstate = leddw;
1742 }
1743 break;
1744 }
1745}
1746/*
1747 * read dtmf coefficients
1748 */
1749
1750static void
1751hfcmulti_dtmf(struct hfc_multi *hc)
1752{
1753 s32 *coeff;
1754 u_int mantissa;
1755 int co, ch;
1756 struct bchannel *bch = NULL;
1757 u8 exponent;
1758 int dtmf = 0;
1759 int addr;
1760 u16 w_float;
1761 struct sk_buff *skb;
1762 struct mISDNhead *hh;
1763
1764 if (debug & DEBUG_HFCMULTI_DTMF)
1765 printk(KERN_DEBUG "%s: dtmf detection irq\n", __func__);
1766 for (ch = 0; ch <= 31; ch++) {
1767 /* only process enabled B-channels */
1768 bch = hc->chan[ch].bch;
1769 if (!bch)
1770 continue;
1771 if (!hc->created[hc->chan[ch].port])
1772 continue;
1773 if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
1774 continue;
1775 if (debug & DEBUG_HFCMULTI_DTMF)
1776 printk(KERN_DEBUG "%s: dtmf channel %d:",
1777 __func__, ch);
1778 coeff = &(hc->chan[ch].coeff[hc->chan[ch].coeff_count * 16]);
1779 dtmf = 1;
1780 for (co = 0; co < 8; co++) {
1781 /* read W(n-1) coefficient */
1782 addr = hc->DTMFbase + ((co<<7) | (ch<<2));
1783 HFC_outb_nodebug(hc, R_RAM_ADDR0, addr);
1784 HFC_outb_nodebug(hc, R_RAM_ADDR1, addr>>8);
1785 HFC_outb_nodebug(hc, R_RAM_ADDR2, (addr>>16)
1786 | V_ADDR_INC);
1787 w_float = HFC_inb_nodebug(hc, R_RAM_DATA);
1788 w_float |= (HFC_inb_nodebug(hc, R_RAM_DATA) << 8);
1789 if (debug & DEBUG_HFCMULTI_DTMF)
1790 printk(" %04x", w_float);
1791
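			/*
			 * As decoded below, the 16-bit RAM word is a
			 * pseudo-float: bit 15 is the sign, bits 14..12 the
			 * exponent and bits 11..0 the mantissa, with a hidden
			 * mantissa bit added when the exponent is non-zero.
			 * Worked example for a positive value: w_float =
			 * 0x2345 -> exponent 2, mantissa 0x345, hidden bit
			 * gives 0x1345, shifted left by (2 - 1) = 0x268a.
			 */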
1792 /* decode float (see chip doc) */
1793 mantissa = w_float & 0x0fff;
1794 if (w_float & 0x8000)
1795 mantissa |= 0xfffff000;
1796 exponent = (w_float>>12) & 0x7;
1797 if (exponent) {
1798 mantissa ^= 0x1000;
1799 mantissa <<= (exponent-1);
1800 }
1801
1802 /* store coefficient */
1803 coeff[co<<1] = mantissa;
1804
1805 /* read W(n) coefficient */
1806 w_float = HFC_inb_nodebug(hc, R_RAM_DATA);
1807 w_float |= (HFC_inb_nodebug(hc, R_RAM_DATA) << 8);
1808 if (debug & DEBUG_HFCMULTI_DTMF)
1809 printk(" %04x", w_float);
1810
1811 /* decode float (see chip doc) */
1812 mantissa = w_float & 0x0fff;
1813 if (w_float & 0x8000)
1814 mantissa |= 0xfffff000;
1815 exponent = (w_float>>12) & 0x7;
1816 if (exponent) {
1817 mantissa ^= 0x1000;
1818 mantissa <<= (exponent-1);
1819 }
1820
1821 /* store coefficient */
1822 coeff[(co<<1)|1] = mantissa;
1823 }
1824 if (debug & DEBUG_HFCMULTI_DTMF)
1825 printk("%s: DTMF ready %08x %08x %08x %08x "
1826 "%08x %08x %08x %08x\n", __func__,
1827 coeff[0], coeff[1], coeff[2], coeff[3],
1828 coeff[4], coeff[5], coeff[6], coeff[7]);
1829 hc->chan[ch].coeff_count++;
1830 if (hc->chan[ch].coeff_count == 8) {
1831 hc->chan[ch].coeff_count = 0;
1832 skb = mI_alloc_skb(512, GFP_ATOMIC);
1833 if (!skb) {
1834 printk(KERN_WARNING "%s: No memory for skb\n",
1835 __func__);
1836 continue;
1837 }
1838 hh = mISDN_HEAD_P(skb);
1839 hh->prim = PH_CONTROL_IND;
1840 hh->id = DTMF_HFC_COEF;
1841 memcpy(skb_put(skb, 512), hc->chan[ch].coeff, 512);
1842 recv_Bchannel_skb(bch, skb);
1843 }
1844 }
1845
1846 /* restart DTMF processing */
1847 hc->dtmf = dtmf;
1848 if (dtmf)
1849 HFC_outb_nodebug(hc, R_DTMF, hc->hw.r_dtmf | V_RST_DTMF);
1850}
1851
1852
1853/*
1854 * fill fifo as much as possible
1855 */
1856
1857static void
1858hfcmulti_tx(struct hfc_multi *hc, int ch)
1859{
1860 int i, ii, temp, len = 0;
1861 int Zspace, z1, z2; /* must be int for calculation */
1862 int Fspace, f1, f2;
1863 u_char *d;
1864 int *txpending, slot_tx;
1865 struct bchannel *bch;
1866 struct dchannel *dch;
1867 struct sk_buff **sp = NULL;
1868 int *idxp;
1869
1870 bch = hc->chan[ch].bch;
1871 dch = hc->chan[ch].dch;
1872 if ((!dch) && (!bch))
1873 return;
1874
1875 txpending = &hc->chan[ch].txpending;
1876 slot_tx = hc->chan[ch].slot_tx;
1877 if (dch) {
1878 if (!test_bit(FLG_ACTIVE, &dch->Flags))
1879 return;
1880 sp = &dch->tx_skb;
1881 idxp = &dch->tx_idx;
1882 } else {
1883 if (!test_bit(FLG_ACTIVE, &bch->Flags))
1884 return;
1885 sp = &bch->tx_skb;
1886 idxp = &bch->tx_idx;
1887 }
1888 if (*sp)
1889 len = (*sp)->len;
1890
1891 if ((!len) && *txpending != 1)
1892 return; /* no data */
1893
1894 if (test_bit(HFC_CHIP_B410P, &hc->chip) &&
1895 (hc->chan[ch].protocol == ISDN_P_B_RAW) &&
1896 (hc->chan[ch].slot_rx < 0) &&
1897 (hc->chan[ch].slot_tx < 0))
1898 HFC_outb_nodebug(hc, R_FIFO, 0x20 | (ch << 1));
1899 else
1900 HFC_outb_nodebug(hc, R_FIFO, ch << 1);
1901 HFC_wait_nodebug(hc);
1902
1903 if (*txpending == 2) {
1904 /* reset fifo */
1905 HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_RES_F);
1906 HFC_wait_nodebug(hc);
1907 HFC_outb(hc, A_SUBCH_CFG, 0);
1908 *txpending = 1;
1909 }
1910next_frame:
1911 if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
1912 f1 = HFC_inb_nodebug(hc, A_F1);
1913 f2 = HFC_inb_nodebug(hc, A_F2);
1914 while (f2 != (temp = HFC_inb_nodebug(hc, A_F2))) {
1915 if (debug & DEBUG_HFCMULTI_FIFO)
1916 printk(KERN_DEBUG
1917 "%s(card %d): reread f2 because %d!=%d\n",
1918 __func__, hc->id + 1, temp, f2);
1919 f2 = temp; /* repeat until F2 is equal */
1920 }
1921 Fspace = f2 - f1 - 1;
1922 if (Fspace < 0)
1923 Fspace += hc->Flen;
1924 /*
1925 * Old FIFO handling doesn't give us the current Z2 read
1926 * pointer, so we cannot send the next frame before the fifo
1927		 * is empty. It makes no difference except for slightly
1928		 * lower performance.
1929 */
1930 if (test_bit(HFC_CHIP_REVISION0, &hc->chip)) {
1931 if (f1 != f2)
1932 Fspace = 0;
1933 else
1934 Fspace = 1;
1935 }
1936 /* one frame only for ST D-channels, to allow resending */
1937 if (hc->type != 1 && dch) {
1938 if (f1 != f2)
1939 Fspace = 0;
1940 }
1941 /* F-counter full condition */
1942 if (Fspace == 0)
1943 return;
1944 }
1945 z1 = HFC_inw_nodebug(hc, A_Z1) - hc->Zmin;
1946 z2 = HFC_inw_nodebug(hc, A_Z2) - hc->Zmin;
1947 while (z2 != (temp = (HFC_inw_nodebug(hc, A_Z2) - hc->Zmin))) {
1948 if (debug & DEBUG_HFCMULTI_FIFO)
1949 printk(KERN_DEBUG "%s(card %d): reread z2 because "
1950 "%d!=%d\n", __func__, hc->id + 1, temp, z2);
1951		z2 = temp; /* repeat until Z2 is equal */
1952 }
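	/*
	 * Free TX space in the FIFO is the distance from the write pointer
	 * (z1) to the read pointer (z2), modulo the FIFO length, minus a
	 * 4-byte safety margin so the pointers cannot overrun each other.
	 * For transparent B-channels it is additionally capped at max_trans
	 * below.
	 */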
1953 Zspace = z2 - z1;
1954 if (Zspace <= 0)
1955 Zspace += hc->Zlen;
1956	Zspace -= 4; /* keep the FIFO from getting too full, so pointers will not overrun */
1957	/* fill transparent data only up to the maximum transparent load (minus 4) */
1958 if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
1959 Zspace = Zspace - hc->Zlen + hc->max_trans;
1960 if (Zspace <= 0) /* no space of 4 bytes */
1961 return;
1962
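	/*
	 * For transparent B-channels that are routed to a PCM time slot,
	 * the slot is temporarily disconnected below while FIFO data is
	 * pending (so the FIFO data is transmitted instead of the PCM data)
	 * and reconnected once the FIFO has drained.
	 */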
1963 /* if no data */
1964 if (!len) {
1965 if (z1 == z2) { /* empty */
1966 /* if done with FIFO audio data during PCM connection */
1967 if (bch && (!test_bit(FLG_HDLC, &bch->Flags)) &&
1968 *txpending && slot_tx >= 0) {
1969 if (debug & DEBUG_HFCMULTI_MODE)
1970 printk(KERN_DEBUG
1971 "%s: reconnecting PCM due to no "
1972 "more FIFO data: channel %d "
1973 "slot_tx %d\n",
1974 __func__, ch, slot_tx);
1975 /* connect slot */
1976 HFC_outb(hc, A_CON_HDLC, 0xc0 | 0x00 |
1977 V_HDLC_TRP | V_IFF);
1978 HFC_outb_nodebug(hc, R_FIFO, ch<<1 | 1);
1979 HFC_wait_nodebug(hc);
1980 HFC_outb(hc, A_CON_HDLC, 0xc0 | 0x00 |
1981 V_HDLC_TRP | V_IFF);
1982 HFC_outb_nodebug(hc, R_FIFO, ch<<1);
1983 HFC_wait_nodebug(hc);
1984 }
1985 *txpending = 0;
1986 }
1987 return; /* no data */
1988 }
1989
1990 /* if audio data and connected slot */
1991 if (bch && (!test_bit(FLG_HDLC, &bch->Flags)) && (!*txpending)
1992 && slot_tx >= 0) {
1993 if (debug & DEBUG_HFCMULTI_MODE)
1994 printk(KERN_DEBUG "%s: disconnecting PCM due to "
1995 "FIFO data: channel %d slot_tx %d\n",
1996 __func__, ch, slot_tx);
1997 /* disconnect slot */
1998 HFC_outb(hc, A_CON_HDLC, 0x80 | 0x00 | V_HDLC_TRP | V_IFF);
1999 HFC_outb_nodebug(hc, R_FIFO, ch<<1 | 1);
2000 HFC_wait_nodebug(hc);
2001 HFC_outb(hc, A_CON_HDLC, 0x80 | 0x00 | V_HDLC_TRP | V_IFF);
2002 HFC_outb_nodebug(hc, R_FIFO, ch<<1);
2003 HFC_wait_nodebug(hc);
2004 }
2005 *txpending = 1;
2006
2007 /* show activity */
2008 hc->activity[hc->chan[ch].port] = 1;
2009
2010 /* fill fifo to what we have left */
2011 ii = len;
2012 if (dch || test_bit(FLG_HDLC, &bch->Flags))
2013 temp = 1;
2014 else
2015 temp = 0;
2016 i = *idxp;
2017 d = (*sp)->data + i;
2018 if (ii - i > Zspace)
2019 ii = Zspace + i;
2020 if (debug & DEBUG_HFCMULTI_FIFO)
2021 printk(KERN_DEBUG "%s(card %d): fifo(%d) has %d bytes space "
2022 "left (z1=%04x, z2=%04x) sending %d of %d bytes %s\n",
2023 __func__, hc->id + 1, ch, Zspace, z1, z2, ii-i, len-i,
2024 temp ? "HDLC":"TRANS");
2025
2026
2027 /* Have to prep the audio data */
2028 hc->write_fifo(hc, d, ii - i);
2029 *idxp = ii;
2030
2031 /* if not all data has been written */
2032 if (ii != len) {
2033 /* NOTE: fifo is started by the calling function */
2034 return;
2035 }
2036
2037 /* if all data has been written, terminate frame */
2038 if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
2039 /* increment f-counter */
2040 HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_INC_F);
2041 HFC_wait_nodebug(hc);
2042 }
2043
2044	/* send confirm, since get_next_bframe will not do it with trans */
2045 if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
2046 confirm_Bsend(bch);
2047
2048 /* check for next frame */
2049 dev_kfree_skb(*sp);
2050 if (bch && get_next_bframe(bch)) { /* hdlc is confirmed here */
2051 len = (*sp)->len;
2052 goto next_frame;
2053 }
2054 if (dch && get_next_dframe(dch)) {
2055 len = (*sp)->len;
2056 goto next_frame;
2057 }
2058
2059 /*
2060 * now we have no more data, so in case of transparent,
2061 * we set the last byte in fifo to 'silence' in case we will get
2062 * no more data at all. this prevents sending an undefined value.
2063 */
2064 if (bch && test_bit(FLG_TRANSPARENT, &bch->Flags))
2065 HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
2066}
2067
2068
2069/* NOTE: only called if E1 card is in active state */
2070static void
2071hfcmulti_rx(struct hfc_multi *hc, int ch)
2072{
2073 int temp;
2074 int Zsize, z1, z2 = 0; /* = 0, to make GCC happy */
2075 int f1 = 0, f2 = 0; /* = 0, to make GCC happy */
2076 int again = 0;
2077 struct bchannel *bch;
2078 struct dchannel *dch;
2079 struct sk_buff *skb, **sp = NULL;
2080 int maxlen;
2081
2082 bch = hc->chan[ch].bch;
2083 dch = hc->chan[ch].dch;
2084 if ((!dch) && (!bch))
2085 return;
2086 if (dch) {
2087 if (!test_bit(FLG_ACTIVE, &dch->Flags))
2088 return;
2089 sp = &dch->rx_skb;
2090 maxlen = dch->maxlen;
2091 } else {
2092 if (!test_bit(FLG_ACTIVE, &bch->Flags))
2093 return;
2094 sp = &bch->rx_skb;
2095 maxlen = bch->maxlen;
2096 }
2097next_frame:
2098	/* on the first pass AND before getting the next valid frame, R_FIFO
2099	   must be written to. */
2100 if (test_bit(HFC_CHIP_B410P, &hc->chip) &&
2101 (hc->chan[ch].protocol == ISDN_P_B_RAW) &&
2102 (hc->chan[ch].slot_rx < 0) &&
2103 (hc->chan[ch].slot_tx < 0))
2104 HFC_outb_nodebug(hc, R_FIFO, 0x20 | (ch<<1) | 1);
2105 else
2106 HFC_outb_nodebug(hc, R_FIFO, (ch<<1)|1);
2107 HFC_wait_nodebug(hc);
2108
2109 /* ignore if rx is off BUT change fifo (above) to start pending TX */
2110 if (hc->chan[ch].rx_off)
2111 return;
2112
2113 if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
2114 f1 = HFC_inb_nodebug(hc, A_F1);
2115 while (f1 != (temp = HFC_inb_nodebug(hc, A_F1))) {
2116 if (debug & DEBUG_HFCMULTI_FIFO)
2117 printk(KERN_DEBUG
2118 "%s(card %d): reread f1 because %d!=%d\n",
2119 __func__, hc->id + 1, temp, f1);
2120 f1 = temp; /* repeat until F1 is equal */
2121 }
2122 f2 = HFC_inb_nodebug(hc, A_F2);
2123 }
2124 z1 = HFC_inw_nodebug(hc, A_Z1) - hc->Zmin;
2125 while (z1 != (temp = (HFC_inw_nodebug(hc, A_Z1) - hc->Zmin))) {
2126 if (debug & DEBUG_HFCMULTI_FIFO)
2127			printk(KERN_DEBUG "%s(card %d): reread z1 because "
2128			    "%d!=%d\n", __func__, hc->id + 1, temp, z1);
2129 z1 = temp; /* repeat until Z1 is equal */
2130 }
2131 z2 = HFC_inw_nodebug(hc, A_Z2) - hc->Zmin;
2132 Zsize = z1 - z2;
2133 if ((dch || test_bit(FLG_HDLC, &bch->Flags)) && f1 != f2)
2134 /* complete hdlc frame */
2135 Zsize++;
2136 if (Zsize < 0)
2137 Zsize += hc->Zlen;
2138 /* if buffer is empty */
2139 if (Zsize <= 0)
2140 return;
2141
2142 if (*sp == NULL) {
2143 *sp = mI_alloc_skb(maxlen + 3, GFP_ATOMIC);
2144 if (*sp == NULL) {
2145 printk(KERN_DEBUG "%s: No mem for rx_skb\n",
2146 __func__);
2147 return;
2148 }
2149 }
2150 /* show activity */
2151 hc->activity[hc->chan[ch].port] = 1;
2152
2153 /* empty fifo with what we have */
2154 if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
2155 if (debug & DEBUG_HFCMULTI_FIFO)
2156 printk(KERN_DEBUG "%s(card %d): fifo(%d) reading %d "
2157 "bytes (z1=%04x, z2=%04x) HDLC %s (f1=%d, f2=%d) "
2158 "got=%d (again %d)\n", __func__, hc->id + 1, ch,
2159 Zsize, z1, z2, (f1 == f2) ? "fragment" : "COMPLETE",
2160 f1, f2, Zsize + (*sp)->len, again);
2161 /* HDLC */
2162 if ((Zsize + (*sp)->len) > (maxlen + 3)) {
2163 if (debug & DEBUG_HFCMULTI_FIFO)
2164 printk(KERN_DEBUG
2165 "%s(card %d): hdlc-frame too large.\n",
2166 __func__, hc->id + 1);
2167 skb_trim(*sp, 0);
2168 HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_RES_F);
2169 HFC_wait_nodebug(hc);
2170 return;
2171 }
2172
2173 hc->read_fifo(hc, skb_put(*sp, Zsize), Zsize);
2174
2175 if (f1 != f2) {
2176 /* increment Z2,F2-counter */
2177 HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_INC_F);
2178 HFC_wait_nodebug(hc);
2179 /* check size */
2180 if ((*sp)->len < 4) {
2181 if (debug & DEBUG_HFCMULTI_FIFO)
2182 printk(KERN_DEBUG
2183 "%s(card %d): Frame below minimum "
2184 "size\n", __func__, hc->id + 1);
2185 skb_trim(*sp, 0);
2186 goto next_frame;
2187 }
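			/*
			 * The chip apparently appends two CRC bytes plus a
			 * status byte to each HDLC frame; a non-zero status
			 * byte (checked below) indicates a CRC error, and all
			 * three bytes are trimmed off before delivery.
			 */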
2188 /* there is at least one complete frame, check crc */
2189 if ((*sp)->data[(*sp)->len - 1]) {
2190 if (debug & DEBUG_HFCMULTI_CRC)
2191 printk(KERN_DEBUG
2192 "%s: CRC-error\n", __func__);
2193 skb_trim(*sp, 0);
2194 goto next_frame;
2195 }
2196 skb_trim(*sp, (*sp)->len - 3);
2197 if ((*sp)->len < MISDN_COPY_SIZE) {
2198 skb = *sp;
2199 *sp = mI_alloc_skb(skb->len, GFP_ATOMIC);
2200 if (*sp) {
2201 memcpy(skb_put(*sp, skb->len),
2202 skb->data, skb->len);
2203 skb_trim(skb, 0);
2204 } else {
2205 printk(KERN_DEBUG "%s: No mem\n",
2206 __func__);
2207 *sp = skb;
2208 skb = NULL;
2209 }
2210 } else {
2211 skb = NULL;
2212 }
2213 if (debug & DEBUG_HFCMULTI_FIFO) {
2214 printk(KERN_DEBUG "%s(card %d):",
2215 __func__, hc->id + 1);
2216 temp = 0;
2217 while (temp < (*sp)->len)
2218 printk(" %02x", (*sp)->data[temp++]);
2219 printk("\n");
2220 }
2221 if (dch)
2222 recv_Dchannel(dch);
2223 else
2224 recv_Bchannel(bch);
2225 *sp = skb;
2226 again++;
2227 goto next_frame;
2228 }
2229 /* there is an incomplete frame */
2230 } else {
2231 /* transparent */
2232 if (Zsize > skb_tailroom(*sp))
2233 Zsize = skb_tailroom(*sp);
2234 hc->read_fifo(hc, skb_put(*sp, Zsize), Zsize);
2235 if (((*sp)->len) < MISDN_COPY_SIZE) {
2236 skb = *sp;
2237 *sp = mI_alloc_skb(skb->len, GFP_ATOMIC);
2238 if (*sp) {
2239 memcpy(skb_put(*sp, skb->len),
2240 skb->data, skb->len);
2241 skb_trim(skb, 0);
2242 } else {
2243 printk(KERN_DEBUG "%s: No mem\n", __func__);
2244 *sp = skb;
2245 skb = NULL;
2246 }
2247 } else {
2248 skb = NULL;
2249 }
2250 if (debug & DEBUG_HFCMULTI_FIFO)
2251 printk(KERN_DEBUG
2252 "%s(card %d): fifo(%d) reading %d bytes "
2253 "(z1=%04x, z2=%04x) TRANS\n",
2254 __func__, hc->id + 1, ch, Zsize, z1, z2);
2255 /* only bch is transparent */
2256 recv_Bchannel(bch);
2257 *sp = skb;
2258 }
2259}
2260
2261
2262/*
2263 * Interrupt handler
2264 */
2265static void
2266signal_state_up(struct dchannel *dch, int info, char *msg)
2267{
2268 struct sk_buff *skb;
2269 int id, data = info;
2270
2271 if (debug & DEBUG_HFCMULTI_STATE)
2272 printk(KERN_DEBUG "%s: %s\n", __func__, msg);
2273
2274 id = TEI_SAPI | (GROUP_TEI << 8); /* manager address */
2275
2276 skb = _alloc_mISDN_skb(MPH_INFORMATION_IND, id, sizeof(data), &data,
2277 GFP_ATOMIC);
2278 if (!skb)
2279 return;
2280 recv_Dchannel_skb(dch, skb);
2281}
2282
2283static inline void
2284handle_timer_irq(struct hfc_multi *hc)
2285{
2286 int ch, temp;
2287 struct dchannel *dch;
2288 u_long flags;
2289
2290 /* process queued resync jobs */
2291 if (hc->e1_resync) {
2292		/* lock, so e1_resync does not get changed */
2293 spin_lock_irqsave(&HFClock, flags);
2294 if (hc->e1_resync & 1) {
2295 if (debug & DEBUG_HFCMULTI_PLXSD)
2296 printk(KERN_DEBUG "Enable SYNC_I\n");
2297 HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC);
2298 /* disable JATT, if RX_SYNC is set */
2299 if (test_bit(HFC_CHIP_RX_SYNC, &hc->chip))
2300 HFC_outb(hc, R_SYNC_OUT, V_SYNC_E1_RX);
2301 }
2302 if (hc->e1_resync & 2) {
2303 if (debug & DEBUG_HFCMULTI_PLXSD)
2304 printk(KERN_DEBUG "Enable jatt PLL\n");
2305 HFC_outb(hc, R_SYNC_CTRL, V_SYNC_OFFS);
2306 }
2307 if (hc->e1_resync & 4) {
2308 if (debug & DEBUG_HFCMULTI_PLXSD)
2309 printk(KERN_DEBUG
2310 "Enable QUARTZ for HFC-E1\n");
2311 /* set jatt to quartz */
2312 HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC
2313 | V_JATT_OFF);
2314 /* switch to JATT, in case it is not already */
2315 HFC_outb(hc, R_SYNC_OUT, 0);
2316 }
2317 hc->e1_resync = 0;
2318 spin_unlock_irqrestore(&HFClock, flags);
2319 }
2320
2321 if (hc->type != 1 || hc->e1_state == 1)
2322 for (ch = 0; ch <= 31; ch++) {
2323 if (hc->created[hc->chan[ch].port]) {
2324 hfcmulti_tx(hc, ch);
2325 /* fifo is started when switching to rx-fifo */
2326 hfcmulti_rx(hc, ch);
2327 if (hc->chan[ch].dch &&
2328 hc->chan[ch].nt_timer > -1) {
2329 dch = hc->chan[ch].dch;
2330 if (!(--hc->chan[ch].nt_timer)) {
2331 schedule_event(dch,
2332 FLG_PHCHANGE);
2333 if (debug &
2334 DEBUG_HFCMULTI_STATE)
2335 printk(KERN_DEBUG
2336 "%s: nt_timer at "
2337 "state %x\n",
2338 __func__,
2339 dch->state);
2340 }
2341 }
2342 }
2343 }
2344 if (hc->type == 1 && hc->created[0]) {
2345 dch = hc->chan[hc->dslot].dch;
2346 if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) {
2347 /* LOS */
2348 temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_SIG_LOS;
2349 if (!temp && hc->chan[hc->dslot].los)
2350 signal_state_up(dch, L1_SIGNAL_LOS_ON,
2351 "LOS detected");
2352 if (temp && !hc->chan[hc->dslot].los)
2353 signal_state_up(dch, L1_SIGNAL_LOS_OFF,
2354 "LOS gone");
2355 hc->chan[hc->dslot].los = temp;
2356 }
2357 if (test_bit(HFC_CFG_REPORT_AIS, &hc->chan[hc->dslot].cfg)) {
2358 /* AIS */
2359 temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_AIS;
2360 if (!temp && hc->chan[hc->dslot].ais)
2361 signal_state_up(dch, L1_SIGNAL_AIS_ON,
2362 "AIS detected");
2363 if (temp && !hc->chan[hc->dslot].ais)
2364 signal_state_up(dch, L1_SIGNAL_AIS_OFF,
2365 "AIS gone");
2366 hc->chan[hc->dslot].ais = temp;
2367 }
2368 if (test_bit(HFC_CFG_REPORT_SLIP, &hc->chan[hc->dslot].cfg)) {
2369 /* SLIP */
2370 temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_RX;
2371 if (!temp && hc->chan[hc->dslot].slip_rx)
2372 signal_state_up(dch, L1_SIGNAL_SLIP_RX,
2373 " bit SLIP detected RX");
2374 hc->chan[hc->dslot].slip_rx = temp;
2375 temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_TX;
2376 if (!temp && hc->chan[hc->dslot].slip_tx)
2377 signal_state_up(dch, L1_SIGNAL_SLIP_TX,
2378 " bit SLIP detected TX");
2379 hc->chan[hc->dslot].slip_tx = temp;
2380 }
2381 if (test_bit(HFC_CFG_REPORT_RDI, &hc->chan[hc->dslot].cfg)) {
2382 /* RDI */
2383 temp = HFC_inb_nodebug(hc, R_RX_SL0_0) & V_A;
2384 if (!temp && hc->chan[hc->dslot].rdi)
2385 signal_state_up(dch, L1_SIGNAL_RDI_ON,
2386 "RDI detected");
2387 if (temp && !hc->chan[hc->dslot].rdi)
2388 signal_state_up(dch, L1_SIGNAL_RDI_OFF,
2389 "RDI gone");
2390 hc->chan[hc->dslot].rdi = temp;
2391 }
2392 temp = HFC_inb_nodebug(hc, R_JATT_DIR);
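		/*
		 * hc->chan[dslot].sync tracks the E1 sync state as used by
		 * the switch below: 0 = no clock sync, 1 = clock sync
		 * ((R_JATT_DIR & 0x60) == 0x60), 2 = clock and frame sync
		 * (additionally R_SYNC_STA == 0x27).
		 */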
2393 switch (hc->chan[hc->dslot].sync) {
2394 case 0:
2395 if ((temp & 0x60) == 0x60) {
2396 if (debug & DEBUG_HFCMULTI_SYNC)
2397 printk(KERN_DEBUG
2398 "%s: (id=%d) E1 now "
2399 "in clock sync\n",
2400 __func__, hc->id);
2401 HFC_outb(hc, R_RX_OFF,
2402 hc->chan[hc->dslot].jitter | V_RX_INIT);
2403 HFC_outb(hc, R_TX_OFF,
2404 hc->chan[hc->dslot].jitter | V_RX_INIT);
2405 hc->chan[hc->dslot].sync = 1;
2406 goto check_framesync;
2407 }
2408 break;
2409 case 1:
2410 if ((temp & 0x60) != 0x60) {
2411 if (debug & DEBUG_HFCMULTI_SYNC)
2412 printk(KERN_DEBUG
2413 "%s: (id=%d) E1 "
2414 "lost clock sync\n",
2415 __func__, hc->id);
2416 hc->chan[hc->dslot].sync = 0;
2417 break;
2418 }
2419check_framesync:
2420 temp = HFC_inb_nodebug(hc, R_SYNC_STA);
2421 if (temp == 0x27) {
2422 if (debug & DEBUG_HFCMULTI_SYNC)
2423 printk(KERN_DEBUG
2424 "%s: (id=%d) E1 "
2425 "now in frame sync\n",
2426 __func__, hc->id);
2427 hc->chan[hc->dslot].sync = 2;
2428 }
2429 break;
2430 case 2:
2431 if ((temp & 0x60) != 0x60) {
2432 if (debug & DEBUG_HFCMULTI_SYNC)
2433 printk(KERN_DEBUG
2434 "%s: (id=%d) E1 lost "
2435 "clock & frame sync\n",
2436 __func__, hc->id);
2437 hc->chan[hc->dslot].sync = 0;
2438 break;
2439 }
2440 temp = HFC_inb_nodebug(hc, R_SYNC_STA);
2441 if (temp != 0x27) {
2442 if (debug & DEBUG_HFCMULTI_SYNC)
2443 printk(KERN_DEBUG
2444 "%s: (id=%d) E1 "
2445 "lost frame sync\n",
2446 __func__, hc->id);
2447 hc->chan[hc->dslot].sync = 1;
2448 }
2449 break;
2450 }
2451 }
2452
2453 if (test_bit(HFC_CHIP_WATCHDOG, &hc->chip))
2454 hfcmulti_watchdog(hc);
2455
2456 if (hc->leds)
2457 hfcmulti_leds(hc);
2458}
2459
2460static void
2461ph_state_irq(struct hfc_multi *hc, u_char r_irq_statech)
2462{
2463 struct dchannel *dch;
2464 int ch;
2465 int active;
2466 u_char st_status, temp;
2467
2468 /* state machine */
2469 for (ch = 0; ch <= 31; ch++) {
2470 if (hc->chan[ch].dch) {
2471 dch = hc->chan[ch].dch;
2472 if (r_irq_statech & 1) {
2473 HFC_outb_nodebug(hc, R_ST_SEL,
2474 hc->chan[ch].port);
2475 /* undocumented: delay after R_ST_SEL */
2476 udelay(1);
2477 /* undocumented: status changes during read */
2478 st_status = HFC_inb_nodebug(hc, A_ST_RD_STATE);
2479 while (st_status != (temp =
2480 HFC_inb_nodebug(hc, A_ST_RD_STATE))) {
2481 if (debug & DEBUG_HFCMULTI_STATE)
2482 printk(KERN_DEBUG "%s: reread "
2483 "STATE because %d!=%d\n",
2484 __func__, temp,
2485 st_status);
2486 st_status = temp; /* repeat */
2487 }
2488
2489 /* Speech Design TE-sync indication */
2490 if (test_bit(HFC_CHIP_PLXSD, &hc->chip) &&
2491 dch->dev.D.protocol == ISDN_P_TE_S0) {
2492 if (st_status & V_FR_SYNC_ST)
2493 hc->syncronized |=
2494 (1 << hc->chan[ch].port);
2495 else
2496 hc->syncronized &=
2497 ~(1 << hc->chan[ch].port);
2498 }
2499 dch->state = st_status & 0x0f;
2500 if (dch->dev.D.protocol == ISDN_P_NT_S0)
2501 active = 3;
2502 else
2503 active = 7;
2504 if (dch->state == active) {
2505 HFC_outb_nodebug(hc, R_FIFO,
2506 (ch << 1) | 1);
2507 HFC_wait_nodebug(hc);
2508 HFC_outb_nodebug(hc,
2509 R_INC_RES_FIFO, V_RES_F);
2510 HFC_wait_nodebug(hc);
2511 dch->tx_idx = 0;
2512 }
2513 schedule_event(dch, FLG_PHCHANGE);
2514 if (debug & DEBUG_HFCMULTI_STATE)
2515 printk(KERN_DEBUG
2516 "%s: S/T newstate %x port %d\n",
2517 __func__, dch->state,
2518 hc->chan[ch].port);
2519 }
2520 r_irq_statech >>= 1;
2521 }
2522 }
2523 if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
2524 plxsd_checksync(hc, 0);
2525}
2526
2527static void
2528fifo_irq(struct hfc_multi *hc, int block)
2529{
2530 int ch, j;
2531 struct dchannel *dch;
2532 struct bchannel *bch;
2533 u_char r_irq_fifo_bl;
2534
2535 r_irq_fifo_bl = HFC_inb_nodebug(hc, R_IRQ_FIFO_BL0 + block);
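	/*
	 * Each R_IRQ_FIFO_BL register covers the four channels of this
	 * block: bit (2 * n) flags the TX fifo and bit (2 * n + 1) the RX
	 * fifo of channel (block * 4 + n), which is what the loop below
	 * walks.
	 */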
2536 j = 0;
2537 while (j < 8) {
2538 ch = (block << 2) + (j >> 1);
2539 dch = hc->chan[ch].dch;
2540 bch = hc->chan[ch].bch;
2541 if (((!dch) && (!bch)) || (!hc->created[hc->chan[ch].port])) {
2542 j += 2;
2543 continue;
2544 }
2545 if (dch && (r_irq_fifo_bl & (1 << j)) &&
2546 test_bit(FLG_ACTIVE, &dch->Flags)) {
2547 hfcmulti_tx(hc, ch);
2548 /* start fifo */
2549 HFC_outb_nodebug(hc, R_FIFO, 0);
2550 HFC_wait_nodebug(hc);
2551 }
2552 if (bch && (r_irq_fifo_bl & (1 << j)) &&
2553 test_bit(FLG_ACTIVE, &bch->Flags)) {
2554 hfcmulti_tx(hc, ch);
2555 /* start fifo */
2556 HFC_outb_nodebug(hc, R_FIFO, 0);
2557 HFC_wait_nodebug(hc);
2558 }
2559 j++;
2560 if (dch && (r_irq_fifo_bl & (1 << j)) &&
2561 test_bit(FLG_ACTIVE, &dch->Flags)) {
2562 hfcmulti_rx(hc, ch);
2563 }
2564 if (bch && (r_irq_fifo_bl & (1 << j)) &&
2565 test_bit(FLG_ACTIVE, &bch->Flags)) {
2566 hfcmulti_rx(hc, ch);
2567 }
2568 j++;
2569 }
2570}
2571
2572#ifdef IRQ_DEBUG
2573int irqsem;
2574#endif
2575static irqreturn_t
2576hfcmulti_interrupt(int intno, void *dev_id)
2577{
2578#ifdef IRQCOUNT_DEBUG
2579 static int iq1 = 0, iq2 = 0, iq3 = 0, iq4 = 0,
2580 iq5 = 0, iq6 = 0, iqcnt = 0;
2581#endif
2582 static int count;
2583 struct hfc_multi *hc = dev_id;
2584 struct dchannel *dch;
2585 u_char r_irq_statech, status, r_irq_misc, r_irq_oview;
2586 int i;
2587 u_short *plx_acc, wval;
2588 u_char e1_syncsta, temp;
2589 u_long flags;
2590
2591 if (!hc) {
2592 printk(KERN_ERR "HFC-multi: Spurious interrupt!\n");
2593 return IRQ_NONE;
2594 }
2595
2596 spin_lock(&hc->lock);
2597
2598#ifdef IRQ_DEBUG
2599 if (irqsem)
2600 printk(KERN_ERR "irq for card %d during irq from "
2601 "card %d, this is no bug.\n", hc->id + 1, irqsem);
2602 irqsem = hc->id + 1;
2603#endif
2604
2605 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
2606 spin_lock_irqsave(&plx_lock, flags);
2607 plx_acc = (u_short *)(hc->plx_membase + PLX_INTCSR);
2608 wval = readw(plx_acc);
2609 spin_unlock_irqrestore(&plx_lock, flags);
2610 if (!(wval & PLX_INTCSR_LINTI1_STATUS))
2611 goto irq_notforus;
2612 }
2613
2614 status = HFC_inb_nodebug(hc, R_STATUS);
2615 r_irq_statech = HFC_inb_nodebug(hc, R_IRQ_STATECH);
2616#ifdef IRQCOUNT_DEBUG
2617 if (r_irq_statech)
2618 iq1++;
2619 if (status & V_DTMF_STA)
2620 iq2++;
2621 if (status & V_LOST_STA)
2622 iq3++;
2623 if (status & V_EXT_IRQSTA)
2624 iq4++;
2625 if (status & V_MISC_IRQSTA)
2626 iq5++;
2627 if (status & V_FR_IRQSTA)
2628 iq6++;
2629 if (iqcnt++ > 5000) {
2630 printk(KERN_ERR "iq1:%x iq2:%x iq3:%x iq4:%x iq5:%x iq6:%x\n",
2631 iq1, iq2, iq3, iq4, iq5, iq6);
2632 iqcnt = 0;
2633 }
2634#endif
2635 if (!r_irq_statech &&
2636 !(status & (V_DTMF_STA | V_LOST_STA | V_EXT_IRQSTA |
2637 V_MISC_IRQSTA | V_FR_IRQSTA))) {
2638 /* irq is not for us */
2639 goto irq_notforus;
2640 }
2641 hc->irqcnt++;
2642 if (r_irq_statech) {
2643 if (hc->type != 1)
2644 ph_state_irq(hc, r_irq_statech);
2645 }
2646 if (status & V_EXT_IRQSTA)
2647 ; /* external IRQ */
2648 if (status & V_LOST_STA) {
2649 /* LOST IRQ */
2650 HFC_outb(hc, R_INC_RES_FIFO, V_RES_LOST); /* clear irq! */
2651 }
2652 if (status & V_MISC_IRQSTA) {
2653 /* misc IRQ */
2654 r_irq_misc = HFC_inb_nodebug(hc, R_IRQ_MISC);
2655 if (r_irq_misc & V_STA_IRQ) {
2656 if (hc->type == 1) {
2657 /* state machine */
2658 dch = hc->chan[hc->dslot].dch;
2659 e1_syncsta = HFC_inb_nodebug(hc, R_SYNC_STA);
2660 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)
2661 && hc->e1_getclock) {
2662 if (e1_syncsta & V_FR_SYNC_E1)
2663 hc->syncronized = 1;
2664 else
2665 hc->syncronized = 0;
2666 }
2667 /* undocumented: status changes during read */
2668 dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA);
2669 while (dch->state != (temp =
2670 HFC_inb_nodebug(hc, R_E1_RD_STA))) {
2671 if (debug & DEBUG_HFCMULTI_STATE)
2672 printk(KERN_DEBUG "%s: reread "
2673 "STATE because %d!=%d\n",
2674 __func__, temp,
2675 dch->state);
2676 dch->state = temp; /* repeat */
2677 }
2678 dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA)
2679 & 0x7;
2680 schedule_event(dch, FLG_PHCHANGE);
2681 if (debug & DEBUG_HFCMULTI_STATE)
2682 printk(KERN_DEBUG
2683 "%s: E1 (id=%d) newstate %x\n",
2684 __func__, hc->id, dch->state);
2685 if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
2686 plxsd_checksync(hc, 0);
2687 }
2688 }
2689 if (r_irq_misc & V_TI_IRQ)
2690 handle_timer_irq(hc);
2691
2692 if (r_irq_misc & V_DTMF_IRQ) {
2693 /* -> DTMF IRQ */
2694 hfcmulti_dtmf(hc);
2695 }
2696 /* TODO: REPLACE !!!! 125 us Interrupts are not acceptable */
2697 if (r_irq_misc & V_IRQ_PROC) {
2698 /* IRQ every 125us */
2699 count++;
2700 /* generate 1kHz signal */
2701 if (count == 8) {
2702 if (hfc_interrupt)
2703 hfc_interrupt();
2704 count = 0;
2705 }
2706 }
2707
2708 }
2709 if (status & V_FR_IRQSTA) {
2710 /* FIFO IRQ */
2711 r_irq_oview = HFC_inb_nodebug(hc, R_IRQ_OVIEW);
2712 for (i = 0; i < 8; i++) {
2713 if (r_irq_oview & (1 << i))
2714 fifo_irq(hc, i);
2715 }
2716 }
2717
2718#ifdef IRQ_DEBUG
2719 irqsem = 0;
2720#endif
2721 spin_unlock(&hc->lock);
2722 return IRQ_HANDLED;
2723
2724irq_notforus:
2725#ifdef IRQ_DEBUG
2726 irqsem = 0;
2727#endif
2728 spin_unlock(&hc->lock);
2729 return IRQ_NONE;
2730}
2731
2732
2733/*
2734 * timer callback for D-chan busy resolution. Currently no function
2735 */
2736
2737static void
2738hfcmulti_dbusy_timer(struct hfc_multi *hc)
2739{
2740}
2741
2742
2743/*
2744 * activate/deactivate hardware for selected channels and mode
2745 *
2746 * configure B-channel with the given protocol
2747 * ch equals the HFC channel number (0-31)
2748 * (channels are grouped 0-3, 4-7, 8-11, 12-15, 16-19, 20-23, 24-27, 28-31
2749 *	per S/T port; 1-31 for E1)
2750 * the hdlc interrupts will be set/unset
2751 */
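/*
 * A typical call, as used by deactivate_bchannel() further below, disables
 * a channel and detaches it from any PCM slot:
 *
 *	mode_hfcmulti(hc, bch->slot, ISDN_P_NONE, -1, 0, -1, 0);
 */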
2752static int
2753mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
2754 int bank_tx, int slot_rx, int bank_rx)
2755{
2756 int flow_tx = 0, flow_rx = 0, routing = 0;
2757 int oslot_tx, oslot_rx;
2758 int conf;
2759
2760 if (ch < 0 || ch > 31)
2761		return -EINVAL;
2762 oslot_tx = hc->chan[ch].slot_tx;
2763 oslot_rx = hc->chan[ch].slot_rx;
2764 conf = hc->chan[ch].conf;
2765
2766 if (debug & DEBUG_HFCMULTI_MODE)
2767 printk(KERN_DEBUG
2768 "%s: card %d channel %d protocol %x slot old=%d new=%d "
2769 "bank new=%d (TX) slot old=%d new=%d bank new=%d (RX)\n",
2770 __func__, hc->id, ch, protocol, oslot_tx, slot_tx,
2771 bank_tx, oslot_rx, slot_rx, bank_rx);
2772
2773 if (oslot_tx >= 0 && slot_tx != oslot_tx) {
2774 /* remove from slot */
2775 if (debug & DEBUG_HFCMULTI_MODE)
2776 printk(KERN_DEBUG "%s: remove from slot %d (TX)\n",
2777 __func__, oslot_tx);
2778 if (hc->slot_owner[oslot_tx<<1] == ch) {
2779 HFC_outb(hc, R_SLOT, oslot_tx << 1);
2780 HFC_outb(hc, A_SL_CFG, 0);
2781 HFC_outb(hc, A_CONF, 0);
2782 hc->slot_owner[oslot_tx<<1] = -1;
2783 } else {
2784 if (debug & DEBUG_HFCMULTI_MODE)
2785 printk(KERN_DEBUG
2786 "%s: we are not owner of this tx slot "
2787 "anymore, channel %d is.\n",
2788 __func__, hc->slot_owner[oslot_tx<<1]);
2789 }
2790 }
2791
2792 if (oslot_rx >= 0 && slot_rx != oslot_rx) {
2793 /* remove from slot */
2794 if (debug & DEBUG_HFCMULTI_MODE)
2795 printk(KERN_DEBUG
2796 "%s: remove from slot %d (RX)\n",
2797 __func__, oslot_rx);
2798 if (hc->slot_owner[(oslot_rx << 1) | 1] == ch) {
2799 HFC_outb(hc, R_SLOT, (oslot_rx << 1) | V_SL_DIR);
2800 HFC_outb(hc, A_SL_CFG, 0);
2801 hc->slot_owner[(oslot_rx << 1) | 1] = -1;
2802 } else {
2803 if (debug & DEBUG_HFCMULTI_MODE)
2804 printk(KERN_DEBUG
2805 "%s: we are not owner of this rx slot "
2806 "anymore, channel %d is.\n",
2807 __func__,
2808 hc->slot_owner[(oslot_rx << 1) | 1]);
2809 }
2810 }
2811
2812 if (slot_tx < 0) {
2813 flow_tx = 0x80; /* FIFO->ST */
2814 /* disable pcm slot */
2815 hc->chan[ch].slot_tx = -1;
2816 hc->chan[ch].bank_tx = 0;
2817 } else {
2818 /* set pcm slot */
2819 if (hc->chan[ch].txpending)
2820 flow_tx = 0x80; /* FIFO->ST */
2821 else
2822 flow_tx = 0xc0; /* PCM->ST */
2823 /* put on slot */
2824 routing = bank_tx ? 0xc0 : 0x80;
2825 if (conf >= 0 || bank_tx > 1)
2826 routing = 0x40; /* loop */
2827 if (debug & DEBUG_HFCMULTI_MODE)
2828 printk(KERN_DEBUG "%s: put channel %d to slot %d bank"
2829 " %d flow %02x routing %02x conf %d (TX)\n",
2830 __func__, ch, slot_tx, bank_tx,
2831 flow_tx, routing, conf);
2832 HFC_outb(hc, R_SLOT, slot_tx << 1);
2833 HFC_outb(hc, A_SL_CFG, (ch<<1) | routing);
2834 HFC_outb(hc, A_CONF, (conf < 0) ? 0 : (conf | V_CONF_SL));
2835 hc->slot_owner[slot_tx << 1] = ch;
2836 hc->chan[ch].slot_tx = slot_tx;
2837 hc->chan[ch].bank_tx = bank_tx;
2838 }
2839 if (slot_rx < 0) {
2840 /* disable pcm slot */
2841 flow_rx = 0x80; /* ST->FIFO */
2842 hc->chan[ch].slot_rx = -1;
2843 hc->chan[ch].bank_rx = 0;
2844 } else {
2845 /* set pcm slot */
2846 if (hc->chan[ch].txpending)
2847 flow_rx = 0x80; /* ST->FIFO */
2848 else
2849 flow_rx = 0xc0; /* ST->(FIFO,PCM) */
2850 /* put on slot */
2851 routing = bank_rx?0x80:0xc0; /* reversed */
2852 if (conf >= 0 || bank_rx > 1)
2853 routing = 0x40; /* loop */
2854 if (debug & DEBUG_HFCMULTI_MODE)
2855 printk(KERN_DEBUG "%s: put channel %d to slot %d bank"
2856 " %d flow %02x routing %02x conf %d (RX)\n",
2857 __func__, ch, slot_rx, bank_rx,
2858 flow_rx, routing, conf);
2859 HFC_outb(hc, R_SLOT, (slot_rx<<1) | V_SL_DIR);
2860 HFC_outb(hc, A_SL_CFG, (ch<<1) | V_CH_DIR | routing);
2861 hc->slot_owner[(slot_rx<<1)|1] = ch;
2862 hc->chan[ch].slot_rx = slot_rx;
2863 hc->chan[ch].bank_rx = bank_rx;
2864 }
2865
2866 switch (protocol) {
2867 case (ISDN_P_NONE):
2868 /* disable TX fifo */
2869 HFC_outb(hc, R_FIFO, ch << 1);
2870 HFC_wait(hc);
2871 HFC_outb(hc, A_CON_HDLC, flow_tx | 0x00 | V_IFF);
2872 HFC_outb(hc, A_SUBCH_CFG, 0);
2873 HFC_outb(hc, A_IRQ_MSK, 0);
2874 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
2875 HFC_wait(hc);
2876 /* disable RX fifo */
2877 HFC_outb(hc, R_FIFO, (ch<<1)|1);
2878 HFC_wait(hc);
2879 HFC_outb(hc, A_CON_HDLC, flow_rx | 0x00);
2880 HFC_outb(hc, A_SUBCH_CFG, 0);
2881 HFC_outb(hc, A_IRQ_MSK, 0);
2882 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
2883 HFC_wait(hc);
2884 if (hc->chan[ch].bch && hc->type != 1) {
2885 hc->hw.a_st_ctrl0[hc->chan[ch].port] &=
2886 ((ch & 0x3) == 0)? ~V_B1_EN: ~V_B2_EN;
2887 HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
2888 /* undocumented: delay after R_ST_SEL */
2889 udelay(1);
2890 HFC_outb(hc, A_ST_CTRL0,
2891 hc->hw.a_st_ctrl0[hc->chan[ch].port]);
2892 }
2893 if (hc->chan[ch].bch) {
2894 test_and_clear_bit(FLG_HDLC, &hc->chan[ch].bch->Flags);
2895 test_and_clear_bit(FLG_TRANSPARENT,
2896 &hc->chan[ch].bch->Flags);
2897 }
2898 break;
2899 case (ISDN_P_B_RAW): /* B-channel */
2900
2901 if (test_bit(HFC_CHIP_B410P, &hc->chip) &&
2902 (hc->chan[ch].slot_rx < 0) &&
2903 (hc->chan[ch].slot_tx < 0)) {
2904
2905 printk(KERN_DEBUG
2906 "Setting B-channel %d to echo cancelable "
2907 "state on PCM slot %d\n", ch,
2908 ((ch / 4) * 8) + ((ch % 4) * 4) + 1);
2909 printk(KERN_DEBUG
2910 "Enabling pass through for channel\n");
2911 vpm_out(hc, ch, ((ch / 4) * 8) +
2912 ((ch % 4) * 4) + 1, 0x01);
2913 /* rx path */
2914 /* S/T -> PCM */
2915 HFC_outb(hc, R_FIFO, (ch << 1));
2916 HFC_wait(hc);
2917 HFC_outb(hc, A_CON_HDLC, 0xc0 | V_HDLC_TRP | V_IFF);
2918 HFC_outb(hc, R_SLOT, (((ch / 4) * 8) +
2919 ((ch % 4) * 4) + 1) << 1);
2920 HFC_outb(hc, A_SL_CFG, 0x80 | (ch << 1));
2921
2922 /* PCM -> FIFO */
2923 HFC_outb(hc, R_FIFO, 0x20 | (ch << 1) | 1);
2924 HFC_wait(hc);
2925 HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
2926 HFC_outb(hc, A_SUBCH_CFG, 0);
2927 HFC_outb(hc, A_IRQ_MSK, 0);
2928 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
2929 HFC_wait(hc);
2930 HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) +
2931 ((ch % 4) * 4) + 1) << 1) | 1);
2932 HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1) | 1);
2933
2934 /* tx path */
2935 /* PCM -> S/T */
2936 HFC_outb(hc, R_FIFO, (ch << 1) | 1);
2937 HFC_wait(hc);
2938 HFC_outb(hc, A_CON_HDLC, 0xc0 | V_HDLC_TRP | V_IFF);
2939 HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) +
2940 ((ch % 4) * 4)) << 1) | 1);
2941 HFC_outb(hc, A_SL_CFG, 0x80 | 0x40 | (ch << 1) | 1);
2942
2943 /* FIFO -> PCM */
2944 HFC_outb(hc, R_FIFO, 0x20 | (ch << 1));
2945 HFC_wait(hc);
2946 HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
2947 HFC_outb(hc, A_SUBCH_CFG, 0);
2948 HFC_outb(hc, A_IRQ_MSK, 0);
2949 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
2950 HFC_wait(hc);
2951 /* tx silence */
2952 HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
2953 HFC_outb(hc, R_SLOT, (((ch / 4) * 8) +
2954 ((ch % 4) * 4)) << 1);
2955 HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1));
2956 } else {
2957 /* enable TX fifo */
2958 HFC_outb(hc, R_FIFO, ch << 1);
2959 HFC_wait(hc);
2960 HFC_outb(hc, A_CON_HDLC, flow_tx | 0x00 |
2961 V_HDLC_TRP | V_IFF);
2962 HFC_outb(hc, A_SUBCH_CFG, 0);
2963 HFC_outb(hc, A_IRQ_MSK, 0);
2964 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
2965 HFC_wait(hc);
2966 /* tx silence */
2967 HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, silence);
2968 /* enable RX fifo */
2969 HFC_outb(hc, R_FIFO, (ch<<1)|1);
2970 HFC_wait(hc);
2971 HFC_outb(hc, A_CON_HDLC, flow_rx | 0x00 | V_HDLC_TRP);
2972 HFC_outb(hc, A_SUBCH_CFG, 0);
2973 HFC_outb(hc, A_IRQ_MSK, 0);
2974 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
2975 HFC_wait(hc);
2976 }
2977 if (hc->type != 1) {
2978 hc->hw.a_st_ctrl0[hc->chan[ch].port] |=
2979 ((ch & 0x3) == 0) ? V_B1_EN : V_B2_EN;
2980 HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
2981 /* undocumented: delay after R_ST_SEL */
2982 udelay(1);
2983 HFC_outb(hc, A_ST_CTRL0,
2984 hc->hw.a_st_ctrl0[hc->chan[ch].port]);
2985 }
2986 if (hc->chan[ch].bch)
2987 test_and_set_bit(FLG_TRANSPARENT,
2988 &hc->chan[ch].bch->Flags);
2989 break;
2990 case (ISDN_P_B_HDLC): /* B-channel */
2991 case (ISDN_P_TE_S0): /* D-channel */
2992 case (ISDN_P_NT_S0):
2993 case (ISDN_P_TE_E1):
2994 case (ISDN_P_NT_E1):
2995 /* enable TX fifo */
2996 HFC_outb(hc, R_FIFO, ch<<1);
2997 HFC_wait(hc);
2998 if (hc->type == 1 || hc->chan[ch].bch) {
2999 /* E1 or B-channel */
3000 HFC_outb(hc, A_CON_HDLC, flow_tx | 0x04);
3001 HFC_outb(hc, A_SUBCH_CFG, 0);
3002 } else {
3003 /* D-Channel without HDLC fill flags */
3004 HFC_outb(hc, A_CON_HDLC, flow_tx | 0x04 | V_IFF);
3005 HFC_outb(hc, A_SUBCH_CFG, 2);
3006 }
3007 HFC_outb(hc, A_IRQ_MSK, V_IRQ);
3008 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
3009 HFC_wait(hc);
3010 /* enable RX fifo */
3011 HFC_outb(hc, R_FIFO, (ch<<1)|1);
3012 HFC_wait(hc);
3013 HFC_outb(hc, A_CON_HDLC, flow_rx | 0x04);
3014 if (hc->type == 1 || hc->chan[ch].bch)
3015 HFC_outb(hc, A_SUBCH_CFG, 0); /* full 8 bits */
3016 else
3017 HFC_outb(hc, A_SUBCH_CFG, 2); /* 2 bits dchannel */
3018 HFC_outb(hc, A_IRQ_MSK, V_IRQ);
3019 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
3020 HFC_wait(hc);
3021 if (hc->chan[ch].bch) {
3022 test_and_set_bit(FLG_HDLC, &hc->chan[ch].bch->Flags);
3023 if (hc->type != 1) {
3024 hc->hw.a_st_ctrl0[hc->chan[ch].port] |=
3025 ((ch&0x3) == 0) ? V_B1_EN : V_B2_EN;
3026 HFC_outb(hc, R_ST_SEL, hc->chan[ch].port);
3027 /* undocumented: delay after R_ST_SEL */
3028 udelay(1);
3029 HFC_outb(hc, A_ST_CTRL0,
3030 hc->hw.a_st_ctrl0[hc->chan[ch].port]);
3031 }
3032 }
3033 break;
3034 default:
3035 printk(KERN_DEBUG "%s: protocol not known %x\n",
3036 __func__, protocol);
3037 hc->chan[ch].protocol = ISDN_P_NONE;
3038 return -ENOPROTOOPT;
3039 }
3040 hc->chan[ch].protocol = protocol;
3041 return 0;
3042}
3043
3044
3045/*
3046 * connect/disconnect PCM
3047 */
3048
3049static void
3050hfcmulti_pcm(struct hfc_multi *hc, int ch, int slot_tx, int bank_tx,
3051 int slot_rx, int bank_rx)
3052{
3053	if (slot_tx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) {
3054 /* disable PCM */
3055 mode_hfcmulti(hc, ch, hc->chan[ch].protocol, -1, 0, -1, 0);
3056 return;
3057 }
3058
3059 /* enable pcm */
3060 mode_hfcmulti(hc, ch, hc->chan[ch].protocol, slot_tx, bank_tx,
3061 slot_rx, bank_rx);
3062}
3063
3064/*
3065 * set/disable conference
3066 */
3067
3068static void
3069hfcmulti_conf(struct hfc_multi *hc, int ch, int num)
3070{
3071 if (num >= 0 && num <= 7)
3072 hc->chan[ch].conf = num;
3073 else
3074 hc->chan[ch].conf = -1;
3075 mode_hfcmulti(hc, ch, hc->chan[ch].protocol, hc->chan[ch].slot_tx,
3076 hc->chan[ch].bank_tx, hc->chan[ch].slot_rx,
3077 hc->chan[ch].bank_rx);
3078}
3079
3080
3081/*
3082 * set/disable sample loop
3083 */
3084
3085/* NOTE: this function is experimental and therefore disabled */
3086
3087/*
3088 * Layer 1 callback function
3089 */
3090static int
3091hfcm_l1callback(struct dchannel *dch, u_int cmd)
3092{
3093 struct hfc_multi *hc = dch->hw;
3094 u_long flags;
3095
3096 switch (cmd) {
3097 case INFO3_P8:
3098 case INFO3_P10:
3099 break;
3100 case HW_RESET_REQ:
3101 /* start activation */
3102 spin_lock_irqsave(&hc->lock, flags);
3103 if (hc->type == 1) {
3104 if (debug & DEBUG_HFCMULTI_MSG)
3105 printk(KERN_DEBUG
3106 "%s: HW_RESET_REQ no BRI\n",
3107 __func__);
3108 } else {
3109 HFC_outb(hc, R_ST_SEL, hc->chan[dch->slot].port);
3110 /* undocumented: delay after R_ST_SEL */
3111 udelay(1);
3112 HFC_outb(hc, A_ST_WR_STATE, V_ST_LD_STA | 3); /* F3 */
3113			udelay(6); /* wait at least 5.21us */
3114 HFC_outb(hc, A_ST_WR_STATE, 3);
3115 HFC_outb(hc, A_ST_WR_STATE, 3 | (V_ST_ACT*3));
3116 /* activate */
3117 }
3118 spin_unlock_irqrestore(&hc->lock, flags);
3119 l1_event(dch->l1, HW_POWERUP_IND);
3120 break;
3121 case HW_DEACT_REQ:
3122 /* start deactivation */
3123 spin_lock_irqsave(&hc->lock, flags);
3124 if (hc->type == 1) {
3125 if (debug & DEBUG_HFCMULTI_MSG)
3126 printk(KERN_DEBUG
3127 "%s: HW_DEACT_REQ no BRI\n",
3128 __func__);
3129 } else {
3130 HFC_outb(hc, R_ST_SEL, hc->chan[dch->slot].port);
3131 /* undocumented: delay after R_ST_SEL */
3132 udelay(1);
3133 HFC_outb(hc, A_ST_WR_STATE, V_ST_ACT*2);
3134 /* deactivate */
3135 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
3136 hc->syncronized &=
3137 ~(1 << hc->chan[dch->slot].port);
3138 plxsd_checksync(hc, 0);
3139 }
3140 }
3141 skb_queue_purge(&dch->squeue);
3142 if (dch->tx_skb) {
3143 dev_kfree_skb(dch->tx_skb);
3144 dch->tx_skb = NULL;
3145 }
3146 dch->tx_idx = 0;
3147 if (dch->rx_skb) {
3148 dev_kfree_skb(dch->rx_skb);
3149 dch->rx_skb = NULL;
3150 }
3151 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
3152 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
3153 del_timer(&dch->timer);
3154 spin_unlock_irqrestore(&hc->lock, flags);
3155 break;
3156 case HW_POWERUP_REQ:
3157 spin_lock_irqsave(&hc->lock, flags);
3158 if (hc->type == 1) {
3159 if (debug & DEBUG_HFCMULTI_MSG)
3160 printk(KERN_DEBUG
3161 "%s: HW_POWERUP_REQ no BRI\n",
3162 __func__);
3163 } else {
3164 HFC_outb(hc, R_ST_SEL, hc->chan[dch->slot].port);
3165 /* undocumented: delay after R_ST_SEL */
3166 udelay(1);
3167 HFC_outb(hc, A_ST_WR_STATE, 3 | 0x10); /* activate */
3168			udelay(6); /* wait at least 5.21us */
3169 HFC_outb(hc, A_ST_WR_STATE, 3); /* activate */
3170 }
3171 spin_unlock_irqrestore(&hc->lock, flags);
3172 break;
3173 case PH_ACTIVATE_IND:
3174 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
3175 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
3176 GFP_ATOMIC);
3177 break;
3178 case PH_DEACTIVATE_IND:
3179 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
3180 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
3181 GFP_ATOMIC);
3182 break;
3183 default:
3184 if (dch->debug & DEBUG_HW)
3185 printk(KERN_DEBUG "%s: unknown command %x\n",
3186 __func__, cmd);
3187 return -1;
3188 }
3189 return 0;
3190}
3191
3192/*
3193 * Layer2 -> Layer 1 Transfer
3194 */
3195
3196static int
3197handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
3198{
3199 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
3200 struct dchannel *dch = container_of(dev, struct dchannel, dev);
3201 struct hfc_multi *hc = dch->hw;
3202 struct mISDNhead *hh = mISDN_HEAD_P(skb);
3203 int ret = -EINVAL;
3204 unsigned int id;
3205 u_long flags;
3206
3207 switch (hh->prim) {
3208 case PH_DATA_REQ:
3209 if (skb->len < 1)
3210 break;
3211 spin_lock_irqsave(&hc->lock, flags);
3212 ret = dchannel_senddata(dch, skb);
3213 if (ret > 0) { /* direct TX */
3214 id = hh->id; /* skb can be freed */
3215 hfcmulti_tx(hc, dch->slot);
3216 ret = 0;
3217 /* start fifo */
3218 HFC_outb(hc, R_FIFO, 0);
3219 HFC_wait(hc);
3220 spin_unlock_irqrestore(&hc->lock, flags);
3221 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
3222 } else
3223 spin_unlock_irqrestore(&hc->lock, flags);
3224 return ret;
3225 case PH_ACTIVATE_REQ:
3226 if (dch->dev.D.protocol != ISDN_P_TE_S0) {
3227 spin_lock_irqsave(&hc->lock, flags);
3228 ret = 0;
3229 if (debug & DEBUG_HFCMULTI_MSG)
3230 printk(KERN_DEBUG
3231 "%s: PH_ACTIVATE port %d (0..%d)\n",
3232 __func__, hc->chan[dch->slot].port,
3233 hc->ports-1);
3234 /* start activation */
3235 if (hc->type == 1) {
3236 ph_state_change(dch);
3237 if (debug & DEBUG_HFCMULTI_STATE)
3238 printk(KERN_DEBUG
3239 "%s: E1 report state %x \n",
3240 __func__, dch->state);
3241 } else {
3242 HFC_outb(hc, R_ST_SEL,
3243 hc->chan[dch->slot].port);
3244 /* undocumented: delay after R_ST_SEL */
3245 udelay(1);
3246 HFC_outb(hc, A_ST_WR_STATE, V_ST_LD_STA | 1);
3247 /* G1 */
3248 udelay(6); /* wait at least 5.21 us */
3249 HFC_outb(hc, A_ST_WR_STATE, 1);
3250 HFC_outb(hc, A_ST_WR_STATE, 1 |
3251 (V_ST_ACT * 3)); /* activate */
3252 dch->state = 1;
3253 }
3254 spin_unlock_irqrestore(&hc->lock, flags);
3255 } else
3256 ret = l1_event(dch->l1, hh->prim);
3257 break;
3258 case PH_DEACTIVATE_REQ:
3259 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
3260 if (dch->dev.D.protocol != ISDN_P_TE_S0) {
3261 spin_lock_irqsave(&hc->lock, flags);
3262 if (debug & DEBUG_HFCMULTI_MSG)
3263 printk(KERN_DEBUG
3264 "%s: PH_DEACTIVATE port %d (0..%d)\n",
3265 __func__, hc->chan[dch->slot].port,
3266 hc->ports-1);
3267 /* start deactivation */
3268 if (hc->type == 1) {
3269 if (debug & DEBUG_HFCMULTI_MSG)
3270 printk(KERN_DEBUG
3271 "%s: PH_DEACTIVATE no BRI\n",
3272 __func__);
3273 } else {
3274 HFC_outb(hc, R_ST_SEL,
3275 hc->chan[dch->slot].port);
3276 /* undocumented: delay after R_ST_SEL */
3277 udelay(1);
3278 HFC_outb(hc, A_ST_WR_STATE, V_ST_ACT * 2);
3279 /* deactivate */
3280 dch->state = 1;
3281 }
3282 skb_queue_purge(&dch->squeue);
3283 if (dch->tx_skb) {
3284 dev_kfree_skb(dch->tx_skb);
3285 dch->tx_skb = NULL;
3286 }
3287 dch->tx_idx = 0;
3288 if (dch->rx_skb) {
3289 dev_kfree_skb(dch->rx_skb);
3290 dch->rx_skb = NULL;
3291 }
3292 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
3293 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
3294 del_timer(&dch->timer);
3295#ifdef FIXME
3296 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
3297 dchannel_sched_event(&hc->dch, D_CLEARBUSY);
3298#endif
3299 ret = 0;
3300 spin_unlock_irqrestore(&hc->lock, flags);
3301 } else
3302 ret = l1_event(dch->l1, hh->prim);
3303 break;
3304 }
3305 if (!ret)
3306 dev_kfree_skb(skb);
3307 return ret;
3308}
3309
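/*
 * deactivate_bchannel - flush and shut down one B-channel
 *
 * Drops any pending tx/rx skbs, clears the ACTIVE and TX_BUSY flags
 * and puts the hardware channel back to ISDN_P_NONE via
 * mode_hfcmulti(). Everything runs under hc->lock.
 */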
3310static void
3311deactivate_bchannel(struct bchannel *bch)
3312{
3313 struct hfc_multi *hc = bch->hw;
3314 u_long flags;
3315
3316 spin_lock_irqsave(&hc->lock, flags);
3317 if (test_and_clear_bit(FLG_TX_NEXT, &bch->Flags)) {
3318 dev_kfree_skb(bch->next_skb);
3319 bch->next_skb = NULL;
3320 }
3321 if (bch->tx_skb) {
3322 dev_kfree_skb(bch->tx_skb);
3323 bch->tx_skb = NULL;
3324 }
3325 bch->tx_idx = 0;
3326 if (bch->rx_skb) {
3327 dev_kfree_skb(bch->rx_skb);
3328 bch->rx_skb = NULL;
3329 }
3330 hc->chan[bch->slot].coeff_count = 0;
3331 test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
3332 test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
3333 hc->chan[bch->slot].rx_off = 0;
3334 hc->chan[bch->slot].conf = -1;
3335 mode_hfcmulti(hc, bch->slot, ISDN_P_NONE, -1, 0, -1, 0);
3336 spin_unlock_irqrestore(&hc->lock, flags);
3337}
3338
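/*
 * handle_bmsg - B-channel requests coming down from layer 2
 *
 * PH_DATA_REQ works like on the D-channel, except that PH_DATA_CNF is
 * only confirmed for non-transparent channels. PH_ACTIVATE_REQ sets
 * up the channel mode with mode_hfcmulti() and starts the DTMF
 * decoder for raw channels on DTMF-capable chips. PH_CONTROL_REQ only
 * acknowledges the sample loop ids, and PH_DEACTIVATE_REQ tears the
 * channel down via deactivate_bchannel().
 */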
3339static int
3340handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
3341{
3342 struct bchannel *bch = container_of(ch, struct bchannel, ch);
3343 struct hfc_multi *hc = bch->hw;
3344 int ret = -EINVAL;
3345 struct mISDNhead *hh = mISDN_HEAD_P(skb);
3346 unsigned int id;
3347 u_long flags;
3348
3349 switch (hh->prim) {
3350 case PH_DATA_REQ:
3351 if (!skb->len)
3352 break;
3353 spin_lock_irqsave(&hc->lock, flags);
3354 ret = bchannel_senddata(bch, skb);
3355 if (ret > 0) { /* direct TX */
3356 id = hh->id; /* skb can be freed */
3357 hfcmulti_tx(hc, bch->slot);
3358 ret = 0;
3359 /* start fifo */
3360 HFC_outb_nodebug(hc, R_FIFO, 0);
3361 HFC_wait_nodebug(hc);
3362 if (!test_bit(FLG_TRANSPARENT, &bch->Flags)) {
3363 spin_unlock_irqrestore(&hc->lock, flags);
3364 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
3365 } else
3366 spin_unlock_irqrestore(&hc->lock, flags);
3367 } else
3368 spin_unlock_irqrestore(&hc->lock, flags);
3369 return ret;
3370 case PH_ACTIVATE_REQ:
3371 if (debug & DEBUG_HFCMULTI_MSG)
3372 printk(KERN_DEBUG "%s: PH_ACTIVATE ch %d (0..32)\n",
3373 __func__, bch->slot);
3374 spin_lock_irqsave(&hc->lock, flags);
3375 /* activate B-channel if not already activated */
3376 if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags)) {
3377 hc->chan[bch->slot].txpending = 0;
3378 ret = mode_hfcmulti(hc, bch->slot,
3379 ch->protocol,
3380 hc->chan[bch->slot].slot_tx,
3381 hc->chan[bch->slot].bank_tx,
3382 hc->chan[bch->slot].slot_rx,
3383 hc->chan[bch->slot].bank_rx);
3384 if (!ret) {
3385 if (ch->protocol == ISDN_P_B_RAW && !hc->dtmf
3386 && test_bit(HFC_CHIP_DTMF, &hc->chip)) {
3387 /* start decoder */
3388 hc->dtmf = 1;
3389 if (debug & DEBUG_HFCMULTI_DTMF)
3390 printk(KERN_DEBUG
3391 "%s: start dtmf decoder\n",
3392 __func__);
3393 HFC_outb(hc, R_DTMF, hc->hw.r_dtmf |
3394 V_RST_DTMF);
3395 }
3396 }
3397 } else
3398 ret = 0;
3399 spin_unlock_irqrestore(&hc->lock, flags);
3400 if (!ret)
3401 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0, NULL,
3402 GFP_KERNEL);
3403 break;
3404 case PH_CONTROL_REQ:
3405 spin_lock_irqsave(&hc->lock, flags);
3406 switch (hh->id) {
3407 case HFC_SPL_LOOP_ON: /* set sample loop */
3408 if (debug & DEBUG_HFCMULTI_MSG)
3409 printk(KERN_DEBUG
3410 "%s: HFC_SPL_LOOP_ON (len = %d)\n",
3411 __func__, skb->len);
3412 ret = 0;
3413 break;
3414 case HFC_SPL_LOOP_OFF: /* set silence */
3415 if (debug & DEBUG_HFCMULTI_MSG)
3416 printk(KERN_DEBUG "%s: HFC_SPL_LOOP_OFF\n",
3417 __func__);
3418 ret = 0;
3419 break;
3420 default:
3421 printk(KERN_ERR
3422 "%s: unknown PH_CONTROL_REQ info %x\n",
3423 __func__, hh->id);
3424 ret = -EINVAL;
3425 }
3426 spin_unlock_irqrestore(&hc->lock, flags);
3427 break;
3428 case PH_DEACTIVATE_REQ:
3429 deactivate_bchannel(bch); /* locked there */
3430 _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0, NULL,
3431 GFP_KERNEL);
3432 ret = 0;
3433 break;
3434 }
3435 if (!ret)
3436 dev_kfree_skb(skb);
3437 return ret;
3438}
3439
3440/*
3441 * bchannel control function
3442 */
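/*
 * Supported operations: MISDN_CTRL_RX_OFF toggles the receive stream
 * (resetting the rx FIFO when it is switched back on),
 * MISDN_CTRL_HW_FEATURES fills the dsp_features structure,
 * HFC_PCM_CONN/DISC attach or detach the channel to a PCM timeslot,
 * HFC_CONF_JOIN/SPLIT control hardware conferencing, and the ECHOCAN
 * operations are only available on B410P (VPM) hardware.
 */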
3443static int
3444channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
3445{
3446 int ret = 0;
3447 struct dsp_features *features =
3448 (struct dsp_features *)(*((u_long *)&cq->p1));
3449 struct hfc_multi *hc = bch->hw;
3450 int slot_tx;
3451 int bank_tx;
3452 int slot_rx;
3453 int bank_rx;
3454 int num;
3455
3456 switch (cq->op) {
3457 case MISDN_CTRL_GETOP:
3458 cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_HW_FEATURES_OP
3459 | MISDN_CTRL_RX_OFF;
3460 break;
3461 case MISDN_CTRL_RX_OFF: /* turn off / on rx stream */
3462 hc->chan[bch->slot].rx_off = !!cq->p1;
3463 if (!hc->chan[bch->slot].rx_off) {
3464 /* reset fifo on rx on */
3465 HFC_outb_nodebug(hc, R_FIFO, (bch->slot << 1) | 1);
3466 HFC_wait_nodebug(hc);
3467 HFC_outb_nodebug(hc, R_INC_RES_FIFO, V_RES_F);
3468 HFC_wait_nodebug(hc);
3469 }
3470 if (debug & DEBUG_HFCMULTI_MSG)
3471 printk(KERN_DEBUG "%s: RX_OFF request (nr=%d off=%d)\n",
3472 __func__, bch->nr, hc->chan[bch->slot].rx_off);
3473 break;
3474 case MISDN_CTRL_HW_FEATURES: /* fill features structure */
3475 if (debug & DEBUG_HFCMULTI_MSG)
3476 printk(KERN_DEBUG "%s: HW_FEATURE request\n",
3477 __func__);
3478 /* create confirm */
3479 features->hfc_id = hc->id;
3480 if (test_bit(HFC_CHIP_DTMF, &hc->chip))
3481 features->hfc_dtmf = 1;
3482 features->hfc_loops = 0;
3483 if (test_bit(HFC_CHIP_B410P, &hc->chip)) {
3484 features->hfc_echocanhw = 1;
3485 } else {
3486 features->pcm_id = hc->pcm;
3487 features->pcm_slots = hc->slots;
3488 features->pcm_banks = 2;
3489 }
3490 break;
3491 case MISDN_CTRL_HFC_PCM_CONN: /* connect to pcm timeslot (0..N) */
3492 slot_tx = cq->p1 & 0xff;
3493 bank_tx = cq->p1 >> 8;
3494 slot_rx = cq->p2 & 0xff;
3495 bank_rx = cq->p2 >> 8;
3496 if (debug & DEBUG_HFCMULTI_MSG)
3497 printk(KERN_DEBUG
3498 "%s: HFC_PCM_CONN slot %d bank %d (TX) "
3499 "slot %d bank %d (RX)\n",
3500 __func__, slot_tx, bank_tx,
3501 slot_rx, bank_rx);
3502 if (slot_tx < hc->slots && bank_tx <= 2 &&
3503 slot_rx < hc->slots && bank_rx <= 2)
3504 hfcmulti_pcm(hc, bch->slot,
3505 slot_tx, bank_tx, slot_rx, bank_rx);
3506 else {
3507 printk(KERN_WARNING
3508 "%s: HFC_PCM_CONN slot %d bank %d (TX) "
3509 "slot %d bank %d (RX) out of range\n",
3510 __func__, slot_tx, bank_tx,
3511 slot_rx, bank_rx);
3512 ret = -EINVAL;
3513 }
3514 break;
3515 case MISDN_CTRL_HFC_PCM_DISC: /* release interface from pcm timeslot */
3516 if (debug & DEBUG_HFCMULTI_MSG)
3517 printk(KERN_DEBUG "%s: HFC_PCM_DISC\n",
3518 __func__);
3519 hfcmulti_pcm(hc, bch->slot, -1, 0, -1, 0);
3520 break;
3521 case MISDN_CTRL_HFC_CONF_JOIN: /* join conference (0..7) */
3522 num = cq->p1 & 0xff;
3523 if (debug & DEBUG_HFCMULTI_MSG)
3524 printk(KERN_DEBUG "%s: HFC_CONF_JOIN conf %d\n",
3525 __func__, num);
3526 if (num <= 7)
3527 hfcmulti_conf(hc, bch->slot, num);
3528 else {
3529 printk(KERN_WARNING
3530 "%s: HW_CONF_JOIN conf %d out of range\n",
3531 __func__, num);
3532 ret = -EINVAL;
3533 }
3534 break;
3535 case MISDN_CTRL_HFC_CONF_SPLIT: /* split conference */
3536 if (debug & DEBUG_HFCMULTI_MSG)
3537 printk(KERN_DEBUG "%s: HFC_CONF_SPLIT\n", __func__);
3538 hfcmulti_conf(hc, bch->slot, -1);
3539 break;
3540 case MISDN_CTRL_HFC_ECHOCAN_ON:
3541 if (debug & DEBUG_HFCMULTI_MSG)
3542 printk(KERN_DEBUG "%s: HFC_ECHOCAN_ON\n", __func__);
3543 if (test_bit(HFC_CHIP_B410P, &hc->chip))
3544 vpm_echocan_on(hc, bch->slot, cq->p1);
3545 else
3546 ret = -EINVAL;
3547 break;
3548
3549 case MISDN_CTRL_HFC_ECHOCAN_OFF:
3550 if (debug & DEBUG_HFCMULTI_MSG)
3551 printk(KERN_DEBUG "%s: HFC_ECHOCAN_OFF\n",
3552 __func__);
3553 if (test_bit(HFC_CHIP_B410P, &hc->chip))
3554 vpm_echocan_off(hc, bch->slot);
3555 else
3556 ret = -EINVAL;
3557 break;
3558 default:
3559 printk(KERN_WARNING "%s: unknown Op %x\n",
3560 __func__, cq->op);
3561 ret = -EINVAL;
3562 break;
3563 }
3564 return ret;
3565}
3566
3567static int
3568hfcm_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
3569{
3570 struct bchannel *bch = container_of(ch, struct bchannel, ch);
3571 struct hfc_multi *hc = bch->hw;
3572 int err = -EINVAL;
3573 u_long flags;
3574
3575 if (bch->debug & DEBUG_HW)
3576 printk(KERN_DEBUG "%s: cmd:%x %p\n",
3577 __func__, cmd, arg);
3578 switch (cmd) {
3579 case CLOSE_CHANNEL:
3580 test_and_clear_bit(FLG_OPEN, &bch->Flags);
3581 if (test_bit(FLG_ACTIVE, &bch->Flags))
3582 deactivate_bchannel(bch); /* locked there */
3583 ch->protocol = ISDN_P_NONE;
3584 ch->peer = NULL;
3585 module_put(THIS_MODULE);
3586 err = 0;
3587 break;
3588 case CONTROL_CHANNEL:
3589 spin_lock_irqsave(&hc->lock, flags);
3590 err = channel_bctrl(bch, arg);
3591 spin_unlock_irqrestore(&hc->lock, flags);
3592 break;
3593 default:
3594 printk(KERN_WARNING "%s: unknown prim(%x)\n",
3595 __func__, cmd);
3596 }
3597 return err;
3598}
3599
3600/*
3601 * handle D-channel events
3602 *
3603 * handle state change event
3604 */
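/*
 * For E1 (hc->type == 1) state 1 means the span is up: the FIFOs are
 * reset on the first activation and PH_ACTIVATE_IND is queued, any
 * other state reports PH_DEACTIVATE_IND once. S/T TE states are
 * mapped onto layer 1 events (reset, deactivate, ANYSIGNAL, INFO2,
 * INFO4); S/T NT ports run the G-state machine here, including the
 * timer supervision via chan[ch].nt_timer.
 */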
3605static void
3606ph_state_change(struct dchannel *dch)
3607{
3608 struct hfc_multi *hc = dch->hw;
3609 int ch, i;
3610
3611 if (!dch) {
3612 printk(KERN_WARNING "%s: ERROR given dch is NULL\n",
3613 __func__);
3614 return;
3615 }
3616 ch = dch->slot;
3617
3618 if (hc->type == 1) {
3619 if (dch->dev.D.protocol == ISDN_P_TE_E1) {
3620 if (debug & DEBUG_HFCMULTI_STATE)
3621 printk(KERN_DEBUG
3622 "%s: E1 TE (id=%d) newstate %x\n",
3623 __func__, hc->id, dch->state);
3624 } else {
3625 if (debug & DEBUG_HFCMULTI_STATE)
3626 printk(KERN_DEBUG
3627 "%s: E1 NT (id=%d) newstate %x\n",
3628 __func__, hc->id, dch->state);
3629 }
3630 switch (dch->state) {
3631 case (1):
3632 if (hc->e1_state != 1) {
3633 for (i = 1; i <= 31; i++) {
3634 /* reset fifos on e1 activation */
3635 HFC_outb_nodebug(hc, R_FIFO, (i << 1) | 1);
3636 HFC_wait_nodebug(hc);
3637 HFC_outb_nodebug(hc,
3638 R_INC_RES_FIFO, V_RES_F);
3639 HFC_wait_nodebug(hc);
3640 }
3641 }
3642 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
3643 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
3644 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
3645 break;
3646
3647 default:
3648 if (hc->e1_state != 1)
3649 return;
3650 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
3651 _queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
3652 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
3653 }
3654 hc->e1_state = dch->state;
3655 } else {
3656 if (dch->dev.D.protocol == ISDN_P_TE_S0) {
3657 if (debug & DEBUG_HFCMULTI_STATE)
3658 printk(KERN_DEBUG
3659 "%s: S/T TE newstate %x\n",
3660 __func__, dch->state);
3661 switch (dch->state) {
3662 case (0):
3663 l1_event(dch->l1, HW_RESET_IND);
3664 break;
3665 case (3):
3666 l1_event(dch->l1, HW_DEACT_IND);
3667 break;
3668 case (5):
3669 case (8):
3670 l1_event(dch->l1, ANYSIGNAL);
3671 break;
3672 case (6):
3673 l1_event(dch->l1, INFO2);
3674 break;
3675 case (7):
3676 l1_event(dch->l1, INFO4_P8);
3677 break;
3678 }
3679 } else {
3680 if (debug & DEBUG_HFCMULTI_STATE)
3681 printk(KERN_DEBUG "%s: S/T NT newstate %x\n",
3682 __func__, dch->state);
3683 switch (dch->state) {
3684 case (2):
3685 if (hc->chan[ch].nt_timer == 0) {
3686 hc->chan[ch].nt_timer = -1;
3687 HFC_outb(hc, R_ST_SEL,
3688 hc->chan[ch].port);
3689 /* undocumented: delay after R_ST_SEL */
3690 udelay(1);
3691 HFC_outb(hc, A_ST_WR_STATE, 4 |
3692 V_ST_LD_STA); /* G4 */
3693 udelay(6); /* wait at least 5.21 us */
3694 HFC_outb(hc, A_ST_WR_STATE, 4);
3695 dch->state = 4;
3696 } else {
3697 /* one extra count for the next event */
3698 hc->chan[ch].nt_timer =
3699 nt_t1_count[poll_timer] + 1;
3700 HFC_outb(hc, R_ST_SEL,
3701 hc->chan[ch].port);
3702 /* undocumented: delay after R_ST_SEL */
3703 udelay(1);
3704 /* allow G2 -> G3 transition */
3705 HFC_outb(hc, A_ST_WR_STATE, 2 |
3706 V_SET_G2_G3);
3707 }
3708 break;
3709 case (1):
3710 hc->chan[ch].nt_timer = -1;
3711 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
3712 _queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
3713 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
3714 break;
3715 case (4):
3716 hc->chan[ch].nt_timer = -1;
3717 break;
3718 case (3):
3719 hc->chan[ch].nt_timer = -1;
3720 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
3721 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
3722 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
3723 break;
3724 }
3725 }
3726 }
3727}
3728
3729/*
3730 * called for card mode init message
3731 */
3732
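/*
 * hfcmulti_initmode brings one port into its configured D-protocol.
 * The E1 path programs the line interface, framing (CRC-4, LOS),
 * clock source selection and the E1 state machine; the S/T path sets
 * the per-port clock delay, NT/TE mode, line driver options and state
 * machine and enables the state-change interrupt for that port.
 */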
3733static void
3734hfcmulti_initmode(struct dchannel *dch)
3735{
3736 struct hfc_multi *hc = dch->hw;
3737 u_char a_st_wr_state, r_e1_wr_sta;
3738 int i, pt;
3739
3740 if (debug & DEBUG_HFCMULTI_INIT)
3741 printk(KERN_DEBUG "%s: entered\n", __func__);
3742
3743 if (hc->type == 1) {
3744 hc->chan[hc->dslot].slot_tx = -1;
3745 hc->chan[hc->dslot].slot_rx = -1;
3746 hc->chan[hc->dslot].conf = -1;
3747 if (hc->dslot) {
3748 mode_hfcmulti(hc, hc->dslot, dch->dev.D.protocol,
3749 -1, 0, -1, 0);
3750 dch->timer.function = (void *) hfcmulti_dbusy_timer;
3751 dch->timer.data = (long) dch;
3752 init_timer(&dch->timer);
3753 }
3754 for (i = 1; i <= 31; i++) {
3755 if (i == hc->dslot)
3756 continue;
3757 hc->chan[i].slot_tx = -1;
3758 hc->chan[i].slot_rx = -1;
3759 hc->chan[i].conf = -1;
3760 mode_hfcmulti(hc, i, ISDN_P_NONE, -1, 0, -1, 0);
3761 }
3762 /* E1 */
3763 if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) {
3764 HFC_outb(hc, R_LOS0, 255); /* 2 ms */
3765 HFC_outb(hc, R_LOS1, 255); /* 512 ms */
3766 }
3767 if (test_bit(HFC_CFG_OPTICAL, &hc->chan[hc->dslot].cfg)) {
3768 HFC_outb(hc, R_RX0, 0);
3769 hc->hw.r_tx0 = 0 | V_OUT_EN;
3770 } else {
3771 HFC_outb(hc, R_RX0, 1);
3772 hc->hw.r_tx0 = 1 | V_OUT_EN;
3773 }
3774 hc->hw.r_tx1 = V_ATX | V_NTRI;
3775 HFC_outb(hc, R_TX0, hc->hw.r_tx0);
3776 HFC_outb(hc, R_TX1, hc->hw.r_tx1);
3777 HFC_outb(hc, R_TX_FR0, 0x00);
3778 HFC_outb(hc, R_TX_FR1, 0xf8);
3779
3780 if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg))
3781 HFC_outb(hc, R_TX_FR2, V_TX_MF | V_TX_E | V_NEG_E);
3782
3783 HFC_outb(hc, R_RX_FR0, V_AUTO_RESYNC | V_AUTO_RECO | 0);
3784
3785 if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg))
3786 HFC_outb(hc, R_RX_FR1, V_RX_MF | V_RX_MF_SYNC);
3787
3788 if (dch->dev.D.protocol == ISDN_P_NT_E1) {
3789 if (debug & DEBUG_HFCMULTI_INIT)
3790 printk(KERN_DEBUG "%s: E1 port is NT-mode\n",
3791 __func__);
3792 r_e1_wr_sta = 0; /* G0 */
3793 hc->e1_getclock = 0;
3794 } else {
3795 if (debug & DEBUG_HFCMULTI_INIT)
3796 printk(KERN_DEBUG "%s: E1 port is TE-mode\n",
3797 __func__);
3798 r_e1_wr_sta = 0; /* F0 */
3799 hc->e1_getclock = 1;
3800 }
3801 if (test_bit(HFC_CHIP_RX_SYNC, &hc->chip))
3802 HFC_outb(hc, R_SYNC_OUT, V_SYNC_E1_RX);
3803 else
3804 HFC_outb(hc, R_SYNC_OUT, 0);
3805 if (test_bit(HFC_CHIP_E1CLOCK_GET, &hc->chip))
3806 hc->e1_getclock = 1;
3807 if (test_bit(HFC_CHIP_E1CLOCK_PUT, &hc->chip))
3808 hc->e1_getclock = 0;
3809 if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
3810 /* SLAVE (clock master) */
3811 if (debug & DEBUG_HFCMULTI_INIT)
3812 printk(KERN_DEBUG
3813 "%s: E1 port is clock master "
3814 "(clock from PCM)\n", __func__);
3815 HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC | V_PCM_SYNC);
3816 } else {
3817 if (hc->e1_getclock) {
3818 /* MASTER (clock slave) */
3819 if (debug & DEBUG_HFCMULTI_INIT)
3820 printk(KERN_DEBUG
3821 "%s: E1 port is clock slave "
3822 "(clock to PCM)\n", __func__);
3823 HFC_outb(hc, R_SYNC_CTRL, V_SYNC_OFFS);
3824 } else {
3825 /* MASTER (clock master) */
3826 if (debug & DEBUG_HFCMULTI_INIT)
3827 printk(KERN_DEBUG "%s: E1 port is "
3828 "clock master "
3829 "(clock from QUARTZ)\n",
3830 __func__);
3831 HFC_outb(hc, R_SYNC_CTRL, V_EXT_CLK_SYNC |
3832 V_PCM_SYNC | V_JATT_OFF);
3833 HFC_outb(hc, R_SYNC_OUT, 0);
3834 }
3835 }
3836 HFC_outb(hc, R_JATT_ATT, 0x9c); /* undocumented register */
3837 HFC_outb(hc, R_PWM_MD, V_PWM0_MD);
3838 HFC_outb(hc, R_PWM0, 0x50);
3839 HFC_outb(hc, R_PWM1, 0xff);
3840 /* state machine setup */
3841 HFC_outb(hc, R_E1_WR_STA, r_e1_wr_sta | V_E1_LD_STA);
3842 udelay(6); /* wait at least 5.21 us */
3843 HFC_outb(hc, R_E1_WR_STA, r_e1_wr_sta);
3844 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
3845 hc->syncronized = 0;
3846 plxsd_checksync(hc, 0);
3847 }
3848 } else {
3849 i = dch->slot;
3850 hc->chan[i].slot_tx = -1;
3851 hc->chan[i].slot_rx = -1;
3852 hc->chan[i].conf = -1;
3853 mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
3854 dch->timer.function = (void *)hfcmulti_dbusy_timer;
3855 dch->timer.data = (long) dch;
3856 init_timer(&dch->timer);
3857 hc->chan[i - 2].slot_tx = -1;
3858 hc->chan[i - 2].slot_rx = -1;
3859 hc->chan[i - 2].conf = -1;
3860 mode_hfcmulti(hc, i - 2, ISDN_P_NONE, -1, 0, -1, 0);
3861 hc->chan[i - 1].slot_tx = -1;
3862 hc->chan[i - 1].slot_rx = -1;
3863 hc->chan[i - 1].conf = -1;
3864 mode_hfcmulti(hc, i - 1, ISDN_P_NONE, -1, 0, -1, 0);
3865 /* ST */
3866 pt = hc->chan[i].port;
3867 /* select interface */
3868 HFC_outb(hc, R_ST_SEL, pt);
3869 /* undocumented: delay after R_ST_SEL */
3870 udelay(1);
3871 if (dch->dev.D.protocol == ISDN_P_NT_S0) {
3872 if (debug & DEBUG_HFCMULTI_INIT)
3873 printk(KERN_DEBUG
3874 "%s: ST port %d is NT-mode\n",
3875 __func__, pt);
3876 /* clock delay */
3877 HFC_outb(hc, A_ST_CLK_DLY, clockdelay_nt);
3878 a_st_wr_state = 1; /* G1 */
3879 hc->hw.a_st_ctrl0[pt] = V_ST_MD;
3880 } else {
3881 if (debug & DEBUG_HFCMULTI_INIT)
3882 printk(KERN_DEBUG
3883 "%s: ST port %d is TE-mode\n",
3884 __func__, pt);
3885 /* clock delay */
3886 HFC_outb(hc, A_ST_CLK_DLY, clockdelay_te);
3887 a_st_wr_state = 2; /* F2 */
3888 hc->hw.a_st_ctrl0[pt] = 0;
3889 }
3890 if (!test_bit(HFC_CFG_NONCAP_TX, &hc->chan[i].cfg))
3891 hc->hw.a_st_ctrl0[pt] |= V_TX_LI;
3892 /* line setup */
3893 HFC_outb(hc, A_ST_CTRL0, hc->hw.a_st_ctrl0[pt]);
3894 /* disable E-channel */
3895 if ((dch->dev.D.protocol == ISDN_P_NT_S0) ||
3896 test_bit(HFC_CFG_DIS_ECHANNEL, &hc->chan[i].cfg))
3897 HFC_outb(hc, A_ST_CTRL1, V_E_IGNO);
3898 else
3899 HFC_outb(hc, A_ST_CTRL1, 0);
3900 /* enable B-channel receive */
3901 HFC_outb(hc, A_ST_CTRL2, V_B1_RX_EN | V_B2_RX_EN);
3902 /* state machine setup */
3903 HFC_outb(hc, A_ST_WR_STATE, a_st_wr_state | V_ST_LD_STA);
3904 udelay(6); /* wait at least 5.21 us */
3905 HFC_outb(hc, A_ST_WR_STATE, a_st_wr_state);
3906 hc->hw.r_sci_msk |= 1 << pt;
3907 /* state machine interrupts */
3908 HFC_outb(hc, R_SCI_MSK, hc->hw.r_sci_msk);
3909 /* unset sync on port */
3910 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
3911 hc->syncronized &=
3912 ~(1 << hc->chan[dch->slot].port);
3913 plxsd_checksync(hc, 0);
3914 }
3915 }
3916 if (debug & DEBUG_HFCMULTI_INIT)
3917 printk(KERN_DEBUG "%s: done\n", __func__);
3918}
3919
3920
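/*
 * open_dchannel switches the D-channel to the requested protocol when
 * necessary (creating the layer 1 instance via create_l1() for TE S0
 * and re-running hfcmulti_initmode()) and reports PH_ACTIVATE_IND
 * right away if the line is already in an activated state for that
 * protocol.
 */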
3921static int
3922open_dchannel(struct hfc_multi *hc, struct dchannel *dch,
3923 struct channel_req *rq)
3924{
3925 int err = 0;
3926 u_long flags;
3927
3928 if (debug & DEBUG_HW_OPEN)
3929 printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
3930 dch->dev.id, __builtin_return_address(0));
3931 if (rq->protocol == ISDN_P_NONE)
3932 return -EINVAL;
3933 if ((dch->dev.D.protocol != ISDN_P_NONE) &&
3934 (dch->dev.D.protocol != rq->protocol)) {
3935 if (debug & DEBUG_HFCMULTI_MODE)
3936 printk(KERN_WARNING "%s: change protocol %x to %x\n",
3937 __func__, dch->dev.D.protocol, rq->protocol);
3938 }
3939 if ((dch->dev.D.protocol == ISDN_P_TE_S0)
3940 && (rq->protocol != ISDN_P_TE_S0))
3941 l1_event(dch->l1, CLOSE_CHANNEL);
3942 if (dch->dev.D.protocol != rq->protocol) {
3943 if (rq->protocol == ISDN_P_TE_S0) {
3944 err = create_l1(dch, hfcm_l1callback);
3945 if (err)
3946 return err;
3947 }
3948 dch->dev.D.protocol = rq->protocol;
3949 spin_lock_irqsave(&hc->lock, flags);
3950 hfcmulti_initmode(dch);
3951 spin_unlock_irqrestore(&hc->lock, flags);
3952 }
3953
3954 if (((rq->protocol == ISDN_P_NT_S0) && (dch->state == 3)) ||
3955 ((rq->protocol == ISDN_P_TE_S0) && (dch->state == 7)) ||
3956 ((rq->protocol == ISDN_P_NT_E1) && (dch->state == 1)) ||
3957 ((rq->protocol == ISDN_P_TE_E1) && (dch->state == 1))) {
3958 _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY,
3959 0, NULL, GFP_KERNEL);
3960 }
3961 rq->ch = &dch->dev.D;
3962 if (!try_module_get(THIS_MODULE))
3963 printk(KERN_WARNING "%s: cannot get module\n", __func__);
3964 return 0;
3965}
3966
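/*
 * open_bchannel maps the requested B-channel onto its internal slot:
 * E1 uses the channel number directly, S/T ports place B1 and B2 two
 * and one slots below the port's D-channel slot (hence the
 * "dch->slot - 2" arithmetic below).
 */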
3967static int
3968open_bchannel(struct hfc_multi *hc, struct dchannel *dch,
3969 struct channel_req *rq)
3970{
3971 struct bchannel *bch;
3972 int ch;
3973
3974 if (!test_bit(rq->adr.channel, &dch->dev.channelmap[0]))
3975 return -EINVAL;
3976 if (rq->protocol == ISDN_P_NONE)
3977 return -EINVAL;
3978 if (hc->type == 1)
3979 ch = rq->adr.channel;
3980 else
3981 ch = (rq->adr.channel - 1) + (dch->slot - 2);
3982 bch = hc->chan[ch].bch;
3983 if (!bch) {
3984 printk(KERN_ERR "%s: internal error ch %d has no bch\n",
3985 __func__, ch);
3986 return -EINVAL;
3987 }
3988 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
3989 return -EBUSY; /* b-channel can be only open once */
3990 bch->ch.protocol = rq->protocol;
3991 hc->chan[ch].rx_off = 0;
3992 rq->ch = &bch->ch;
3993 if (!try_module_get(THIS_MODULE))
3994 printk(KERN_WARNING "%s: cannot get module\n", __func__);
3995 return 0;
3996}
3997
3998/*
3999 * device control function
4000 */
4001static int
4002channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
4003{
4004 int ret = 0;
4005
4006 switch (cq->op) {
4007 case MISDN_CTRL_GETOP:
4008 cq->op = 0;
4009 break;
4010 default:
4011 printk(KERN_WARNING "%s: unknown Op %x\n",
4012 __func__, cq->op);
4013 ret = -EINVAL;
4014 break;
4015 }
4016 return ret;
4017}
4018
4019static int
4020hfcm_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
4021{
4022 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
4023 struct dchannel *dch = container_of(dev, struct dchannel, dev);
4024 struct hfc_multi *hc = dch->hw;
4025 struct channel_req *rq;
4026 int err = 0;
4027 u_long flags;
4028
4029 if (dch->debug & DEBUG_HW)
4030 printk(KERN_DEBUG "%s: cmd:%x %p\n",
4031 __func__, cmd, arg);
4032 switch (cmd) {
4033 case OPEN_CHANNEL:
4034 rq = arg;
4035 switch (rq->protocol) {
4036 case ISDN_P_TE_S0:
4037 case ISDN_P_NT_S0:
4038 if (hc->type == 1) {
4039 err = -EINVAL;
4040 break;
4041 }
4042 err = open_dchannel(hc, dch, rq); /* locked there */
4043 break;
4044 case ISDN_P_TE_E1:
4045 case ISDN_P_NT_E1:
4046 if (hc->type != 1) {
4047 err = -EINVAL;
4048 break;
4049 }
4050 err = open_dchannel(hc, dch, rq); /* locked there */
4051 break;
4052 default:
4053 spin_lock_irqsave(&hc->lock, flags);
4054 err = open_bchannel(hc, dch, rq);
4055 spin_unlock_irqrestore(&hc->lock, flags);
4056 }
4057 break;
4058 case CLOSE_CHANNEL:
4059 if (debug & DEBUG_HW_OPEN)
4060 printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
4061 __func__, dch->dev.id,
4062 __builtin_return_address(0));
4063 module_put(THIS_MODULE);
4064 break;
4065 case CONTROL_CHANNEL:
4066 spin_lock_irqsave(&hc->lock, flags);
4067 err = channel_dctrl(dch, arg);
4068 spin_unlock_irqrestore(&hc->lock, flags);
4069 break;
4070 default:
4071 if (dch->debug & DEBUG_HW)
4072 printk(KERN_DEBUG "%s: unknown command %x\n",
4073 __func__, cmd);
4074 err = -EINVAL;
4075 }
4076 return err;
4077}
4078
4079/*
4080 * initialize the card
4081 */
4082
4083/*
4084 * start timer irq, wait some time and check if we have interrupts.
4085 * if not, report an error and release the IRQ again.
4086 */
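/*
 * The IRQ is requested as shared; on PLXSD boards the PLX bridge
 * INTCSR is programmed as well. After init_chip() the hardware IRQ is
 * enabled for roughly 100ms and hc->irqcnt is checked; PCM slave
 * cards are allowed to pass without interrupts, everything else fails
 * with -EIO and the IRQ is released in the error path.
 */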
4087static int
4088init_card(struct hfc_multi *hc)
4089{
4090 int err = -EIO;
4091 u_long flags;
4092 u_short *plx_acc;
4093 u_long plx_flags;
4094
4095 if (debug & DEBUG_HFCMULTI_INIT)
4096 printk(KERN_DEBUG "%s: entered\n", __func__);
4097
4098 spin_lock_irqsave(&hc->lock, flags);
4099 /* set interrupts but leave global interrupt disabled */
4100 hc->hw.r_irq_ctrl = V_FIFO_IRQ;
4101 disable_hwirq(hc);
4102 spin_unlock_irqrestore(&hc->lock, flags);
4103
4104 if (request_irq(hc->pci_dev->irq, hfcmulti_interrupt, IRQF_SHARED,
4105 "HFC-multi", hc)) {
4106 printk(KERN_WARNING "mISDN: Could not get interrupt %d.\n",
4107 hc->pci_dev->irq);
4108 return -EIO;
4109 }
4110 hc->irq = hc->pci_dev->irq;
4111
4112 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
4113 spin_lock_irqsave(&plx_lock, plx_flags);
4114 plx_acc = (u_short *)(hc->plx_membase+PLX_INTCSR);
4115 writew((PLX_INTCSR_PCIINT_ENABLE | PLX_INTCSR_LINTI1_ENABLE),
4116 plx_acc); /* enable PCI & LINT1 irq */
4117 spin_unlock_irqrestore(&plx_lock, plx_flags);
4118 }
4119
4120 if (debug & DEBUG_HFCMULTI_INIT)
4121 printk(KERN_DEBUG "%s: IRQ %d count %d\n",
4122 __func__, hc->irq, hc->irqcnt);
4123 err = init_chip(hc);
4124 if (err)
4125 goto error;
4126 /*
4127 * Finally enable IRQ output
4128 * this is only allowed if an IRQ routine is already
4129 * established for this HFC, so don't do it earlier
4130 */
4131 spin_lock_irqsave(&hc->lock, flags);
4132 enable_hwirq(hc);
4133 spin_unlock_irqrestore(&hc->lock, flags);
4134 /* printk(KERN_DEBUG "no master irq set!!!\n"); */
4135 set_current_state(TASK_UNINTERRUPTIBLE);
4136 schedule_timeout((100*HZ)/1000); /* Timeout 100ms */
4137 /* turn IRQ off until chip is completely initialized */
4138 spin_lock_irqsave(&hc->lock, flags);
4139 disable_hwirq(hc);
4140 spin_unlock_irqrestore(&hc->lock, flags);
4141 if (debug & DEBUG_HFCMULTI_INIT)
4142 printk(KERN_DEBUG "%s: IRQ %d count %d\n",
4143 __func__, hc->irq, hc->irqcnt);
4144 if (hc->irqcnt) {
4145 if (debug & DEBUG_HFCMULTI_INIT)
4146 printk(KERN_DEBUG "%s: done\n", __func__);
4147
4148 return 0;
4149 }
4150 if (test_bit(HFC_CHIP_PCM_SLAVE, &hc->chip)) {
4151 printk(KERN_INFO "ignoring missing interrupts\n");
4152 return 0;
4153 }
4154
4155 printk(KERN_ERR "HFC PCI: IRQ(%d) getting no interrupts during init.\n",
4156 hc->irq);
4157
4158 err = -EIO;
4159
4160error:
4161 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
4162 spin_lock_irqsave(&plx_lock, plx_flags);
4163 plx_acc = (u_short *)(hc->plx_membase+PLX_INTCSR);
4164 writew(0x00, plx_acc); /*disable IRQs*/
4165 spin_unlock_irqrestore(&plx_lock, plx_flags);
4166 }
4167
4168 if (debug & DEBUG_HFCMULTI_INIT)
4169 printk(KERN_WARNING "%s: free irq %d\n", __func__, hc->irq);
4170 if (hc->irq) {
4171 free_irq(hc->irq, hc);
4172 hc->irq = 0;
4173 }
4174
4175 if (debug & DEBUG_HFCMULTI_INIT)
4176 printk(KERN_DEBUG "%s: done (err=%d)\n", __func__, err);
4177 return err;
4178}
4179
4180/*
4181 * find pci device and set it up
4182 */
4183
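/*
 * setup_pci selects the register access method for the board:
 * HFC_IO_MODE_PCIMEM and HFC_IO_MODE_PLXSD use memory mapped
 * accessors (PLXSD additionally maps the PLX bridge window and forces
 * 128 PCM slots), HFC_IO_MODE_REGIO uses port I/O. The matching BAR
 * is mapped or requested and PCI_COMMAND is set up accordingly; the
 * FIFOs stay disabled until init_card()/init_chip() run.
 */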
4184static int
4185setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
4186 const struct pci_device_id *ent)
4187{
4188 struct hm_map *m = (struct hm_map *)ent->driver_data;
4189
4190 printk(KERN_INFO
4191 "HFC-multi: card manufacturer: '%s' card name: '%s' clock: %s\n",
4192 m->vendor_name, m->card_name, m->clock2 ? "double" : "normal");
4193
4194 hc->pci_dev = pdev;
4195 if (m->clock2)
4196 test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
4197
4198 if (ent->device == 0xB410) {
4199 test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
4200 test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
4201 test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
4202 hc->slots = 32;
4203 }
4204
4205 if (hc->pci_dev->irq <= 0) {
4206 printk(KERN_WARNING "HFC-multi: No IRQ for PCI card found.\n");
4207 return -EIO;
4208 }
4209 if (pci_enable_device(hc->pci_dev)) {
4210 printk(KERN_WARNING "HFC-multi: Error enabling PCI card.\n");
4211 return -EIO;
4212 }
4213 hc->leds = m->leds;
4214 hc->ledstate = 0xAFFEAFFE;
4215 hc->opticalsupport = m->opticalsupport;
4216
4217 /* set memory access methods */
4218 if (m->io_mode) /* use mode from card config */
4219 hc->io_mode = m->io_mode;
4220 switch (hc->io_mode) {
4221 case HFC_IO_MODE_PLXSD:
4222 test_and_set_bit(HFC_CHIP_PLXSD, &hc->chip);
4223 hc->slots = 128; /* required */
4224 /* fall through */
4225 case HFC_IO_MODE_PCIMEM:
4226 hc->HFC_outb = HFC_outb_pcimem;
4227 hc->HFC_inb = HFC_inb_pcimem;
4228 hc->HFC_inw = HFC_inw_pcimem;
4229 hc->HFC_wait = HFC_wait_pcimem;
4230 hc->read_fifo = read_fifo_pcimem;
4231 hc->write_fifo = write_fifo_pcimem;
4232 break;
4233 case HFC_IO_MODE_REGIO:
4234 hc->HFC_outb = HFC_outb_regio;
4235 hc->HFC_inb = HFC_inb_regio;
4236 hc->HFC_inw = HFC_inw_regio;
4237 hc->HFC_wait = HFC_wait_regio;
4238 hc->read_fifo = read_fifo_regio;
4239 hc->write_fifo = write_fifo_regio;
4240 break;
4241 default:
4242 printk(KERN_WARNING "HFC-multi: Invalid IO mode.\n");
4243 pci_disable_device(hc->pci_dev);
4244 return -EIO;
4245 }
4246 hc->HFC_outb_nodebug = hc->HFC_outb;
4247 hc->HFC_inb_nodebug = hc->HFC_inb;
4248 hc->HFC_inw_nodebug = hc->HFC_inw;
4249 hc->HFC_wait_nodebug = hc->HFC_wait;
4250#ifdef HFC_REGISTER_DEBUG
4251 hc->HFC_outb = HFC_outb_debug;
4252 hc->HFC_inb = HFC_inb_debug;
4253 hc->HFC_inw = HFC_inw_debug;
4254 hc->HFC_wait = HFC_wait_debug;
4255#endif
4256 hc->pci_iobase = 0;
4257 hc->pci_membase = NULL;
4258 hc->plx_membase = NULL;
4259
4260 switch (hc->io_mode) {
4261 case HFC_IO_MODE_PLXSD:
4262 hc->plx_origmembase = hc->pci_dev->resource[0].start;
4263 /* MEMBASE 1 is PLX PCI Bridge */
4264
4265 if (!hc->plx_origmembase) {
4266 printk(KERN_WARNING
4267 "HFC-multi: No IO-Memory for PCI PLX bridge found\n");
4268 pci_disable_device(hc->pci_dev);
4269 return -EIO;
4270 }
4271
4272 hc->plx_membase = ioremap(hc->plx_origmembase, 0x80);
4273 if (!hc->plx_membase) {
4274 printk(KERN_WARNING
4275 "HFC-multi: failed to remap plx address space. "
4276 "(internal error)\n");
4277 pci_disable_device(hc->pci_dev);
4278 return -EIO;
4279 }
4280 printk(KERN_INFO
4281 "HFC-multi: plx_membase:%#lx plx_origmembase:%#lx\n",
4282 (u_long)hc->plx_membase, hc->plx_origmembase);
4283
4284 hc->pci_origmembase = hc->pci_dev->resource[2].start;
4285 /* resource 2 is the HFC-multi chip memory */
4286 if (!hc->pci_origmembase) {
4287 printk(KERN_WARNING
4288 "HFC-multi: No IO-Memory for PCI card found\n");
4289 pci_disable_device(hc->pci_dev);
4290 return -EIO;
4291 }
4292
4293 hc->pci_membase = ioremap(hc->pci_origmembase, 0x400);
4294 if (!hc->pci_membase) {
4295 printk(KERN_WARNING "HFC-multi: failed to remap io "
4296 "address space. (internal error)\n");
4297 pci_disable_device(hc->pci_dev);
4298 return -EIO;
4299 }
4300
4301 printk(KERN_INFO
4302 "card %d: defined at MEMBASE %#lx (%#lx) IRQ %d HZ %d "
4303 "leds-type %d\n",
4304 hc->id, (u_long)hc->pci_membase, hc->pci_origmembase,
4305 hc->pci_dev->irq, HZ, hc->leds);
4306 pci_write_config_word(hc->pci_dev, PCI_COMMAND, PCI_ENA_MEMIO);
4307 break;
4308 case HFC_IO_MODE_PCIMEM:
4309 hc->pci_origmembase = hc->pci_dev->resource[1].start;
4310 if (!hc->pci_origmembase) {
4311 printk(KERN_WARNING
4312 "HFC-multi: No IO-Memory for PCI card found\n");
4313 pci_disable_device(hc->pci_dev);
4314 return -EIO;
4315 }
4316
4317 hc->pci_membase = ioremap(hc->pci_origmembase, 256);
4318 if (!hc->pci_membase) {
4319 printk(KERN_WARNING
4320 "HFC-multi: failed to remap io address space. "
4321 "(internal error)\n");
4322 pci_disable_device(hc->pci_dev);
4323 return -EIO;
4324 }
4325 printk(KERN_INFO "card %d: defined at MEMBASE %#lx (%#lx) IRQ %d "
4326 "HZ %d leds-type %d\n", hc->id, (u_long)hc->pci_membase,
4327 hc->pci_origmembase, hc->pci_dev->irq, HZ, hc->leds);
4328 pci_write_config_word(hc->pci_dev, PCI_COMMAND, PCI_ENA_MEMIO);
4329 break;
4330 case HFC_IO_MODE_REGIO:
4331 hc->pci_iobase = (u_int) hc->pci_dev->resource[0].start;
4332 if (!hc->pci_iobase) {
4333 printk(KERN_WARNING
4334 "HFC-multi: No IO for PCI card found\n");
4335 pci_disable_device(hc->pci_dev);
4336 return -EIO;
4337 }
4338
4339 if (!request_region(hc->pci_iobase, 8, "hfcmulti")) {
4340 printk(KERN_WARNING "HFC-multi: failed to request "
4341 "address space at 0x%08lx (internal error)\n",
4342 hc->pci_iobase);
4343 pci_disable_device(hc->pci_dev);
4344 return -EIO;
4345 }
4346
4347 printk(KERN_INFO
4348 "%s %s: defined at IOBASE %#x IRQ %d HZ %d leds-type %d\n",
4349 m->vendor_name, m->card_name, (u_int) hc->pci_iobase,
4350 hc->pci_dev->irq, HZ, hc->leds);
4351 pci_write_config_word(hc->pci_dev, PCI_COMMAND, PCI_ENA_REGIO);
4352 break;
4353 default:
4354 printk(KERN_WARNING "HFC-multi: Invalid IO mode.\n");
4355 pci_disable_device(hc->pci_dev);
4356 return -EIO;
4357 }
4358
4359 pci_set_drvdata(hc->pci_dev, hc);
4360
4361 /* At this point the needed PCI config is done */
4362 /* fifos are still not enabled */
4363 return 0;
4364}
4365
4366
4367/*
4368 * remove port
4369 */
4370
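/*
 * release_port unregisters the mISDN device of this port, stops its
 * busy timer and frees the attached B-channels (every slot with a
 * bchannel on E1, only the two neighbouring slots on S/T) before
 * freeing the D-channel itself; hc->lock is dropped around the mISDN
 * free calls.
 */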
4371static void
4372release_port(struct hfc_multi *hc, struct dchannel *dch)
4373{
4374 int pt, ci, i = 0;
4375 u_long flags;
4376 struct bchannel *pb;
4377
4378 ci = dch->slot;
4379 pt = hc->chan[ci].port;
4380
4381 if (debug & DEBUG_HFCMULTI_INIT)
4382 printk(KERN_DEBUG "%s: entered for port %d\n",
4383 __func__, pt + 1);
4384
4385 if (pt >= hc->ports) {
4386 printk(KERN_WARNING "%s: ERROR port out of range (%d).\n",
4387 __func__, pt + 1);
4388 return;
4389 }
4390
4391 if (debug & DEBUG_HFCMULTI_INIT)
4392 printk(KERN_DEBUG "%s: releasing port=%d\n",
4393 __func__, pt + 1);
4394
4395 if (dch->dev.D.protocol == ISDN_P_TE_S0)
4396 l1_event(dch->l1, CLOSE_CHANNEL);
4397
4398 hc->chan[ci].dch = NULL;
4399
4400 if (hc->created[pt]) {
4401 hc->created[pt] = 0;
4402 mISDN_unregister_device(&dch->dev);
4403 }
4404
4405 spin_lock_irqsave(&hc->lock, flags);
4406
4407 if (dch->timer.function) {
4408 del_timer(&dch->timer);
4409 dch->timer.function = NULL;
4410 }
4411
4412 if (hc->type == 1) { /* E1 */
4413 /* remove sync */
4414 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
4415 hc->syncronized = 0;
4416 plxsd_checksync(hc, 1);
4417 }
4418 /* free channels */
4419 for (i = 0; i <= 31; i++) {
4420 if (hc->chan[i].bch) {
4421 if (debug & DEBUG_HFCMULTI_INIT)
4422 printk(KERN_DEBUG
4423 "%s: free port %d channel %d\n",
4424 __func__, hc->chan[i].port+1, i);
4425 pb = hc->chan[i].bch;
4426 hc->chan[i].bch = NULL;
4427 spin_unlock_irqrestore(&hc->lock, flags);
4428 mISDN_freebchannel(pb);
4429 kfree(pb);
4430 kfree(hc->chan[i].coeff);
4431 spin_lock_irqsave(&hc->lock, flags);
4432 }
4433 }
4434 } else {
4435 /* remove sync */
4436 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) {
4437 hc->syncronized &=
4438 ~(1 << hc->chan[ci].port);
4439 plxsd_checksync(hc, 1);
4440 }
4441 /* free channels */
4442 if (hc->chan[ci - 2].bch) {
4443 if (debug & DEBUG_HFCMULTI_INIT)
4444 printk(KERN_DEBUG
4445 "%s: free port %d channel %d\n",
4446 __func__, hc->chan[ci - 2].port+1,
4447 ci - 2);
4448 pb = hc->chan[ci - 2].bch;
4449 hc->chan[ci - 2].bch = NULL;
4450 spin_unlock_irqrestore(&hc->lock, flags);
4451 mISDN_freebchannel(pb);
4452 kfree(pb);
4453 kfree(hc->chan[ci - 2].coeff);
4454 spin_lock_irqsave(&hc->lock, flags);
4455 }
4456 if (hc->chan[ci - 1].bch) {
4457 if (debug & DEBUG_HFCMULTI_INIT)
4458 printk(KERN_DEBUG
4459 "%s: free port %d channel %d\n",
4460 __func__, hc->chan[ci - 1].port+1,
4461 ci - 1);
4462 pb = hc->chan[ci - 1].bch;
4463 hc->chan[ci - 1].bch = NULL;
4464 spin_unlock_irqrestore(&hc->lock, flags);
4465 mISDN_freebchannel(pb);
4466 kfree(pb);
4467 kfree(hc->chan[ci - 1].coeff);
4468 spin_lock_irqsave(&hc->lock, flags);
4469 }
4470 }
4471
4472 spin_unlock_irqrestore(&hc->lock, flags);
4473
4474 if (debug & DEBUG_HFCMULTI_INIT)
4475 printk(KERN_DEBUG "%s: free port %d channel D\n", __func__, pt);
4476 mISDN_freedchannel(dch);
4477 kfree(dch);
4478
4479 if (debug & DEBUG_HFCMULTI_INIT)
4480 printk(KERN_DEBUG "%s: done!\n", __func__);
4481}
4482
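/*
 * release_card disables the hardware IRQ, dims the LEDs, releases
 * every registered port (which frees its channels), then frees the
 * IRQ and the mapped I/O regions and drops the card from the global
 * list.
 */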
4483static void
4484release_card(struct hfc_multi *hc)
4485{
4486 u_long flags;
4487 int ch;
4488
4489 if (debug & DEBUG_HFCMULTI_INIT)
4490 printk(KERN_WARNING "%s: release card (%d) entered\n",
4491 __func__, hc->id);
4492
4493 spin_lock_irqsave(&hc->lock, flags);
4494 disable_hwirq(hc);
4495 spin_unlock_irqrestore(&hc->lock, flags);
4496
4497 udelay(1000);
4498
4499 /* dimm leds */
4500 if (hc->leds)
4501 hfcmulti_leds(hc);
4502
4503 /* disable D-channels & B-channels */
4504 if (debug & DEBUG_HFCMULTI_INIT)
4505 printk(KERN_DEBUG "%s: disable all channels (d and b)\n",
4506 __func__);
4507 for (ch = 0; ch <= 31; ch++) {
4508 if (hc->chan[ch].dch)
4509 release_port(hc, hc->chan[ch].dch);
4510 }
4511
4512 /* release hardware & irq */
4513 if (hc->irq) {
4514 if (debug & DEBUG_HFCMULTI_INIT)
4515 printk(KERN_WARNING "%s: free irq %d\n",
4516 __func__, hc->irq);
4517 free_irq(hc->irq, hc);
4518 hc->irq = 0;
4519
4520 }
4521 release_io_hfcmulti(hc);
4522
4523 if (debug & DEBUG_HFCMULTI_INIT)
4524 printk(KERN_WARNING "%s: remove instance from list\n",
4525 __func__);
4526 list_del(&hc->list);
4527
4528 if (debug & DEBUG_HFCMULTI_INIT)
4529 printk(KERN_WARNING "%s: delete instance\n", __func__);
4530 if (hc == syncmaster)
4531 syncmaster = NULL;
4532 kfree(hc);
4533 if (debug & DEBUG_HFCMULTI_INIT)
4534 printk(KERN_WARNING "%s: card successfully removed\n",
4535 __func__);
4536}
4537
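/*
 * init_e1_port registers one E1 span: the D-channel lives on time
 * slot hc->dslot (0 = disabled), every other slot 1..31 gets a
 * B-channel, and the per-port bits in port[] select optical mode,
 * LOS/AIS/SLIP/RDI reporting, CRC-4, clock direction and the elastic
 * jitter buffer.
 */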
4538static int
4539init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4540{
4541 struct dchannel *dch;
4542 struct bchannel *bch;
4543 int ch, ret = 0;
4544 char name[MISDN_MAX_IDLEN];
4545
4546 dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
4547 if (!dch)
4548 return -ENOMEM;
4549 dch->debug = debug;
4550 mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
4551 dch->hw = hc;
4552 dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
4553 dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
4554 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
4555 dch->dev.D.send = handle_dmsg;
4556 dch->dev.D.ctrl = hfcm_dctrl;
4557 dch->dev.nrbchan = (hc->dslot) ? 30 : 31;
4558 dch->slot = hc->dslot;
4559 hc->chan[hc->dslot].dch = dch;
4560 hc->chan[hc->dslot].port = 0;
4561 hc->chan[hc->dslot].nt_timer = -1;
4562 for (ch = 1; ch <= 31; ch++) {
4563 if (ch == hc->dslot) /* skip dchannel */
4564 continue;
4565 bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
4566 if (!bch) {
4567 printk(KERN_ERR "%s: no memory for bchannel\n",
4568 __func__);
4569 ret = -ENOMEM;
4570 goto free_chan;
4571 }
4572 hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL);
4573 if (!hc->chan[ch].coeff) {
4574 printk(KERN_ERR "%s: no memory for coeffs\n",
4575 __func__);
4576 ret = -ENOMEM;
4577 goto free_chan;
4578 }
4579 bch->nr = ch;
4580 bch->slot = ch;
4581 bch->debug = debug;
4582 mISDN_initbchannel(bch, MAX_DATA_MEM);
4583 bch->hw = hc;
4584 bch->ch.send = handle_bmsg;
4585 bch->ch.ctrl = hfcm_bctrl;
4586 bch->ch.nr = ch;
4587 list_add(&bch->ch.list, &dch->dev.bchannels);
4588 hc->chan[ch].bch = bch;
4589 hc->chan[ch].port = 0;
4590 test_and_set_bit(bch->nr, &dch->dev.channelmap[0]);
4591 }
4592 /* set optical line type */
4593 if (port[Port_cnt] & 0x001) {
4594 if (!m->opticalsupport) {
4595 printk(KERN_INFO
4596 "This board has no optical "
4597 "support\n");
4598 } else {
4599 if (debug & DEBUG_HFCMULTI_INIT)
4600 printk(KERN_DEBUG
4601 "%s: PORT set optical "
4602 "interfacs: card(%d) "
4603 "port(%d)\n",
4604 __func__,
4605 HFC_cnt + 1, 1);
4606 test_and_set_bit(HFC_CFG_OPTICAL,
4607 &hc->chan[hc->dslot].cfg);
4608 }
4609 }
4610 /* set LOS report */
4611 if (port[Port_cnt] & 0x004) {
4612 if (debug & DEBUG_HFCMULTI_INIT)
4613 printk(KERN_DEBUG "%s: PORT set "
4614 "LOS report: card(%d) port(%d)\n",
4615 __func__, HFC_cnt + 1, 1);
4616 test_and_set_bit(HFC_CFG_REPORT_LOS,
4617 &hc->chan[hc->dslot].cfg);
4618 }
4619 /* set AIS report */
4620 if (port[Port_cnt] & 0x008) {
4621 if (debug & DEBUG_HFCMULTI_INIT)
4622 printk(KERN_DEBUG "%s: PORT set "
4623 "AIS report: card(%d) port(%d)\n",
4624 __func__, HFC_cnt + 1, 1);
4625 test_and_set_bit(HFC_CFG_REPORT_AIS,
4626 &hc->chan[hc->dslot].cfg);
4627 }
4628 /* set SLIP report */
4629 if (port[Port_cnt] & 0x010) {
4630 if (debug & DEBUG_HFCMULTI_INIT)
4631 printk(KERN_DEBUG
4632 "%s: PORT set SLIP report: "
4633 "card(%d) port(%d)\n",
4634 __func__, HFC_cnt + 1, 1);
4635 test_and_set_bit(HFC_CFG_REPORT_SLIP,
4636 &hc->chan[hc->dslot].cfg);
4637 }
4638 /* set RDI report */
4639 if (port[Port_cnt] & 0x020) {
4640 if (debug & DEBUG_HFCMULTI_INIT)
4641 printk(KERN_DEBUG
4642 "%s: PORT set RDI report: "
4643 "card(%d) port(%d)\n",
4644 __func__, HFC_cnt + 1, 1);
4645 test_and_set_bit(HFC_CFG_REPORT_RDI,
4646 &hc->chan[hc->dslot].cfg);
4647 }
4648 /* set CRC-4 Mode */
4649 if (!(port[Port_cnt] & 0x100)) {
4650 if (debug & DEBUG_HFCMULTI_INIT)
4651 printk(KERN_DEBUG "%s: PORT turn on CRC4 report:"
4652 " card(%d) port(%d)\n",
4653 __func__, HFC_cnt + 1, 1);
4654 test_and_set_bit(HFC_CFG_CRC4,
4655 &hc->chan[hc->dslot].cfg);
4656 } else {
4657 if (debug & DEBUG_HFCMULTI_INIT)
4658 printk(KERN_DEBUG "%s: PORT turn off CRC4"
4659 " report: card(%d) port(%d)\n",
4660 __func__, HFC_cnt + 1, 1);
4661 }
4662 /* set forced clock */
4663 if (port[Port_cnt] & 0x0200) {
4664 if (debug & DEBUG_HFCMULTI_INIT)
4665 printk(KERN_DEBUG "%s: PORT force getting clock from "
4666 "E1: card(%d) port(%d)\n",
4667 __func__, HFC_cnt + 1, 1);
4668 test_and_set_bit(HFC_CHIP_E1CLOCK_GET, &hc->chip);
4669 } else
4670 if (port[Port_cnt] & 0x0400) {
4671 if (debug & DEBUG_HFCMULTI_INIT)
4672 printk(KERN_DEBUG "%s: PORT force putting clock to "
4673 "E1: card(%d) port(%d)\n",
4674 __func__, HFC_cnt + 1, 1);
4675 test_and_set_bit(HFC_CHIP_E1CLOCK_PUT, &hc->chip);
4676 }
4677 /* set JATT PLL */
4678 if (port[Port_cnt] & 0x0800) {
4679 if (debug & DEBUG_HFCMULTI_INIT)
4680 printk(KERN_DEBUG "%s: PORT disable JATT PLL on "
4681 "E1: card(%d) port(%d)\n",
4682 __func__, HFC_cnt + 1, 1);
4683 test_and_set_bit(HFC_CHIP_RX_SYNC, &hc->chip);
4684 }
4685 /* set elastic jitter buffer */
4686 if (port[Port_cnt] & 0x3000) {
4687 hc->chan[hc->dslot].jitter = (port[Port_cnt]>>12) & 0x3;
4688 if (debug & DEBUG_HFCMULTI_INIT)
4689 printk(KERN_DEBUG
4690 "%s: PORT set elastic "
4691 "buffer to %d: card(%d) port(%d)\n",
4692 __func__, hc->chan[hc->dslot].jitter,
4693 HFC_cnt + 1, 1);
4694 } else
4695 hc->chan[hc->dslot].jitter = 2; /* default */
4696 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1);
4697 ret = mISDN_register_device(&dch->dev, name);
4698 if (ret)
4699 goto free_chan;
4700 hc->created[0] = 1;
4701 return ret;
4702free_chan:
4703 release_port(hc, dch);
4704 return ret;
4705}
4706
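/*
 * init_multi_port registers one S/T port: each port occupies four
 * channel slots (B1 at pt*4, B2 at pt*4+1, D at pt*4+2), and the
 * port[] options select the PCM master clock port, non-capacitive
 * transmitter lines and E-channel disabling.
 */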
4707static int
4708init_multi_port(struct hfc_multi *hc, int pt)
4709{
4710 struct dchannel *dch;
4711 struct bchannel *bch;
4712 int ch, i, ret = 0;
4713 char name[MISDN_MAX_IDLEN];
4714
4715 dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
4716 if (!dch)
4717 return -ENOMEM;
4718 dch->debug = debug;
4719 mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
4720 dch->hw = hc;
4721 dch->dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
4722 dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
4723 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
4724 dch->dev.D.send = handle_dmsg;
4725 dch->dev.D.ctrl = hfcm_dctrl;
4726 dch->dev.nrbchan = 2;
4727 i = pt << 2;
4728 dch->slot = i + 2;
4729 hc->chan[i + 2].dch = dch;
4730 hc->chan[i + 2].port = pt;
4731 hc->chan[i + 2].nt_timer = -1;
4732 for (ch = 0; ch < dch->dev.nrbchan; ch++) {
4733 bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
4734 if (!bch) {
4735 printk(KERN_ERR "%s: no memory for bchannel\n",
4736 __func__);
4737 ret = -ENOMEM;
4738 goto free_chan;
4739 }
4740 hc->chan[i + ch].coeff = kzalloc(512, GFP_KERNEL);
4741 if (!hc->chan[i + ch].coeff) {
4742 printk(KERN_ERR "%s: no memory for coeffs\n",
4743 __func__);
4744 ret = -ENOMEM;
4745 goto free_chan;
4746 }
4747 bch->nr = ch + 1;
4748 bch->slot = i + ch;
4749 bch->debug = debug;
4750 mISDN_initbchannel(bch, MAX_DATA_MEM);
4751 bch->hw = hc;
4752 bch->ch.send = handle_bmsg;
4753 bch->ch.ctrl = hfcm_bctrl;
4754 bch->ch.nr = ch + 1;
4755 list_add(&bch->ch.list, &dch->dev.bchannels);
4756 hc->chan[i + ch].bch = bch;
4757 hc->chan[i + ch].port = pt;
4758 test_and_set_bit(bch->nr, &dch->dev.channelmap[0]);
4759 }
4760 /* set master clock */
4761 if (port[Port_cnt] & 0x001) {
4762 if (debug & DEBUG_HFCMULTI_INIT)
4763 printk(KERN_DEBUG
4764 "%s: PROTOCOL set master clock: "
4765 "card(%d) port(%d)\n",
4766 __func__, HFC_cnt + 1, pt + 1);
4767 if (dch->dev.D.protocol != ISDN_P_TE_S0) {
4768 printk(KERN_ERR "Error: Master clock "
4769 "for port(%d) of card(%d) is only"
4770 " possible with TE-mode\n",
4771 pt + 1, HFC_cnt + 1);
4772 ret = -EINVAL;
4773 goto free_chan;
4774 }
4775 if (hc->masterclk >= 0) {
4776 printk(KERN_ERR "Error: Master clock "
4777 "for port(%d) of card(%d) already "
4778 "defined for port(%d)\n",
4779 pt + 1, HFC_cnt + 1, hc->masterclk+1);
4780 ret = -EINVAL;
4781 goto free_chan;
4782 }
4783 hc->masterclk = pt;
4784 }
4785 /* set transmitter line to non capacitive */
4786 if (port[Port_cnt] & 0x002) {
4787 if (debug & DEBUG_HFCMULTI_INIT)
4788 printk(KERN_DEBUG
4789 "%s: PROTOCOL set non capacitive "
4790 "transmitter: card(%d) port(%d)\n",
4791 __func__, HFC_cnt + 1, pt + 1);
4792 test_and_set_bit(HFC_CFG_NONCAP_TX,
4793 &hc->chan[i + 2].cfg);
4794 }
4795 /* disable E-channel */
4796 if (port[Port_cnt] & 0x004) {
4797 if (debug & DEBUG_HFCMULTI_INIT)
4798 printk(KERN_DEBUG
4799 "%s: PROTOCOL disable E-channel: "
4800 "card(%d) port(%d)\n",
4801 __func__, HFC_cnt + 1, pt + 1);
4802 test_and_set_bit(HFC_CFG_DIS_ECHANNEL,
4803 &hc->chan[i + 2].cfg);
4804 }
4805 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-%ds.%d/%d",
4806 hc->type, HFC_cnt + 1, pt + 1);
4807 ret = mISDN_register_device(&dch->dev, name);
4808 if (ret)
4809 goto free_chan;
4810 hc->created[pt] = 1;
4811 return ret;
4812free_chan:
4813 release_port(hc, dch);
4814 return ret;
4815}
4816
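/*
 * hfcmulti_init sets up one card: it allocates the hfc_multi
 * structure, evaluates the type[]/dslot[]/pcm[]/iomode[] options for
 * this card, sets up PCI access, registers one mISDN device per port,
 * reads the DIP switches of boards that have them and finally starts
 * the card via init_card() before enabling interrupts.
 */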
4817static int
4818hfcmulti_init(struct pci_dev *pdev, const struct pci_device_id *ent)
4819{
4820 struct hm_map *m = (struct hm_map *)ent->driver_data;
4821 int ret_err = 0;
4822 int pt;
4823 struct hfc_multi *hc;
4824 u_long flags;
4825 u_char dips = 0, pmj = 0; /* dip settings, port mode Jumpers */
4826
4827 if (HFC_cnt >= MAX_CARDS) {
4828 printk(KERN_ERR "too many cards (max=%d).\n",
4829 MAX_CARDS);
4830 return -EINVAL;
4831 }
4832 if ((type[HFC_cnt] & 0xff) && (type[HFC_cnt] & 0xff) != m->type) {
4833 printk(KERN_WARNING "HFC-MULTI: Card '%s:%s' type %d found but "
4834 "type[%d] %d was supplied as module parameter\n",
4835 m->vendor_name, m->card_name, m->type, HFC_cnt,
4836 type[HFC_cnt] & 0xff);
4837 printk(KERN_WARNING "HFC-MULTI: Load module without parameters "
4838 "first, to see cards and their types.");
4839 return -EINVAL;
4840 }
4841 if (debug & DEBUG_HFCMULTI_INIT)
4842 printk(KERN_DEBUG "%s: Registering %s:%s chip type %d (0x%x)\n",
4843 __func__, m->vendor_name, m->card_name, m->type,
4844 type[HFC_cnt]);
4845
4846 /* allocate card+fifo structure */
4847 hc = kzalloc(sizeof(struct hfc_multi), GFP_KERNEL);
4848 if (!hc) {
4849 printk(KERN_ERR "No kmem for HFC-Multi card\n");
4850 return -ENOMEM;
4851 }
4852 spin_lock_init(&hc->lock);
4853 hc->mtyp = m;
4854 hc->type = m->type;
4855 hc->ports = m->ports;
4856 hc->id = HFC_cnt;
4857 hc->pcm = pcm[HFC_cnt];
4858 hc->io_mode = iomode[HFC_cnt];
4859 if (dslot[HFC_cnt] < 0) {
4860 hc->dslot = 0;
4861 printk(KERN_INFO "HFC-E1 card has its D-channel disabled, "
4862 "providing 31 B-channels\n");
4863 } else if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32) {
4864 hc->dslot = dslot[HFC_cnt];
4865 printk(KERN_INFO "HFC-E1 card uses alternate D-channel on "
4866 "time slot %d\n", dslot[HFC_cnt]);
4867 } else
4868 hc->dslot = 16;
4869
4870 /* set chip specific features */
4871 hc->masterclk = -1;
4872 if (type[HFC_cnt] & 0x100) {
4873 test_and_set_bit(HFC_CHIP_ULAW, &hc->chip);
4874 silence = 0xff; /* ulaw silence */
4875 } else
4876 silence = 0x2a; /* alaw silence */
4877 if (!(type[HFC_cnt] & 0x200))
4878 test_and_set_bit(HFC_CHIP_DTMF, &hc->chip);
4879
4880 if (type[HFC_cnt] & 0x800)
4881 test_and_set_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
4882 if (type[HFC_cnt] & 0x1000) {
4883 test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
4884 test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
4885 }
4886 if (type[HFC_cnt] & 0x4000)
4887 test_and_set_bit(HFC_CHIP_EXRAM_128, &hc->chip);
4888 if (type[HFC_cnt] & 0x8000)
4889 test_and_set_bit(HFC_CHIP_EXRAM_512, &hc->chip);
4890 hc->slots = 32;
4891 if (type[HFC_cnt] & 0x10000)
4892 hc->slots = 64;
4893 if (type[HFC_cnt] & 0x20000)
4894 hc->slots = 128;
4895 if (type[HFC_cnt] & 0x80000) {
4896 test_and_set_bit(HFC_CHIP_WATCHDOG, &hc->chip);
4897 hc->wdcount = 0;
4898 hc->wdbyte = V_GPIO_OUT2;
4899 printk(KERN_NOTICE "Watchdog enabled\n");
4900 }
4901
4902 /* setup pci, hc->slots may change due to PLXSD */
4903 ret_err = setup_pci(hc, pdev, ent);
4904 if (ret_err) {
4905 if (hc == syncmaster)
4906 syncmaster = NULL;
4907 kfree(hc);
4908 return ret_err;
4909 }
4910
4911 /* create channels */
4912 for (pt = 0; pt < hc->ports; pt++) {
4913 if (Port_cnt >= MAX_PORTS) {
4914 printk(KERN_ERR "too many ports (max=%d).\n",
4915 MAX_PORTS);
4916 ret_err = -EINVAL;
4917 goto free_card;
4918 }
4919 if (hc->type == 1)
4920 ret_err = init_e1_port(hc, m);
4921 else
4922 ret_err = init_multi_port(hc, pt);
4923 if (debug & DEBUG_HFCMULTI_INIT)
4924 printk(KERN_DEBUG
4925 "%s: Registering D-channel, card(%d) port(%d)"
4926 "result %d\n",
4927 __func__, HFC_cnt + 1, pt, ret_err);
4928
4929 if (ret_err) {
4930 while (pt) { /* release already registered ports */
4931 pt--;
4932 release_port(hc, hc->chan[(pt << 2) + 2].dch);
4933 }
4934 goto free_card;
4935 }
4936 Port_cnt++;
4937 }
4938
4939 /* DIP switches */
4940 switch (m->dip_type) {
4941 case DIP_4S:
4942 /*
4943 * get DIP Setting for beroNet 1S/2S/4S cards
4944 * check if Port Jumper config matches
4945 * module param 'protocol'
4946 * DIP Setting: (collect GPIO 13/14/15 (R_GPIO_IN1) +
4947 * GPI 19/23 (R_GPI_IN2))
4948 */
4949 dips = ((~HFC_inb(hc, R_GPIO_IN1) & 0xE0) >> 5) |
4950 ((~HFC_inb(hc, R_GPI_IN2) & 0x80) >> 3) |
4951 (~HFC_inb(hc, R_GPI_IN2) & 0x08);
4952
4953 /* Port mode (TE/NT) jumpers */
4954 pmj = ((HFC_inb(hc, R_GPI_IN3) >> 4) & 0xf);
4955
4956 if (test_bit(HFC_CHIP_B410P, &hc->chip))
4957 pmj = ~pmj & 0xf;
4958
4959 printk(KERN_INFO "%s: %s DIPs(0x%x) jumpers(0x%x)\n",
4960 m->vendor_name, m->card_name, dips, pmj);
4961 break;
4962 case DIP_8S:
4963 /*
4964 * get DIP Setting for beroNet 8S0+ cards
4965 *
4966 * enable PCI auxbridge function
4967 */
4968 HFC_outb(hc, R_BRG_PCM_CFG, 1 | V_PCM_CLK);
4969 /* prepare access to auxport */
4970 outw(0x4000, hc->pci_iobase + 4);
4971 /*
4972 * some dummy reads are required to
4973 * read valid DIP switch data
4974 */
4975 dips = inb(hc->pci_iobase);
4976 dips = inb(hc->pci_iobase);
4977 dips = inb(hc->pci_iobase);
4978 dips = ~inb(hc->pci_iobase) & 0x3F;
4979 outw(0x0, hc->pci_iobase + 4);
4980 /* disable PCI auxbridge function */
4981 HFC_outb(hc, R_BRG_PCM_CFG, V_PCM_CLK);
4982 printk(KERN_INFO "%s: %s DIPs(0x%x)\n",
4983 m->vendor_name, m->card_name, dips);
4984 break;
4985 case DIP_E1:
4986 /*
4987 * get DIP Setting for beroNet E1 cards
4988 * DIP Setting: collect GPI 4/5/6/7 (R_GPI_IN0)
4989 */
4990 dips = (~HFC_inb(hc, R_GPI_IN0) & 0xF0)>>4;
4991 printk(KERN_INFO "%s: %s DIPs(0x%x)\n",
4992 m->vendor_name, m->card_name, dips);
4993 break;
4994 }
4995
4996 /* add to list */
4997 spin_lock_irqsave(&HFClock, flags);
4998 list_add_tail(&hc->list, &HFClist);
4999 spin_unlock_irqrestore(&HFClock, flags);
5000
5001 /* initialize hardware */
5002 ret_err = init_card(hc);
5003 if (ret_err) {
5004 printk(KERN_ERR "init card returns %d\n", ret_err);
5005 release_card(hc);
5006 return ret_err;
5007 }
5008
5009 /* start IRQ and return */
5010 spin_lock_irqsave(&hc->lock, flags);
5011 enable_hwirq(hc);
5012 spin_unlock_irqrestore(&hc->lock, flags);
5013 return 0;
5014
5015free_card:
5016 release_io_hfcmulti(hc);
5017 if (hc == syncmaster)
5018 syncmaster = NULL;
5019 kfree(hc);
5020 return ret_err;
5021}
5022
5023static void __devexit hfc_remove_pci(struct pci_dev *pdev)
5024{
5025 struct hfc_multi *card = pci_get_drvdata(pdev);
5026 u_long flags;
5027
5028 if (debug)
5029 printk(KERN_INFO "removing hfc_multi card vendor:%x "
5030 "device:%x subvendor:%x subdevice:%x\n",
5031 pdev->vendor, pdev->device,
5032 pdev->subsystem_vendor, pdev->subsystem_device);
5033
5034 if (card) {
5035 spin_lock_irqsave(&HFClock, flags);
5036 release_card(card);
5037 spin_unlock_irqrestore(&HFClock, flags);
5038 } else {
5039 if (debug)
5040 printk(KERN_WARNING "%s: drvdata already removed\n",
5041 __func__);
5042 }
5043}
5044
5045#define VENDOR_CCD "Cologne Chip AG"
5046#define VENDOR_BN "beroNet GmbH"
5047#define VENDOR_DIG "Digium Inc."
5048#define VENDOR_JH "Junghanns.NET GmbH"
5049#define VENDOR_PRIM "PrimuX"
5050
5051static const struct hm_map hfcm_map[] = {
5052/*0*/ {VENDOR_BN, "HFC-1S Card (mini PCI)", 4, 1, 1, 3, 0, DIP_4S, 0},
5053 /*1*/ {VENDOR_BN, "HFC-2S Card", 4, 2, 1, 3, 0, DIP_4S, 0},
5054/*2*/ {VENDOR_BN, "HFC-2S Card (mini PCI)", 4, 2, 1, 3, 0, DIP_4S, 0},
5055/*3*/ {VENDOR_BN, "HFC-4S Card", 4, 4, 1, 2, 0, DIP_4S, 0},
5056/*4*/ {VENDOR_BN, "HFC-4S Card (mini PCI)", 4, 4, 1, 2, 0, 0, 0},
5057/*5*/ {VENDOR_CCD, "HFC-4S Eval (old)", 4, 4, 0, 0, 0, 0, 0},
5058/*6*/ {VENDOR_CCD, "HFC-4S IOB4ST", 4, 4, 1, 2, 0, 0, 0},
5059/*7*/ {VENDOR_CCD, "HFC-4S", 4, 4, 1, 2, 0, 0, 0},
5060/*8*/ {VENDOR_DIG, "HFC-4S Card", 4, 4, 0, 2, 0, 0, HFC_IO_MODE_REGIO},
5061/*9*/ {VENDOR_CCD, "HFC-4S Swyx 4xS0 SX2 QuadBri", 4, 4, 1, 2, 0, 0, 0},
5062/*10*/ {VENDOR_JH, "HFC-4S (junghanns 2.0)", 4, 4, 1, 2, 0, 0, 0},
5063/*11*/ {VENDOR_PRIM, "HFC-2S Primux Card", 4, 2, 0, 0, 0, 0, 0},
5064
5065/*12*/ {VENDOR_BN, "HFC-8S Card", 8, 8, 1, 0, 0, 0, 0},
5066/*13*/ {VENDOR_BN, "HFC-8S Card (+)", 8, 8, 1, 8, 0, DIP_8S,
5067 HFC_IO_MODE_REGIO},
5068/*14*/ {VENDOR_CCD, "HFC-8S Eval (old)", 8, 8, 0, 0, 0, 0, 0},
5069/*15*/ {VENDOR_CCD, "HFC-8S IOB4ST Recording", 8, 8, 1, 0, 0, 0, 0},
5070
5071/*16*/ {VENDOR_CCD, "HFC-8S IOB8ST", 8, 8, 1, 0, 0, 0, 0},
5072/*17*/ {VENDOR_CCD, "HFC-8S", 8, 8, 1, 0, 0, 0, 0},
5073/*18*/ {VENDOR_CCD, "HFC-8S", 8, 8, 1, 0, 0, 0, 0},
5074
5075/*19*/ {VENDOR_BN, "HFC-E1 Card", 1, 1, 0, 1, 0, DIP_E1, 0},
5076/*20*/ {VENDOR_BN, "HFC-E1 Card (mini PCI)", 1, 1, 0, 1, 0, 0, 0},
5077/*21*/ {VENDOR_BN, "HFC-E1+ Card (Dual)", 1, 1, 0, 1, 0, DIP_E1, 0},
5078/*22*/ {VENDOR_BN, "HFC-E1 Card (Dual)", 1, 1, 0, 1, 0, DIP_E1, 0},
5079
5080/*23*/ {VENDOR_CCD, "HFC-E1 Eval (old)", 1, 1, 0, 0, 0, 0, 0},
5081/*24*/ {VENDOR_CCD, "HFC-E1 IOB1E1", 1, 1, 0, 1, 0, 0, 0},
5082/*25*/ {VENDOR_CCD, "HFC-E1", 1, 1, 0, 1, 0, 0, 0},
5083
5084/*26*/ {VENDOR_CCD, "HFC-4S Speech Design", 4, 4, 0, 0, 0, 0,
5085 HFC_IO_MODE_PLXSD},
5086/*27*/ {VENDOR_CCD, "HFC-E1 Speech Design", 1, 1, 0, 0, 0, 0,
5087 HFC_IO_MODE_PLXSD},
5088/*28*/ {VENDOR_CCD, "HFC-4S OpenVox", 4, 4, 1, 0, 0, 0, 0},
5089/*29*/ {VENDOR_CCD, "HFC-2S OpenVox", 4, 2, 1, 0, 0, 0, 0},
5090/*30*/ {VENDOR_CCD, "HFC-8S OpenVox", 8, 8, 1, 0, 0, 0, 0},
5091};
5092
5093#undef H
5094#define H(x) ((unsigned long)&hfcm_map[x])
5095static struct pci_device_id hfmultipci_ids[] __devinitdata = {
5096
5097 /* Cards with HFC-4S Chip */
5098 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5099 PCI_SUBDEVICE_ID_CCD_BN1SM, 0, 0, H(0)}, /* BN1S mini PCI */
5100 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5101 PCI_SUBDEVICE_ID_CCD_BN2S, 0, 0, H(1)}, /* BN2S */
5102 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5103 PCI_SUBDEVICE_ID_CCD_BN2SM, 0, 0, H(2)}, /* BN2S mini PCI */
5104 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5105 PCI_SUBDEVICE_ID_CCD_BN4S, 0, 0, H(3)}, /* BN4S */
5106 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5107 PCI_SUBDEVICE_ID_CCD_BN4SM, 0, 0, H(4)}, /* BN4S mini PCI */
5108 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5109 PCI_DEVICE_ID_CCD_HFC4S, 0, 0, H(5)}, /* Old Eval */
5110 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5111 PCI_SUBDEVICE_ID_CCD_IOB4ST, 0, 0, H(6)}, /* IOB4ST */
5112 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5113 PCI_SUBDEVICE_ID_CCD_HFC4S, 0, 0, H(7)}, /* 4S */
5114 { PCI_VENDOR_ID_DIGIUM, PCI_DEVICE_ID_DIGIUM_HFC4S,
5115 PCI_VENDOR_ID_DIGIUM, PCI_DEVICE_ID_DIGIUM_HFC4S, 0, 0, H(8)},
5116 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5117 PCI_SUBDEVICE_ID_CCD_SWYX4S, 0, 0, H(9)}, /* 4S Swyx */
5118 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5119 PCI_SUBDEVICE_ID_CCD_JH4S20, 0, 0, H(10)},
5120 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5121 PCI_SUBDEVICE_ID_CCD_PMX2S, 0, 0, H(11)}, /* Primux */
5122 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5123 PCI_SUBDEVICE_ID_CCD_OV4S, 0, 0, H(28)}, /* OpenVox 4 */
5124 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD,
5125 PCI_SUBDEVICE_ID_CCD_OV2S, 0, 0, H(29)}, /* OpenVox 2 */
5126
5127 /* Cards with HFC-8S Chip */
5128 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
5129 PCI_SUBDEVICE_ID_CCD_BN8S, 0, 0, H(12)}, /* BN8S */
5130 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
5131 PCI_SUBDEVICE_ID_CCD_BN8SP, 0, 0, H(13)}, /* BN8S+ */
5132 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
5133 PCI_DEVICE_ID_CCD_HFC8S, 0, 0, H(14)}, /* old Eval */
5134 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
5135 PCI_SUBDEVICE_ID_CCD_IOB8STR, 0, 0, H(15)},
5136 /* IOB8ST Recording */
5137 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
5138 PCI_SUBDEVICE_ID_CCD_IOB8ST, 0, 0, H(16)}, /* IOB8ST */
5139 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
5140 PCI_SUBDEVICE_ID_CCD_IOB8ST_1, 0, 0, H(17)}, /* IOB8ST */
5141 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
5142 PCI_SUBDEVICE_ID_CCD_HFC8S, 0, 0, H(18)}, /* 8S */
5143 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_VENDOR_ID_CCD,
5144 PCI_SUBDEVICE_ID_CCD_OV8S, 0, 0, H(30)}, /* OpenVox 8 */
5145
5146
5147 /* Cards with HFC-E1 Chip */
5148 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
5149 PCI_SUBDEVICE_ID_CCD_BNE1, 0, 0, H(19)}, /* BNE1 */
5150 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
5151 PCI_SUBDEVICE_ID_CCD_BNE1M, 0, 0, H(20)}, /* BNE1 mini PCI */
5152 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
5153 PCI_SUBDEVICE_ID_CCD_BNE1DP, 0, 0, H(21)}, /* BNE1 + (Dual) */
5154 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
5155 PCI_SUBDEVICE_ID_CCD_BNE1D, 0, 0, H(22)}, /* BNE1 (Dual) */
5156
5157 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
5158 PCI_DEVICE_ID_CCD_HFCE1, 0, 0, H(23)}, /* Old Eval */
5159 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
5160 PCI_SUBDEVICE_ID_CCD_IOB1E1, 0, 0, H(24)}, /* IOB1E1 */
5161 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_VENDOR_ID_CCD,
5162 PCI_SUBDEVICE_ID_CCD_HFCE1, 0, 0, H(25)}, /* E1 */
5163
5164 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_CCD,
5165 PCI_SUBDEVICE_ID_CCD_SPD4S, 0, 0, H(26)}, /* PLX PCI Bridge */
5166 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_CCD,
5167 PCI_SUBDEVICE_ID_CCD_SPDE1, 0, 0, H(27)}, /* PLX PCI Bridge */
5168 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_ANY_ID, PCI_ANY_ID,
5169 0, 0, 0},
5170 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC8S, PCI_ANY_ID, PCI_ANY_ID,
5171 0, 0, 0},
5172 { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFCE1, PCI_ANY_ID, PCI_ANY_ID,
5173 0, 0, 0},
5174 {0, }
5175};
5176#undef H
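/*
 * The three PCI_ANY_ID entries at the end of the table match any other
 * CCD HFC-4S/8S/E1 subsystem id; they carry no hm_map (driver_data is 0),
 * so hfcmulti_probe() only reports the unknown board and returns -ENODEV.
 */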
5177
5178MODULE_DEVICE_TABLE(pci, hfmultipci_ids);
5179
5180static int
5181hfcmulti_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5182{
5183 struct hm_map *m = (struct hm_map *)ent->driver_data;
5184 int ret;
5185
5186 if (m == NULL) {
5187 if (ent->vendor == PCI_VENDOR_ID_CCD)
5188 if (ent->device == PCI_DEVICE_ID_CCD_HFC4S ||
5189 ent->device == PCI_DEVICE_ID_CCD_HFC8S ||
5190 ent->device == PCI_DEVICE_ID_CCD_HFCE1)
5191 printk(KERN_ERR
5192 "unknown HFC multiport controller "
5193 "(vendor:%x device:%x subvendor:%x "
5194 "subdevice:%x) Please contact the "
5195 "driver maintainer for support.\n",
5196 ent->vendor, ent->device,
5197 ent->subvendor, ent->subdevice);
5198 return -ENODEV;
5199 }
5200 ret = hfcmulti_init(pdev, ent);
5201 if (ret)
5202 return ret;
5203 HFC_cnt++;
5204 printk(KERN_INFO "%d devices registered\n", HFC_cnt);
5205 return 0;
5206}
5207
5208static struct pci_driver hfcmultipci_driver = {
5209 .name = "hfc_multi",
5210 .probe = hfcmulti_probe,
5211 .remove = __devexit_p(hfc_remove_pci),
5212 .id_table = hfmultipci_ids,
5213};
5214
5215static void __exit
5216HFCmulti_cleanup(void)
5217{
5218 struct hfc_multi *card, *next;
5219
5220	/* release the interrupt function symbols */
5221 if (hfc_interrupt)
5222 symbol_put(ztdummy_extern_interrupt);
5223 if (register_interrupt)
5224 symbol_put(ztdummy_register_interrupt);
5225 if (unregister_interrupt) {
5226 if (interrupt_registered) {
5227 interrupt_registered = 0;
5228 unregister_interrupt();
5229 }
5230 symbol_put(ztdummy_unregister_interrupt);
5231 }
5232
5233 list_for_each_entry_safe(card, next, &HFClist, list)
5234 release_card(card);
5235 /* get rid of all devices of this driver */
5236 pci_unregister_driver(&hfcmultipci_driver);
5237}
5238
5239static int __init
5240HFCmulti_init(void)
5241{
5242 int err;
5243
5244#ifdef IRQ_DEBUG
5245 printk(KERN_ERR "%s: IRQ_DEBUG IS ENABLED!\n", __func__);
5246#endif
5247
5248 spin_lock_init(&HFClock);
5249 spin_lock_init(&plx_lock);
5250
5251 if (debug & DEBUG_HFCMULTI_INIT)
5252 printk(KERN_DEBUG "%s: init entered\n", __func__);
5253
5254#ifdef __BIG_ENDIAN
5255#error "this driver does not support big endian machines yet"
5256#endif
5257 hfc_interrupt = symbol_get(ztdummy_extern_interrupt);
5258 register_interrupt = symbol_get(ztdummy_register_interrupt);
5259 unregister_interrupt = symbol_get(ztdummy_unregister_interrupt);
5260 printk(KERN_INFO "mISDN: HFC-multi driver %s\n",
5261 hfcmulti_revision);
5262
5263 switch (poll) {
5264 case 0:
5265 poll_timer = 6;
5266 poll = 128;
5267 break;
5268	/*
5269	 * if this break ever disappears again, somebody is going
5270	 * to get their ears boxed :-)
5271	 * (in other words: do not remove the break above)
5272	 */
5273 case 8:
5274 poll_timer = 2;
5275 break;
5276 case 16:
5277 poll_timer = 3;
5278 break;
5279 case 32:
5280 poll_timer = 4;
5281 break;
5282 case 64:
5283 poll_timer = 5;
5284 break;
5285 case 128:
5286 poll_timer = 6;
5287 break;
5288 case 256:
5289 poll_timer = 7;
5290 break;
5291 default:
5292 printk(KERN_ERR
5293 "%s: Wrong poll value (%d).\n", __func__, poll);
5294 err = -EINVAL;
5295 return err;
5296
5297 }
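	/*
	 * Note: for power-of-two poll values between 8 and 256 samples the
	 * switch above amounts to poll_timer = ilog2(poll) - 1; poll == 0
	 * selects the default of 128 samples (poll_timer = 6).
	 */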
5298
5299 err = pci_register_driver(&hfcmultipci_driver);
5300 if (err < 0) {
5301		printk(KERN_ERR "error registering pci driver: %d\n", err);
5302 if (hfc_interrupt)
5303 symbol_put(ztdummy_extern_interrupt);
5304 if (register_interrupt)
5305 symbol_put(ztdummy_register_interrupt);
5306 if (unregister_interrupt) {
5307 if (interrupt_registered) {
5308 interrupt_registered = 0;
5309 unregister_interrupt();
5310 }
5311 symbol_put(ztdummy_unregister_interrupt);
5312 }
5313 return err;
5314 }
5315 return 0;
5316}
5317
5318
5319module_init(HFCmulti_init);
5320module_exit(HFCmulti_cleanup);
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
new file mode 100644
index 000000000000..917968530e1e
--- /dev/null
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -0,0 +1,2256 @@
1/*
2 *
3 * hfcpci.c low level driver for CCD's hfc-pci based cards
4 *
5 * Author Werner Cornelius (werner@isdn4linux.de)
6 * based on existing driver for CCD hfc ISA cards
7 * type approval valid for HFC-S PCI A based card
8 *
9 * Copyright 1999 by Werner Cornelius (werner@isdn-development.de)
10 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 */
27
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/delay.h>
31#include <linux/mISDNhw.h>
32
33#include "hfc_pci.h"
34
35static const char *hfcpci_revision = "2.0";
36
37#define MAX_CARDS 8
38static int HFC_cnt;
39static uint debug;
40
41MODULE_AUTHOR("Karsten Keil");
42MODULE_LICENSE("GPL");
43module_param(debug, uint, 0);
44
45static LIST_HEAD(HFClist);
46static DEFINE_RWLOCK(HFClock);
47
48enum {
49 HFC_CCD_2BD0,
50 HFC_CCD_B000,
51 HFC_CCD_B006,
52 HFC_CCD_B007,
53 HFC_CCD_B008,
54 HFC_CCD_B009,
55 HFC_CCD_B00A,
56 HFC_CCD_B00B,
57 HFC_CCD_B00C,
58 HFC_CCD_B100,
59 HFC_CCD_B700,
60 HFC_CCD_B701,
61 HFC_ASUS_0675,
62 HFC_BERKOM_A1T,
63 HFC_BERKOM_TCONCEPT,
64 HFC_ANIGMA_MC145575,
65 HFC_ZOLTRIX_2BD0,
66 HFC_DIGI_DF_M_IOM2_E,
67 HFC_DIGI_DF_M_E,
68 HFC_DIGI_DF_M_IOM2_A,
69 HFC_DIGI_DF_M_A,
70 HFC_ABOCOM_2BD1,
71 HFC_SITECOM_DC105V2,
72};
73
74struct hfcPCI_hw {
75 unsigned char cirm;
76 unsigned char ctmt;
77 unsigned char clkdel;
78 unsigned char states;
79 unsigned char conn;
80 unsigned char mst_m;
81 unsigned char int_m1;
82 unsigned char int_m2;
83 unsigned char sctrl;
84 unsigned char sctrl_r;
85 unsigned char sctrl_e;
86 unsigned char trm;
87 unsigned char fifo_en;
88 unsigned char bswapped;
89 unsigned char protocol;
90 int nt_timer;
91 unsigned char *pci_io; /* start of PCI IO memory */
92 dma_addr_t dmahandle;
93 void *fifos; /* FIFO memory */
94 int last_bfifo_cnt[2];
95 /* marker saving last b-fifo frame count */
96 struct timer_list timer;
97};
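/*
 * The fields above mirror the chip register contents; the driver updates
 * these soft copies and writes them out with Write_hfc(), so it never has
 * to read back the (largely write-only) HFC-PCI registers.
 */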
98
99#define HFC_CFG_MASTER 1
100#define HFC_CFG_SLAVE 2
101#define HFC_CFG_PCM 3
102#define HFC_CFG_2HFC 4
103#define HFC_CFG_SLAVEHFC 5
104#define HFC_CFG_NEG_F0 6
105#define HFC_CFG_SW_DD_DU 7
106
107#define FLG_HFC_TIMER_T1 16
108#define FLG_HFC_TIMER_T3 17
109
110#define NT_T1_COUNT 1120 /* number of 3.125ms interrupts (3.5s) */
111#define NT_T3_COUNT 31 /* number of 3.125ms interrupts (97 ms) */
112#define CLKDEL_TE 0x0e /* CLKDEL in TE mode */
113#define CLKDEL_NT 0x6c /* CLKDEL in NT mode */
114
115
116struct hfc_pci {
117 struct list_head list;
118 u_char subtype;
119 u_char chanlimit;
120 u_char initdone;
121 u_long cfg;
122 u_int irq;
123 u_int irqcnt;
124 struct pci_dev *pdev;
125 struct hfcPCI_hw hw;
126 spinlock_t lock; /* card lock */
127 struct dchannel dch;
128 struct bchannel bch[2];
129};
130
131/* Interface functions */
132static void
133enable_hwirq(struct hfc_pci *hc)
134{
135 hc->hw.int_m2 |= HFCPCI_IRQ_ENABLE;
136 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
137}
138
139static void
140disable_hwirq(struct hfc_pci *hc)
141{
142 hc->hw.int_m2 &= ~((u_char)HFCPCI_IRQ_ENABLE);
143 Write_hfc(hc, HFCPCI_INT_M2, hc->hw.int_m2);
144}
145
146/*
147 * free hardware resources used by driver
148 */
149static void
150release_io_hfcpci(struct hfc_pci *hc)
151{
152 /* disable memory mapped ports + busmaster */
153 pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
154 del_timer(&hc->hw.timer);
155 pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle);
156 iounmap((void *)hc->hw.pci_io);
157}
158
159/*
160 * set mode (NT or TE)
161 */
162static void
163hfcpci_setmode(struct hfc_pci *hc)
164{
165 if (hc->hw.protocol == ISDN_P_NT_S0) {
166 hc->hw.clkdel = CLKDEL_NT; /* ST-Bit delay for NT-Mode */
167 hc->hw.sctrl |= SCTRL_MODE_NT; /* NT-MODE */
168 hc->hw.states = 1; /* G1 */
169 } else {
170 hc->hw.clkdel = CLKDEL_TE; /* ST-Bit delay for TE-Mode */
171 hc->hw.sctrl &= ~SCTRL_MODE_NT; /* TE-MODE */
172 hc->hw.states = 2; /* F2 */
173 }
174 Write_hfc(hc, HFCPCI_CLKDEL, hc->hw.clkdel);
175 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | hc->hw.states);
176 udelay(10);
177 Write_hfc(hc, HFCPCI_STATES, hc->hw.states | 0x40); /* Deactivate */
178 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
179}
180
181/*
182 * function called to reset the HFC PCI chip. A complete software reset of chip
183 * and fifos is done.
184 */
185static void
186reset_hfcpci(struct hfc_pci *hc)
187{
188 u_char val;
189 int cnt = 0;
190
191 printk(KERN_DEBUG "reset_hfcpci: entered\n");
192 val = Read_hfc(hc, HFCPCI_CHIP_ID);
193 printk(KERN_INFO "HFC_PCI: resetting HFC ChipId(%x)\n", val);
194 /* enable memory mapped ports, disable busmaster */
195 pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
196 disable_hwirq(hc);
197 /* enable memory ports + busmaster */
198 pci_write_config_word(hc->pdev, PCI_COMMAND,
199 PCI_ENA_MEMIO + PCI_ENA_MASTER);
200 val = Read_hfc(hc, HFCPCI_STATUS);
201 printk(KERN_DEBUG "HFC-PCI status(%x) before reset\n", val);
202 hc->hw.cirm = HFCPCI_RESET; /* Reset On */
203 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
204 set_current_state(TASK_UNINTERRUPTIBLE);
205 mdelay(10); /* Timeout 10ms */
206 hc->hw.cirm = 0; /* Reset Off */
207 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
208 val = Read_hfc(hc, HFCPCI_STATUS);
209 printk(KERN_DEBUG "HFC-PCI status(%x) after reset\n", val);
210 while (cnt < 50000) { /* max 50000 us */
211 udelay(5);
212 cnt += 5;
213 val = Read_hfc(hc, HFCPCI_STATUS);
214 if (!(val & 2))
215 break;
216 }
217 printk(KERN_DEBUG "HFC-PCI status(%x) after %dus\n", val, cnt);
218
219 hc->hw.fifo_en = 0x30; /* only D fifos enabled */
220
221 hc->hw.bswapped = 0; /* no exchange */
222 hc->hw.ctmt = HFCPCI_TIM3_125 | HFCPCI_AUTO_TIMER;
223	hc->hw.trm = HFCPCI_BTRANS_THRESMASK; /* no echo connect, threshold */
224	hc->hw.sctrl = 0x40; /* set tx_lo mode, error in datasheet! */
225 hc->hw.sctrl_r = 0;
226 hc->hw.sctrl_e = HFCPCI_AUTO_AWAKE; /* S/T Auto awake */
227 hc->hw.mst_m = 0;
228 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
229 hc->hw.mst_m |= HFCPCI_MASTER; /* HFC Master Mode */
230 if (test_bit(HFC_CFG_NEG_F0, &hc->cfg))
231 hc->hw.mst_m |= HFCPCI_F0_NEGATIV;
232 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
233 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
234 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
235 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
236
237 hc->hw.int_m1 = HFCPCI_INTS_DTRANS | HFCPCI_INTS_DREC |
238 HFCPCI_INTS_L1STATE | HFCPCI_INTS_TIMER;
239 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
240
241 /* Clear already pending ints */
242	(void) Read_hfc(hc, HFCPCI_INT_S1);
243
244 /* set NT/TE mode */
245 hfcpci_setmode(hc);
246
247 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
248 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
249
250 /*
251 * Init GCI/IOM2 in master mode
252 * Slots 0 and 1 are set for B-chan 1 and 2
253 * D- and monitor/CI channel are not enabled
254 * STIO1 is used as output for data, B1+B2 from ST->IOM+HFC
255 * STIO2 is used as data input, B1+B2 from IOM->ST
256	 * ST B-channel send disabled -> continuous 1s
257 * The IOM slots are always enabled
258 */
259 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
260 /* set data flow directions: connect B1,B2: HFC to/from PCM */
261 hc->hw.conn = 0x09;
262 } else {
263 hc->hw.conn = 0x36; /* set data flow directions */
264 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
265 Write_hfc(hc, HFCPCI_B1_SSL, 0xC0);
266 Write_hfc(hc, HFCPCI_B2_SSL, 0xC1);
267 Write_hfc(hc, HFCPCI_B1_RSL, 0xC0);
268 Write_hfc(hc, HFCPCI_B2_RSL, 0xC1);
269 } else {
270 Write_hfc(hc, HFCPCI_B1_SSL, 0x80);
271 Write_hfc(hc, HFCPCI_B2_SSL, 0x81);
272 Write_hfc(hc, HFCPCI_B1_RSL, 0x80);
273 Write_hfc(hc, HFCPCI_B2_RSL, 0x81);
274 }
275 }
276 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
277 val = Read_hfc(hc, HFCPCI_INT_S2);
278}
279
280/*
281 * Timer function called when kernel timer expires
282 */
283static void
284hfcpci_Timer(struct hfc_pci *hc)
285{
286 hc->hw.timer.expires = jiffies + 75;
287 /* WD RESET */
288/*
289 * WriteReg(hc, HFCD_DATA, HFCD_CTMT, hc->hw.ctmt | 0x80);
290 * add_timer(&hc->hw.timer);
291 */
292}
293
294
295/*
296 * select the B-channel entry that matches the given channel and is active
297 */
298static struct bchannel *
299Sel_BCS(struct hfc_pci *hc, int channel)
300{
301 if (test_bit(FLG_ACTIVE, &hc->bch[0].Flags) &&
302 (hc->bch[0].nr & channel))
303 return &hc->bch[0];
304 else if (test_bit(FLG_ACTIVE, &hc->bch[1].Flags) &&
305 (hc->bch[1].nr & channel))
306 return &hc->bch[1];
307 else
308 return NULL;
309}
310
311/*
312 * clear the desired B-channel rx fifo
313 */
314static void
315hfcpci_clear_fifo_rx(struct hfc_pci *hc, int fifo)
316{
317 u_char fifo_state;
318 struct bzfifo *bzr;
319
320 if (fifo) {
321 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
322 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2RX;
323 } else {
324 bzr = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
325 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1RX;
326 }
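	/*
	 * Temporarily take this FIFO out of fifo_en (the XOR clears exactly
	 * the bit saved in fifo_state) while its F/Z counters are
	 * re-initialised; it is switched back on below if it was enabled.
	 */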
327 if (fifo_state)
328 hc->hw.fifo_en ^= fifo_state;
329 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
330 hc->hw.last_bfifo_cnt[fifo] = 0;
331 bzr->f1 = MAX_B_FRAMES;
332 bzr->f2 = bzr->f1; /* init F pointers to remain constant */
333 bzr->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
334 bzr->za[MAX_B_FRAMES].z2 = cpu_to_le16(
335 le16_to_cpu(bzr->za[MAX_B_FRAMES].z1));
336 if (fifo_state)
337 hc->hw.fifo_en |= fifo_state;
338 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
339}
340
341/*
342 * clear the desired B-channel tx fifo
343 */
344static void hfcpci_clear_fifo_tx(struct hfc_pci *hc, int fifo)
345{
346 u_char fifo_state;
347 struct bzfifo *bzt;
348
349 if (fifo) {
350 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
351 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B2TX;
352 } else {
353 bzt = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
354 fifo_state = hc->hw.fifo_en & HFCPCI_FIFOEN_B1TX;
355 }
356 if (fifo_state)
357 hc->hw.fifo_en ^= fifo_state;
358 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
359 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
360 printk(KERN_DEBUG "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) "
361 "z1(%x) z2(%x) state(%x)\n",
362 fifo, bzt->f1, bzt->f2,
363 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
364 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2),
365 fifo_state);
366 bzt->f2 = MAX_B_FRAMES;
367 bzt->f1 = bzt->f2; /* init F pointers to remain constant */
368 bzt->za[MAX_B_FRAMES].z1 = cpu_to_le16(B_FIFO_SIZE + B_SUB_VAL - 1);
369 bzt->za[MAX_B_FRAMES].z2 = cpu_to_le16(
370		le16_to_cpu(bzt->za[MAX_B_FRAMES].z1) - 1);
371 if (fifo_state)
372 hc->hw.fifo_en |= fifo_state;
373 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
374 if (hc->bch[fifo].debug & DEBUG_HW_BCHANNEL)
375 printk(KERN_DEBUG
376 "hfcpci_clear_fifo_tx%d f1(%x) f2(%x) z1(%x) z2(%x)\n",
377 fifo, bzt->f1, bzt->f2,
378 le16_to_cpu(bzt->za[MAX_B_FRAMES].z1),
379 le16_to_cpu(bzt->za[MAX_B_FRAMES].z2));
380}
381
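/*
 * B-FIFO bookkeeping: the F counters (0..MAX_B_FRAMES) number the buffered
 * frames, while the Z counters hold byte offsets into the FIFO data area,
 * biased by B_SUB_VAL and wrapping at B_FIFO_SIZE. For the frame selected
 * by f2, z2 is its first byte and z1 its last byte (the status byte, which
 * is non-zero on a CRC error); the two CRC bytes and the status byte are
 * stripped before the frame is passed up.
 */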
382/*
383 * read a complete B-frame out of the buffer
384 */
385static void
386hfcpci_empty_bfifo(struct bchannel *bch, struct bzfifo *bz,
387 u_char *bdata, int count)
388{
389 u_char *ptr, *ptr1, new_f2;
390 int total, maxlen, new_z2;
391 struct zt *zp;
392
393 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
394 printk(KERN_DEBUG "hfcpci_empty_fifo\n");
395 zp = &bz->za[bz->f2]; /* point to Z-Regs */
396 new_z2 = le16_to_cpu(zp->z2) + count; /* new position in fifo */
397 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
398 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
399 new_f2 = (bz->f2 + 1) & MAX_B_FRAMES;
400 if ((count > MAX_DATA_SIZE + 3) || (count < 4) ||
401 (*(bdata + (le16_to_cpu(zp->z1) - B_SUB_VAL)))) {
402 if (bch->debug & DEBUG_HW)
403 printk(KERN_DEBUG "hfcpci_empty_fifo: incoming packet "
404 "invalid length %d or crc\n", count);
405#ifdef ERROR_STATISTIC
406 bch->err_inv++;
407#endif
408 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
409 bz->f2 = new_f2; /* next buffer */
410 } else {
411 bch->rx_skb = mI_alloc_skb(count - 3, GFP_ATOMIC);
412 if (!bch->rx_skb) {
413 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
414 return;
415 }
416 total = count;
417 count -= 3;
418 ptr = skb_put(bch->rx_skb, count);
419
420 if (le16_to_cpu(zp->z2) + count <= B_FIFO_SIZE + B_SUB_VAL)
421 maxlen = count; /* complete transfer */
422 else
423 maxlen = B_FIFO_SIZE + B_SUB_VAL -
424 le16_to_cpu(zp->z2); /* maximum */
425
426 ptr1 = bdata + (le16_to_cpu(zp->z2) - B_SUB_VAL);
427 /* start of data */
428 memcpy(ptr, ptr1, maxlen); /* copy data */
429 count -= maxlen;
430
431 if (count) { /* rest remaining */
432 ptr += maxlen;
433 ptr1 = bdata; /* start of buffer */
434 memcpy(ptr, ptr1, count); /* rest */
435 }
436 bz->za[new_f2].z2 = cpu_to_le16(new_z2);
437 bz->f2 = new_f2; /* next buffer */
438 recv_Bchannel(bch);
439 }
440}
441
442/*
443 * D-channel receive procedure
444 */
445static int
446receive_dmsg(struct hfc_pci *hc)
447{
448 struct dchannel *dch = &hc->dch;
449 int maxlen;
450 int rcnt, total;
451 int count = 5;
452 u_char *ptr, *ptr1;
453 struct dfifo *df;
454 struct zt *zp;
455
456 df = &((union fifo_area *)(hc->hw.fifos))->d_chan.d_rx;
457 while (((df->f1 & D_FREG_MASK) != (df->f2 & D_FREG_MASK)) && count--) {
458 zp = &df->za[df->f2 & D_FREG_MASK];
459 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
460 if (rcnt < 0)
461 rcnt += D_FIFO_SIZE;
462 rcnt++;
463 if (dch->debug & DEBUG_HW_DCHANNEL)
464 printk(KERN_DEBUG
465 "hfcpci recd f1(%d) f2(%d) z1(%x) z2(%x) cnt(%d)\n",
466 df->f1, df->f2,
467 le16_to_cpu(zp->z1),
468 le16_to_cpu(zp->z2),
469 rcnt);
470
471 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
472 (df->data[le16_to_cpu(zp->z1)])) {
473 if (dch->debug & DEBUG_HW)
474 printk(KERN_DEBUG
475				    "empty_fifo hfcpci invalid packet len "
476 "%d or crc %d\n",
477 rcnt,
478 df->data[le16_to_cpu(zp->z1)]);
479#ifdef ERROR_STATISTIC
480 cs->err_rx++;
481#endif
482 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
483 (MAX_D_FRAMES + 1); /* next buffer */
484 df->za[df->f2 & D_FREG_MASK].z2 =
485 cpu_to_le16((zp->z2 + rcnt) & (D_FIFO_SIZE - 1));
486 } else {
487 dch->rx_skb = mI_alloc_skb(rcnt - 3, GFP_ATOMIC);
488 if (!dch->rx_skb) {
489 printk(KERN_WARNING
490 "HFC-PCI: D receive out of memory\n");
491 break;
492 }
493 total = rcnt;
494 rcnt -= 3;
495 ptr = skb_put(dch->rx_skb, rcnt);
496
497 if (le16_to_cpu(zp->z2) + rcnt <= D_FIFO_SIZE)
498 maxlen = rcnt; /* complete transfer */
499 else
500 maxlen = D_FIFO_SIZE - le16_to_cpu(zp->z2);
501 /* maximum */
502
503 ptr1 = df->data + le16_to_cpu(zp->z2);
504 /* start of data */
505 memcpy(ptr, ptr1, maxlen); /* copy data */
506 rcnt -= maxlen;
507
508 if (rcnt) { /* rest remaining */
509 ptr += maxlen;
510 ptr1 = df->data; /* start of buffer */
511 memcpy(ptr, ptr1, rcnt); /* rest */
512 }
513 df->f2 = ((df->f2 + 1) & MAX_D_FRAMES) |
514 (MAX_D_FRAMES + 1); /* next buffer */
515 df->za[df->f2 & D_FREG_MASK].z2 = cpu_to_le16((
516 le16_to_cpu(zp->z2) + total) & (D_FIFO_SIZE - 1));
517 recv_Dchannel(dch);
518 }
519 }
520 return 1;
521}
522
523/*
524 * check for transparent receive data and read at most one threshold-sized block if available
525 */
526int
527hfcpci_empty_fifo_trans(struct bchannel *bch, struct bzfifo *bz, u_char *bdata)
528{
529 unsigned short *z1r, *z2r;
530 int new_z2, fcnt, maxlen;
531 u_char *ptr, *ptr1;
532
533 z1r = &bz->za[MAX_B_FRAMES].z1; /* pointer to z reg */
534 z2r = z1r + 1;
535
536 fcnt = le16_to_cpu(*z1r) - le16_to_cpu(*z2r);
537 if (!fcnt)
538 return 0; /* no data avail */
539
540 if (fcnt <= 0)
541 fcnt += B_FIFO_SIZE; /* bytes actually buffered */
542 if (fcnt > HFCPCI_BTRANS_THRESHOLD)
543 fcnt = HFCPCI_BTRANS_THRESHOLD; /* limit size */
544
545 new_z2 = le16_to_cpu(*z2r) + fcnt; /* new position in fifo */
546 if (new_z2 >= (B_FIFO_SIZE + B_SUB_VAL))
547 new_z2 -= B_FIFO_SIZE; /* buffer wrap */
548
549 bch->rx_skb = mI_alloc_skb(fcnt, GFP_ATOMIC);
550 if (bch->rx_skb) {
551 ptr = skb_put(bch->rx_skb, fcnt);
552 if (le16_to_cpu(*z2r) + fcnt <= B_FIFO_SIZE + B_SUB_VAL)
553 maxlen = fcnt; /* complete transfer */
554 else
555 maxlen = B_FIFO_SIZE + B_SUB_VAL - le16_to_cpu(*z2r);
556 /* maximum */
557
558 ptr1 = bdata + (le16_to_cpu(*z2r) - B_SUB_VAL);
559 /* start of data */
560 memcpy(ptr, ptr1, maxlen); /* copy data */
561 fcnt -= maxlen;
562
563 if (fcnt) { /* rest remaining */
564 ptr += maxlen;
565 ptr1 = bdata; /* start of buffer */
566 memcpy(ptr, ptr1, fcnt); /* rest */
567 }
568 recv_Bchannel(bch);
569 } else
570 printk(KERN_WARNING "HFCPCI: receive out of memory\n");
571
572 *z2r = cpu_to_le16(new_z2); /* new position */
573 return 1;
574}
575
576/*
577 * B-channel main receive routine
578 */
579void
580main_rec_hfcpci(struct bchannel *bch)
581{
582 struct hfc_pci *hc = bch->hw;
583 int rcnt, real_fifo;
584 int receive, count = 5;
585 struct bzfifo *bz;
586 u_char *bdata;
587 struct zt *zp;
588
589
590 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
591 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b2;
592 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b2;
593 real_fifo = 1;
594 } else {
595 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.rxbz_b1;
596 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.rxdat_b1;
597 real_fifo = 0;
598 }
599Begin:
600 count--;
601 if (bz->f1 != bz->f2) {
602 if (bch->debug & DEBUG_HW_BCHANNEL)
603 printk(KERN_DEBUG "hfcpci rec ch(%x) f1(%d) f2(%d)\n",
604 bch->nr, bz->f1, bz->f2);
605 zp = &bz->za[bz->f2];
606
607 rcnt = le16_to_cpu(zp->z1) - le16_to_cpu(zp->z2);
608 if (rcnt < 0)
609 rcnt += B_FIFO_SIZE;
610 rcnt++;
611 if (bch->debug & DEBUG_HW_BCHANNEL)
612 printk(KERN_DEBUG
613 "hfcpci rec ch(%x) z1(%x) z2(%x) cnt(%d)\n",
614 bch->nr, le16_to_cpu(zp->z1),
615 le16_to_cpu(zp->z2), rcnt);
616 hfcpci_empty_bfifo(bch, bz, bdata, rcnt);
617 rcnt = bz->f1 - bz->f2;
618 if (rcnt < 0)
619 rcnt += MAX_B_FRAMES + 1;
620 if (hc->hw.last_bfifo_cnt[real_fifo] > rcnt + 1) {
621 rcnt = 0;
622 hfcpci_clear_fifo_rx(hc, real_fifo);
623 }
624 hc->hw.last_bfifo_cnt[real_fifo] = rcnt;
625 if (rcnt > 1)
626 receive = 1;
627 else
628 receive = 0;
629 } else if (test_bit(FLG_TRANSPARENT, &bch->Flags))
630 receive = hfcpci_empty_fifo_trans(bch, bz, bdata);
631 else
632 receive = 0;
633 if (count && receive)
634 goto Begin;
635
636}
637
638/*
639 * D-channel send routine
640 */
641static void
642hfcpci_fill_dfifo(struct hfc_pci *hc)
643{
644 struct dchannel *dch = &hc->dch;
645 int fcnt;
646 int count, new_z1, maxlen;
647 struct dfifo *df;
648 u_char *src, *dst, new_f1;
649
650 if ((dch->debug & DEBUG_HW_DCHANNEL) && !(dch->debug & DEBUG_HW_DFIFO))
651 printk(KERN_DEBUG "%s\n", __func__);
652
653 if (!dch->tx_skb)
654 return;
655 count = dch->tx_skb->len - dch->tx_idx;
656 if (count <= 0)
657 return;
658 df = &((union fifo_area *) (hc->hw.fifos))->d_chan.d_tx;
659
660 if (dch->debug & DEBUG_HW_DFIFO)
661 printk(KERN_DEBUG "%s:f1(%d) f2(%d) z1(f1)(%x)\n", __func__,
662 df->f1, df->f2,
663 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1));
664 fcnt = df->f1 - df->f2; /* frame count actually buffered */
665 if (fcnt < 0)
666 fcnt += (MAX_D_FRAMES + 1); /* if wrap around */
667 if (fcnt > (MAX_D_FRAMES - 1)) {
668 if (dch->debug & DEBUG_HW_DCHANNEL)
669 printk(KERN_DEBUG
670			    "hfcpci_fill_Dfifo more than 14 frames\n");
671#ifdef ERROR_STATISTIC
672 cs->err_tx++;
673#endif
674 return;
675 }
676 /* now determine free bytes in FIFO buffer */
677 maxlen = le16_to_cpu(df->za[df->f2 & D_FREG_MASK].z2) -
678 le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) - 1;
679 if (maxlen <= 0)
680		maxlen += D_FIFO_SIZE; /* maxlen now contains available bytes */
681
682 if (dch->debug & DEBUG_HW_DCHANNEL)
683 printk(KERN_DEBUG "hfcpci_fill_Dfifo count(%d/%d)\n",
684 count, maxlen);
685 if (count > maxlen) {
686 if (dch->debug & DEBUG_HW_DCHANNEL)
687 printk(KERN_DEBUG "hfcpci_fill_Dfifo no fifo mem\n");
688 return;
689 }
690 new_z1 = (le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1) + count) &
691 (D_FIFO_SIZE - 1);
692 new_f1 = ((df->f1 + 1) & D_FREG_MASK) | (D_FREG_MASK + 1);
693 src = dch->tx_skb->data + dch->tx_idx; /* source pointer */
694 dst = df->data + le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
695 maxlen = D_FIFO_SIZE - le16_to_cpu(df->za[df->f1 & D_FREG_MASK].z1);
696 /* end fifo */
697 if (maxlen > count)
698 maxlen = count; /* limit size */
699 memcpy(dst, src, maxlen); /* first copy */
700
701 count -= maxlen; /* remaining bytes */
702 if (count) {
703 dst = df->data; /* start of buffer */
704 src += maxlen; /* new position */
705 memcpy(dst, src, count);
706 }
707 df->za[new_f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
708 /* for next buffer */
709 df->za[df->f1 & D_FREG_MASK].z1 = cpu_to_le16(new_z1);
710 /* new pos actual buffer */
711 df->f1 = new_f1; /* next frame */
712 dch->tx_idx = dch->tx_skb->len;
713}
714
715/*
716 * B-channel send routine
717 */
718static void
719hfcpci_fill_fifo(struct bchannel *bch)
720{
721 struct hfc_pci *hc = bch->hw;
722 int maxlen, fcnt;
723 int count, new_z1;
724 struct bzfifo *bz;
725 u_char *bdata;
726 u_char new_f1, *src, *dst;
727 unsigned short *z1t, *z2t;
728
729 if ((bch->debug & DEBUG_HW_BCHANNEL) && !(bch->debug & DEBUG_HW_BFIFO))
730 printk(KERN_DEBUG "%s\n", __func__);
731 if ((!bch->tx_skb) || bch->tx_skb->len <= 0)
732 return;
733 count = bch->tx_skb->len - bch->tx_idx;
734 if ((bch->nr & 2) && (!hc->hw.bswapped)) {
735 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b2;
736 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b2;
737 } else {
738 bz = &((union fifo_area *)(hc->hw.fifos))->b_chans.txbz_b1;
739 bdata = ((union fifo_area *)(hc->hw.fifos))->b_chans.txdat_b1;
740 }
741
742 if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
743 z1t = &bz->za[MAX_B_FRAMES].z1;
744 z2t = z1t + 1;
745 if (bch->debug & DEBUG_HW_BCHANNEL)
746 printk(KERN_DEBUG "hfcpci_fill_fifo_trans ch(%x) "
747 "cnt(%d) z1(%x) z2(%x)\n", bch->nr, count,
748 le16_to_cpu(*z1t), le16_to_cpu(*z2t));
749 fcnt = le16_to_cpu(*z2t) - le16_to_cpu(*z1t);
750 if (fcnt <= 0)
751 fcnt += B_FIFO_SIZE;
752 /* fcnt contains available bytes in fifo */
753 fcnt = B_FIFO_SIZE - fcnt;
754 /* remaining bytes to send (bytes in fifo) */
755next_t_frame:
756 count = bch->tx_skb->len - bch->tx_idx;
757 /* maximum fill shall be HFCPCI_BTRANS_MAX */
758 if (count > HFCPCI_BTRANS_MAX - fcnt)
759 count = HFCPCI_BTRANS_MAX - fcnt;
760 if (count <= 0)
761 return;
762 /* data is suitable for fifo */
763 new_z1 = le16_to_cpu(*z1t) + count;
764		/* new buffer position */
765 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
766 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
767 src = bch->tx_skb->data + bch->tx_idx;
768 /* source pointer */
769 dst = bdata + (le16_to_cpu(*z1t) - B_SUB_VAL);
770 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(*z1t);
771 /* end of fifo */
772 if (bch->debug & DEBUG_HW_BFIFO)
773 printk(KERN_DEBUG "hfcpci_FFt fcnt(%d) "
774 "maxl(%d) nz1(%x) dst(%p)\n",
775 fcnt, maxlen, new_z1, dst);
776 fcnt += count;
777 bch->tx_idx += count;
778 if (maxlen > count)
779 maxlen = count; /* limit size */
780 memcpy(dst, src, maxlen); /* first copy */
781 count -= maxlen; /* remaining bytes */
782 if (count) {
783 dst = bdata; /* start of buffer */
784 src += maxlen; /* new position */
785 memcpy(dst, src, count);
786 }
787 *z1t = cpu_to_le16(new_z1); /* now send data */
788 if (bch->tx_idx < bch->tx_skb->len)
789 return;
790		/* on transparent data send a confirm; the skb is freed in any case */
791 if (test_bit(FLG_TRANSPARENT, &bch->Flags))
792 confirm_Bsend(bch);
793 dev_kfree_skb(bch->tx_skb);
794 if (get_next_bframe(bch))
795 goto next_t_frame;
796 return;
797 }
798 if (bch->debug & DEBUG_HW_BCHANNEL)
799 printk(KERN_DEBUG
800 "%s: ch(%x) f1(%d) f2(%d) z1(f1)(%x)\n",
801 __func__, bch->nr, bz->f1, bz->f2,
802 bz->za[bz->f1].z1);
803 fcnt = bz->f1 - bz->f2; /* frame count actually buffered */
804 if (fcnt < 0)
805 fcnt += (MAX_B_FRAMES + 1); /* if wrap around */
806 if (fcnt > (MAX_B_FRAMES - 1)) {
807 if (bch->debug & DEBUG_HW_BCHANNEL)
808 printk(KERN_DEBUG
809			    "hfcpci_fill_Bfifo more than 14 frames\n");
810 return;
811 }
812 /* now determine free bytes in FIFO buffer */
813 maxlen = le16_to_cpu(bz->za[bz->f2].z2) -
814 le16_to_cpu(bz->za[bz->f1].z1) - 1;
815 if (maxlen <= 0)
816		maxlen += B_FIFO_SIZE; /* maxlen now contains available bytes */
817
818 if (bch->debug & DEBUG_HW_BCHANNEL)
819 printk(KERN_DEBUG "hfcpci_fill_fifo ch(%x) count(%d/%d)\n",
820 bch->nr, count, maxlen);
821
822 if (maxlen < count) {
823 if (bch->debug & DEBUG_HW_BCHANNEL)
824 printk(KERN_DEBUG "hfcpci_fill_fifo no fifo mem\n");
825 return;
826 }
827 new_z1 = le16_to_cpu(bz->za[bz->f1].z1) + count;
828	/* new buffer position */
829 if (new_z1 >= (B_FIFO_SIZE + B_SUB_VAL))
830 new_z1 -= B_FIFO_SIZE; /* buffer wrap */
831
832 new_f1 = ((bz->f1 + 1) & MAX_B_FRAMES);
833 src = bch->tx_skb->data + bch->tx_idx; /* source pointer */
834 dst = bdata + (le16_to_cpu(bz->za[bz->f1].z1) - B_SUB_VAL);
835 maxlen = (B_FIFO_SIZE + B_SUB_VAL) - le16_to_cpu(bz->za[bz->f1].z1);
836 /* end fifo */
837 if (maxlen > count)
838 maxlen = count; /* limit size */
839 memcpy(dst, src, maxlen); /* first copy */
840
841 count -= maxlen; /* remaining bytes */
842 if (count) {
843 dst = bdata; /* start of buffer */
844 src += maxlen; /* new position */
845 memcpy(dst, src, count);
846 }
847 bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */
848 bz->f1 = new_f1; /* next frame */
849 dev_kfree_skb(bch->tx_skb);
850 get_next_bframe(bch);
851}
852
853
854
855/*
856 * handle L1 state changes TE
857 */
858
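/*
 * The state numbers delivered by the chip follow the S/T F-state numbering
 * of the TE state machine: F0 reset, F3 deactivated, F5/F8 unsynchronized or
 * lost framing, F6 INFO2 received, F7 activated (INFO4).
 */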
859static void
860ph_state_te(struct dchannel *dch)
861{
862 if (dch->debug)
863 printk(KERN_DEBUG "%s: TE newstate %x\n",
864 __func__, dch->state);
865 switch (dch->state) {
866 case 0:
867 l1_event(dch->l1, HW_RESET_IND);
868 break;
869 case 3:
870 l1_event(dch->l1, HW_DEACT_IND);
871 break;
872 case 5:
873 case 8:
874 l1_event(dch->l1, ANYSIGNAL);
875 break;
876 case 6:
877 l1_event(dch->l1, INFO2);
878 break;
879 case 7:
880 l1_event(dch->l1, INFO4_P8);
881 break;
882 }
883}
884
885/*
886 * handle L1 state changes NT
887 */
888
889static void
890handle_nt_timer3(struct dchannel *dch) {
891 struct hfc_pci *hc = dch->hw;
892
893 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
894 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
895 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
896 hc->hw.nt_timer = 0;
897 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
898 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
899 hc->hw.mst_m |= HFCPCI_MASTER;
900 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
901 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
902 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
903}
904
905static void
906ph_state_nt(struct dchannel *dch)
907{
908 struct hfc_pci *hc = dch->hw;
909
910 if (dch->debug)
911 printk(KERN_DEBUG "%s: NT newstate %x\n",
912 __func__, dch->state);
913 switch (dch->state) {
914 case 2:
915 if (hc->hw.nt_timer < 0) {
916 hc->hw.nt_timer = 0;
917 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
918 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
919 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
920 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
921 /* Clear already pending ints */
922			(void) Read_hfc(hc, HFCPCI_INT_S1);
923 Write_hfc(hc, HFCPCI_STATES, 4 | HFCPCI_LOAD_STATE);
924 udelay(10);
925 Write_hfc(hc, HFCPCI_STATES, 4);
926 dch->state = 4;
927 } else if (hc->hw.nt_timer == 0) {
928 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
929 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
930 hc->hw.nt_timer = NT_T1_COUNT;
931 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
932 hc->hw.ctmt |= HFCPCI_TIM3_125;
933 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
934 HFCPCI_CLTIMER);
935 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
936 test_and_set_bit(FLG_HFC_TIMER_T1, &dch->Flags);
937 /* allow G2 -> G3 transition */
938 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
939 } else {
940 Write_hfc(hc, HFCPCI_STATES, 2 | HFCPCI_NT_G2_G3);
941 }
942 break;
943 case 1:
944 hc->hw.nt_timer = 0;
945 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
946 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
947 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
948 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
949 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
950 hc->hw.mst_m &= ~HFCPCI_MASTER;
951 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
952 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
953 _queue_data(&dch->dev.D, PH_DEACTIVATE_IND,
954 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
955 break;
956 case 4:
957 hc->hw.nt_timer = 0;
958 test_and_clear_bit(FLG_HFC_TIMER_T3, &dch->Flags);
959 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
960 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
961 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
962 break;
963 case 3:
964 if (!test_and_set_bit(FLG_HFC_TIMER_T3, &dch->Flags)) {
965 if (!test_and_clear_bit(FLG_L2_ACTIVATED,
966 &dch->Flags)) {
967 handle_nt_timer3(dch);
968 break;
969 }
970 test_and_clear_bit(FLG_HFC_TIMER_T1, &dch->Flags);
971 hc->hw.int_m1 |= HFCPCI_INTS_TIMER;
972 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
973 hc->hw.nt_timer = NT_T3_COUNT;
974 hc->hw.ctmt &= ~HFCPCI_AUTO_TIMER;
975 hc->hw.ctmt |= HFCPCI_TIM3_125;
976 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt |
977 HFCPCI_CLTIMER);
978 }
979 break;
980 }
981}
982
983static void
984ph_state(struct dchannel *dch)
985{
986 struct hfc_pci *hc = dch->hw;
987
988 if (hc->hw.protocol == ISDN_P_NT_S0) {
989 if (test_bit(FLG_HFC_TIMER_T3, &dch->Flags) &&
990 hc->hw.nt_timer < 0)
991 handle_nt_timer3(dch);
992 else
993 ph_state_nt(dch);
994 } else
995 ph_state_te(dch);
996}
997
998/*
999 * Layer 1 callback function
1000 */
1001static int
1002hfc_l1callback(struct dchannel *dch, u_int cmd)
1003{
1004 struct hfc_pci *hc = dch->hw;
1005
1006 switch (cmd) {
1007 case INFO3_P8:
1008 case INFO3_P10:
1009 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1010 hc->hw.mst_m |= HFCPCI_MASTER;
1011 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1012 break;
1013 case HW_RESET_REQ:
1014 Write_hfc(hc, HFCPCI_STATES, HFCPCI_LOAD_STATE | 3);
1015 /* HFC ST 3 */
1016 udelay(6);
1017		Write_hfc(hc, HFCPCI_STATES, 3);	/* HFC ST 3, release load */
1018 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1019 hc->hw.mst_m |= HFCPCI_MASTER;
1020 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1021 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1022 HFCPCI_DO_ACTION);
1023 l1_event(dch->l1, HW_POWERUP_IND);
1024 break;
1025 case HW_DEACT_REQ:
1026 hc->hw.mst_m &= ~HFCPCI_MASTER;
1027 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1028 skb_queue_purge(&dch->squeue);
1029 if (dch->tx_skb) {
1030 dev_kfree_skb(dch->tx_skb);
1031 dch->tx_skb = NULL;
1032 }
1033 dch->tx_idx = 0;
1034 if (dch->rx_skb) {
1035 dev_kfree_skb(dch->rx_skb);
1036 dch->rx_skb = NULL;
1037 }
1038 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1039 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1040 del_timer(&dch->timer);
1041 break;
1042 case HW_POWERUP_REQ:
1043 Write_hfc(hc, HFCPCI_STATES, HFCPCI_DO_ACTION);
1044 break;
1045 case PH_ACTIVATE_IND:
1046 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
1047 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1048 GFP_ATOMIC);
1049 break;
1050 case PH_DEACTIVATE_IND:
1051 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
1052 _queue_data(&dch->dev.D, cmd, MISDN_ID_ANY, 0, NULL,
1053 GFP_ATOMIC);
1054 break;
1055 default:
1056 if (dch->debug & DEBUG_HW)
1057 printk(KERN_DEBUG "%s: unknown command %x\n",
1058 __func__, cmd);
1059 return -1;
1060 }
1061 return 0;
1062}
1063
1064/*
1065 * Interrupt handler
1066 */
1067static inline void
1068tx_birq(struct bchannel *bch)
1069{
1070 if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
1071 hfcpci_fill_fifo(bch);
1072 else {
1073 if (bch->tx_skb)
1074 dev_kfree_skb(bch->tx_skb);
1075 if (get_next_bframe(bch))
1076 hfcpci_fill_fifo(bch);
1077 }
1078}
1079
1080static inline void
1081tx_dirq(struct dchannel *dch)
1082{
1083 if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
1084 hfcpci_fill_dfifo(dch->hw);
1085 else {
1086 if (dch->tx_skb)
1087 dev_kfree_skb(dch->tx_skb);
1088 if (get_next_dframe(dch))
1089 hfcpci_fill_dfifo(dch->hw);
1090 }
1091}
1092
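/*
 * INT_S1 bits as handled below: 0x40 state machine change, 0x80 timer,
 * 0x20 D-channel receive, 0x04 D-frame transmitted, 0x08/0x01 receive and
 * transmit for B1 (or B2 when hw.bswapped is set) and 0x10/0x02 for B2.
 */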
1093static irqreturn_t
1094hfcpci_int(int intno, void *dev_id)
1095{
1096 struct hfc_pci *hc = dev_id;
1097 u_char exval;
1098 struct bchannel *bch;
1099 u_char val, stat;
1100
1101 spin_lock(&hc->lock);
1102 if (!(hc->hw.int_m2 & 0x08)) {
1103 spin_unlock(&hc->lock);
1104 return IRQ_NONE; /* not initialised */
1105 }
1106 stat = Read_hfc(hc, HFCPCI_STATUS);
1107 if (HFCPCI_ANYINT & stat) {
1108 val = Read_hfc(hc, HFCPCI_INT_S1);
1109 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1110 printk(KERN_DEBUG
1111 "HFC-PCI: stat(%02x) s1(%02x)\n", stat, val);
1112 } else {
1113 /* shared */
1114 spin_unlock(&hc->lock);
1115 return IRQ_NONE;
1116 }
1117 hc->irqcnt++;
1118
1119 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1120 printk(KERN_DEBUG "HFC-PCI irq %x\n", val);
1121 val &= hc->hw.int_m1;
1122 if (val & 0x40) { /* state machine irq */
1123 exval = Read_hfc(hc, HFCPCI_STATES) & 0xf;
1124 if (hc->dch.debug & DEBUG_HW_DCHANNEL)
1125 printk(KERN_DEBUG "ph_state chg %d->%d\n",
1126 hc->dch.state, exval);
1127 hc->dch.state = exval;
1128 schedule_event(&hc->dch, FLG_PHCHANGE);
1129 val &= ~0x40;
1130 }
1131 if (val & 0x80) { /* timer irq */
1132 if (hc->hw.protocol == ISDN_P_NT_S0) {
1133 if ((--hc->hw.nt_timer) < 0)
1134 schedule_event(&hc->dch, FLG_PHCHANGE);
1135 }
1136 val &= ~0x80;
1137 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt | HFCPCI_CLTIMER);
1138 }
1139 if (val & 0x08) {
1140 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1141 if (bch)
1142 main_rec_hfcpci(bch);
1143 else if (hc->dch.debug)
1144 printk(KERN_DEBUG "hfcpci spurious 0x08 IRQ\n");
1145 }
1146 if (val & 0x10) {
1147 bch = Sel_BCS(hc, 2);
1148 if (bch)
1149 main_rec_hfcpci(bch);
1150 else if (hc->dch.debug)
1151 printk(KERN_DEBUG "hfcpci spurious 0x10 IRQ\n");
1152 }
1153 if (val & 0x01) {
1154 bch = Sel_BCS(hc, hc->hw.bswapped ? 2 : 1);
1155 if (bch)
1156 tx_birq(bch);
1157 else if (hc->dch.debug)
1158 printk(KERN_DEBUG "hfcpci spurious 0x01 IRQ\n");
1159 }
1160 if (val & 0x02) {
1161 bch = Sel_BCS(hc, 2);
1162 if (bch)
1163 tx_birq(bch);
1164 else if (hc->dch.debug)
1165 printk(KERN_DEBUG "hfcpci spurious 0x02 IRQ\n");
1166 }
1167 if (val & 0x20)
1168 receive_dmsg(hc);
1169 if (val & 0x04) { /* dframe transmitted */
1170 if (test_and_clear_bit(FLG_BUSY_TIMER, &hc->dch.Flags))
1171 del_timer(&hc->dch.timer);
1172 tx_dirq(&hc->dch);
1173 }
1174 spin_unlock(&hc->lock);
1175 return IRQ_HANDLED;
1176}
1177
1178/*
1179 * timer callback for D-channel busy resolution; currently a no-op
1180 */
1181static void
1182hfcpci_dbusy_timer(struct hfc_pci *hc)
1183{
1184}
1185
1186/*
1187 * activate/deactivate hardware for selected channels and mode
1188 */
1189static int
1190mode_hfcpci(struct bchannel *bch, int bc, int protocol)
1191{
1192 struct hfc_pci *hc = bch->hw;
1193 int fifo2;
1194 u_char rx_slot = 0, tx_slot = 0, pcm_mode;
1195
1196 if (bch->debug & DEBUG_HW_BCHANNEL)
1197 printk(KERN_DEBUG
1198 "HFCPCI bchannel protocol %x-->%x ch %x-->%x\n",
1199 bch->state, protocol, bch->nr, bc);
1200
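	/*
	 * The channel argument is a packed word: bits 0-7 carry the channel
	 * number, bits 8-15 the PCM receive slot, bits 16-23 the PCM transmit
	 * slot and bits 24-31 a flag selecting PCM slot use (see the shifts
	 * below).
	 */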
1201 fifo2 = bc;
1202 pcm_mode = (bc>>24) & 0xff;
1203 if (pcm_mode) { /* PCM SLOT USE */
1204 if (!test_bit(HFC_CFG_PCM, &hc->cfg))
1205 printk(KERN_WARNING
1206 "%s: pcm channel id without HFC_CFG_PCM\n",
1207 __func__);
1208 rx_slot = (bc>>8) & 0xff;
1209 tx_slot = (bc>>16) & 0xff;
1210 bc = bc & 0xff;
1211 } else if (test_bit(HFC_CFG_PCM, &hc->cfg) &&
1212 (protocol > ISDN_P_NONE))
1213 printk(KERN_WARNING "%s: no pcm channel id but HFC_CFG_PCM\n",
1214 __func__);
1215 if (hc->chanlimit > 1) {
1216 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1217 hc->hw.sctrl_e &= ~0x80;
1218 } else {
1219 if (bc & 2) {
1220 if (protocol != ISDN_P_NONE) {
1221 hc->hw.bswapped = 1; /* B1 and B2 exchanged */
1222 hc->hw.sctrl_e |= 0x80;
1223 } else {
1224 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1225 hc->hw.sctrl_e &= ~0x80;
1226 }
1227 fifo2 = 1;
1228 } else {
1229 hc->hw.bswapped = 0; /* B1 and B2 normal mode */
1230 hc->hw.sctrl_e &= ~0x80;
1231 }
1232 }
1233 switch (protocol) {
1234 case (-1): /* used for init */
1235 bch->state = -1;
1236 bch->nr = bc;
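		/* fall through: the init case continues with the ISDN_P_NONE teardown */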
1237 case (ISDN_P_NONE):
1238 if (bch->state == ISDN_P_NONE)
1239 return 0;
1240 if (bc & 2) {
1241 hc->hw.sctrl &= ~SCTRL_B2_ENA;
1242 hc->hw.sctrl_r &= ~SCTRL_B2_ENA;
1243 } else {
1244 hc->hw.sctrl &= ~SCTRL_B1_ENA;
1245 hc->hw.sctrl_r &= ~SCTRL_B1_ENA;
1246 }
1247 if (fifo2 & 2) {
1248 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
1249 hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS +
1250 HFCPCI_INTS_B2REC);
1251 } else {
1252 hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
1253 hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS +
1254 HFCPCI_INTS_B1REC);
1255 }
1256#ifdef REVERSE_BITORDER
1257 if (bch->nr & 2)
1258 hc->hw.cirm &= 0x7f;
1259 else
1260 hc->hw.cirm &= 0xbf;
1261#endif
1262 bch->state = ISDN_P_NONE;
1263 bch->nr = bc;
1264 test_and_clear_bit(FLG_HDLC, &bch->Flags);
1265 test_and_clear_bit(FLG_TRANSPARENT, &bch->Flags);
1266 break;
1267 case (ISDN_P_B_RAW):
1268 bch->state = protocol;
1269 bch->nr = bc;
1270 hfcpci_clear_fifo_rx(hc, (fifo2 & 2)?1:0);
1271 hfcpci_clear_fifo_tx(hc, (fifo2 & 2)?1:0);
1272 if (bc & 2) {
1273 hc->hw.sctrl |= SCTRL_B2_ENA;
1274 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1275#ifdef REVERSE_BITORDER
1276 hc->hw.cirm |= 0x80;
1277#endif
1278 } else {
1279 hc->hw.sctrl |= SCTRL_B1_ENA;
1280 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1281#ifdef REVERSE_BITORDER
1282 hc->hw.cirm |= 0x40;
1283#endif
1284 }
1285 if (fifo2 & 2) {
1286 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1287 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
1288 HFCPCI_INTS_B2REC);
1289 hc->hw.ctmt |= 2;
1290 hc->hw.conn &= ~0x18;
1291 } else {
1292 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1293 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
1294 HFCPCI_INTS_B1REC);
1295 hc->hw.ctmt |= 1;
1296 hc->hw.conn &= ~0x03;
1297 }
1298 test_and_set_bit(FLG_TRANSPARENT, &bch->Flags);
1299 break;
1300 case (ISDN_P_B_HDLC):
1301 bch->state = protocol;
1302 bch->nr = bc;
1303 hfcpci_clear_fifo_rx(hc, (fifo2 & 2)?1:0);
1304 hfcpci_clear_fifo_tx(hc, (fifo2 & 2)?1:0);
1305 if (bc & 2) {
1306 hc->hw.sctrl |= SCTRL_B2_ENA;
1307 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1308 } else {
1309 hc->hw.sctrl |= SCTRL_B1_ENA;
1310 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1311 }
1312 if (fifo2 & 2) {
1313 hc->hw.last_bfifo_cnt[1] = 0;
1314 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
1315 hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
1316 HFCPCI_INTS_B2REC);
1317 hc->hw.ctmt &= ~2;
1318 hc->hw.conn &= ~0x18;
1319 } else {
1320 hc->hw.last_bfifo_cnt[0] = 0;
1321 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
1322 hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
1323 HFCPCI_INTS_B1REC);
1324 hc->hw.ctmt &= ~1;
1325 hc->hw.conn &= ~0x03;
1326 }
1327 test_and_set_bit(FLG_HDLC, &bch->Flags);
1328 break;
1329 default:
1330 printk(KERN_DEBUG "prot not known %x\n", protocol);
1331 return -ENOPROTOOPT;
1332 }
1333 if (test_bit(HFC_CFG_PCM, &hc->cfg)) {
1334 if ((protocol == ISDN_P_NONE) ||
1335 (protocol == -1)) { /* init case */
1336 rx_slot = 0;
1337 tx_slot = 0;
1338 } else {
1339 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg)) {
1340 rx_slot |= 0xC0;
1341 tx_slot |= 0xC0;
1342 } else {
1343 rx_slot |= 0x80;
1344 tx_slot |= 0x80;
1345 }
1346 }
1347 if (bc & 2) {
1348 hc->hw.conn &= 0xc7;
1349 hc->hw.conn |= 0x08;
1350 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL 0x%x\n",
1351 __func__, tx_slot);
1352 printk(KERN_DEBUG "%s: Write_hfc: B2_RSL 0x%x\n",
1353 __func__, rx_slot);
1354 Write_hfc(hc, HFCPCI_B2_SSL, tx_slot);
1355 Write_hfc(hc, HFCPCI_B2_RSL, rx_slot);
1356 } else {
1357 hc->hw.conn &= 0xf8;
1358 hc->hw.conn |= 0x01;
1359 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL 0x%x\n",
1360 __func__, tx_slot);
1361 printk(KERN_DEBUG "%s: Write_hfc: B1_RSL 0x%x\n",
1362 __func__, rx_slot);
1363 Write_hfc(hc, HFCPCI_B1_SSL, tx_slot);
1364 Write_hfc(hc, HFCPCI_B1_RSL, rx_slot);
1365 }
1366 }
1367 Write_hfc(hc, HFCPCI_SCTRL_E, hc->hw.sctrl_e);
1368 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1369 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1370 Write_hfc(hc, HFCPCI_SCTRL, hc->hw.sctrl);
1371 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1372 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1373 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1374#ifdef REVERSE_BITORDER
1375 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1376#endif
1377 return 0;
1378}
1379
1380static int
1381set_hfcpci_rxtest(struct bchannel *bch, int protocol, int chan)
1382{
1383 struct hfc_pci *hc = bch->hw;
1384
1385 if (bch->debug & DEBUG_HW_BCHANNEL)
1386 printk(KERN_DEBUG
1387 "HFCPCI bchannel test rx protocol %x-->%x ch %x-->%x\n",
1388 bch->state, protocol, bch->nr, chan);
1389 if (bch->nr != chan) {
1390 printk(KERN_DEBUG
1391 "HFCPCI rxtest wrong channel parameter %x/%x\n",
1392 bch->nr, chan);
1393 return -EINVAL;
1394 }
1395 switch (protocol) {
1396 case (ISDN_P_B_RAW):
1397 bch->state = protocol;
1398 hfcpci_clear_fifo_rx(hc, (chan & 2)?1:0);
1399 if (chan & 2) {
1400 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1401 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1402 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1403 hc->hw.ctmt |= 2;
1404 hc->hw.conn &= ~0x18;
1405#ifdef REVERSE_BITORDER
1406 hc->hw.cirm |= 0x80;
1407#endif
1408 } else {
1409 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1410 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1411 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1412 hc->hw.ctmt |= 1;
1413 hc->hw.conn &= ~0x03;
1414#ifdef REVERSE_BITORDER
1415 hc->hw.cirm |= 0x40;
1416#endif
1417 }
1418 break;
1419 case (ISDN_P_B_HDLC):
1420 bch->state = protocol;
1421 hfcpci_clear_fifo_rx(hc, (chan & 2)?1:0);
1422 if (chan & 2) {
1423 hc->hw.sctrl_r |= SCTRL_B2_ENA;
1424 hc->hw.last_bfifo_cnt[1] = 0;
1425 hc->hw.fifo_en |= HFCPCI_FIFOEN_B2RX;
1426 hc->hw.int_m1 |= HFCPCI_INTS_B2REC;
1427 hc->hw.ctmt &= ~2;
1428 hc->hw.conn &= ~0x18;
1429 } else {
1430 hc->hw.sctrl_r |= SCTRL_B1_ENA;
1431 hc->hw.last_bfifo_cnt[0] = 0;
1432 hc->hw.fifo_en |= HFCPCI_FIFOEN_B1RX;
1433 hc->hw.int_m1 |= HFCPCI_INTS_B1REC;
1434 hc->hw.ctmt &= ~1;
1435 hc->hw.conn &= ~0x03;
1436 }
1437 break;
1438 default:
1439 printk(KERN_DEBUG "prot not known %x\n", protocol);
1440 return -ENOPROTOOPT;
1441 }
1442 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1443 Write_hfc(hc, HFCPCI_FIFO_EN, hc->hw.fifo_en);
1444 Write_hfc(hc, HFCPCI_SCTRL_R, hc->hw.sctrl_r);
1445 Write_hfc(hc, HFCPCI_CTMT, hc->hw.ctmt);
1446 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1447#ifdef REVERSE_BITORDER
1448 Write_hfc(hc, HFCPCI_CIRM, hc->hw.cirm);
1449#endif
1450 return 0;
1451}
1452
1453static void
1454deactivate_bchannel(struct bchannel *bch)
1455{
1456 struct hfc_pci *hc = bch->hw;
1457 u_long flags;
1458
1459 spin_lock_irqsave(&hc->lock, flags);
1460 if (test_and_clear_bit(FLG_TX_NEXT, &bch->Flags)) {
1461 dev_kfree_skb(bch->next_skb);
1462 bch->next_skb = NULL;
1463 }
1464 if (bch->tx_skb) {
1465 dev_kfree_skb(bch->tx_skb);
1466 bch->tx_skb = NULL;
1467 }
1468 bch->tx_idx = 0;
1469 if (bch->rx_skb) {
1470 dev_kfree_skb(bch->rx_skb);
1471 bch->rx_skb = NULL;
1472 }
1473 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1474 test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
1475 test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
1476 spin_unlock_irqrestore(&hc->lock, flags);
1477}
1478
1479/*
1480 * Layer 1 B-channel hardware access
1481 */
1482static int
1483channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1484{
1485 int ret = 0;
1486
1487 switch (cq->op) {
1488 case MISDN_CTRL_GETOP:
1489 cq->op = 0;
1490 break;
1491 default:
1492 printk(KERN_WARNING "%s: unknown Op %x\n", __func__, cq->op);
1493 ret = -EINVAL;
1494 break;
1495 }
1496 return ret;
1497}
1498static int
1499hfc_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1500{
1501 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1502 struct hfc_pci *hc = bch->hw;
1503 int ret = -EINVAL;
1504 u_long flags;
1505
1506 if (bch->debug & DEBUG_HW)
1507 printk(KERN_DEBUG "%s: cmd:%x %p\n", __func__, cmd, arg);
1508 switch (cmd) {
1509 case HW_TESTRX_RAW:
1510 spin_lock_irqsave(&hc->lock, flags);
1511 ret = set_hfcpci_rxtest(bch, ISDN_P_B_RAW, (int)(long)arg);
1512 spin_unlock_irqrestore(&hc->lock, flags);
1513 break;
1514 case HW_TESTRX_HDLC:
1515 spin_lock_irqsave(&hc->lock, flags);
1516 ret = set_hfcpci_rxtest(bch, ISDN_P_B_HDLC, (int)(long)arg);
1517 spin_unlock_irqrestore(&hc->lock, flags);
1518 break;
1519 case HW_TESTRX_OFF:
1520 spin_lock_irqsave(&hc->lock, flags);
1521 mode_hfcpci(bch, bch->nr, ISDN_P_NONE);
1522 spin_unlock_irqrestore(&hc->lock, flags);
1523 ret = 0;
1524 break;
1525 case CLOSE_CHANNEL:
1526 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1527 if (test_bit(FLG_ACTIVE, &bch->Flags))
1528 deactivate_bchannel(bch);
1529 ch->protocol = ISDN_P_NONE;
1530 ch->peer = NULL;
1531 module_put(THIS_MODULE);
1532 ret = 0;
1533 break;
1534 case CONTROL_CHANNEL:
1535 ret = channel_bctrl(bch, arg);
1536 break;
1537 default:
1538 printk(KERN_WARNING "%s: unknown prim(%x)\n",
1539 __func__, cmd);
1540 }
1541 return ret;
1542}
1543
1544/*
1545 * Layer2 -> Layer 1 Dchannel data
1546 */
1547static int
1548hfcpci_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
1549{
1550 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1551 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1552 struct hfc_pci *hc = dch->hw;
1553 int ret = -EINVAL;
1554 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1555 unsigned int id;
1556 u_long flags;
1557
1558 switch (hh->prim) {
1559 case PH_DATA_REQ:
1560 spin_lock_irqsave(&hc->lock, flags);
1561 ret = dchannel_senddata(dch, skb);
1562 if (ret > 0) { /* direct TX */
1563 id = hh->id; /* skb can be freed */
1564 hfcpci_fill_dfifo(dch->hw);
1565 ret = 0;
1566 spin_unlock_irqrestore(&hc->lock, flags);
1567 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1568 } else
1569 spin_unlock_irqrestore(&hc->lock, flags);
1570 return ret;
1571 case PH_ACTIVATE_REQ:
1572 spin_lock_irqsave(&hc->lock, flags);
1573 if (hc->hw.protocol == ISDN_P_NT_S0) {
1574 ret = 0;
1575 if (test_bit(HFC_CFG_MASTER, &hc->cfg))
1576 hc->hw.mst_m |= HFCPCI_MASTER;
1577 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1578 if (test_bit(FLG_ACTIVE, &dch->Flags)) {
1579 spin_unlock_irqrestore(&hc->lock, flags);
1580 _queue_data(&dch->dev.D, PH_ACTIVATE_IND,
1581 MISDN_ID_ANY, 0, NULL, GFP_ATOMIC);
1582 break;
1583 }
1584 test_and_set_bit(FLG_L2_ACTIVATED, &dch->Flags);
1585 Write_hfc(hc, HFCPCI_STATES, HFCPCI_ACTIVATE |
1586 HFCPCI_DO_ACTION | 1);
1587 } else
1588 ret = l1_event(dch->l1, hh->prim);
1589 spin_unlock_irqrestore(&hc->lock, flags);
1590 break;
1591 case PH_DEACTIVATE_REQ:
1592 test_and_clear_bit(FLG_L2_ACTIVATED, &dch->Flags);
1593 spin_lock_irqsave(&hc->lock, flags);
1594 if (hc->hw.protocol == ISDN_P_NT_S0) {
1595 /* prepare deactivation */
1596 Write_hfc(hc, HFCPCI_STATES, 0x40);
1597 skb_queue_purge(&dch->squeue);
1598 if (dch->tx_skb) {
1599 dev_kfree_skb(dch->tx_skb);
1600 dch->tx_skb = NULL;
1601 }
1602 dch->tx_idx = 0;
1603 if (dch->rx_skb) {
1604 dev_kfree_skb(dch->rx_skb);
1605 dch->rx_skb = NULL;
1606 }
1607 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
1608 if (test_and_clear_bit(FLG_BUSY_TIMER, &dch->Flags))
1609 del_timer(&dch->timer);
1610#ifdef FIXME
1611 if (test_and_clear_bit(FLG_L1_BUSY, &dch->Flags))
1612 dchannel_sched_event(&hc->dch, D_CLEARBUSY);
1613#endif
1614 hc->hw.mst_m &= ~HFCPCI_MASTER;
1615 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1616 ret = 0;
1617 } else {
1618 ret = l1_event(dch->l1, hh->prim);
1619 }
1620 spin_unlock_irqrestore(&hc->lock, flags);
1621 break;
1622 }
1623 if (!ret)
1624 dev_kfree_skb(skb);
1625 return ret;
1626}
1627
1628/*
1629 * Layer2 -> Layer 1 Bchannel data
1630 */
1631static int
1632hfcpci_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
1633{
1634 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1635 struct hfc_pci *hc = bch->hw;
1636 int ret = -EINVAL;
1637 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1638 unsigned int id;
1639 u_long flags;
1640
1641 switch (hh->prim) {
1642 case PH_DATA_REQ:
1643 spin_lock_irqsave(&hc->lock, flags);
1644 ret = bchannel_senddata(bch, skb);
1645 if (ret > 0) { /* direct TX */
1646 id = hh->id; /* skb can be freed */
1647 hfcpci_fill_fifo(bch);
1648 ret = 0;
1649 spin_unlock_irqrestore(&hc->lock, flags);
1650 if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
1651 queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
1652 } else
1653 spin_unlock_irqrestore(&hc->lock, flags);
1654 return ret;
1655 case PH_ACTIVATE_REQ:
1656 spin_lock_irqsave(&hc->lock, flags);
1657 if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
1658 ret = mode_hfcpci(bch, bch->nr, ch->protocol);
1659 else
1660 ret = 0;
1661 spin_unlock_irqrestore(&hc->lock, flags);
1662 if (!ret)
1663 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
1664 NULL, GFP_KERNEL);
1665 break;
1666 case PH_DEACTIVATE_REQ:
1667 deactivate_bchannel(bch);
1668 _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
1669 NULL, GFP_KERNEL);
1670 ret = 0;
1671 break;
1672 }
1673 if (!ret)
1674 dev_kfree_skb(skb);
1675 return ret;
1676}
1677
1678/*
1679 * called for card init message
1680 */
1681
1682void
1683inithfcpci(struct hfc_pci *hc)
1684{
1685 printk(KERN_DEBUG "inithfcpci: entered\n");
1686 hc->dch.timer.function = (void *) hfcpci_dbusy_timer;
1687 hc->dch.timer.data = (long) &hc->dch;
1688 init_timer(&hc->dch.timer);
1689 hc->chanlimit = 2;
1690 mode_hfcpci(&hc->bch[0], 1, -1);
1691 mode_hfcpci(&hc->bch[1], 2, -1);
1692}
1693
1694
1695static int
1696init_card(struct hfc_pci *hc)
1697{
1698 int cnt = 3;
1699 u_long flags;
1700
1701 printk(KERN_DEBUG "init_card: entered\n");
1702
1703
1704 spin_lock_irqsave(&hc->lock, flags);
1705 disable_hwirq(hc);
1706 spin_unlock_irqrestore(&hc->lock, flags);
1707 if (request_irq(hc->irq, hfcpci_int, IRQF_SHARED, "HFC PCI", hc)) {
1708 printk(KERN_WARNING
1709 "mISDN: couldn't get interrupt %d\n", hc->irq);
1710 return -EIO;
1711 }
1712 spin_lock_irqsave(&hc->lock, flags);
1713 reset_hfcpci(hc);
1714 while (cnt) {
1715 inithfcpci(hc);
1716 /*
1717 * Finally enable IRQ output
1718		 * this is only allowed if an IRQ routine is already
1719		 * established for this HFC, so don't do it earlier
1720 */
1721 enable_hwirq(hc);
1722 spin_unlock_irqrestore(&hc->lock, flags);
1723 /* Timeout 80ms */
1724 current->state = TASK_UNINTERRUPTIBLE;
1725 schedule_timeout((80*HZ)/1000);
1726 printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
1727 hc->irq, hc->irqcnt);
1728 /* now switch timer interrupt off */
1729 spin_lock_irqsave(&hc->lock, flags);
1730 hc->hw.int_m1 &= ~HFCPCI_INTS_TIMER;
1731 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1732 /* reinit mode reg */
1733 Write_hfc(hc, HFCPCI_MST_MODE, hc->hw.mst_m);
1734 if (!hc->irqcnt) {
1735 printk(KERN_WARNING
1736 "HFC PCI: IRQ(%d) getting no interrupts "
1737 "during init %d\n", hc->irq, 4 - cnt);
1738 if (cnt == 1) {
1739 spin_unlock_irqrestore(&hc->lock, flags);
1740 return -EIO;
1741 } else {
1742 reset_hfcpci(hc);
1743 cnt--;
1744 }
1745 } else {
1746 spin_unlock_irqrestore(&hc->lock, flags);
1747 hc->initdone = 1;
1748 return 0;
1749 }
1750 }
1751 disable_hwirq(hc);
1752 spin_unlock_irqrestore(&hc->lock, flags);
1753 free_irq(hc->irq, hc);
1754 return -EIO;
1755}
1756
1757static int
1758channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1759{
1760 int ret = 0;
1761 u_char slot;
1762
1763 switch (cq->op) {
1764 case MISDN_CTRL_GETOP:
1765 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1766 MISDN_CTRL_DISCONNECT;
1767 break;
1768 case MISDN_CTRL_LOOP:
1769		/* channel 0 means loop disabled */
1770 if (cq->channel < 0 || cq->channel > 2) {
1771 ret = -EINVAL;
1772 break;
1773 }
1774 if (cq->channel & 1) {
1775 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1776 slot = 0xC0;
1777 else
1778 slot = 0x80;
1779 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1780 __func__, slot);
1781 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1782 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1783 hc->hw.conn = (hc->hw.conn & ~7) | 6;
1784 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1785 }
1786 if (cq->channel & 2) {
1787 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1788 slot = 0xC1;
1789 else
1790 slot = 0x81;
1791 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1792 __func__, slot);
1793 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1794 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1795 hc->hw.conn = (hc->hw.conn & ~0x38) | 0x30;
1796 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1797 }
1798 if (cq->channel & 3)
1799 hc->hw.trm |= 0x80; /* enable IOM-loop */
1800 else {
1801 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1802 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1803 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1804 }
1805 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1806 break;
1807 case MISDN_CTRL_CONNECT:
1808 if (cq->channel == cq->p1) {
1809 ret = -EINVAL;
1810 break;
1811 }
1812 if (cq->channel < 1 || cq->channel > 2 ||
1813 cq->p1 < 1 || cq->p1 > 2) {
1814 ret = -EINVAL;
1815 break;
1816 }
1817 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1818 slot = 0xC0;
1819 else
1820 slot = 0x80;
1821 printk(KERN_DEBUG "%s: Write_hfc: B1_SSL/RSL 0x%x\n",
1822 __func__, slot);
1823 Write_hfc(hc, HFCPCI_B1_SSL, slot);
1824 Write_hfc(hc, HFCPCI_B2_RSL, slot);
1825 if (test_bit(HFC_CFG_SW_DD_DU, &hc->cfg))
1826 slot = 0xC1;
1827 else
1828 slot = 0x81;
1829 printk(KERN_DEBUG "%s: Write_hfc: B2_SSL/RSL 0x%x\n",
1830 __func__, slot);
1831 Write_hfc(hc, HFCPCI_B2_SSL, slot);
1832 Write_hfc(hc, HFCPCI_B1_RSL, slot);
1833 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x36;
1834 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1835 hc->hw.trm |= 0x80;
1836 Write_hfc(hc, HFCPCI_TRM, hc->hw.trm);
1837 break;
1838 case MISDN_CTRL_DISCONNECT:
1839 hc->hw.conn = (hc->hw.conn & ~0x3f) | 0x09;
1840 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1841 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1842 break;
1843 default:
1844 printk(KERN_WARNING "%s: unknown Op %x\n",
1845 __func__, cq->op);
1846 ret = -EINVAL;
1847 break;
1848 }
1849 return ret;
1850}
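/*
 * Editorial sketch (not part of the driver): how a caller might request a
 * B1<->B2 cross-connect through the control path handled above. The helper
 * name is hypothetical; it only illustrates the mISDN_ctrl_req fields that
 * channel_ctrl() interprets.
 */
static int example_connect_b_channels(struct hfc_pci *hc)
{
	struct mISDN_ctrl_req cq = {
		.op = MISDN_CTRL_CONNECT,
		.channel = 1,	/* B1 */
		.p1 = 2,	/* B2 */
	};

	return channel_ctrl(hc, &cq);
}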
1851
1852static int
1853open_dchannel(struct hfc_pci *hc, struct mISDNchannel *ch,
1854 struct channel_req *rq)
1855{
1856 int err = 0;
1857
1858 if (debug & DEBUG_HW_OPEN)
1859 printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
1860 hc->dch.dev.id, __builtin_return_address(0));
1861 if (rq->protocol == ISDN_P_NONE)
1862 return -EINVAL;
1863 if (!hc->initdone) {
1864 if (rq->protocol == ISDN_P_TE_S0) {
1865 err = create_l1(&hc->dch, hfc_l1callback);
1866 if (err)
1867 return err;
1868 }
1869 hc->hw.protocol = rq->protocol;
1870 ch->protocol = rq->protocol;
1871 err = init_card(hc);
1872 if (err)
1873 return err;
1874 } else {
1875 if (rq->protocol != ch->protocol) {
1876 if (hc->hw.protocol == ISDN_P_TE_S0)
1877 l1_event(hc->dch.l1, CLOSE_CHANNEL);
1878 hc->hw.protocol = rq->protocol;
1879 ch->protocol = rq->protocol;
1880 hfcpci_setmode(hc);
1881 }
1882 }
1883
1884 if (((ch->protocol == ISDN_P_NT_S0) && (hc->dch.state == 3)) ||
1885 ((ch->protocol == ISDN_P_TE_S0) && (hc->dch.state == 7))) {
1886 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY,
1887 0, NULL, GFP_KERNEL);
1888 }
1889 rq->ch = ch;
1890 if (!try_module_get(THIS_MODULE))
1891		printk(KERN_WARNING "%s: cannot get module\n", __func__);
1892 return 0;
1893}
1894
1895static int
1896open_bchannel(struct hfc_pci *hc, struct channel_req *rq)
1897{
1898 struct bchannel *bch;
1899
1900 if (rq->adr.channel > 2)
1901 return -EINVAL;
1902 if (rq->protocol == ISDN_P_NONE)
1903 return -EINVAL;
1904 bch = &hc->bch[rq->adr.channel - 1];
1905 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
1906		return -EBUSY; /* b-channel can only be opened once */
1907 bch->ch.protocol = rq->protocol;
1908 rq->ch = &bch->ch; /* TODO: E-channel */
1909 if (!try_module_get(THIS_MODULE))
1910		printk(KERN_WARNING "%s: cannot get module\n", __func__);
1911 return 0;
1912}
1913
1914/*
1915 * device control function
1916 */
1917static int
1918hfc_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1919{
1920 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1921 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1922 struct hfc_pci *hc = dch->hw;
1923 struct channel_req *rq;
1924 int err = 0;
1925
1926 if (dch->debug & DEBUG_HW)
1927 printk(KERN_DEBUG "%s: cmd:%x %p\n",
1928 __func__, cmd, arg);
1929 switch (cmd) {
1930 case OPEN_CHANNEL:
1931 rq = arg;
1932 if (rq->adr.channel == 0)
1933 err = open_dchannel(hc, ch, rq);
1934 else
1935 err = open_bchannel(hc, rq);
1936 break;
1937 case CLOSE_CHANNEL:
1938 if (debug & DEBUG_HW_OPEN)
1939 printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
1940 __func__, hc->dch.dev.id,
1941 __builtin_return_address(0));
1942 module_put(THIS_MODULE);
1943 break;
1944 case CONTROL_CHANNEL:
1945 err = channel_ctrl(hc, arg);
1946 break;
1947 default:
1948 if (dch->debug & DEBUG_HW)
1949 printk(KERN_DEBUG "%s: unknown command %x\n",
1950 __func__, cmd);
1951 return -EINVAL;
1952 }
1953 return err;
1954}
1955
1956static int
1957setup_hw(struct hfc_pci *hc)
1958{
1959 void *buffer;
1960
1961 printk(KERN_INFO "mISDN: HFC-PCI driver %s\n", hfcpci_revision);
1962 hc->hw.cirm = 0;
1963 hc->dch.state = 0;
1964 pci_set_master(hc->pdev);
1965 if (!hc->irq) {
1966 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1967 return 1;
1968 }
1969 hc->hw.pci_io = (char *)(ulong)hc->pdev->resource[1].start;
1970
1971 if (!hc->hw.pci_io) {
1972 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1973 return 1;
1974 }
1975 /* Allocate memory for FIFOS */
1976 /* the memory needs to be on a 32k boundary within the first 4G */
1977 pci_set_dma_mask(hc->pdev, 0xFFFF8000);
1978 buffer = pci_alloc_consistent(hc->pdev, 0x8000, &hc->hw.dmahandle);
1979 /* We silently assume the address is okay if nonzero */
1980 if (!buffer) {
1981 printk(KERN_WARNING
1982 "HFC-PCI: Error allocating memory for FIFO!\n");
1983 return 1;
1984 }
1985 hc->hw.fifos = buffer;
1986 pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
1987 hc->hw.pci_io = ioremap((ulong) hc->hw.pci_io, 256);
1988 printk(KERN_INFO
1989 "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
1990 (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
1991 (u_long) virt_to_bus(hc->hw.fifos),
1992 hc->irq, HZ);
1993 /* enable memory mapped ports, disable busmaster */
1994 pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
1995 hc->hw.int_m2 = 0;
1996 disable_hwirq(hc);
1997 hc->hw.int_m1 = 0;
1998 Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
1999 /* At this point the needed PCI config is done */
2000 /* fifos are still not enabled */
2001 hc->hw.timer.function = (void *) hfcpci_Timer;
2002 hc->hw.timer.data = (long) hc;
2003 init_timer(&hc->hw.timer);
2004 /* default PCM master */
2005 test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
2006 return 0;
2007}
2008
2009static void
2010release_card(struct hfc_pci *hc) {
2011 u_long flags;
2012
2013 spin_lock_irqsave(&hc->lock, flags);
2014 hc->hw.int_m2 = 0; /* interrupt output off ! */
2015 disable_hwirq(hc);
2016 mode_hfcpci(&hc->bch[0], 1, ISDN_P_NONE);
2017 mode_hfcpci(&hc->bch[1], 2, ISDN_P_NONE);
2018 if (hc->dch.timer.function != NULL) {
2019 del_timer(&hc->dch.timer);
2020 hc->dch.timer.function = NULL;
2021 }
2022 spin_unlock_irqrestore(&hc->lock, flags);
2023 if (hc->hw.protocol == ISDN_P_TE_S0)
2024 l1_event(hc->dch.l1, CLOSE_CHANNEL);
2025 if (hc->initdone)
2026 free_irq(hc->irq, hc);
2027 release_io_hfcpci(hc); /* must release after free_irq! */
2028 mISDN_unregister_device(&hc->dch.dev);
2029 mISDN_freebchannel(&hc->bch[1]);
2030 mISDN_freebchannel(&hc->bch[0]);
2031 mISDN_freedchannel(&hc->dch);
2032 list_del(&hc->list);
2033 pci_set_drvdata(hc->pdev, NULL);
2034 kfree(hc);
2035}
2036
2037static int
2038setup_card(struct hfc_pci *card)
2039{
2040 int err = -EINVAL;
2041 u_int i;
2042 u_long flags;
2043 char name[MISDN_MAX_IDLEN];
2044
2045 if (HFC_cnt >= MAX_CARDS)
2046		return -EINVAL; /* maybe a better error value */
2047
2048 card->dch.debug = debug;
2049 spin_lock_init(&card->lock);
2050 mISDN_initdchannel(&card->dch, MAX_DFRAME_LEN_L1, ph_state);
2051 card->dch.hw = card;
2052 card->dch.dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
2053 card->dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
2054 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
2055 card->dch.dev.D.send = hfcpci_l2l1D;
2056 card->dch.dev.D.ctrl = hfc_dctrl;
2057 card->dch.dev.nrbchan = 2;
2058 for (i = 0; i < 2; i++) {
2059 card->bch[i].nr = i + 1;
2060 test_and_set_bit(i + 1, &card->dch.dev.channelmap[0]);
2061 card->bch[i].debug = debug;
2062 mISDN_initbchannel(&card->bch[i], MAX_DATA_MEM);
2063 card->bch[i].hw = card;
2064 card->bch[i].ch.send = hfcpci_l2l1B;
2065 card->bch[i].ch.ctrl = hfc_bctrl;
2066 card->bch[i].ch.nr = i + 1;
2067 list_add(&card->bch[i].ch.list, &card->dch.dev.bchannels);
2068 }
2069 err = setup_hw(card);
2070 if (err)
2071 goto error;
2072 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-pci.%d", HFC_cnt + 1);
2073 err = mISDN_register_device(&card->dch.dev, name);
2074 if (err)
2075 goto error;
2076 HFC_cnt++;
2077 write_lock_irqsave(&HFClock, flags);
2078 list_add_tail(&card->list, &HFClist);
2079 write_unlock_irqrestore(&HFClock, flags);
2080 printk(KERN_INFO "HFC %d cards installed\n", HFC_cnt);
2081 return 0;
2082error:
2083 mISDN_freebchannel(&card->bch[1]);
2084 mISDN_freebchannel(&card->bch[0]);
2085 mISDN_freedchannel(&card->dch);
2086 kfree(card);
2087 return err;
2088}
2089
2090/* private data in the PCI devices list */
2091struct _hfc_map {
2092 u_int subtype;
2093 u_int flag;
2094 char *name;
2095};
2096
2097static const struct _hfc_map hfc_map[] =
2098{
2099 {HFC_CCD_2BD0, 0, "CCD/Billion/Asuscom 2BD0"},
2100 {HFC_CCD_B000, 0, "Billion B000"},
2101 {HFC_CCD_B006, 0, "Billion B006"},
2102 {HFC_CCD_B007, 0, "Billion B007"},
2103 {HFC_CCD_B008, 0, "Billion B008"},
2104 {HFC_CCD_B009, 0, "Billion B009"},
2105 {HFC_CCD_B00A, 0, "Billion B00A"},
2106 {HFC_CCD_B00B, 0, "Billion B00B"},
2107 {HFC_CCD_B00C, 0, "Billion B00C"},
2108 {HFC_CCD_B100, 0, "Seyeon B100"},
2109 {HFC_CCD_B700, 0, "Primux II S0 B700"},
2110 {HFC_CCD_B701, 0, "Primux II S0 NT B701"},
2111 {HFC_ABOCOM_2BD1, 0, "Abocom/Magitek 2BD1"},
2112 {HFC_ASUS_0675, 0, "Asuscom/Askey 675"},
2113 {HFC_BERKOM_TCONCEPT, 0, "German telekom T-Concept"},
2114 {HFC_BERKOM_A1T, 0, "German telekom A1T"},
2115 {HFC_ANIGMA_MC145575, 0, "Motorola MC145575"},
2116 {HFC_ZOLTRIX_2BD0, 0, "Zoltrix 2BD0"},
2117 {HFC_DIGI_DF_M_IOM2_E, 0,
2118 "Digi International DataFire Micro V IOM2 (Europe)"},
2119 {HFC_DIGI_DF_M_E, 0,
2120 "Digi International DataFire Micro V (Europe)"},
2121 {HFC_DIGI_DF_M_IOM2_A, 0,
2122 "Digi International DataFire Micro V IOM2 (North America)"},
2123 {HFC_DIGI_DF_M_A, 0,
2124 "Digi International DataFire Micro V (North America)"},
2125 {HFC_SITECOM_DC105V2, 0, "Sitecom Connectivity DC-105 ISDN TA"},
2126 {},
2127};
2128
2129static struct pci_device_id hfc_ids[] =
2130{
2131 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_2BD0,
2132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[0]},
2133 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B000,
2134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[1]},
2135 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B006,
2136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[2]},
2137 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B007,
2138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[3]},
2139 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B008,
2140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[4]},
2141 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B009,
2142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[5]},
2143 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00A,
2144 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[6]},
2145 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00B,
2146 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[7]},
2147 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B00C,
2148 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[8]},
2149 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B100,
2150 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[9]},
2151 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B700,
2152 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[10]},
2153 {PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_B701,
2154 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[11]},
2155 {PCI_VENDOR_ID_ABOCOM, PCI_DEVICE_ID_ABOCOM_2BD1,
2156 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[12]},
2157 {PCI_VENDOR_ID_ASUSTEK, PCI_DEVICE_ID_ASUSTEK_0675,
2158 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[13]},
2159 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_T_CONCEPT,
2160 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[14]},
2161 {PCI_VENDOR_ID_BERKOM, PCI_DEVICE_ID_BERKOM_A1T,
2162 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[15]},
2163 {PCI_VENDOR_ID_ANIGMA, PCI_DEVICE_ID_ANIGMA_MC145575,
2164 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[16]},
2165 {PCI_VENDOR_ID_ZOLTRIX, PCI_DEVICE_ID_ZOLTRIX_2BD0,
2166 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[17]},
2167 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_E,
2168 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[18]},
2169 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_E,
2170 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[19]},
2171 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_IOM2_A,
2172 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[20]},
2173 {PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_DIGI_DF_M_A,
2174 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[21]},
2175 {PCI_VENDOR_ID_SITECOM, PCI_DEVICE_ID_SITECOM_DC105V2,
2176 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &hfc_map[22]},
2177 {},
2178};
2179
2180static int __devinit
2181hfc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2182{
2183 int err = -ENOMEM;
2184 struct hfc_pci *card;
2185 struct _hfc_map *m = (struct _hfc_map *)ent->driver_data;
2186
2187 card = kzalloc(sizeof(struct hfc_pci), GFP_ATOMIC);
2188 if (!card) {
2189 printk(KERN_ERR "No kmem for HFC card\n");
2190 return err;
2191 }
2192 card->pdev = pdev;
2193 card->subtype = m->subtype;
2194 err = pci_enable_device(pdev);
2195 if (err) {
2196 kfree(card);
2197 return err;
2198 }
2199
2200 printk(KERN_INFO "mISDN_hfcpci: found adapter %s at %s\n",
2201 m->name, pci_name(pdev));
2202
2203 card->irq = pdev->irq;
2204 pci_set_drvdata(pdev, card);
2205 err = setup_card(card);
2206 if (err)
2207 pci_set_drvdata(pdev, NULL);
2208 return err;
2209}
2210
2211static void __devexit
2212hfc_remove_pci(struct pci_dev *pdev)
2213{
2214 struct hfc_pci *card = pci_get_drvdata(pdev);
2215 u_long flags;
2216
2217 if (card) {
2218 write_lock_irqsave(&HFClock, flags);
2219 release_card(card);
2220 write_unlock_irqrestore(&HFClock, flags);
2221 } else
2222 if (debug)
2223			printk(KERN_WARNING "%s: drvdata already removed\n",
2224 __func__);
2225}
2226
2227
2228static struct pci_driver hfc_driver = {
2229 .name = "hfcpci",
2230 .probe = hfc_probe,
2231 .remove = __devexit_p(hfc_remove_pci),
2232 .id_table = hfc_ids,
2233};
2234
2235static int __init
2236HFC_init(void)
2237{
2238 int err;
2239
2240 err = pci_register_driver(&hfc_driver);
2241 return err;
2242}
2243
2244static void __exit
2245HFC_cleanup(void)
2246{
2247 struct hfc_pci *card, *next;
2248
2249 list_for_each_entry_safe(card, next, &HFClist, list) {
2250 release_card(card);
2251 }
2252 pci_unregister_driver(&hfc_driver);
2253}
2254
2255module_init(HFC_init);
2256module_exit(HFC_cleanup);
diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig
new file mode 100644
index 000000000000..4938355c4072
--- /dev/null
+++ b/drivers/isdn/mISDN/Kconfig
@@ -0,0 +1,44 @@
1#
2# modular ISDN driver
3#
4
5menuconfig MISDN
6 tristate "Modular ISDN driver"
7 help
8 Enable support for the modular ISDN driver.
9
10if MISDN != n
11
12config MISDN_DSP
13 tristate "Digital Audio Processing of transparent data"
14 depends on MISDN
15 help
16 Enable support for digital audio processing capability.
17 This module may be used for special applications that require
18	  cross connecting of bchannels, conferencing, dtmf decoding,
19	  echo cancellation, tone generation, and Blowfish encryption and
20 decryption.
21 It may use hardware features if available.
22 E.g. it is required for PBX4Linux. Go to http://isdn.eversberg.eu
23	  and get more information about this module and its usage.
24 If unsure, say 'N'.
25
26config MISDN_L1OIP
27 tristate "ISDN over IP tunnel"
28 depends on MISDN
29 help
30 Enable support for ISDN over IP tunnel.
31
32 It features:
33 - dynamic IP exchange, if one or both peers have dynamic IPs
34 - BRI (S0) and PRI (S2M) interface
35 - layer 1 control via network keepalive frames
36 - direct tunneling of physical interface via IP
37
38 NOTE: This protocol is called 'Layer 1 over IP' and is not
39 compatible with ISDNoIP (Agfeo) or TDMoIP. Protocol description is
40 provided in the source code.
41
42source "drivers/isdn/hardware/mISDN/Kconfig"
43
44endif #MISDN
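# Example (editor's illustration, not part of this file): a kernel .config
# fragment that builds the pieces above as modules, matching the symbols
# referenced by drivers/isdn/mISDN/Makefile:
#
#   CONFIG_MISDN=m
#   CONFIG_MISDN_DSP=m
#   CONFIG_MISDN_L1OIP=m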
diff --git a/drivers/isdn/mISDN/Makefile b/drivers/isdn/mISDN/Makefile
new file mode 100644
index 000000000000..1cb5e633cf75
--- /dev/null
+++ b/drivers/isdn/mISDN/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for the modular ISDN driver
3#
4
5obj-$(CONFIG_MISDN) += mISDN_core.o
6obj-$(CONFIG_MISDN_DSP) += mISDN_dsp.o
7obj-$(CONFIG_MISDN_L1OIP) += l1oip.o
8
9# multi objects
10
11mISDN_core-objs := core.o fsm.o socket.o hwchannel.o stack.o layer1.o layer2.o tei.o timerdev.o
12mISDN_dsp-objs := dsp_core.o dsp_cmx.o dsp_tones.o dsp_dtmf.o dsp_audio.o dsp_blowfish.o dsp_pipeline.o dsp_hwec.o
13l1oip-objs := l1oip_core.o l1oip_codec.o
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
new file mode 100644
index 000000000000..33068177b7c9
--- /dev/null
+++ b/drivers/isdn/mISDN/core.c
@@ -0,0 +1,244 @@
1/*
2 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include <linux/types.h>
16#include <linux/stddef.h>
17#include <linux/module.h>
18#include <linux/spinlock.h>
19#include <linux/mISDNif.h>
20#include "core.h"
21
22static u_int debug;
23
24MODULE_AUTHOR("Karsten Keil");
25MODULE_LICENSE("GPL");
26module_param(debug, uint, S_IRUGO | S_IWUSR);
27
28static LIST_HEAD(devices);
29DEFINE_RWLOCK(device_lock);
30static u64 device_ids;
31#define MAX_DEVICE_ID 63
32
33static LIST_HEAD(Bprotocols);
34DEFINE_RWLOCK(bp_lock);
35
36struct mISDNdevice
37*get_mdevice(u_int id)
38{
39 struct mISDNdevice *dev;
40
41 read_lock(&device_lock);
42 list_for_each_entry(dev, &devices, D.list)
43 if (dev->id == id) {
44 read_unlock(&device_lock);
45 return dev;
46 }
47 read_unlock(&device_lock);
48 return NULL;
49}
50
51int
52get_mdevice_count(void)
53{
54 struct mISDNdevice *dev;
55 int cnt = 0;
56
57 read_lock(&device_lock);
58 list_for_each_entry(dev, &devices, D.list)
59 cnt++;
60 read_unlock(&device_lock);
61 return cnt;
62}
63
64static int
65get_free_devid(void)
66{
67 u_int i;
68
69 for (i = 0; i <= MAX_DEVICE_ID; i++)
70 if (!test_and_set_bit(i, (u_long *)&device_ids))
71 return i;
72 return -1;
73}
74
75int
76mISDN_register_device(struct mISDNdevice *dev, char *name)
77{
78 u_long flags;
79 int err;
80
81 dev->id = get_free_devid();
82 if (dev->id < 0)
83 return -EBUSY;
84 if (name && name[0])
85 strcpy(dev->name, name);
86 else
87 sprintf(dev->name, "mISDN%d", dev->id);
88 if (debug & DEBUG_CORE)
89 printk(KERN_DEBUG "mISDN_register %s %d\n",
90 dev->name, dev->id);
91 err = create_stack(dev);
92 if (err)
93 return err;
94 write_lock_irqsave(&device_lock, flags);
95 list_add_tail(&dev->D.list, &devices);
96 write_unlock_irqrestore(&device_lock, flags);
97 return 0;
98}
99EXPORT_SYMBOL(mISDN_register_device);
100
101void
102mISDN_unregister_device(struct mISDNdevice *dev) {
103 u_long flags;
104
105 if (debug & DEBUG_CORE)
106 printk(KERN_DEBUG "mISDN_unregister %s %d\n",
107 dev->name, dev->id);
108 write_lock_irqsave(&device_lock, flags);
109 list_del(&dev->D.list);
110 write_unlock_irqrestore(&device_lock, flags);
111 test_and_clear_bit(dev->id, (u_long *)&device_ids);
112 delete_stack(dev);
113}
114EXPORT_SYMBOL(mISDN_unregister_device);
115
116u_int
117get_all_Bprotocols(void)
118{
119 struct Bprotocol *bp;
120 u_int m = 0;
121
122 read_lock(&bp_lock);
123 list_for_each_entry(bp, &Bprotocols, list)
124 m |= bp->Bprotocols;
125 read_unlock(&bp_lock);
126 return m;
127}
128
129struct Bprotocol *
130get_Bprotocol4mask(u_int m)
131{
132 struct Bprotocol *bp;
133
134 read_lock(&bp_lock);
135 list_for_each_entry(bp, &Bprotocols, list)
136 if (bp->Bprotocols & m) {
137 read_unlock(&bp_lock);
138 return bp;
139 }
140 read_unlock(&bp_lock);
141 return NULL;
142}
143
144struct Bprotocol *
145get_Bprotocol4id(u_int id)
146{
147 u_int m;
148
149 if (id < ISDN_P_B_START || id > 63) {
150 printk(KERN_WARNING "%s id not in range %d\n",
151 __func__, id);
152 return NULL;
153 }
154 m = 1 << (id & ISDN_P_B_MASK);
155 return get_Bprotocol4mask(m);
156}
157
158int
159mISDN_register_Bprotocol(struct Bprotocol *bp)
160{
161 u_long flags;
162 struct Bprotocol *old;
163
164 if (debug & DEBUG_CORE)
165 printk(KERN_DEBUG "%s: %s/%x\n", __func__,
166 bp->name, bp->Bprotocols);
167 old = get_Bprotocol4mask(bp->Bprotocols);
168 if (old) {
169 printk(KERN_WARNING
170 "register duplicate protocol old %s/%x new %s/%x\n",
171 old->name, old->Bprotocols, bp->name, bp->Bprotocols);
172 return -EBUSY;
173 }
174 write_lock_irqsave(&bp_lock, flags);
175 list_add_tail(&bp->list, &Bprotocols);
176 write_unlock_irqrestore(&bp_lock, flags);
177 return 0;
178}
179EXPORT_SYMBOL(mISDN_register_Bprotocol);
180
181void
182mISDN_unregister_Bprotocol(struct Bprotocol *bp)
183{
184 u_long flags;
185
186 if (debug & DEBUG_CORE)
187 printk(KERN_DEBUG "%s: %s/%x\n", __func__, bp->name,
188 bp->Bprotocols);
189 write_lock_irqsave(&bp_lock, flags);
190 list_del(&bp->list);
191 write_unlock_irqrestore(&bp_lock, flags);
192}
193EXPORT_SYMBOL(mISDN_unregister_Bprotocol);
194
195int
196mISDNInit(void)
197{
198 int err;
199
200 printk(KERN_INFO "Modular ISDN core version %d.%d.%d\n",
201 MISDN_MAJOR_VERSION, MISDN_MINOR_VERSION, MISDN_RELEASE);
202 mISDN_initstack(&debug);
203 err = mISDN_inittimer(&debug);
204 if (err)
205 goto error;
206 err = l1_init(&debug);
207 if (err) {
208 mISDN_timer_cleanup();
209 goto error;
210 }
211 err = Isdnl2_Init(&debug);
212 if (err) {
213 mISDN_timer_cleanup();
214 l1_cleanup();
215 goto error;
216 }
217 err = misdn_sock_init(&debug);
218 if (err) {
219 mISDN_timer_cleanup();
220 l1_cleanup();
221 Isdnl2_cleanup();
222 }
223error:
224 return err;
225}
226
227void mISDN_cleanup(void)
228{
229 misdn_sock_cleanup();
230 mISDN_timer_cleanup();
231 l1_cleanup();
232 Isdnl2_cleanup();
233
234 if (!list_empty(&devices))
235 printk(KERN_ERR "%s devices still registered\n", __func__);
236
237 if (!list_empty(&Bprotocols))
238 printk(KERN_ERR "%s Bprotocols still registered\n", __func__);
239 printk(KERN_DEBUG "mISDNcore unloaded\n");
240}
241
242module_init(mISDNInit);
243module_exit(mISDN_cleanup);
244
diff --git a/drivers/isdn/mISDN/core.h b/drivers/isdn/mISDN/core.h
new file mode 100644
index 000000000000..7da7233b4c1a
--- /dev/null
+++ b/drivers/isdn/mISDN/core.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifndef mISDN_CORE_H
16#define mISDN_CORE_H
17
18extern struct mISDNdevice *get_mdevice(u_int);
19extern int get_mdevice_count(void);
20
21/* stack status flag */
22#define mISDN_STACK_ACTION_MASK 0x0000ffff
23#define mISDN_STACK_COMMAND_MASK 0x000f0000
24#define mISDN_STACK_STATUS_MASK 0xfff00000
25/* action bits 0-15 */
26#define mISDN_STACK_WORK 0
27#define mISDN_STACK_SETUP 1
28#define mISDN_STACK_CLEARING 2
29#define mISDN_STACK_RESTART 3
30#define mISDN_STACK_WAKEUP 4
31#define mISDN_STACK_ABORT 15
32/* command bits 16-19 */
33#define mISDN_STACK_STOPPED 16
34#define mISDN_STACK_INIT 17
35#define mISDN_STACK_THREADSTART 18
36/* status bits 20-31 */
37#define mISDN_STACK_BCHANNEL 20
38#define mISDN_STACK_ACTIVE 29
39#define mISDN_STACK_RUNNING 30
40#define mISDN_STACK_KILLED 31
41
42
43/* manager options */
44#define MGR_OPT_USER 24
45#define MGR_OPT_NETWORK 25
46
47extern int connect_Bstack(struct mISDNdevice *, struct mISDNchannel *,
48 u_int, struct sockaddr_mISDN *);
49extern int connect_layer1(struct mISDNdevice *, struct mISDNchannel *,
50 u_int, struct sockaddr_mISDN *);
51extern int create_l2entity(struct mISDNdevice *, struct mISDNchannel *,
52 u_int, struct sockaddr_mISDN *);
53
54extern int create_stack(struct mISDNdevice *);
55extern int create_teimanager(struct mISDNdevice *);
56extern void delete_teimanager(struct mISDNchannel *);
57extern void delete_channel(struct mISDNchannel *);
58extern void delete_stack(struct mISDNdevice *);
59extern void mISDN_initstack(u_int *);
60extern int misdn_sock_init(u_int *);
61extern void misdn_sock_cleanup(void);
62extern void add_layer2(struct mISDNchannel *, struct mISDNstack *);
63extern void __add_layer2(struct mISDNchannel *, struct mISDNstack *);
64
65extern u_int get_all_Bprotocols(void);
66struct Bprotocol *get_Bprotocol4mask(u_int);
67struct Bprotocol *get_Bprotocol4id(u_int);
68
69extern int mISDN_inittimer(u_int *);
70extern void mISDN_timer_cleanup(void);
71
72extern int l1_init(u_int *);
73extern void l1_cleanup(void);
74extern int Isdnl2_Init(u_int *);
75extern void Isdnl2_cleanup(void);
76
77#endif
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
new file mode 100644
index 000000000000..6c3fed6b8d4f
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp.h
@@ -0,0 +1,263 @@
1/*
2 * Audio support data for ISDN4Linux.
3 *
4 * Copyright 2002/2003 by Andreas Eversberg (jolly@eversberg.eu)
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 */
10
11#define DEBUG_DSP_CTRL 0x0001
12#define DEBUG_DSP_CORE 0x0002
13#define DEBUG_DSP_DTMF 0x0004
14#define DEBUG_DSP_CMX 0x0010
15#define DEBUG_DSP_TONE 0x0020
16#define DEBUG_DSP_BLOWFISH 0x0040
17#define DEBUG_DSP_DELAY 0x0100
18#define DEBUG_DSP_DTMFCOEFF 0x8000 /* heavy output */
19
20/* options may be:
21 *
22 * bit 0 = use ulaw instead of alaw
23 * bit 1 = do not use hfc hardware acceleration (software only)
24 *
25 */
26#define DSP_OPT_ULAW (1<<0)
27#define DSP_OPT_NOHARDWARE (1<<1)
28
29#include <linux/timer.h>
30#include <linux/workqueue.h>
31
32#include "dsp_ecdis.h"
33
34extern int dsp_options;
35extern int dsp_debug;
36extern int dsp_poll;
37extern int dsp_tics;
38extern spinlock_t dsp_lock;
39extern struct work_struct dsp_workq;
40extern u32 dsp_poll_diff; /* calculated fixed-point corrected poll value */
41
42/***************
43 * audio stuff *
44 ***************/
45
46extern s32 dsp_audio_alaw_to_s32[256];
47extern s32 dsp_audio_ulaw_to_s32[256];
48extern s32 *dsp_audio_law_to_s32;
49extern u8 dsp_audio_s16_to_law[65536];
50extern u8 dsp_audio_alaw_to_ulaw[256];
51extern u8 dsp_audio_mix_law[65536];
52extern u8 dsp_audio_seven2law[128];
53extern u8 dsp_audio_law2seven[256];
54extern void dsp_audio_generate_law_tables(void);
55extern void dsp_audio_generate_s2law_table(void);
56extern void dsp_audio_generate_seven(void);
57extern void dsp_audio_generate_mix_table(void);
58extern void dsp_audio_generate_ulaw_samples(void);
59extern void dsp_audio_generate_volume_changes(void);
60extern u8 dsp_silence;
61
62
63/*************
64 * cmx stuff *
65 *************/
66
67#define MAX_POLL 256 /* maximum number of send-chunks */
68
69#define CMX_BUFF_SIZE 0x8000 /* must be 2**n (0x1000 about 1/2 second) */
70#define CMX_BUFF_HALF 0x4000 /* CMX_BUFF_SIZE / 2 */
71#define CMX_BUFF_MASK 0x7fff /* CMX_BUFF_SIZE - 1 */
72
73/* for how many seconds we track the lowest delay before the jitter buffer
74   is reduced by that delay */
75#define MAX_SECONDS_JITTER_CHECK 5
76
77extern struct timer_list dsp_spl_tl;
78extern u32 dsp_spl_jiffies;
79
80/* the structure of conferences:
81 *
82 * each conference has a unique number, given by user space.
83 * the conferences are linked in a chain.
84 * each conference has members linked in a chain.
85 * each dsp instance points to a member, each member points to its dsp instance.
86 */
87
88/* all members within a conference (this is linked 1:1 with the dsp) */
89struct dsp;
90struct dsp_conf_member {
91 struct list_head list;
92 struct dsp *dsp;
93};
94
95/* the list of all conferences */
96struct dsp_conf {
97 struct list_head list;
98 u32 id;
99 /* all cmx stacks with the same ID are
100 connected */
101 struct list_head mlist;
102 int software; /* conf is processed by software */
103 int hardware; /* conf is processed by hardware */
104 /* note: if both unset, has only one member */
105};
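/*
 * Editorial sketch (not used by the driver): walking the member list of a
 * conference as laid out by the two structures above. The function name is
 * hypothetical; locking of the conference lists is assumed to be handled by
 * the caller, as in the cmx code.
 */
static inline int dsp_conf_count_members_sketch(struct dsp_conf *conf)
{
	struct dsp_conf_member *member;
	int n = 0;

	list_for_each_entry(member, &conf->mlist, list)
		n++;	/* each member points back to its dsp via member->dsp */
	return n;
}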
106
107
108/**************
109 * DTMF stuff *
110 **************/
111
112#define DSP_DTMF_NPOINTS 102
113
114#define ECHOCAN_BUFLEN (4*128)
115
116struct dsp_dtmf {
117	int treshold; /* above this (squared) level it is dtmf */
118 int software; /* dtmf uses software decoding */
119 int hardware; /* dtmf uses hardware decoding */
120 int size; /* number of bytes in buffer */
121 signed short buffer[DSP_DTMF_NPOINTS];
122 /* buffers one full dtmf frame */
123 u8 lastwhat, lastdigit;
124 int count;
125 u8 digits[16]; /* just the dtmf result */
126};
127
128
129/******************
130 * pipeline stuff *
131 ******************/
132struct dsp_pipeline {
133 rwlock_t lock;
134 struct list_head list;
135 int inuse;
136};
137
138/***************
139 * tones stuff *
140 ***************/
141
142struct dsp_tone {
143 int software; /* tones are generated by software */
144 int hardware; /* tones are generated by hardware */
145 int tone;
146 void *pattern;
147 int count;
148 int index;
149 struct timer_list tl;
150};
151
152/*****************
153 * general stuff *
154 *****************/
155
156struct dsp {
157 struct list_head list;
158 struct mISDNchannel ch;
159 struct mISDNchannel *up;
160 unsigned char name[64];
161 int b_active;
162 int echo; /* echo is enabled */
163 int rx_disabled; /* what the user wants */
164 int rx_is_off; /* what the card is */
165 int tx_mix;
166 struct dsp_tone tone;
167 struct dsp_dtmf dtmf;
168 int tx_volume, rx_volume;
169
170 /* queue for sending frames */
171 struct work_struct workq;
172 struct sk_buff_head sendq;
173 int hdlc; /* if mode is hdlc */
174 int data_pending; /* currently an unconfirmed frame */
175
176 /* conference stuff */
177 u32 conf_id;
178 struct dsp_conf *conf;
179 struct dsp_conf_member
180 *member;
181
182 /* buffer stuff */
183 int rx_W; /* current write pos for data without timestamp */
184 int rx_R; /* current read pos for transmit clock */
185 int rx_init; /* if set, pointers will be adjusted first */
186 int tx_W; /* current write pos for transmit data */
187 int tx_R; /* current read pos for transmit clock */
188 int rx_delay[MAX_SECONDS_JITTER_CHECK];
189 int tx_delay[MAX_SECONDS_JITTER_CHECK];
190 u8 tx_buff[CMX_BUFF_SIZE];
191 u8 rx_buff[CMX_BUFF_SIZE];
192 int last_tx; /* if set, we transmitted last poll interval */
193 int cmx_delay; /* initial delay of buffers,
194 or 0 for dynamic jitter buffer */
195 int tx_dejitter; /* if set, dejitter tx buffer */
196 int tx_data; /* enables tx-data of CMX to upper layer */
197
198 /* hardware stuff */
199 struct dsp_features features;
200 int features_rx_off; /* set if rx_off is featured */
201 int pcm_slot_rx; /* current PCM slot (or -1) */
202 int pcm_bank_rx;
203 int pcm_slot_tx;
204 int pcm_bank_tx;
205 int hfc_conf; /* unique id of current conference (or -1) */
206
207 /* encryption stuff */
208 int bf_enable;
209 u32 bf_p[18];
210 u32 bf_s[1024];
211 int bf_crypt_pos;
212 u8 bf_data_in[9];
213 u8 bf_crypt_out[9];
214 int bf_decrypt_in_pos;
215 int bf_decrypt_out_pos;
216 u8 bf_crypt_inring[16];
217 u8 bf_data_out[9];
218 int bf_sync;
219
220 struct dsp_pipeline
221 pipeline;
222};
223
224/* functions */
225
226extern void dsp_change_volume(struct sk_buff *skb, int volume);
227
228extern struct list_head dsp_ilist;
229extern struct list_head conf_ilist;
230extern void dsp_cmx_debug(struct dsp *dsp);
231extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp);
232extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id);
233extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb);
234extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb);
235extern void dsp_cmx_send(void *arg);
236extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb);
237extern int dsp_cmx_del_conf_member(struct dsp *dsp);
238extern int dsp_cmx_del_conf(struct dsp_conf *conf);
239
240extern void dsp_dtmf_goertzel_init(struct dsp *dsp);
241extern void dsp_dtmf_hardware(struct dsp *dsp);
242extern u8 *dsp_dtmf_goertzel_decode(struct dsp *dsp, u8 *data, int len,
243 int fmt);
244
245extern int dsp_tone(struct dsp *dsp, int tone);
246extern void dsp_tone_copy(struct dsp *dsp, u8 *data, int len);
247extern void dsp_tone_timeout(void *arg);
248
249extern void dsp_bf_encrypt(struct dsp *dsp, u8 *data, int len);
250extern void dsp_bf_decrypt(struct dsp *dsp, u8 *data, int len);
251extern int dsp_bf_init(struct dsp *dsp, const u8 *key, unsigned int keylen);
252extern void dsp_bf_cleanup(struct dsp *dsp);
253
254extern int dsp_pipeline_module_init(void);
255extern void dsp_pipeline_module_exit(void);
256extern int dsp_pipeline_init(struct dsp_pipeline *pipeline);
257extern void dsp_pipeline_destroy(struct dsp_pipeline *pipeline);
258extern int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg);
259extern void dsp_pipeline_process_tx(struct dsp_pipeline *pipeline, u8 *data,
260 int len);
261extern void dsp_pipeline_process_rx(struct dsp_pipeline *pipeline, u8 *data,
262 int len);
263
diff --git a/drivers/isdn/mISDN/dsp_audio.c b/drivers/isdn/mISDN/dsp_audio.c
new file mode 100644
index 000000000000..1c2dd5694773
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_audio.c
@@ -0,0 +1,434 @@
1/*
2 * Audio support data for mISDN_dsp.
3 *
4 * Copyright 2002/2003 by Andreas Eversberg (jolly@eversberg.eu)
5 * Rewritten by Peter
6 *
7 * This software may be used and distributed according to the terms
8 * of the GNU General Public License, incorporated herein by reference.
9 *
10 */
11
12#include <linux/delay.h>
13#include <linux/mISDNif.h>
14#include <linux/mISDNdsp.h>
15#include "core.h"
16#include "dsp.h"
17
18/* ulaw[unsigned char] -> signed 16-bit */
19s32 dsp_audio_ulaw_to_s32[256];
20/* alaw[unsigned char] -> signed 16-bit */
21s32 dsp_audio_alaw_to_s32[256];
22
23s32 *dsp_audio_law_to_s32;
24EXPORT_SYMBOL(dsp_audio_law_to_s32);
25
26/* signed 16-bit -> law */
27u8 dsp_audio_s16_to_law[65536];
28EXPORT_SYMBOL(dsp_audio_s16_to_law);
29
30/* alaw -> ulaw */
31u8 dsp_audio_alaw_to_ulaw[256];
32/* ulaw -> alaw */
33u8 dsp_audio_ulaw_to_alaw[256];
34u8 dsp_silence;
35
36
37/*****************************************************
38 * generate table for conversion of s16 to alaw/ulaw *
39 *****************************************************/
40
41#define AMI_MASK 0x55
42
43static inline unsigned char linear2alaw(short int linear)
44{
45 int mask;
46 int seg;
47 int pcm_val;
48 static int seg_end[8] = {
49 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF
50 };
51
52 pcm_val = linear;
53 if (pcm_val >= 0) {
54 /* Sign (7th) bit = 1 */
55 mask = AMI_MASK | 0x80;
56 } else {
57 /* Sign bit = 0 */
58 mask = AMI_MASK;
59 pcm_val = -pcm_val;
60 }
61
62 /* Convert the scaled magnitude to segment number. */
63 for (seg = 0; seg < 8; seg++) {
64 if (pcm_val <= seg_end[seg])
65 break;
66 }
67 /* Combine the sign, segment, and quantization bits. */
68 return ((seg << 4) |
69 ((pcm_val >> ((seg) ? (seg + 3) : 4)) & 0x0F)) ^ mask;
70}
71
72
73static inline short int alaw2linear(unsigned char alaw)
74{
75 int i;
76 int seg;
77
78 alaw ^= AMI_MASK;
79 i = ((alaw & 0x0F) << 4) + 8 /* rounding error */;
80 seg = (((int) alaw & 0x70) >> 4);
81 if (seg)
82 i = (i + 0x100) << (seg - 1);
83 return (short int) ((alaw & 0x80) ? i : -i);
84}
85
86static inline short int ulaw2linear(unsigned char ulaw)
87{
88 short mu, e, f, y;
89 static short etab[] = {0, 132, 396, 924, 1980, 4092, 8316, 16764};
90
91 mu = 255 - ulaw;
92 e = (mu & 0x70) / 16;
93 f = mu & 0x0f;
94 y = f * (1 << (e + 3));
95 y += etab[e];
96 if (mu & 0x80)
97 y = -y;
98 return y;
99}
100
101#define BIAS 0x84 /*!< define the add-in bias for 16 bit samples */
102
103static unsigned char linear2ulaw(short sample)
104{
105 static int exp_lut[256] = {
106 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
107 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
108 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
109 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
110 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
111 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
112 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
113 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
114 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
115 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
116 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
117 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
118 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
119 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
120 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
121 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};
122 int sign, exponent, mantissa;
123 unsigned char ulawbyte;
124
125 /* Get the sample into sign-magnitude. */
126 sign = (sample >> 8) & 0x80; /* set aside the sign */
127 if (sign != 0)
128 sample = -sample; /* get magnitude */
129
130 /* Convert from 16 bit linear to ulaw. */
131 sample = sample + BIAS;
132 exponent = exp_lut[(sample >> 7) & 0xFF];
133 mantissa = (sample >> (exponent + 3)) & 0x0F;
134 ulawbyte = ~(sign | (exponent << 4) | mantissa);
135
136 return ulawbyte;
137}
138
139static int reverse_bits(int i)
140{
141 int z, j;
142 z = 0;
143
144 for (j = 0; j < 8; j++) {
145 if ((i & (1 << j)) != 0)
146 z |= 1 << (7 - j);
147 }
148 return z;
149}
150
151
152void dsp_audio_generate_law_tables(void)
153{
154 int i;
155 for (i = 0; i < 256; i++)
156 dsp_audio_alaw_to_s32[i] = alaw2linear(reverse_bits(i));
157
158 for (i = 0; i < 256; i++)
159 dsp_audio_ulaw_to_s32[i] = ulaw2linear(reverse_bits(i));
160
161 for (i = 0; i < 256; i++) {
162 dsp_audio_alaw_to_ulaw[i] =
163 linear2ulaw(dsp_audio_alaw_to_s32[i]);
164 dsp_audio_ulaw_to_alaw[i] =
165 linear2alaw(dsp_audio_ulaw_to_s32[i]);
166 }
167}
168
169void
170dsp_audio_generate_s2law_table(void)
171{
172 int i;
173
174 if (dsp_options & DSP_OPT_ULAW) {
175 /* generating ulaw-table */
176 for (i = -32768; i < 32768; i++) {
177 dsp_audio_s16_to_law[i & 0xffff] =
178 reverse_bits(linear2ulaw(i));
179 }
180 } else {
181 /* generating alaw-table */
182 for (i = -32768; i < 32768; i++) {
183 dsp_audio_s16_to_law[i & 0xffff] =
184 reverse_bits(linear2alaw(i));
185 }
186 }
187}
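/*
 * Editorial sketch (not part of the driver): the intended use of the tables
 * generated above -- decode a law byte to linear, then encode it back. The
 * helper name is hypothetical; dsp_audio_law_to_s32 points at the alaw or
 * ulaw table depending on DSP_OPT_ULAW.
 */
static inline u8 dsp_audio_law_roundtrip_sketch(u8 law)
{
	s32 linear = dsp_audio_law_to_s32[law];

	/* any processing in the signed 16-bit range would happen here */

	return dsp_audio_s16_to_law[linear & 0xffff];
}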
188
189
190/*
191 * the seven bit sample is the index of every second alaw-sample ordered by
192 * amplitude. 0x00 is the most negative, 0x7f the most positive amplitude.
193 */
194u8 dsp_audio_seven2law[128];
195u8 dsp_audio_law2seven[256];
196
197/********************************************************************
198 * generate table for conversion law from/to 7-bit alaw-like sample *
199 ********************************************************************/
200
201void
202dsp_audio_generate_seven(void)
203{
204 int i, j, k;
205 u8 spl;
206 u8 sorted_alaw[256];
207
208 /* generate alaw table, sorted by the linear value */
209 for (i = 0; i < 256; i++) {
210 j = 0;
211 for (k = 0; k < 256; k++) {
212 if (dsp_audio_alaw_to_s32[k]
213 < dsp_audio_alaw_to_s32[i]) {
214 j++;
215 }
216 }
217 sorted_alaw[j] = i;
218 }
219
220	/* generate tables */
221 for (i = 0; i < 256; i++) {
222 /* spl is the source: the law-sample (converted to alaw) */
223 spl = i;
224 if (dsp_options & DSP_OPT_ULAW)
225 spl = dsp_audio_ulaw_to_alaw[i];
226 /* find the 7-bit-sample */
227 for (j = 0; j < 256; j++) {
228 if (sorted_alaw[j] == spl)
229 break;
230 }
231 /* write 7-bit audio value */
232 dsp_audio_law2seven[i] = j >> 1;
233 }
234 for (i = 0; i < 128; i++) {
235 spl = sorted_alaw[i << 1];
236 if (dsp_options & DSP_OPT_ULAW)
237 spl = dsp_audio_alaw_to_ulaw[spl];
238 dsp_audio_seven2law[i] = spl;
239 }
240}
241
242
243/* mix 2*law -> law */
244u8 dsp_audio_mix_law[65536];
245
246/******************************************************
247 * generate mix table to mix two law samples into one *
248 ******************************************************/
249
250void
251dsp_audio_generate_mix_table(void)
252{
253 int i, j;
254 s32 sample;
255
256 i = 0;
257 while (i < 256) {
258 j = 0;
259 while (j < 256) {
260 sample = dsp_audio_law_to_s32[i];
261 sample += dsp_audio_law_to_s32[j];
262 if (sample > 32767)
263 sample = 32767;
264 if (sample < -32768)
265 sample = -32768;
266 dsp_audio_mix_law[(i<<8)|j] =
267 dsp_audio_s16_to_law[sample & 0xffff];
268 j++;
269 }
270 i++;
271 }
272}
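/*
 * Editorial note (illustration only): the table above is meant to be indexed
 * with two law-encoded samples packed exactly as it is filled here, e.g.:
 *
 *	u8 mixed = dsp_audio_mix_law[(sample_a << 8) | sample_b];
 */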
273
274
275/*************************************
276 * generate different volume changes *
277 *************************************/
278
279static u8 dsp_audio_reduce8[256];
280static u8 dsp_audio_reduce7[256];
281static u8 dsp_audio_reduce6[256];
282static u8 dsp_audio_reduce5[256];
283static u8 dsp_audio_reduce4[256];
284static u8 dsp_audio_reduce3[256];
285static u8 dsp_audio_reduce2[256];
286static u8 dsp_audio_reduce1[256];
287static u8 dsp_audio_increase1[256];
288static u8 dsp_audio_increase2[256];
289static u8 dsp_audio_increase3[256];
290static u8 dsp_audio_increase4[256];
291static u8 dsp_audio_increase5[256];
292static u8 dsp_audio_increase6[256];
293static u8 dsp_audio_increase7[256];
294static u8 dsp_audio_increase8[256];
295
296static u8 *dsp_audio_volume_change[16] = {
297 dsp_audio_reduce8,
298 dsp_audio_reduce7,
299 dsp_audio_reduce6,
300 dsp_audio_reduce5,
301 dsp_audio_reduce4,
302 dsp_audio_reduce3,
303 dsp_audio_reduce2,
304 dsp_audio_reduce1,
305 dsp_audio_increase1,
306 dsp_audio_increase2,
307 dsp_audio_increase3,
308 dsp_audio_increase4,
309 dsp_audio_increase5,
310 dsp_audio_increase6,
311 dsp_audio_increase7,
312 dsp_audio_increase8,
313};
314
315void
316dsp_audio_generate_volume_changes(void)
317{
318 register s32 sample;
319 int i;
320 int num[] = { 110, 125, 150, 175, 200, 300, 400, 500 };
321 int denum[] = { 100, 100, 100, 100, 100, 100, 100, 100 };
322
323 i = 0;
324 while (i < 256) {
325 dsp_audio_reduce8[i] = dsp_audio_s16_to_law[
326 (dsp_audio_law_to_s32[i] * denum[7] / num[7]) & 0xffff];
327 dsp_audio_reduce7[i] = dsp_audio_s16_to_law[
328 (dsp_audio_law_to_s32[i] * denum[6] / num[6]) & 0xffff];
329 dsp_audio_reduce6[i] = dsp_audio_s16_to_law[
330 (dsp_audio_law_to_s32[i] * denum[5] / num[5]) & 0xffff];
331 dsp_audio_reduce5[i] = dsp_audio_s16_to_law[
332 (dsp_audio_law_to_s32[i] * denum[4] / num[4]) & 0xffff];
333 dsp_audio_reduce4[i] = dsp_audio_s16_to_law[
334 (dsp_audio_law_to_s32[i] * denum[3] / num[3]) & 0xffff];
335 dsp_audio_reduce3[i] = dsp_audio_s16_to_law[
336 (dsp_audio_law_to_s32[i] * denum[2] / num[2]) & 0xffff];
337 dsp_audio_reduce2[i] = dsp_audio_s16_to_law[
338 (dsp_audio_law_to_s32[i] * denum[1] / num[1]) & 0xffff];
339 dsp_audio_reduce1[i] = dsp_audio_s16_to_law[
340 (dsp_audio_law_to_s32[i] * denum[0] / num[0]) & 0xffff];
341 sample = dsp_audio_law_to_s32[i] * num[0] / denum[0];
342 if (sample < -32768)
343 sample = -32768;
344 else if (sample > 32767)
345 sample = 32767;
346 dsp_audio_increase1[i] = dsp_audio_s16_to_law[sample & 0xffff];
347 sample = dsp_audio_law_to_s32[i] * num[1] / denum[1];
348 if (sample < -32768)
349 sample = -32768;
350 else if (sample > 32767)
351 sample = 32767;
352 dsp_audio_increase2[i] = dsp_audio_s16_to_law[sample & 0xffff];
353 sample = dsp_audio_law_to_s32[i] * num[2] / denum[2];
354 if (sample < -32768)
355 sample = -32768;
356 else if (sample > 32767)
357 sample = 32767;
358 dsp_audio_increase3[i] = dsp_audio_s16_to_law[sample & 0xffff];
359 sample = dsp_audio_law_to_s32[i] * num[3] / denum[3];
360 if (sample < -32768)
361 sample = -32768;
362 else if (sample > 32767)
363 sample = 32767;
364 dsp_audio_increase4[i] = dsp_audio_s16_to_law[sample & 0xffff];
365 sample = dsp_audio_law_to_s32[i] * num[4] / denum[4];
366 if (sample < -32768)
367 sample = -32768;
368 else if (sample > 32767)
369 sample = 32767;
370 dsp_audio_increase5[i] = dsp_audio_s16_to_law[sample & 0xffff];
371 sample = dsp_audio_law_to_s32[i] * num[5] / denum[5];
372 if (sample < -32768)
373 sample = -32768;
374 else if (sample > 32767)
375 sample = 32767;
376 dsp_audio_increase6[i] = dsp_audio_s16_to_law[sample & 0xffff];
377 sample = dsp_audio_law_to_s32[i] * num[6] / denum[6];
378 if (sample < -32768)
379 sample = -32768;
380 else if (sample > 32767)
381 sample = 32767;
382 dsp_audio_increase7[i] = dsp_audio_s16_to_law[sample & 0xffff];
383 sample = dsp_audio_law_to_s32[i] * num[7] / denum[7];
384 if (sample < -32768)
385 sample = -32768;
386 else if (sample > 32767)
387 sample = 32767;
388 dsp_audio_increase8[i] = dsp_audio_s16_to_law[sample & 0xffff];
389
390 i++;
391 }
392}
393
394
395/**************************************
396 * change the volume of the given skb *
397 **************************************/
398
399/* this is a helper function for changing the volume of an skb. the range is
400 * -8 to 8 and selects one of the precomputed gain tables above; 0 means no change
401 */
402void
403dsp_change_volume(struct sk_buff *skb, int volume)
404{
405 u8 *volume_change;
406 int i, ii;
407 u8 *p;
408 int shift;
409
410 if (volume == 0)
411 return;
412
413 /* get correct conversion table */
414 if (volume < 0) {
415 shift = volume + 8;
416 if (shift < 0)
417 shift = 0;
418 } else {
419 shift = volume + 7;
420 if (shift > 15)
421 shift = 15;
422 }
423 volume_change = dsp_audio_volume_change[shift];
424 i = 0;
425 ii = skb->len;
426 p = skb->data;
427 /* change volume */
428 while (i < ii) {
429 *p = volume_change[*p];
430 p++;
431 i++;
432 }
433}
434
diff --git a/drivers/isdn/mISDN/dsp_biquad.h b/drivers/isdn/mISDN/dsp_biquad.h
new file mode 100644
index 000000000000..038191bc45f5
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_biquad.h
@@ -0,0 +1,65 @@
1/*
2 * SpanDSP - a series of DSP components for telephony
3 *
4 * biquad.h - General telephony bi-quad section routines (currently this just
5 * handles canonic/type 2 form)
6 *
7 * Written by Steve Underwood <steveu@coppice.org>
8 *
9 * Copyright (C) 2001 Steve Underwood
10 *
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 */
28
29struct biquad2_state {
30 int32_t gain;
31 int32_t a1;
32 int32_t a2;
33 int32_t b1;
34 int32_t b2;
35
36 int32_t z1;
37 int32_t z2;
38};
39
40static inline void biquad2_init(struct biquad2_state *bq,
41 int32_t gain, int32_t a1, int32_t a2, int32_t b1, int32_t b2)
42{
43 bq->gain = gain;
44 bq->a1 = a1;
45 bq->a2 = a2;
46 bq->b1 = b1;
47 bq->b2 = b2;
48
49 bq->z1 = 0;
50 bq->z2 = 0;
51}
52
53static inline int16_t biquad2(struct biquad2_state *bq, int16_t sample)
54{
55 int32_t y;
56 int32_t z0;
57
58 z0 = sample*bq->gain + bq->z1*bq->a1 + bq->z2*bq->a2;
59 y = z0 + bq->z1*bq->b1 + bq->z2*bq->b2;
60
61 bq->z2 = bq->z1;
62 bq->z1 = z0 >> 15;
63 y >>= 15;
64 return y;
65}
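/*
 * Editorial sketch (not part of the header): running a buffer of samples
 * through one bi-quad section. The coefficients are placeholders giving a
 * pass-through filter (unity gain in Q15, no poles/zeros); the code that
 * includes this header supplies the real coefficients.
 */
static inline void biquad2_buffer_sketch(int16_t *buf, int len)
{
	struct biquad2_state bq;
	int i;

	biquad2_init(&bq, 1 << 15, 0, 0, 0, 0);
	for (i = 0; i < len; i++)
		buf[i] = biquad2(&bq, buf[i]);
}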
diff --git a/drivers/isdn/mISDN/dsp_blowfish.c b/drivers/isdn/mISDN/dsp_blowfish.c
new file mode 100644
index 000000000000..18e411e95bba
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_blowfish.c
@@ -0,0 +1,672 @@
1/*
2 * Blowfish encryption/decryption for mISDN_dsp.
3 *
4 * Copyright Andreas Eversberg (jolly@eversberg.eu)
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 */
10
11#include <linux/mISDNif.h>
12#include <linux/mISDNdsp.h>
13#include "core.h"
14#include "dsp.h"
15
16/*
17 * how to encode a sample stream into 64-bit blocks that will be encrypted
18 *
19 * first of all, data is collected until a block of 9 samples is received.
20 * of course, a packet may contain many more than 9 samples, but it may
21 * not contain an exact multiple of 9 samples. if there is a remainder, the
22 * next received data will complete the block.
23 *
24 * the block is then converted to 9 uLAW samples without the least significant
25 * bit. each result is a 7-bit encoded sample.
26 *
27 * the samples will be reorganised to form 8 bytes of data:
28 * (5(6) means: encoded sample no. 5, bit 6)
29 *
30 * 0(6) 0(5) 0(4) 0(3) 0(2) 0(1) 0(0) 1(6)
31 * 1(5) 1(4) 1(3) 1(2) 1(1) 1(0) 2(6) 2(5)
32 * 2(4) 2(3) 2(2) 2(1) 2(0) 3(6) 3(5) 3(4)
33 * 3(3) 3(2) 3(1) 3(0) 4(6) 4(5) 4(4) 4(3)
34 * 4(2) 4(1) 4(0) 5(6) 5(5) 5(4) 5(3) 5(2)
35 * 5(1) 5(0) 6(6) 6(5) 6(4) 6(3) 6(2) 6(1)
36 * 6(0) 7(6) 7(5) 7(4) 7(3) 7(2) 7(1) 7(0)
37 * 8(6) 8(5) 8(4) 8(3) 8(2) 8(1) 8(0)
38 *
39 * the missing bit 0 of the last byte is filled with some
40 * random noise, to fill all 8 bytes.
41 *
42 * the 8 bytes will be encrypted using blowfish.
43 *
44 * the result will be converted into 9 bytes. bit 7 is used for the
45 * checksum (CS), for sync (0, 1) and for the last bit:
46 * (5(6) means: crypted byte 5, bit 6)
47 *
48 * 1 0(7) 0(6) 0(5) 0(4) 0(3) 0(2) 0(1)
49 * 0 0(0) 1(7) 1(6) 1(5) 1(4) 1(3) 1(2)
50 * 0 1(1) 1(0) 2(7) 2(6) 2(5) 2(4) 2(3)
51 * 0 2(2) 2(1) 2(0) 3(7) 3(6) 3(5) 3(4)
52 * 0 3(3) 3(2) 3(1) 3(0) 4(7) 4(6) 4(5)
53 * CS 4(4) 4(3) 4(2) 4(1) 4(0) 5(7) 5(6)
54 * CS 5(5) 5(4) 5(3) 5(2) 5(1) 5(0) 6(7)
55 * CS 6(6) 6(5) 6(4) 6(3) 6(2) 6(1) 6(0)
56 * 7(0) 7(6) 7(5) 7(4) 7(3) 7(2) 7(1) 7(0)
57 *
58 * the checksum is used to detect transmission errors and frame drops.
59 *
60 * synchronisation of a received block is done by shifting the upper bit of
61 * each byte (bit 7) into a shift register. if the register holds the five
62 * start bits (10000), sync has been found. only then is the current block of
63 * 9 received bytes decrypted. before that, the checksum is calculated; if it
64 * is incorrect the block is dropped.
65 * this avoids loud noise due to corrupt encrypted data.
66 *
67 * if the last block is corrupt, the current decoded block is repeated
68 * until a valid block has been received.
69 */
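/*
 * Editorial sketch (not part of the driver): packing nine 7-bit samples into
 * eight bytes exactly as drawn in the first table above, MSB first. The
 * function name is hypothetical; the driver's encoder (dsp_bf_encrypt) works
 * in place on the dsp state and also fills bit 0 of the last byte with noise.
 */
static inline void bf_pack_7bit_sketch(const u8 in[9], u8 out[8])
{
	int i, bitpos = 0;

	for (i = 0; i < 8; i++)
		out[i] = 0;
	for (i = 0; i < 9; i++) {
		/* place the 7 significant bits at the current bit offset */
		u16 bits = (u16)(in[i] & 0x7f) << (9 - (bitpos & 7));

		out[bitpos >> 3] |= bits >> 8;
		if ((bitpos & 7) > 1)	/* sample spills into the next byte */
			out[(bitpos >> 3) + 1] |= bits & 0xff;
		bitpos += 7;
	}
}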
70
71/*
72 * some blowfish parts are taken from the
73 * crypto-api for faster implementation
74 */
75
76struct bf_ctx {
77 u32 p[18];
78 u32 s[1024];
79};
80
81static const u32 bf_pbox[16 + 2] = {
82 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344,
83 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89,
84 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
85 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917,
86 0x9216d5d9, 0x8979fb1b,
87};
88
89static const u32 bf_sbox[256 * 4] = {
90 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7,
91 0xb8e1afed, 0x6a267e96, 0xba7c9045, 0xf12c7f99,
92 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
93 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e,
94 0x0d95748f, 0x728eb658, 0x718bcd58, 0x82154aee,
95 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
96 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef,
97 0x8e79dcb0, 0x603a180e, 0x6c9e0e8b, 0xb01e8a3e,
98 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
99 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440,
100 0x55ca396a, 0x2aab10b6, 0xb4cc5c34, 0x1141e8ce,
101 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
102 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e,
103 0xafd6ba33, 0x6c24cf5c, 0x7a325381, 0x28958677,
104 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
105 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032,
106 0xef845d5d, 0xe98575b1, 0xdc262302, 0xeb651b88,
107 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
108 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e,
109 0x21c66842, 0xf6e96c9a, 0x670c9c61, 0xabd388f0,
110 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
111 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98,
112 0xa1f1651d, 0x39af0176, 0x66ca593e, 0x82430e88,
113 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
114 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6,
115 0x4ed3aa62, 0x363f7706, 0x1bfedf72, 0x429b023d,
116 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
117 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7,
118 0xe3fe501a, 0xb6794c3b, 0x976ce0bd, 0x04c006ba,
119 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
120 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f,
121 0x6dfc511f, 0x9b30952c, 0xcc814544, 0xaf5ebd09,
122 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
123 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb,
124 0x5579c0bd, 0x1a60320a, 0xd6a100c6, 0x402c7279,
125 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
126 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab,
127 0x323db5fa, 0xfd238760, 0x53317b48, 0x3e00df82,
128 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
129 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573,
130 0x695b27b0, 0xbbca58c8, 0xe1ffa35d, 0xb8f011a0,
131 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
132 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790,
133 0xe1ddf2da, 0xa4cb7e33, 0x62fb1341, 0xcee4c6e8,
134 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
135 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0,
136 0xd08ed1d0, 0xafc725e0, 0x8e3c5b2f, 0x8e7594b7,
137 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
138 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad,
139 0x2f2f2218, 0xbe0e1777, 0xea752dfe, 0x8b021fa1,
140 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
141 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9,
142 0x165fa266, 0x80957705, 0x93cc7314, 0x211a1477,
143 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
144 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49,
145 0x00250e2d, 0x2071b35e, 0x226800bb, 0x57b8e0af,
146 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
147 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5,
148 0x83260376, 0x6295cfa9, 0x11c81968, 0x4e734a41,
149 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
150 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400,
151 0x08ba6fb5, 0x571be91f, 0xf296ec6b, 0x2a0dd915,
152 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
153 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
154 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623,
155 0xad6ea6b0, 0x49a7df7d, 0x9cee60b8, 0x8fedb266,
156 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
157 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e,
158 0x3f54989a, 0x5b429d65, 0x6b8fe4d6, 0x99f73fd6,
159 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
160 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e,
161 0x09686b3f, 0x3ebaefc9, 0x3c971814, 0x6b6a70a1,
162 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
163 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8,
164 0xb03ada37, 0xf0500c0d, 0xf01c1f04, 0x0200b3ff,
165 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
166 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701,
167 0x3ae5e581, 0x37c2dadc, 0xc8b57634, 0x9af3dda7,
168 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
169 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331,
170 0x4e548b38, 0x4f6db908, 0x6f420d03, 0xf60a04bf,
171 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
172 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e,
173 0x5512721f, 0x2e6b7124, 0x501adde6, 0x9f84cd87,
174 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
175 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2,
176 0xef1c1847, 0x3215d908, 0xdd433b37, 0x24c2ba16,
177 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
178 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b,
179 0x043556f1, 0xd7a3c76b, 0x3c11183b, 0x5924a509,
180 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
181 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3,
182 0x771fe71c, 0x4e3d06fa, 0x2965dcb9, 0x99e71d0f,
183 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
184 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4,
185 0xf2f74ea7, 0x361d2b3d, 0x1939260f, 0x19c27960,
186 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
187 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28,
188 0xc332ddef, 0xbe6c5aa5, 0x65582185, 0x68ab9802,
189 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
190 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510,
191 0x13cca830, 0xeb61bd96, 0x0334fe1e, 0xaa0363cf,
192 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
193 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e,
194 0x648b1eaf, 0x19bdf0ca, 0xa02369b9, 0x655abb50,
195 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
196 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8,
197 0xf837889a, 0x97e32d77, 0x11ed935f, 0x16681281,
198 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
199 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696,
200 0xcdb30aeb, 0x532e3054, 0x8fd948e4, 0x6dbc3128,
201 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
202 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0,
203 0x45eee2b6, 0xa3aaabea, 0xdb6c4f15, 0xfacb4fd0,
204 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
205 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250,
206 0xcf62a1f2, 0x5b8d2646, 0xfc8883a0, 0xc1c7b6a3,
207 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
208 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00,
209 0x58428d2a, 0x0c55f5ea, 0x1dadf43e, 0x233f7061,
210 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
211 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e,
212 0xa6078084, 0x19f8509e, 0xe8efd855, 0x61d99735,
213 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
214 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9,
215 0xdb73dbd3, 0x105588cd, 0x675fda79, 0xe3674340,
216 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
217 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
218 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934,
219 0x411520f7, 0x7602d4f7, 0xbcf46b2e, 0xd4a20068,
220 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
221 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840,
222 0x4d95fc1d, 0x96b591af, 0x70f4ddd3, 0x66a02f45,
223 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
224 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a,
225 0x28507825, 0x530429f4, 0x0a2c86da, 0xe9b66dfb,
226 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
227 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6,
228 0xaace1e7c, 0xd3375fec, 0xce78a399, 0x406b2a42,
229 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
230 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2,
231 0x3a6efa74, 0xdd5b4332, 0x6841e7f7, 0xca7820fb,
232 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
233 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b,
234 0x55a867bc, 0xa1159a58, 0xcca92963, 0x99e1db33,
235 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
236 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3,
237 0x95c11548, 0xe4c66d22, 0x48c1133f, 0xc70f86dc,
238 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
239 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564,
240 0x257b7834, 0x602a9c60, 0xdff8e8a3, 0x1f636c1b,
241 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
242 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922,
243 0x85b2a20e, 0xe6ba0d99, 0xde720c8c, 0x2da2f728,
244 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
245 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e,
246 0x0a476341, 0x992eff74, 0x3a6f6eab, 0xf4f8fd37,
247 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
248 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804,
249 0xf1290dc7, 0xcc00ffa3, 0xb5390f92, 0x690fed0b,
250 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
251 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb,
252 0x37392eb3, 0xcc115979, 0x8026e297, 0xf42e312d,
253 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
254 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350,
255 0x1a6b1018, 0x11caedfa, 0x3d25bdd8, 0xe2e1c3c9,
256 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
257 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe,
258 0x9dbc8057, 0xf0f7c086, 0x60787bf8, 0x6003604d,
259 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
260 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f,
261 0x77a057be, 0xbde8ae24, 0x55464299, 0xbf582e61,
262 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
263 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9,
264 0x7aeb2661, 0x8b1ddf84, 0x846a0e79, 0x915f95e2,
265 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
266 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e,
267 0xb77f19b6, 0xe0a9dc09, 0x662d09a1, 0xc4324633,
268 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
269 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169,
270 0xdcb7da83, 0x573906fe, 0xa1e2ce9b, 0x4fcd7f52,
271 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
272 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5,
273 0xf0177a28, 0xc0f586e0, 0x006058aa, 0x30dc7d62,
274 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
275 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76,
276 0x6f05e409, 0x4b7c0188, 0x39720a3d, 0x7c927c24,
277 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
278 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4,
279 0x1e50ef5e, 0xb161e6f8, 0xa28514d9, 0x6c51133c,
280 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
281 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
282 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b,
283 0x5cb0679e, 0x4fa33742, 0xd3822740, 0x99bc9bbe,
284 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
285 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4,
286 0x5748ab2f, 0xbc946e79, 0xc6a376d2, 0x6549c2c8,
287 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
288 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304,
289 0xa1fad5f0, 0x6a2d519a, 0x63ef8ce2, 0x9a86ee22,
290 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
291 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6,
292 0x2826a2f9, 0xa73a3ae1, 0x4ba99586, 0xef5562e9,
293 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
294 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593,
295 0xe990fd5a, 0x9e34d797, 0x2cf0b7d9, 0x022b8b51,
296 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
297 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c,
298 0xe029ac71, 0xe019a5e6, 0x47b0acfd, 0xed93fa9b,
299 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
300 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c,
301 0x15056dd4, 0x88f46dba, 0x03a16125, 0x0564f0bd,
302 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
303 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319,
304 0x7533d928, 0xb155fdf5, 0x03563482, 0x8aba3cbb,
305 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
306 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991,
307 0xea7a90c2, 0xfb3e7bce, 0x5121ce64, 0x774fbe32,
308 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
309 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166,
310 0xb39a460a, 0x6445c0dd, 0x586cdecf, 0x1c20c8ae,
311 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
312 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5,
313 0x72eacea8, 0xfa6484bb, 0x8d6612ae, 0xbf3c6f47,
314 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
315 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d,
316 0x4040cb08, 0x4eb4e2cc, 0x34d2466a, 0x0115af84,
317 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
318 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8,
319 0x611560b1, 0xe7933fdc, 0xbb3a792b, 0x344525bd,
320 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
321 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7,
322 0x1a908749, 0xd44fbd9a, 0xd0dadecb, 0xd50ada38,
323 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
324 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c,
325 0xbf97222c, 0x15e6fc2a, 0x0f91fc71, 0x9b941525,
326 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
327 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442,
328 0xe0ec6e0e, 0x1698db3b, 0x4c98a0be, 0x3278e964,
329 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
330 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8,
331 0xdf359f8d, 0x9b992f2e, 0xe60b6f47, 0x0fe3f11d,
332 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
333 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299,
334 0xf523f357, 0xa6327623, 0x93a83531, 0x56cccd02,
335 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
336 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614,
337 0xe6c6c7bd, 0x327a140a, 0x45e1d006, 0xc3f27b9a,
338 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
339 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b,
340 0x53113ec0, 0x1640e3d3, 0x38abbd60, 0x2547adf0,
341 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
342 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e,
343 0x1948c25c, 0x02fb8a8c, 0x01c36ae4, 0xd6ebe1f9,
344 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
345 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
346};
347
348/*
349 * Round loop unrolling macros, S is a pointer to an S-box array
350 * organized as four unsigned longs per row.
351 */
352#define GET32_3(x) (((x) & 0xff))
353#define GET32_2(x) (((x) >> (8)) & (0xff))
354#define GET32_1(x) (((x) >> (16)) & (0xff))
355#define GET32_0(x) (((x) >> (24)) & (0xff))
356
357#define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \
358 S[512 + GET32_2(x)]) + S[768 + GET32_3(x)])
359
360#define EROUND(a, b, n) do { b ^= P[n]; a ^= bf_F(b); } while (0)
361#define DROUND(a, b, n) do { a ^= bf_F(b); b ^= P[n]; } while (0)
362
363
364/*
365 * encrypt isdn data frame
366 * every block with 9 samples is encrypted
367 */
368void
369dsp_bf_encrypt(struct dsp *dsp, u8 *data, int len)
370{
371 int i = 0, j = dsp->bf_crypt_pos;
372 u8 *bf_data_in = dsp->bf_data_in;
373 u8 *bf_crypt_out = dsp->bf_crypt_out;
374 u32 *P = dsp->bf_p;
375 u32 *S = dsp->bf_s;
376 u32 yl, yr;
377 u32 cs;
378 u8 nibble;
379
380 while (i < len) {
381 /* collect a block of 9 samples */
382 if (j < 9) {
383 bf_data_in[j] = *data;
384 *data++ = bf_crypt_out[j++];
385 i++;
386 continue;
387 }
388 j = 0;
389 /* transcode 9 samples xlaw to 8 bytes */
390 yl = dsp_audio_law2seven[bf_data_in[0]];
391 yl = (yl<<7) | dsp_audio_law2seven[bf_data_in[1]];
392 yl = (yl<<7) | dsp_audio_law2seven[bf_data_in[2]];
393 yl = (yl<<7) | dsp_audio_law2seven[bf_data_in[3]];
394 nibble = dsp_audio_law2seven[bf_data_in[4]];
395 yr = nibble;
396 yl = (yl<<4) | (nibble>>3);
397 yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[5]];
398 yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[6]];
399 yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[7]];
400 yr = (yr<<7) | dsp_audio_law2seven[bf_data_in[8]];
401 yr = (yr<<1) | (bf_data_in[0] & 1);
402
403 /* fill the unused bit with random noise from the audio input */
404 /* encrypt */
405
406 EROUND(yr, yl, 0);
407 EROUND(yl, yr, 1);
408 EROUND(yr, yl, 2);
409 EROUND(yl, yr, 3);
410 EROUND(yr, yl, 4);
411 EROUND(yl, yr, 5);
412 EROUND(yr, yl, 6);
413 EROUND(yl, yr, 7);
414 EROUND(yr, yl, 8);
415 EROUND(yl, yr, 9);
416 EROUND(yr, yl, 10);
417 EROUND(yl, yr, 11);
418 EROUND(yr, yl, 12);
419 EROUND(yl, yr, 13);
420 EROUND(yr, yl, 14);
421 EROUND(yl, yr, 15);
422 yl ^= P[16];
423 yr ^= P[17];
424
425 /* calculate 3-bit checksum */
426 cs = yl ^ (yl>>3) ^ (yl>>6) ^ (yl>>9) ^ (yl>>12) ^ (yl>>15)
427 ^ (yl>>18) ^ (yl>>21) ^ (yl>>24) ^ (yl>>27) ^ (yl>>30)
428 ^ (yr<<2) ^ (yr>>1) ^ (yr>>4) ^ (yr>>7) ^ (yr>>10)
429 ^ (yr>>13) ^ (yr>>16) ^ (yr>>19) ^ (yr>>22) ^ (yr>>25)
430 ^ (yr>>28) ^ (yr>>31);
431
432 /*
433 * transcode 8 encrypted bytes to 9 data bytes with sync
434 * and checksum information
435 */
436 bf_crypt_out[0] = (yl>>25) | 0x80;
437 bf_crypt_out[1] = (yl>>18) & 0x7f;
438 bf_crypt_out[2] = (yl>>11) & 0x7f;
439 bf_crypt_out[3] = (yl>>4) & 0x7f;
440 bf_crypt_out[4] = ((yl<<3) & 0x78) | ((yr>>29) & 0x07);
441 bf_crypt_out[5] = ((yr>>22) & 0x7f) | ((cs<<5) & 0x80);
442 bf_crypt_out[6] = ((yr>>15) & 0x7f) | ((cs<<6) & 0x80);
443 bf_crypt_out[7] = ((yr>>8) & 0x7f) | (cs<<7);
444 bf_crypt_out[8] = yr;
445 }
446
447 /* write current count */
448 dsp->bf_crypt_pos = j;
449
450}
451
452
453/*
454 * decrypt isdn data frame
455 * every block with 9 bytes is decrypted
456 */
457void
458dsp_bf_decrypt(struct dsp *dsp, u8 *data, int len)
459{
460 int i = 0;
461 u8 j = dsp->bf_decrypt_in_pos;
462 u8 k = dsp->bf_decrypt_out_pos;
463 u8 *bf_crypt_inring = dsp->bf_crypt_inring;
464 u8 *bf_data_out = dsp->bf_data_out;
465 u16 sync = dsp->bf_sync;
466 u32 *P = dsp->bf_p;
467 u32 *S = dsp->bf_s;
468 u32 yl, yr;
469 u8 nibble;
470 u8 cs, cs0, cs1, cs2;
471
472 while (i < len) {
473 /*
474 * shift upper bit and rotate data to buffer ring
475 * send current decrypted data
476 */
477 sync = (sync<<1) | ((*data)>>7);
478 bf_crypt_inring[j++ & 15] = *data;
479 *data++ = bf_data_out[k++];
480 i++;
481 if (k == 9)
482 k = 0; /* repeat if no sync has been found */
483 /* check if not in sync */
484 if ((sync&0x1f0) != 0x100)
485 continue;
486 j -= 9;
487 /* transcode receive data to 64 bit block of encrypted data */
488 yl = bf_crypt_inring[j++ & 15];
489 yl = (yl<<7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
490 yl = (yl<<7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
491 yl = (yl<<7) | bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
492 nibble = bf_crypt_inring[j++ & 15]; /* bit7 = 0 */
493 yr = nibble;
494 yl = (yl<<4) | (nibble>>3);
495 cs2 = bf_crypt_inring[j++ & 15];
496 yr = (yr<<7) | (cs2 & 0x7f);
497 cs1 = bf_crypt_inring[j++ & 15];
498 yr = (yr<<7) | (cs1 & 0x7f);
499 cs0 = bf_crypt_inring[j++ & 15];
500 yr = (yr<<7) | (cs0 & 0x7f);
501 yr = (yr<<8) | bf_crypt_inring[j++ & 15];
502
503 /* calculate 3-bit checksum */
504 cs = yl ^ (yl>>3) ^ (yl>>6) ^ (yl>>9) ^ (yl>>12) ^ (yl>>15)
505 ^ (yl>>18) ^ (yl>>21) ^ (yl>>24) ^ (yl>>27) ^ (yl>>30)
506 ^ (yr<<2) ^ (yr>>1) ^ (yr>>4) ^ (yr>>7) ^ (yr>>10)
507 ^ (yr>>13) ^ (yr>>16) ^ (yr>>19) ^ (yr>>22) ^ (yr>>25)
508 ^ (yr>>28) ^ (yr>>31);
509
510 /* check if frame is valid */
511 if ((cs&0x7) != (((cs2>>5)&4) | ((cs1>>6)&2) | (cs0 >> 7))) {
512 if (dsp_debug & DEBUG_DSP_BLOWFISH)
513 printk(KERN_DEBUG
514 "DSP BLOWFISH: received corrupt frame, "
515 "checksum is not correct\n");
516 continue;
517 }
518
519 /* decrypt */
520 yr ^= P[17];
521 yl ^= P[16];
522 DROUND(yl, yr, 15);
523 DROUND(yr, yl, 14);
524 DROUND(yl, yr, 13);
525 DROUND(yr, yl, 12);
526 DROUND(yl, yr, 11);
527 DROUND(yr, yl, 10);
528 DROUND(yl, yr, 9);
529 DROUND(yr, yl, 8);
530 DROUND(yl, yr, 7);
531 DROUND(yr, yl, 6);
532 DROUND(yl, yr, 5);
533 DROUND(yr, yl, 4);
534 DROUND(yl, yr, 3);
535 DROUND(yr, yl, 2);
536 DROUND(yl, yr, 1);
537 DROUND(yr, yl, 0);
538
539 /* transcode 8 encrypted bytes to 9 sample bytes */
540 bf_data_out[0] = dsp_audio_seven2law[(yl>>25) & 0x7f];
541 bf_data_out[1] = dsp_audio_seven2law[(yl>>18) & 0x7f];
542 bf_data_out[2] = dsp_audio_seven2law[(yl>>11) & 0x7f];
543 bf_data_out[3] = dsp_audio_seven2law[(yl>>4) & 0x7f];
544 bf_data_out[4] = dsp_audio_seven2law[((yl<<3) & 0x78) |
545 ((yr>>29) & 0x07)];
546
547 bf_data_out[5] = dsp_audio_seven2law[(yr>>22) & 0x7f];
548 bf_data_out[6] = dsp_audio_seven2law[(yr>>15) & 0x7f];
549 bf_data_out[7] = dsp_audio_seven2law[(yr>>8) & 0x7f];
550 bf_data_out[8] = dsp_audio_seven2law[(yr>>1) & 0x7f];
551 k = 0; /* start with new decoded frame */
552 }
553
554 /* write current count and sync */
555 dsp->bf_decrypt_in_pos = j;
556 dsp->bf_decrypt_out_pos = k;
557 dsp->bf_sync = sync;
558}
559
560
561/* used to encrypt S and P boxes */
562static inline void
563encrypt_block(const u32 *P, const u32 *S, u32 *dst, u32 *src)
564{
565 u32 yl = src[0];
566 u32 yr = src[1];
567
568 EROUND(yr, yl, 0);
569 EROUND(yl, yr, 1);
570 EROUND(yr, yl, 2);
571 EROUND(yl, yr, 3);
572 EROUND(yr, yl, 4);
573 EROUND(yl, yr, 5);
574 EROUND(yr, yl, 6);
575 EROUND(yl, yr, 7);
576 EROUND(yr, yl, 8);
577 EROUND(yl, yr, 9);
578 EROUND(yr, yl, 10);
579 EROUND(yl, yr, 11);
580 EROUND(yr, yl, 12);
581 EROUND(yl, yr, 13);
582 EROUND(yr, yl, 14);
583 EROUND(yl, yr, 15);
584
585 yl ^= P[16];
586 yr ^= P[17];
587
588 dst[0] = yr;
589 dst[1] = yl;
590}
591
592/*
593 * initialize the dsp for encryption and decryption using the same key
594 * Calculates the blowfish S and P boxes for encryption and decryption.
595 * The keylen must be in the range of 4-56 bytes.
596 * returns 0 if ok.
597 */
598int
599dsp_bf_init(struct dsp *dsp, const u8 *key, uint keylen)
600{
601 short i, j, count;
602 u32 data[2], temp;
603 u32 *P = (u32 *)dsp->bf_p;
604 u32 *S = (u32 *)dsp->bf_s;
605
606 if (keylen < 4 || keylen > 56)
607 return 1;
608
609 /* Set dsp states */
610 i = 0;
611 while (i < 9) {
612 dsp->bf_crypt_out[i] = 0xff;
613 dsp->bf_data_out[i] = dsp_silence;
614 i++;
615 }
616 dsp->bf_crypt_pos = 0;
617 dsp->bf_decrypt_in_pos = 0;
618 dsp->bf_decrypt_out_pos = 0;
619 dsp->bf_sync = 0x1ff;
620 dsp->bf_enable = 1;
621
622 /* Copy the initialization s-boxes */
623 for (i = 0, count = 0; i < 256; i++)
624 for (j = 0; j < 4; j++, count++)
625 S[count] = bf_sbox[count];
626
627 /* Set the p-boxes */
628 for (i = 0; i < 16 + 2; i++)
629 P[i] = bf_pbox[i];
630
631 /* Actual subkey generation */
632 for (j = 0, i = 0; i < 16 + 2; i++) {
633 temp = (((u32)key[j] << 24) |
634 ((u32)key[(j + 1) % keylen] << 16) |
635 ((u32)key[(j + 2) % keylen] << 8) |
636 ((u32)key[(j + 3) % keylen]));
637
638 P[i] = P[i] ^ temp;
639 j = (j + 4) % keylen;
640 }
641
642 data[0] = 0x00000000;
643 data[1] = 0x00000000;
644
645 for (i = 0; i < 16 + 2; i += 2) {
646 encrypt_block(P, S, data, data);
647
648 P[i] = data[0];
649 P[i + 1] = data[1];
650 }
651
652 for (i = 0; i < 4; i++) {
653 for (j = 0, count = i * 256; j < 256; j += 2, count += 2) {
654 encrypt_block(P, S, data, data);
655
656 S[count] = data[0];
657 S[count + 1] = data[1];
658 }
659 }
660
661 return 0;
662}
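
 For orientation, this is how the three helpers above fit together. The wrapper
 function and the key below are hypothetical and assume a struct dsp that has
 already been fully set up by the dsp core:

/* hypothetical usage sketch, not part of the driver */
static void example_bf_usage(struct dsp *dsp, u8 *samples, int len)
{
	static const u8 key[] = "0123456789abcdef";	/* 16-byte demo key */

	if (dsp_bf_init(dsp, key, sizeof(key) - 1))
		return;				/* keylen outside the 4-56 byte range */

	dsp_bf_encrypt(dsp, samples, len);	/* encrypt samples in place, 9-byte blocks */
	dsp_bf_cleanup(dsp);			/* turn encryption off again */
}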
663
664
665/*
666 * turn encryption off
667 */
668void
669dsp_bf_cleanup(struct dsp *dsp)
670{
671 dsp->bf_enable = 0;
672}
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
new file mode 100644
index 000000000000..e92b1ba4b45e
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -0,0 +1,1886 @@
1/*
2 * Audio crossconnecting/conferencing (hardware level).
3 *
4 * Copyright 2002 by Andreas Eversberg (jolly@eversberg.eu)
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 */
10
11/*
12 * The process of adding and removing parties to/from a conference:
13 *
14 * There is a chain of struct dsp_conf which has one or more members in a chain
15 * of struct dsp_conf_member.
16 *
17 * After a party is added, the conference is checked for hardware capability.
18 * Also if a party is removed, the conference is checked again.
19 *
20 * There are 3 different solutions: -1 = software, 0 = hardware-crossconnect,
21 * 1-n = hardware-conference, where n gives the conference number.
22 *
23 * Depending on the change after removal or insertion of a party, hardware
24 * commands are given.
25 *
26 * The current solution is stored within the struct dsp_conf entry.
27 */
28
29/*
30 * HOW THE CMX WORKS:
31 *
32 * There are 3 types of interaction: If one member is alone, only the data
33 * flow from the upper to the lower layer is handled.
34 * Two members will also exchange their data, so they are crossconnected.
35 * Three or more members will be added to a conference and will hear each
36 * other, but will not receive their own speech (echo) unless enabled.
37 *
38 * Features of CMX are:
39 * - Crossconnecting or even conference, if more than two members are together.
40 * - Force mixing of transmit data with other crossconnect/conference members.
41 * - Echo generation to benchmark the delay of audio processing.
42 * - Use hardware to minimize cpu load, disable FIFO load and minimize delay.
43 * - Dejittering and clock generation.
44 *
45 * There are 2 buffers:
46 *
47 *
48 * RX-Buffer
49 * R W
50 * | |
51 * ----------------+-------------+-------------------
52 *
53 * The rx-buffer is a ring buffer used to store the received data for each
54 * individual member. This is only the case if data needs to be dejittered
55 * or in case of a conference where different clocks require reclocking.
56 * The transmit-clock (R) will read the buffer.
57 * If the clock overruns the write-pointer, we will have a buffer underrun.
58 * If the write pointer always has a certain distance from the transmit-
59 * clock, we will have a delay. The delay is dynamically increased and
60 * reduced.
61 *
62 *
63 * TX-Buffer
64 * R W
65 * | |
66 * -----------------+--------+-----------------------
67 *
68 * The tx-buffer is a ring buffer that queues the transmit data from user space
69 * until it is mixed or sent. There are two pointers, R and W. If the write
70 * pointer W were to reach or overrun R, the buffer would overrun. In this case
71 * (some) data is dropped so that it will not overrun.
72 * Additionally a dynamic dejittering can be enabled. This allows data from
73 * user space that has jitter or a different clock source.
74 *
75 *
76 * Clock:
77 *
78 * A clock is not required if the data source has exactly one clock. In this
79 * case the data source is forwarded to the destination.
80 *
81 * A clock is required if the data source
82 * - has multiple clocks.
83 * - has no usable clock due to jitter or packet loss (VoIP).
84 * In this case the system's clock is used. The clock resolution depends on
85 * the jiffies resolution.
86 *
87 * If a member joins a conference:
88 *
89 * - If a member joins, its rx_buff is set to silence and its read pointer is
90 * set to the transmit clock.
91 *
92 * The procedure of received data from card is explained in cmx_receive.
93 * The procedure of received data from user space is explained in cmx_transmit.
94 * The procedure of transmit data to card is cmx_send.
95 *
96 *
97 * Interaction with other features:
98 *
99 * DTMF:
100 * DTMF decoding is done before the data is crossconnected.
101 *
102 * Volume change:
103 * Changing rx-volume is done before the data is crossconnected. The tx-volume
104 * must be changed whenever data is transmitted to the card by the cmx.
105 *
106 * Tones:
107 * If a tone is enabled, it will be processed whenever data is transmitted to
108 * the card. It will replace the tx-data from the user space.
109 * If tones are generated by hardware, this conference member is removed
110 * during that time.
111 *
112 * Disable rx-data:
113 * If cmx is realized in hardware, rx data will be disabled if requested by
114 * the upper layer. If dtmf decoding is done by software and enabled, rx data
115 * will not be disabled, but blocked from reaching the upper layer.
116 *
117 * HFC conference engine:
118 * If it is possible to realize all features using hardware, hardware will be
119 * used if not forbidden by control command. Disabling rx-data provides
120 * completely traffic-free audio processing (except for the quick 1-frame
121 * upload of a tone loop, only once for a new tone).
122 *
123 */
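
 To make the rx-buffer arithmetic above concrete, here is a minimal stand-alone
 sketch of the write side. The buffer size, names and silence value are
 illustrative only; the driver itself uses CMX_BUFF_SIZE, CMX_BUFF_MASK and
 dsp_silence, as cmx_receive below shows:

#include <stdint.h>
#include <string.h>

#define BUF_SIZE	0x1000			/* illustrative; must be a power of two */
#define BUF_MASK	(BUF_SIZE - 1)
#define BUF_HALF	(BUF_SIZE / 2)

struct rx_ring {
	uint8_t buf[BUF_SIZE];
	unsigned int r;		/* read pointer, advanced by the transmit clock */
	unsigned int w;		/* write pointer, advanced by received data */
};

/*
 * write received samples; if the distance between W and R exceeds half the
 * buffer, treat it as an under/overrun and resync W a 'delay' ahead of R
 */
static void rx_ring_write(struct rx_ring *rb, const uint8_t *data, int len,
			  unsigned int delay)
{
	int i;

	if (((rb->w - rb->r) & BUF_MASK) >= BUF_HALF) {
		rb->w = (rb->r + delay) & BUF_MASK;
		memset(rb->buf, 0x00, sizeof(rb->buf));	/* placeholder for silence */
	}
	for (i = 0; i < len; i++)
		rb->buf[(rb->w + i) & BUF_MASK] = data[i];
	rb->w = (rb->w + len) & BUF_MASK;
}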
124
125/* delay.h is required for hw_lock.h */
126
127#include <linux/delay.h>
128#include <linux/mISDNif.h>
129#include <linux/mISDNdsp.h>
130#include "core.h"
131#include "dsp.h"
132/*
133 * debugging of multi party conference,
134 * by using conference even with two members
135 */
136
137/* #define CMX_CONF_DEBUG */
138
139/*#define CMX_DEBUG * massive read/write pointer output */
140/*#define CMX_TX_DEBUG * massive read/write on tx-buffer with content */
141
142static inline int
143count_list_member(struct list_head *head)
144{
145 int cnt = 0;
146 struct list_head *m;
147
148 list_for_each(m, head)
149 cnt++;
150 return cnt;
151}
152
153/*
154 * debug cmx memory structure
155 */
156void
157dsp_cmx_debug(struct dsp *dsp)
158{
159 struct dsp_conf *conf;
160 struct dsp_conf_member *member;
161 struct dsp *odsp;
162
163 printk(KERN_DEBUG "-----Current DSP\n");
164 list_for_each_entry(odsp, &dsp_ilist, list) {
165 printk(KERN_DEBUG "* %s echo=%d txmix=%d",
166 odsp->name, odsp->echo, odsp->tx_mix);
167 if (odsp->conf)
168 printk(" (Conf %d)", odsp->conf->id);
169 if (dsp == odsp)
170 printk(" *this*");
171 printk("\n");
172 }
173 printk(KERN_DEBUG "-----Current Conf:\n");
174 list_for_each_entry(conf, &conf_ilist, list) {
175 printk(KERN_DEBUG "* Conf %d (%p)\n", conf->id, conf);
176 list_for_each_entry(member, &conf->mlist, list) {
177 printk(KERN_DEBUG
178 " - member = %s (slot_tx %d, bank_tx %d, "
179 "slot_rx %d, bank_rx %d hfc_conf %d)%s\n",
180 member->dsp->name, member->dsp->pcm_slot_tx,
181 member->dsp->pcm_bank_tx, member->dsp->pcm_slot_rx,
182 member->dsp->pcm_bank_rx, member->dsp->hfc_conf,
183 (member->dsp == dsp) ? " *this*" : "");
184 }
185 }
186 printk(KERN_DEBUG "-----end\n");
187}
188
189/*
190 * search conference
191 */
192static struct dsp_conf *
193dsp_cmx_search_conf(u32 id)
194{
195 struct dsp_conf *conf;
196
197 if (!id) {
198 printk(KERN_WARNING "%s: conference ID is 0.\n", __func__);
199 return NULL;
200 }
201
202 /* search conference */
203 list_for_each_entry(conf, &conf_ilist, list)
204 if (conf->id == id)
205 return conf;
206
207 return NULL;
208}
209
210
211/*
212 * add member to conference
213 */
214static int
215dsp_cmx_add_conf_member(struct dsp *dsp, struct dsp_conf *conf)
216{
217 struct dsp_conf_member *member;
218
219 if (!conf || !dsp) {
220 printk(KERN_WARNING "%s: conf or dsp is 0.\n", __func__);
221 return -EINVAL;
222 }
223 if (dsp->member) {
224 printk(KERN_WARNING "%s: dsp is already member in a conf.\n",
225 __func__);
226 return -EINVAL;
227 }
228
229 if (dsp->conf) {
230 printk(KERN_WARNING "%s: dsp is already in a conf.\n",
231 __func__);
232 return -EINVAL;
233 }
234
235 member = kzalloc(sizeof(struct dsp_conf_member), GFP_ATOMIC);
236 if (!member) {
237 printk(KERN_ERR "kzalloc struct dsp_conf_member failed\n");
238 return -ENOMEM;
239 }
240 member->dsp = dsp;
241 /* clear rx buffer */
242 memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff));
243 dsp->rx_init = 1; /* rx_W and rx_R will be adjusted on first frame */
244 dsp->rx_W = 0;
245 dsp->rx_R = 0;
246
247 list_add_tail(&member->list, &conf->mlist);
248
249 dsp->conf = conf;
250 dsp->member = member;
251
252 return 0;
253}
254
255
256/*
257 * del member from conference
258 */
259int
260dsp_cmx_del_conf_member(struct dsp *dsp)
261{
262 struct dsp_conf_member *member;
263
264 if (!dsp) {
265 printk(KERN_WARNING "%s: dsp is 0.\n",
266 __func__);
267 return -EINVAL;
268 }
269
270 if (!dsp->conf) {
271 printk(KERN_WARNING "%s: dsp is not in a conf.\n",
272 __func__);
273 return -EINVAL;
274 }
275
276 if (list_empty(&dsp->conf->mlist)) {
277 printk(KERN_WARNING "%s: dsp has linked an empty conf.\n",
278 __func__);
279 return -EINVAL;
280 }
281
282 /* find us in conf */
283 list_for_each_entry(member, &dsp->conf->mlist, list) {
284 if (member->dsp == dsp) {
285 list_del(&member->list);
286 dsp->conf = NULL;
287 dsp->member = NULL;
288 kfree(member);
289 return 0;
290 }
291 }
292 printk(KERN_WARNING
293 "%s: dsp is not present in its own conf_member list.\n",
294 __func__);
295
296 return -EINVAL;
297}
298
299
300/*
301 * new conference
302 */
303static struct dsp_conf
304*dsp_cmx_new_conf(u32 id)
305{
306 struct dsp_conf *conf;
307
308 if (!id) {
309 printk(KERN_WARNING "%s: id is 0.\n",
310 __func__);
311 return NULL;
312 }
313
314 conf = kzalloc(sizeof(struct dsp_conf), GFP_ATOMIC);
315 if (!conf) {
316 printk(KERN_ERR "kzalloc struct dsp_conf failed\n");
317 return NULL;
318 }
319 INIT_LIST_HEAD(&conf->mlist);
320 conf->id = id;
321
322 list_add_tail(&conf->list, &conf_ilist);
323
324 return conf;
325}
326
327
328/*
329 * del conference
330 */
331int
332dsp_cmx_del_conf(struct dsp_conf *conf)
333{
334 if (!conf) {
335 printk(KERN_WARNING "%s: conf is null.\n",
336 __func__);
337 return -EINVAL;
338 }
339
340 if (!list_empty(&conf->mlist)) {
341 printk(KERN_WARNING "%s: conf not empty.\n",
342 __func__);
343 return -EINVAL;
344 }
345 list_del(&conf->list);
346 kfree(conf);
347
348 return 0;
349}
350
351
352/*
353 * send HW message to hfc card
354 */
355static void
356dsp_cmx_hw_message(struct dsp *dsp, u32 message, u32 param1, u32 param2,
357 u32 param3, u32 param4)
358{
359 struct mISDN_ctrl_req cq;
360
361 memset(&cq, 0, sizeof(cq));
362 cq.op = message;
363 cq.p1 = param1 | (param2 << 8);
364 cq.p2 = param3 | (param4 << 8);
365 if (dsp->ch.peer)
366 dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq);
367}
368
369
370/*
371 * do hardware update and set the software/hardware flag
372 *
373 * either a conference or a dsp instance can be given
374 * if only dsp instance is given, the instance is not associated with a conf
375 * and therefore removed. if a conference is given, the dsp is expected to
376 * be member of that conference.
377 */
378void
379dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
380{
381 struct dsp_conf_member *member, *nextm;
382 struct dsp *finddsp;
383 int memb = 0, i, ii, i1, i2;
384 int freeunits[8];
385 u_char freeslots[256];
386 int same_hfc = -1, same_pcm = -1, current_conf = -1,
387 all_conf = 1;
388
389 /* dsp gets updated (no conf) */
390 if (!conf) {
391 if (!dsp)
392 return;
393 if (dsp_debug & DEBUG_DSP_CMX)
394 printk(KERN_DEBUG "%s checking dsp %s\n",
395 __func__, dsp->name);
396one_member:
397 /* remove HFC conference if enabled */
398 if (dsp->hfc_conf >= 0) {
399 if (dsp_debug & DEBUG_DSP_CMX)
400 printk(KERN_DEBUG
401 "%s removing %s from HFC conf %d "
402 "because dsp is split\n", __func__,
403 dsp->name, dsp->hfc_conf);
404 dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_CONF_SPLIT,
405 0, 0, 0, 0);
406 dsp->hfc_conf = -1;
407 }
408 /* process hw echo */
409 if (dsp->features.pcm_banks < 1)
410 return;
411 if (!dsp->echo) {
412 /* NO ECHO: remove PCM slot if assigned */
413 if (dsp->pcm_slot_tx >= 0 || dsp->pcm_slot_rx >= 0) {
414 if (dsp_debug & DEBUG_DSP_CMX)
415 printk(KERN_DEBUG "%s removing %s from"
416 " PCM slot %d (TX) %d (RX) because"
417 " dsp is split (no echo)\n",
418 __func__, dsp->name,
419 dsp->pcm_slot_tx, dsp->pcm_slot_rx);
420 dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_PCM_DISC,
421 0, 0, 0, 0);
422 dsp->pcm_slot_tx = -1;
423 dsp->pcm_bank_tx = -1;
424 dsp->pcm_slot_rx = -1;
425 dsp->pcm_bank_rx = -1;
426 }
427 return;
428 }
429 /* ECHO: already echo */
430 if (dsp->pcm_slot_tx >= 0 && dsp->pcm_slot_rx < 0 &&
431 dsp->pcm_bank_tx == 2 && dsp->pcm_bank_rx == 2)
432 return;
433 /* ECHO: if slot already assigned */
434 if (dsp->pcm_slot_tx >= 0) {
435 dsp->pcm_slot_rx = dsp->pcm_slot_tx;
436 dsp->pcm_bank_tx = 2; /* 2 means loop */
437 dsp->pcm_bank_rx = 2;
438 if (dsp_debug & DEBUG_DSP_CMX)
439 printk(KERN_DEBUG
440 "%s refresh %s for echo using slot %d\n",
441 __func__, dsp->name,
442 dsp->pcm_slot_tx);
443 dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_PCM_CONN,
444 dsp->pcm_slot_tx, 2, dsp->pcm_slot_rx, 2);
445 return;
446 }
447 /* ECHO: find slot */
448 dsp->pcm_slot_tx = -1;
449 dsp->pcm_slot_rx = -1;
450 memset(freeslots, 1, sizeof(freeslots));
451 list_for_each_entry(finddsp, &dsp_ilist, list) {
452 if (finddsp->features.pcm_id == dsp->features.pcm_id) {
453 if (finddsp->pcm_slot_rx >= 0 &&
454 finddsp->pcm_slot_rx < sizeof(freeslots))
455 freeslots[finddsp->pcm_slot_tx] = 0;
456 if (finddsp->pcm_slot_tx >= 0 &&
457 finddsp->pcm_slot_tx < sizeof(freeslots))
458 freeslots[finddsp->pcm_slot_rx] = 0;
459 }
460 }
461 i = 0;
462 ii = dsp->features.pcm_slots;
463 while (i < ii) {
464 if (freeslots[i])
465 break;
466 i++;
467 }
468 if (i == ii) {
469 if (dsp_debug & DEBUG_DSP_CMX)
470 printk(KERN_DEBUG
471 "%s no slot available for echo\n",
472 __func__);
473 /* no more slots available */
474 return;
475 }
476 /* assign free slot */
477 dsp->pcm_slot_tx = i;
478 dsp->pcm_slot_rx = i;
479 dsp->pcm_bank_tx = 2; /* loop */
480 dsp->pcm_bank_rx = 2;
481 if (dsp_debug & DEBUG_DSP_CMX)
482 printk(KERN_DEBUG
483 "%s assign echo for %s using slot %d\n",
484 __func__, dsp->name, dsp->pcm_slot_tx);
485 dsp_cmx_hw_message(dsp, MISDN_CTRL_HFC_PCM_CONN,
486 dsp->pcm_slot_tx, 2, dsp->pcm_slot_rx, 2);
487 return;
488 }
489
490 /* conf gets updated (all members) */
491 if (dsp_debug & DEBUG_DSP_CMX)
492 printk(KERN_DEBUG "%s checking conference %d\n",
493 __func__, conf->id);
494
495 if (list_empty(&conf->mlist)) {
496 printk(KERN_ERR "%s: conference without members\n",
497 __func__);
498 return;
499 }
500 member = list_entry(conf->mlist.next, struct dsp_conf_member, list);
501 same_hfc = member->dsp->features.hfc_id;
502 same_pcm = member->dsp->features.pcm_id;
503 /* check all members in our conference */
504 list_for_each_entry(member, &conf->mlist, list) {
505 /* check if member uses mixing */
506 if (member->dsp->tx_mix) {
507 if (dsp_debug & DEBUG_DSP_CMX)
508 printk(KERN_DEBUG
509 "%s dsp %s cannot form a conf, because "
510 "tx_mix is turned on\n", __func__,
511 member->dsp->name);
512conf_software:
513 list_for_each_entry(member, &conf->mlist, list) {
514 dsp = member->dsp;
515 /* remove HFC conference if enabled */
516 if (dsp->hfc_conf >= 0) {
517 if (dsp_debug & DEBUG_DSP_CMX)
518 printk(KERN_DEBUG
519 "%s removing %s from HFC "
520 "conf %d because not "
521 "possible with hardware\n",
522 __func__,
523 dsp->name,
524 dsp->hfc_conf);
525 dsp_cmx_hw_message(dsp,
526 MISDN_CTRL_HFC_CONF_SPLIT,
527 0, 0, 0, 0);
528 dsp->hfc_conf = -1;
529 }
530 /* remove PCM slot if assigned */
531 if (dsp->pcm_slot_tx >= 0 ||
532 dsp->pcm_slot_rx >= 0) {
533 if (dsp_debug & DEBUG_DSP_CMX)
534 printk(KERN_DEBUG "%s removing "
535 "%s from PCM slot %d (TX)"
536 " slot %d (RX) because not"
537 " possible with hardware\n",
538 __func__,
539 dsp->name,
540 dsp->pcm_slot_tx,
541 dsp->pcm_slot_rx);
542 dsp_cmx_hw_message(dsp,
543 MISDN_CTRL_HFC_PCM_DISC,
544 0, 0, 0, 0);
545 dsp->pcm_slot_tx = -1;
546 dsp->pcm_bank_tx = -1;
547 dsp->pcm_slot_rx = -1;
548 dsp->pcm_bank_rx = -1;
549 }
550 }
551 conf->hardware = 0;
552 conf->software = 1;
553 return;
554 }
555 /* check if member has echo turned on */
556 if (member->dsp->echo) {
557 if (dsp_debug & DEBUG_DSP_CMX)
558 printk(KERN_DEBUG
559 "%s dsp %s cannot form a conf, because "
560 "echo is turned on\n", __func__,
561 member->dsp->name);
562 goto conf_software;
563 }
564 /* check if member has tx_mix turned on */
565 if (member->dsp->tx_mix) {
566 if (dsp_debug & DEBUG_DSP_CMX)
567 printk(KERN_DEBUG
568 "%s dsp %s cannot form a conf, because "
569 "tx_mix is turned on\n",
570 __func__, member->dsp->name);
571 goto conf_software;
572 }
573 /* check if member changes volume at an unsupported level */
574 if (member->dsp->tx_volume) {
575 if (dsp_debug & DEBUG_DSP_CMX)
576 printk(KERN_DEBUG
577 "%s dsp %s cannot form a conf, because "
578 "tx_volume is changed\n",
579 __func__, member->dsp->name);
580 goto conf_software;
581 }
582 if (member->dsp->rx_volume) {
583 if (dsp_debug & DEBUG_DSP_CMX)
584 printk(KERN_DEBUG
585 "%s dsp %s cannot form a conf, because "
586 "rx_volume is changed\n",
587 __func__, member->dsp->name);
588 goto conf_software;
589 }
590 /* check if tx-data turned on */
591 if (member->dsp->tx_data) {
592 if (dsp_debug & DEBUG_DSP_CMX)
593 printk(KERN_DEBUG
594 "%s dsp %s cannot form a conf, because "
595 "tx_data is turned on\n",
596 __func__, member->dsp->name);
597 goto conf_software;
598 }
599 /* check if pipeline exists */
600 if (member->dsp->pipeline.inuse) {
601 if (dsp_debug & DEBUG_DSP_CMX)
602 printk(KERN_DEBUG
603 "%s dsp %s cannot form a conf, because "
604 "pipeline exists\n", __func__,
605 member->dsp->name);
606 goto conf_software;
607 }
608 /* check if encryption is enabled */
609 if (member->dsp->bf_enable) {
610 if (dsp_debug & DEBUG_DSP_CMX)
611 printk(KERN_DEBUG "%s dsp %s cannot form a "
612 "conf, because encryption is enabled\n",
613 __func__, member->dsp->name);
614 goto conf_software;
615 }
616 /* check if member is on a card with PCM support */
617 if (member->dsp->features.pcm_id < 0) {
618 if (dsp_debug & DEBUG_DSP_CMX)
619 printk(KERN_DEBUG
620 "%s dsp %s cannot form a conf, because "
621 "dsp has no PCM bus\n",
622 __func__, member->dsp->name);
623 goto conf_software;
624 }
625 /* check if relations are on the same PCM bus */
626 if (member->dsp->features.pcm_id != same_pcm) {
627 if (dsp_debug & DEBUG_DSP_CMX)
628 printk(KERN_DEBUG
629 "%s dsp %s cannot form a conf, because "
630 "dsp is on a different PCM bus than the "
631 "first dsp\n",
632 __func__, member->dsp->name);
633 goto conf_software;
634 }
635 /* determine if members are on the same hfc chip */
636 if (same_hfc != member->dsp->features.hfc_id)
637 same_hfc = -1;
638 /* if there are members already in a conference */
639 if (current_conf < 0 && member->dsp->hfc_conf >= 0)
640 current_conf = member->dsp->hfc_conf;
641 /* if any member is not in a conference */
642 if (member->dsp->hfc_conf < 0)
643 all_conf = 0;
644
645 memb++;
646 }
647
648 /* if no member, this is an error */
649 if (memb < 1)
650 return;
651
652 /* one member */
653 if (memb == 1) {
654 if (dsp_debug & DEBUG_DSP_CMX)
655 printk(KERN_DEBUG
656 "%s conf %d cannot form a HW conference, "
657 "because dsp is alone\n", __func__, conf->id);
658 conf->hardware = 0;
659 conf->software = 0;
660 member = list_entry(conf->mlist.next, struct dsp_conf_member,
661 list);
662 dsp = member->dsp;
663 goto one_member;
664 }
665
666 /*
667 * ok, now we are sure that all members are on the same pcm.
668 * now we will see if we have only two members, so we can do
669 * crossconnections, which don't have any limitations.
670 */
671
672 /* if we have only two members */
673 if (memb == 2) {
674 member = list_entry(conf->mlist.next, struct dsp_conf_member,
675 list);
676 nextm = list_entry(member->list.next, struct dsp_conf_member,
677 list);
678 /* remove HFC conference if enabled */
679 if (member->dsp->hfc_conf >= 0) {
680 if (dsp_debug & DEBUG_DSP_CMX)
681 printk(KERN_DEBUG
682 "%s removing %s from HFC conf %d because "
683 "two parties require only a PCM slot\n",
684 __func__, member->dsp->name,
685 member->dsp->hfc_conf);
686 dsp_cmx_hw_message(member->dsp,
687 MISDN_CTRL_HFC_CONF_SPLIT, 0, 0, 0, 0);
688 member->dsp->hfc_conf = -1;
689 }
690 if (nextm->dsp->hfc_conf >= 0) {
691 if (dsp_debug & DEBUG_DSP_CMX)
692 printk(KERN_DEBUG
693 "%s removing %s from HFC conf %d because "
694 "two parties require only a PCM slot\n",
695 __func__, nextm->dsp->name,
696 nextm->dsp->hfc_conf);
697 dsp_cmx_hw_message(nextm->dsp,
698 MISDN_CTRL_HFC_CONF_SPLIT, 0, 0, 0, 0);
699 nextm->dsp->hfc_conf = -1;
700 }
701 /* if members have two banks (and not on the same chip) */
702 if (member->dsp->features.pcm_banks > 1 &&
703 nextm->dsp->features.pcm_banks > 1 &&
704 member->dsp->features.hfc_id !=
705 nextm->dsp->features.hfc_id) {
706 /* if both members have same slots with crossed banks */
707 if (member->dsp->pcm_slot_tx >= 0 &&
708 member->dsp->pcm_slot_rx >= 0 &&
709 nextm->dsp->pcm_slot_tx >= 0 &&
710 nextm->dsp->pcm_slot_rx >= 0 &&
711 nextm->dsp->pcm_slot_tx ==
712 member->dsp->pcm_slot_rx &&
713 nextm->dsp->pcm_slot_rx ==
714 member->dsp->pcm_slot_tx &&
715 nextm->dsp->pcm_slot_tx ==
716 member->dsp->pcm_slot_tx &&
717 member->dsp->pcm_bank_tx !=
718 member->dsp->pcm_bank_rx &&
719 nextm->dsp->pcm_bank_tx !=
720 nextm->dsp->pcm_bank_rx) {
721 /* all members have same slot */
722 if (dsp_debug & DEBUG_DSP_CMX)
723 printk(KERN_DEBUG
724 "%s dsp %s & %s stay joined on "
725 "PCM slot %d bank %d (TX) bank %d "
726 "(RX) (on different chips)\n",
727 __func__,
728 member->dsp->name,
729 nextm->dsp->name,
730 member->dsp->pcm_slot_tx,
731 member->dsp->pcm_bank_tx,
732 member->dsp->pcm_bank_rx);
733 conf->hardware = 0;
734 conf->software = 1;
735 return;
736 }
737 /* find a new slot */
738 memset(freeslots, 1, sizeof(freeslots));
739 list_for_each_entry(dsp, &dsp_ilist, list) {
740 if (dsp != member->dsp &&
741 dsp != nextm->dsp &&
742 member->dsp->features.pcm_id ==
743 dsp->features.pcm_id) {
744 if (dsp->pcm_slot_rx >= 0 &&
745 dsp->pcm_slot_rx <
746 sizeof(freeslots))
747 freeslots[dsp->pcm_slot_tx] = 0;
748 if (dsp->pcm_slot_tx >= 0 &&
749 dsp->pcm_slot_tx <
750 sizeof(freeslots))
751 freeslots[dsp->pcm_slot_rx] = 0;
752 }
753 }
754 i = 0;
755 ii = member->dsp->features.pcm_slots;
756 while (i < ii) {
757 if (freeslots[i])
758 break;
759 i++;
760 }
761 if (i == ii) {
762 if (dsp_debug & DEBUG_DSP_CMX)
763 printk(KERN_DEBUG
764 "%s no slot available for "
765 "%s & %s\n", __func__,
766 member->dsp->name,
767 nextm->dsp->name);
768 /* no more slots available */
769 goto conf_software;
770 }
771 /* assign free slot */
772 member->dsp->pcm_slot_tx = i;
773 member->dsp->pcm_slot_rx = i;
774 nextm->dsp->pcm_slot_tx = i;
775 nextm->dsp->pcm_slot_rx = i;
776 member->dsp->pcm_bank_rx = 0;
777 member->dsp->pcm_bank_tx = 1;
778 nextm->dsp->pcm_bank_rx = 1;
779 nextm->dsp->pcm_bank_tx = 0;
780 if (dsp_debug & DEBUG_DSP_CMX)
781 printk(KERN_DEBUG
782 "%s adding %s & %s to new PCM slot %d "
783 "(TX and RX on different chips) because "
784 "both members do not have the same slots\n",
785 __func__,
786 member->dsp->name,
787 nextm->dsp->name,
788 member->dsp->pcm_slot_tx);
789 dsp_cmx_hw_message(member->dsp, MISDN_CTRL_HFC_PCM_CONN,
790 member->dsp->pcm_slot_tx, member->dsp->pcm_bank_tx,
791 member->dsp->pcm_slot_rx, member->dsp->pcm_bank_rx);
792 dsp_cmx_hw_message(nextm->dsp, MISDN_CTRL_HFC_PCM_CONN,
793 nextm->dsp->pcm_slot_tx, nextm->dsp->pcm_bank_tx,
794 nextm->dsp->pcm_slot_rx, nextm->dsp->pcm_bank_rx);
795 conf->hardware = 1;
796 conf->software = 0;
797 return;
798 /* if members have one bank (or on the same chip) */
799 } else {
800 /* if both members have different crossed slots */
801 if (member->dsp->pcm_slot_tx >= 0 &&
802 member->dsp->pcm_slot_rx >= 0 &&
803 nextm->dsp->pcm_slot_tx >= 0 &&
804 nextm->dsp->pcm_slot_rx >= 0 &&
805 nextm->dsp->pcm_slot_tx ==
806 member->dsp->pcm_slot_rx &&
807 nextm->dsp->pcm_slot_rx ==
808 member->dsp->pcm_slot_tx &&
809 member->dsp->pcm_slot_tx !=
810 member->dsp->pcm_slot_rx &&
811 member->dsp->pcm_bank_tx == 0 &&
812 member->dsp->pcm_bank_rx == 0 &&
813 nextm->dsp->pcm_bank_tx == 0 &&
814 nextm->dsp->pcm_bank_rx == 0) {
815 /* all members have same slot */
816 if (dsp_debug & DEBUG_DSP_CMX)
817 printk(KERN_DEBUG
818 "%s dsp %s & %s stay joined on PCM "
819 "slot %d (TX) %d (RX) (on same chip "
820 "or one bank PCM)\n", __func__,
821 member->dsp->name,
822 nextm->dsp->name,
823 member->dsp->pcm_slot_tx,
824 member->dsp->pcm_slot_rx);
825 conf->hardware = 0;
826 conf->software = 1;
827 return;
828 }
829 /* find two new slots */
830 memset(freeslots, 1, sizeof(freeslots));
831 list_for_each_entry(dsp, &dsp_ilist, list) {
832 if (dsp != member->dsp &&
833 dsp != nextm->dsp &&
834 member->dsp->features.pcm_id ==
835 dsp->features.pcm_id) {
836 if (dsp->pcm_slot_rx >= 0 &&
837 dsp->pcm_slot_rx <
838 sizeof(freeslots))
839 freeslots[dsp->pcm_slot_tx] = 0;
840 if (dsp->pcm_slot_tx >= 0 &&
841 dsp->pcm_slot_tx <
842 sizeof(freeslots))
843 freeslots[dsp->pcm_slot_rx] = 0;
844 }
845 }
846 i1 = 0;
847 ii = member->dsp->features.pcm_slots;
848 while (i1 < ii) {
849 if (freeslots[i1])
850 break;
851 i1++;
852 }
853 if (i1 == ii) {
854 if (dsp_debug & DEBUG_DSP_CMX)
855 printk(KERN_DEBUG
856 "%s no slot available "
857 "for %s & %s\n", __func__,
858 member->dsp->name,
859 nextm->dsp->name);
860 /* no more slots available */
861 goto conf_software;
862 }
863 i2 = i1+1;
864 while (i2 < ii) {
865 if (freeslots[i2])
866 break;
867 i2++;
868 }
869 if (i2 == ii) {
870 if (dsp_debug & DEBUG_DSP_CMX)
871 printk(KERN_DEBUG
872 "%s no slot available "
873 "for %s & %s\n",
874 __func__,
875 member->dsp->name,
876 nextm->dsp->name);
877 /* no more slots available */
878 goto conf_software;
879 }
880 /* assign free slots */
881 member->dsp->pcm_slot_tx = i1;
882 member->dsp->pcm_slot_rx = i2;
883 nextm->dsp->pcm_slot_tx = i2;
884 nextm->dsp->pcm_slot_rx = i1;
885 member->dsp->pcm_bank_rx = 0;
886 member->dsp->pcm_bank_tx = 0;
887 nextm->dsp->pcm_bank_rx = 0;
888 nextm->dsp->pcm_bank_tx = 0;
889 if (dsp_debug & DEBUG_DSP_CMX)
890 printk(KERN_DEBUG
891 "%s adding %s & %s to new PCM slot %d "
892 "(TX) %d (RX) on same chip or one bank "
893 "PCM, because both members do not have "
894 "crossed slots\n", __func__,
895 member->dsp->name,
896 nextm->dsp->name,
897 member->dsp->pcm_slot_tx,
898 member->dsp->pcm_slot_rx);
899 dsp_cmx_hw_message(member->dsp, MISDN_CTRL_HFC_PCM_CONN,
900 member->dsp->pcm_slot_tx, member->dsp->pcm_bank_tx,
901 member->dsp->pcm_slot_rx, member->dsp->pcm_bank_rx);
902 dsp_cmx_hw_message(nextm->dsp, MISDN_CTRL_HFC_PCM_CONN,
903 nextm->dsp->pcm_slot_tx, nextm->dsp->pcm_bank_tx,
904 nextm->dsp->pcm_slot_rx, nextm->dsp->pcm_bank_rx);
905 conf->hardware = 1;
906 conf->software = 0;
907 return;
908 }
909 }
910
911 /*
912 * if we have more than two, we may check if we have a conference
913 * unit available on the chip. also all members must be on the same chip.
914 */
915
916 /* if not the same HFC chip */
917 if (same_hfc < 0) {
918 if (dsp_debug & DEBUG_DSP_CMX)
919 printk(KERN_DEBUG
920 "%s conference %d cannot be formed, because "
921 "members are on different chips or not "
922 "on HFC chip\n",
923 __func__, conf->id);
924 goto conf_software;
925 }
926
927 /* for more than two members.. */
928
929 /* in case of hdlc, we change to software */
930 if (dsp->hdlc)
931 goto conf_software;
932
933 /* if all members already have the same conference */
934 if (all_conf)
935 return;
936
937 /*
938 * if there is an existing conference, but not all members have joined
939 */
940 if (current_conf >= 0) {
941join_members:
942 list_for_each_entry(member, &conf->mlist, list) {
943 /* join to current conference */
944 if (member->dsp->hfc_conf == current_conf)
945 continue;
946 /* get a free timeslot first */
947 memset(freeslots, 1, sizeof(freeslots));
948 list_for_each_entry(dsp, &dsp_ilist, list) {
949 /*
950 * not checking current member, because
951 * slot will be overwritten.
952 */
953 if (
954 dsp != member->dsp &&
955 /* dsp must be on the same PCM */
956 member->dsp->features.pcm_id ==
957 dsp->features.pcm_id) {
958 /* dsp must be on a slot */
959 if (dsp->pcm_slot_tx >= 0 &&
960 dsp->pcm_slot_tx <
961 sizeof(freeslots))
962 freeslots[dsp->pcm_slot_tx] = 0;
963 if (dsp->pcm_slot_rx >= 0 &&
964 dsp->pcm_slot_rx <
965 sizeof(freeslots))
966 freeslots[dsp->pcm_slot_rx] = 0;
967 }
968 }
969 i = 0;
970 ii = member->dsp->features.pcm_slots;
971 while (i < ii) {
972 if (freeslots[i])
973 break;
974 i++;
975 }
976 if (i == ii) {
977 /* no more slots available */
978 if (dsp_debug & DEBUG_DSP_CMX)
979 printk(KERN_DEBUG
980 "%s conference %d cannot be formed,"
981 " because no slot free\n",
982 __func__, conf->id);
983 goto conf_software;
984 }
985 if (dsp_debug & DEBUG_DSP_CMX)
986 printk(KERN_DEBUG
987 "%s changing dsp %s to HW conference "
988 "%d slot %d\n", __func__,
989 member->dsp->name, current_conf, i);
990 /* assign free slot & set PCM & join conf */
991 member->dsp->pcm_slot_tx = i;
992 member->dsp->pcm_slot_rx = i;
993 member->dsp->pcm_bank_tx = 2; /* loop */
994 member->dsp->pcm_bank_rx = 2;
995 member->dsp->hfc_conf = current_conf;
996 dsp_cmx_hw_message(member->dsp, MISDN_CTRL_HFC_PCM_CONN,
997 i, 2, i, 2);
998 dsp_cmx_hw_message(member->dsp,
999 MISDN_CTRL_HFC_CONF_JOIN, current_conf, 0, 0, 0);
1000 }
1001 return;
1002 }
1003
1004 /*
1005 * no member is in a conference yet, so we find a free one
1006 */
1007 memset(freeunits, 1, sizeof(freeunits));
1008 list_for_each_entry(dsp, &dsp_ilist, list) {
1009 /* dsp must be on the same chip */
1010 if (dsp->features.hfc_id == same_hfc &&
1011 /* dsp must have joined a HW conference */
1012 dsp->hfc_conf >= 0 &&
1013 /* slot must be within range */
1014 dsp->hfc_conf < 8)
1015 freeunits[dsp->hfc_conf] = 0;
1016 }
1017 i = 0;
1018 ii = 8;
1019 while (i < ii) {
1020 if (freeunits[i])
1021 break;
1022 i++;
1023 }
1024 if (i == ii) {
1025 /* no more conferences available */
1026 if (dsp_debug & DEBUG_DSP_CMX)
1027 printk(KERN_DEBUG
1028 "%s conference %d cannot be formed, because "
1029 "no conference number free\n",
1030 __func__, conf->id);
1031 goto conf_software;
1032 }
1033 /* join all members */
1034 current_conf = i;
1035 goto join_members;
1036}
1037
1038
1039/*
1040 * conf_id != 0: join or change conference
1041 * conf_id == 0: split from conference if not already
1042 */
1043int
1044dsp_cmx_conf(struct dsp *dsp, u32 conf_id)
1045{
1046 int err;
1047 struct dsp_conf *conf;
1048 struct dsp_conf_member *member;
1049
1050 /* if conference doesn't change */
1051 if (dsp->conf_id == conf_id)
1052 return 0;
1053
1054 /* first remove us from current conf */
1055 if (dsp->conf_id) {
1056 if (dsp_debug & DEBUG_DSP_CMX)
1057 printk(KERN_DEBUG "removing us from conference %d\n",
1058 dsp->conf->id);
1059 /* remove us from conf */
1060 conf = dsp->conf;
1061 err = dsp_cmx_del_conf_member(dsp);
1062 if (err)
1063 return err;
1064 dsp->conf_id = 0;
1065
1066 /* update hardware */
1067 dsp_cmx_hardware(NULL, dsp);
1068
1069 /* conf now empty? */
1070 if (list_empty(&conf->mlist)) {
1071 if (dsp_debug & DEBUG_DSP_CMX)
1072 printk(KERN_DEBUG
1073 "conference is empty, so we remove it.\n");
1074 err = dsp_cmx_del_conf(conf);
1075 if (err)
1076 return err;
1077 } else {
1078 /* update members left on conf */
1079 dsp_cmx_hardware(conf, NULL);
1080 }
1081 }
1082
1083 /* if split */
1084 if (!conf_id)
1085 return 0;
1086
1087 /* now add us to conf */
1088 if (dsp_debug & DEBUG_DSP_CMX)
1089 printk(KERN_DEBUG "searching conference %d\n",
1090 conf_id);
1091 conf = dsp_cmx_search_conf(conf_id);
1092 if (!conf) {
1093 if (dsp_debug & DEBUG_DSP_CMX)
1094 printk(KERN_DEBUG
1095 "conference doesn't exist yet, creating.\n");
1096 /* the conference doesn't exist, so we create */
1097 conf = dsp_cmx_new_conf(conf_id);
1098 if (!conf)
1099 return -EINVAL;
1100 } else if (!list_empty(&conf->mlist)) {
1101 member = list_entry(conf->mlist.next, struct dsp_conf_member,
1102 list);
1103 if (dsp->hdlc && !member->dsp->hdlc) {
1104 if (dsp_debug & DEBUG_DSP_CMX)
1105 printk(KERN_DEBUG
1106 "cannot join transparent conference.\n");
1107 return -EINVAL;
1108 }
1109 if (!dsp->hdlc && member->dsp->hdlc) {
1110 if (dsp_debug & DEBUG_DSP_CMX)
1111 printk(KERN_DEBUG
1112 "cannot join hdlc conference.\n");
1113 return -EINVAL;
1114 }
1115 }
1116 /* add conference member */
1117 err = dsp_cmx_add_conf_member(dsp, conf);
1118 if (err)
1119 return err;
1120 dsp->conf_id = conf_id;
1121
1122 /* if we are alone, we do nothing! */
1123 if (list_empty(&conf->mlist)) {
1124 if (dsp_debug & DEBUG_DSP_CMX)
1125 printk(KERN_DEBUG
1126 "we are alone in this conference, so exit.\n");
1127 /* update hardware */
1128 dsp_cmx_hardware(NULL, dsp);
1129 return 0;
1130 }
1131
1132 /* update members on conf */
1133 dsp_cmx_hardware(conf, NULL);
1134
1135 return 0;
1136}
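
 A hypothetical call sequence for dsp_cmx_conf above. The wrapper function is
 made up for illustration; in the driver it is the dsp core's control path that
 joins and splits conferences:

/* hypothetical usage sketch, not part of the driver */
static int example_conf_usage(struct dsp *a, struct dsp *b, u32 conf_id)
{
	int err;

	err = dsp_cmx_conf(a, conf_id);	/* first member creates the conference */
	if (err)
		return err;
	err = dsp_cmx_conf(b, conf_id);	/* second member -> crossconnect */
	if (err)
		return err;
	return dsp_cmx_conf(a, 0);	/* split 'a' from the conference again */
}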
1137
1138
1139/*
1140 * audio data is received from card
1141 */
1142void
1143dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb)
1144{
1145 u8 *d, *p;
1146 int len = skb->len;
1147 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1148 int w, i, ii;
1149
1150 /* check if we have something */
1151 if (len < 1)
1152 return;
1153
1154 /* half of the buffer should be larger than maximum packet size */
1155 if (len >= CMX_BUFF_HALF) {
1156 printk(KERN_ERR
1157 "%s line %d: packet from card is too large (%d bytes). "
1158 "please make card send smaller packets OR increase "
1159 "CMX_BUFF_SIZE\n", __FILE__, __LINE__, len);
1160 return;
1161 }
1162
1163 /*
1164 * initialize pointers if not already -
1165 * also add delay if requested by PH_SIGNAL
1166 */
1167 if (dsp->rx_init) {
1168 dsp->rx_init = 0;
1169 if (dsp->features.unordered) {
1170 dsp->rx_R = (hh->id & CMX_BUFF_MASK);
1171 dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
1172 & CMX_BUFF_MASK;
1173 } else {
1174 dsp->rx_R = 0;
1175 dsp->rx_W = dsp->cmx_delay;
1176 }
1177 }
1178 /* if frame contains time code, write directly */
1179 if (dsp->features.unordered) {
1180 dsp->rx_W = (hh->id & CMX_BUFF_MASK);
1181 /* printk(KERN_DEBUG "%s %08x\n", dsp->name, hh->id); */
1182 }
1183 /*
1184 * if we underrun (or maybe overrun),
1185 * we set our new read pointer, and write silence to buffer
1186 */
1187 if (((dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK) >= CMX_BUFF_HALF) {
1188 if (dsp_debug & DEBUG_DSP_CMX)
1189 printk(KERN_DEBUG
1190 "cmx_receive(dsp=%lx): UNDERRUN (or overrun the "
1191 "maximum delay), adjusting read pointer! "
1192 "(inst %s)\n", (u_long)dsp, dsp->name);
1193 /* flush buffer */
1194 if (dsp->features.unordered) {
1195 dsp->rx_R = (hh->id & CMX_BUFF_MASK);
1196 dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
1197 & CMX_BUFF_MASK;
1198 } else {
1199 dsp->rx_R = 0;
1200 dsp->rx_W = dsp->cmx_delay;
1201 }
1202 memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff));
1203 }
1204 /* if we have reached double delay, jump back to middle */
1205 if (dsp->cmx_delay)
1206 if (((dsp->rx_W - dsp->rx_R) & CMX_BUFF_MASK) >=
1207 (dsp->cmx_delay << 1)) {
1208 if (dsp_debug & DEBUG_DSP_CMX)
1209 printk(KERN_DEBUG
1210 "cmx_receive(dsp=%lx): OVERRUN (because "
1211 "twice the delay is reached), adjusting "
1212 "read pointer! (inst %s)\n",
1213 (u_long)dsp, dsp->name);
1214 /* flush buffer */
1215 if (dsp->features.unordered) {
1216 dsp->rx_R = (hh->id & CMX_BUFF_MASK);
1217 dsp->rx_W = (dsp->rx_R + dsp->cmx_delay)
1218 & CMX_BUFF_MASK;
1219 } else {
1220 dsp->rx_R = 0;
1221 dsp->rx_W = dsp->cmx_delay;
1222 }
1223 memset(dsp->rx_buff, dsp_silence, sizeof(dsp->rx_buff));
1224 }
1225
1226 /* show where to write */
1227#ifdef CMX_DEBUG
1228 printk(KERN_DEBUG
1229 "cmx_receive(dsp=%lx): rx_R(dsp)=%05x rx_W(dsp)=%05x len=%d %s\n",
1230 (u_long)dsp, dsp->rx_R, dsp->rx_W, len, dsp->name);
1231#endif
1232
1233 /* write data into rx_buffer */
1234 p = skb->data;
1235 d = dsp->rx_buff;
1236 w = dsp->rx_W;
1237 i = 0;
1238 ii = len;
1239 while (i < ii) {
1240 d[w++ & CMX_BUFF_MASK] = *p++;
1241 i++;
1242 }
1243
1244 /* increase write-pointer */
1245 dsp->rx_W = ((dsp->rx_W+len) & CMX_BUFF_MASK);
1246}
1247
1248
1249/*
1250 * send (mixed) audio data to card and control jitter
1251 */
1252static void
1253dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
1254{
1255 struct dsp_conf *conf = dsp->conf;
1256 struct dsp *member, *other;
1257 register s32 sample;
1258 u8 *d, *p, *q, *o_q;
1259 struct sk_buff *nskb, *txskb;
1260 int r, rr, t, tt, o_r, o_rr;
1261 int preload = 0;
1262 struct mISDNhead *hh, *thh;
1263
1264 /* don't process if: */
1265 if (!dsp->b_active) { /* if not active */
1266 dsp->last_tx = 0;
1267 return;
1268 }
1269 if (dsp->pcm_slot_tx >= 0 && /* connected to pcm slot */
1270 dsp->tx_R == dsp->tx_W && /* AND no tx-data */
1271 !(dsp->tone.tone && dsp->tone.software)) { /* AND not soft tones */
1272 dsp->last_tx = 0;
1273 return;
1274 }
1275
1276#ifdef CMX_DEBUG
1277 printk(KERN_DEBUG
1278 "SEND members=%d dsp=%s, conf=%p, rx_R=%05x rx_W=%05x\n",
1279 members, dsp->name, conf, dsp->rx_R, dsp->rx_W);
1280#endif
1281
1282 /* preload if we have delay set */
1283 if (dsp->cmx_delay && !dsp->last_tx) {
1284 preload = len;
1285 if (preload < 128)
1286 preload = 128;
1287 }
1288
1289 /* PREPARE RESULT */
1290 nskb = mI_alloc_skb(len + preload, GFP_ATOMIC);
1291 if (!nskb) {
1292 printk(KERN_ERR
1293 "FATAL ERROR in mISDN_dsp.o: cannot alloc %d bytes\n",
1294 len + preload);
1295 return;
1296 }
1297 hh = mISDN_HEAD_P(nskb);
1298 hh->prim = PH_DATA_REQ;
1299 hh->id = 0;
1300 dsp->last_tx = 1;
1301
1302 /* set pointers, indexes and stuff */
1303 member = dsp;
1304 p = dsp->tx_buff; /* transmit data */
1305 q = dsp->rx_buff; /* received data */
1306 d = skb_put(nskb, preload + len); /* result */
1307 t = dsp->tx_R; /* tx-pointers */
1308 tt = dsp->tx_W;
1309 r = dsp->rx_R; /* rx-pointers */
1310 rr = (r + len) & CMX_BUFF_MASK;
1311
1312 /* preload with silence, if required */
1313 if (preload) {
1314 memset(d, dsp_silence, preload);
1315 d += preload;
1316 }
1317
1318 /* PROCESS TONES/TX-DATA ONLY */
1319 if (dsp->tone.tone && dsp->tone.software) {
1320 /* -> copy tone */
1321 dsp_tone_copy(dsp, d, len);
1322 dsp->tx_R = 0; /* clear tx buffer */
1323 dsp->tx_W = 0;
1324 goto send_packet;
1325 }
1326 /* if we have tx-data but do not use mixing */
1327 if (!dsp->tx_mix && t != tt) {
1328 /* -> send tx-data and continue when not enough */
1329#ifdef CMX_TX_DEBUG
1330 sprintf(debugbuf, "TX sending (%04x-%04x)%p: ", t, tt, p);
1331#endif
1332 while (r != rr && t != tt) {
1333#ifdef CMX_TX_DEBUG
1334 if (strlen(debugbuf) < 48)
1335 sprintf(debugbuf+strlen(debugbuf), " %02x", p[t]);
1336#endif
1337 *d++ = p[t]; /* write tx_buff */
1338 t = (t+1) & CMX_BUFF_MASK;
1339 r = (r+1) & CMX_BUFF_MASK;
1340 }
1341 if (r == rr) {
1342 dsp->tx_R = t;
1343#ifdef CMX_TX_DEBUG
1344 printk(KERN_DEBUG "%s\n", debugbuf);
1345#endif
1346 goto send_packet;
1347 }
1348 }
1349#ifdef CMX_TX_DEBUG
1350 printk(KERN_DEBUG "%s\n", debugbuf);
1351#endif
1352
1353 /* PROCESS DATA (one member / no conf) */
1354 if (!conf || members <= 1) {
1355 /* -> if echo is NOT enabled */
1356 if (!dsp->echo) {
1357 /* -> send tx-data if available or use 0-volume */
1358 while (r != rr && t != tt) {
1359 *d++ = p[t]; /* write tx_buff */
1360 t = (t+1) & CMX_BUFF_MASK;
1361 r = (r+1) & CMX_BUFF_MASK;
1362 }
1363 if (r != rr)
1364 memset(d, dsp_silence, (rr-r)&CMX_BUFF_MASK);
1365 /* -> if echo is enabled */
1366 } else {
1367 /*
1368 * -> mix tx-data with echo if available,
1369 * or use echo only
1370 */
1371 while (r != rr && t != tt) {
1372 *d++ = dsp_audio_mix_law[(p[t]<<8)|q[r]];
1373 t = (t+1) & CMX_BUFF_MASK;
1374 r = (r+1) & CMX_BUFF_MASK;
1375 }
1376 while (r != rr) {
1377 *d++ = q[r]; /* echo */
1378 r = (r+1) & CMX_BUFF_MASK;
1379 }
1380 }
1381 dsp->tx_R = t;
1382 goto send_packet;
1383 }
1384 /* PROCESS DATA (two members) */
1385#ifdef CMX_CONF_DEBUG
1386 if (0) {
1387#else
1388 if (members == 2) {
1389#endif
1390 /* "other" becomes other party */
1391 other = (list_entry(conf->mlist.next,
1392 struct dsp_conf_member, list))->dsp;
1393 if (other == member)
1394 other = (list_entry(conf->mlist.prev,
1395 struct dsp_conf_member, list))->dsp;
1396 o_q = other->rx_buff; /* received data */
1397 o_rr = (other->rx_R + len) & CMX_BUFF_MASK;
1398 /* end of rx-pointer */
1399 o_r = (o_rr - rr + r) & CMX_BUFF_MASK;
1400 /* start rx-pointer at current read position*/
1401 /* -> if echo is NOT enabled */
1402 if (!dsp->echo) {
1403 /*
1404 * -> copy other member's rx-data,
1405 * if tx-data is available, mix
1406 */
1407 while (o_r != o_rr && t != tt) {
1408 *d++ = dsp_audio_mix_law[(p[t]<<8)|o_q[o_r]];
1409 t = (t+1) & CMX_BUFF_MASK;
1410 o_r = (o_r+1) & CMX_BUFF_MASK;
1411 }
1412 while (o_r != o_rr) {
1413 *d++ = o_q[o_r];
1414 o_r = (o_r+1) & CMX_BUFF_MASK;
1415 }
1416 /* -> if echo is enabled */
1417 } else {
1418 /*
1419 * -> mix other member's rx-data with echo,
1420 * if tx-data is available, mix
1421 */
1422 while (r != rr && t != tt) {
1423 sample = dsp_audio_law_to_s32[p[t]] +
1424 dsp_audio_law_to_s32[q[r]] +
1425 dsp_audio_law_to_s32[o_q[o_r]];
1426 if (sample < -32768)
1427 sample = -32768;
1428 else if (sample > 32767)
1429 sample = 32767;
1430 *d++ = dsp_audio_s16_to_law[sample & 0xffff];
1431 /* tx-data + rx_data + echo */
1432 t = (t+1) & CMX_BUFF_MASK;
1433 r = (r+1) & CMX_BUFF_MASK;
1434 o_r = (o_r+1) & CMX_BUFF_MASK;
1435 }
1436 while (r != rr) {
1437 *d++ = dsp_audio_mix_law[(q[r]<<8)|o_q[o_r]];
1438 r = (r+1) & CMX_BUFF_MASK;
1439 o_r = (o_r+1) & CMX_BUFF_MASK;
1440 }
1441 }
1442 dsp->tx_R = t;
1443 goto send_packet;
1444 }
1445#ifdef DSP_NEVER_DEFINED
1446 }
1447#endif
1448 /* PROCESS DATA (three or more members) */
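	/*
	 * c points into mixbuffer, which dsp_cmx_send() fills with the sum of
	 * all members' rx-data; below, each member receives that sum minus
	 * its own rx-data (unless echo is enabled).
	 */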
1449 /* -> if echo is NOT enabled */
1450 if (!dsp->echo) {
1451 /*
 1452		 * -> subtract rx-data from conf-data,
1453 * if tx-data is available, mix
1454 */
1455 while (r != rr && t != tt) {
1456 sample = dsp_audio_law_to_s32[p[t]] + *c++ -
1457 dsp_audio_law_to_s32[q[r]];
1458 if (sample < -32768)
1459 sample = -32768;
1460 else if (sample > 32767)
1461 sample = 32767;
1462 *d++ = dsp_audio_s16_to_law[sample & 0xffff];
1463 /* conf-rx+tx */
1464 r = (r+1) & CMX_BUFF_MASK;
1465 t = (t+1) & CMX_BUFF_MASK;
1466 }
1467 while (r != rr) {
1468 sample = *c++ - dsp_audio_law_to_s32[q[r]];
1469 if (sample < -32768)
1470 sample = -32768;
1471 else if (sample > 32767)
1472 sample = 32767;
1473 *d++ = dsp_audio_s16_to_law[sample & 0xffff];
1474 /* conf-rx */
1475 r = (r+1) & CMX_BUFF_MASK;
1476 }
1477 /* -> if echo is enabled */
1478 } else {
1479 /*
1480 * -> encode conf-data, if tx-data
1481 * is available, mix
1482 */
1483 while (r != rr && t != tt) {
1484 sample = dsp_audio_law_to_s32[p[t]] + *c++;
1485 if (sample < -32768)
1486 sample = -32768;
1487 else if (sample > 32767)
1488 sample = 32767;
1489 *d++ = dsp_audio_s16_to_law[sample & 0xffff];
1490 /* conf(echo)+tx */
1491 t = (t+1) & CMX_BUFF_MASK;
1492 r = (r+1) & CMX_BUFF_MASK;
1493 }
1494 while (r != rr) {
1495 sample = *c++;
1496 if (sample < -32768)
1497 sample = -32768;
1498 else if (sample > 32767)
1499 sample = 32767;
1500 *d++ = dsp_audio_s16_to_law[sample & 0xffff];
1501 /* conf(echo) */
1502 r = (r+1) & CMX_BUFF_MASK;
1503 }
1504 }
1505 dsp->tx_R = t;
1506 goto send_packet;
1507
1508send_packet:
1509 /*
1510 * send tx-data if enabled - don't filter,
 1511	 * because we want what we send, not what we filtered
1512 */
1513 if (dsp->tx_data) {
1514 /* PREPARE RESULT */
1515 txskb = mI_alloc_skb(len, GFP_ATOMIC);
1516 if (!txskb) {
1517 printk(KERN_ERR
1518 "FATAL ERROR in mISDN_dsp.o: "
1519 "cannot alloc %d bytes\n", len);
1520 } else {
1521 thh = mISDN_HEAD_P(txskb);
1522 thh->prim = DL_DATA_REQ;
1523 thh->id = 0;
1524 memcpy(skb_put(txskb, len), nskb->data+preload, len);
1525 /* queue (trigger later) */
1526 skb_queue_tail(&dsp->sendq, txskb);
1527 }
1528 }
1529 /* adjust volume */
1530 if (dsp->tx_volume)
1531 dsp_change_volume(nskb, dsp->tx_volume);
1532 /* pipeline */
1533 if (dsp->pipeline.inuse)
1534 dsp_pipeline_process_tx(&dsp->pipeline, nskb->data, nskb->len);
1535 /* crypt */
1536 if (dsp->bf_enable)
1537 dsp_bf_encrypt(dsp, nskb->data, nskb->len);
1538 /* queue and trigger */
1539 skb_queue_tail(&dsp->sendq, nskb);
1540 schedule_work(&dsp->workq);
1541}
1542
1543u32 samplecount;
1544struct timer_list dsp_spl_tl;
1545u32 dsp_spl_jiffies; /* calculate the next time to fire */
 1546u32	dsp_start_jiffies; /* jiffies at the time the calculation begins */
1547struct timeval dsp_start_tv; /* time at start of calculation */
1548
1549void
1550dsp_cmx_send(void *arg)
1551{
1552 struct dsp_conf *conf;
1553 struct dsp_conf_member *member;
1554 struct dsp *dsp;
1555 int mustmix, members;
1556 s32 mixbuffer[MAX_POLL+100], *c;
1557 u8 *p, *q;
1558 int r, rr;
1559 int jittercheck = 0, delay, i;
1560 u_long flags;
1561 struct timeval tv;
1562 u32 elapsed;
1563 s16 length;
1564
1565 /* lock */
1566 spin_lock_irqsave(&dsp_lock, flags);
1567
1568 if (!dsp_start_tv.tv_sec) {
1569 do_gettimeofday(&dsp_start_tv);
1570 length = dsp_poll;
1571 } else {
1572 do_gettimeofday(&tv);
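		/*
		 * elapsed time since the last run, converted to samples:
		 * 8000 samples per second means one sample every 125 us
		 */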
1573 elapsed = ((tv.tv_sec - dsp_start_tv.tv_sec) * 8000)
1574 + ((s32)(tv.tv_usec / 125) - (dsp_start_tv.tv_usec / 125));
1575 dsp_start_tv.tv_sec = tv.tv_sec;
1576 dsp_start_tv.tv_usec = tv.tv_usec;
1577 length = elapsed;
1578 }
1579 if (length > MAX_POLL + 100)
1580 length = MAX_POLL + 100;
1581/* printk(KERN_DEBUG "len=%d dsp_count=0x%x.%04x dsp_poll_diff=0x%x.%04x\n",
1582 length, dsp_count >> 16, dsp_count & 0xffff, dsp_poll_diff >> 16,
1583 dsp_poll_diff & 0xffff);
1584 */
1585
1586 /*
1587 * check if jitter needs to be checked
1588 * (this is about every second = 8192 samples)
1589 */
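	/*
	 * (samplecount & 8191) < length becomes true exactly when samplecount
	 * crosses a multiple of 8192 samples, i.e. about once per second
	 */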
1590 samplecount += length;
1591 if ((samplecount & 8191) < length)
1592 jittercheck = 1;
1593
1594 /* loop all members that do not require conference mixing */
1595 list_for_each_entry(dsp, &dsp_ilist, list) {
1596 if (dsp->hdlc)
1597 continue;
1598 conf = dsp->conf;
1599 mustmix = 0;
1600 members = 0;
1601 if (conf) {
1602 members = count_list_member(&conf->mlist);
1603#ifdef CMX_CONF_DEBUG
1604 if (conf->software && members > 1)
1605#else
1606 if (conf->software && members > 2)
1607#endif
1608 mustmix = 1;
1609 }
1610
1611 /* transmission required */
1612 if (!mustmix) {
1613 dsp_cmx_send_member(dsp, length, mixbuffer, members);
1614
1615 /*
1616 * unused mixbuffer is given to prevent a
1617 * potential null-pointer-bug
1618 */
1619 }
1620 }
1621
1622 /* loop all members that require conference mixing */
1623 list_for_each_entry(conf, &conf_ilist, list) {
1624 /* count members and check hardware */
1625 members = count_list_member(&conf->mlist);
1626#ifdef CMX_CONF_DEBUG
1627 if (conf->software && members > 1) {
1628#else
1629 if (conf->software && members > 2) {
1630#endif
1631 /* check for hdlc conf */
1632 member = list_entry(conf->mlist.next,
1633 struct dsp_conf_member, list);
1634 if (member->dsp->hdlc)
1635 continue;
1636 /* mix all data */
1637 memset(mixbuffer, 0, length*sizeof(s32));
1638 list_for_each_entry(member, &conf->mlist, list) {
1639 dsp = member->dsp;
1640 /* get range of data to mix */
1641 c = mixbuffer;
1642 q = dsp->rx_buff;
1643 r = dsp->rx_R;
1644 rr = (r + length) & CMX_BUFF_MASK;
1645 /* add member's data */
1646 while (r != rr) {
1647 *c++ += dsp_audio_law_to_s32[q[r]];
1648 r = (r+1) & CMX_BUFF_MASK;
1649 }
1650 }
1651
1652 /* process each member */
1653 list_for_each_entry(member, &conf->mlist, list) {
1654 /* transmission */
1655 dsp_cmx_send_member(member->dsp, length,
1656 mixbuffer, members);
1657 }
1658 }
1659 }
1660
1661 /* delete rx-data, increment buffers, change pointers */
1662 list_for_each_entry(dsp, &dsp_ilist, list) {
1663 if (dsp->hdlc)
1664 continue;
1665 p = dsp->rx_buff;
1666 q = dsp->tx_buff;
1667 r = dsp->rx_R;
1668 /* move receive pointer when receiving */
1669 if (!dsp->rx_is_off) {
1670 rr = (r + length) & CMX_BUFF_MASK;
1671 /* delete rx-data */
1672 while (r != rr) {
1673 p[r] = dsp_silence;
1674 r = (r+1) & CMX_BUFF_MASK;
1675 }
1676 /* increment rx-buffer pointer */
1677 dsp->rx_R = r; /* write incremented read pointer */
1678 }
1679
1680 /* check current rx_delay */
1681 delay = (dsp->rx_W-dsp->rx_R) & CMX_BUFF_MASK;
1682 if (delay >= CMX_BUFF_HALF)
1683 delay = 0; /* will be the delay before next write */
1684 /* check for lower delay */
1685 if (delay < dsp->rx_delay[0])
1686 dsp->rx_delay[0] = delay;
1687 /* check current tx_delay */
1688 delay = (dsp->tx_W-dsp->tx_R) & CMX_BUFF_MASK;
1689 if (delay >= CMX_BUFF_HALF)
1690 delay = 0; /* will be the delay before next write */
1691 /* check for lower delay */
1692 if (delay < dsp->tx_delay[0])
1693 dsp->tx_delay[0] = delay;
1694 if (jittercheck) {
1695 /* find the lowest of all rx_delays */
1696 delay = dsp->rx_delay[0];
1697 i = 1;
1698 while (i < MAX_SECONDS_JITTER_CHECK) {
1699 if (delay > dsp->rx_delay[i])
1700 delay = dsp->rx_delay[i];
1701 i++;
1702 }
1703 /*
1704 * remove rx_delay only if we have delay AND we
1705 * have not preset cmx_delay
1706 */
1707 if (delay && !dsp->cmx_delay) {
1708 if (dsp_debug & DEBUG_DSP_CMX)
1709 printk(KERN_DEBUG
1710 "%s lowest rx_delay of %d bytes for"
 1711				    " dsp %s is now removed.\n",
1712 __func__, delay,
1713 dsp->name);
1714 r = dsp->rx_R;
1715 rr = (r + delay) & CMX_BUFF_MASK;
1716 /* delete rx-data */
1717 while (r != rr) {
1718 p[r] = dsp_silence;
1719 r = (r+1) & CMX_BUFF_MASK;
1720 }
1721 /* increment rx-buffer pointer */
1722 dsp->rx_R = r;
1723 /* write incremented read pointer */
1724 }
1725 /* find the lowest of all tx_delays */
1726 delay = dsp->tx_delay[0];
1727 i = 1;
1728 while (i < MAX_SECONDS_JITTER_CHECK) {
1729 if (delay > dsp->tx_delay[i])
1730 delay = dsp->tx_delay[i];
1731 i++;
1732 }
1733 /*
1734 * remove delay only if we have delay AND we
1735 * have enabled tx_dejitter
1736 */
1737 if (delay && dsp->tx_dejitter) {
1738 if (dsp_debug & DEBUG_DSP_CMX)
1739 printk(KERN_DEBUG
1740 "%s lowest tx_delay of %d bytes for"
 1741				    " dsp %s is now removed.\n",
1742 __func__, delay,
1743 dsp->name);
1744 r = dsp->tx_R;
1745 rr = (r + delay) & CMX_BUFF_MASK;
1746 /* delete tx-data */
1747 while (r != rr) {
1748 q[r] = dsp_silence;
1749 r = (r+1) & CMX_BUFF_MASK;
1750 }
 1751				/* increment tx-buffer pointer */
1752 dsp->tx_R = r;
1753 /* write incremented read pointer */
1754 }
1755 /* scroll up delays */
1756 i = MAX_SECONDS_JITTER_CHECK - 1;
1757 while (i) {
1758 dsp->rx_delay[i] = dsp->rx_delay[i-1];
1759 dsp->tx_delay[i] = dsp->tx_delay[i-1];
1760 i--;
1761 }
1762 dsp->tx_delay[0] = CMX_BUFF_HALF; /* (infinite) delay */
1763 dsp->rx_delay[0] = CMX_BUFF_HALF; /* (infinite) delay */
1764 }
1765 }
1766
1767 /* if next event would be in the past ... */
1768 if ((s32)(dsp_spl_jiffies+dsp_tics-jiffies) <= 0)
1769 dsp_spl_jiffies = jiffies + 1;
1770 else
1771 dsp_spl_jiffies += dsp_tics;
1772
1773 dsp_spl_tl.expires = dsp_spl_jiffies;
1774 add_timer(&dsp_spl_tl);
1775
1776 /* unlock */
1777 spin_unlock_irqrestore(&dsp_lock, flags);
1778}
1779
1780/*
1781 * audio data is transmitted from upper layer to the dsp
1782 */
1783void
1784dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb)
1785{
1786 u_int w, ww;
1787 u8 *d, *p;
1788 int space; /* todo: , l = skb->len; */
1789#ifdef CMX_TX_DEBUG
1790 char debugbuf[256] = "";
1791#endif
1792
1793 /* check if there is enough space, and then copy */
1794 w = dsp->tx_W;
1795 ww = dsp->tx_R;
1796 p = dsp->tx_buff;
1797 d = skb->data;
1798 space = ww-w;
1799 if (space <= 0)
1800 space += CMX_BUFF_SIZE;
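	/*
	 * space is the free distance from the write to the read pointer in
	 * the circular tx_buff; if the write pointer is at or ahead of the
	 * read pointer, the difference wraps and CMX_BUFF_SIZE is added
	 */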
1801 /* write-pointer should not overrun nor reach read pointer */
1802 if (space-1 < skb->len)
1803 /* write to the space we have left */
1804 ww = (ww - 1) & CMX_BUFF_MASK;
1805 else
1806 /* write until all byte are copied */
1807 ww = (w + skb->len) & CMX_BUFF_MASK;
1808 dsp->tx_W = ww;
1809
1810 /* show current buffer */
1811#ifdef CMX_DEBUG
1812 printk(KERN_DEBUG
1813 "cmx_transmit(dsp=%lx) %d bytes to 0x%x-0x%x. %s\n",
1814 (u_long)dsp, (ww-w)&CMX_BUFF_MASK, w, ww, dsp->name);
1815#endif
1816
1817 /* copy transmit data to tx-buffer */
1818#ifdef CMX_TX_DEBUG
1819 sprintf(debugbuf, "TX getting (%04x-%04x)%p: ", w, ww, p);
1820#endif
1821 while (w != ww) {
1822#ifdef CMX_TX_DEBUG
1823 if (strlen(debugbuf) < 48)
1824 sprintf(debugbuf+strlen(debugbuf), " %02x", *d);
1825#endif
1826 p[w] = *d++;
1827 w = (w+1) & CMX_BUFF_MASK;
1828 }
1829#ifdef CMX_TX_DEBUG
1830 printk(KERN_DEBUG "%s\n", debugbuf);
1831#endif
1832
1833}
1834
1835/*
1836 * hdlc data is received from card and sent to all members.
1837 */
1838void
1839dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb)
1840{
1841 struct sk_buff *nskb = NULL;
1842 struct dsp_conf_member *member;
1843 struct mISDNhead *hh;
1844
1845 /* not if not active */
1846 if (!dsp->b_active)
1847 return;
1848
 1849	/* check if we received something */
1850 if (skb->len < 1)
1851 return;
1852
1853 /* no conf */
1854 if (!dsp->conf) {
1855 /* in case of hardware (echo) */
1856 if (dsp->pcm_slot_tx >= 0)
1857 return;
1858 if (dsp->echo)
1859 nskb = skb_clone(skb, GFP_ATOMIC);
1860 if (nskb) {
1861 hh = mISDN_HEAD_P(nskb);
1862 hh->prim = PH_DATA_REQ;
1863 hh->id = 0;
1864 skb_queue_tail(&dsp->sendq, nskb);
1865 schedule_work(&dsp->workq);
1866 }
1867 return;
1868 }
1869 /* in case of hardware conference */
1870 if (dsp->conf->hardware)
1871 return;
1872 list_for_each_entry(member, &dsp->conf->mlist, list) {
1873 if (dsp->echo || member->dsp != dsp) {
1874 nskb = skb_clone(skb, GFP_ATOMIC);
1875 if (nskb) {
1876 hh = mISDN_HEAD_P(nskb);
1877 hh->prim = PH_DATA_REQ;
1878 hh->id = 0;
1879 skb_queue_tail(&member->dsp->sendq, nskb);
1880 schedule_work(&member->dsp->workq);
1881 }
1882 }
1883 }
1884}
1885
1886
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
new file mode 100644
index 000000000000..2f10ed82c0db
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -0,0 +1,1191 @@
1/*
2 * Author Andreas Eversberg (jolly@eversberg.eu)
3 * Based on source code structure by
4 * Karsten Keil (keil@isdn4linux.de)
5 *
6 * This file is (c) under GNU PUBLIC LICENSE
7 * For changes and modifications please read
8 * ../../../Documentation/isdn/mISDN.cert
9 *
10 * Thanks to Karsten Keil (great drivers)
11 * Cologne Chip (great chips)
12 *
13 * This module does:
14 * Real-time tone generation
15 * DTMF detection
  16 * Real-time cross-connection and conference
  17 * Compensate jitter due to system load and hardware faults.
18 * All features are done in kernel space and will be realized
19 * using hardware, if available and supported by chip set.
20 * Blowfish encryption/decryption
21 */
22
23/* STRUCTURE:
24 *
25 * The dsp module provides layer 2 for b-channels (64kbit). It provides
26 * transparent audio forwarding with special digital signal processing:
27 *
28 * - (1) generation of tones
29 * - (2) detection of dtmf tones
30 * - (3) crossconnecting and conferences (clocking)
31 * - (4) echo generation for delay test
32 * - (5) volume control
33 * - (6) disable receive data
34 * - (7) pipeline
35 * - (8) encryption/decryption
36 *
37 * Look:
38 * TX RX
39 * ------upper layer------
40 * | ^
41 * | |(6)
42 * v |
43 * +-----+-------------+-----+
44 * |(3)(4) |
45 * | CMX |
46 * | |
47 * | +-------------+
48 * | | ^
49 * | | |
50 * |+---------+| +----+----+
51 * ||(1) || |(2) |
52 * || || | |
53 * || Tones || | DTMF |
54 * || || | |
55 * || || | |
56 * |+----+----+| +----+----+
57 * +-----+-----+ ^
58 * | |
59 * v |
60 * +----+----+ +----+----+
61 * |(5) | |(5) |
62 * | | | |
63 * |TX Volume| |RX Volume|
64 * | | | |
65 * | | | |
66 * +----+----+ +----+----+
67 * | ^
68 * | |
69 * v |
70 * +----+-------------+----+
71 * |(7) |
72 * | |
73 * | Pipeline Processing |
74 * | |
75 * | |
76 * +----+-------------+----+
77 * | ^
78 * | |
79 * v |
80 * +----+----+ +----+----+
81 * |(8) | |(8) |
82 * | | | |
83 * | Encrypt | | Decrypt |
84 * | | | |
85 * | | | |
86 * +----+----+ +----+----+
87 * | ^
88 * | |
89 * v |
90 * ------card layer------
91 * TX RX
92 *
93 * Above you can see the logical data flow. If software is used to do the
94 * process, it is actually the real data flow. If hardware is used, data
95 * may not flow, but hardware commands to the card, to provide the data flow
96 * as shown.
97 *
98 * NOTE: The channel must be activated in order to make dsp work, even if
99 * no data flow to the upper layer is intended. Activation can be done
 100 * before or after changing the settings using PH_CONTROL requests.
101 *
102 * DTMF: Will be detected by hardware if possible. It is done before CMX
103 * processing.
104 *
105 * Tones: Will be generated via software if endless looped audio fifos are
106 * not supported by hardware. Tones will override all data from CMX.
107 * It is not required to join a conference to use tones at any time.
108 *
109 * CMX: Is transparent when not used. When it is used, it will do
110 * crossconnections and conferences via software if not possible through
111 * hardware. If hardware capability is available, hardware is used.
112 *
 113 * Echo: Is generated by CMX and is used to check performance of hardware
 114 * and software CMX.
115 *
116 * The CMX has special functions for conferences with one, two and more
117 * members. It will allow different types of data flow. Receive and transmit
 118 * data to/from the upper layer may be switched on/off individually without losing
119 * features of CMX, Tones and DTMF.
120 *
121 * Echo Cancellation: Sometimes we like to cancel echo from the interface.
122 * Note that a VoIP call may not have echo caused by the IP phone. The echo
123 * is generated by the telephone line connected to it. Because the delay
 124 * is high, it becomes an echo. RESULT: Echo Cancellation is required if
 125 * both echo AND delay are applied to an interface.
 126 * Remember that software CMX always introduces some delay.
127 *
128 * If all used features can be realized in hardware, and if transmit and/or
 129 * receive data is disabled, the card may not send/receive any data at all.
 130 * Not receiving is useful if only announcements are played. Not sending is
 131 * useful if an answering machine records audio. Not sending and receiving is
 132 * useful during most states of the call. If supported by hardware, tones
 133 * will be played without CPU load. Small PBXs and NT-Mode applications will
134 * not need expensive hardware when processing calls.
135 *
136 *
137 * LOCKING:
138 *
139 * When data is received from upper or lower layer (card), the complete dsp
140 * module is locked by a global lock. This lock MUST lock irq, because it
141 * must lock timer events by DSP poll timer.
142 * When data is ready to be transmitted down, the data is queued and sent
143 * outside lock and timer event.
144 * PH_CONTROL must not change any settings, join or split conference members
145 * during process of data.
146 *
147 * HDLC:
148 *
149 * It works quite the same as transparent, except that HDLC data is forwarded
150 * to all other conference members if no hardware bridging is possible.
 151 * Send data will be written to sendq. Sendq will be sent when a confirm is received.
 152 * A conference cannot be joined if one member is not HDLC.
153 *
154 */
155
156#include <linux/delay.h>
157#include <linux/mISDNif.h>
158#include <linux/mISDNdsp.h>
159#include <linux/module.h>
160#include <linux/vmalloc.h>
161#include "core.h"
162#include "dsp.h"
163
164const char *mISDN_dsp_revision = "2.0";
165
166static int debug;
167static int options;
168static int poll;
169static int dtmfthreshold = 100;
170
171MODULE_AUTHOR("Andreas Eversberg");
172module_param(debug, uint, S_IRUGO | S_IWUSR);
173module_param(options, uint, S_IRUGO | S_IWUSR);
174module_param(poll, uint, S_IRUGO | S_IWUSR);
175module_param(dtmfthreshold, uint, S_IRUGO | S_IWUSR);
176MODULE_LICENSE("GPL");
177
178/*int spinnest = 0;*/
179
180spinlock_t dsp_lock; /* global dsp lock */
181struct list_head dsp_ilist;
182struct list_head conf_ilist;
183int dsp_debug;
184int dsp_options;
185int dsp_poll, dsp_tics;
186
187/* check if rx may be turned off or must be turned on */
188static void
189dsp_rx_off_member(struct dsp *dsp)
190{
191 struct mISDN_ctrl_req cq;
192 int rx_off = 1;
193
194 if (!dsp->features_rx_off)
195 return;
196
197 /* not disabled */
198 if (!dsp->rx_disabled)
199 rx_off = 0;
200 /* software dtmf */
201 else if (dsp->dtmf.software)
202 rx_off = 0;
203 /* echo in software */
204 else if (dsp->echo && dsp->pcm_slot_tx < 0)
205 rx_off = 0;
206 /* bridge in software */
207 else if (dsp->conf) {
208 if (dsp->conf->software)
209 rx_off = 0;
210 }
211
212 if (rx_off == dsp->rx_is_off)
213 return;
214
215 if (!dsp->ch.peer) {
216 if (dsp_debug & DEBUG_DSP_CORE)
217 printk(KERN_DEBUG "%s: no peer, no rx_off\n",
218 __func__);
219 return;
220 }
221 cq.op = MISDN_CTRL_RX_OFF;
222 cq.p1 = rx_off;
223 if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
224 printk(KERN_DEBUG "%s: 2nd CONTROL_CHANNEL failed\n",
225 __func__);
226 return;
227 }
228 dsp->rx_is_off = rx_off;
229 if (dsp_debug & DEBUG_DSP_CORE)
230 printk(KERN_DEBUG "%s: %s set rx_off = %d\n",
231 __func__, dsp->name, rx_off);
232}
233static void
234dsp_rx_off(struct dsp *dsp)
235{
236 struct dsp_conf_member *member;
237
238 if (dsp_options & DSP_OPT_NOHARDWARE)
239 return;
240
241 /* no conf */
242 if (!dsp->conf) {
243 dsp_rx_off_member(dsp);
244 return;
245 }
246 /* check all members in conf */
247 list_for_each_entry(member, &dsp->conf->mlist, list) {
248 dsp_rx_off_member(member->dsp);
249 }
250}
251
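/*
 * dsp_control_req() parses PH_CONTROL_REQ messages from the upper layer:
 * the first int of skb->data is the control opcode (DSP_*, DTMF_*), any
 * remaining bytes are its parameter (volume, conference id, delay, blowfish
 * key, ...). A minimal sketch of a matching payload, assuming an int opcode
 * followed by an int parameter (illustration only, not a definition from
 * this file):
 *
 *	int cont = DSP_VOL_CHANGE_TX;
 *	int vol = 4;
 *	u8 payload[2 * sizeof(int)];
 *
 *	memcpy(payload, &cont, sizeof(int));
 *	memcpy(payload + sizeof(int), &vol, sizeof(int));
 *
 * payload is then sent as the body of a PH_CONTROL_REQ skb to this channel.
 */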
252static int
253dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
254{
255 struct sk_buff *nskb;
256 int ret = 0;
257 int cont;
258 u8 *data;
259 int len;
260
261 if (skb->len < sizeof(int))
262 printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__);
263 cont = *((int *)skb->data);
264 len = skb->len - sizeof(int);
265 data = skb->data + sizeof(int);
266
267 switch (cont) {
268 case DTMF_TONE_START: /* turn on DTMF */
269 if (dsp->hdlc) {
270 ret = -EINVAL;
271 break;
272 }
273 if (dsp_debug & DEBUG_DSP_CORE)
274 printk(KERN_DEBUG "%s: start dtmf\n", __func__);
275 if (len == sizeof(int)) {
276 printk(KERN_NOTICE "changing DTMF Threshold "
277 "to %d\n", *((int *)data));
278 dsp->dtmf.treshold = (*(int *)data) * 10000;
279 }
280 /* init goertzel */
281 dsp_dtmf_goertzel_init(dsp);
282
283 /* check dtmf hardware */
284 dsp_dtmf_hardware(dsp);
285 break;
286 case DTMF_TONE_STOP: /* turn off DTMF */
287 if (dsp_debug & DEBUG_DSP_CORE)
288 printk(KERN_DEBUG "%s: stop dtmf\n", __func__);
289 dsp->dtmf.hardware = 0;
290 dsp->dtmf.software = 0;
291 break;
292 case DSP_CONF_JOIN: /* join / update conference */
293 if (len < sizeof(int)) {
294 ret = -EINVAL;
295 break;
296 }
297 if (*((u32 *)data) == 0)
298 goto conf_split;
299 if (dsp_debug & DEBUG_DSP_CORE)
300 printk(KERN_DEBUG "%s: join conference %d\n",
301 __func__, *((u32 *)data));
302 ret = dsp_cmx_conf(dsp, *((u32 *)data));
303 /* dsp_cmx_hardware will also be called here */
304 dsp_rx_off(dsp);
305 if (dsp_debug & DEBUG_DSP_CMX)
306 dsp_cmx_debug(dsp);
307 break;
308 case DSP_CONF_SPLIT: /* remove from conference */
309conf_split:
310 if (dsp_debug & DEBUG_DSP_CORE)
311 printk(KERN_DEBUG "%s: release conference\n", __func__);
312 ret = dsp_cmx_conf(dsp, 0);
313 /* dsp_cmx_hardware will also be called here */
314 if (dsp_debug & DEBUG_DSP_CMX)
315 dsp_cmx_debug(dsp);
316 dsp_rx_off(dsp);
317 break;
318 case DSP_TONE_PATT_ON: /* play tone */
319 if (dsp->hdlc) {
320 ret = -EINVAL;
321 break;
322 }
323 if (len < sizeof(int)) {
324 ret = -EINVAL;
325 break;
326 }
327 if (dsp_debug & DEBUG_DSP_CORE)
328 printk(KERN_DEBUG "%s: turn tone 0x%x on\n",
329 __func__, *((int *)skb->data));
330 ret = dsp_tone(dsp, *((int *)data));
331 if (!ret) {
332 dsp_cmx_hardware(dsp->conf, dsp);
333 dsp_rx_off(dsp);
334 }
335 if (!dsp->tone.tone)
336 goto tone_off;
337 break;
338 case DSP_TONE_PATT_OFF: /* stop tone */
339 if (dsp->hdlc) {
340 ret = -EINVAL;
341 break;
342 }
343 if (dsp_debug & DEBUG_DSP_CORE)
344 printk(KERN_DEBUG "%s: turn tone off\n", __func__);
345 dsp_tone(dsp, 0);
346 dsp_cmx_hardware(dsp->conf, dsp);
347 dsp_rx_off(dsp);
348 /* reset tx buffers (user space data) */
349tone_off:
350 dsp->rx_W = 0;
351 dsp->rx_R = 0;
352 break;
353 case DSP_VOL_CHANGE_TX: /* change volume */
354 if (dsp->hdlc) {
355 ret = -EINVAL;
356 break;
357 }
358 if (len < sizeof(int)) {
359 ret = -EINVAL;
360 break;
361 }
362 dsp->tx_volume = *((int *)data);
363 if (dsp_debug & DEBUG_DSP_CORE)
364 printk(KERN_DEBUG "%s: change tx vol to %d\n",
365 __func__, dsp->tx_volume);
366 dsp_cmx_hardware(dsp->conf, dsp);
367 dsp_dtmf_hardware(dsp);
368 dsp_rx_off(dsp);
369 break;
370 case DSP_VOL_CHANGE_RX: /* change volume */
371 if (dsp->hdlc) {
372 ret = -EINVAL;
373 break;
374 }
375 if (len < sizeof(int)) {
376 ret = -EINVAL;
377 break;
378 }
379 dsp->rx_volume = *((int *)data);
380 if (dsp_debug & DEBUG_DSP_CORE)
381 printk(KERN_DEBUG "%s: change rx vol to %d\n",
 382				__func__, dsp->rx_volume);
383 dsp_cmx_hardware(dsp->conf, dsp);
384 dsp_dtmf_hardware(dsp);
385 dsp_rx_off(dsp);
386 break;
387 case DSP_ECHO_ON: /* enable echo */
388 dsp->echo = 1; /* soft echo */
389 if (dsp_debug & DEBUG_DSP_CORE)
390 printk(KERN_DEBUG "%s: enable cmx-echo\n", __func__);
391 dsp_cmx_hardware(dsp->conf, dsp);
392 dsp_rx_off(dsp);
393 if (dsp_debug & DEBUG_DSP_CMX)
394 dsp_cmx_debug(dsp);
395 break;
396 case DSP_ECHO_OFF: /* disable echo */
397 dsp->echo = 0;
398 if (dsp_debug & DEBUG_DSP_CORE)
399 printk(KERN_DEBUG "%s: disable cmx-echo\n", __func__);
400 dsp_cmx_hardware(dsp->conf, dsp);
401 dsp_rx_off(dsp);
402 if (dsp_debug & DEBUG_DSP_CMX)
403 dsp_cmx_debug(dsp);
404 break;
405 case DSP_RECEIVE_ON: /* enable receive to user space */
406 if (dsp_debug & DEBUG_DSP_CORE)
407 printk(KERN_DEBUG "%s: enable receive to user "
408 "space\n", __func__);
409 dsp->rx_disabled = 0;
410 dsp_rx_off(dsp);
411 break;
412 case DSP_RECEIVE_OFF: /* disable receive to user space */
413 if (dsp_debug & DEBUG_DSP_CORE)
414 printk(KERN_DEBUG "%s: disable receive to "
415 "user space\n", __func__);
416 dsp->rx_disabled = 1;
417 dsp_rx_off(dsp);
418 break;
419 case DSP_MIX_ON: /* enable mixing of tx data */
420 if (dsp->hdlc) {
421 ret = -EINVAL;
422 break;
423 }
424 if (dsp_debug & DEBUG_DSP_CORE)
425 printk(KERN_DEBUG "%s: enable mixing of "
 426				"tx-data with conf members\n", __func__);
427 dsp->tx_mix = 1;
428 dsp_cmx_hardware(dsp->conf, dsp);
429 dsp_rx_off(dsp);
430 if (dsp_debug & DEBUG_DSP_CMX)
431 dsp_cmx_debug(dsp);
432 break;
433 case DSP_MIX_OFF: /* disable mixing of tx data */
434 if (dsp->hdlc) {
435 ret = -EINVAL;
436 break;
437 }
438 if (dsp_debug & DEBUG_DSP_CORE)
439 printk(KERN_DEBUG "%s: disable mixing of "
 440				"tx-data with conf members\n", __func__);
441 dsp->tx_mix = 0;
442 dsp_cmx_hardware(dsp->conf, dsp);
443 dsp_rx_off(dsp);
444 if (dsp_debug & DEBUG_DSP_CMX)
445 dsp_cmx_debug(dsp);
446 break;
447 case DSP_TXDATA_ON: /* enable txdata */
448 dsp->tx_data = 1;
449 if (dsp_debug & DEBUG_DSP_CORE)
450 printk(KERN_DEBUG "%s: enable tx-data\n", __func__);
451 dsp_cmx_hardware(dsp->conf, dsp);
452 dsp_rx_off(dsp);
453 if (dsp_debug & DEBUG_DSP_CMX)
454 dsp_cmx_debug(dsp);
455 break;
456 case DSP_TXDATA_OFF: /* disable txdata */
457 dsp->tx_data = 0;
458 if (dsp_debug & DEBUG_DSP_CORE)
459 printk(KERN_DEBUG "%s: disable tx-data\n", __func__);
460 dsp_cmx_hardware(dsp->conf, dsp);
461 dsp_rx_off(dsp);
462 if (dsp_debug & DEBUG_DSP_CMX)
463 dsp_cmx_debug(dsp);
464 break;
465 case DSP_DELAY: /* use delay algorithm instead of dynamic
466 jitter algorithm */
467 if (dsp->hdlc) {
468 ret = -EINVAL;
469 break;
470 }
471 if (len < sizeof(int)) {
472 ret = -EINVAL;
473 break;
474 }
475 dsp->cmx_delay = (*((int *)data)) << 3;
 476		/* milliseconds to samples (8 samples per ms at 8 kHz) */
477 if (dsp->cmx_delay >= (CMX_BUFF_HALF>>1))
478 /* clip to half of maximum usable buffer
479 (half of half buffer) */
480 dsp->cmx_delay = (CMX_BUFF_HALF>>1) - 1;
481 if (dsp_debug & DEBUG_DSP_CORE)
482 printk(KERN_DEBUG "%s: use delay algorithm to "
483 "compensate jitter (%d samples)\n",
484 __func__, dsp->cmx_delay);
485 break;
486 case DSP_JITTER: /* use dynamic jitter algorithm instead of
487 delay algorithm */
488 if (dsp->hdlc) {
489 ret = -EINVAL;
490 break;
491 }
492 dsp->cmx_delay = 0;
493 if (dsp_debug & DEBUG_DSP_CORE)
494 printk(KERN_DEBUG "%s: use jitter algorithm to "
495 "compensate jitter\n", __func__);
496 break;
497 case DSP_TX_DEJITTER: /* use dynamic jitter algorithm for tx-buffer */
498 if (dsp->hdlc) {
499 ret = -EINVAL;
500 break;
501 }
502 dsp->tx_dejitter = 1;
503 if (dsp_debug & DEBUG_DSP_CORE)
504 printk(KERN_DEBUG "%s: use dejitter on TX "
505 "buffer\n", __func__);
506 break;
507 case DSP_TX_DEJ_OFF: /* use tx-buffer without dejittering*/
508 if (dsp->hdlc) {
509 ret = -EINVAL;
510 break;
511 }
512 dsp->tx_dejitter = 0;
513 if (dsp_debug & DEBUG_DSP_CORE)
514 printk(KERN_DEBUG "%s: use TX buffer without "
515 "dejittering\n", __func__);
516 break;
517 case DSP_PIPELINE_CFG:
518 if (dsp->hdlc) {
519 ret = -EINVAL;
520 break;
521 }
522 if (len > 0 && ((char *)data)[len - 1]) {
523 printk(KERN_DEBUG "%s: pipeline config string "
524 "is not NULL terminated!\n", __func__);
525 ret = -EINVAL;
526 } else {
527 dsp->pipeline.inuse = 1;
528 dsp_cmx_hardware(dsp->conf, dsp);
529 ret = dsp_pipeline_build(&dsp->pipeline,
530 len > 0 ? (char *)data : NULL);
531 dsp_cmx_hardware(dsp->conf, dsp);
532 dsp_rx_off(dsp);
533 }
534 break;
535 case DSP_BF_ENABLE_KEY: /* turn blowfish on */
536 if (dsp->hdlc) {
537 ret = -EINVAL;
538 break;
539 }
540 if (len < 4 || len > 56) {
541 ret = -EINVAL;
542 break;
543 }
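		/* Blowfish accepts keys of 4..56 bytes (32..448 bits) */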
544 if (dsp_debug & DEBUG_DSP_CORE)
545 printk(KERN_DEBUG "%s: turn blowfish on (key "
546 "not shown)\n", __func__);
547 ret = dsp_bf_init(dsp, (u8 *)data, len);
548 /* set new cont */
549 if (!ret)
550 cont = DSP_BF_ACCEPT;
551 else
552 cont = DSP_BF_REJECT;
553 /* send indication if it worked to set it */
554 nskb = _alloc_mISDN_skb(PH_CONTROL_IND, MISDN_ID_ANY,
555 sizeof(int), &cont, GFP_ATOMIC);
556 if (nskb) {
557 if (dsp->up) {
558 if (dsp->up->send(dsp->up, nskb))
559 dev_kfree_skb(nskb);
560 } else
561 dev_kfree_skb(nskb);
562 }
563 if (!ret) {
564 dsp_cmx_hardware(dsp->conf, dsp);
565 dsp_dtmf_hardware(dsp);
566 dsp_rx_off(dsp);
567 }
568 break;
569 case DSP_BF_DISABLE: /* turn blowfish off */
570 if (dsp->hdlc) {
571 ret = -EINVAL;
572 break;
573 }
574 if (dsp_debug & DEBUG_DSP_CORE)
575 printk(KERN_DEBUG "%s: turn blowfish off\n", __func__);
576 dsp_bf_cleanup(dsp);
577 dsp_cmx_hardware(dsp->conf, dsp);
578 dsp_dtmf_hardware(dsp);
579 dsp_rx_off(dsp);
580 break;
581 default:
582 if (dsp_debug & DEBUG_DSP_CORE)
583 printk(KERN_DEBUG "%s: ctrl req %x unhandled\n",
584 __func__, cont);
585 ret = -EINVAL;
586 }
587 return ret;
588}
589
590static void
591get_features(struct mISDNchannel *ch)
592{
593 struct dsp *dsp = container_of(ch, struct dsp, ch);
594 struct mISDN_ctrl_req cq;
595
596 if (dsp_options & DSP_OPT_NOHARDWARE)
597 return;
598 if (!ch->peer) {
599 if (dsp_debug & DEBUG_DSP_CORE)
600 printk(KERN_DEBUG "%s: no peer, no features\n",
601 __func__);
602 return;
603 }
604 memset(&cq, 0, sizeof(cq));
605 cq.op = MISDN_CTRL_GETOP;
606 if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq) < 0) {
607 printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
608 __func__);
609 return;
610 }
611 if (cq.op & MISDN_CTRL_RX_OFF)
612 dsp->features_rx_off = 1;
613 if ((cq.op & MISDN_CTRL_HW_FEATURES_OP)) {
614 cq.op = MISDN_CTRL_HW_FEATURES;
615 *((u_long *)&cq.p1) = (u_long)&dsp->features;
616 if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq)) {
617 printk(KERN_DEBUG "%s: 2nd CONTROL_CHANNEL failed\n",
618 __func__);
619 }
620 } else
621 if (dsp_debug & DEBUG_DSP_CORE)
622 printk(KERN_DEBUG "%s: features not supported for %s\n",
623 __func__, dsp->name);
624}
625
626static int
627dsp_function(struct mISDNchannel *ch, struct sk_buff *skb)
628{
629 struct dsp *dsp = container_of(ch, struct dsp, ch);
630 struct mISDNhead *hh;
631 int ret = 0;
632 u8 *digits;
633 int cont;
634 struct sk_buff *nskb;
635 u_long flags;
636
637 hh = mISDN_HEAD_P(skb);
638 switch (hh->prim) {
639 /* FROM DOWN */
640 case (PH_DATA_CNF):
641 dsp->data_pending = 0;
642 /* trigger next hdlc frame, if any */
643 if (dsp->hdlc) {
644 spin_lock_irqsave(&dsp_lock, flags);
645 if (dsp->b_active)
646 schedule_work(&dsp->workq);
647 spin_unlock_irqrestore(&dsp_lock, flags);
648 }
649 break;
650 case (PH_DATA_IND):
651 case (DL_DATA_IND):
652 if (skb->len < 1) {
653 ret = -EINVAL;
654 break;
655 }
656 if (dsp->rx_is_off) {
657 if (dsp_debug & DEBUG_DSP_CORE)
658 printk(KERN_DEBUG "%s: rx-data during rx_off"
659 " for %s\n",
660 __func__, dsp->name);
661 }
662 if (dsp->hdlc) {
663 /* hdlc */
664 spin_lock_irqsave(&dsp_lock, flags);
665 dsp_cmx_hdlc(dsp, skb);
666 spin_unlock_irqrestore(&dsp_lock, flags);
667 if (dsp->rx_disabled) {
668 /* if receive is not allowed */
669 break;
670 }
671 hh->prim = DL_DATA_IND;
672 if (dsp->up)
673 return dsp->up->send(dsp->up, skb);
674 break;
675 }
676
677 /* decrypt if enabled */
678 if (dsp->bf_enable)
679 dsp_bf_decrypt(dsp, skb->data, skb->len);
680 /* pipeline */
681 if (dsp->pipeline.inuse)
682 dsp_pipeline_process_rx(&dsp->pipeline, skb->data,
683 skb->len);
684 /* change volume if requested */
685 if (dsp->rx_volume)
686 dsp_change_volume(skb, dsp->rx_volume);
687
688 /* check if dtmf soft decoding is turned on */
689 if (dsp->dtmf.software) {
690 digits = dsp_dtmf_goertzel_decode(dsp, skb->data,
691 skb->len, (dsp_options&DSP_OPT_ULAW)?1:0);
692 while (*digits) {
693 if (dsp_debug & DEBUG_DSP_DTMF)
694 printk(KERN_DEBUG "%s: digit"
695 "(%c) to layer %s\n",
696 __func__, *digits, dsp->name);
697 cont = DTMF_TONE_VAL | *digits;
698 nskb = _alloc_mISDN_skb(PH_CONTROL_IND,
699 MISDN_ID_ANY, sizeof(int), &cont,
700 GFP_ATOMIC);
701 if (nskb) {
702 if (dsp->up) {
703 if (dsp->up->send(
704 dsp->up, nskb))
705 dev_kfree_skb(nskb);
706 } else
707 dev_kfree_skb(nskb);
708 }
709 digits++;
710 }
711 }
712 /* we need to process receive data if software */
713 spin_lock_irqsave(&dsp_lock, flags);
714 if (dsp->pcm_slot_tx < 0 && dsp->pcm_slot_rx < 0) {
715 /* process data from card at cmx */
716 dsp_cmx_receive(dsp, skb);
717 }
718 spin_unlock_irqrestore(&dsp_lock, flags);
719
720 if (dsp->rx_disabled) {
721 /* if receive is not allowed */
722 break;
723 }
724 hh->prim = DL_DATA_IND;
725 if (dsp->up)
726 return dsp->up->send(dsp->up, skb);
727 break;
728 case (PH_CONTROL_IND):
729 if (dsp_debug & DEBUG_DSP_DTMFCOEFF)
730 printk(KERN_DEBUG "%s: PH_CONTROL INDICATION "
731 "received: %x (len %d) %s\n", __func__,
732 hh->id, skb->len, dsp->name);
733 switch (hh->id) {
734 case (DTMF_HFC_COEF): /* getting coefficients */
735 if (!dsp->dtmf.hardware) {
736 if (dsp_debug & DEBUG_DSP_DTMFCOEFF)
737 printk(KERN_DEBUG "%s: ignoring DTMF "
738 "coefficients from HFC\n",
739 __func__);
740 break;
741 }
742 digits = dsp_dtmf_goertzel_decode(dsp, skb->data,
743 skb->len, 2);
744 while (*digits) {
745 int k;
746 struct sk_buff *nskb;
747 if (dsp_debug & DEBUG_DSP_DTMF)
748 printk(KERN_DEBUG "%s: digit"
749 "(%c) to layer %s\n",
750 __func__, *digits, dsp->name);
751 k = *digits | DTMF_TONE_VAL;
752 nskb = _alloc_mISDN_skb(PH_CONTROL_IND,
753 MISDN_ID_ANY, sizeof(int), &k,
754 GFP_ATOMIC);
755 if (nskb) {
756 if (dsp->up) {
757 if (dsp->up->send(
758 dsp->up, nskb))
759 dev_kfree_skb(nskb);
760 } else
761 dev_kfree_skb(nskb);
762 }
763 digits++;
764 }
765 break;
766 case (HFC_VOL_CHANGE_TX): /* change volume */
767 if (skb->len != sizeof(int)) {
768 ret = -EINVAL;
769 break;
770 }
771 spin_lock_irqsave(&dsp_lock, flags);
772 dsp->tx_volume = *((int *)skb->data);
773 if (dsp_debug & DEBUG_DSP_CORE)
774 printk(KERN_DEBUG "%s: change tx volume to "
775 "%d\n", __func__, dsp->tx_volume);
776 dsp_cmx_hardware(dsp->conf, dsp);
777 dsp_dtmf_hardware(dsp);
778 dsp_rx_off(dsp);
779 spin_unlock_irqrestore(&dsp_lock, flags);
780 break;
781 default:
782 if (dsp_debug & DEBUG_DSP_CORE)
783 printk(KERN_DEBUG "%s: ctrl ind %x unhandled "
784 "%s\n", __func__, hh->id, dsp->name);
785 ret = -EINVAL;
786 }
787 break;
788 case (PH_ACTIVATE_IND):
789 case (PH_ACTIVATE_CNF):
790 if (dsp_debug & DEBUG_DSP_CORE)
791 printk(KERN_DEBUG "%s: b_channel is now active %s\n",
792 __func__, dsp->name);
793 /* bchannel now active */
794 spin_lock_irqsave(&dsp_lock, flags);
795 dsp->b_active = 1;
796 dsp->data_pending = 0;
797 dsp->rx_init = 1;
798 /* rx_W and rx_R will be adjusted on first frame */
799 dsp->rx_W = 0;
800 dsp->rx_R = 0;
801 memset(dsp->rx_buff, 0, sizeof(dsp->rx_buff));
802 dsp_cmx_hardware(dsp->conf, dsp);
803 dsp_dtmf_hardware(dsp);
804 dsp_rx_off(dsp);
805 spin_unlock_irqrestore(&dsp_lock, flags);
806 if (dsp_debug & DEBUG_DSP_CORE)
807 printk(KERN_DEBUG "%s: done with activation, sending "
808 "confirm to user space. %s\n", __func__,
809 dsp->name);
810 /* send activation to upper layer */
811 hh->prim = DL_ESTABLISH_CNF;
812 if (dsp->up)
813 return dsp->up->send(dsp->up, skb);
814 break;
815 case (PH_DEACTIVATE_IND):
816 case (PH_DEACTIVATE_CNF):
817 if (dsp_debug & DEBUG_DSP_CORE)
818 printk(KERN_DEBUG "%s: b_channel is now inactive %s\n",
819 __func__, dsp->name);
820 /* bchannel now inactive */
821 spin_lock_irqsave(&dsp_lock, flags);
822 dsp->b_active = 0;
823 dsp->data_pending = 0;
824 dsp_cmx_hardware(dsp->conf, dsp);
825 dsp_rx_off(dsp);
826 spin_unlock_irqrestore(&dsp_lock, flags);
827 hh->prim = DL_RELEASE_CNF;
828 if (dsp->up)
829 return dsp->up->send(dsp->up, skb);
830 break;
831 /* FROM UP */
832 case (DL_DATA_REQ):
833 case (PH_DATA_REQ):
834 if (skb->len < 1) {
835 ret = -EINVAL;
836 break;
837 }
838 if (dsp->hdlc) {
839 /* hdlc */
840 spin_lock_irqsave(&dsp_lock, flags);
841 if (dsp->b_active) {
842 skb_queue_tail(&dsp->sendq, skb);
843 schedule_work(&dsp->workq);
844 }
845 spin_unlock_irqrestore(&dsp_lock, flags);
846 return 0;
847 }
848 /* send data to tx-buffer (if no tone is played) */
849 if (!dsp->tone.tone) {
850 spin_lock_irqsave(&dsp_lock, flags);
851 dsp_cmx_transmit(dsp, skb);
852 spin_unlock_irqrestore(&dsp_lock, flags);
853 }
854 break;
855 case (PH_CONTROL_REQ):
856 spin_lock_irqsave(&dsp_lock, flags);
857 ret = dsp_control_req(dsp, hh, skb);
858 spin_unlock_irqrestore(&dsp_lock, flags);
859 break;
860 case (DL_ESTABLISH_REQ):
861 case (PH_ACTIVATE_REQ):
862 if (dsp_debug & DEBUG_DSP_CORE)
863 printk(KERN_DEBUG "%s: activating b_channel %s\n",
864 __func__, dsp->name);
865 if (dsp->dtmf.hardware || dsp->dtmf.software)
866 dsp_dtmf_goertzel_init(dsp);
867 get_features(ch);
868 /* send ph_activate */
869 hh->prim = PH_ACTIVATE_REQ;
870 if (ch->peer)
871 return ch->recv(ch->peer, skb);
872 break;
873 case (DL_RELEASE_REQ):
874 case (PH_DEACTIVATE_REQ):
875 if (dsp_debug & DEBUG_DSP_CORE)
876 printk(KERN_DEBUG "%s: releasing b_channel %s\n",
877 __func__, dsp->name);
878 spin_lock_irqsave(&dsp_lock, flags);
879 dsp->tone.tone = 0;
880 dsp->tone.hardware = 0;
881 dsp->tone.software = 0;
882 if (timer_pending(&dsp->tone.tl))
883 del_timer(&dsp->tone.tl);
884 if (dsp->conf)
885 dsp_cmx_conf(dsp, 0); /* dsp_cmx_hardware will also be
886 called here */
887 skb_queue_purge(&dsp->sendq);
888 spin_unlock_irqrestore(&dsp_lock, flags);
889 hh->prim = PH_DEACTIVATE_REQ;
890 if (ch->peer)
891 return ch->recv(ch->peer, skb);
892 break;
893 default:
894 if (dsp_debug & DEBUG_DSP_CORE)
895 printk(KERN_DEBUG "%s: msg %x unhandled %s\n",
896 __func__, hh->prim, dsp->name);
897 ret = -EINVAL;
898 }
899 if (!ret)
900 dev_kfree_skb(skb);
901 return ret;
902}
903
904static int
905dsp_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
906{
907 struct dsp *dsp = container_of(ch, struct dsp, ch);
908 u_long flags;
909 int err = 0;
910
911 if (debug & DEBUG_DSP_CTRL)
912 printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
913
914 switch (cmd) {
915 case OPEN_CHANNEL:
916 break;
917 case CLOSE_CHANNEL:
918 if (dsp->ch.peer)
919 dsp->ch.peer->ctrl(dsp->ch.peer, CLOSE_CHANNEL, NULL);
920
921 /* wait until workqueue has finished,
922 * must lock here, or we may hit send-process currently
923 * queueing. */
924 spin_lock_irqsave(&dsp_lock, flags);
925 dsp->b_active = 0;
926 spin_unlock_irqrestore(&dsp_lock, flags);
927 /* MUST not be locked, because it waits until queue is done. */
928 cancel_work_sync(&dsp->workq);
929 spin_lock_irqsave(&dsp_lock, flags);
930 if (timer_pending(&dsp->tone.tl))
931 del_timer(&dsp->tone.tl);
932 skb_queue_purge(&dsp->sendq);
933 if (dsp_debug & DEBUG_DSP_CTRL)
934 printk(KERN_DEBUG "%s: releasing member %s\n",
935 __func__, dsp->name);
936 dsp->b_active = 0;
937 dsp_cmx_conf(dsp, 0); /* dsp_cmx_hardware will also be called
938 here */
939 dsp_pipeline_destroy(&dsp->pipeline);
940
941 if (dsp_debug & DEBUG_DSP_CTRL)
942 printk(KERN_DEBUG "%s: remove & destroy object %s\n",
943 __func__, dsp->name);
944 list_del(&dsp->list);
945 spin_unlock_irqrestore(&dsp_lock, flags);
946
947 if (dsp_debug & DEBUG_DSP_CTRL)
948 printk(KERN_DEBUG "%s: dsp instance released\n",
949 __func__);
950 vfree(dsp);
951 module_put(THIS_MODULE);
952 break;
953 }
954 return err;
955}
956
957static void
958dsp_send_bh(struct work_struct *work)
959{
960 struct dsp *dsp = container_of(work, struct dsp, workq);
961 struct sk_buff *skb;
962 struct mISDNhead *hh;
963
964 if (dsp->hdlc && dsp->data_pending)
965 return; /* wait until data has been acknowledged */
966
967 /* send queued data */
968 while ((skb = skb_dequeue(&dsp->sendq))) {
 969		/* in locked state, we may still have data in the queue */
970 if (dsp->data_pending) {
971 if (dsp_debug & DEBUG_DSP_CORE)
972 printk(KERN_DEBUG "%s: fifo full %s, this is "
973 "no bug!\n", __func__, dsp->name);
974 /* flush transparent data, if not acked */
975 dev_kfree_skb(skb);
976 continue;
977 }
978 hh = mISDN_HEAD_P(skb);
979 if (hh->prim == DL_DATA_REQ) {
980 /* send packet up */
981 if (dsp->up) {
982 if (dsp->up->send(dsp->up, skb))
983 dev_kfree_skb(skb);
984 } else
985 dev_kfree_skb(skb);
986 } else {
987 /* send packet down */
988 if (dsp->ch.peer) {
989 dsp->data_pending = 1;
990 if (dsp->ch.recv(dsp->ch.peer, skb)) {
991 dev_kfree_skb(skb);
992 dsp->data_pending = 0;
993 }
994 } else
995 dev_kfree_skb(skb);
996 }
997 }
998}
999
1000static int
1001dspcreate(struct channel_req *crq)
1002{
1003 struct dsp *ndsp;
1004 u_long flags;
1005
1006 if (crq->protocol != ISDN_P_B_L2DSP
1007 && crq->protocol != ISDN_P_B_L2DSPHDLC)
1008 return -EPROTONOSUPPORT;
1009 ndsp = vmalloc(sizeof(struct dsp));
1010 if (!ndsp) {
1011 printk(KERN_ERR "%s: vmalloc struct dsp failed\n", __func__);
1012 return -ENOMEM;
1013 }
1014 memset(ndsp, 0, sizeof(struct dsp));
1015 if (dsp_debug & DEBUG_DSP_CTRL)
1016 printk(KERN_DEBUG "%s: creating new dsp instance\n", __func__);
1017
1018 /* default enabled */
1019 INIT_WORK(&ndsp->workq, (void *)dsp_send_bh);
1020 skb_queue_head_init(&ndsp->sendq);
1021 ndsp->ch.send = dsp_function;
1022 ndsp->ch.ctrl = dsp_ctrl;
1023 ndsp->up = crq->ch;
1024 crq->ch = &ndsp->ch;
1025 if (crq->protocol == ISDN_P_B_L2DSP) {
1026 crq->protocol = ISDN_P_B_RAW;
1027 ndsp->hdlc = 0;
1028 } else {
1029 crq->protocol = ISDN_P_B_HDLC;
1030 ndsp->hdlc = 1;
1031 }
1032 if (!try_module_get(THIS_MODULE))
 1033		printk(KERN_WARNING "%s: cannot get module\n",
1034 __func__);
1035
1036 sprintf(ndsp->name, "DSP_C%x(0x%p)",
1037 ndsp->up->st->dev->id + 1, ndsp);
1038 /* set frame size to start */
 1039	ndsp->features.hfc_id = -1; /* current HFC id */
1040 ndsp->features.pcm_id = -1; /* current PCM id */
 1041	ndsp->pcm_slot_rx = -1; /* current PCM slot */
1042 ndsp->pcm_slot_tx = -1;
1043 ndsp->pcm_bank_rx = -1;
1044 ndsp->pcm_bank_tx = -1;
1045 ndsp->hfc_conf = -1; /* current conference number */
1046 /* set tone timer */
1047 ndsp->tone.tl.function = (void *)dsp_tone_timeout;
1048 ndsp->tone.tl.data = (long) ndsp;
1049 init_timer(&ndsp->tone.tl);
1050
1051 if (dtmfthreshold < 20 || dtmfthreshold > 500)
1052 dtmfthreshold = 200;
1053 ndsp->dtmf.treshold = dtmfthreshold*10000;
1054
1055 /* init pipeline append to list */
1056 spin_lock_irqsave(&dsp_lock, flags);
1057 dsp_pipeline_init(&ndsp->pipeline);
1058 list_add_tail(&ndsp->list, &dsp_ilist);
1059 spin_unlock_irqrestore(&dsp_lock, flags);
1060
1061 return 0;
1062}
1063
1064
1065static struct Bprotocol DSP = {
1066 .Bprotocols = (1 << (ISDN_P_B_L2DSP & ISDN_P_B_MASK))
1067 | (1 << (ISDN_P_B_L2DSPHDLC & ISDN_P_B_MASK)),
1068 .name = "dsp",
1069 .create = dspcreate
1070};
1071
1072static int dsp_init(void)
1073{
1074 int err;
1075 int tics;
1076
 1077	printk(KERN_INFO "DSP module %s\n", mISDN_dsp_revision);
1078
1079 dsp_options = options;
1080 dsp_debug = debug;
1081
1082 /* set packet size */
1083 dsp_poll = poll;
1084 if (dsp_poll) {
1085 if (dsp_poll > MAX_POLL) {
1086 printk(KERN_ERR "%s: Wrong poll value (%d), use %d "
1087 "maximum.\n", __func__, poll, MAX_POLL);
1088 err = -EINVAL;
1089 return err;
1090 }
1091 if (dsp_poll < 8) {
1092 printk(KERN_ERR "%s: Wrong poll value (%d), use 8 "
1093 "minimum.\n", __func__, dsp_poll);
1094 err = -EINVAL;
1095 return err;
1096 }
1097 dsp_tics = poll * HZ / 8000;
1098 if (dsp_tics * 8000 != poll * HZ) {
1099 printk(KERN_INFO "mISDN_dsp: Cannot clock every %d "
 1100			    "samples (0.125 ms each). It is not a multiple of "
1101 "%d HZ.\n", poll, HZ);
1102 err = -EINVAL;
1103 return err;
1104 }
1105 } else {
1106 poll = 8;
1107 while (poll <= MAX_POLL) {
1108 tics = poll * HZ / 8000;
1109 if (tics * 8000 == poll * HZ) {
1110 dsp_tics = tics;
1111 dsp_poll = poll;
1112 if (poll >= 64)
1113 break;
1114 }
1115 poll++;
1116 }
1117 }
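	/*
	 * example: with HZ=250 the loop above selects dsp_poll=64 and
	 * dsp_tics=2, because 64 samples are exactly 8 ms = 2 jiffies
	 */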
1118 if (dsp_poll == 0) {
1119 printk(KERN_INFO "mISDN_dsp: There is no multiple of kernel "
1120 "clock that equals exactly the duration of 8-256 "
1121 "samples. (Choose kernel clock speed like 100, 250, "
1122 "300, 1000)\n");
1123 err = -EINVAL;
1124 return err;
1125 }
1126 printk(KERN_INFO "mISDN_dsp: DSP clocks every %d samples. This equals "
1127 "%d jiffies.\n", dsp_poll, dsp_tics);
1128
1129 spin_lock_init(&dsp_lock);
1130 INIT_LIST_HEAD(&dsp_ilist);
1131 INIT_LIST_HEAD(&conf_ilist);
1132
1133 /* init conversion tables */
1134 dsp_audio_generate_law_tables();
1135 dsp_silence = (dsp_options&DSP_OPT_ULAW)?0xff:0x2a;
1136 dsp_audio_law_to_s32 = (dsp_options&DSP_OPT_ULAW)?dsp_audio_ulaw_to_s32:
1137 dsp_audio_alaw_to_s32;
1138 dsp_audio_generate_s2law_table();
1139 dsp_audio_generate_seven();
1140 dsp_audio_generate_mix_table();
1141 if (dsp_options & DSP_OPT_ULAW)
1142 dsp_audio_generate_ulaw_samples();
1143 dsp_audio_generate_volume_changes();
1144
1145 err = dsp_pipeline_module_init();
1146 if (err) {
1147 printk(KERN_ERR "mISDN_dsp: Can't initialize pipeline, "
1148 "error(%d)\n", err);
1149 return err;
1150 }
1151
1152 err = mISDN_register_Bprotocol(&DSP);
1153 if (err) {
1154 printk(KERN_ERR "Can't register %s error(%d)\n", DSP.name, err);
1155 return err;
1156 }
1157
1158 /* set sample timer */
1159 dsp_spl_tl.function = (void *)dsp_cmx_send;
1160 dsp_spl_tl.data = 0;
1161 init_timer(&dsp_spl_tl);
1162 dsp_spl_tl.expires = jiffies + dsp_tics;
1163 dsp_spl_jiffies = dsp_spl_tl.expires;
1164 add_timer(&dsp_spl_tl);
1165
1166 return 0;
1167}
1168
1169
1170static void dsp_cleanup(void)
1171{
1172 mISDN_unregister_Bprotocol(&DSP);
1173
1174 if (timer_pending(&dsp_spl_tl))
1175 del_timer(&dsp_spl_tl);
1176
1177 if (!list_empty(&dsp_ilist)) {
1178 printk(KERN_ERR "mISDN_dsp: Audio DSP object inst list not "
1179 "empty.\n");
1180 }
1181 if (!list_empty(&conf_ilist)) {
1182 printk(KERN_ERR "mISDN_dsp: Conference list not empty. Not "
1183 "all memory freed.\n");
1184 }
1185
1186 dsp_pipeline_module_exit();
1187}
1188
1189module_init(dsp_init);
1190module_exit(dsp_cleanup);
1191
diff --git a/drivers/isdn/mISDN/dsp_dtmf.c b/drivers/isdn/mISDN/dsp_dtmf.c
new file mode 100644
index 000000000000..efc371c1f0dc
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_dtmf.c
@@ -0,0 +1,303 @@
1/*
2 * DTMF decoder.
3 *
4 * Copyright by Andreas Eversberg (jolly@eversberg.eu)
5 * based on different decoders such as ISDN4Linux
6 *
7 * This software may be used and distributed according to the terms
8 * of the GNU General Public License, incorporated herein by reference.
9 *
10 */
11
12#include <linux/mISDNif.h>
13#include <linux/mISDNdsp.h>
14#include "core.h"
15#include "dsp.h"
16
17#define NCOEFF 8 /* number of frequencies to be analyzed */
18
19/* For DTMF recognition:
20 * 2 * cos(2 * PI * k / N) precalculated for all k
21 */
22static u64 cos2pik[NCOEFF] =
23{
24 /* k << 15 (source: hfc-4s/8s documentation (www.colognechip.de)) */
25 55960, 53912, 51402, 48438, 38146, 32650, 26170, 18630
26};
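/*
 * These are 2 * cos(2 * PI * f / 8000) in Q15 format (<< 15) for the DTMF
 * frequencies 697, 770, 852, 941, 1209, 1336, 1477 and 1633 Hz at 8 kHz
 * sampling. A small userspace sketch (illustration only, not part of the
 * driver) reproduces the table up to rounding:
 *
 *	#include <math.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		const double f[8] = { 697, 770, 852, 941,
 *				      1209, 1336, 1477, 1633 };
 *		int k;
 *
 *		for (k = 0; k < 8; k++)
 *			printf("%.0f\n",
 *				2.0 * cos(2.0 * M_PI * f[k] / 8000.0) * 32768.0);
 *		return 0;
 *	}
 */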
27
28/* digit matrix */
29static char dtmf_matrix[4][4] =
30{
31 {'1', '2', '3', 'A'},
32 {'4', '5', '6', 'B'},
33 {'7', '8', '9', 'C'},
34 {'*', '0', '#', 'D'}
35};
36
37/* dtmf detection using goertzel algorithm
38 * init function
39 */
40void dsp_dtmf_goertzel_init(struct dsp *dsp)
41{
42 dsp->dtmf.size = 0;
43 dsp->dtmf.lastwhat = '\0';
44 dsp->dtmf.lastdigit = '\0';
45 dsp->dtmf.count = 0;
46}
47
48/* check for hardware or software features
49 */
50void dsp_dtmf_hardware(struct dsp *dsp)
51{
52 int hardware = 1;
53
54 if (!dsp->features.hfc_dtmf)
55 hardware = 0;
56
57 /* check for volume change */
58 if (dsp->tx_volume) {
59 if (dsp_debug & DEBUG_DSP_DTMF)
60 printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
61 "because tx_volume is changed\n",
62 __func__, dsp->name);
63 hardware = 0;
64 }
65 if (dsp->rx_volume) {
66 if (dsp_debug & DEBUG_DSP_DTMF)
67 printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
68 "because rx_volume is changed\n",
69 __func__, dsp->name);
70 hardware = 0;
71 }
72 /* check if encryption is enabled */
73 if (dsp->bf_enable) {
74 if (dsp_debug & DEBUG_DSP_DTMF)
75 printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
76 "because encryption is enabled\n",
77 __func__, dsp->name);
78 hardware = 0;
79 }
80 /* check if pipeline exists */
81 if (dsp->pipeline.inuse) {
82 if (dsp_debug & DEBUG_DSP_DTMF)
83 printk(KERN_DEBUG "%s dsp %s cannot do hardware DTMF, "
84 "because pipeline exists.\n",
85 __func__, dsp->name);
86 hardware = 0;
87 }
88
89 dsp->dtmf.hardware = hardware;
90 dsp->dtmf.software = !hardware;
91}
92
93
94/*************************************************************
95 * calculate the coefficients of the given sample and decode *
96 *************************************************************/
97
98/* the given sample is decoded. if the sample is not long enough for a
99 * complete frame, the decoding is finished and continued with the next
100 * call of this function.
101 *
102 * the algorithm is very good for detection with a minimum of errors. i
 103 * tested it a lot. it even works with very short tones (40ms). the only
 104 * disadvantage is that it doesn't work well with different volumes of both
 105 * tones. this will happen if acoustically coupled dialers are used.
 106 * it sometimes detects tones during speech, which is normal for decoders.
 107 * use sequences to give commands during calls.
108 *
109 * dtmf - points to a structure of the current dtmf state
110 * spl and len - the sample
111 * fmt - 0 = alaw, 1 = ulaw, 2 = coefficients from HFC DTMF hw-decoder
112 */
113
114u8
115*dsp_dtmf_goertzel_decode(struct dsp *dsp, u8 *data, int len, int fmt)
116{
117 u8 what;
118 int size;
119 signed short *buf;
120 s32 sk, sk1, sk2;
121 int k, n, i;
122 s32 *hfccoeff;
123 s32 result[NCOEFF], tresh, treshl;
124 int lowgroup, highgroup;
125 s64 cos2pik_;
126
127 dsp->dtmf.digits[0] = '\0';
128
129 /* Note: The function will loop until the buffer has not enough samples
130 * left to decode a full frame.
131 */
132again:
133 /* convert samples */
134 size = dsp->dtmf.size;
135 buf = dsp->dtmf.buffer;
136 switch (fmt) {
137 case 0: /* alaw */
138 case 1: /* ulaw */
139 while (size < DSP_DTMF_NPOINTS && len) {
140 buf[size++] = dsp_audio_law_to_s32[*data++];
141 len--;
142 }
143 break;
144
145 case 2: /* HFC coefficients */
146 default:
147 if (len < 64) {
148 if (len > 0)
149 printk(KERN_ERR "%s: coefficients have invalid "
150 "size. (is=%d < must=%d)\n",
151 __func__, len, 64);
152 return dsp->dtmf.digits;
153 }
154 hfccoeff = (s32 *)data;
155 for (k = 0; k < NCOEFF; k++) {
156 sk2 = (*hfccoeff++)>>4;
157 sk = (*hfccoeff++)>>4;
158 if (sk > 32767 || sk < -32767 || sk2 > 32767
159 || sk2 < -32767)
160 printk(KERN_WARNING
161 "DTMF-Detection overflow\n");
162 /* compute |X(k)|**2 */
163 result[k] =
164 (sk * sk) -
165 (((cos2pik[k] * sk) >> 15) * sk2) +
166 (sk2 * sk2);
167 }
168 data += 64;
169 len -= 64;
170 goto coefficients;
171 break;
172 }
173 dsp->dtmf.size = size;
174
175 if (size < DSP_DTMF_NPOINTS)
176 return dsp->dtmf.digits;
177
178 dsp->dtmf.size = 0;
179
180 /* now we have a full buffer of signed long samples - we do goertzel */
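	/*
	 * Goertzel recurrence per frequency k, with c = 2*cos(2*pi*f_k/8000)
	 * (the Q15 value cos2pik[k] divided by 32768):
	 *
	 *	s(n) = x(n) + c * s(n-1) - s(n-2)
	 *
	 * and after N = DSP_DTMF_NPOINTS samples the squared magnitude is
	 *
	 *	|X(k)|^2 = s(N-1)^2 + s(N-2)^2 - c * s(N-1) * s(N-2)
	 *
	 * which is what the loop below computes in sk, sk1 and sk2.
	 */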
181 for (k = 0; k < NCOEFF; k++) {
182 sk = 0;
183 sk1 = 0;
184 sk2 = 0;
185 buf = dsp->dtmf.buffer;
186 cos2pik_ = cos2pik[k];
187 for (n = 0; n < DSP_DTMF_NPOINTS; n++) {
188 sk = ((cos2pik_*sk1)>>15) - sk2 + (*buf++);
189 sk2 = sk1;
190 sk1 = sk;
191 }
192 sk >>= 8;
193 sk2 >>= 8;
194 if (sk > 32767 || sk < -32767 || sk2 > 32767 || sk2 < -32767)
195 printk(KERN_WARNING "DTMF-Detection overflow\n");
196 /* compute |X(k)|**2 */
197 result[k] =
198 (sk * sk) -
199 (((cos2pik[k] * sk) >> 15) * sk2) +
200 (sk2 * sk2);
201 }
202
203 /* our (squared) coefficients have been calculated, we need to process
204 * them.
205 */
206coefficients:
207 tresh = 0;
208 for (i = 0; i < NCOEFF; i++) {
209 if (result[i] < 0)
210 result[i] = 0;
211 if (result[i] > dsp->dtmf.treshold) {
212 if (result[i] > tresh)
213 tresh = result[i];
214 }
215 }
216
217 if (tresh == 0) {
218 what = 0;
219 goto storedigit;
220 }
221
222 if (dsp_debug & DEBUG_DSP_DTMFCOEFF)
223 printk(KERN_DEBUG "a %3d %3d %3d %3d %3d %3d %3d %3d"
224 " tr:%3d r %3d %3d %3d %3d %3d %3d %3d %3d\n",
225 result[0]/10000, result[1]/10000, result[2]/10000,
226 result[3]/10000, result[4]/10000, result[5]/10000,
227 result[6]/10000, result[7]/10000, tresh/10000,
228 result[0]/(tresh/100), result[1]/(tresh/100),
229 result[2]/(tresh/100), result[3]/(tresh/100),
230 result[4]/(tresh/100), result[5]/(tresh/100),
231 result[6]/(tresh/100), result[7]/(tresh/100));
232
233 /* calc digit (lowgroup/highgroup) */
234 lowgroup = -1;
235 highgroup = -1;
236 treshl = tresh >> 3; /* tones that are not on must be at least 9 dB below */
237 tresh = tresh >> 2; /* touchtones must match within 6 dB */
238 for (i = 0; i < NCOEFF; i++) {
239 if (result[i] < treshl)
240 continue; /* ignore */
241 if (result[i] < tresh) {
242 lowgroup = -1;
243 highgroup = -1;
244 break; /* noise inbetween */
245 }
246 /* good level found. This is allowed only one time per group */
247 if (i < NCOEFF/2) {
248 /* lowgroup */
249 if (lowgroup >= 0) {
250 /* Bad. Another tone found. */
251 lowgroup = -1;
252 break;
253 } else
254 lowgroup = i;
255 } else {
256 /* higroup */
257 if (highgroup >= 0) {
258 /* Bad. Another tone found. */
259 highgroup = -1;
260 break;
261 } else
262 highgroup = i-(NCOEFF/2);
263 }
264 }
265
266 /* get digit or null */
267 what = 0;
268 if (lowgroup >= 0 && highgroup >= 0)
269 what = dtmf_matrix[lowgroup][highgroup];
270
271storedigit:
272 if (what && (dsp_debug & DEBUG_DSP_DTMF))
273 printk(KERN_DEBUG "DTMF what: %c\n", what);
274
275 if (dsp->dtmf.lastwhat != what)
276 dsp->dtmf.count = 0;
277
278 /* the tone (or no tone) must be seen 3 times in a row without change */
279 if (dsp->dtmf.count == 2) {
280 if (dsp->dtmf.lastdigit != what) {
281 dsp->dtmf.lastdigit = what;
282 if (what) {
283 if (dsp_debug & DEBUG_DSP_DTMF)
284 printk(KERN_DEBUG "DTMF digit: %c\n",
285 what);
286 if ((strlen(dsp->dtmf.digits)+1)
287 < sizeof(dsp->dtmf.digits)) {
288 dsp->dtmf.digits[strlen(
289 dsp->dtmf.digits)+1] = '\0';
290 dsp->dtmf.digits[strlen(
291 dsp->dtmf.digits)] = what;
292 }
293 }
294 }
295 } else
296 dsp->dtmf.count++;
297
298 dsp->dtmf.lastwhat = what;
299
300 goto again;
301}
302
303
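For each of the eight DTMF frequencies, the decoder above runs the Goertzel recursion sk = ((cos2pik[k]*sk1)>>15) - sk2 + sample over one frame and takes the squared magnitude sk*sk - ((cos2pik[k]*sk)>>15)*sk2 + sk2*sk2, then requires exactly one row tone and one column tone above the threshold. A minimal user-space sketch of the same recursion in floating point (the driver uses the Q15 cos2pik[] table; the frame size and test frequencies below are only illustrative):

/* Minimal user-space sketch of the Goertzel recursion used above,
 * in floating point instead of the driver's fixed-point tables.
 */
#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* squared magnitude of the DFT bin closest to 'freq' over 'n' samples */
static double goertzel_power(const short *samples, int n, double freq,
			     double sample_rate)
{
	double coeff = 2.0 * cos(2.0 * M_PI * freq / sample_rate);
	double s0, s1 = 0.0, s2 = 0.0;
	int i;

	for (i = 0; i < n; i++) {
		s0 = coeff * s1 - s2 + samples[i];
		s2 = s1;
		s1 = s0;
	}
	/* |X(k)|^2 = s1^2 + s2^2 - coeff*s1*s2, as in result[k] above */
	return s1 * s1 + s2 * s2 - coeff * s1 * s2;
}

int main(void)
{
	short buf[102];		/* one frame; 102 is only an example block size */
	int i;

	/* synthesize the DTMF digit "1" (697 Hz + 1209 Hz) at 8000 samples/s */
	for (i = 0; i < 102; i++)
		buf[i] = (short)(5000.0 * sin(2 * M_PI * 697 * i / 8000.0) +
				 5000.0 * sin(2 * M_PI * 1209 * i / 8000.0));

	printf("697 Hz power: %.0f\n", goertzel_power(buf, 102, 697, 8000));
	printf("941 Hz power: %.0f\n", goertzel_power(buf, 102, 941, 8000));
	return 0;
}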
diff --git a/drivers/isdn/mISDN/dsp_ecdis.h b/drivers/isdn/mISDN/dsp_ecdis.h
new file mode 100644
index 000000000000..8a20af43308b
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_ecdis.h
@@ -0,0 +1,110 @@
1/*
2 * SpanDSP - a series of DSP components for telephony
3 *
4 * ec_disable_detector.h - A detector which should eventually meet the
5 * G.164/G.165 requirements for detecting the
6 * 2100Hz echo cancellor disable tone.
7 *
8 * Written by Steve Underwood <steveu@coppice.org>
9 *
10 * Copyright (C) 2001 Steve Underwood
11 *
12 * All rights reserved.
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 *
28 */
29
30#include "dsp_biquad.h"
31
32struct ec_disable_detector_state {
33 struct biquad2_state notch;
34 int notch_level;
35 int channel_level;
36 int tone_present;
37 int tone_cycle_duration;
38 int good_cycles;
39 int hit;
40};
41
42
43#define FALSE 0
44#define TRUE (!FALSE)
45
46static inline void
47echo_can_disable_detector_init(struct ec_disable_detector_state *det)
48{
49 /* Elliptic notch */
50 /* This is actually centred at 2095Hz, but gets the balance we want, due
51 to the asymmetric walls of the notch */
52 biquad2_init(&det->notch,
53 (int32_t) (-0.7600000*32768.0),
54 (int32_t) (-0.1183852*32768.0),
55 (int32_t) (-0.5104039*32768.0),
56 (int32_t) (0.1567596*32768.0),
57 (int32_t) (1.0000000*32768.0));
58
59 det->channel_level = 0;
60 det->notch_level = 0;
61 det->tone_present = FALSE;
62 det->tone_cycle_duration = 0;
63 det->good_cycles = 0;
64 det->hit = 0;
65}
66/*- End of function --------------------------------------------------------*/
67
68static inline int
69echo_can_disable_detector_update(struct ec_disable_detector_state *det,
70int16_t amp)
71{
72 int16_t notched;
73
74 notched = biquad2(&det->notch, amp);
75 /* Estimate the overall energy in the channel, and the energy in
76 the notch (i.e. overall channel energy - tone energy => noise).
77 Use abs instead of multiply for speed (is it really faster?).
78 Damp the overall energy a little more for a stable result.
79 Damp the notch energy a little less, so we don't damp out the
80 blip every time the phase reverses */
81 det->channel_level += ((abs(amp) - det->channel_level) >> 5);
82 det->notch_level += ((abs(notched) - det->notch_level) >> 4);
83 if (det->channel_level > 280) {
84 /* There is adequate energy in the channel.
85 Is it mostly at 2100Hz? */
86 if (det->notch_level*6 < det->channel_level) {
87 /* The notch says yes, so we have the tone. */
88 if (!det->tone_present) {
89 /* Do we get a kick every 450+-25ms? */
90 if (det->tone_cycle_duration >= 425*8
91 && det->tone_cycle_duration <= 475*8) {
92 det->good_cycles++;
93 if (det->good_cycles > 2)
94 det->hit = TRUE;
95 }
96 det->tone_cycle_duration = 0;
97 }
98 det->tone_present = TRUE;
99 } else
100 det->tone_present = FALSE;
101 det->tone_cycle_duration++;
102 } else {
103 det->tone_present = FALSE;
104 det->tone_cycle_duration = 0;
105 det->good_cycles = 0;
106 }
107 return det->hit;
108}
109/*- End of function --------------------------------------------------------*/
110/*- End of file ------------------------------------------------------------*/
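The detector above tracks two "leaky" integer level estimates, channel_level (shift 5) and notch_level (shift 4), and treats the 2100 Hz tone as present while the notched energy stays below one sixth of the channel energy. A stand-alone sketch of that level tracker, fed with constant made-up amplitudes instead of the biquad output:

/* Stand-alone sketch of the integer leaky level estimator used above:
 * level += (|amp| - level) >> shift is a first-order low-pass whose
 * time constant is roughly 2^shift samples.
 */
#include <stdio.h>
#include <stdlib.h>

struct level_est {
	int level;
	int shift;
};

static int level_update(struct level_est *e, int amp)
{
	e->level += (abs(amp) - e->level) >> e->shift;
	return e->level;
}

int main(void)
{
	struct level_est channel = { 0, 5 };	/* slower, like channel_level */
	struct level_est notch   = { 0, 4 };	/* faster, like notch_level */
	int i;

	/* made-up amplitudes: strong raw signal, small residue after the notch */
	for (i = 0; i < 1000; i++) {
		level_update(&channel, 1000);
		level_update(&notch, 100);
	}
	printf("channel level %d, notch level %d -> tone %s\n",
	       channel.level, notch.level,
	       notch.level * 6 < channel.level ? "present" : "absent");
	return 0;
}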
diff --git a/drivers/isdn/mISDN/dsp_hwec.c b/drivers/isdn/mISDN/dsp_hwec.c
new file mode 100644
index 000000000000..eb892d9dd5c6
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_hwec.c
@@ -0,0 +1,138 @@
1/*
2 * dsp_hwec.c:
3 * builtin mISDN dsp pipeline element for enabling the hw echocanceller
4 *
5 * Copyright (C) 2007, Nadi Sarrar
6 *
7 * Nadi Sarrar <nadi@beronet.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 *
23 * The full GNU General Public License is included in this distribution in the
24 * file called LICENSE.
25 *
26 */
27
28#include <linux/kernel.h>
29#include <linux/string.h>
30#include <linux/mISDNdsp.h>
31#include <linux/mISDNif.h>
32#include "core.h"
33#include "dsp.h"
34#include "dsp_hwec.h"
35
36static struct mISDN_dsp_element_arg args[] = {
37 { "deftaps", "128", "Set the number of taps of cancellation." },
38};
39
40static struct mISDN_dsp_element dsp_hwec_p = {
41 .name = "hwec",
42 .new = NULL,
43 .free = NULL,
44 .process_tx = NULL,
45 .process_rx = NULL,
46 .num_args = sizeof(args) / sizeof(struct mISDN_dsp_element_arg),
47 .args = args,
48};
49struct mISDN_dsp_element *dsp_hwec = &dsp_hwec_p;
50
51void dsp_hwec_enable(struct dsp *dsp, const char *arg)
52{
53 int deftaps = 128,
54 len;
55 struct mISDN_ctrl_req cq;
56
57 if (!dsp) {
58 printk(KERN_ERR "%s: failed to enable hwec: dsp is NULL\n",
59 __func__);
60 return;
61 }
62
63 if (!arg)
64 goto _do;
65
66 len = strlen(arg);
67 if (!len)
68 goto _do;
69
70 {
71 char _dup[len + 1];
72 char *dup, *tok, *name, *val;
73 int tmp;
74
75 strcpy(_dup, arg);
76 dup = _dup;
77
78 while ((tok = strsep(&dup, ","))) {
79 if (!strlen(tok))
80 continue;
81 name = strsep(&tok, "=");
82 val = tok;
83
84 if (!val)
85 continue;
86
87 if (!strcmp(name, "deftaps")) {
88 if (sscanf(val, "%d", &tmp) == 1)
89 deftaps = tmp;
90 }
91 }
92 }
93
94_do:
95 printk(KERN_DEBUG "%s: enabling hwec with deftaps=%d\n",
96 __func__, deftaps);
97 memset(&cq, 0, sizeof(cq));
98 cq.op = MISDN_CTRL_HFC_ECHOCAN_ON;
99 cq.p1 = deftaps;
100 if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
101 printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
102 __func__);
103 return;
104 }
105}
106
107void dsp_hwec_disable(struct dsp *dsp)
108{
109 struct mISDN_ctrl_req cq;
110
111 if (!dsp) {
112 printk(KERN_ERR "%s: failed to disable hwec: dsp is NULL\n",
113 __func__);
114 return;
115 }
116
117 printk(KERN_DEBUG "%s: disabling hwec\n", __func__);
118 memset(&cq, 0, sizeof(cq));
119 cq.op = MISDN_CTRL_HFC_ECHOCAN_OFF;
120 if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
121 printk(KERN_DEBUG "%s: CONTROL_CHANNEL failed\n",
122 __func__);
123 return;
124 }
125}
126
127int dsp_hwec_init(void)
128{
129 mISDN_dsp_element_register(dsp_hwec);
130
131 return 0;
132}
133
134void dsp_hwec_exit(void)
135{
136 mISDN_dsp_element_unregister(dsp_hwec);
137}
138
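dsp_hwec_enable() above accepts a comma-separated option string and understands a single key, so for example "deftaps=256" selects 256 taps while unknown keys are skipped. A user-space sketch of the same strsep()-based key=value scan (a heap copy is made because strsep() modifies its argument; parse_deftaps() is a made-up name):

/* User-space sketch of the "key=value,key=value" parsing done in
 * dsp_hwec_enable(); only "deftaps" is recognized, as above.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_deftaps(const char *arg, int def)
{
	char *dup, *walk, *tok, *name, *val;
	int taps = def, tmp;

	dup = strdup(arg);		/* strsep() modifies the string */
	if (!dup)
		return def;
	walk = dup;
	while ((tok = strsep(&walk, ","))) {
		if (!*tok)
			continue;
		name = strsep(&tok, "=");
		val = tok;
		if (!val)
			continue;
		if (!strcmp(name, "deftaps") && sscanf(val, "%d", &tmp) == 1)
			taps = tmp;
	}
	free(dup);
	return taps;
}

int main(void)
{
	printf("%d\n", parse_deftaps("deftaps=256", 128));	/* 256 */
	printf("%d\n", parse_deftaps("bogus=1,,", 128));	/* 128 */
	return 0;
}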
diff --git a/drivers/isdn/mISDN/dsp_hwec.h b/drivers/isdn/mISDN/dsp_hwec.h
new file mode 100644
index 000000000000..eebe80c3f713
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_hwec.h
@@ -0,0 +1,10 @@
1/*
2 * dsp_hwec.h
3 */
4
5extern struct mISDN_dsp_element *dsp_hwec;
6extern void dsp_hwec_enable(struct dsp *dsp, const char *arg);
7extern void dsp_hwec_disable(struct dsp *dsp);
8extern int dsp_hwec_init(void);
9extern void dsp_hwec_exit(void);
10
diff --git a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c
new file mode 100644
index 000000000000..850260ab57d0
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_pipeline.c
@@ -0,0 +1,348 @@
1/*
2 * dsp_pipeline.c: pipelined audio processing
3 *
4 * Copyright (C) 2007, Nadi Sarrar
5 *
6 * Nadi Sarrar <nadi@beronet.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * The full GNU General Public License is included in this distribution in the
23 * file called LICENSE.
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/list.h>
29#include <linux/string.h>
30#include <linux/mISDNif.h>
31#include <linux/mISDNdsp.h>
32#include "dsp.h"
33#include "dsp_hwec.h"
34
35/* uncomment for debugging */
36/*#define PIPELINE_DEBUG*/
37
38struct dsp_pipeline_entry {
39 struct mISDN_dsp_element *elem;
40 void *p;
41 struct list_head list;
42};
43struct dsp_element_entry {
44 struct mISDN_dsp_element *elem;
45 struct device dev;
46 struct list_head list;
47};
48
49static LIST_HEAD(dsp_elements);
50
51/* sysfs */
52static struct class *elements_class;
53
54static ssize_t
55attr_show_args(struct device *dev, struct device_attribute *attr, char *buf)
56{
57 struct mISDN_dsp_element *elem = dev_get_drvdata(dev);
58 ssize_t len = 0;
59 int i = 0;
60
61 *buf = 0;
62 for (; i < elem->num_args; ++i)
63 len = sprintf(buf, "%sName: %s\n%s%s%sDescription: %s\n"
64 "\n", buf,
65 elem->args[i].name,
66 elem->args[i].def ? "Default: " : "",
67 elem->args[i].def ? elem->args[i].def : "",
68 elem->args[i].def ? "\n" : "",
69 elem->args[i].desc);
70
71 return len;
72}
73
74static struct device_attribute element_attributes[] = {
75 __ATTR(args, 0444, attr_show_args, NULL),
76};
77
78int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
79{
80 struct dsp_element_entry *entry;
81 int ret, i;
82
83 if (!elem)
84 return -EINVAL;
85
86 entry = kzalloc(sizeof(struct dsp_element_entry), GFP_KERNEL);
87 if (!entry)
88 return -ENOMEM;
89
90 entry->elem = elem;
91
92 entry->dev.class = elements_class;
93 dev_set_drvdata(&entry->dev, elem);
94 snprintf(entry->dev.bus_id, BUS_ID_SIZE, elem->name);
95 ret = device_register(&entry->dev);
96 if (ret) {
97 printk(KERN_ERR "%s: failed to register %s\n",
98 __func__, elem->name);
99 goto err1;
100 }
101
102 for (i = 0; i < (sizeof(element_attributes)
103 / sizeof(struct device_attribute)); ++i)
104 ret = device_create_file(&entry->dev,
105 &element_attributes[i]);
106 if (ret) {
107 printk(KERN_ERR "%s: failed to create device file\n",
108 __func__);
109 goto err2;
110 }
111
112 list_add_tail(&entry->list, &dsp_elements);
113
114 printk(KERN_DEBUG "%s: %s registered\n", __func__, elem->name);
115
116 return 0;
117
118err2:
119 device_unregister(&entry->dev);
120err1:
121 kfree(entry);
122 return ret;
123}
124EXPORT_SYMBOL(mISDN_dsp_element_register);
125
126void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem)
127{
128 struct dsp_element_entry *entry, *n;
129
130 if (!elem)
131 return;
132
133 list_for_each_entry_safe(entry, n, &dsp_elements, list)
134 if (entry->elem == elem) {
135 list_del(&entry->list);
136 device_unregister(&entry->dev);
137 kfree(entry);
138 printk(KERN_DEBUG "%s: %s unregistered\n",
139 __func__, elem->name);
140 return;
141 }
142 printk(KERN_ERR "%s: element %s not in list.\n", __func__, elem->name);
143}
144EXPORT_SYMBOL(mISDN_dsp_element_unregister);
145
146int dsp_pipeline_module_init(void)
147{
148 elements_class = class_create(THIS_MODULE, "dsp_pipeline");
149 if (IS_ERR(elements_class))
150 return PTR_ERR(elements_class);
151
152#ifdef PIPELINE_DEBUG
153 printk(KERN_DEBUG "%s: dsp pipeline module initialized\n", __func__);
154#endif
155
156 dsp_hwec_init();
157
158 return 0;
159}
160
161void dsp_pipeline_module_exit(void)
162{
163 struct dsp_element_entry *entry, *n;
164
165 dsp_hwec_exit();
166
167 class_destroy(elements_class);
168
169 list_for_each_entry_safe(entry, n, &dsp_elements, list) {
170 list_del(&entry->list);
171 printk(KERN_WARNING "%s: element was still registered: %s\n",
172 __func__, entry->elem->name);
173 kfree(entry);
174 }
175
176 printk(KERN_DEBUG "%s: dsp pipeline module exited\n", __func__);
177}
178
179int dsp_pipeline_init(struct dsp_pipeline *pipeline)
180{
181 if (!pipeline)
182 return -EINVAL;
183
184 INIT_LIST_HEAD(&pipeline->list);
185
186#ifdef PIPELINE_DEBUG
187 printk(KERN_DEBUG "%s: dsp pipeline ready\n", __func__);
188#endif
189
190 return 0;
191}
192
193static inline void _dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
194{
195 struct dsp_pipeline_entry *entry, *n;
196
197 list_for_each_entry_safe(entry, n, &pipeline->list, list) {
198 list_del(&entry->list);
199 if (entry->elem == dsp_hwec)
200 dsp_hwec_disable(container_of(pipeline, struct dsp,
201 pipeline));
202 else
203 entry->elem->free(entry->p);
204 kfree(entry);
205 }
206}
207
208void dsp_pipeline_destroy(struct dsp_pipeline *pipeline)
209{
210
211 if (!pipeline)
212 return;
213
214 _dsp_pipeline_destroy(pipeline);
215
216#ifdef PIPELINE_DEBUG
217 printk(KERN_DEBUG "%s: dsp pipeline destroyed\n", __func__);
218#endif
219}
220
221int dsp_pipeline_build(struct dsp_pipeline *pipeline, const char *cfg)
222{
223 int len, incomplete = 0, found = 0;
224 char *dup, *tok, *name, *args;
225 struct dsp_element_entry *entry, *n;
226 struct dsp_pipeline_entry *pipeline_entry;
227 struct mISDN_dsp_element *elem;
228
229 if (!pipeline)
230 return -EINVAL;
231
232 if (!list_empty(&pipeline->list))
233 _dsp_pipeline_destroy(pipeline);
234
235 if (!cfg)
236 return 0;
237
238 len = strlen(cfg);
239 if (!len)
240 return 0;
241
242 dup = kmalloc(len + 1, GFP_KERNEL);
243 if (!dup)
244 return 0;
245 strcpy(dup, cfg);
246 while ((tok = strsep(&dup, "|"))) {
247 if (!strlen(tok))
248 continue;
249 name = strsep(&tok, "(");
250 args = strsep(&tok, ")");
251 if (args && !*args)
252 args = 0;
253
254 list_for_each_entry_safe(entry, n, &dsp_elements, list)
255 if (!strcmp(entry->elem->name, name)) {
256 elem = entry->elem;
257
258 pipeline_entry = kmalloc(sizeof(struct
259 dsp_pipeline_entry), GFP_KERNEL);
260 if (!pipeline_entry) {
261 printk(KERN_DEBUG "%s: failed to add "
262 "entry to pipeline: %s (out of "
263 "memory)\n", __func__, elem->name);
264 incomplete = 1;
265 goto _out;
266 }
267 pipeline_entry->elem = elem;
268
269 if (elem == dsp_hwec) {
270 /* This is a hack to make the hwec
271 available as a pipeline module */
272 dsp_hwec_enable(container_of(pipeline,
273 struct dsp, pipeline), args);
274 list_add_tail(&pipeline_entry->list,
275 &pipeline->list);
276 } else {
277 pipeline_entry->p = elem->new(args);
278 if (pipeline_entry->p) {
279 list_add_tail(&pipeline_entry->
280 list, &pipeline->list);
281#ifdef PIPELINE_DEBUG
282 printk(KERN_DEBUG "%s: created "
283 "instance of %s%s%s\n",
284 __func__, name, args ?
285 " with args " : "", args ?
286 args : "");
287#endif
288 } else {
289 printk(KERN_DEBUG "%s: failed "
290 "to add entry to pipeline: "
291 "%s (new() returned NULL)\n",
292 __func__, elem->name);
293 kfree(pipeline_entry);
294 incomplete = 1;
295 }
296 }
297 found = 1;
298 break;
299 }
300
301 if (found)
302 found = 0;
303 else {
304 printk(KERN_DEBUG "%s: element not found, skipping: "
305 "%s\n", __func__, name);
306 incomplete = 1;
307 }
308 }
309
310_out:
311 if (!list_empty(&pipeline->list))
312 pipeline->inuse = 1;
313 else
314 pipeline->inuse = 0;
315
316#ifdef PIPELINE_DEBUG
317 printk(KERN_DEBUG "%s: dsp pipeline built%s: %s\n",
318 __func__, incomplete ? " incomplete" : "", cfg);
319#endif
320 kfree(dup);
321 return 0;
322}
323
324void dsp_pipeline_process_tx(struct dsp_pipeline *pipeline, u8 *data, int len)
325{
326 struct dsp_pipeline_entry *entry;
327
328 if (!pipeline)
329 return;
330
331 list_for_each_entry(entry, &pipeline->list, list)
332 if (entry->elem->process_tx)
333 entry->elem->process_tx(entry->p, data, len);
334}
335
336void dsp_pipeline_process_rx(struct dsp_pipeline *pipeline, u8 *data, int len)
337{
338 struct dsp_pipeline_entry *entry;
339
340 if (!pipeline)
341 return;
342
343 list_for_each_entry_reverse(entry, &pipeline->list, list)
344 if (entry->elem->process_rx)
345 entry->elem->process_rx(entry->p, data, len);
346}
347
348
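dsp_pipeline_build() above parses a configuration string of the form "name(args)|name|...": it splits on '|', separates each element name from its optional argument list and instantiates the elements in order, with hwec handled as a special case. A user-space sketch of just the tokenizing (elements are printed instead of looked up; a separate walk pointer keeps the original allocation free()-able):

/* User-space sketch of the "elem1(args)|elem2|..." tokenizing done in
 * dsp_pipeline_build(); elements are printed instead of instantiated.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void build_pipeline(const char *cfg)
{
	char *dup, *walk, *tok, *name, *args;

	dup = strdup(cfg);
	if (!dup)
		return;
	walk = dup;
	while ((tok = strsep(&walk, "|"))) {
		if (!*tok)
			continue;
		name = strsep(&tok, "(");
		args = strsep(&tok, ")");
		if (args && !*args)
			args = NULL;
		printf("element %s%s%s\n", name,
		       args ? " with args " : "", args ? args : "");
	}
	free(dup);
}

int main(void)
{
	build_pipeline("hwec(deftaps=128)|somefilter");
	return 0;
}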
diff --git a/drivers/isdn/mISDN/dsp_tones.c b/drivers/isdn/mISDN/dsp_tones.c
new file mode 100644
index 000000000000..23dd0dd21524
--- /dev/null
+++ b/drivers/isdn/mISDN/dsp_tones.c
@@ -0,0 +1,551 @@
1/*
2 * Audio support data for ISDN4Linux.
3 *
4 * Copyright Andreas Eversberg (jolly@eversberg.eu)
5 *
6 * This software may be used and distributed according to the terms
7 * of the GNU General Public License, incorporated herein by reference.
8 *
9 */
10
11#include <linux/mISDNif.h>
12#include <linux/mISDNdsp.h>
13#include "core.h"
14#include "dsp.h"
15
16
17#define DATA_S sample_silence
18#define SIZE_S (&sizeof_silence)
19#define DATA_GA sample_german_all
20#define SIZE_GA (&sizeof_german_all)
21#define DATA_GO sample_german_old
22#define SIZE_GO (&sizeof_german_old)
23#define DATA_DT sample_american_dialtone
24#define SIZE_DT (&sizeof_american_dialtone)
25#define DATA_RI sample_american_ringing
26#define SIZE_RI (&sizeof_american_ringing)
27#define DATA_BU sample_american_busy
28#define SIZE_BU (&sizeof_american_busy)
29#define DATA_S1 sample_special1
30#define SIZE_S1 (&sizeof_special1)
31#define DATA_S2 sample_special2
32#define SIZE_S2 (&sizeof_special2)
33#define DATA_S3 sample_special3
34#define SIZE_S3 (&sizeof_special3)
35
36/***************/
37/* tones loops */
38/***************/
39
40/* all tones are alaw encoded */
41/* the last sample+1 is in phase with the first sample. the error is low */
42
43static u8 sample_german_all[] = {
44 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
45 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
46 0xdc, 0xfc, 0x6c,
47 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
48 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
49 0xdc, 0xfc, 0x6c,
50 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
51 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
52 0xdc, 0xfc, 0x6c,
53 0x80, 0xab, 0x81, 0x6d, 0xfd, 0xdd, 0x5d, 0x9d,
54 0x4d, 0xd1, 0x89, 0x88, 0xd0, 0x4c, 0x9c, 0x5c,
55 0xdc, 0xfc, 0x6c,
56};
57static u32 sizeof_german_all = sizeof(sample_german_all);
58
59static u8 sample_german_old[] = {
60 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
61 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
62 0x8c,
63 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
64 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
65 0x8c,
66 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
67 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
68 0x8c,
69 0xec, 0x68, 0xe1, 0x6d, 0x6d, 0x91, 0x51, 0xed,
70 0x6d, 0x01, 0x1e, 0x10, 0x0c, 0x90, 0x60, 0x70,
71 0x8c,
72};
73static u32 sizeof_german_old = sizeof(sample_german_old);
74
75static u8 sample_american_dialtone[] = {
76 0x2a, 0x18, 0x90, 0x6c, 0x4c, 0xbc, 0x4c, 0x6c,
77 0x10, 0x58, 0x32, 0xb9, 0x31, 0x2d, 0x8d, 0x0d,
78 0x8d, 0x2d, 0x31, 0x99, 0x0f, 0x28, 0x60, 0xf0,
79 0xd0, 0x50, 0xd0, 0x30, 0x60, 0x08, 0x8e, 0x67,
80 0x09, 0x19, 0x21, 0xe1, 0xd9, 0xb9, 0x29, 0x67,
81 0x83, 0x02, 0xce, 0xbe, 0xee, 0x1a, 0x1b, 0xef,
82 0xbf, 0xcf, 0x03, 0x82, 0x66, 0x28, 0xb8, 0xd8,
83 0xe0, 0x20, 0x18, 0x08, 0x66, 0x8f, 0x09, 0x61,
84 0x31, 0xd1, 0x51, 0xd1, 0xf1, 0x61, 0x29, 0x0e,
85 0x98, 0x30, 0x2c, 0x8c, 0x0c, 0x8c, 0x2c, 0x30,
86 0xb8, 0x33, 0x59, 0x11, 0x6d, 0x4d, 0xbd, 0x4d,
87 0x6d, 0x91, 0x19,
88};
89static u32 sizeof_american_dialtone = sizeof(sample_american_dialtone);
90
91static u8 sample_american_ringing[] = {
92 0x2a, 0xe0, 0xac, 0x0c, 0xbc, 0x4c, 0x8c, 0x90,
93 0x48, 0xc7, 0xc1, 0xed, 0xcd, 0x4d, 0xcd, 0xed,
94 0xc1, 0xb7, 0x08, 0x30, 0xec, 0xcc, 0xcc, 0x8c,
95 0x10, 0x58, 0x1a, 0x99, 0x71, 0xed, 0x8d, 0x8d,
96 0x2d, 0x41, 0x89, 0x9e, 0x20, 0x70, 0x2c, 0xec,
97 0x2c, 0x70, 0x20, 0x86, 0x77, 0xe1, 0x31, 0x11,
98 0xd1, 0xf1, 0x81, 0x09, 0xa3, 0x56, 0x58, 0x00,
99 0x40, 0xc0, 0x60, 0x38, 0x46, 0x43, 0x57, 0x39,
100 0xd9, 0x59, 0x99, 0xc9, 0x77, 0x2f, 0x2e, 0xc6,
101 0xd6, 0x28, 0xd6, 0x36, 0x26, 0x2e, 0x8a, 0xa3,
102 0x43, 0x63, 0x4b, 0x4a, 0x62, 0x42, 0xa2, 0x8b,
103 0x2f, 0x27, 0x37, 0xd7, 0x29, 0xd7, 0xc7, 0x2f,
104 0x2e, 0x76, 0xc8, 0x98, 0x58, 0xd8, 0x38, 0x56,
105 0x42, 0x47, 0x39, 0x61, 0xc1, 0x41, 0x01, 0x59,
106 0x57, 0xa2, 0x08, 0x80, 0xf0, 0xd0, 0x10, 0x30,
107 0xe0, 0x76, 0x87, 0x21, 0x71, 0x2d, 0xed, 0x2d,
108 0x71, 0x21, 0x9f, 0x88, 0x40, 0x2c, 0x8c, 0x8c,
109 0xec, 0x70, 0x98, 0x1b, 0x59, 0x11, 0x8d, 0xcd,
110 0xcd, 0xed, 0x31, 0x09, 0xb6, 0xc0, 0xec, 0xcc,
111 0x4c, 0xcc, 0xec, 0xc0, 0xc6, 0x49, 0x91, 0x8d,
112 0x4d, 0xbd, 0x0d, 0xad, 0xe1,
113};
114static u32 sizeof_american_ringing = sizeof(sample_american_ringing);
115
116static u8 sample_american_busy[] = {
117 0x2a, 0x00, 0x6c, 0x4c, 0x4c, 0x6c, 0xb0, 0x66,
118 0x99, 0x11, 0x6d, 0x8d, 0x2d, 0x41, 0xd7, 0x96,
119 0x60, 0xf0, 0x70, 0x40, 0x58, 0xf6, 0x53, 0x57,
120 0x09, 0x89, 0xd7, 0x5f, 0xe3, 0x2a, 0xe3, 0x5f,
121 0xd7, 0x89, 0x09, 0x57, 0x53, 0xf6, 0x58, 0x40,
122 0x70, 0xf0, 0x60, 0x96, 0xd7, 0x41, 0x2d, 0x8d,
123 0x6d, 0x11, 0x99, 0x66, 0xb0, 0x6c, 0x4c, 0x4c,
124 0x6c, 0x00, 0x2a, 0x01, 0x6d, 0x4d, 0x4d, 0x6d,
125 0xb1, 0x67, 0x98, 0x10, 0x6c, 0x8c, 0x2c, 0x40,
126 0xd6, 0x97, 0x61, 0xf1, 0x71, 0x41, 0x59, 0xf7,
127 0x52, 0x56, 0x08, 0x88, 0xd6, 0x5e, 0xe2, 0x2a,
128 0xe2, 0x5e, 0xd6, 0x88, 0x08, 0x56, 0x52, 0xf7,
129 0x59, 0x41, 0x71, 0xf1, 0x61, 0x97, 0xd6, 0x40,
130 0x2c, 0x8c, 0x6c, 0x10, 0x98, 0x67, 0xb1, 0x6d,
131 0x4d, 0x4d, 0x6d, 0x01,
132};
133static u32 sizeof_american_busy = sizeof(sample_american_busy);
134
135static u8 sample_special1[] = {
136 0x2a, 0x2c, 0xbc, 0x6c, 0xd6, 0x71, 0xbd, 0x0d,
137 0xd9, 0x80, 0xcc, 0x4c, 0x40, 0x39, 0x0d, 0xbd,
138 0x11, 0x86, 0xec, 0xbc, 0xec, 0x0e, 0x51, 0xbd,
139 0x8d, 0x89, 0x30, 0x4c, 0xcc, 0xe0, 0xe1, 0xcd,
140 0x4d, 0x31, 0x88, 0x8c, 0xbc, 0x50, 0x0f, 0xed,
141 0xbd, 0xed, 0x87, 0x10, 0xbc, 0x0c, 0x38, 0x41,
142 0x4d, 0xcd, 0x81, 0xd8, 0x0c, 0xbc, 0x70, 0xd7,
143 0x6d, 0xbd, 0x2d,
144};
145static u32 sizeof_special1 = sizeof(sample_special1);
146
147static u8 sample_special2[] = {
148 0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc,
149 0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d,
150 0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6,
151 0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0,
152 0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd,
153 0x2a, 0xcc, 0x8c, 0xd7, 0x4d, 0x2d, 0x18, 0xbc,
154 0x10, 0xc1, 0xbd, 0xc1, 0x10, 0xbc, 0x18, 0x2d,
155 0x4d, 0xd7, 0x8c, 0xcc, 0x2a, 0xcd, 0x8d, 0xd6,
156 0x4c, 0x2c, 0x19, 0xbd, 0x11, 0xc0, 0xbc, 0xc0,
157 0x11, 0xbd, 0x19, 0x2c, 0x4c, 0xd6, 0x8d, 0xcd,
158};
159static u32 sizeof_special2 = sizeof(sample_special2);
160
161static u8 sample_special3[] = {
162 0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1,
163 0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c,
164 0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc,
165 0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7,
166 0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd,
167 0x2a, 0xbc, 0x18, 0xcd, 0x11, 0x2c, 0x8c, 0xc1,
168 0x4d, 0xd6, 0xbc, 0xd6, 0x4d, 0xc1, 0x8c, 0x2c,
169 0x11, 0xcd, 0x18, 0xbc, 0x2a, 0xbd, 0x19, 0xcc,
170 0x10, 0x2d, 0x8d, 0xc0, 0x4c, 0xd7, 0xbd, 0xd7,
171 0x4c, 0xc0, 0x8d, 0x2d, 0x10, 0xcc, 0x19, 0xbd,
172};
173static u32 sizeof_special3 = sizeof(sample_special3);
174
175static u8 sample_silence[] = {
176 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
177 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
178 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
179 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
180 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
181 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
182 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
183 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
184 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
185 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
186 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
187 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a,
188};
189static u32 sizeof_silence = sizeof(sample_silence);
190
191struct tones_samples {
192 u32 *len;
193 u8 *data;
194};
195static struct
196tones_samples samples[] = {
197 {&sizeof_german_all, sample_german_all},
198 {&sizeof_german_old, sample_german_old},
199 {&sizeof_american_dialtone, sample_american_dialtone},
200 {&sizeof_american_ringing, sample_american_ringing},
201 {&sizeof_american_busy, sample_american_busy},
202 {&sizeof_special1, sample_special1},
203 {&sizeof_special2, sample_special2},
204 {&sizeof_special3, sample_special3},
205 {NULL, NULL},
206};
207
208/***********************************
209 * generate ulaw from alaw samples *
210 ***********************************/
211
212void
213dsp_audio_generate_ulaw_samples(void)
214{
215 int i, j;
216
217 i = 0;
218 while (samples[i].len) {
219 j = 0;
220 while (j < (*samples[i].len)) {
221 samples[i].data[j] =
222 dsp_audio_alaw_to_ulaw[samples[i].data[j]];
223 j++;
224 }
225 i++;
226 }
227}
228
229
230/****************************
231 * tone sequence definition *
232 ****************************/
233
234struct pattern {
235 int tone;
236 u8 *data[10];
237 u32 *siz[10];
238 u32 seq[10];
239} pattern[] = {
240 {TONE_GERMAN_DIALTONE,
241 {DATA_GA, 0, 0, 0, 0, 0, 0, 0, 0, 0},
242 {SIZE_GA, 0, 0, 0, 0, 0, 0, 0, 0, 0},
243 {1900, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
244
245 {TONE_GERMAN_OLDDIALTONE,
246 {DATA_GO, 0, 0, 0, 0, 0, 0, 0, 0, 0},
247 {SIZE_GO, 0, 0, 0, 0, 0, 0, 0, 0, 0},
248 {1998, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
249
250 {TONE_AMERICAN_DIALTONE,
251 {DATA_DT, 0, 0, 0, 0, 0, 0, 0, 0, 0},
252 {SIZE_DT, 0, 0, 0, 0, 0, 0, 0, 0, 0},
253 {8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
254
255 {TONE_GERMAN_DIALPBX,
256 {DATA_GA, DATA_S, DATA_GA, DATA_S, DATA_GA, DATA_S, 0, 0, 0, 0},
257 {SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, 0, 0, 0, 0},
258 {2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
259
260 {TONE_GERMAN_OLDDIALPBX,
261 {DATA_GO, DATA_S, DATA_GO, DATA_S, DATA_GO, DATA_S, 0, 0, 0, 0},
262 {SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, 0, 0, 0, 0},
263 {2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
264
265 {TONE_AMERICAN_DIALPBX,
266 {DATA_DT, DATA_S, DATA_DT, DATA_S, DATA_DT, DATA_S, 0, 0, 0, 0},
267 {SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, SIZE_DT, SIZE_S, 0, 0, 0, 0},
268 {2000, 2000, 2000, 2000, 2000, 12000, 0, 0, 0, 0} },
269
270 {TONE_GERMAN_RINGING,
271 {DATA_GA, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
272 {SIZE_GA, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
273 {8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} },
274
275 {TONE_GERMAN_OLDRINGING,
276 {DATA_GO, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
277 {SIZE_GO, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
278 {8000, 40000, 0, 0, 0, 0, 0, 0, 0, 0} },
279
280 {TONE_AMERICAN_RINGING,
281 {DATA_RI, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
282 {SIZE_RI, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
283 {8000, 32000, 0, 0, 0, 0, 0, 0, 0, 0} },
284
285 {TONE_GERMAN_RINGPBX,
286 {DATA_GA, DATA_S, DATA_GA, DATA_S, 0, 0, 0, 0, 0, 0},
287 {SIZE_GA, SIZE_S, SIZE_GA, SIZE_S, 0, 0, 0, 0, 0, 0},
288 {4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
289
290 {TONE_GERMAN_OLDRINGPBX,
291 {DATA_GO, DATA_S, DATA_GO, DATA_S, 0, 0, 0, 0, 0, 0},
292 {SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, 0, 0, 0, 0, 0, 0},
293 {4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
294
295 {TONE_AMERICAN_RINGPBX,
296 {DATA_RI, DATA_S, DATA_RI, DATA_S, 0, 0, 0, 0, 0, 0},
297 {SIZE_RI, SIZE_S, SIZE_RI, SIZE_S, 0, 0, 0, 0, 0, 0},
298 {4000, 4000, 4000, 28000, 0, 0, 0, 0, 0, 0} },
299
300 {TONE_GERMAN_BUSY,
301 {DATA_GA, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
302 {SIZE_GA, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
303 {4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
304
305 {TONE_GERMAN_OLDBUSY,
306 {DATA_GO, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
307 {SIZE_GO, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
308 {1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} },
309
310 {TONE_AMERICAN_BUSY,
311 {DATA_BU, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
312 {SIZE_BU, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
313 {4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
314
315 {TONE_GERMAN_HANGUP,
316 {DATA_GA, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
317 {SIZE_GA, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
318 {4000, 4000, 0, 0, 0, 0, 0, 0, 0, 0} },
319
320 {TONE_GERMAN_OLDHANGUP,
321 {DATA_GO, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
322 {SIZE_GO, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
323 {1000, 5000, 0, 0, 0, 0, 0, 0, 0, 0} },
324
325 {TONE_AMERICAN_HANGUP,
326 {DATA_DT, 0, 0, 0, 0, 0, 0, 0, 0, 0},
327 {SIZE_DT, 0, 0, 0, 0, 0, 0, 0, 0, 0},
328 {8000, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
329
330 {TONE_SPECIAL_INFO,
331 {DATA_S1, DATA_S2, DATA_S3, DATA_S, 0, 0, 0, 0, 0, 0},
332 {SIZE_S1, SIZE_S2, SIZE_S3, SIZE_S, 0, 0, 0, 0, 0, 0},
333 {2666, 2666, 2666, 8002, 0, 0, 0, 0, 0, 0} },
334
335 {TONE_GERMAN_GASSENBESETZT,
336 {DATA_GA, DATA_S, 0, 0, 0, 0, 0, 0, 0, 0},
337 {SIZE_GA, SIZE_S, 0, 0, 0, 0, 0, 0, 0, 0},
338 {2000, 2000, 0, 0, 0, 0, 0, 0, 0, 0} },
339
340 {TONE_GERMAN_AUFSCHALTTON,
341 {DATA_GO, DATA_S, DATA_GO, DATA_S, 0, 0, 0, 0, 0, 0},
342 {SIZE_GO, SIZE_S, SIZE_GO, SIZE_S, 0, 0, 0, 0, 0, 0},
343 {1000, 5000, 1000, 17000, 0, 0, 0, 0, 0, 0} },
344
345 {0,
346 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
347 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
348 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
349};
350
351/******************
352 * copy tone data *
353 ******************/
354
355/* tone data for the number of samples needed is copied into the given buffer.
356 * the count will be changed and may begin from 0 each pattern period.
357 * the trick is to precalculate the pointers and lengths to use only one
358 * memcpy per function call, or two memcpys if the tone sequence changes.
359 *
360 * pattern - the type of the pattern
361 * count - the sample from the beginning of the pattern (phase)
362 * len - the number of bytes
363 *
364 * return - nothing; the buffer is filled in place
365 *
366 * if the tone has finished (e.g. knocking tone), dsp->tones is turned off
367 */
368void dsp_tone_copy(struct dsp *dsp, u8 *data, int len)
369{
370 int index, count, start, num;
371 struct pattern *pat;
372 struct dsp_tone *tone = &dsp->tone;
373
374 /* if we have no tone, we copy silence */
375 if (!tone->tone) {
376 memset(data, dsp_silence, len);
377 return;
378 }
379
380 /* process pattern */
381 pat = (struct pattern *)tone->pattern;
382 /* points to the current pattern */
383 index = tone->index; /* gives current sequence index */
384 count = tone->count; /* gives current sample */
385
386 /* copy sample */
387 while (len) {
388 /* find sample to start with */
389 while (42) {
390 /* wrap around */
391 if (!pat->seq[index]) {
392 count = 0;
393 index = 0;
394 }
395 /* check if we are currently playing this tone */
396 if (count < pat->seq[index])
397 break;
398 if (dsp_debug & DEBUG_DSP_TONE)
399 printk(KERN_DEBUG "%s: reaching next sequence "
400 "(index=%d)\n", __func__, index);
401 count -= pat->seq[index];
402 index++;
403 }
404 /* calculate start and number of samples */
405 start = count % (*(pat->siz[index]));
406 num = len;
407 if (num+count > pat->seq[index])
408 num = pat->seq[index] - count;
409 if (num+start > (*(pat->siz[index])))
410 num = (*(pat->siz[index])) - start;
411 /* copy memory */
412 memcpy(data, pat->data[index]+start, num);
413 /* reduce length */
414 data += num;
415 count += num;
416 len -= num;
417 }
418 tone->index = index;
419 tone->count = count;
420
421 /* done - the buffer has been filled */
422 return;
423}
424
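In the copy loop above, count is the phase inside the whole pattern, the inner while advances to the sequence entry that phase falls into, and each memcpy is clipped both to the remaining duration of that entry and to the length of its short sample loop, so the loop repeats seamlessly. A stand-alone sketch of that clipping for a single looping buffer (the names and demo data are made up):

/* Sketch of copying 'len' bytes out of a short looping sample buffer,
 * as done per sequence entry in dsp_tone_copy() above.
 */
#include <stdio.h>
#include <string.h>

static void copy_loop(char *dst, int len, const char *loop, int loop_len,
		      int *phase)
{
	while (len) {
		int start = *phase % loop_len;
		int num = loop_len - start;	/* clip to the end of the loop */

		if (num > len)
			num = len;		/* clip to what the caller wants */
		memcpy(dst, loop + start, num);
		dst += num;
		*phase += num;
		len -= num;
	}
}

int main(void)
{
	const char tone[] = "abcdefgh";		/* stand-in for an alaw sample loop */
	char out[21] = { 0 };
	int phase = 0;

	copy_loop(out, 20, tone, 8, &phase);
	printf("%s\n", out);			/* abcdefghabcdefghabcd */
	return 0;
}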
425
426/*******************************
427 * send HW message to hfc card *
428 *******************************/
429
430static void
431dsp_tone_hw_message(struct dsp *dsp, u8 *sample, int len)
432{
433 struct sk_buff *nskb;
434
435 /* unlocking is not required, because we don't expect a response */
436 nskb = _alloc_mISDN_skb(PH_CONTROL_REQ,
437 (len)?HFC_SPL_LOOP_ON:HFC_SPL_LOOP_OFF, len, sample,
438 GFP_ATOMIC);
439 if (nskb) {
440 if (dsp->ch.peer) {
441 if (dsp->ch.recv(dsp->ch.peer, nskb))
442 dev_kfree_skb(nskb);
443 } else
444 dev_kfree_skb(nskb);
445 }
446}
447
448
449/*****************
450 * timer expires *
451 *****************/
452void
453dsp_tone_timeout(void *arg)
454{
455 struct dsp *dsp = arg;
456 struct dsp_tone *tone = &dsp->tone;
457 struct pattern *pat = (struct pattern *)tone->pattern;
458 int index = tone->index;
459
460 if (!tone->tone)
461 return;
462
463 index++;
464 if (!pat->seq[index])
465 index = 0;
466 tone->index = index;
467
468 /* set next tone */
469 if (pat->data[index] == DATA_S)
470 dsp_tone_hw_message(dsp, 0, 0);
471 else
472 dsp_tone_hw_message(dsp, pat->data[index], *(pat->siz[index]));
473 /* set timer */
474 init_timer(&tone->tl);
475 tone->tl.expires = jiffies + (pat->seq[index] * HZ) / 8000;
476 add_timer(&tone->tl);
477}
478
479
480/********************
481 * set/release tone *
482 ********************/
483
484/*
485 * tones are realized by streaming or by special loop commands if supported
486 * by hardware. when hardware is used, the patterns will be controlled by
487 * timers.
488 */
489int
490dsp_tone(struct dsp *dsp, int tone)
491{
492 struct pattern *pat;
493 int i;
494 struct dsp_tone *tonet = &dsp->tone;
495
496 tonet->software = 0;
497 tonet->hardware = 0;
498
499 /* we turn off the tone */
500 if (!tone) {
501 if (dsp->features.hfc_loops)
502 if (timer_pending(&tonet->tl))
503 del_timer(&tonet->tl);
504 if (dsp->features.hfc_loops)
505 dsp_tone_hw_message(dsp, NULL, 0);
506 tonet->tone = 0;
507 return 0;
508 }
509
510 pat = NULL;
511 i = 0;
512 while (pattern[i].tone) {
513 if (pattern[i].tone == tone) {
514 pat = &pattern[i];
515 break;
516 }
517 i++;
518 }
519 if (!pat) {
520 printk(KERN_WARNING "dsp: given tone 0x%x is invalid\n", tone);
521 return -EINVAL;
522 }
523 if (dsp_debug & DEBUG_DSP_TONE)
524 printk(KERN_DEBUG "%s: now starting tone %d (index=%d)\n",
525 __func__, tone, 0);
526 tonet->tone = tone;
527 tonet->pattern = pat;
528 tonet->index = 0;
529 tonet->count = 0;
530
531 if (dsp->features.hfc_loops) {
532 tonet->hardware = 1;
533 /* set first tone */
534 dsp_tone_hw_message(dsp, pat->data[0], *(pat->siz[0]));
535 /* set timer */
536 if (timer_pending(&tonet->tl))
537 del_timer(&tonet->tl);
538 init_timer(&tonet->tl);
539 tonet->tl.expires = jiffies + (pat->seq[0] * HZ) / 8000;
540 add_timer(&tonet->tl);
541 } else {
542 tonet->software = 1;
543 }
544
545 return 0;
546}
547
548
549
550
551
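The hardware tone loops above are paced with a kernel timer whose expiry is jiffies + (pat->seq[index] * HZ) / 8000, i.e. the seq[] values are sample counts at 8000 samples/s, so the 8000-sample american dialtone entry lasts exactly one second. A tiny illustration of that conversion (the tick rate below is only an example value standing in for the kernel's HZ):

/* Illustration of the samples-at-8kHz to timer-tick conversion used for
 * the hardware tone loops above; hz = 250 is only an example tick rate.
 */
#include <stdio.h>

int main(void)
{
	const long hz = 250;				/* stands in for HZ */
	long seq[] = { 2000, 2000, 12000, 8000 };	/* sample counts from pattern[] */
	int i;

	for (i = 0; i < 4; i++)
		printf("%5ld samples -> %4ld ticks (%ld ms)\n",
		       seq[i], seq[i] * hz / 8000, seq[i] * 1000 / 8000);
	return 0;
}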
diff --git a/drivers/isdn/mISDN/fsm.c b/drivers/isdn/mISDN/fsm.c
new file mode 100644
index 000000000000..b5d6553f2dc8
--- /dev/null
+++ b/drivers/isdn/mISDN/fsm.c
@@ -0,0 +1,183 @@
1/*
2 * finite state machine implementation
3 *
4 * Author Karsten Keil <kkeil@novell.com>
5 *
6 * Thanks to Jan den Ouden
7 * Fritz Elfert
8 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 */
20
21#include <linux/kernel.h>
22#include <linux/slab.h>
23#include <linux/module.h>
24#include <linux/string.h>
25#include "fsm.h"
26
27#define FSM_TIMER_DEBUG 0
28
29void
30mISDN_FsmNew(struct Fsm *fsm,
31 struct FsmNode *fnlist, int fncount)
32{
33 int i;
34
35 fsm->jumpmatrix = kzalloc(sizeof(FSMFNPTR) * fsm->state_count *
36 fsm->event_count, GFP_KERNEL);
37
38 for (i = 0; i < fncount; i++)
39 if ((fnlist[i].state >= fsm->state_count) ||
40 (fnlist[i].event >= fsm->event_count)) {
41 printk(KERN_ERR
42 "mISDN_FsmNew Error: %d st(%ld/%ld) ev(%ld/%ld)\n",
43 i, (long)fnlist[i].state, (long)fsm->state_count,
44 (long)fnlist[i].event, (long)fsm->event_count);
45 } else
46 fsm->jumpmatrix[fsm->state_count * fnlist[i].event +
47 fnlist[i].state] = (FSMFNPTR) fnlist[i].routine;
48}
49EXPORT_SYMBOL(mISDN_FsmNew);
50
51void
52mISDN_FsmFree(struct Fsm *fsm)
53{
54 kfree((void *) fsm->jumpmatrix);
55}
56EXPORT_SYMBOL(mISDN_FsmFree);
57
58int
59mISDN_FsmEvent(struct FsmInst *fi, int event, void *arg)
60{
61 FSMFNPTR r;
62
63 if ((fi->state >= fi->fsm->state_count) ||
64 (event >= fi->fsm->event_count)) {
65 printk(KERN_ERR
66 "mISDN_FsmEvent Error st(%ld/%ld) ev(%d/%ld)\n",
67 (long)fi->state, (long)fi->fsm->state_count, event,
68 (long)fi->fsm->event_count);
69 return 1;
70 }
71 r = fi->fsm->jumpmatrix[fi->fsm->state_count * event + fi->state];
72 if (r) {
73 if (fi->debug)
74 fi->printdebug(fi, "State %s Event %s",
75 fi->fsm->strState[fi->state],
76 fi->fsm->strEvent[event]);
77 r(fi, event, arg);
78 return 0;
79 } else {
80 if (fi->debug)
81 fi->printdebug(fi, "State %s Event %s no action",
82 fi->fsm->strState[fi->state],
83 fi->fsm->strEvent[event]);
84 return 1;
85 }
86}
87EXPORT_SYMBOL(mISDN_FsmEvent);
88
89void
90mISDN_FsmChangeState(struct FsmInst *fi, int newstate)
91{
92 fi->state = newstate;
93 if (fi->debug)
94 fi->printdebug(fi, "ChangeState %s",
95 fi->fsm->strState[newstate]);
96}
97EXPORT_SYMBOL(mISDN_FsmChangeState);
98
99static void
100FsmExpireTimer(struct FsmTimer *ft)
101{
102#if FSM_TIMER_DEBUG
103 if (ft->fi->debug)
104 ft->fi->printdebug(ft->fi, "FsmExpireTimer %lx", (long) ft);
105#endif
106 mISDN_FsmEvent(ft->fi, ft->event, ft->arg);
107}
108
109void
110mISDN_FsmInitTimer(struct FsmInst *fi, struct FsmTimer *ft)
111{
112 ft->fi = fi;
113 ft->tl.function = (void *) FsmExpireTimer;
114 ft->tl.data = (long) ft;
115#if FSM_TIMER_DEBUG
116 if (ft->fi->debug)
117 ft->fi->printdebug(ft->fi, "mISDN_FsmInitTimer %lx", (long) ft);
118#endif
119 init_timer(&ft->tl);
120}
121EXPORT_SYMBOL(mISDN_FsmInitTimer);
122
123void
124mISDN_FsmDelTimer(struct FsmTimer *ft, int where)
125{
126#if FSM_TIMER_DEBUG
127 if (ft->fi->debug)
128 ft->fi->printdebug(ft->fi, "mISDN_FsmDelTimer %lx %d",
129 (long) ft, where);
130#endif
131 del_timer(&ft->tl);
132}
133EXPORT_SYMBOL(mISDN_FsmDelTimer);
134
135int
136mISDN_FsmAddTimer(struct FsmTimer *ft,
137 int millisec, int event, void *arg, int where)
138{
139
140#if FSM_TIMER_DEBUG
141 if (ft->fi->debug)
142 ft->fi->printdebug(ft->fi, "mISDN_FsmAddTimer %lx %d %d",
143 (long) ft, millisec, where);
144#endif
145
146 if (timer_pending(&ft->tl)) {
147 if (ft->fi->debug) {
148 printk(KERN_WARNING
149 "mISDN_FsmAddTimer: timer already active!\n");
150 ft->fi->printdebug(ft->fi,
151 "mISDN_FsmAddTimer already active!");
152 }
153 return -1;
154 }
155 init_timer(&ft->tl);
156 ft->event = event;
157 ft->arg = arg;
158 ft->tl.expires = jiffies + (millisec * HZ) / 1000;
159 add_timer(&ft->tl);
160 return 0;
161}
162EXPORT_SYMBOL(mISDN_FsmAddTimer);
163
164void
165mISDN_FsmRestartTimer(struct FsmTimer *ft,
166 int millisec, int event, void *arg, int where)
167{
168
169#if FSM_TIMER_DEBUG
170 if (ft->fi->debug)
171 ft->fi->printdebug(ft->fi, "mISDN_FsmRestartTimer %lx %d %d",
172 (long) ft, millisec, where);
173#endif
174
175 if (timer_pending(&ft->tl))
176 del_timer(&ft->tl);
177 init_timer(&ft->tl);
178 ft->event = event;
179 ft->arg = arg;
180 ft->tl.expires = jiffies + (millisec * HZ) / 1000;
181 add_timer(&ft->tl);
182}
183EXPORT_SYMBOL(mISDN_FsmRestartTimer);
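mISDN_FsmNew() above flattens the state/event table into one array indexed by state_count * event + state, and mISDN_FsmEvent() either calls the function stored in that slot or reports that there is no action. A minimal stand-alone sketch of the same flattened jump-matrix dispatch (the two states, two events and the handler are made up):

/* Stand-alone sketch of the flattened state/event jump matrix that
 * mISDN_FsmNew()/mISDN_FsmEvent() use above.
 */
#include <stdio.h>
#include <stdlib.h>

enum { ST_IDLE, ST_ACTIVE, ST_COUNT };
enum { EV_START, EV_STOP, EV_COUNT };

typedef void (*fsmfn)(int state, int event);

static void go_active(int state, int event)
{
	printf("event %d in state %d: idle -> active\n", event, state);
}

int main(void)
{
	fsmfn *jump = calloc(ST_COUNT * EV_COUNT, sizeof(*jump));
	fsmfn fn;
	int state = ST_IDLE;

	if (!jump)
		return 1;
	/* fill the matrix: slot = state_count * event + state */
	jump[ST_COUNT * EV_START + ST_IDLE] = go_active;

	/* dispatch one event, as mISDN_FsmEvent() does */
	fn = jump[ST_COUNT * EV_START + state];
	if (fn)
		fn(state, EV_START);
	else
		printf("no action\n");

	free(jump);
	return 0;
}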
diff --git a/drivers/isdn/mISDN/fsm.h b/drivers/isdn/mISDN/fsm.h
new file mode 100644
index 000000000000..928f5be192c1
--- /dev/null
+++ b/drivers/isdn/mISDN/fsm.h
@@ -0,0 +1,67 @@
1/*
2 *
3 * Author Karsten Keil <kkeil@novell.com>
4 *
5 * Thanks to Jan den Ouden
6 * Fritz Elfert
7 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#ifndef _MISDN_FSM_H
21#define _MISDN_FSM_H
22
23#include <linux/timer.h>
24
25/* Statemachine */
26
27struct FsmInst;
28
29typedef void (*FSMFNPTR)(struct FsmInst *, int, void *);
30
31struct Fsm {
32 FSMFNPTR *jumpmatrix;
33 int state_count, event_count;
34 char **strEvent, **strState;
35};
36
37struct FsmInst {
38 struct Fsm *fsm;
39 int state;
40 int debug;
41 void *userdata;
42 int userint;
43 void (*printdebug) (struct FsmInst *, char *, ...);
44};
45
46struct FsmNode {
47 int state, event;
48 void (*routine) (struct FsmInst *, int, void *);
49};
50
51struct FsmTimer {
52 struct FsmInst *fi;
53 struct timer_list tl;
54 int event;
55 void *arg;
56};
57
58extern void mISDN_FsmNew(struct Fsm *, struct FsmNode *, int);
59extern void mISDN_FsmFree(struct Fsm *);
60extern int mISDN_FsmEvent(struct FsmInst *, int , void *);
61extern void mISDN_FsmChangeState(struct FsmInst *, int);
62extern void mISDN_FsmInitTimer(struct FsmInst *, struct FsmTimer *);
63extern int mISDN_FsmAddTimer(struct FsmTimer *, int, int, void *, int);
64extern void mISDN_FsmRestartTimer(struct FsmTimer *, int, int, void *, int);
65extern void mISDN_FsmDelTimer(struct FsmTimer *, int);
66
67#endif
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
new file mode 100644
index 000000000000..2596fba4e614
--- /dev/null
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -0,0 +1,365 @@
1/*
2 *
3 * Author Karsten Keil <kkeil@novell.com>
4 *
5 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/mISDNhw.h>
20
21static void
22dchannel_bh(struct work_struct *ws)
23{
24 struct dchannel *dch = container_of(ws, struct dchannel, workq);
25 struct sk_buff *skb;
26 int err;
27
28 if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
29 while ((skb = skb_dequeue(&dch->rqueue))) {
30 if (likely(dch->dev.D.peer)) {
31 err = dch->dev.D.recv(dch->dev.D.peer, skb);
32 if (err)
33 dev_kfree_skb(skb);
34 } else
35 dev_kfree_skb(skb);
36 }
37 }
38 if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
39 if (dch->phfunc)
40 dch->phfunc(dch);
41 }
42}
43
44static void
45bchannel_bh(struct work_struct *ws)
46{
47 struct bchannel *bch = container_of(ws, struct bchannel, workq);
48 struct sk_buff *skb;
49 int err;
50
51 if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
52 while ((skb = skb_dequeue(&bch->rqueue))) {
53 if (bch->rcount >= 64)
54 printk(KERN_WARNING "B-channel %p receive "
55 "queue is full, but empties...\n", bch);
56 bch->rcount--;
57 if (likely(bch->ch.peer)) {
58 err = bch->ch.recv(bch->ch.peer, skb);
59 if (err)
60 dev_kfree_skb(skb);
61 } else
62 dev_kfree_skb(skb);
63 }
64 }
65}
66
67int
68mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
69{
70 test_and_set_bit(FLG_HDLC, &ch->Flags);
71 ch->maxlen = maxlen;
72 ch->hw = NULL;
73 ch->rx_skb = NULL;
74 ch->tx_skb = NULL;
75 ch->tx_idx = 0;
76 ch->phfunc = phf;
77 skb_queue_head_init(&ch->squeue);
78 skb_queue_head_init(&ch->rqueue);
79 INIT_LIST_HEAD(&ch->dev.bchannels);
80 INIT_WORK(&ch->workq, dchannel_bh);
81 return 0;
82}
83EXPORT_SYMBOL(mISDN_initdchannel);
84
85int
86mISDN_initbchannel(struct bchannel *ch, int maxlen)
87{
88 ch->Flags = 0;
89 ch->maxlen = maxlen;
90 ch->hw = NULL;
91 ch->rx_skb = NULL;
92 ch->tx_skb = NULL;
93 ch->tx_idx = 0;
94 skb_queue_head_init(&ch->rqueue);
95 ch->rcount = 0;
96 ch->next_skb = NULL;
97 INIT_WORK(&ch->workq, bchannel_bh);
98 return 0;
99}
100EXPORT_SYMBOL(mISDN_initbchannel);
101
102int
103mISDN_freedchannel(struct dchannel *ch)
104{
105 if (ch->tx_skb) {
106 dev_kfree_skb(ch->tx_skb);
107 ch->tx_skb = NULL;
108 }
109 if (ch->rx_skb) {
110 dev_kfree_skb(ch->rx_skb);
111 ch->rx_skb = NULL;
112 }
113 skb_queue_purge(&ch->squeue);
114 skb_queue_purge(&ch->rqueue);
115 flush_scheduled_work();
116 return 0;
117}
118EXPORT_SYMBOL(mISDN_freedchannel);
119
120int
121mISDN_freebchannel(struct bchannel *ch)
122{
123 if (ch->tx_skb) {
124 dev_kfree_skb(ch->tx_skb);
125 ch->tx_skb = NULL;
126 }
127 if (ch->rx_skb) {
128 dev_kfree_skb(ch->rx_skb);
129 ch->rx_skb = NULL;
130 }
131 if (ch->next_skb) {
132 dev_kfree_skb(ch->next_skb);
133 ch->next_skb = NULL;
134 }
135 skb_queue_purge(&ch->rqueue);
136 ch->rcount = 0;
137 flush_scheduled_work();
138 return 0;
139}
140EXPORT_SYMBOL(mISDN_freebchannel);
141
142static inline u_int
143get_sapi_tei(u_char *p)
144{
145 u_int sapi, tei;
146
147 sapi = *p >> 2;
148 tei = p[1] >> 1;
149 return sapi | (tei << 8);
150}
151
152void
153recv_Dchannel(struct dchannel *dch)
154{
155 struct mISDNhead *hh;
156
157 if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
158 dev_kfree_skb(dch->rx_skb);
159 dch->rx_skb = NULL;
160 return;
161 }
162 hh = mISDN_HEAD_P(dch->rx_skb);
163 hh->prim = PH_DATA_IND;
164 hh->id = get_sapi_tei(dch->rx_skb->data);
165 skb_queue_tail(&dch->rqueue, dch->rx_skb);
166 dch->rx_skb = NULL;
167 schedule_event(dch, FLG_RECVQUEUE);
168}
169EXPORT_SYMBOL(recv_Dchannel);
170
171void
172recv_Bchannel(struct bchannel *bch)
173{
174 struct mISDNhead *hh;
175
176 hh = mISDN_HEAD_P(bch->rx_skb);
177 hh->prim = PH_DATA_IND;
178 hh->id = MISDN_ID_ANY;
179 if (bch->rcount >= 64) {
180 dev_kfree_skb(bch->rx_skb);
181 bch->rx_skb = NULL;
182 return;
183 }
184 bch->rcount++;
185 skb_queue_tail(&bch->rqueue, bch->rx_skb);
186 bch->rx_skb = NULL;
187 schedule_event(bch, FLG_RECVQUEUE);
188}
189EXPORT_SYMBOL(recv_Bchannel);
190
191void
192recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
193{
194 skb_queue_tail(&dch->rqueue, skb);
195 schedule_event(dch, FLG_RECVQUEUE);
196}
197EXPORT_SYMBOL(recv_Dchannel_skb);
198
199void
200recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
201{
202 if (bch->rcount >= 64) {
203 dev_kfree_skb(skb);
204 return;
205 }
206 bch->rcount++;
207 skb_queue_tail(&bch->rqueue, skb);
208 schedule_event(bch, FLG_RECVQUEUE);
209}
210EXPORT_SYMBOL(recv_Bchannel_skb);
211
212static void
213confirm_Dsend(struct dchannel *dch)
214{
215 struct sk_buff *skb;
216
217 skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
218 0, NULL, GFP_ATOMIC);
219 if (!skb) {
220 printk(KERN_ERR "%s: no skb id %x\n", __func__,
221 mISDN_HEAD_ID(dch->tx_skb));
222 return;
223 }
224 skb_queue_tail(&dch->rqueue, skb);
225 schedule_event(dch, FLG_RECVQUEUE);
226}
227
228int
229get_next_dframe(struct dchannel *dch)
230{
231 dch->tx_idx = 0;
232 dch->tx_skb = skb_dequeue(&dch->squeue);
233 if (dch->tx_skb) {
234 confirm_Dsend(dch);
235 return 1;
236 }
237 dch->tx_skb = NULL;
238 test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
239 return 0;
240}
241EXPORT_SYMBOL(get_next_dframe);
242
243void
244confirm_Bsend(struct bchannel *bch)
245{
246 struct sk_buff *skb;
247
248 if (bch->rcount >= 64)
249 return;
250 skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
251 0, NULL, GFP_ATOMIC);
252 if (!skb) {
253 printk(KERN_ERR "%s: no skb id %x\n", __func__,
254 mISDN_HEAD_ID(bch->tx_skb));
255 return;
256 }
257 bch->rcount++;
258 skb_queue_tail(&bch->rqueue, skb);
259 schedule_event(bch, FLG_RECVQUEUE);
260}
261EXPORT_SYMBOL(confirm_Bsend);
262
263int
264get_next_bframe(struct bchannel *bch)
265{
266 bch->tx_idx = 0;
267 if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
268 bch->tx_skb = bch->next_skb;
269 if (bch->tx_skb) {
270 bch->next_skb = NULL;
271 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
272 if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
273 confirm_Bsend(bch); /* not for transparent */
274 return 1;
275 } else {
276 test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
277 printk(KERN_WARNING "B TX_NEXT without skb\n");
278 }
279 }
280 bch->tx_skb = NULL;
281 test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
282 return 0;
283}
284EXPORT_SYMBOL(get_next_bframe);
285
286void
287queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
288{
289 struct mISDNhead *hh;
290
291 if (!skb) {
292 _queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
293 } else {
294 if (ch->peer) {
295 hh = mISDN_HEAD_P(skb);
296 hh->prim = pr;
297 hh->id = id;
298 if (!ch->recv(ch->peer, skb))
299 return;
300 }
301 dev_kfree_skb(skb);
302 }
303}
304EXPORT_SYMBOL(queue_ch_frame);
305
306int
307dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
308{
309 /* check frame size */
310 if (skb->len <= 0) {
311 printk(KERN_WARNING "%s: skb too small\n", __func__);
312 return -EINVAL;
313 }
314 if (skb->len > ch->maxlen) {
315 printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
316 __func__, skb->len, ch->maxlen);
317 return -EINVAL;
318 }
319 /* HW lock must be obtained */
320 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
321 skb_queue_tail(&ch->squeue, skb);
322 return 0;
323 } else {
324 /* write to fifo */
325 ch->tx_skb = skb;
326 ch->tx_idx = 0;
327 return 1;
328 }
329}
330EXPORT_SYMBOL(dchannel_senddata);
331
332int
333bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
334{
335
336 /* check frame size */
337 if (skb->len <= 0) {
338 printk(KERN_WARNING "%s: skb too small\n", __func__);
339 return -EINVAL;
340 }
341 if (skb->len > ch->maxlen) {
342 printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
343 __func__, skb->len, ch->maxlen);
344 return -EINVAL;
345 }
346 /* HW lock must be obtained */
347 /* check for pending next_skb */
348 if (ch->next_skb) {
349 printk(KERN_WARNING
350 "%s: next_skb exists ERROR (skb->len=%d next_skb->len=%d)\n",
351 __func__, skb->len, ch->next_skb->len);
352 return -EBUSY;
353 }
354 if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
355 test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
356 ch->next_skb = skb;
357 return 0;
358 } else {
359 /* write to fifo */
360 ch->tx_skb = skb;
361 ch->tx_idx = 0;
362 return 1;
363 }
364}
365EXPORT_SYMBOL(bchannel_senddata);
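dchannel_senddata() and bchannel_senddata() above implement a claim-or-queue transmit scheme: the caller that wins test_and_set_bit(FLG_TX_BUSY) gets return value 1 and writes the frame to the FIFO itself, while later callers get 0 and their frame is queued (or parked in next_skb for B-channels) until the interrupt path picks it up. A user-space sketch of that pattern, with a C11 atomic flag standing in for the Flags bit and printf standing in for the FIFO write:

/* Sketch of the claim-or-queue transmit pattern used by dchannel_senddata():
 * the first caller that sets the busy flag starts the FIFO write itself,
 * later callers just queue and return.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag tx_busy = ATOMIC_FLAG_INIT;

/* returns 1 if the caller must start the FIFO write, 0 if the frame was queued */
static int senddata(const char *frame)
{
	if (atomic_flag_test_and_set(&tx_busy)) {
		printf("queued:  %s\n", frame);		/* TX already running */
		return 0;
	}
	printf("to fifo: %s\n", frame);			/* we own the transmitter */
	return 1;
}

int main(void)
{
	senddata("frame 1");		/* to fifo */
	senddata("frame 2");		/* queued */
	atomic_flag_clear(&tx_busy);	/* TX complete (done in the ISR in the driver) */
	senddata("frame 3");		/* to fifo */
	return 0;
}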
diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h
new file mode 100644
index 000000000000..a23d575449f6
--- /dev/null
+++ b/drivers/isdn/mISDN/l1oip.h
@@ -0,0 +1,91 @@
1/*
2 * see notice in l1oip.c
3 */
4
5/* debugging */
6#define DEBUG_L1OIP_INIT 0x00010000
7#define DEBUG_L1OIP_SOCKET 0x00020000
8#define DEBUG_L1OIP_MGR 0x00040000
9#define DEBUG_L1OIP_MSG 0x00080000
10
11/* enable to deliver received bchannel packets out of order (sequence 2143658798...) for debugging */
12/*
13#define REORDER_DEBUG
14*/
15
16/* frames */
17#define L1OIP_MAX_LEN 2048 /* max packet size from l2 */
18#define L1OIP_MAX_PERFRAME 1400 /* max data size in one frame */
19
20
21/* timers */
22#define L1OIP_KEEPALIVE 15
23#define L1OIP_TIMEOUT 65
24
25
26/* socket */
27#define L1OIP_DEFAULTPORT 931
28
29
30/* channel structure */
31struct l1oip_chan {
32 struct dchannel *dch;
33 struct bchannel *bch;
34 u32 tx_counter; /* counts xmit bytes/packets */
35 u32 rx_counter; /* counts recv bytes/packets */
36 u32 codecstate; /* used by codec to save data */
37#ifdef REORDER_DEBUG
38 int disorder_flag;
39 struct sk_buff *disorder_skb;
40 u32 disorder_cnt;
41#endif
42};
43
44
45/* card structure */
46struct l1oip {
47 struct list_head list;
48
49 /* card */
50 int registered; /* if registered with mISDN */
51 char name[MISDN_MAX_IDLEN];
52 int idx; /* card index */
53 int pri; /* 1=pri, 0=bri */
54 int d_idx; /* current dchannel number */
55 int b_num; /* number of bchannels */
56 u32 id; /* id of connection */
57 int ondemand; /* if transmis. is on demand */
58 int bundle; /* bundle channels in one frm */
59 int codec; /* codec to use for transmis. */
60 int limit; /* limit number of bchannels */
61
62 /* timer */
63 struct timer_list keep_tl;
64 struct timer_list timeout_tl;
65 int timeout_on;
66 struct work_struct workq;
67
68 /* socket */
69 struct socket *socket; /* if set, socket is created */
70 struct completion socket_complete;/* completion of sock thread */
71 struct task_struct *socket_thread;
72 spinlock_t socket_lock; /* access sock outside thread */
73 u32 remoteip; /* if all set, ip is assigned */
74 u16 localport; /* must always be set */
75 u16 remoteport; /* must always be set */
76 struct sockaddr_in sin_local; /* local socket name */
77 struct sockaddr_in sin_remote; /* remote socket name */
78 struct msghdr sendmsg; /* ip message to send */
79 struct iovec sendiov; /* iov for message */
80
81 /* frame */
82 struct l1oip_chan chan[128]; /* channel instances */
83};
84
85extern int l1oip_law_to_4bit(u8 *data, int len, u8 *result, u32 *state);
86extern int l1oip_4bit_to_law(u8 *data, int len, u8 *result);
87extern int l1oip_alaw_to_ulaw(u8 *data, int len, u8 *result);
88extern int l1oip_ulaw_to_alaw(u8 *data, int len, u8 *result);
89extern void l1oip_4bit_free(void);
90extern int l1oip_4bit_alloc(int ulaw);
91
diff --git a/drivers/isdn/mISDN/l1oip_codec.c b/drivers/isdn/mISDN/l1oip_codec.c
new file mode 100644
index 000000000000..a2dc4570ef43
--- /dev/null
+++ b/drivers/isdn/mISDN/l1oip_codec.c
@@ -0,0 +1,374 @@
1/*
2
3 * l1oip_codec.c generic codec using lookup table
4 * -> conversion from a-Law to u-Law
5 * -> conversion from u-Law to a-Law
6 * -> compression by reducing the sample resolution to 4 bits
7 *
8 * NOTE: It is not compatible with any standard codec like ADPCM.
9 *
10 * Author Andreas Eversberg (jolly@eversberg.eu)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25
26 */
27
28/*
29
30How the codec works:
31--------------------
32
33The volume is increased to make better use of the available dynamic range.
34Each sample is converted to a-LAW with only 16 steps of level resolution.
35Each pair of samples is stored in one byte.
36
37The first sample is stored in the upper bits, the second sample is stored in
38the lower bits.
39
40To speed up compression and decompression, two lookup tables are formed:
41
42- a 16-bit index for two law-encoded samples, giving an 8-bit compressed result.
43- an 8-bit index for one compressed byte, giving a 16-bit decompressed result.
44
45NOTE: The bytes are handled as law-encoded data.
46
47*/
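/*
 * Editorial sketch, not part of this patch: the nibble packing described
 * above, written out without the lookup tables. The helper names are
 * illustrative only; the driver itself goes through table_com/table_dec.
 */
static inline u8 pack_pair(u8 first, u8 second)
{
	/* first compressed sample in the upper nibble, second in the lower */
	return (u8)(((first & 0x0f) << 4) | (second & 0x0f));
}

static inline void unpack_pair(u8 byte, u8 *first, u8 *second)
{
	*first = byte >> 4;	/* first sample from the upper nibble */
	*second = byte & 0x0f;	/* second sample from the lower nibble */
}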
48
49#include <linux/vmalloc.h>
50#include <linux/mISDNif.h>
51#include "core.h"
52
53/* codec definitions: lookup tables are used; calculations would run slower */
54
55static u8 *table_com;
56static u16 *table_dec;
57
58
59/* alaw -> ulaw */
60static u8 alaw_to_ulaw[256] =
61{
62 0xab, 0x2b, 0xe3, 0x63, 0x8b, 0x0b, 0xc9, 0x49,
63 0xba, 0x3a, 0xf6, 0x76, 0x9b, 0x1b, 0xd7, 0x57,
64 0xa3, 0x23, 0xdd, 0x5d, 0x83, 0x03, 0xc1, 0x41,
65 0xb2, 0x32, 0xeb, 0x6b, 0x93, 0x13, 0xcf, 0x4f,
66 0xaf, 0x2f, 0xe7, 0x67, 0x8f, 0x0f, 0xcd, 0x4d,
67 0xbe, 0x3e, 0xfe, 0x7e, 0x9f, 0x1f, 0xdb, 0x5b,
68 0xa7, 0x27, 0xdf, 0x5f, 0x87, 0x07, 0xc5, 0x45,
69 0xb6, 0x36, 0xef, 0x6f, 0x97, 0x17, 0xd3, 0x53,
70 0xa9, 0x29, 0xe1, 0x61, 0x89, 0x09, 0xc7, 0x47,
71 0xb8, 0x38, 0xf2, 0x72, 0x99, 0x19, 0xd5, 0x55,
72 0xa1, 0x21, 0xdc, 0x5c, 0x81, 0x01, 0xbf, 0x3f,
73 0xb0, 0x30, 0xe9, 0x69, 0x91, 0x11, 0xce, 0x4e,
74 0xad, 0x2d, 0xe5, 0x65, 0x8d, 0x0d, 0xcb, 0x4b,
75 0xbc, 0x3c, 0xfa, 0x7a, 0x9d, 0x1d, 0xd9, 0x59,
76 0xa5, 0x25, 0xde, 0x5e, 0x85, 0x05, 0xc3, 0x43,
77 0xb4, 0x34, 0xed, 0x6d, 0x95, 0x15, 0xd1, 0x51,
78 0xac, 0x2c, 0xe4, 0x64, 0x8c, 0x0c, 0xca, 0x4a,
79 0xbb, 0x3b, 0xf8, 0x78, 0x9c, 0x1c, 0xd8, 0x58,
80 0xa4, 0x24, 0xde, 0x5e, 0x84, 0x04, 0xc2, 0x42,
81 0xb3, 0x33, 0xec, 0x6c, 0x94, 0x14, 0xd0, 0x50,
82 0xb0, 0x30, 0xe8, 0x68, 0x90, 0x10, 0xce, 0x4e,
83 0xbf, 0x3f, 0xfe, 0x7e, 0xa0, 0x20, 0xdc, 0x5c,
84 0xa8, 0x28, 0xe0, 0x60, 0x88, 0x08, 0xc6, 0x46,
85 0xb7, 0x37, 0xf0, 0x70, 0x98, 0x18, 0xd4, 0x54,
86 0xaa, 0x2a, 0xe2, 0x62, 0x8a, 0x0a, 0xc8, 0x48,
87 0xb9, 0x39, 0xf4, 0x74, 0x9a, 0x1a, 0xd6, 0x56,
88 0xa2, 0x22, 0xdd, 0x5d, 0x82, 0x02, 0xc0, 0x40,
89 0xb1, 0x31, 0xea, 0x6a, 0x92, 0x12, 0xcf, 0x4f,
90 0xae, 0x2e, 0xe6, 0x66, 0x8e, 0x0e, 0xcc, 0x4c,
91 0xbd, 0x3d, 0xfc, 0x7c, 0x9e, 0x1e, 0xda, 0x5a,
92 0xa6, 0x26, 0xdf, 0x5f, 0x86, 0x06, 0xc4, 0x44,
93 0xb5, 0x35, 0xee, 0x6e, 0x96, 0x16, 0xd2, 0x52
94};
95
96/* ulaw -> alaw */
97static u8 ulaw_to_alaw[256] =
98{
99 0xab, 0x55, 0xd5, 0x15, 0x95, 0x75, 0xf5, 0x35,
100 0xb5, 0x45, 0xc5, 0x05, 0x85, 0x65, 0xe5, 0x25,
101 0xa5, 0x5d, 0xdd, 0x1d, 0x9d, 0x7d, 0xfd, 0x3d,
102 0xbd, 0x4d, 0xcd, 0x0d, 0x8d, 0x6d, 0xed, 0x2d,
103 0xad, 0x51, 0xd1, 0x11, 0x91, 0x71, 0xf1, 0x31,
104 0xb1, 0x41, 0xc1, 0x01, 0x81, 0x61, 0xe1, 0x21,
105 0x59, 0xd9, 0x19, 0x99, 0x79, 0xf9, 0x39, 0xb9,
106 0x49, 0xc9, 0x09, 0x89, 0x69, 0xe9, 0x29, 0xa9,
107 0xd7, 0x17, 0x97, 0x77, 0xf7, 0x37, 0xb7, 0x47,
108 0xc7, 0x07, 0x87, 0x67, 0xe7, 0x27, 0xa7, 0xdf,
109 0x9f, 0x7f, 0xff, 0x3f, 0xbf, 0x4f, 0xcf, 0x0f,
110 0x8f, 0x6f, 0xef, 0x2f, 0x53, 0x13, 0x73, 0x33,
111 0xb3, 0x43, 0xc3, 0x03, 0x83, 0x63, 0xe3, 0x23,
112 0xa3, 0x5b, 0xdb, 0x1b, 0x9b, 0x7b, 0xfb, 0x3b,
113 0xbb, 0xbb, 0x4b, 0x4b, 0xcb, 0xcb, 0x0b, 0x0b,
114 0x8b, 0x8b, 0x6b, 0x6b, 0xeb, 0xeb, 0x2b, 0x2b,
115 0xab, 0x54, 0xd4, 0x14, 0x94, 0x74, 0xf4, 0x34,
116 0xb4, 0x44, 0xc4, 0x04, 0x84, 0x64, 0xe4, 0x24,
117 0xa4, 0x5c, 0xdc, 0x1c, 0x9c, 0x7c, 0xfc, 0x3c,
118 0xbc, 0x4c, 0xcc, 0x0c, 0x8c, 0x6c, 0xec, 0x2c,
119 0xac, 0x50, 0xd0, 0x10, 0x90, 0x70, 0xf0, 0x30,
120 0xb0, 0x40, 0xc0, 0x00, 0x80, 0x60, 0xe0, 0x20,
121 0x58, 0xd8, 0x18, 0x98, 0x78, 0xf8, 0x38, 0xb8,
122 0x48, 0xc8, 0x08, 0x88, 0x68, 0xe8, 0x28, 0xa8,
123 0xd6, 0x16, 0x96, 0x76, 0xf6, 0x36, 0xb6, 0x46,
124 0xc6, 0x06, 0x86, 0x66, 0xe6, 0x26, 0xa6, 0xde,
125 0x9e, 0x7e, 0xfe, 0x3e, 0xbe, 0x4e, 0xce, 0x0e,
126 0x8e, 0x6e, 0xee, 0x2e, 0x52, 0x12, 0x72, 0x32,
127 0xb2, 0x42, 0xc2, 0x02, 0x82, 0x62, 0xe2, 0x22,
128 0xa2, 0x5a, 0xda, 0x1a, 0x9a, 0x7a, 0xfa, 0x3a,
129 0xba, 0xba, 0x4a, 0x4a, 0xca, 0xca, 0x0a, 0x0a,
130 0x8a, 0x8a, 0x6a, 0x6a, 0xea, 0xea, 0x2a, 0x2a
131};
132
133/* alaw -> 4bit compression */
134static u8 alaw_to_4bit[256] = {
135 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
136 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
137 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
138 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
139 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
140 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
141 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
142 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
143 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
144 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
145 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0d, 0x02,
146 0x0e, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
147 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
148 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
149 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
150 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
151 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
152 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
153 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
154 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
155 0x0e, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
156 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x01, 0x0a, 0x05,
157 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
158 0x0d, 0x02, 0x09, 0x07, 0x0f, 0x00, 0x0b, 0x04,
159 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
160 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
161 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
162 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
163 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
164 0x0d, 0x02, 0x08, 0x07, 0x0f, 0x00, 0x0b, 0x04,
165 0x0e, 0x01, 0x0a, 0x05, 0x0f, 0x00, 0x0c, 0x03,
166 0x0d, 0x02, 0x09, 0x06, 0x0f, 0x00, 0x0b, 0x04,
167};
168
169/* 4bit -> alaw decompression */
170static u8 _4bit_to_alaw[16] = {
171 0x5d, 0x51, 0xd9, 0xd7, 0x5f, 0x53, 0xa3, 0x4b,
172 0x2a, 0x3a, 0x22, 0x2e, 0x26, 0x56, 0x20, 0x2c,
173};
174
175/* ulaw -> 4bit compression */
176static u8 ulaw_to_4bit[256] = {
177 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
181 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
182 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
183 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
184 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
185 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
186 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04,
187 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
188 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
189 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
190 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
191 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
192 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x08,
193 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
194 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
195 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
196 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
197 0x0f, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
198 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e,
199 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
200 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d,
201 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
202 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0b, 0x0b,
203 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
204 0x0b, 0x0b, 0x0b, 0x0b, 0x0a, 0x0a, 0x0a, 0x0a,
205 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a,
206 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,
207 0x09, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
208 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
209};
210
211/* 4bit -> ulaw decompression */
212static u8 _4bit_to_ulaw[16] = {
213 0x11, 0x21, 0x31, 0x40, 0x4e, 0x5c, 0x68, 0x71,
214 0xfe, 0xef, 0xe7, 0xdb, 0xcd, 0xbf, 0xaf, 0x9f,
215};
216
217
218/*
219 * Compresses data to the result buffer
220 * The result buffer must hold at least half of the input length.
221 * Samples should come in pairs; an odd trailing byte is saved in *state.
222 */
223int
224l1oip_law_to_4bit(u8 *data, int len, u8 *result, u32 *state)
225{
226 int ii, i = 0, o = 0;
227
228 if (!len)
229 return 0;
230
231 /* send saved byte and first input byte */
232 if (*state) {
233 *result++ = table_com[(((*state)<<8)&0xff00) | (*data++)];
234 len--;
235 o++;
236 }
237
238 ii = len >> 1;
239
240 while (i < ii) {
241 *result++ = table_com[(data[0]<<8) | (data[1])];
242 data += 2;
243 i++;
244 o++;
245 }
246
247	/* if len is odd, we save the last byte for the next call */
248 if (len & 1)
249 *state = 0x100 + *data;
250 else
251 *state = 0;
252
253 return o;
254}
255
256/* Decompress data to the result buffer
257 * The result buffer must hold the number of samples in the packet (2 * input length).
258 * The number of samples in the result is always even.
259 */
260int
261l1oip_4bit_to_law(u8 *data, int len, u8 *result)
262{
263 int i = 0;
264 u16 r;
265
266 while (i < len) {
267 r = table_dec[*data++];
268 *result++ = r>>8;
269 *result++ = r;
270 i++;
271 }
272
273 return len << 1;
274}
275
276
277/*
278 * law conversion
279 */
280int
281l1oip_alaw_to_ulaw(u8 *data, int len, u8 *result)
282{
283 int i = 0;
284
285 while (i < len) {
286 *result++ = alaw_to_ulaw[*data++];
287 i++;
288 }
289
290 return len;
291}
292
293int
294l1oip_ulaw_to_alaw(u8 *data, int len, u8 *result)
295{
296 int i = 0;
297
298 while (i < len) {
299 *result++ = ulaw_to_alaw[*data++];
300 i++;
301 }
302
303 return len;
304}
305
306
307/*
308 * generate/free compression and decompression table
309 */
310void
311l1oip_4bit_free(void)
312{
313 if (table_dec)
314 vfree(table_dec);
315 if (table_com)
316 vfree(table_com);
317 table_com = NULL;
318 table_dec = NULL;
319}
320
321int
322l1oip_4bit_alloc(int ulaw)
323{
324 int i1, i2, c, sample;
325
326	/* in case it is called again */
327 if (table_dec)
328 return 0;
329
330 /* alloc conversion tables */
331 table_com = vmalloc(65536);
332 table_dec = vmalloc(512);
333	if (!table_com || !table_dec) {
334 l1oip_4bit_free();
335 return -ENOMEM;
336 }
337 memset(table_com, 0, 65536);
338 memset(table_dec, 0, 512);
339 /* generate compression table */
340 i1 = 0;
341 while (i1 < 256) {
342 if (ulaw)
343 c = ulaw_to_4bit[i1];
344 else
345 c = alaw_to_4bit[i1];
346 i2 = 0;
347 while (i2 < 256) {
348 table_com[(i1<<8) | i2] |= (c<<4);
349 table_com[(i2<<8) | i1] |= c;
350 i2++;
351 }
352 i1++;
353 }
354
355 /* generate decompression table */
356 i1 = 0;
357 while (i1 < 16) {
358 if (ulaw)
359 sample = _4bit_to_ulaw[i1];
360 else
361 sample = _4bit_to_alaw[i1];
362 i2 = 0;
363 while (i2 < 16) {
364 table_dec[(i1<<4) | i2] |= (sample<<8);
365 table_dec[(i2<<4) | i1] |= sample;
366 i2++;
367 }
368 i1++;
369 }
370
371 return 0;
372}
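/*
 * Editorial sketch, not part of this patch: the intended calling sequence for
 * the table codec, using only the functions declared in l1oip.h. The function
 * name and the sample values are illustrative.
 */
static int codec_roundtrip_example(void)
{
	u8 law[4] = {0x2a, 0x2a, 0xd5, 0xd5};	/* arbitrary a-law samples */
	u8 packed[2], unpacked[4];
	u32 state = 0;
	int n;

	if (l1oip_4bit_alloc(0))		/* build the a-law tables */
		return -ENOMEM;
	n = l1oip_law_to_4bit(law, 4, packed, &state);	/* n == 2 bytes */
	n = l1oip_4bit_to_law(packed, n, unpacked);	/* n == 4 samples */
	l1oip_4bit_free();
	return 0;
}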
373
374
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
new file mode 100644
index 000000000000..155b99780c4f
--- /dev/null
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -0,0 +1,1518 @@
1/*
2
3 * l1oip.c low level driver for tunneling layer 1 over IP
4 *
 5 * NOTE: It is not compatible with TDMoIP or "ISDN over IP".
6 *
7 * Author Andreas Eversberg (jolly@eversberg.eu)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 *
23 */
24
25/* module parameters:
26 * type:
27 Value 1 = BRI
28 Value 2 = PRI
29 Value 3 = BRI (multi channel frame, not supported yet)
30 Value 4 = PRI (multi channel frame, not supported yet)
31 A multi channel frame reduces overhead to a single frame for all
32 b-channels, but increases delay.
33 (NOTE: Multi channel frames are not implemented yet.)
34
35 * codec:
36 Value 0 = transparent (default)
37 Value 1 = transfer ALAW
38 Value 2 = transfer ULAW
39 Value 3 = transfer generic 4 bit compression.
40
41 * ulaw:
42 0 = we use a-Law (default)
43 1 = we use u-Law
44
45 * limit:
46 limitation of B-channels to control bandwidth (1...126)
47 BRI: 1 or 2
48	PRI: 1-30, 31-126 (126, because the d-channel is not counted here)
49	Limited resources are also used for the stack, resulting in fewer channels.
50	It is possible to have more than 30 channels in PRI mode, but this must
51	be supported by the application.
52
53 * ip:
54 byte representation of remote ip address (127.0.0.1 -> 127,0,0,1)
55	If not given or all four bytes are 0, no remote address is set.
56	For multiple interfaces, concatenate the ip addresses (127,0,0,1,127,0,0,1).
57
58 * port:
59 port number (local interface)
60	If not given or 0, port 931 is used for the first instance, 932 for the next...
61 For multiple interfaces, different ports must be given.
62
63 * remoteport:
64 port number (remote interface)
65	If not given or 0, the remote port equals the local port.
66	For multiple interfaces on the same site, different ports must be given.
67
68 * ondemand:
69 0 = fixed (always transmit packets, even when remote side timed out)
70 1 = on demand (only transmit packets, when remote side is detected)
71 the default is 0
72 NOTE: ID must also be set for on demand.
73
74 * id:
75 optional value to identify frames. This value must be equal on both
76 peers and should be random. If omitted or 0, no ID is transmitted.
77
78 * debug:
79	NOTE: only one debug value can be given; it applies to all cards
80 enable debugging (see l1oip.h for debug options)
81
82
83Special mISDN controls:
84
85 op = MISDN_CTRL_SETPEER*
86 p1 = bytes 0-3 : remote IP address in network order (left element first)
87 p2 = bytes 1-2 : remote port in network order (high byte first)
88 optional:
89 p2 = bytes 3-4 : local port in network order (high byte first)
90
91 op = MISDN_CTRL_UNSETPEER*
92
93 * Use l1oipctrl for comfortable setting or removing ip address.
94 (Layer 1 Over IP CTRL)
95
96
97L1oIP-Protocol
98--------------
99
100Frame Header:
101
102 7 6 5 4 3 2 1 0
103+---------------+
104|Ver|T|I|Coding |
105+---------------+
106| ID byte 3 * |
107+---------------+
108| ID byte 2 * |
109+---------------+
110| ID byte 1 * |
111+---------------+
112| ID byte 0 * |
113+---------------+
114|M| Channel |
115+---------------+
116| Length * |
117+---------------+
118| Time Base MSB |
119+---------------+
120| Time Base LSB |
121+---------------+
122| Data.... |
123
124...
125
126| |
127+---------------+
128|M| Channel |
129+---------------+
130| Length * |
131+---------------+
132| Time Base MSB |
133+---------------+
134| Time Base LSB |
135+---------------+
136| Data.... |
137
138...
139
140
141* Only included in some cases.
142
143- Ver = Version
144If the version does not match, the frame must be ignored.
145
146- T = Type of interface
147Must be 0 for S0 or 1 for E1.
148
149- I = Id present
150If bit is set, four ID bytes are included in frame.
151
152- ID = Connection ID
153Additional ID to prevent denial-of-service attacks. It also prevents hijacking of
154connections with dynamic IP. The ID should be random and must not be 0.
155
156- Coding = Type of codec
157Must be 0 for no transcoding, and also for the D-channel and other HDLC frames.
158 1 and 2 are reserved for explicit use of the a-law or u-law codec.
159 3 is used for the generic table compressor.
160
161- M = More channels to come. If this flag is 1, the following byte contains
162the length of the channel data. After the data block, the next channel will
163be defined. The flag for the last channel block (or if only one channel is
164transmitted) must be 0, and no length is given.
165
166- Channel = Channel number
1670 reserved
1681-3 channel data for S0 (3 is D-channel)
1691-31 channel data for E1 (16 is D-channel)
17032-127 channel data for extended E1 (16 is D-channel)
171
172- The length is used if the M-flag is 1. It is used to find the next channel
173inside the frame.
174NOTE: A value of 0 equals 256 bytes of data.
175 -> For larger data blocks, a single frame must be used.
176 -> For larger streams, a single frame or multiple blocks with the same channel
177    ID must be used.
178
179- Time Base = Timestamp of first sample in frame
180The "Time Base" is used to rearrange packets and to detect packet loss.
181The 16 bits are sent in network order (MSB first) and count 1/8000th of a
182second, so the counter wraps around every 8.192 seconds (65536 / 8000). There is no requirement
183for the initial "Time Base", but 0 should be used for the first packet.
184In case of HDLC data, this timestamp counts the packet or byte number.
185
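Worked example (editorial addition, not normative; the ID value is arbitrary):
an S0 keepalive frame with coding 0, ID 0x12345678 present and time base 0 is
the 8 bytes

  0x10 0x12 0x34 0x56 0x78 0x03 0x00 0x00

(version 0, T=0, I=1, coding 0; four ID bytes; M=0 with channel 3, the S0
D-channel; time base MSB and LSB; no payload follows).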
186
187Two Timers:
188
189After initialisation, a 15-second timer is started. Whenever a packet is
190transmitted, the timer is reset to 15 seconds. If the timer expires, an
191empty packet is transmitted. This keeps the connection alive.
192
193When a valid packet is received, a 65-second timer is started and the interface
194becomes ACTIVE. If the timer expires, the interface becomes INACTIVE.
195
196
197Dynamic IP handling:
198
199To allow dynamic IP, the ID must be non-zero. In this case, any packet with the
200correct port number and ID will be accepted. If the remote side changes its IP,
201the new IP is used for all transmitted packets until it changes again.
202
203
204On Demand:
205
206If the ondemand parameter is given, the remote IP is set to 0 on timeout.
207This will stop keepalive traffic to the remote. If the remote is online again,
208traffic will continue to the remote address. This is useful for road warriors.
209This feature only works with an ID set; otherwise it is highly insecure.
210
211
212Socket and Thread
213-----------------
214
215All socket opening and closing is done by a thread.
216When the thread has opened a socket, the hc->socket descriptor is set. Whenever a
217packet is to be sent to the socket, hc->socket must be checked for being non-NULL.
218To prevent changes of the socket descriptor, hc->socket_lock must be held.
219To change the socket, calling l1oip_socket_open() again will safely kill the
220socket thread and create a new one.
221
222*/
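/*
 * Editorial sketch, not part of this patch: the hc->socket seize/give-back
 * pattern described above, condensed into a helper. The helper name is
 * illustrative; the transmit path open-codes this in l1oip_socket_send()
 * below, using the fields declared in l1oip.h.
 */
static struct socket *seize_socket(struct l1oip *hc)
{
	struct socket *socket;

	spin_lock(&hc->socket_lock);
	socket = hc->socket;		/* NULL: closed, or in use by a sender */
	if (socket)
		hc->socket = NULL;	/* mark busy so the thread waits */
	spin_unlock(&hc->socket_lock);
	return socket;			/* caller restores hc->socket when done */
}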
223
224#define L1OIP_VERSION 0 /* 0...3 */
225
226#include <linux/module.h>
227#include <linux/delay.h>
228#include <linux/mISDNif.h>
229#include <linux/mISDNhw.h>
230#include <linux/mISDNdsp.h>
231#include <linux/init.h>
232#include <linux/in.h>
233#include <linux/inet.h>
234#include <linux/workqueue.h>
235#include <linux/kthread.h>
236#include <net/sock.h>
237#include "core.h"
238#include "l1oip.h"
239
240static const char *l1oip_revision = "2.00";
241
242static int l1oip_cnt;
243static spinlock_t l1oip_lock;
244static struct list_head l1oip_ilist;
245
246#define MAX_CARDS 16
247static u_int type[MAX_CARDS];
248static u_int codec[MAX_CARDS];
249static u_int ip[MAX_CARDS*4];
250static u_int port[MAX_CARDS];
251static u_int remoteport[MAX_CARDS];
252static u_int ondemand[MAX_CARDS];
253static u_int limit[MAX_CARDS];
254static u_int id[MAX_CARDS];
255static int debug;
256static int ulaw;
257
258MODULE_AUTHOR("Andreas Eversberg");
259MODULE_LICENSE("GPL");
260module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
261module_param_array(codec, uint, NULL, S_IRUGO | S_IWUSR);
262module_param_array(ip, uint, NULL, S_IRUGO | S_IWUSR);
263module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
264module_param_array(remoteport, uint, NULL, S_IRUGO | S_IWUSR);
265module_param_array(ondemand, uint, NULL, S_IRUGO | S_IWUSR);
266module_param_array(limit, uint, NULL, S_IRUGO | S_IWUSR);
267module_param_array(id, uint, NULL, S_IRUGO | S_IWUSR);
268module_param(ulaw, uint, S_IRUGO | S_IWUSR);
269module_param(debug, uint, S_IRUGO | S_IWUSR);
270
271/*
272 * send a frame via socket, if open and restart timer
273 */
274static int
275l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
276 u16 timebase, u8 *buf, int len)
277{
278 u8 *p;
279 int multi = 0;
280 u8 frame[len+32];
281 struct socket *socket = NULL;
282 mm_segment_t oldfs;
283
284 if (debug & DEBUG_L1OIP_MSG)
285 printk(KERN_DEBUG "%s: sending data to socket (len = %d)\n",
286 __func__, len);
287
288 p = frame;
289
290 /* restart timer */
291 if ((int)(hc->keep_tl.expires-jiffies) < 5*HZ) {
292 del_timer(&hc->keep_tl);
293 hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE*HZ;
294 add_timer(&hc->keep_tl);
295 } else
296 hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE*HZ;
297
298 if (debug & DEBUG_L1OIP_MSG)
299 printk(KERN_DEBUG "%s: resetting timer\n", __func__);
300
301 /* drop if we have no remote ip or port */
302 if (!hc->sin_remote.sin_addr.s_addr || !hc->sin_remote.sin_port) {
303 if (debug & DEBUG_L1OIP_MSG)
304 printk(KERN_DEBUG "%s: dropping frame, because remote "
305 "IP is not set.\n", __func__);
306 return len;
307 }
308
309 /* assemble frame */
310 *p++ = (L1OIP_VERSION<<6) /* version and coding */
311 | (hc->pri?0x20:0x00) /* type */
312 | (hc->id?0x10:0x00) /* id */
313 | localcodec;
314 if (hc->id) {
315 *p++ = hc->id>>24; /* id */
316 *p++ = hc->id>>16;
317 *p++ = hc->id>>8;
318 *p++ = hc->id;
319 }
320	*p++ = ((multi == 1) ? 0x80 : 0x00) + channel; /* m-flag, channel */
321 if (multi == 1)
322 *p++ = len; /* length */
323 *p++ = timebase>>8; /* time base */
324 *p++ = timebase;
325
326 if (buf && len) { /* add data to frame */
327 if (localcodec == 1 && ulaw)
328 l1oip_ulaw_to_alaw(buf, len, p);
329 else if (localcodec == 2 && !ulaw)
330 l1oip_alaw_to_ulaw(buf, len, p);
331 else if (localcodec == 3)
332 len = l1oip_law_to_4bit(buf, len, p,
333 &hc->chan[channel].codecstate);
334 else
335 memcpy(p, buf, len);
336 }
337 len += p - frame;
338
339 /* check for socket in safe condition */
340 spin_lock(&hc->socket_lock);
341 if (!hc->socket) {
342 spin_unlock(&hc->socket_lock);
343 return 0;
344 }
345 /* seize socket */
346 socket = hc->socket;
347 hc->socket = NULL;
348 spin_unlock(&hc->socket_lock);
349 /* send packet */
350 if (debug & DEBUG_L1OIP_MSG)
351 printk(KERN_DEBUG "%s: sending packet to socket (len "
352 "= %d)\n", __func__, len);
353 hc->sendiov.iov_base = frame;
354 hc->sendiov.iov_len = len;
355 oldfs = get_fs();
356 set_fs(KERNEL_DS);
357 len = sock_sendmsg(socket, &hc->sendmsg, len);
358 set_fs(oldfs);
359 /* give socket back */
360 hc->socket = socket; /* no locking required */
361
362 return len;
363}
364
365
366/*
367 * receive channel data from socket
368 */
369static void
370l1oip_socket_recv(struct l1oip *hc, u8 remotecodec, u8 channel, u16 timebase,
371 u8 *buf, int len)
372{
373 struct sk_buff *nskb;
374 struct bchannel *bch;
375 struct dchannel *dch;
376 u8 *p;
377 u32 rx_counter;
378
379 if (len == 0) {
380 if (debug & DEBUG_L1OIP_MSG)
381 printk(KERN_DEBUG "%s: received empty keepalive data, "
382 "ignoring\n", __func__);
383 return;
384 }
385
386 if (debug & DEBUG_L1OIP_MSG)
387 printk(KERN_DEBUG "%s: received data, sending to mISDN (%d)\n",
388 __func__, len);
389
390 if (channel < 1 || channel > 127) {
391 printk(KERN_WARNING "%s: packet error - channel %d out of "
392 "range\n", __func__, channel);
393 return;
394 }
395 dch = hc->chan[channel].dch;
396 bch = hc->chan[channel].bch;
397 if (!dch && !bch) {
398 printk(KERN_WARNING "%s: packet error - channel %d not in "
399 "stack\n", __func__, channel);
400 return;
401 }
402
403 /* prepare message */
404 nskb = mI_alloc_skb((remotecodec == 3)?(len<<1):len, GFP_ATOMIC);
405 if (!nskb) {
406 printk(KERN_ERR "%s: No mem for skb.\n", __func__);
407 return;
408 }
409 p = skb_put(nskb, (remotecodec == 3)?(len<<1):len);
410
411 if (remotecodec == 1 && ulaw)
412 l1oip_alaw_to_ulaw(buf, len, p);
413 else if (remotecodec == 2 && !ulaw)
414 l1oip_ulaw_to_alaw(buf, len, p);
415 else if (remotecodec == 3)
416 len = l1oip_4bit_to_law(buf, len, p);
417 else
418 memcpy(p, buf, len);
419
420 /* send message up */
421 if (dch && len >= 2) {
422 dch->rx_skb = nskb;
423 recv_Dchannel(dch);
424 }
425 if (bch) {
426 /* expand 16 bit sequence number to 32 bit sequence number */
427 rx_counter = hc->chan[channel].rx_counter;
428 if (((s16)(timebase - rx_counter)) >= 0) {
429 /* time has changed forward */
430 if (timebase >= (rx_counter & 0xffff))
431 rx_counter =
432 (rx_counter & 0xffff0000) | timebase;
433 else
434 rx_counter = ((rx_counter & 0xffff0000)+0x10000)
435 | timebase;
436 } else {
437 /* time has changed backwards */
438 if (timebase < (rx_counter & 0xffff))
439 rx_counter =
440 (rx_counter & 0xffff0000) | timebase;
441 else
442 rx_counter = ((rx_counter & 0xffff0000)-0x10000)
443 | timebase;
444 }
445 hc->chan[channel].rx_counter = rx_counter;
446
447#ifdef REORDER_DEBUG
448 if (hc->chan[channel].disorder_flag) {
449 struct sk_buff *skb;
450 int cnt;
451 skb = hc->chan[channel].disorder_skb;
452 hc->chan[channel].disorder_skb = nskb;
453 nskb = skb;
454 cnt = hc->chan[channel].disorder_cnt;
455 hc->chan[channel].disorder_cnt = rx_counter;
456 rx_counter = cnt;
457 }
458 hc->chan[channel].disorder_flag ^= 1;
459 if (nskb)
460#endif
461 queue_ch_frame(&bch->ch, PH_DATA_IND, rx_counter, nskb);
462 }
463}
464
465
466/*
467 * parse frame and extract channel data
468 */
469static void
470l1oip_socket_parse(struct l1oip *hc, struct sockaddr_in *sin, u8 *buf, int len)
471{
472 u32 id;
473 u8 channel;
474 u8 remotecodec;
475 u16 timebase;
476 int m, mlen;
477 int len_start = len; /* initial frame length */
478 struct dchannel *dch = hc->chan[hc->d_idx].dch;
479
480 if (debug & DEBUG_L1OIP_MSG)
481 printk(KERN_DEBUG "%s: received frame, parsing... (%d)\n",
482 __func__, len);
483
484	/* check length */
485 if (len < 1+1+2) {
486 printk(KERN_WARNING "%s: packet error - length %d below "
487 "4 bytes\n", __func__, len);
488 return;
489 }
490
491 /* check version */
492 if (((*buf)>>6) != L1OIP_VERSION) {
493 printk(KERN_WARNING "%s: packet error - unknown version %d\n",
494 __func__, buf[0]>>6);
495 return;
496 }
497
498 /* check type */
499 if (((*buf)&0x20) && !hc->pri) {
500 printk(KERN_WARNING "%s: packet error - received E1 packet "
501 "on S0 interface\n", __func__);
502 return;
503 }
504 if (!((*buf)&0x20) && hc->pri) {
505 printk(KERN_WARNING "%s: packet error - received S0 packet "
506 "on E1 interface\n", __func__);
507 return;
508 }
509
510 /* get id flag */
511 id = (*buf>>4)&1;
512
513 /* check coding */
514 remotecodec = (*buf) & 0x0f;
515 if (remotecodec > 3) {
516 printk(KERN_WARNING "%s: packet error - remotecodec %d "
517 "unsupported\n", __func__, remotecodec);
518 return;
519 }
520 buf++;
521 len--;
522
523 /* check id */
524 if (id) {
525 if (!hc->id) {
526 printk(KERN_WARNING "%s: packet error - packet has id "
527				"0x%x, but we have none\n", __func__, id);
528 return;
529 }
530 if (len < 4) {
531 printk(KERN_WARNING "%s: packet error - packet too "
532 "short for ID value\n", __func__);
533 return;
534 }
535 id = (*buf++) << 24;
536 id += (*buf++) << 16;
537 id += (*buf++) << 8;
538 id += (*buf++);
539 len -= 4;
540
541 if (id != hc->id) {
542 printk(KERN_WARNING "%s: packet error - ID mismatch, "
543 "got 0x%x, we 0x%x\n",
544 __func__, id, hc->id);
545 return;
546 }
547 } else {
548 if (hc->id) {
549 printk(KERN_WARNING "%s: packet error - packet has no "
550				"ID, but we expect one\n", __func__);
551 return;
552 }
553 }
554
555multiframe:
556 if (len < 1) {
557 printk(KERN_WARNING "%s: packet error - packet too short, "
558 "channel expected at position %d.\n",
559 __func__, len-len_start+1);
560 return;
561 }
562
563 /* get channel and multiframe flag */
564 channel = *buf&0x7f;
565 m = *buf >> 7;
566 buf++;
567 len--;
568
569 /* check length on multiframe */
570 if (m) {
571 if (len < 1) {
572 printk(KERN_WARNING "%s: packet error - packet too "
573 "short, length expected at position %d.\n",
574 __func__, len_start-len-1);
575 return;
576 }
577
578 mlen = *buf++;
579 len--;
580 if (mlen == 0)
581 mlen = 256;
582 if (len < mlen+3) {
583 printk(KERN_WARNING "%s: packet error - length %d at "
584 "position %d exceeds total length %d.\n",
585 __func__, mlen, len_start-len-1, len_start);
586 return;
587 }
588 if (len == mlen+3) {
589 printk(KERN_WARNING "%s: packet error - length %d at "
590 "position %d will not allow additional "
591 "packet.\n",
592 __func__, mlen, len_start-len+1);
593 return;
594 }
595 } else
596		mlen = len-2; /* single frame, subtract the time base */
597
598 if (len < 2) {
599 printk(KERN_WARNING "%s: packet error - packet too short, time "
600 "base expected at position %d.\n",
601 __func__, len-len_start+1);
602 return;
603 }
604
605 /* get time base */
606 timebase = (*buf++) << 8;
607 timebase |= (*buf++);
608 len -= 2;
609
610 /* if inactive, we send up a PH_ACTIVATE and activate */
611 if (!test_bit(FLG_ACTIVE, &dch->Flags)) {
612 if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
613			printk(KERN_DEBUG "%s: interface becomes active due to "
614 "received packet\n", __func__);
615 test_and_set_bit(FLG_ACTIVE, &dch->Flags);
616 _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
617 NULL, GFP_ATOMIC);
618 }
619
620 /* distribute packet */
621 l1oip_socket_recv(hc, remotecodec, channel, timebase, buf, mlen);
622 buf += mlen;
623 len -= mlen;
624
625 /* multiframe */
626 if (m)
627 goto multiframe;
628
629 /* restart timer */
630 if ((int)(hc->timeout_tl.expires-jiffies) < 5*HZ || !hc->timeout_on) {
631 hc->timeout_on = 1;
632 del_timer(&hc->timeout_tl);
633 hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT*HZ;
634 add_timer(&hc->timeout_tl);
635 } else /* only adjust timer */
636 hc->timeout_tl.expires = jiffies + L1OIP_TIMEOUT*HZ;
637
638 /* if ip or source port changes */
639 if ((hc->sin_remote.sin_addr.s_addr != sin->sin_addr.s_addr)
640 || (hc->sin_remote.sin_port != sin->sin_port)) {
641 if (debug & DEBUG_L1OIP_SOCKET)
642 printk(KERN_DEBUG "%s: remote address changes from "
643 "0x%08x to 0x%08x (port %d to %d)\n", __func__,
644 ntohl(hc->sin_remote.sin_addr.s_addr),
645 ntohl(sin->sin_addr.s_addr),
646 ntohs(hc->sin_remote.sin_port),
647 ntohs(sin->sin_port));
648 hc->sin_remote.sin_addr.s_addr = sin->sin_addr.s_addr;
649 hc->sin_remote.sin_port = sin->sin_port;
650 }
651}
652
653
654/*
655 * socket stuff
656 */
657static int
658l1oip_socket_thread(void *data)
659{
660 struct l1oip *hc = (struct l1oip *)data;
661 int ret = 0;
662 struct msghdr msg;
663 struct iovec iov;
664 mm_segment_t oldfs;
665 struct sockaddr_in sin_rx;
666 unsigned char recvbuf[1500];
667 int recvlen;
668 struct socket *socket = NULL;
669 DECLARE_COMPLETION(wait);
670
671 /* make daemon */
672 allow_signal(SIGTERM);
673
674 /* create socket */
675 if (sock_create(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &socket)) {
676 printk(KERN_ERR "%s: Failed to create socket.\n", __func__);
677 return -EIO;
678 }
679
680 /* set incoming address */
681 hc->sin_local.sin_family = AF_INET;
682 hc->sin_local.sin_addr.s_addr = INADDR_ANY;
683 hc->sin_local.sin_port = htons((unsigned short)hc->localport);
684
685 /* set outgoing address */
686 hc->sin_remote.sin_family = AF_INET;
687 hc->sin_remote.sin_addr.s_addr = htonl(hc->remoteip);
688 hc->sin_remote.sin_port = htons((unsigned short)hc->remoteport);
689
690	/* bind to incoming port */
691 if (socket->ops->bind(socket, (struct sockaddr *)&hc->sin_local,
692 sizeof(hc->sin_local))) {
693 printk(KERN_ERR "%s: Failed to bind socket to port %d.\n",
694 __func__, hc->localport);
695 ret = -EINVAL;
696 goto fail;
697 }
698
699 /* check sk */
700 if (socket->sk == NULL) {
701 printk(KERN_ERR "%s: socket->sk == NULL\n", __func__);
702 ret = -EIO;
703 goto fail;
704 }
705
706 /* build receive message */
707 msg.msg_name = &sin_rx;
708 msg.msg_namelen = sizeof(sin_rx);
709 msg.msg_control = NULL;
710 msg.msg_controllen = 0;
711 msg.msg_iov = &iov;
712 msg.msg_iovlen = 1;
713
714 /* build send message */
715 hc->sendmsg.msg_name = &hc->sin_remote;
716 hc->sendmsg.msg_namelen = sizeof(hc->sin_remote);
717 hc->sendmsg.msg_control = NULL;
718 hc->sendmsg.msg_controllen = 0;
719 hc->sendmsg.msg_iov = &hc->sendiov;
720 hc->sendmsg.msg_iovlen = 1;
721
722 /* give away socket */
723 spin_lock(&hc->socket_lock);
724 hc->socket = socket;
725 spin_unlock(&hc->socket_lock);
726
727 /* read loop */
728 if (debug & DEBUG_L1OIP_SOCKET)
729 printk(KERN_DEBUG "%s: socket created and open\n",
730 __func__);
731 while (!signal_pending(current)) {
732 iov.iov_base = recvbuf;
733 iov.iov_len = sizeof(recvbuf);
734 oldfs = get_fs();
735 set_fs(KERNEL_DS);
736 recvlen = sock_recvmsg(socket, &msg, sizeof(recvbuf), 0);
737 set_fs(oldfs);
738 if (recvlen > 0) {
739 l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
740 } else {
741 if (debug & DEBUG_L1OIP_SOCKET)
742 printk(KERN_WARNING "%s: broken pipe on socket\n",
743 __func__);
744 }
745 }
746
747 /* get socket back, check first if in use, maybe by send function */
748 spin_lock(&hc->socket_lock);
749 /* if hc->socket is NULL, it is in use until it is given back */
750 while (!hc->socket) {
751 spin_unlock(&hc->socket_lock);
752 schedule_timeout(HZ/10);
753 spin_lock(&hc->socket_lock);
754 }
755 hc->socket = NULL;
756 spin_unlock(&hc->socket_lock);
757
758 if (debug & DEBUG_L1OIP_SOCKET)
759 printk(KERN_DEBUG "%s: socket thread terminating\n",
760 __func__);
761
762fail:
763 /* close socket */
764 if (socket)
765 sock_release(socket);
766
767 /* if we got killed, signal completion */
768 complete(&hc->socket_complete);
769 hc->socket_thread = NULL; /* show termination of thread */
770
771 if (debug & DEBUG_L1OIP_SOCKET)
772 printk(KERN_DEBUG "%s: socket thread terminated\n",
773 __func__);
774 return ret;
775}
776
777static void
778l1oip_socket_close(struct l1oip *hc)
779{
780 /* kill thread */
781 if (hc->socket_thread) {
782 if (debug & DEBUG_L1OIP_SOCKET)
783 printk(KERN_DEBUG "%s: socket thread exists, "
784 "killing...\n", __func__);
785 send_sig(SIGTERM, hc->socket_thread, 0);
786 wait_for_completion(&hc->socket_complete);
787 }
788}
789
790static int
791l1oip_socket_open(struct l1oip *hc)
792{
793 /* in case of reopen, we need to close first */
794 l1oip_socket_close(hc);
795
796 init_completion(&hc->socket_complete);
797
798 /* create receive process */
799 hc->socket_thread = kthread_run(l1oip_socket_thread, hc, "l1oip_%s",
800 hc->name);
801 if (IS_ERR(hc->socket_thread)) {
802 int err = PTR_ERR(hc->socket_thread);
803 printk(KERN_ERR "%s: Failed (%d) to create socket process.\n",
804 __func__, err);
805 hc->socket_thread = NULL;
806 sock_release(hc->socket);
807 return err;
808 }
809 if (debug & DEBUG_L1OIP_SOCKET)
810 printk(KERN_DEBUG "%s: socket thread created\n", __func__);
811
812 return 0;
813}
814
815
816static void
817l1oip_send_bh(struct work_struct *work)
818{
819 struct l1oip *hc = container_of(work, struct l1oip, workq);
820
821 if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
822 printk(KERN_DEBUG "%s: keepalive timer expired, sending empty "
823 "frame on dchannel\n", __func__);
824
825 /* send an empty l1oip frame at D-channel */
826 l1oip_socket_send(hc, 0, hc->d_idx, 0, 0, NULL, 0);
827}
828
829
830/*
831 * timer stuff
832 */
833static void
834l1oip_keepalive(void *data)
835{
836 struct l1oip *hc = (struct l1oip *)data;
837
838 schedule_work(&hc->workq);
839}
840
841static void
842l1oip_timeout(void *data)
843{
844 struct l1oip *hc = (struct l1oip *)data;
845 struct dchannel *dch = hc->chan[hc->d_idx].dch;
846
847 if (debug & DEBUG_L1OIP_MSG)
848 printk(KERN_DEBUG "%s: timeout timer expired, turn layer one "
849 "down.\n", __func__);
850
851 hc->timeout_on = 0; /* state that timer must be initialized next time */
852
853 /* if timeout, we send up a PH_DEACTIVATE and deactivate */
854 if (test_bit(FLG_ACTIVE, &dch->Flags)) {
855 if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
856			printk(KERN_DEBUG "%s: interface becomes deactivated "
857 "due to timeout\n", __func__);
858 test_and_clear_bit(FLG_ACTIVE, &dch->Flags);
859 _queue_data(&dch->dev.D, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
860 NULL, GFP_ATOMIC);
861 }
862
863 /* if we have ondemand set, we remove ip address */
864 if (hc->ondemand) {
865 if (debug & DEBUG_L1OIP_MSG)
866 printk(KERN_DEBUG "%s: on demand causes ip address to "
867 "be removed\n", __func__);
868 hc->sin_remote.sin_addr.s_addr = 0;
869 }
870}
871
872
873/*
874 * message handling
875 */
876static int
877handle_dmsg(struct mISDNchannel *ch, struct sk_buff *skb)
878{
879 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
880 struct dchannel *dch = container_of(dev, struct dchannel, dev);
881 struct l1oip *hc = dch->hw;
882 struct mISDNhead *hh = mISDN_HEAD_P(skb);
883 int ret = -EINVAL;
884 int l, ll;
885 unsigned char *p;
886
887 switch (hh->prim) {
888 case PH_DATA_REQ:
889 if (skb->len < 1) {
890 printk(KERN_WARNING "%s: skb too small\n",
891 __func__);
892 break;
893 }
894 if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) {
895 printk(KERN_WARNING "%s: skb too large\n",
896 __func__);
897 break;
898 }
899 /* send frame */
900 p = skb->data;
901 l = skb->len;
902 while (l) {
903 ll = (l < L1OIP_MAX_PERFRAME)?l:L1OIP_MAX_PERFRAME;
904 l1oip_socket_send(hc, 0, dch->slot, 0,
905 hc->chan[dch->slot].tx_counter++, p, ll);
906 p += ll;
907 l -= ll;
908 }
909 skb_trim(skb, 0);
910 queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
911 return 0;
912 case PH_ACTIVATE_REQ:
913 if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
914 printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n"
915 , __func__, dch->slot, hc->b_num+1);
916 skb_trim(skb, 0);
917 if (test_bit(FLG_ACTIVE, &dch->Flags))
918 queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
919 else
920 queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
921 return 0;
922 case PH_DEACTIVATE_REQ:
923 if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
924 printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d "
925 "(1..%d)\n", __func__, dch->slot,
926 hc->b_num+1);
927 skb_trim(skb, 0);
928 if (test_bit(FLG_ACTIVE, &dch->Flags))
929 queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
930 else
931 queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
932 return 0;
933 }
934 if (!ret)
935 dev_kfree_skb(skb);
936 return ret;
937}
938
939static int
940channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
941{
942 int ret = 0;
943 struct l1oip *hc = dch->hw;
944
945 switch (cq->op) {
946 case MISDN_CTRL_GETOP:
947 cq->op = MISDN_CTRL_SETPEER | MISDN_CTRL_UNSETPEER;
948 break;
949 case MISDN_CTRL_SETPEER:
950 hc->remoteip = (u32)cq->p1;
951 hc->remoteport = cq->p2 & 0xffff;
952 hc->localport = cq->p2 >> 16;
953 if (!hc->remoteport)
954 hc->remoteport = hc->localport;
955 if (debug & DEBUG_L1OIP_SOCKET)
956 printk(KERN_DEBUG "%s: got new ip address from user "
957 "space.\n", __func__);
958 l1oip_socket_open(hc);
959 break;
960 case MISDN_CTRL_UNSETPEER:
961 if (debug & DEBUG_L1OIP_SOCKET)
962 printk(KERN_DEBUG "%s: removing ip address.\n",
963 __func__);
964 hc->remoteip = 0;
965 l1oip_socket_open(hc);
966 break;
967 default:
968 printk(KERN_WARNING "%s: unknown Op %x\n",
969 __func__, cq->op);
970 ret = -EINVAL;
971 break;
972 }
973 return ret;
974}
975
976static int
977open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
978{
979 if (debug & DEBUG_HW_OPEN)
980 printk(KERN_DEBUG "%s: dev(%d) open from %p\n", __func__,
981 dch->dev.id, __builtin_return_address(0));
982 if (rq->protocol == ISDN_P_NONE)
983 return -EINVAL;
984 if ((dch->dev.D.protocol != ISDN_P_NONE) &&
985 (dch->dev.D.protocol != rq->protocol)) {
986 if (debug & DEBUG_HW_OPEN)
987 printk(KERN_WARNING "%s: change protocol %x to %x\n",
988 __func__, dch->dev.D.protocol, rq->protocol);
989 }
990 if (dch->dev.D.protocol != rq->protocol)
991 dch->dev.D.protocol = rq->protocol;
992
993 if (test_bit(FLG_ACTIVE, &dch->Flags)) {
994 _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY,
995 0, NULL, GFP_KERNEL);
996 }
997 rq->ch = &dch->dev.D;
998 if (!try_module_get(THIS_MODULE))
999 printk(KERN_WARNING "%s:cannot get module\n", __func__);
1000 return 0;
1001}
1002
1003static int
1004open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq)
1005{
1006 struct bchannel *bch;
1007 int ch;
1008
1009 if (!test_bit(rq->adr.channel & 0x1f,
1010 &dch->dev.channelmap[rq->adr.channel >> 5]))
1011 return -EINVAL;
1012 if (rq->protocol == ISDN_P_NONE)
1013 return -EINVAL;
1014 ch = rq->adr.channel; /* BRI: 1=B1 2=B2 PRI: 1..15,17.. */
1015 bch = hc->chan[ch].bch;
1016 if (!bch) {
1017 printk(KERN_ERR "%s:internal error ch %d has no bch\n",
1018 __func__, ch);
1019 return -EINVAL;
1020 }
1021 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
1022		return -EBUSY; /* b-channel can only be opened once */
1023 bch->ch.protocol = rq->protocol;
1024 rq->ch = &bch->ch;
1025 if (!try_module_get(THIS_MODULE))
1026 printk(KERN_WARNING "%s:cannot get module\n", __func__);
1027 return 0;
1028}
1029
1030static int
1031l1oip_dctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1032{
1033 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
1034 struct dchannel *dch = container_of(dev, struct dchannel, dev);
1035 struct l1oip *hc = dch->hw;
1036 struct channel_req *rq;
1037 int err = 0;
1038
1039 if (dch->debug & DEBUG_HW)
1040 printk(KERN_DEBUG "%s: cmd:%x %p\n",
1041 __func__, cmd, arg);
1042 switch (cmd) {
1043 case OPEN_CHANNEL:
1044 rq = arg;
1045 switch (rq->protocol) {
1046 case ISDN_P_TE_S0:
1047 case ISDN_P_NT_S0:
1048 if (hc->pri) {
1049 err = -EINVAL;
1050 break;
1051 }
1052 err = open_dchannel(hc, dch, rq);
1053 break;
1054 case ISDN_P_TE_E1:
1055 case ISDN_P_NT_E1:
1056 if (!hc->pri) {
1057 err = -EINVAL;
1058 break;
1059 }
1060 err = open_dchannel(hc, dch, rq);
1061 break;
1062 default:
1063 err = open_bchannel(hc, dch, rq);
1064 }
1065 break;
1066 case CLOSE_CHANNEL:
1067 if (debug & DEBUG_HW_OPEN)
1068 printk(KERN_DEBUG "%s: dev(%d) close from %p\n",
1069 __func__, dch->dev.id,
1070 __builtin_return_address(0));
1071 module_put(THIS_MODULE);
1072 break;
1073 case CONTROL_CHANNEL:
1074 err = channel_dctrl(dch, arg);
1075 break;
1076 default:
1077 if (dch->debug & DEBUG_HW)
1078 printk(KERN_DEBUG "%s: unknown command %x\n",
1079 __func__, cmd);
1080 err = -EINVAL;
1081 }
1082 return err;
1083}
1084
1085static int
1086handle_bmsg(struct mISDNchannel *ch, struct sk_buff *skb)
1087{
1088 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1089 struct l1oip *hc = bch->hw;
1090 int ret = -EINVAL;
1091 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1092 int l, ll, i;
1093 unsigned char *p;
1094
1095 switch (hh->prim) {
1096 case PH_DATA_REQ:
1097 if (skb->len <= 0) {
1098 printk(KERN_WARNING "%s: skb too small\n",
1099 __func__);
1100 break;
1101 }
1102 if (skb->len > MAX_DFRAME_LEN_L1 || skb->len > L1OIP_MAX_LEN) {
1103 printk(KERN_WARNING "%s: skb too large\n",
1104 __func__);
1105 break;
1106 }
1107 /* check for AIS / ulaw-silence */
1108 p = skb->data;
1109 l = skb->len;
1110 for (i = 0; i < l; i++) {
1111 if (*p++ != 0xff)
1112 break;
1113 }
1114 if (i == l) {
1115 if (debug & DEBUG_L1OIP_MSG)
1116 printk(KERN_DEBUG "%s: got AIS, not sending, "
1117 "but counting\n", __func__);
1118 hc->chan[bch->slot].tx_counter += l;
1119 skb_trim(skb, 0);
1120 queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
1121 return 0;
1122 }
1123 /* check for silence */
1124 p = skb->data;
1125 l = skb->len;
1126 for (i = 0; i < l; i++) {
1127 if (*p++ != 0x2a)
1128 break;
1129 }
1130 if (i == l) {
1131 if (debug & DEBUG_L1OIP_MSG)
1132 printk(KERN_DEBUG "%s: got silence, not sending"
1133 ", but counting\n", __func__);
1134 hc->chan[bch->slot].tx_counter += l;
1135 skb_trim(skb, 0);
1136 queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
1137 return 0;
1138 }
1139
1140 /* send frame */
1141 p = skb->data;
1142 l = skb->len;
1143 while (l) {
1144 ll = (l < L1OIP_MAX_PERFRAME)?l:L1OIP_MAX_PERFRAME;
1145 l1oip_socket_send(hc, hc->codec, bch->slot, 0,
1146 hc->chan[bch->slot].tx_counter, p, ll);
1147 hc->chan[bch->slot].tx_counter += ll;
1148 p += ll;
1149 l -= ll;
1150 }
1151 skb_trim(skb, 0);
1152 queue_ch_frame(ch, PH_DATA_CNF, hh->id, skb);
1153 return 0;
1154 case PH_ACTIVATE_REQ:
1155 if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
1156 printk(KERN_DEBUG "%s: PH_ACTIVATE channel %d (1..%d)\n"
1157 , __func__, bch->slot, hc->b_num+1);
1158 hc->chan[bch->slot].codecstate = 0;
1159 test_and_set_bit(FLG_ACTIVE, &bch->Flags);
1160 skb_trim(skb, 0);
1161 queue_ch_frame(ch, PH_ACTIVATE_IND, hh->id, skb);
1162 return 0;
1163 case PH_DEACTIVATE_REQ:
1164 if (debug & (DEBUG_L1OIP_MSG|DEBUG_L1OIP_SOCKET))
1165 printk(KERN_DEBUG "%s: PH_DEACTIVATE channel %d "
1166 "(1..%d)\n", __func__, bch->slot,
1167 hc->b_num+1);
1168 test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
1169 skb_trim(skb, 0);
1170 queue_ch_frame(ch, PH_DEACTIVATE_IND, hh->id, skb);
1171 return 0;
1172 }
1173 if (!ret)
1174 dev_kfree_skb(skb);
1175 return ret;
1176}
1177
1178static int
1179channel_bctrl(struct bchannel *bch, struct mISDN_ctrl_req *cq)
1180{
1181 int ret = 0;
1182 struct dsp_features *features =
1183 (struct dsp_features *)(*((u_long *)&cq->p1));
1184
1185 switch (cq->op) {
1186 case MISDN_CTRL_GETOP:
1187 cq->op = MISDN_CTRL_HW_FEATURES_OP;
1188 break;
1189 case MISDN_CTRL_HW_FEATURES: /* fill features structure */
1190 if (debug & DEBUG_L1OIP_MSG)
1191 printk(KERN_DEBUG "%s: HW_FEATURE request\n",
1192 __func__);
1193 /* create confirm */
1194 features->unclocked = 1;
1195 features->unordered = 1;
1196 break;
1197 default:
1198 printk(KERN_WARNING "%s: unknown Op %x\n",
1199 __func__, cq->op);
1200 ret = -EINVAL;
1201 break;
1202 }
1203 return ret;
1204}
1205
1206static int
1207l1oip_bctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1208{
1209 struct bchannel *bch = container_of(ch, struct bchannel, ch);
1210 int err = -EINVAL;
1211
1212 if (bch->debug & DEBUG_HW)
1213 printk(KERN_DEBUG "%s: cmd:%x %p\n",
1214 __func__, cmd, arg);
1215 switch (cmd) {
1216 case CLOSE_CHANNEL:
1217 test_and_clear_bit(FLG_OPEN, &bch->Flags);
1218 test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
1219 ch->protocol = ISDN_P_NONE;
1220 ch->peer = NULL;
1221 module_put(THIS_MODULE);
1222 err = 0;
1223 break;
1224 case CONTROL_CHANNEL:
1225 err = channel_bctrl(bch, arg);
1226 break;
1227 default:
1228 printk(KERN_WARNING "%s: unknown prim(%x)\n",
1229 __func__, cmd);
1230 }
1231 return err;
1232}
1233
1234
1235/*
1236 * cleanup module and stack
1237 */
1238static void
1239release_card(struct l1oip *hc)
1240{
1241 int ch;
1242
1243 if (timer_pending(&hc->keep_tl))
1244 del_timer(&hc->keep_tl);
1245
1246 if (timer_pending(&hc->timeout_tl))
1247 del_timer(&hc->timeout_tl);
1248
1249 if (hc->socket_thread)
1250 l1oip_socket_close(hc);
1251
1252 if (hc->registered && hc->chan[hc->d_idx].dch)
1253 mISDN_unregister_device(&hc->chan[hc->d_idx].dch->dev);
1254 for (ch = 0; ch < 128; ch++) {
1255 if (hc->chan[ch].dch) {
1256 mISDN_freedchannel(hc->chan[ch].dch);
1257 kfree(hc->chan[ch].dch);
1258 }
1259 if (hc->chan[ch].bch) {
1260 mISDN_freebchannel(hc->chan[ch].bch);
1261 kfree(hc->chan[ch].bch);
1262#ifdef REORDER_DEBUG
1263 if (hc->chan[ch].disorder_skb)
1264 dev_kfree_skb(hc->chan[ch].disorder_skb);
1265#endif
1266 }
1267 }
1268
1269 spin_lock(&l1oip_lock);
1270 list_del(&hc->list);
1271 spin_unlock(&l1oip_lock);
1272
1273 kfree(hc);
1274}
1275
1276static void
1277l1oip_cleanup(void)
1278{
1279 struct l1oip *hc, *next;
1280
1281 list_for_each_entry_safe(hc, next, &l1oip_ilist, list)
1282 release_card(hc);
1283
1284 l1oip_4bit_free();
1285}
1286
1287
1288/*
1289 * module and stack init
1290 */
1291static int
1292init_card(struct l1oip *hc, int pri, int bundle)
1293{
1294 struct dchannel *dch;
1295 struct bchannel *bch;
1296 int ret;
1297 int i, ch;
1298
1299 spin_lock_init(&hc->socket_lock);
1300 hc->idx = l1oip_cnt;
1301 hc->pri = pri;
1302 hc->d_idx = pri?16:3;
1303 hc->b_num = pri?30:2;
1304 hc->bundle = bundle;
1305 if (hc->pri)
1306 sprintf(hc->name, "l1oip-e1.%d", l1oip_cnt + 1);
1307 else
1308 sprintf(hc->name, "l1oip-s0.%d", l1oip_cnt + 1);
1309
1310 switch (codec[l1oip_cnt]) {
1311 case 0: /* as is */
1312 case 1: /* alaw */
1313 case 2: /* ulaw */
1314 case 3: /* 4bit */
1315 break;
1316 default:
1317 printk(KERN_ERR "Codec(%d) not supported.\n",
1318 codec[l1oip_cnt]);
1319 return -EINVAL;
1320 }
1321 hc->codec = codec[l1oip_cnt];
1322 if (debug & DEBUG_L1OIP_INIT)
1323 printk(KERN_DEBUG "%s: using codec %d\n",
1324 __func__, hc->codec);
1325
1326 if (id[l1oip_cnt] == 0) {
1327 printk(KERN_WARNING "Warning: No 'id' value given or "
1328			"0, this is highly insecure. Please use a 32 "
1329			"bit random number 0x...\n");
1330 }
1331 hc->id = id[l1oip_cnt];
1332 if (debug & DEBUG_L1OIP_INIT)
1333 printk(KERN_DEBUG "%s: using id 0x%x\n", __func__, hc->id);
1334
1335 hc->ondemand = ondemand[l1oip_cnt];
1336 if (hc->ondemand && !hc->id) {
1337 printk(KERN_ERR "%s: ondemand option only allowed in "
1338 "conjunction with non 0 ID\n", __func__);
1339 return -EINVAL;
1340 }
1341
1342 if (limit[l1oip_cnt])
1343 hc->b_num = limit[l1oip_cnt];
1344 if (!pri && hc->b_num > 2) {
1345 printk(KERN_ERR "Maximum limit for BRI interface is 2 "
1346 "channels.\n");
1347 return -EINVAL;
1348 }
1349 if (pri && hc->b_num > 126) {
1350 printk(KERN_ERR "Maximum limit for PRI interface is 126 "
1351 "channels.\n");
1352 return -EINVAL;
1353 }
1354 if (pri && hc->b_num > 30) {
1355		printk(KERN_WARNING "Maximum limit for PRI interface is 30 "
1356			"channels.\n");
1357		printk(KERN_WARNING "Your selection of %d channels must be "
1358			"supported by the application.\n", hc->limit);
1359 }
1360
1361 hc->remoteip = ip[l1oip_cnt<<2] << 24
1362 | ip[(l1oip_cnt<<2)+1] << 16
1363 | ip[(l1oip_cnt<<2)+2] << 8
1364 | ip[(l1oip_cnt<<2)+3];
1365 hc->localport = port[l1oip_cnt]?:(L1OIP_DEFAULTPORT+l1oip_cnt);
1366 if (remoteport[l1oip_cnt])
1367 hc->remoteport = remoteport[l1oip_cnt];
1368 else
1369 hc->remoteport = hc->localport;
1370 if (debug & DEBUG_L1OIP_INIT)
1371 printk(KERN_DEBUG "%s: using local port %d remote ip "
1372 "%d.%d.%d.%d port %d ondemand %d\n", __func__,
1373 hc->localport, hc->remoteip >> 24,
1374 (hc->remoteip >> 16) & 0xff,
1375 (hc->remoteip >> 8) & 0xff, hc->remoteip & 0xff,
1376 hc->remoteport, hc->ondemand);
1377
1378 dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
1379 if (!dch)
1380 return -ENOMEM;
1381 dch->debug = debug;
1382 mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, NULL);
1383 dch->hw = hc;
1384 if (pri)
1385 dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
1386 else
1387 dch->dev.Dprotocols = (1 << ISDN_P_TE_S0) | (1 << ISDN_P_NT_S0);
1388 dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
1389 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
1390 dch->dev.D.send = handle_dmsg;
1391 dch->dev.D.ctrl = l1oip_dctrl;
1392 dch->dev.nrbchan = hc->b_num;
1393 dch->slot = hc->d_idx;
1394 hc->chan[hc->d_idx].dch = dch;
1395 i = 1;
1396 for (ch = 0; ch < dch->dev.nrbchan; ch++) {
1397 if (ch == 15)
1398 i++;
1399 bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
1400 if (!bch) {
1401 printk(KERN_ERR "%s: no memory for bchannel\n",
1402 __func__);
1403 return -ENOMEM;
1404 }
1405 bch->nr = i + ch;
1406 bch->slot = i + ch;
1407 bch->debug = debug;
1408 mISDN_initbchannel(bch, MAX_DATA_MEM);
1409 bch->hw = hc;
1410 bch->ch.send = handle_bmsg;
1411 bch->ch.ctrl = l1oip_bctrl;
1412 bch->ch.nr = i + ch;
1413 list_add(&bch->ch.list, &dch->dev.bchannels);
1414 hc->chan[i + ch].bch = bch;
1415 test_and_set_bit(bch->nr & 0x1f,
1416 &dch->dev.channelmap[bch->nr >> 5]);
1417 }
1418 ret = mISDN_register_device(&dch->dev, hc->name);
1419 if (ret)
1420 return ret;
1421 hc->registered = 1;
1422
1423 if (debug & DEBUG_L1OIP_INIT)
1424 printk(KERN_DEBUG "%s: Setting up network card(%d)\n",
1425 __func__, l1oip_cnt + 1);
1426 ret = l1oip_socket_open(hc);
1427 if (ret)
1428 return ret;
1429
1430 hc->keep_tl.function = (void *)l1oip_keepalive;
1431 hc->keep_tl.data = (ulong)hc;
1432 init_timer(&hc->keep_tl);
1433 hc->keep_tl.expires = jiffies + 2*HZ; /* two seconds first time */
1434 add_timer(&hc->keep_tl);
1435
1436 hc->timeout_tl.function = (void *)l1oip_timeout;
1437 hc->timeout_tl.data = (ulong)hc;
1438 init_timer(&hc->timeout_tl);
1439 hc->timeout_on = 0; /* state that we have timer off */
1440
1441 return 0;
1442}
1443
1444static int __init
1445l1oip_init(void)
1446{
1447 int pri, bundle;
1448 struct l1oip *hc;
1449 int ret;
1450
1451 printk(KERN_INFO "mISDN: Layer-1-over-IP driver Rev. %s\n",
1452 l1oip_revision);
1453
1454 INIT_LIST_HEAD(&l1oip_ilist);
1455 spin_lock_init(&l1oip_lock);
1456
1457 if (l1oip_4bit_alloc(ulaw))
1458 return -ENOMEM;
1459
1460 l1oip_cnt = 0;
1461 while (type[l1oip_cnt] && l1oip_cnt < MAX_CARDS) {
1462 switch (type[l1oip_cnt] & 0xff) {
1463 case 1:
1464 pri = 0;
1465 bundle = 0;
1466 break;
1467 case 2:
1468 pri = 1;
1469 bundle = 0;
1470 break;
1471 case 3:
1472 pri = 0;
1473 bundle = 1;
1474 break;
1475 case 4:
1476 pri = 1;
1477 bundle = 1;
1478 break;
1479 default:
1480 printk(KERN_ERR "Card type(%d) not supported.\n",
1481 type[l1oip_cnt] & 0xff);
1482 l1oip_cleanup();
1483 return -EINVAL;
1484 }
1485
1486 if (debug & DEBUG_L1OIP_INIT)
1487 printk(KERN_DEBUG "%s: interface %d is %s with %s.\n",
1488 __func__, l1oip_cnt, pri?"PRI":"BRI",
1489 bundle?"bundled IP packet for all B-channels"
1490				:"separate IP packets for every B-channel");
1491
1492 hc = kzalloc(sizeof(struct l1oip), GFP_ATOMIC);
1493 if (!hc) {
1494 printk(KERN_ERR "No kmem for L1-over-IP driver.\n");
1495 l1oip_cleanup();
1496 return -ENOMEM;
1497 }
1498 INIT_WORK(&hc->workq, (void *)l1oip_send_bh);
1499
1500 spin_lock(&l1oip_lock);
1501 list_add_tail(&hc->list, &l1oip_ilist);
1502 spin_unlock(&l1oip_lock);
1503
1504 ret = init_card(hc, pri, bundle);
1505 if (ret) {
1506 l1oip_cleanup();
1507 return ret;
1508 }
1509
1510 l1oip_cnt++;
1511 }
1512 printk(KERN_INFO "%d virtual devices registered\n", l1oip_cnt);
1513 return 0;
1514}
1515
1516module_init(l1oip_init);
1517module_exit(l1oip_cleanup);
1518
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
new file mode 100644
index 000000000000..fced1a2755f8
--- /dev/null
+++ b/drivers/isdn/mISDN/layer1.c
@@ -0,0 +1,403 @@
1/*
2 *
3 * Author Karsten Keil <kkeil@novell.com>
4 *
5 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18
19#include <linux/module.h>
20#include <linux/mISDNhw.h>
21#include "layer1.h"
22#include "fsm.h"
23
24static int *debug;
25
26struct layer1 {
27 u_long Flags;
28 struct FsmInst l1m;
29 struct FsmTimer timer;
30 int delay;
31 struct dchannel *dch;
32 dchannel_l1callback *dcb;
33};
34
35#define TIMER3_VALUE 7000
36
37static
38struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL};
39
40enum {
41 ST_L1_F2,
42 ST_L1_F3,
43 ST_L1_F4,
44 ST_L1_F5,
45 ST_L1_F6,
46 ST_L1_F7,
47 ST_L1_F8,
48};
49
50#define L1S_STATE_COUNT (ST_L1_F8+1)
51
52static char *strL1SState[] =
53{
54 "ST_L1_F2",
55 "ST_L1_F3",
56 "ST_L1_F4",
57 "ST_L1_F5",
58 "ST_L1_F6",
59 "ST_L1_F7",
60 "ST_L1_F8",
61};
62
63enum {
64 EV_PH_ACTIVATE,
65 EV_PH_DEACTIVATE,
66 EV_RESET_IND,
67 EV_DEACT_CNF,
68 EV_DEACT_IND,
69 EV_POWER_UP,
70 EV_ANYSIG_IND,
71 EV_INFO2_IND,
72 EV_INFO4_IND,
73 EV_TIMER_DEACT,
74 EV_TIMER_ACT,
75 EV_TIMER3,
76};
77
78#define L1_EVENT_COUNT (EV_TIMER3 + 1)
79
80static char *strL1Event[] =
81{
82 "EV_PH_ACTIVATE",
83 "EV_PH_DEACTIVATE",
84 "EV_RESET_IND",
85 "EV_DEACT_CNF",
86 "EV_DEACT_IND",
87 "EV_POWER_UP",
88 "EV_ANYSIG_IND",
89 "EV_INFO2_IND",
90 "EV_INFO4_IND",
91 "EV_TIMER_DEACT",
92 "EV_TIMER_ACT",
93 "EV_TIMER3",
94};
95
96static void
97l1m_debug(struct FsmInst *fi, char *fmt, ...)
98{
99 struct layer1 *l1 = fi->userdata;
100 va_list va;
101
102 va_start(va, fmt);
103 printk(KERN_DEBUG "%s: ", l1->dch->dev.name);
104 vprintk(fmt, va);
105 printk("\n");
106 va_end(va);
107}
108
109static void
110l1_reset(struct FsmInst *fi, int event, void *arg)
111{
112 mISDN_FsmChangeState(fi, ST_L1_F3);
113}
114
115static void
116l1_deact_cnf(struct FsmInst *fi, int event, void *arg)
117{
118 struct layer1 *l1 = fi->userdata;
119
120 mISDN_FsmChangeState(fi, ST_L1_F3);
121 if (test_bit(FLG_L1_ACTIVATING, &l1->Flags))
122 l1->dcb(l1->dch, HW_POWERUP_REQ);
123}
124
125static void
126l1_deact_req_s(struct FsmInst *fi, int event, void *arg)
127{
128 struct layer1 *l1 = fi->userdata;
129
130 mISDN_FsmChangeState(fi, ST_L1_F3);
131 mISDN_FsmRestartTimer(&l1->timer, 550, EV_TIMER_DEACT, NULL, 2);
132 test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags);
133}
134
135static void
136l1_power_up_s(struct FsmInst *fi, int event, void *arg)
137{
138 struct layer1 *l1 = fi->userdata;
139
140 if (test_bit(FLG_L1_ACTIVATING, &l1->Flags)) {
141 mISDN_FsmChangeState(fi, ST_L1_F4);
142 l1->dcb(l1->dch, INFO3_P8);
143 } else
144 mISDN_FsmChangeState(fi, ST_L1_F3);
145}
146
147static void
148l1_go_F5(struct FsmInst *fi, int event, void *arg)
149{
150 mISDN_FsmChangeState(fi, ST_L1_F5);
151}
152
153static void
154l1_go_F8(struct FsmInst *fi, int event, void *arg)
155{
156 mISDN_FsmChangeState(fi, ST_L1_F8);
157}
158
159static void
160l1_info2_ind(struct FsmInst *fi, int event, void *arg)
161{
162 struct layer1 *l1 = fi->userdata;
163
164 mISDN_FsmChangeState(fi, ST_L1_F6);
165 l1->dcb(l1->dch, INFO3_P8);
166}
167
168static void
169l1_info4_ind(struct FsmInst *fi, int event, void *arg)
170{
171 struct layer1 *l1 = fi->userdata;
172
173 mISDN_FsmChangeState(fi, ST_L1_F7);
174 l1->dcb(l1->dch, INFO3_P8);
175 if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags))
176 mISDN_FsmDelTimer(&l1->timer, 4);
177 if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) {
178 if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags))
179 mISDN_FsmDelTimer(&l1->timer, 3);
180 mISDN_FsmRestartTimer(&l1->timer, 110, EV_TIMER_ACT, NULL, 2);
181 test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags);
182 }
183}
184
185static void
186l1_timer3(struct FsmInst *fi, int event, void *arg)
187{
188 struct layer1 *l1 = fi->userdata;
189
190 test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags);
191 if (test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags)) {
192 if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
193 l1->dcb(l1->dch, HW_D_NOBLOCKED);
194 l1->dcb(l1->dch, PH_DEACTIVATE_IND);
195 }
196 if (l1->l1m.state != ST_L1_F6) {
197 mISDN_FsmChangeState(fi, ST_L1_F3);
198 l1->dcb(l1->dch, HW_POWERUP_REQ);
199 }
200}
201
202static void
203l1_timer_act(struct FsmInst *fi, int event, void *arg)
204{
205 struct layer1 *l1 = fi->userdata;
206
207 test_and_clear_bit(FLG_L1_ACTTIMER, &l1->Flags);
208 test_and_set_bit(FLG_L1_ACTIVATED, &l1->Flags);
209 l1->dcb(l1->dch, PH_ACTIVATE_IND);
210}
211
212static void
213l1_timer_deact(struct FsmInst *fi, int event, void *arg)
214{
215 struct layer1 *l1 = fi->userdata;
216
217 test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags);
218 test_and_clear_bit(FLG_L1_ACTIVATED, &l1->Flags);
219 if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
220 l1->dcb(l1->dch, HW_D_NOBLOCKED);
221 l1->dcb(l1->dch, PH_DEACTIVATE_IND);
222 l1->dcb(l1->dch, HW_DEACT_REQ);
223}
224
225static void
226l1_activate_s(struct FsmInst *fi, int event, void *arg)
227{
228 struct layer1 *l1 = fi->userdata;
229
230 mISDN_FsmRestartTimer(&l1->timer, TIMER3_VALUE, EV_TIMER3, NULL, 2);
231 test_and_set_bit(FLG_L1_T3RUN, &l1->Flags);
232 l1->dcb(l1->dch, HW_RESET_REQ);
233}
234
235static void
236l1_activate_no(struct FsmInst *fi, int event, void *arg)
237{
238 struct layer1 *l1 = fi->userdata;
239
240 if ((!test_bit(FLG_L1_DEACTTIMER, &l1->Flags)) &&
241 (!test_bit(FLG_L1_T3RUN, &l1->Flags))) {
242 test_and_clear_bit(FLG_L1_ACTIVATING, &l1->Flags);
243 if (test_and_clear_bit(FLG_L1_DBLOCKED, &l1->Flags))
244 l1->dcb(l1->dch, HW_D_NOBLOCKED);
245 l1->dcb(l1->dch, PH_DEACTIVATE_IND);
246 }
247}
248
249static struct FsmNode L1SFnList[] =
250{
251 {ST_L1_F3, EV_PH_ACTIVATE, l1_activate_s},
252 {ST_L1_F6, EV_PH_ACTIVATE, l1_activate_no},
253 {ST_L1_F8, EV_PH_ACTIVATE, l1_activate_no},
254 {ST_L1_F3, EV_RESET_IND, l1_reset},
255 {ST_L1_F4, EV_RESET_IND, l1_reset},
256 {ST_L1_F5, EV_RESET_IND, l1_reset},
257 {ST_L1_F6, EV_RESET_IND, l1_reset},
258 {ST_L1_F7, EV_RESET_IND, l1_reset},
259 {ST_L1_F8, EV_RESET_IND, l1_reset},
260 {ST_L1_F3, EV_DEACT_CNF, l1_deact_cnf},
261 {ST_L1_F4, EV_DEACT_CNF, l1_deact_cnf},
262 {ST_L1_F5, EV_DEACT_CNF, l1_deact_cnf},
263 {ST_L1_F6, EV_DEACT_CNF, l1_deact_cnf},
264 {ST_L1_F7, EV_DEACT_CNF, l1_deact_cnf},
265 {ST_L1_F8, EV_DEACT_CNF, l1_deact_cnf},
266 {ST_L1_F6, EV_DEACT_IND, l1_deact_req_s},
267 {ST_L1_F7, EV_DEACT_IND, l1_deact_req_s},
268 {ST_L1_F8, EV_DEACT_IND, l1_deact_req_s},
269 {ST_L1_F3, EV_POWER_UP, l1_power_up_s},
270 {ST_L1_F4, EV_ANYSIG_IND, l1_go_F5},
271 {ST_L1_F6, EV_ANYSIG_IND, l1_go_F8},
272 {ST_L1_F7, EV_ANYSIG_IND, l1_go_F8},
273 {ST_L1_F3, EV_INFO2_IND, l1_info2_ind},
274 {ST_L1_F4, EV_INFO2_IND, l1_info2_ind},
275 {ST_L1_F5, EV_INFO2_IND, l1_info2_ind},
276 {ST_L1_F7, EV_INFO2_IND, l1_info2_ind},
277 {ST_L1_F8, EV_INFO2_IND, l1_info2_ind},
278 {ST_L1_F3, EV_INFO4_IND, l1_info4_ind},
279 {ST_L1_F4, EV_INFO4_IND, l1_info4_ind},
280 {ST_L1_F5, EV_INFO4_IND, l1_info4_ind},
281 {ST_L1_F6, EV_INFO4_IND, l1_info4_ind},
282 {ST_L1_F8, EV_INFO4_IND, l1_info4_ind},
283 {ST_L1_F3, EV_TIMER3, l1_timer3},
284 {ST_L1_F4, EV_TIMER3, l1_timer3},
285 {ST_L1_F5, EV_TIMER3, l1_timer3},
286 {ST_L1_F6, EV_TIMER3, l1_timer3},
287 {ST_L1_F8, EV_TIMER3, l1_timer3},
288 {ST_L1_F7, EV_TIMER_ACT, l1_timer_act},
289 {ST_L1_F3, EV_TIMER_DEACT, l1_timer_deact},
290 {ST_L1_F4, EV_TIMER_DEACT, l1_timer_deact},
291 {ST_L1_F5, EV_TIMER_DEACT, l1_timer_deact},
292 {ST_L1_F6, EV_TIMER_DEACT, l1_timer_deact},
293 {ST_L1_F7, EV_TIMER_DEACT, l1_timer_deact},
294 {ST_L1_F8, EV_TIMER_DEACT, l1_timer_deact},
295};
296
297static void
298release_l1(struct layer1 *l1) {
299 mISDN_FsmDelTimer(&l1->timer, 0);
300 if (l1->dch)
301 l1->dch->l1 = NULL;
302 module_put(THIS_MODULE);
303 kfree(l1);
304}
305
306int
307l1_event(struct layer1 *l1, u_int event)
308{
309 int err = 0;
310
311 if (!l1)
312 return -EINVAL;
313 switch (event) {
314 case HW_RESET_IND:
315 mISDN_FsmEvent(&l1->l1m, EV_RESET_IND, NULL);
316 break;
317 case HW_DEACT_IND:
318 mISDN_FsmEvent(&l1->l1m, EV_DEACT_IND, NULL);
319 break;
320 case HW_POWERUP_IND:
321 mISDN_FsmEvent(&l1->l1m, EV_POWER_UP, NULL);
322 break;
323 case HW_DEACT_CNF:
324 mISDN_FsmEvent(&l1->l1m, EV_DEACT_CNF, NULL);
325 break;
326 case ANYSIGNAL:
327 mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL);
328 break;
329 case LOSTFRAMING:
330 mISDN_FsmEvent(&l1->l1m, EV_ANYSIG_IND, NULL);
331 break;
332 case INFO2:
333 mISDN_FsmEvent(&l1->l1m, EV_INFO2_IND, NULL);
334 break;
335 case INFO4_P8:
336 mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL);
337 break;
338 case INFO4_P10:
339 mISDN_FsmEvent(&l1->l1m, EV_INFO4_IND, NULL);
340 break;
341 case PH_ACTIVATE_REQ:
342 if (test_bit(FLG_L1_ACTIVATED, &l1->Flags))
343 l1->dcb(l1->dch, PH_ACTIVATE_IND);
344 else {
345 test_and_set_bit(FLG_L1_ACTIVATING, &l1->Flags);
346 mISDN_FsmEvent(&l1->l1m, EV_PH_ACTIVATE, NULL);
347 }
348 break;
349 case CLOSE_CHANNEL:
350 release_l1(l1);
351 break;
352 default:
353 if (*debug & DEBUG_L1)
354 printk(KERN_DEBUG "%s %x unhandled\n",
355 __func__, event);
356 err = -EINVAL;
357 }
358 return err;
359}
360EXPORT_SYMBOL(l1_event);
361
362int
363create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
364 struct layer1 *nl1;
365
366 nl1 = kzalloc(sizeof(struct layer1), GFP_ATOMIC);
367 if (!nl1) {
368		printk(KERN_ERR "kzalloc of struct layer1 failed\n");
369 return -ENOMEM;
370 }
371 nl1->l1m.fsm = &l1fsm_s;
372 nl1->l1m.state = ST_L1_F3;
373 nl1->Flags = 0;
374 nl1->l1m.debug = *debug & DEBUG_L1_FSM;
375 nl1->l1m.userdata = nl1;
376 nl1->l1m.userint = 0;
377 nl1->l1m.printdebug = l1m_debug;
378 nl1->dch = dch;
379 nl1->dcb = dcb;
380 mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer);
381 __module_get(THIS_MODULE);
382 dch->l1 = nl1;
383 return 0;
384}
385EXPORT_SYMBOL(create_l1);
386
387int
388l1_init(u_int *deb)
389{
390 debug = deb;
391 l1fsm_s.state_count = L1S_STATE_COUNT;
392 l1fsm_s.event_count = L1_EVENT_COUNT;
393 l1fsm_s.strEvent = strL1Event;
394 l1fsm_s.strState = strL1SState;
395 mISDN_FsmNew(&l1fsm_s, L1SFnList, ARRAY_SIZE(L1SFnList));
396 return 0;
397}
398
399void
400l1_cleanup(void)
401{
402 mISDN_FsmFree(&l1fsm_s);
403}
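layer1.c exports create_l1() and l1_event(), so a D-channel hardware driver is expected to attach the state machine when the channel is opened and to feed it hardware indications from its interrupt path. The fragment below is a hedged sketch of that usage, not part of the patch: it assumes the prototypes and the dchannel_l1callback typedef exported via <linux/mISDNhw.h> (the callback is invoked with the dchannel and a command word, as the l1->dcb() calls above show), and my_l1_callback/my_open_dchannel are hypothetical names.

#include <linux/mISDNhw.h>

/* called by the FSM with requests such as HW_RESET_REQ, HW_POWERUP_REQ,
 * INFO3_P8, PH_ACTIVATE_IND or PH_DEACTIVATE_IND */
static int
my_l1_callback(struct dchannel *dch, u_int cmd)
{
	switch (cmd) {
	case HW_RESET_REQ:
		/* trigger a transceiver reset on the hardware here */
		break;
	case PH_ACTIVATE_IND:
		/* layer 1 is up; resume any queued D-channel traffic */
		break;
	default:
		break;
	}
	return 0;
}

static int
my_open_dchannel(struct dchannel *dch)
{
	int err;

	err = create_l1(dch, my_l1_callback);
	if (err)
		return err;
	/* later, the interrupt handler reports line state changes: */
	l1_event(dch->l1, HW_POWERUP_IND);
	return 0;
}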
diff --git a/drivers/isdn/mISDN/layer1.h b/drivers/isdn/mISDN/layer1.h
new file mode 100644
index 000000000000..9c8125fd89af
--- /dev/null
+++ b/drivers/isdn/mISDN/layer1.h
@@ -0,0 +1,26 @@
1/*
2 *
3 * Layer 1 defines
4 *
5 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#define FLG_L1_ACTIVATING 1
19#define FLG_L1_ACTIVATED 2
20#define FLG_L1_DEACTTIMER 3
21#define FLG_L1_ACTTIMER 4
22#define FLG_L1_T3RUN 5
23#define FLG_L1_PULL_REQ 6
24#define FLG_L1_UINT 7
25#define FLG_L1_DBLOCKED 8
26
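The FLG_L1_* values above are bit numbers, not masks: layer1.c passes them to the atomic bit operations (test_bit, test_and_set_bit, test_and_clear_bit) acting on the u_long Flags word of struct layer1. A small illustrative fragment of that pattern follows, using an invented flags pointer rather than a real layer1 instance.

#include <linux/bitops.h>

/* illustration only: mirrors the flag handling in layer1.c */
static void example_l1_flags(unsigned long *flags)
{
	if (!test_and_set_bit(FLG_L1_ACTIVATING, flags)) {
		/* first activation request since the last deactivate */
	}
	if (test_and_clear_bit(FLG_L1_T3RUN, flags)) {
		/* timer T3 was armed and must now be cancelled */
	}
	if (test_bit(FLG_L1_ACTIVATED, flags)) {
		/* line already active; confirm immediately */
	}
}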
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
new file mode 100644
index 000000000000..a7915a156c04
--- /dev/null
+++ b/drivers/isdn/mISDN/layer2.c
@@ -0,0 +1,2216 @@
1/*
2 *
3 * Author Karsten Keil <kkeil@novell.com>
4 *
5 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include "fsm.h"
19#include "layer2.h"
20
21static u_int *debug;
22
23static
24struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};
25
26static char *strL2State[] =
27{
28 "ST_L2_1",
29 "ST_L2_2",
30 "ST_L2_3",
31 "ST_L2_4",
32 "ST_L2_5",
33 "ST_L2_6",
34 "ST_L2_7",
35 "ST_L2_8",
36};
37
38enum {
39 EV_L2_UI,
40 EV_L2_SABME,
41 EV_L2_DISC,
42 EV_L2_DM,
43 EV_L2_UA,
44 EV_L2_FRMR,
45 EV_L2_SUPER,
46 EV_L2_I,
47 EV_L2_DL_DATA,
48 EV_L2_ACK_PULL,
49 EV_L2_DL_UNITDATA,
50 EV_L2_DL_ESTABLISH_REQ,
51 EV_L2_DL_RELEASE_REQ,
52 EV_L2_MDL_ASSIGN,
53 EV_L2_MDL_REMOVE,
54 EV_L2_MDL_ERROR,
55 EV_L1_DEACTIVATE,
56 EV_L2_T200,
57 EV_L2_T203,
58 EV_L2_SET_OWN_BUSY,
59 EV_L2_CLEAR_OWN_BUSY,
60 EV_L2_FRAME_ERROR,
61};
62
63#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)
64
65static char *strL2Event[] =
66{
67 "EV_L2_UI",
68 "EV_L2_SABME",
69 "EV_L2_DISC",
70 "EV_L2_DM",
71 "EV_L2_UA",
72 "EV_L2_FRMR",
73 "EV_L2_SUPER",
74 "EV_L2_I",
75 "EV_L2_DL_DATA",
76 "EV_L2_ACK_PULL",
77 "EV_L2_DL_UNITDATA",
78 "EV_L2_DL_ESTABLISH_REQ",
79 "EV_L2_DL_RELEASE_REQ",
80 "EV_L2_MDL_ASSIGN",
81 "EV_L2_MDL_REMOVE",
82 "EV_L2_MDL_ERROR",
83 "EV_L1_DEACTIVATE",
84 "EV_L2_T200",
85 "EV_L2_T203",
86 "EV_L2_SET_OWN_BUSY",
87 "EV_L2_CLEAR_OWN_BUSY",
88 "EV_L2_FRAME_ERROR",
89};
90
91static void
92l2m_debug(struct FsmInst *fi, char *fmt, ...)
93{
94 struct layer2 *l2 = fi->userdata;
95 va_list va;
96
97 if (!(*debug & DEBUG_L2_FSM))
98 return;
99 va_start(va, fmt);
100 printk(KERN_DEBUG "l2 (tei %d): ", l2->tei);
101 vprintk(fmt, va);
102 printk("\n");
103 va_end(va);
104}
105
106inline u_int
107l2headersize(struct layer2 *l2, int ui)
108{
109 return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
110 (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
111}
112
113inline u_int
114l2addrsize(struct layer2 *l2)
115{
116 return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
117}
118
119static u_int
120l2_newid(struct layer2 *l2)
121{
122 u_int id;
123
124 id = l2->next_id++;
125 if (id == 0x7fff)
126 l2->next_id = 1;
127 id <<= 16;
128 id |= l2->tei << 8;
129 id |= l2->sapi;
130 return id;
131}
132
133static void
134l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
135{
136 int err;
137
138 if (!l2->up)
139 return;
140 mISDN_HEAD_PRIM(skb) = prim;
141 mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
142 err = l2->up->send(l2->up, skb);
143 if (err) {
144 printk(KERN_WARNING "%s: err=%d\n", __func__, err);
145 dev_kfree_skb(skb);
146 }
147}
148
149static void
150l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
151{
152 struct sk_buff *skb;
153 struct mISDNhead *hh;
154 int err;
155
156 if (!l2->up)
157 return;
158 skb = mI_alloc_skb(len, GFP_ATOMIC);
159 if (!skb)
160 return;
161 hh = mISDN_HEAD_P(skb);
162 hh->prim = prim;
163 hh->id = (l2->ch.nr << 16) | l2->ch.addr;
164 if (len)
165 memcpy(skb_put(skb, len), arg, len);
166 err = l2->up->send(l2->up, skb);
167 if (err) {
168 printk(KERN_WARNING "%s: err=%d\n", __func__, err);
169 dev_kfree_skb(skb);
170 }
171}
172
173static int
174l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
175 int ret;
176
177 ret = l2->ch.recv(l2->ch.peer, skb);
178 if (ret && (*debug & DEBUG_L2_RECV))
179 printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
180 return ret;
181}
182
183static int
184l2down_raw(struct layer2 *l2, struct sk_buff *skb)
185{
186 struct mISDNhead *hh = mISDN_HEAD_P(skb);
187
188 if (hh->prim == PH_DATA_REQ) {
189 if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
190 skb_queue_tail(&l2->down_queue, skb);
191 return 0;
192 }
193 l2->down_id = mISDN_HEAD_ID(skb);
194 }
195 return l2down_skb(l2, skb);
196}
197
198static int
199l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
200{
201 struct mISDNhead *hh = mISDN_HEAD_P(skb);
202
203 hh->prim = prim;
204 hh->id = id;
205 return l2down_raw(l2, skb);
206}
207
208static int
209l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
210{
211 struct sk_buff *skb;
212 int err;
213 struct mISDNhead *hh;
214
215 skb = mI_alloc_skb(len, GFP_ATOMIC);
216 if (!skb)
217 return -ENOMEM;
218 hh = mISDN_HEAD_P(skb);
219 hh->prim = prim;
220 hh->id = id;
221 if (len)
222 memcpy(skb_put(skb, len), arg, len);
223 err = l2down_raw(l2, skb);
224 if (err)
225 dev_kfree_skb(skb);
226 return err;
227}
228
229static int
230ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
231 struct sk_buff *nskb = skb;
232 int ret = -EAGAIN;
233
234 if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
235 if (hh->id == l2->down_id) {
236 nskb = skb_dequeue(&l2->down_queue);
237 if (nskb) {
238 l2->down_id = mISDN_HEAD_ID(nskb);
239 if (l2down_skb(l2, nskb)) {
240 dev_kfree_skb(nskb);
241 l2->down_id = MISDN_ID_NONE;
242 }
243 } else
244 l2->down_id = MISDN_ID_NONE;
245 if (ret) {
246 dev_kfree_skb(skb);
247 ret = 0;
248 }
249 if (l2->down_id == MISDN_ID_NONE) {
250 test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
251 mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
252 }
253 }
254 }
255 if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
256 nskb = skb_dequeue(&l2->down_queue);
257 if (nskb) {
258 l2->down_id = mISDN_HEAD_ID(nskb);
259 if (l2down_skb(l2, nskb)) {
260 dev_kfree_skb(nskb);
261 l2->down_id = MISDN_ID_NONE;
262 test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
263 }
264 } else
265 test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
266 }
267 return ret;
268}
269
270static int
271l2mgr(struct layer2 *l2, u_int prim, void *arg) {
272 long c = (long)arg;
273
274 printk(KERN_WARNING
275 "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
276 if (test_bit(FLG_LAPD, &l2->flag) &&
277 !test_bit(FLG_FIXED_TEI, &l2->flag)) {
278 switch (c) {
279 case 'C':
280 case 'D':
281 case 'G':
282 case 'H':
283 l2_tei(l2, prim, (u_long)arg);
284 break;
285 }
286 }
287 return 0;
288}
289
290static void
291set_peer_busy(struct layer2 *l2) {
292 test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
293 if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
294 test_and_set_bit(FLG_L2BLOCK, &l2->flag);
295}
296
297static void
298clear_peer_busy(struct layer2 *l2) {
299 if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
300 test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
301}
302
303static void
304InitWin(struct layer2 *l2)
305{
306 int i;
307
308 for (i = 0; i < MAX_WINDOW; i++)
309 l2->windowar[i] = NULL;
310}
311
312static int
313freewin(struct layer2 *l2)
314{
315 int i, cnt = 0;
316
317 for (i = 0; i < MAX_WINDOW; i++) {
318 if (l2->windowar[i]) {
319 cnt++;
320 dev_kfree_skb(l2->windowar[i]);
321 l2->windowar[i] = NULL;
322 }
323 }
324 return cnt;
325}
326
327static void
328ReleaseWin(struct layer2 *l2)
329{
330 int cnt = freewin(l2);
331
332 if (cnt)
333 printk(KERN_WARNING
334 "isdnl2 freed %d skbuffs in release\n", cnt);
335}
336
337inline unsigned int
338cansend(struct layer2 *l2)
339{
340 unsigned int p1;
341
342 if (test_bit(FLG_MOD128, &l2->flag))
343 p1 = (l2->vs - l2->va) % 128;
344 else
345 p1 = (l2->vs - l2->va) % 8;
346 return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
347}
348
349inline void
350clear_exception(struct layer2 *l2)
351{
352 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
353 test_and_clear_bit(FLG_REJEXC, &l2->flag);
354 test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
355 clear_peer_busy(l2);
356}
357
358static int
359sethdraddr(struct layer2 *l2, u_char *header, int rsp)
360{
361 u_char *ptr = header;
362 int crbit = rsp;
363
364 if (test_bit(FLG_LAPD, &l2->flag)) {
365 if (test_bit(FLG_LAPD_NET, &l2->flag))
366 crbit = !crbit;
367 *ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
368 *ptr++ = (l2->tei << 1) | 1;
369 return 2;
370 } else {
371 if (test_bit(FLG_ORIG, &l2->flag))
372 crbit = !crbit;
373 if (crbit)
374 *ptr++ = l2->addr.B;
375 else
376 *ptr++ = l2->addr.A;
377 return 1;
378 }
379}
380
381static inline void
382enqueue_super(struct layer2 *l2, struct sk_buff *skb)
383{
384 if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
385 dev_kfree_skb(skb);
386}
387
388static inline void
389enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
390{
391 if (l2->tm)
392 l2_tei(l2, MDL_STATUS_UI_IND, 0);
393 if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
394 dev_kfree_skb(skb);
395}
396
397inline int
398IsUI(u_char *data)
399{
400 return (data[0] & 0xef) == UI;
401}
402
403inline int
404IsUA(u_char *data)
405{
406 return (data[0] & 0xef) == UA;
407}
408
409inline int
410IsDM(u_char *data)
411{
412 return (data[0] & 0xef) == DM;
413}
414
415inline int
416IsDISC(u_char *data)
417{
418 return (data[0] & 0xef) == DISC;
419}
420
421inline int
422IsRR(u_char *data, struct layer2 *l2)
423{
424 if (test_bit(FLG_MOD128, &l2->flag))
425 return data[0] == RR;
426 else
427 return (data[0] & 0xf) == 1;
428}
429
430inline int
431IsSFrame(u_char *data, struct layer2 *l2)
432{
433 register u_char d = *data;
434
435 if (!test_bit(FLG_MOD128, &l2->flag))
436 d &= 0xf;
437 return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
438}
439
440inline int
441IsSABME(u_char *data, struct layer2 *l2)
442{
443 u_char d = data[0] & ~0x10;
444
445 return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
446}
447
448inline int
449IsREJ(u_char *data, struct layer2 *l2)
450{
451 return test_bit(FLG_MOD128, &l2->flag) ?
452 data[0] == REJ : (data[0] & 0xf) == REJ;
453}
454
455inline int
456IsFRMR(u_char *data)
457{
458 return (data[0] & 0xef) == FRMR;
459}
460
461inline int
462IsRNR(u_char *data, struct layer2 *l2)
463{
464 return test_bit(FLG_MOD128, &l2->flag) ?
465 data[0] == RNR : (data[0] & 0xf) == RNR;
466}
467
468int
469iframe_error(struct layer2 *l2, struct sk_buff *skb)
470{
471 u_int i;
472 int rsp = *skb->data & 0x2;
473
474 i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
475 if (test_bit(FLG_ORIG, &l2->flag))
476 rsp = !rsp;
477 if (rsp)
478 return 'L';
479 if (skb->len < i)
480 return 'N';
481 if ((skb->len - i) > l2->maxlen)
482 return 'O';
483 return 0;
484}
485
486int
487super_error(struct layer2 *l2, struct sk_buff *skb)
488{
489 if (skb->len != l2addrsize(l2) +
490 (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
491 return 'N';
492 return 0;
493}
494
495int
496unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
497{
498 int rsp = (*skb->data & 0x2) >> 1;
499 if (test_bit(FLG_ORIG, &l2->flag))
500 rsp = !rsp;
501 if (rsp != wantrsp)
502 return 'L';
503 if (skb->len != l2addrsize(l2) + 1)
504 return 'N';
505 return 0;
506}
507
508int
509UI_error(struct layer2 *l2, struct sk_buff *skb)
510{
511 int rsp = *skb->data & 0x2;
512 if (test_bit(FLG_ORIG, &l2->flag))
513 rsp = !rsp;
514 if (rsp)
515 return 'L';
516 if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
517 return 'O';
518 return 0;
519}
520
521int
522FRMR_error(struct layer2 *l2, struct sk_buff *skb)
523{
524 u_int headers = l2addrsize(l2) + 1;
525 u_char *datap = skb->data + headers;
526 int rsp = *skb->data & 0x2;
527
528 if (test_bit(FLG_ORIG, &l2->flag))
529 rsp = !rsp;
530 if (!rsp)
531 return 'L';
532 if (test_bit(FLG_MOD128, &l2->flag)) {
533 if (skb->len < headers + 5)
534 return 'N';
535 else if (*debug & DEBUG_L2)
536 l2m_debug(&l2->l2m,
537 "FRMR information %2x %2x %2x %2x %2x",
538 datap[0], datap[1], datap[2], datap[3], datap[4]);
539 } else {
540 if (skb->len < headers + 3)
541 return 'N';
542 else if (*debug & DEBUG_L2)
543 l2m_debug(&l2->l2m,
544 "FRMR information %2x %2x %2x",
545 datap[0], datap[1], datap[2]);
546 }
547 return 0;
548}
549
550static unsigned int
551legalnr(struct layer2 *l2, unsigned int nr)
552{
553 if (test_bit(FLG_MOD128, &l2->flag))
554 return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
555 else
556 return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
557}
558
559static void
560setva(struct layer2 *l2, unsigned int nr)
561{
562 struct sk_buff *skb;
563
564 while (l2->va != nr) {
565 l2->va++;
566 if (test_bit(FLG_MOD128, &l2->flag))
567 l2->va %= 128;
568 else
569 l2->va %= 8;
570 if (l2->windowar[l2->sow]) {
571 skb_trim(l2->windowar[l2->sow], 0);
572 skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
573 l2->windowar[l2->sow] = NULL;
574 }
575 l2->sow = (l2->sow + 1) % l2->window;
576 }
577 skb = skb_dequeue(&l2->tmp_queue);
578 while (skb) {
579 dev_kfree_skb(skb);
580 skb = skb_dequeue(&l2->tmp_queue);
581 }
582}
583
584static void
585send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
586{
587 u_char tmp[MAX_L2HEADER_LEN];
588 int i;
589
590 i = sethdraddr(l2, tmp, cr);
591 tmp[i++] = cmd;
592 if (skb)
593 skb_trim(skb, 0);
594 else {
595 skb = mI_alloc_skb(i, GFP_ATOMIC);
596 if (!skb) {
597 printk(KERN_WARNING "%s: can't alloc skbuff\n",
598 __func__);
599 return;
600 }
601 }
602 memcpy(skb_put(skb, i), tmp, i);
603 enqueue_super(l2, skb);
604}
605
606
607inline u_char
608get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
609{
610 return skb->data[l2addrsize(l2)] & 0x10;
611}
612
613inline u_char
614get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
615{
616 u_char PF;
617
618 PF = get_PollFlag(l2, skb);
619 dev_kfree_skb(skb);
620 return PF;
621}
622
623inline void
624start_t200(struct layer2 *l2, int i)
625{
626 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
627 test_and_set_bit(FLG_T200_RUN, &l2->flag);
628}
629
630inline void
631restart_t200(struct layer2 *l2, int i)
632{
633 mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
634 test_and_set_bit(FLG_T200_RUN, &l2->flag);
635}
636
637inline void
638stop_t200(struct layer2 *l2, int i)
639{
640 if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
641 mISDN_FsmDelTimer(&l2->t200, i);
642}
643
644inline void
645st5_dl_release_l2l3(struct layer2 *l2)
646{
647 int pr;
648
649 if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
650 pr = DL_RELEASE_CNF;
651 else
652 pr = DL_RELEASE_IND;
653 l2up_create(l2, pr, 0, NULL);
654}
655
656inline void
657lapb_dl_release_l2l3(struct layer2 *l2, int f)
658{
659 if (test_bit(FLG_LAPB, &l2->flag))
660 l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
661 l2up_create(l2, f, 0, NULL);
662}
663
664static void
665establishlink(struct FsmInst *fi)
666{
667 struct layer2 *l2 = fi->userdata;
668 u_char cmd;
669
670 clear_exception(l2);
671 l2->rc = 0;
672 cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
673 send_uframe(l2, NULL, cmd, CMD);
674 mISDN_FsmDelTimer(&l2->t203, 1);
675 restart_t200(l2, 1);
676 test_and_clear_bit(FLG_PEND_REL, &l2->flag);
677 freewin(l2);
678 mISDN_FsmChangeState(fi, ST_L2_5);
679}
680
681static void
682l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
683{
684 struct sk_buff *skb = arg;
685 struct layer2 *l2 = fi->userdata;
686
687 if (get_PollFlagFree(l2, skb))
688 l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
689 else
690 l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
691
692}
693
694static void
695l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
696{
697 struct sk_buff *skb = arg;
698 struct layer2 *l2 = fi->userdata;
699
700 if (get_PollFlagFree(l2, skb))
701 l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
702 else {
703 l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
704 establishlink(fi);
705 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
706 }
707}
708
709static void
710l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
711{
712 struct sk_buff *skb = arg;
713 struct layer2 *l2 = fi->userdata;
714
715 if (get_PollFlagFree(l2, skb))
716 l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
717 else
718 l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
719 establishlink(fi);
720 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
721}
722
723static void
724l2_go_st3(struct FsmInst *fi, int event, void *arg)
725{
726 dev_kfree_skb((struct sk_buff *)arg);
727 mISDN_FsmChangeState(fi, ST_L2_3);
728}
729
730static void
731l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
732{
733 struct layer2 *l2 = fi->userdata;
734
735 mISDN_FsmChangeState(fi, ST_L2_3);
736 dev_kfree_skb((struct sk_buff *)arg);
737 l2_tei(l2, MDL_ASSIGN_IND, 0);
738}
739
740static void
741l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
742{
743 struct layer2 *l2 = fi->userdata;
744 struct sk_buff *skb = arg;
745
746 skb_queue_tail(&l2->ui_queue, skb);
747 mISDN_FsmChangeState(fi, ST_L2_2);
748 l2_tei(l2, MDL_ASSIGN_IND, 0);
749}
750
751static void
752l2_queue_ui(struct FsmInst *fi, int event, void *arg)
753{
754 struct layer2 *l2 = fi->userdata;
755 struct sk_buff *skb = arg;
756
757 skb_queue_tail(&l2->ui_queue, skb);
758}
759
760static void
761tx_ui(struct layer2 *l2)
762{
763 struct sk_buff *skb;
764 u_char header[MAX_L2HEADER_LEN];
765 int i;
766
767 i = sethdraddr(l2, header, CMD);
768 if (test_bit(FLG_LAPD_NET, &l2->flag))
769 header[1] = 0xff; /* tei 127 */
770 header[i++] = UI;
771 while ((skb = skb_dequeue(&l2->ui_queue))) {
772 memcpy(skb_push(skb, i), header, i);
773 enqueue_ui(l2, skb);
774 }
775}
776
777static void
778l2_send_ui(struct FsmInst *fi, int event, void *arg)
779{
780 struct layer2 *l2 = fi->userdata;
781 struct sk_buff *skb = arg;
782
783 skb_queue_tail(&l2->ui_queue, skb);
784 tx_ui(l2);
785}
786
787static void
788l2_got_ui(struct FsmInst *fi, int event, void *arg)
789{
790 struct layer2 *l2 = fi->userdata;
791 struct sk_buff *skb = arg;
792
793 skb_pull(skb, l2headersize(l2, 1));
794/*
795 * in states 1-3 for broadcast
796 */
797
798 if (l2->tm)
799 l2_tei(l2, MDL_STATUS_UI_IND, 0);
800 l2up(l2, DL_UNITDATA_IND, skb);
801}
802
803static void
804l2_establish(struct FsmInst *fi, int event, void *arg)
805{
806 struct sk_buff *skb = arg;
807 struct layer2 *l2 = fi->userdata;
808
809 establishlink(fi);
810 test_and_set_bit(FLG_L3_INIT, &l2->flag);
811 dev_kfree_skb(skb);
812}
813
814static void
815l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
816{
817 struct sk_buff *skb = arg;
818 struct layer2 *l2 = fi->userdata;
819
820 skb_queue_purge(&l2->i_queue);
821 test_and_set_bit(FLG_L3_INIT, &l2->flag);
822 test_and_clear_bit(FLG_PEND_REL, &l2->flag);
823 dev_kfree_skb(skb);
824}
825
826static void
827l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
828{
829 struct sk_buff *skb = arg;
830 struct layer2 *l2 = fi->userdata;
831
832 skb_queue_purge(&l2->i_queue);
833 establishlink(fi);
834 test_and_set_bit(FLG_L3_INIT, &l2->flag);
835 dev_kfree_skb(skb);
836}
837
838static void
839l2_release(struct FsmInst *fi, int event, void *arg)
840{
841 struct layer2 *l2 = fi->userdata;
842 struct sk_buff *skb = arg;
843
844 skb_trim(skb, 0);
845 l2up(l2, DL_RELEASE_CNF, skb);
846}
847
848static void
849l2_pend_rel(struct FsmInst *fi, int event, void *arg)
850{
851 struct sk_buff *skb = arg;
852 struct layer2 *l2 = fi->userdata;
853
854 test_and_set_bit(FLG_PEND_REL, &l2->flag);
855 dev_kfree_skb(skb);
856}
857
858static void
859l2_disconnect(struct FsmInst *fi, int event, void *arg)
860{
861 struct layer2 *l2 = fi->userdata;
862 struct sk_buff *skb = arg;
863
864 skb_queue_purge(&l2->i_queue);
865 freewin(l2);
866 mISDN_FsmChangeState(fi, ST_L2_6);
867 l2->rc = 0;
868 send_uframe(l2, NULL, DISC | 0x10, CMD);
869 mISDN_FsmDelTimer(&l2->t203, 1);
870 restart_t200(l2, 2);
871 if (skb)
872 dev_kfree_skb(skb);
873}
874
875static void
876l2_start_multi(struct FsmInst *fi, int event, void *arg)
877{
878 struct layer2 *l2 = fi->userdata;
879 struct sk_buff *skb = arg;
880
881 l2->vs = 0;
882 l2->va = 0;
883 l2->vr = 0;
884 l2->sow = 0;
885 clear_exception(l2);
886 send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
887 mISDN_FsmChangeState(fi, ST_L2_7);
888 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
889 skb_trim(skb, 0);
890 l2up(l2, DL_ESTABLISH_IND, skb);
891 if (l2->tm)
892 l2_tei(l2, MDL_STATUS_UP_IND, 0);
893}
894
895static void
896l2_send_UA(struct FsmInst *fi, int event, void *arg)
897{
898 struct layer2 *l2 = fi->userdata;
899 struct sk_buff *skb = arg;
900
901 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
902}
903
904static void
905l2_send_DM(struct FsmInst *fi, int event, void *arg)
906{
907 struct layer2 *l2 = fi->userdata;
908 struct sk_buff *skb = arg;
909
910 send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
911}
912
913static void
914l2_restart_multi(struct FsmInst *fi, int event, void *arg)
915{
916 struct layer2 *l2 = fi->userdata;
917 struct sk_buff *skb = arg;
918 int est = 0;
919
920 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
921
922 l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
923
924 if (l2->vs != l2->va) {
925 skb_queue_purge(&l2->i_queue);
926 est = 1;
927 }
928
929 clear_exception(l2);
930 l2->vs = 0;
931 l2->va = 0;
932 l2->vr = 0;
933 l2->sow = 0;
934 mISDN_FsmChangeState(fi, ST_L2_7);
935 stop_t200(l2, 3);
936 mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
937
938 if (est)
939 l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
940/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
941 * MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
942 * 0, NULL, 0);
943 */
944 if (skb_queue_len(&l2->i_queue) && cansend(l2))
945 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
946}
947
948static void
949l2_stop_multi(struct FsmInst *fi, int event, void *arg)
950{
951 struct layer2 *l2 = fi->userdata;
952 struct sk_buff *skb = arg;
953
954 mISDN_FsmChangeState(fi, ST_L2_4);
955 mISDN_FsmDelTimer(&l2->t203, 3);
956 stop_t200(l2, 4);
957
958 send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
959 skb_queue_purge(&l2->i_queue);
960 freewin(l2);
961 lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
962 if (l2->tm)
963 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
964}
965
966static void
967l2_connected(struct FsmInst *fi, int event, void *arg)
968{
969 struct layer2 *l2 = fi->userdata;
970 struct sk_buff *skb = arg;
971 int pr = -1;
972
973 if (!get_PollFlag(l2, skb)) {
974 l2_mdl_error_ua(fi, event, arg);
975 return;
976 }
977 dev_kfree_skb(skb);
978 if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
979 l2_disconnect(fi, event, NULL);
980 if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
981 pr = DL_ESTABLISH_CNF;
982 } else if (l2->vs != l2->va) {
983 skb_queue_purge(&l2->i_queue);
984 pr = DL_ESTABLISH_IND;
985 }
986 stop_t200(l2, 5);
987 l2->vr = 0;
988 l2->vs = 0;
989 l2->va = 0;
990 l2->sow = 0;
991 mISDN_FsmChangeState(fi, ST_L2_7);
992 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
993 if (pr != -1)
994 l2up_create(l2, pr, 0, NULL);
995
996 if (skb_queue_len(&l2->i_queue) && cansend(l2))
997 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
998
999 if (l2->tm)
1000 l2_tei(l2, MDL_STATUS_UP_IND, 0);
1001}
1002
1003static void
1004l2_released(struct FsmInst *fi, int event, void *arg)
1005{
1006 struct layer2 *l2 = fi->userdata;
1007 struct sk_buff *skb = arg;
1008
1009 if (!get_PollFlag(l2, skb)) {
1010 l2_mdl_error_ua(fi, event, arg);
1011 return;
1012 }
1013 dev_kfree_skb(skb);
1014 stop_t200(l2, 6);
1015 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1016 mISDN_FsmChangeState(fi, ST_L2_4);
1017 if (l2->tm)
1018 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1019}
1020
1021static void
1022l2_reestablish(struct FsmInst *fi, int event, void *arg)
1023{
1024 struct layer2 *l2 = fi->userdata;
1025 struct sk_buff *skb = arg;
1026
1027 if (!get_PollFlagFree(l2, skb)) {
1028 establishlink(fi);
1029 test_and_set_bit(FLG_L3_INIT, &l2->flag);
1030 }
1031}
1032
1033static void
1034l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
1035{
1036 struct layer2 *l2 = fi->userdata;
1037 struct sk_buff *skb = arg;
1038
1039 if (get_PollFlagFree(l2, skb)) {
1040 stop_t200(l2, 7);
1041 if (!test_bit(FLG_L3_INIT, &l2->flag))
1042 skb_queue_purge(&l2->i_queue);
1043 if (test_bit(FLG_LAPB, &l2->flag))
1044 l2down_create(l2, PH_DEACTIVATE_REQ,
1045 l2_newid(l2), 0, NULL);
1046 st5_dl_release_l2l3(l2);
1047 mISDN_FsmChangeState(fi, ST_L2_4);
1048 if (l2->tm)
1049 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1050 }
1051}
1052
1053static void
1054l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
1055{
1056 struct layer2 *l2 = fi->userdata;
1057 struct sk_buff *skb = arg;
1058
1059 if (get_PollFlagFree(l2, skb)) {
1060 stop_t200(l2, 8);
1061 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1062 mISDN_FsmChangeState(fi, ST_L2_4);
1063 if (l2->tm)
1064 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1065 }
1066}
1067
1068void
1069enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
1070{
1071 struct sk_buff *skb;
1072 u_char tmp[MAX_L2HEADER_LEN];
1073 int i;
1074
1075 i = sethdraddr(l2, tmp, cr);
1076 if (test_bit(FLG_MOD128, &l2->flag)) {
1077 tmp[i++] = typ;
1078 tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
1079 } else
1080 tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
1081 skb = mI_alloc_skb(i, GFP_ATOMIC);
1082 if (!skb) {
1083 printk(KERN_WARNING
1084		    "isdnl2 can't alloc skbuff for enquiry_cr\n");
1085 return;
1086 }
1087 memcpy(skb_put(skb, i), tmp, i);
1088 enqueue_super(l2, skb);
1089}
1090
1091inline void
1092enquiry_response(struct layer2 *l2)
1093{
1094 if (test_bit(FLG_OWN_BUSY, &l2->flag))
1095 enquiry_cr(l2, RNR, RSP, 1);
1096 else
1097 enquiry_cr(l2, RR, RSP, 1);
1098 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1099}
1100
1101inline void
1102transmit_enquiry(struct layer2 *l2)
1103{
1104 if (test_bit(FLG_OWN_BUSY, &l2->flag))
1105 enquiry_cr(l2, RNR, CMD, 1);
1106 else
1107 enquiry_cr(l2, RR, CMD, 1);
1108 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1109 start_t200(l2, 9);
1110}
1111
1112
1113static void
1114nrerrorrecovery(struct FsmInst *fi)
1115{
1116 struct layer2 *l2 = fi->userdata;
1117
1118 l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
1119 establishlink(fi);
1120 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1121}
1122
1123static void
1124invoke_retransmission(struct layer2 *l2, unsigned int nr)
1125{
1126 u_int p1;
1127
1128 if (l2->vs != nr) {
1129 while (l2->vs != nr) {
1130 (l2->vs)--;
1131 if (test_bit(FLG_MOD128, &l2->flag)) {
1132 l2->vs %= 128;
1133 p1 = (l2->vs - l2->va) % 128;
1134 } else {
1135 l2->vs %= 8;
1136 p1 = (l2->vs - l2->va) % 8;
1137 }
1138 p1 = (p1 + l2->sow) % l2->window;
1139 if (l2->windowar[p1])
1140 skb_queue_head(&l2->i_queue, l2->windowar[p1]);
1141 else
1142 printk(KERN_WARNING
1143 "%s: windowar[%d] is NULL\n",
1144 __func__, p1);
1145 l2->windowar[p1] = NULL;
1146 }
1147 mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
1148 }
1149}
1150
1151static void
1152l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
1153{
1154 struct layer2 *l2 = fi->userdata;
1155 struct sk_buff *skb = arg;
1156 int PollFlag, rsp, typ = RR;
1157 unsigned int nr;
1158
1159 rsp = *skb->data & 0x2;
1160 if (test_bit(FLG_ORIG, &l2->flag))
1161 rsp = !rsp;
1162
1163 skb_pull(skb, l2addrsize(l2));
1164 if (IsRNR(skb->data, l2)) {
1165 set_peer_busy(l2);
1166 typ = RNR;
1167 } else
1168 clear_peer_busy(l2);
1169 if (IsREJ(skb->data, l2))
1170 typ = REJ;
1171
1172 if (test_bit(FLG_MOD128, &l2->flag)) {
1173 PollFlag = (skb->data[1] & 0x1) == 0x1;
1174 nr = skb->data[1] >> 1;
1175 } else {
1176 PollFlag = (skb->data[0] & 0x10);
1177 nr = (skb->data[0] >> 5) & 0x7;
1178 }
1179 dev_kfree_skb(skb);
1180
1181 if (PollFlag) {
1182 if (rsp)
1183 l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
1184 else
1185 enquiry_response(l2);
1186 }
1187 if (legalnr(l2, nr)) {
1188 if (typ == REJ) {
1189 setva(l2, nr);
1190 invoke_retransmission(l2, nr);
1191 stop_t200(l2, 10);
1192 if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
1193 EV_L2_T203, NULL, 6))
1194 l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
1195 } else if ((nr == l2->vs) && (typ == RR)) {
1196 setva(l2, nr);
1197 stop_t200(l2, 11);
1198 mISDN_FsmRestartTimer(&l2->t203, l2->T203,
1199 EV_L2_T203, NULL, 7);
1200 } else if ((l2->va != nr) || (typ == RNR)) {
1201 setva(l2, nr);
1202 if (typ != RR)
1203 mISDN_FsmDelTimer(&l2->t203, 9);
1204 restart_t200(l2, 12);
1205 }
1206 if (skb_queue_len(&l2->i_queue) && (typ == RR))
1207 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1208 } else
1209 nrerrorrecovery(fi);
1210}
1211
1212static void
1213l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
1214{
1215 struct layer2 *l2 = fi->userdata;
1216 struct sk_buff *skb = arg;
1217
1218 if (!test_bit(FLG_L3_INIT, &l2->flag))
1219 skb_queue_tail(&l2->i_queue, skb);
1220 else
1221 dev_kfree_skb(skb);
1222}
1223
1224static void
1225l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
1226{
1227 struct layer2 *l2 = fi->userdata;
1228 struct sk_buff *skb = arg;
1229
1230 skb_queue_tail(&l2->i_queue, skb);
1231 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1232}
1233
1234static void
1235l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
1236{
1237 struct layer2 *l2 = fi->userdata;
1238 struct sk_buff *skb = arg;
1239
1240 skb_queue_tail(&l2->i_queue, skb);
1241}
1242
1243static void
1244l2_got_iframe(struct FsmInst *fi, int event, void *arg)
1245{
1246 struct layer2 *l2 = fi->userdata;
1247 struct sk_buff *skb = arg;
1248 int PollFlag, i;
1249 u_int ns, nr;
1250
1251 i = l2addrsize(l2);
1252 if (test_bit(FLG_MOD128, &l2->flag)) {
1253 PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
1254 ns = skb->data[i] >> 1;
1255 nr = (skb->data[i + 1] >> 1) & 0x7f;
1256 } else {
1257 PollFlag = (skb->data[i] & 0x10);
1258 ns = (skb->data[i] >> 1) & 0x7;
1259 nr = (skb->data[i] >> 5) & 0x7;
1260 }
1261 if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
1262 dev_kfree_skb(skb);
1263 if (PollFlag)
1264 enquiry_response(l2);
1265 } else {
1266 if (l2->vr == ns) {
1267 l2->vr++;
1268 if (test_bit(FLG_MOD128, &l2->flag))
1269 l2->vr %= 128;
1270 else
1271 l2->vr %= 8;
1272 test_and_clear_bit(FLG_REJEXC, &l2->flag);
1273 if (PollFlag)
1274 enquiry_response(l2);
1275 else
1276 test_and_set_bit(FLG_ACK_PEND, &l2->flag);
1277 skb_pull(skb, l2headersize(l2, 0));
1278 l2up(l2, DL_DATA_IND, skb);
1279 } else {
1280 /* n(s)!=v(r) */
1281 dev_kfree_skb(skb);
1282 if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
1283 if (PollFlag)
1284 enquiry_response(l2);
1285 } else {
1286 enquiry_cr(l2, REJ, RSP, PollFlag);
1287 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1288 }
1289 }
1290 }
1291 if (legalnr(l2, nr)) {
1292 if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
1293 (fi->state == ST_L2_7)) {
1294 if (nr == l2->vs) {
1295 stop_t200(l2, 13);
1296 mISDN_FsmRestartTimer(&l2->t203, l2->T203,
1297 EV_L2_T203, NULL, 7);
1298 } else if (nr != l2->va)
1299 restart_t200(l2, 14);
1300 }
1301 setva(l2, nr);
1302 } else {
1303 nrerrorrecovery(fi);
1304 return;
1305 }
1306 if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
1307 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1308 if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
1309 enquiry_cr(l2, RR, RSP, 0);
1310}
1311
1312static void
1313l2_got_tei(struct FsmInst *fi, int event, void *arg)
1314{
1315 struct layer2 *l2 = fi->userdata;
1316 u_int info;
1317
1318 l2->tei = (signed char)(long)arg;
1319 set_channel_address(&l2->ch, l2->sapi, l2->tei);
1320 info = DL_INFO_L2_CONNECT;
1321 l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
1322 if (fi->state == ST_L2_3) {
1323 establishlink(fi);
1324 test_and_set_bit(FLG_L3_INIT, &l2->flag);
1325 } else
1326 mISDN_FsmChangeState(fi, ST_L2_4);
1327 if (skb_queue_len(&l2->ui_queue))
1328 tx_ui(l2);
1329}
1330
1331static void
1332l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
1333{
1334 struct layer2 *l2 = fi->userdata;
1335
1336 if (test_bit(FLG_LAPD, &l2->flag) &&
1337 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1338 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1339 } else if (l2->rc == l2->N200) {
1340 mISDN_FsmChangeState(fi, ST_L2_4);
1341 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1342 skb_queue_purge(&l2->i_queue);
1343 l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
1344 if (test_bit(FLG_LAPB, &l2->flag))
1345 l2down_create(l2, PH_DEACTIVATE_REQ,
1346 l2_newid(l2), 0, NULL);
1347 st5_dl_release_l2l3(l2);
1348 if (l2->tm)
1349 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1350 } else {
1351 l2->rc++;
1352 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1353 send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
1354 SABME : SABM) | 0x10, CMD);
1355 }
1356}
1357
1358static void
1359l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
1360{
1361 struct layer2 *l2 = fi->userdata;
1362
1363 if (test_bit(FLG_LAPD, &l2->flag) &&
1364 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1365 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1366 } else if (l2->rc == l2->N200) {
1367 mISDN_FsmChangeState(fi, ST_L2_4);
1368 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1369 l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
1370 lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
1371 if (l2->tm)
1372 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1373 } else {
1374 l2->rc++;
1375 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
1376 NULL, 9);
1377 send_uframe(l2, NULL, DISC | 0x10, CMD);
1378 }
1379}
1380
1381static void
1382l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
1383{
1384 struct layer2 *l2 = fi->userdata;
1385
1386 if (test_bit(FLG_LAPD, &l2->flag) &&
1387 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1388 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1389 return;
1390 }
1391 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1392 l2->rc = 0;
1393 mISDN_FsmChangeState(fi, ST_L2_8);
1394 transmit_enquiry(l2);
1395 l2->rc++;
1396}
1397
1398static void
1399l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
1400{
1401 struct layer2 *l2 = fi->userdata;
1402
1403 if (test_bit(FLG_LAPD, &l2->flag) &&
1404 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1405 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
1406 return;
1407 }
1408 test_and_clear_bit(FLG_T200_RUN, &l2->flag);
1409 if (l2->rc == l2->N200) {
1410 l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
1411 establishlink(fi);
1412 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1413 } else {
1414 transmit_enquiry(l2);
1415 l2->rc++;
1416 }
1417}
1418
1419static void
1420l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
1421{
1422 struct layer2 *l2 = fi->userdata;
1423
1424 if (test_bit(FLG_LAPD, &l2->flag) &&
1425 test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
1426 mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
1427 return;
1428 }
1429 mISDN_FsmChangeState(fi, ST_L2_8);
1430 transmit_enquiry(l2);
1431 l2->rc = 0;
1432}
1433
1434static void
1435l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1436{
1437 struct layer2 *l2 = fi->userdata;
1438 struct sk_buff *skb, *nskb, *oskb;
1439 u_char header[MAX_L2HEADER_LEN];
1440 u_int i, p1;
1441
1442 if (!cansend(l2))
1443 return;
1444
1445 skb = skb_dequeue(&l2->i_queue);
1446 if (!skb)
1447 return;
1448
1449 if (test_bit(FLG_MOD128, &l2->flag))
1450 p1 = (l2->vs - l2->va) % 128;
1451 else
1452 p1 = (l2->vs - l2->va) % 8;
1453 p1 = (p1 + l2->sow) % l2->window;
1454 if (l2->windowar[p1]) {
1455		printk(KERN_WARNING "isdnl2 tried to overwrite ack queue entry %d\n",
1456 p1);
1457 dev_kfree_skb(l2->windowar[p1]);
1458 }
1459 l2->windowar[p1] = skb;
1460 i = sethdraddr(l2, header, CMD);
1461 if (test_bit(FLG_MOD128, &l2->flag)) {
1462 header[i++] = l2->vs << 1;
1463 header[i++] = l2->vr << 1;
1464 l2->vs = (l2->vs + 1) % 128;
1465 } else {
1466 header[i++] = (l2->vr << 5) | (l2->vs << 1);
1467 l2->vs = (l2->vs + 1) % 8;
1468 }
1469
1470 nskb = skb_clone(skb, GFP_ATOMIC);
1471 p1 = skb_headroom(nskb);
1472 if (p1 >= i)
1473 memcpy(skb_push(nskb, i), header, i);
1474 else {
1475 printk(KERN_WARNING
1476 "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
1477 oskb = nskb;
1478 nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
1479 if (!nskb) {
1480 dev_kfree_skb(oskb);
1481 printk(KERN_WARNING "%s: no skb mem\n", __func__);
1482 return;
1483 }
1484 memcpy(skb_put(nskb, i), header, i);
1485 memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
1486 dev_kfree_skb(oskb);
1487 }
1488 l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
1489 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1490 if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
1491 mISDN_FsmDelTimer(&l2->t203, 13);
1492 mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
1493 }
1494}
1495
1496static void
1497l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
1498{
1499 struct layer2 *l2 = fi->userdata;
1500 struct sk_buff *skb = arg;
1501 int PollFlag, rsp, rnr = 0;
1502 unsigned int nr;
1503
1504 rsp = *skb->data & 0x2;
1505 if (test_bit(FLG_ORIG, &l2->flag))
1506 rsp = !rsp;
1507
1508 skb_pull(skb, l2addrsize(l2));
1509
1510 if (IsRNR(skb->data, l2)) {
1511 set_peer_busy(l2);
1512 rnr = 1;
1513 } else
1514 clear_peer_busy(l2);
1515
1516 if (test_bit(FLG_MOD128, &l2->flag)) {
1517 PollFlag = (skb->data[1] & 0x1) == 0x1;
1518 nr = skb->data[1] >> 1;
1519 } else {
1520 PollFlag = (skb->data[0] & 0x10);
1521 nr = (skb->data[0] >> 5) & 0x7;
1522 }
1523 dev_kfree_skb(skb);
1524 if (rsp && PollFlag) {
1525 if (legalnr(l2, nr)) {
1526 if (rnr) {
1527 restart_t200(l2, 15);
1528 } else {
1529 stop_t200(l2, 16);
1530 mISDN_FsmAddTimer(&l2->t203, l2->T203,
1531 EV_L2_T203, NULL, 5);
1532 setva(l2, nr);
1533 }
1534 invoke_retransmission(l2, nr);
1535 mISDN_FsmChangeState(fi, ST_L2_7);
1536 if (skb_queue_len(&l2->i_queue) && cansend(l2))
1537 mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
1538 } else
1539 nrerrorrecovery(fi);
1540 } else {
1541 if (!rsp && PollFlag)
1542 enquiry_response(l2);
1543 if (legalnr(l2, nr))
1544 setva(l2, nr);
1545 else
1546 nrerrorrecovery(fi);
1547 }
1548}
1549
1550static void
1551l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
1552{
1553 struct layer2 *l2 = fi->userdata;
1554 struct sk_buff *skb = arg;
1555
1556 skb_pull(skb, l2addrsize(l2) + 1);
1557
1558 if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
1559 (IsUA(skb->data) && (fi->state == ST_L2_7))) {
1560 l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
1561 establishlink(fi);
1562 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1563 }
1564 dev_kfree_skb(skb);
1565}
1566
1567static void
1568l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
1569{
1570 struct layer2 *l2 = fi->userdata;
1571
1572 skb_queue_purge(&l2->ui_queue);
1573 l2->tei = GROUP_TEI;
1574 mISDN_FsmChangeState(fi, ST_L2_1);
1575}
1576
1577static void
1578l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
1579{
1580 struct layer2 *l2 = fi->userdata;
1581
1582 skb_queue_purge(&l2->ui_queue);
1583 l2->tei = GROUP_TEI;
1584 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1585 mISDN_FsmChangeState(fi, ST_L2_1);
1586}
1587
1588static void
1589l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
1590{
1591 struct layer2 *l2 = fi->userdata;
1592
1593 skb_queue_purge(&l2->i_queue);
1594 skb_queue_purge(&l2->ui_queue);
1595 freewin(l2);
1596 l2->tei = GROUP_TEI;
1597 stop_t200(l2, 17);
1598 st5_dl_release_l2l3(l2);
1599 mISDN_FsmChangeState(fi, ST_L2_1);
1600}
1601
1602static void
1603l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
1604{
1605 struct layer2 *l2 = fi->userdata;
1606
1607 skb_queue_purge(&l2->ui_queue);
1608 l2->tei = GROUP_TEI;
1609 stop_t200(l2, 18);
1610 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1611 mISDN_FsmChangeState(fi, ST_L2_1);
1612}
1613
1614static void
1615l2_tei_remove(struct FsmInst *fi, int event, void *arg)
1616{
1617 struct layer2 *l2 = fi->userdata;
1618
1619 skb_queue_purge(&l2->i_queue);
1620 skb_queue_purge(&l2->ui_queue);
1621 freewin(l2);
1622 l2->tei = GROUP_TEI;
1623 stop_t200(l2, 17);
1624 mISDN_FsmDelTimer(&l2->t203, 19);
1625 l2up_create(l2, DL_RELEASE_IND, 0, NULL);
1626/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
1627 * MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
1628 * 0, NULL, 0);
1629 */
1630 mISDN_FsmChangeState(fi, ST_L2_1);
1631}
1632
1633static void
1634l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg)
1635{
1636 struct layer2 *l2 = fi->userdata;
1637 struct sk_buff *skb = arg;
1638
1639 skb_queue_purge(&l2->i_queue);
1640 skb_queue_purge(&l2->ui_queue);
1641 if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
1642 l2up(l2, DL_RELEASE_IND, skb);
1643 else
1644 dev_kfree_skb(skb);
1645}
1646
1647static void
1648l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg)
1649{
1650 struct layer2 *l2 = fi->userdata;
1651 struct sk_buff *skb = arg;
1652
1653 skb_queue_purge(&l2->i_queue);
1654 skb_queue_purge(&l2->ui_queue);
1655 freewin(l2);
1656 stop_t200(l2, 19);
1657 st5_dl_release_l2l3(l2);
1658 mISDN_FsmChangeState(fi, ST_L2_4);
1659 if (l2->tm)
1660 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1661 dev_kfree_skb(skb);
1662}
1663
1664static void
1665l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg)
1666{
1667 struct layer2 *l2 = fi->userdata;
1668 struct sk_buff *skb = arg;
1669
1670 skb_queue_purge(&l2->ui_queue);
1671 stop_t200(l2, 20);
1672 l2up(l2, DL_RELEASE_CNF, skb);
1673 mISDN_FsmChangeState(fi, ST_L2_4);
1674 if (l2->tm)
1675 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1676}
1677
1678static void
1679l2_persistant_da(struct FsmInst *fi, int event, void *arg)
1680{
1681 struct layer2 *l2 = fi->userdata;
1682 struct sk_buff *skb = arg;
1683
1684 skb_queue_purge(&l2->i_queue);
1685 skb_queue_purge(&l2->ui_queue);
1686 freewin(l2);
1687 stop_t200(l2, 19);
1688 mISDN_FsmDelTimer(&l2->t203, 19);
1689 l2up(l2, DL_RELEASE_IND, skb);
1690 mISDN_FsmChangeState(fi, ST_L2_4);
1691 if (l2->tm)
1692 l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
1693}
1694
1695static void
1696l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
1697{
1698 struct layer2 *l2 = fi->userdata;
1699 struct sk_buff *skb = arg;
1700
1701 if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
1702 enquiry_cr(l2, RNR, RSP, 0);
1703 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1704 }
1705 if (skb)
1706 dev_kfree_skb(skb);
1707}
1708
1709static void
1710l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
1711{
1712 struct layer2 *l2 = fi->userdata;
1713 struct sk_buff *skb = arg;
1714
1715 if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
1716 enquiry_cr(l2, RR, RSP, 0);
1717 test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
1718 }
1719 if (skb)
1720 dev_kfree_skb(skb);
1721}
1722
1723static void
1724l2_frame_error(struct FsmInst *fi, int event, void *arg)
1725{
1726 struct layer2 *l2 = fi->userdata;
1727
1728 l2mgr(l2, MDL_ERROR_IND, arg);
1729}
1730
1731static void
1732l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
1733{
1734 struct layer2 *l2 = fi->userdata;
1735
1736 l2mgr(l2, MDL_ERROR_IND, arg);
1737 establishlink(fi);
1738 test_and_clear_bit(FLG_L3_INIT, &l2->flag);
1739}
1740
1741static struct FsmNode L2FnList[] =
1742{
1743 {ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
1744 {ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
1745 {ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
1746 {ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
1747 {ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
1748 {ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
1749 {ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
1750 {ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
1751 {ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
1752 {ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
1753 {ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
1754 {ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
1755 {ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
1756 {ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
1757 {ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
1758 {ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
1759 {ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
1760 {ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
1761 {ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
1762 {ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
1763 {ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
1764 {ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
1765 {ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
1766 {ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
1767 {ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
1768 {ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
1769 {ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
1770 {ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
1771 {ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
1772 {ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
1773 {ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
1774 {ST_L2_4, EV_L2_SABME, l2_start_multi},
1775 {ST_L2_5, EV_L2_SABME, l2_send_UA},
1776 {ST_L2_6, EV_L2_SABME, l2_send_DM},
1777 {ST_L2_7, EV_L2_SABME, l2_restart_multi},
1778 {ST_L2_8, EV_L2_SABME, l2_restart_multi},
1779 {ST_L2_4, EV_L2_DISC, l2_send_DM},
1780 {ST_L2_5, EV_L2_DISC, l2_send_DM},
1781 {ST_L2_6, EV_L2_DISC, l2_send_UA},
1782 {ST_L2_7, EV_L2_DISC, l2_stop_multi},
1783 {ST_L2_8, EV_L2_DISC, l2_stop_multi},
1784 {ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
1785 {ST_L2_5, EV_L2_UA, l2_connected},
1786 {ST_L2_6, EV_L2_UA, l2_released},
1787 {ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
1788 {ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
1789 {ST_L2_4, EV_L2_DM, l2_reestablish},
1790 {ST_L2_5, EV_L2_DM, l2_st5_dm_release},
1791 {ST_L2_6, EV_L2_DM, l2_st6_dm_release},
1792 {ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
1793 {ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
1794 {ST_L2_1, EV_L2_UI, l2_got_ui},
1795 {ST_L2_2, EV_L2_UI, l2_got_ui},
1796 {ST_L2_3, EV_L2_UI, l2_got_ui},
1797 {ST_L2_4, EV_L2_UI, l2_got_ui},
1798 {ST_L2_5, EV_L2_UI, l2_got_ui},
1799 {ST_L2_6, EV_L2_UI, l2_got_ui},
1800 {ST_L2_7, EV_L2_UI, l2_got_ui},
1801 {ST_L2_8, EV_L2_UI, l2_got_ui},
1802 {ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
1803 {ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
1804 {ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
1805 {ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
1806 {ST_L2_7, EV_L2_I, l2_got_iframe},
1807 {ST_L2_8, EV_L2_I, l2_got_iframe},
1808 {ST_L2_5, EV_L2_T200, l2_st5_tout_200},
1809 {ST_L2_6, EV_L2_T200, l2_st6_tout_200},
1810 {ST_L2_7, EV_L2_T200, l2_st7_tout_200},
1811 {ST_L2_8, EV_L2_T200, l2_st8_tout_200},
1812 {ST_L2_7, EV_L2_T203, l2_st7_tout_203},
1813 {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
1814 {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1815 {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1816 {ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
1817 {ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
1818 {ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
1819 {ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
1820 {ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
1821 {ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1822 {ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
1823 {ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistant_da},
1824 {ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
1825 {ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
1826 {ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistant_da},
1827 {ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistant_da},
1828 {ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistant_da},
1829 {ST_L2_7, EV_L1_DEACTIVATE, l2_persistant_da},
1830 {ST_L2_8, EV_L1_DEACTIVATE, l2_persistant_da},
1831};
1832
1833#define L2_FN_COUNT (sizeof(L2FnList)/sizeof(struct FsmNode))
1834
1835static int
1836ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1837{
1838 u_char *datap = skb->data;
1839 int ret = -EINVAL;
1840 int psapi, ptei;
1841 u_int l;
1842 int c = 0;
1843
1844 l = l2addrsize(l2);
1845 if (skb->len <= l) {
1846 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
1847 return ret;
1848 }
1849 if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
1850 psapi = *datap++;
1851 ptei = *datap++;
1852 if ((psapi & 1) || !(ptei & 1)) {
1853 printk(KERN_WARNING
1854 "l2 D-channel frame wrong EA0/EA1\n");
1855 return ret;
1856 }
1857 psapi >>= 2;
1858 ptei >>= 1;
1859 if (psapi != l2->sapi) {
1860			/* not our business
1861 * printk(KERN_DEBUG "%s: sapi %d/%d sapi mismatch\n",
1862 * __func__,
1863 * psapi, l2->sapi);
1864 */
1865 dev_kfree_skb(skb);
1866 return 0;
1867 }
1868 if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
1869			/* not our business
1870 * printk(KERN_DEBUG "%s: tei %d/%d sapi %d mismatch\n",
1871 * __func__,
1872 * ptei, l2->tei, psapi);
1873 */
1874 dev_kfree_skb(skb);
1875 return 0;
1876 }
1877 } else
1878 datap += l;
1879 if (!(*datap & 1)) { /* I-Frame */
1880 c = iframe_error(l2, skb);
1881 if (!c)
1882 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
1883 } else if (IsSFrame(datap, l2)) { /* S-Frame */
1884 c = super_error(l2, skb);
1885 if (!c)
1886 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
1887 } else if (IsUI(datap)) {
1888 c = UI_error(l2, skb);
1889 if (!c)
1890 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
1891 } else if (IsSABME(datap, l2)) {
1892 c = unnum_error(l2, skb, CMD);
1893 if (!c)
1894 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
1895 } else if (IsUA(datap)) {
1896 c = unnum_error(l2, skb, RSP);
1897 if (!c)
1898 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
1899 } else if (IsDISC(datap)) {
1900 c = unnum_error(l2, skb, CMD);
1901 if (!c)
1902 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
1903 } else if (IsDM(datap)) {
1904 c = unnum_error(l2, skb, RSP);
1905 if (!c)
1906 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
1907 } else if (IsFRMR(datap)) {
1908 c = FRMR_error(l2, skb);
1909 if (!c)
1910 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
1911 } else
1912 c = 'L';
1913 if (c) {
1914 printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
1915 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
1916 }
1917 return ret;
1918}
1919
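/*
 * mISDNchannel send hook of the layer 2 instance: translates the
 * primitives coming up from layer 1 (PH_*) and down from the upper
 * layer (DL_*) into events for the layer 2 state machine. If the
 * handler reports the skb as not consumed (ret != 0) it is freed here.
 */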
1920static int
1921l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
1922{
1923 struct layer2 *l2 = container_of(ch, struct layer2, ch);
1924 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1925 int ret = -EINVAL;
1926
1927 if (*debug & DEBUG_L2_RECV)
1928 printk(KERN_DEBUG "%s: prim(%x) id(%x) tei(%d)\n",
1929 __func__, hh->prim, hh->id, l2->tei);
1930 switch (hh->prim) {
1931 case PH_DATA_IND:
1932 ret = ph_data_indication(l2, hh, skb);
1933 break;
1934 case PH_DATA_CNF:
1935 ret = ph_data_confirm(l2, hh, skb);
1936 break;
1937 case PH_ACTIVATE_IND:
1938 test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
1939 l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
1940 if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
1941 ret = mISDN_FsmEvent(&l2->l2m,
1942 EV_L2_DL_ESTABLISH_REQ, skb);
1943 break;
1944 case PH_DEACTIVATE_IND:
1945 test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
1946 l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
1947 ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
1948 break;
1949 case MPH_INFORMATION_IND:
1950 if (!l2->up)
1951 break;
1952 ret = l2->up->send(l2->up, skb);
1953 break;
1954 case DL_DATA_REQ:
1955 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
1956 break;
1957 case DL_UNITDATA_REQ:
1958 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
1959 break;
1960 case DL_ESTABLISH_REQ:
1961 if (test_bit(FLG_LAPB, &l2->flag))
1962 test_and_set_bit(FLG_ORIG, &l2->flag);
1963 if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
1964 if (test_bit(FLG_LAPD, &l2->flag) ||
1965 test_bit(FLG_ORIG, &l2->flag))
1966 ret = mISDN_FsmEvent(&l2->l2m,
1967 EV_L2_DL_ESTABLISH_REQ, skb);
1968 } else {
1969 if (test_bit(FLG_LAPD, &l2->flag) ||
1970 test_bit(FLG_ORIG, &l2->flag)) {
1971 test_and_set_bit(FLG_ESTAB_PEND,
1972 &l2->flag);
1973 }
1974 ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
1975 skb);
1976 }
1977 break;
1978 case DL_RELEASE_REQ:
1979 if (test_bit(FLG_LAPB, &l2->flag))
1980 l2down_create(l2, PH_DEACTIVATE_REQ,
1981 l2_newid(l2), 0, NULL);
1982 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
1983 skb);
1984 break;
1985 default:
1986 if (*debug & DEBUG_L2)
1987 l2m_debug(&l2->l2m, "l2 unknown pr %04x",
1988 hh->prim);
1989 }
1990 if (ret) {
1991 dev_kfree_skb(skb);
1992 ret = 0;
1993 }
1994 return ret;
1995}
1996
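/*
 * Entry point used by the TEI manager (tei.c): maps MDL_* requests of
 * the TEI assignment procedure onto layer 2 FSM events.
 */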
1997int
1998tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
1999{
2000 int ret = -EINVAL;
2001
2002 if (*debug & DEBUG_L2_TEI)
2003 printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
2004 switch (cmd) {
2005 case (MDL_ASSIGN_REQ):
2006 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
2007 break;
2008 case (MDL_REMOVE_REQ):
2009 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
2010 break;
2011 case (MDL_ERROR_IND):
2012 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
2013 break;
2014 case (MDL_ERROR_RSP):
2015 /* ETS 300-125 5.3.2.1 Test: TC13010 */
2016 printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
2017 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
2018 break;
2019 }
2020 return ret;
2021}
2022
2023static void
2024release_l2(struct layer2 *l2)
2025{
2026 mISDN_FsmDelTimer(&l2->t200, 21);
2027 mISDN_FsmDelTimer(&l2->t203, 16);
2028 skb_queue_purge(&l2->i_queue);
2029 skb_queue_purge(&l2->ui_queue);
2030 skb_queue_purge(&l2->down_queue);
2031 ReleaseWin(l2);
2032 if (test_bit(FLG_LAPD, &l2->flag)) {
2033 TEIrelease(l2);
2034 if (l2->ch.st)
2035 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
2036 CLOSE_CHANNEL, NULL);
2037 }
2038 kfree(l2);
2039}
2040
2041static int
2042l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
2043{
2044 struct layer2 *l2 = container_of(ch, struct layer2, ch);
2045 u_int info;
2046
2047 if (*debug & DEBUG_L2_CTRL)
2048 printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
2049
2050 switch (cmd) {
2051 case OPEN_CHANNEL:
2052 if (test_bit(FLG_LAPD, &l2->flag)) {
2053 set_channel_address(&l2->ch, l2->sapi, l2->tei);
2054 info = DL_INFO_L2_CONNECT;
2055 l2up_create(l2, DL_INFORMATION_IND,
2056 sizeof(info), &info);
2057 }
2058 break;
2059 case CLOSE_CHANNEL:
2060 if (l2->ch.peer)
2061 l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
2062 release_l2(l2);
2063 break;
2064 }
2065 return 0;
2066}
2067
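/*
 * Allocate and initialise a layer 2 instance. The protocol selects the
 * personality: LAPD in NT or TE mode on the D-channel (S0 or, with
 * OPTION_L2_PMX, E1) or X.75 SLP on a B-channel. Window size, T200,
 * T203 and N200 are set to the defaults used below.
 */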
2068struct layer2 *
2069create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, u_long arg)
2070{
2071 struct layer2 *l2;
2072 struct channel_req rq;
2073
2074 l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
2075 if (!l2) {
2076 printk(KERN_ERR "kzalloc layer2 failed\n");
2077 return NULL;
2078 }
2079 l2->next_id = 1;
2080 l2->down_id = MISDN_ID_NONE;
2081 l2->up = ch;
2082 l2->ch.st = ch->st;
2083 l2->ch.send = l2_send;
2084 l2->ch.ctrl = l2_ctrl;
2085 switch (protocol) {
2086 case ISDN_P_LAPD_NT:
2087 test_and_set_bit(FLG_LAPD, &l2->flag);
2088 test_and_set_bit(FLG_LAPD_NET, &l2->flag);
2089 test_and_set_bit(FLG_MOD128, &l2->flag);
2090 l2->sapi = 0;
2091 l2->maxlen = MAX_DFRAME_LEN;
2092 if (test_bit(OPTION_L2_PMX, &options))
2093 l2->window = 7;
2094 else
2095 l2->window = 1;
2096 if (test_bit(OPTION_L2_PTP, &options))
2097 test_and_set_bit(FLG_PTP, &l2->flag);
2098 if (test_bit(OPTION_L2_FIXEDTEI, &options))
2099 test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
2100 l2->tei = (u_int)arg;
2101 l2->T200 = 1000;
2102 l2->N200 = 3;
2103 l2->T203 = 10000;
2104 if (test_bit(OPTION_L2_PMX, &options))
2105 rq.protocol = ISDN_P_NT_E1;
2106 else
2107 rq.protocol = ISDN_P_NT_S0;
2108 rq.adr.channel = 0;
2109 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
2110 break;
2111 case ISDN_P_LAPD_TE:
2112 test_and_set_bit(FLG_LAPD, &l2->flag);
2113 test_and_set_bit(FLG_MOD128, &l2->flag);
2114 test_and_set_bit(FLG_ORIG, &l2->flag);
2115 l2->sapi = 0;
2116 l2->maxlen = MAX_DFRAME_LEN;
2117 if (test_bit(OPTION_L2_PMX, &options))
2118 l2->window = 7;
2119 else
2120 l2->window = 1;
2121 if (test_bit(OPTION_L2_PTP, &options))
2122 test_and_set_bit(FLG_PTP, &l2->flag);
2123 if (test_bit(OPTION_L2_FIXEDTEI, &options))
2124 test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
2125 l2->tei = (u_int)arg;
2126 l2->T200 = 1000;
2127 l2->N200 = 3;
2128 l2->T203 = 10000;
2129 if (test_bit(OPTION_L2_PMX, &options))
2130 rq.protocol = ISDN_P_TE_E1;
2131 else
2132 rq.protocol = ISDN_P_TE_S0;
2133 rq.adr.channel = 0;
2134 l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
2135 break;
2136 case ISDN_P_B_X75SLP:
2137 test_and_set_bit(FLG_LAPB, &l2->flag);
2138 l2->window = 7;
2139 l2->maxlen = MAX_DATA_SIZE;
2140 l2->T200 = 1000;
2141 l2->N200 = 4;
2142 l2->T203 = 5000;
2143 l2->addr.A = 3;
2144 l2->addr.B = 1;
2145 break;
2146 default:
2147 printk(KERN_ERR "layer2 create failed prt %x\n",
2148 protocol);
2149 kfree(l2);
2150 return NULL;
2151 }
2152 skb_queue_head_init(&l2->i_queue);
2153 skb_queue_head_init(&l2->ui_queue);
2154 skb_queue_head_init(&l2->down_queue);
2155 skb_queue_head_init(&l2->tmp_queue);
2156 InitWin(l2);
2157 l2->l2m.fsm = &l2fsm;
2158 if (test_bit(FLG_LAPB, &l2->flag) ||
2159 test_bit(FLG_PTP, &l2->flag) ||
2160 test_bit(FLG_LAPD_NET, &l2->flag))
2161 l2->l2m.state = ST_L2_4;
2162 else
2163 l2->l2m.state = ST_L2_1;
2164 l2->l2m.debug = *debug;
2165 l2->l2m.userdata = l2;
2166 l2->l2m.userint = 0;
2167 l2->l2m.printdebug = l2m_debug;
2168
2169 mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
2170 mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
2171 return l2;
2172}
2173
2174static int
2175x75create(struct channel_req *crq)
2176{
2177 struct layer2 *l2;
2178
2179 if (crq->protocol != ISDN_P_B_X75SLP)
2180 return -EPROTONOSUPPORT;
2181 l2 = create_l2(crq->ch, crq->protocol, 0, 0);
2182 if (!l2)
2183 return -ENOMEM;
2184 crq->ch = &l2->ch;
2185 crq->protocol = ISDN_P_B_HDLC;
2186 return 0;
2187}
2188
2189static struct Bprotocol X75SLP = {
2190 .Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
2191 .name = "X75SLP",
2192 .create = x75create
2193};
2194
2195int
2196Isdnl2_Init(u_int *deb)
2197{
2198 debug = deb;
2199 mISDN_register_Bprotocol(&X75SLP);
2200 l2fsm.state_count = L2_STATE_COUNT;
2201 l2fsm.event_count = L2_EVENT_COUNT;
2202 l2fsm.strEvent = strL2Event;
2203 l2fsm.strState = strL2State;
2204 mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
2205 TEIInit(deb);
2206 return 0;
2207}
2208
2209void
2210Isdnl2_cleanup(void)
2211{
2212 mISDN_unregister_Bprotocol(&X75SLP);
2213 TEIFree();
2214 mISDN_FsmFree(&l2fsm);
2215}
2216
diff --git a/drivers/isdn/mISDN/layer2.h b/drivers/isdn/mISDN/layer2.h
new file mode 100644
index 000000000000..6293f80dc2d3
--- /dev/null
+++ b/drivers/isdn/mISDN/layer2.h
@@ -0,0 +1,140 @@
1/*
2 * Layer 2 defines
3 *
4 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/mISDNif.h>
18#include <linux/skbuff.h>
19#include "fsm.h"
20
21#define MAX_WINDOW 8
22
23struct manager {
24 struct mISDNchannel ch;
25 struct mISDNchannel bcast;
26 u_long options;
27 struct list_head layer2;
28 rwlock_t lock;
29 struct FsmInst deact;
30 struct FsmTimer datimer;
31 struct sk_buff_head sendq;
32 struct mISDNchannel *up;
33 u_int nextid;
34 u_int lastid;
35};
36
37struct teimgr {
38 int ri;
39 int rcnt;
40 struct FsmInst tei_m;
41 struct FsmTimer timer;
42 int tval, nval;
43 struct layer2 *l2;
44 struct manager *mgr;
45};
46
47struct laddr {
48 u_char A;
49 u_char B;
50};
51
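/*
 * One layer 2 (LAPD/LAPB) instance. vs/va/vr hold the Q.921 state
 * variables V(S), V(A) and V(R); windowar[] keeps the unacknowledged
 * I-frames of the current send window.
 */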
52struct layer2 {
53 struct list_head list;
54 struct mISDNchannel ch;
55 u_long flag;
56 int id;
57 struct mISDNchannel *up;
58 signed char sapi;
59 signed char tei;
60 struct laddr addr;
61 u_int maxlen;
62 struct teimgr *tm;
63 u_int vs, va, vr;
64 int rc;
65 u_int window;
66 u_int sow;
67 struct FsmInst l2m;
68 struct FsmTimer t200, t203;
69 int T200, N200, T203;
70 u_int next_id;
71 u_int down_id;
72 struct sk_buff *windowar[MAX_WINDOW];
73 struct sk_buff_head i_queue;
74 struct sk_buff_head ui_queue;
75 struct sk_buff_head down_queue;
76 struct sk_buff_head tmp_queue;
77};
78
79enum {
80 ST_L2_1,
81 ST_L2_2,
82 ST_L2_3,
83 ST_L2_4,
84 ST_L2_5,
85 ST_L2_6,
86 ST_L2_7,
87 ST_L2_8,
88};
89
90#define L2_STATE_COUNT (ST_L2_8+1)
91
92extern struct layer2 *create_l2(struct mISDNchannel *, u_int,
93 u_long, u_long);
94extern int tei_l2(struct layer2 *, u_int, u_long arg);
95
96
97/* from tei.c */
98extern int l2_tei(struct layer2 *, u_int, u_long arg);
99extern void TEIrelease(struct layer2 *);
100extern int TEIInit(u_int *);
101extern void TEIFree(void);
102
103#define MAX_L2HEADER_LEN 4
104
105#define RR 0x01
106#define RNR 0x05
107#define REJ 0x09
108#define SABME 0x6f
109#define SABM 0x2f
110#define DM 0x0f
111#define UI 0x03
112#define DISC 0x43
113#define UA 0x63
114#define FRMR 0x87
115#define XID 0xaf
116
117#define CMD 0
118#define RSP 1
119
120#define LC_FLUSH_WAIT 1
121
122#define FLG_LAPB 0
123#define FLG_LAPD 1
124#define FLG_ORIG 2
125#define FLG_MOD128 3
126#define FLG_PEND_REL 4
127#define FLG_L3_INIT 5
128#define FLG_T200_RUN 6
129#define FLG_ACK_PEND 7
130#define FLG_REJEXC 8
131#define FLG_OWN_BUSY 9
132#define FLG_PEER_BUSY 10
133#define FLG_DCHAN_BUSY 11
134#define FLG_L1_ACTIV 12
135#define FLG_ESTAB_PEND 13
136#define FLG_PTP 14
137#define FLG_FIXED_TEI 15
138#define FLG_L2BLOCK 16
139#define FLG_L1_NOTREADY 17
140#define FLG_LAPD_NET 18
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
new file mode 100644
index 000000000000..4ba4cc364c9e
--- /dev/null
+++ b/drivers/isdn/mISDN/socket.c
@@ -0,0 +1,781 @@
1/*
2 *
3 * Author Karsten Keil <kkeil@novell.com>
4 *
5 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/mISDNif.h>
19#include "core.h"
20
21static int *debug;
22
23static struct proto mISDN_proto = {
24 .name = "misdn",
25 .owner = THIS_MODULE,
26 .obj_size = sizeof(struct mISDN_sock)
27};
28
29#define _pms(sk) ((struct mISDN_sock *)sk)
30
31static struct mISDN_sock_list data_sockets = {
32 .lock = __RW_LOCK_UNLOCKED(data_sockets.lock)
33};
34
35static struct mISDN_sock_list base_sockets = {
36 .lock = __RW_LOCK_UNLOCKED(base_sockets.lock)
37};
38
39#define L2_HEADER_LEN 4
40
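/*
 * Allocate an skb for data coming from userspace and reserve
 * L2_HEADER_LEN bytes of headroom so layer 2 can prepend its
 * address/control fields without reallocating.
 */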
41static inline struct sk_buff *
42_l2_alloc_skb(unsigned int len, gfp_t gfp_mask)
43{
44 struct sk_buff *skb;
45
46 skb = alloc_skb(len + L2_HEADER_LEN, gfp_mask);
47 if (likely(skb))
48 skb_reserve(skb, L2_HEADER_LEN);
49 return skb;
50}
51
52static void
53mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk)
54{
55 write_lock_bh(&l->lock);
56 sk_add_node(sk, &l->head);
57 write_unlock_bh(&l->lock);
58}
59
60static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk)
61{
62 write_lock_bh(&l->lock);
63 sk_del_node_init(sk);
64 write_unlock_bh(&l->lock);
65}
66
67static int
68mISDN_send(struct mISDNchannel *ch, struct sk_buff *skb)
69{
70 struct mISDN_sock *msk;
71 int err;
72
73 msk = container_of(ch, struct mISDN_sock, ch);
74 if (*debug & DEBUG_SOCKET)
75 printk(KERN_DEBUG "%s len %d %p\n", __func__, skb->len, skb);
76 if (msk->sk.sk_state == MISDN_CLOSED)
77 return -EUNATCH;
78 __net_timestamp(skb);
79 err = sock_queue_rcv_skb(&msk->sk, skb);
80 if (err)
81 printk(KERN_WARNING "%s: error %d\n", __func__, err);
82 return err;
83}
84
85static int
86mISDN_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
87{
88 struct mISDN_sock *msk;
89
90 msk = container_of(ch, struct mISDN_sock, ch);
91 if (*debug & DEBUG_SOCKET)
92 printk(KERN_DEBUG "%s(%p, %x, %p)\n", __func__, ch, cmd, arg);
93 switch (cmd) {
94 case CLOSE_CHANNEL:
95 msk->sk.sk_state = MISDN_CLOSED;
96 break;
97 }
98 return 0;
99}
100
101static inline void
102mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
103{
104 struct timeval tv;
105
106 if (_pms(sk)->cmask & MISDN_TIME_STAMP) {
107 skb_get_timestamp(skb, &tv);
108 put_cmsg(msg, SOL_MISDN, MISDN_TIME_STAMP, sizeof(tv), &tv);
109 }
110}
111
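/*
 * recvmsg: deliver one queued message to userspace. The mISDN header
 * stored in the skb control buffer is pushed in front of the payload,
 * so the user buffer must provide room for MISDN_HEADER_LEN extra
 * bytes or -ENOSPC is returned.
 */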
112static int
113mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
114 struct msghdr *msg, size_t len, int flags)
115{
116 struct sk_buff *skb;
117 struct sock *sk = sock->sk;
118 struct sockaddr_mISDN *maddr;
119
120 int copied, err;
121
122 if (*debug & DEBUG_SOCKET)
123 printk(KERN_DEBUG "%s: len %d, flags %x ch.nr %d, proto %x\n",
124 __func__, (int)len, flags, _pms(sk)->ch.nr,
125 sk->sk_protocol);
126 if (flags & (MSG_OOB))
127 return -EOPNOTSUPP;
128
129 if (sk->sk_state == MISDN_CLOSED)
130 return 0;
131
132 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
133 if (!skb)
134 return err;
135
136 if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
137 msg->msg_namelen = sizeof(struct sockaddr_mISDN);
138 maddr = (struct sockaddr_mISDN *)msg->msg_name;
139 maddr->family = AF_ISDN;
140 maddr->dev = _pms(sk)->dev->id;
141 if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
142 (sk->sk_protocol == ISDN_P_LAPD_NT)) {
143 maddr->channel = (mISDN_HEAD_ID(skb) >> 16) & 0xff;
144 maddr->tei = (mISDN_HEAD_ID(skb) >> 8) & 0xff;
145 maddr->sapi = mISDN_HEAD_ID(skb) & 0xff;
146 } else {
147 maddr->channel = _pms(sk)->ch.nr;
148 maddr->sapi = _pms(sk)->ch.addr & 0xFF;
149 maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
150 }
151 } else {
152 if (msg->msg_namelen)
153 printk(KERN_WARNING "%s: too small namelen %d\n",
154 __func__, msg->msg_namelen);
155 msg->msg_namelen = 0;
156 }
157
158 copied = skb->len + MISDN_HEADER_LEN;
159 if (len < copied) {
160 if (flags & MSG_PEEK)
161 atomic_dec(&skb->users);
162 else
163 skb_queue_head(&sk->sk_receive_queue, skb);
164 return -ENOSPC;
165 }
166 memcpy(skb_push(skb, MISDN_HEADER_LEN), mISDN_HEAD_P(skb),
167 MISDN_HEADER_LEN);
168
169 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
170
171 mISDN_sock_cmsg(sk, msg, skb);
172
173 skb_free_datagram(sk, skb);
174
175 return err ? : copied;
176}
177
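/*
 * sendmsg: the first MISDN_HEADER_LEN bytes of the user buffer carry
 * the mISDN header (prim/id); they are copied into the skb control
 * buffer and pulled off before the frame is handed to the peer
 * channel.
 */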
178static int
179mISDN_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
180 struct msghdr *msg, size_t len)
181{
182 struct sock *sk = sock->sk;
183 struct sk_buff *skb;
184 int err = -ENOMEM;
185 struct sockaddr_mISDN *maddr;
186
187 if (*debug & DEBUG_SOCKET)
188 printk(KERN_DEBUG "%s: len %d flags %x ch %d proto %x\n",
189 __func__, (int)len, msg->msg_flags, _pms(sk)->ch.nr,
190 sk->sk_protocol);
191
192 if (msg->msg_flags & MSG_OOB)
193 return -EOPNOTSUPP;
194
195 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
196 return -EINVAL;
197
198 if (len < MISDN_HEADER_LEN)
199 return -EINVAL;
200
201 if (sk->sk_state != MISDN_BOUND)
202 return -EBADFD;
203
204 lock_sock(sk);
205
206 skb = _l2_alloc_skb(len, GFP_KERNEL);
207 if (!skb)
208 goto done;
209
210 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
211 err = -EFAULT;
212 goto drop;
213 }
214
215 memcpy(mISDN_HEAD_P(skb), skb->data, MISDN_HEADER_LEN);
216 skb_pull(skb, MISDN_HEADER_LEN);
217
218 if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
 219		/* if we have an address, use it */
220 maddr = (struct sockaddr_mISDN *)msg->msg_name;
221 mISDN_HEAD_ID(skb) = maddr->channel;
222 } else { /* use default for L2 messages */
223 if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
224 (sk->sk_protocol == ISDN_P_LAPD_NT))
225 mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr;
226 }
227
228 if (*debug & DEBUG_SOCKET)
229 printk(KERN_DEBUG "%s: ID:%x\n",
230 __func__, mISDN_HEAD_ID(skb));
231
232 err = -ENODEV;
233 if (!_pms(sk)->ch.peer ||
234 (err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb)))
235 goto drop;
236
237 err = len;
238
239done:
240 release_sock(sk);
241 return err;
242
243drop:
244 kfree_skb(skb);
245 goto done;
246}
247
248static int
249data_sock_release(struct socket *sock)
250{
251 struct sock *sk = sock->sk;
252
253 if (*debug & DEBUG_SOCKET)
254 printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
255 if (!sk)
256 return 0;
257 switch (sk->sk_protocol) {
258 case ISDN_P_TE_S0:
259 case ISDN_P_NT_S0:
260 case ISDN_P_TE_E1:
261 case ISDN_P_NT_E1:
262 if (sk->sk_state == MISDN_BOUND)
263 delete_channel(&_pms(sk)->ch);
264 else
265 mISDN_sock_unlink(&data_sockets, sk);
266 break;
267 case ISDN_P_LAPD_TE:
268 case ISDN_P_LAPD_NT:
269 case ISDN_P_B_RAW:
270 case ISDN_P_B_HDLC:
271 case ISDN_P_B_X75SLP:
272 case ISDN_P_B_L2DTMF:
273 case ISDN_P_B_L2DSP:
274 case ISDN_P_B_L2DSPHDLC:
275 delete_channel(&_pms(sk)->ch);
276 mISDN_sock_unlink(&data_sockets, sk);
277 break;
278 }
279
280 lock_sock(sk);
281
282 sock_orphan(sk);
283 skb_queue_purge(&sk->sk_receive_queue);
284
285 release_sock(sk);
286 sock_put(sk);
287
288 return 0;
289}
290
291static int
292data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p)
293{
294 struct mISDN_ctrl_req cq;
295 int err = -EINVAL, val;
296 struct mISDNchannel *bchan, *next;
297
298 lock_sock(sk);
299 if (!_pms(sk)->dev) {
300 err = -ENODEV;
301 goto done;
302 }
303 switch (cmd) {
304 case IMCTRLREQ:
305 if (copy_from_user(&cq, p, sizeof(cq))) {
306 err = -EFAULT;
307 break;
308 }
309 if ((sk->sk_protocol & ~ISDN_P_B_MASK) == ISDN_P_B_START) {
310 list_for_each_entry_safe(bchan, next,
311 &_pms(sk)->dev->bchannels, list) {
312 if (bchan->nr == cq.channel) {
313 err = bchan->ctrl(bchan,
314 CONTROL_CHANNEL, &cq);
315 break;
316 }
317 }
318 } else
319 err = _pms(sk)->dev->D.ctrl(&_pms(sk)->dev->D,
320 CONTROL_CHANNEL, &cq);
321 if (err)
322 break;
323 if (copy_to_user(p, &cq, sizeof(cq)))
324 err = -EFAULT;
325 break;
326 case IMCLEAR_L2:
327 if (sk->sk_protocol != ISDN_P_LAPD_NT) {
328 err = -EINVAL;
329 break;
330 }
331 if (get_user(val, (int __user *)p)) {
332 err = -EFAULT;
333 break;
334 }
335 err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr,
336 CONTROL_CHANNEL, &val);
337 break;
338 default:
339 err = -EINVAL;
340 break;
341 }
342done:
343 release_sock(sk);
344 return err;
345}
346
347static int
348data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
349{
350 int err = 0, id;
351 struct sock *sk = sock->sk;
352 struct mISDNdevice *dev;
353 struct mISDNversion ver;
354
355 switch (cmd) {
356 case IMGETVERSION:
357 ver.major = MISDN_MAJOR_VERSION;
358 ver.minor = MISDN_MINOR_VERSION;
359 ver.release = MISDN_RELEASE;
360 if (copy_to_user((void __user *)arg, &ver, sizeof(ver)))
361 err = -EFAULT;
362 break;
363 case IMGETCOUNT:
364 id = get_mdevice_count();
365 if (put_user(id, (int __user *)arg))
366 err = -EFAULT;
367 break;
368 case IMGETDEVINFO:
369 if (get_user(id, (int __user *)arg)) {
370 err = -EFAULT;
371 break;
372 }
373 dev = get_mdevice(id);
374 if (dev) {
375 struct mISDN_devinfo di;
376
377 di.id = dev->id;
378 di.Dprotocols = dev->Dprotocols;
379 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
380 di.protocol = dev->D.protocol;
381 memcpy(di.channelmap, dev->channelmap,
382 MISDN_CHMAP_SIZE * 4);
383 di.nrbchan = dev->nrbchan;
384 strcpy(di.name, dev->name);
385 if (copy_to_user((void __user *)arg, &di, sizeof(di)))
386 err = -EFAULT;
387 } else
388 err = -ENODEV;
389 break;
390 default:
391 if (sk->sk_state == MISDN_BOUND)
392 err = data_sock_ioctl_bound(sk, cmd,
393 (void __user *)arg);
394 else
395 err = -ENOTCONN;
396 }
397 return err;
398}
399
400static int data_sock_setsockopt(struct socket *sock, int level, int optname,
401 char __user *optval, int len)
402{
403 struct sock *sk = sock->sk;
404 int err = 0, opt = 0;
405
406 if (*debug & DEBUG_SOCKET)
407 printk(KERN_DEBUG "%s(%p, %d, %x, %p, %d)\n", __func__, sock,
408 level, optname, optval, len);
409
410 lock_sock(sk);
411
412 switch (optname) {
413 case MISDN_TIME_STAMP:
414 if (get_user(opt, (int __user *)optval)) {
415 err = -EFAULT;
416 break;
417 }
418
419 if (opt)
420 _pms(sk)->cmask |= MISDN_TIME_STAMP;
421 else
422 _pms(sk)->cmask &= ~MISDN_TIME_STAMP;
423 break;
424 default:
425 err = -ENOPROTOOPT;
426 break;
427 }
428 release_sock(sk);
429 return err;
430}
431
432static int data_sock_getsockopt(struct socket *sock, int level, int optname,
433 char __user *optval, int __user *optlen)
434{
435 struct sock *sk = sock->sk;
436 int len, opt;
437
438 if (get_user(len, optlen))
439 return -EFAULT;
440
441 switch (optname) {
442 case MISDN_TIME_STAMP:
443 if (_pms(sk)->cmask & MISDN_TIME_STAMP)
444 opt = 1;
445 else
446 opt = 0;
447
448 if (put_user(opt, optval))
449 return -EFAULT;
450 break;
451 default:
452 return -ENOPROTOOPT;
453 }
454
455 return 0;
456}
457
458static int
459data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
460{
461 struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
462 struct sock *sk = sock->sk;
463 int err = 0;
464
465 if (*debug & DEBUG_SOCKET)
466 printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
467 if (addr_len != sizeof(struct sockaddr_mISDN))
468 return -EINVAL;
469 if (!maddr || maddr->family != AF_ISDN)
470 return -EINVAL;
471
472 lock_sock(sk);
473
474 if (_pms(sk)->dev) {
475 err = -EALREADY;
476 goto done;
477 }
478 _pms(sk)->dev = get_mdevice(maddr->dev);
479 if (!_pms(sk)->dev) {
480 err = -ENODEV;
481 goto done;
482 }
483 _pms(sk)->ch.send = mISDN_send;
484 _pms(sk)->ch.ctrl = mISDN_ctrl;
485
486 switch (sk->sk_protocol) {
487 case ISDN_P_TE_S0:
488 case ISDN_P_NT_S0:
489 case ISDN_P_TE_E1:
490 case ISDN_P_NT_E1:
491 mISDN_sock_unlink(&data_sockets, sk);
492 err = connect_layer1(_pms(sk)->dev, &_pms(sk)->ch,
493 sk->sk_protocol, maddr);
494 if (err)
495 mISDN_sock_link(&data_sockets, sk);
496 break;
497 case ISDN_P_LAPD_TE:
498 case ISDN_P_LAPD_NT:
499 err = create_l2entity(_pms(sk)->dev, &_pms(sk)->ch,
500 sk->sk_protocol, maddr);
501 break;
502 case ISDN_P_B_RAW:
503 case ISDN_P_B_HDLC:
504 case ISDN_P_B_X75SLP:
505 case ISDN_P_B_L2DTMF:
506 case ISDN_P_B_L2DSP:
507 case ISDN_P_B_L2DSPHDLC:
508 err = connect_Bstack(_pms(sk)->dev, &_pms(sk)->ch,
509 sk->sk_protocol, maddr);
510 break;
511 default:
512 err = -EPROTONOSUPPORT;
513 }
514 if (err)
515 goto done;
516 sk->sk_state = MISDN_BOUND;
517 _pms(sk)->ch.protocol = sk->sk_protocol;
518
519done:
520 release_sock(sk);
521 return err;
522}
523
524static int
525data_sock_getname(struct socket *sock, struct sockaddr *addr,
526 int *addr_len, int peer)
527{
528 struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
529 struct sock *sk = sock->sk;
530
531 if (!_pms(sk)->dev)
532 return -EBADFD;
533
534 lock_sock(sk);
535
536 *addr_len = sizeof(*maddr);
537 maddr->dev = _pms(sk)->dev->id;
538 maddr->channel = _pms(sk)->ch.nr;
539 maddr->sapi = _pms(sk)->ch.addr & 0xff;
540 maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xff;
541 release_sock(sk);
542 return 0;
543}
544
545static const struct proto_ops data_sock_ops = {
546 .family = PF_ISDN,
547 .owner = THIS_MODULE,
548 .release = data_sock_release,
549 .ioctl = data_sock_ioctl,
550 .bind = data_sock_bind,
551 .getname = data_sock_getname,
552 .sendmsg = mISDN_sock_sendmsg,
553 .recvmsg = mISDN_sock_recvmsg,
554 .poll = datagram_poll,
555 .listen = sock_no_listen,
556 .shutdown = sock_no_shutdown,
557 .setsockopt = data_sock_setsockopt,
558 .getsockopt = data_sock_getsockopt,
559 .connect = sock_no_connect,
560 .socketpair = sock_no_socketpair,
561 .accept = sock_no_accept,
562 .mmap = sock_no_mmap
563};
564
565static int
566data_sock_create(struct net *net, struct socket *sock, int protocol)
567{
568 struct sock *sk;
569
570 if (sock->type != SOCK_DGRAM)
571 return -ESOCKTNOSUPPORT;
572
573 sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
574 if (!sk)
575 return -ENOMEM;
576
577 sock_init_data(sock, sk);
578
579 sock->ops = &data_sock_ops;
580 sock->state = SS_UNCONNECTED;
581 sock_reset_flag(sk, SOCK_ZAPPED);
582
583 sk->sk_protocol = protocol;
584 sk->sk_state = MISDN_OPEN;
585 mISDN_sock_link(&data_sockets, sk);
586
587 return 0;
588}
589
590static int
591base_sock_release(struct socket *sock)
592{
593 struct sock *sk = sock->sk;
594
595 printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
596 if (!sk)
597 return 0;
598
599 mISDN_sock_unlink(&base_sockets, sk);
600 sock_orphan(sk);
601 sock_put(sk);
602
603 return 0;
604}
605
606static int
607base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
608{
609 int err = 0, id;
610 struct mISDNdevice *dev;
611 struct mISDNversion ver;
612
613 switch (cmd) {
614 case IMGETVERSION:
615 ver.major = MISDN_MAJOR_VERSION;
616 ver.minor = MISDN_MINOR_VERSION;
617 ver.release = MISDN_RELEASE;
618 if (copy_to_user((void __user *)arg, &ver, sizeof(ver)))
619 err = -EFAULT;
620 break;
621 case IMGETCOUNT:
622 id = get_mdevice_count();
623 if (put_user(id, (int __user *)arg))
624 err = -EFAULT;
625 break;
626 case IMGETDEVINFO:
627 if (get_user(id, (int __user *)arg)) {
628 err = -EFAULT;
629 break;
630 }
631 dev = get_mdevice(id);
632 if (dev) {
633 struct mISDN_devinfo di;
634
635 di.id = dev->id;
636 di.Dprotocols = dev->Dprotocols;
637 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
638 di.protocol = dev->D.protocol;
639 memcpy(di.channelmap, dev->channelmap,
640 MISDN_CHMAP_SIZE * 4);
641 di.nrbchan = dev->nrbchan;
642 strcpy(di.name, dev->name);
643 if (copy_to_user((void __user *)arg, &di, sizeof(di)))
644 err = -EFAULT;
645 } else
646 err = -ENODEV;
647 break;
648 default:
649 err = -EINVAL;
650 }
651 return err;
652}
653
654static int
655base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
656{
657 struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
658 struct sock *sk = sock->sk;
659 int err = 0;
660
661 if (!maddr || maddr->family != AF_ISDN)
662 return -EINVAL;
663
664 lock_sock(sk);
665
666 if (_pms(sk)->dev) {
667 err = -EALREADY;
668 goto done;
669 }
670
671 _pms(sk)->dev = get_mdevice(maddr->dev);
672 if (!_pms(sk)->dev) {
673 err = -ENODEV;
674 goto done;
675 }
676 sk->sk_state = MISDN_BOUND;
677
678done:
679 release_sock(sk);
680 return err;
681}
682
683static const struct proto_ops base_sock_ops = {
684 .family = PF_ISDN,
685 .owner = THIS_MODULE,
686 .release = base_sock_release,
687 .ioctl = base_sock_ioctl,
688 .bind = base_sock_bind,
689 .getname = sock_no_getname,
690 .sendmsg = sock_no_sendmsg,
691 .recvmsg = sock_no_recvmsg,
692 .poll = sock_no_poll,
693 .listen = sock_no_listen,
694 .shutdown = sock_no_shutdown,
695 .setsockopt = sock_no_setsockopt,
696 .getsockopt = sock_no_getsockopt,
697 .connect = sock_no_connect,
698 .socketpair = sock_no_socketpair,
699 .accept = sock_no_accept,
700 .mmap = sock_no_mmap
701};
702
703
704static int
705base_sock_create(struct net *net, struct socket *sock, int protocol)
706{
707 struct sock *sk;
708
709 if (sock->type != SOCK_RAW)
710 return -ESOCKTNOSUPPORT;
711
712 sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto);
713 if (!sk)
714 return -ENOMEM;
715
716 sock_init_data(sock, sk);
717 sock->ops = &base_sock_ops;
718 sock->state = SS_UNCONNECTED;
719 sock_reset_flag(sk, SOCK_ZAPPED);
720 sk->sk_protocol = protocol;
721 sk->sk_state = MISDN_OPEN;
722 mISDN_sock_link(&base_sockets, sk);
723
724 return 0;
725}
726
727static int
728mISDN_sock_create(struct net *net, struct socket *sock, int proto)
729{
730 int err = -EPROTONOSUPPORT;
731
732 switch (proto) {
733 case ISDN_P_BASE:
734 err = base_sock_create(net, sock, proto);
735 break;
736 case ISDN_P_TE_S0:
737 case ISDN_P_NT_S0:
738 case ISDN_P_TE_E1:
739 case ISDN_P_NT_E1:
740 case ISDN_P_LAPD_TE:
741 case ISDN_P_LAPD_NT:
742 case ISDN_P_B_RAW:
743 case ISDN_P_B_HDLC:
744 case ISDN_P_B_X75SLP:
745 case ISDN_P_B_L2DTMF:
746 case ISDN_P_B_L2DSP:
747 case ISDN_P_B_L2DSPHDLC:
748 err = data_sock_create(net, sock, proto);
749 break;
750 default:
751 return err;
752 }
753
754 return err;
755}
756
757static struct
758net_proto_family mISDN_sock_family_ops = {
759 .owner = THIS_MODULE,
760 .family = PF_ISDN,
761 .create = mISDN_sock_create,
762};
763
764int
765misdn_sock_init(u_int *deb)
766{
767 int err;
768
769 debug = deb;
770 err = sock_register(&mISDN_sock_family_ops);
771 if (err)
772 printk(KERN_ERR "%s: error(%d)\n", __func__, err);
773 return err;
774}
775
776void
777misdn_sock_cleanup(void)
778{
779 sock_unregister(PF_ISDN);
780}
781
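/*
 * Illustrative only, not part of this patch: a minimal userspace
 * sketch of the base-socket interface implemented above. It assumes
 * a userspace-usable <linux/mISDNif.h> and AF_ISDN/PF_ISDN support in
 * the libc socket headers; IMGETVERSION, IMGETCOUNT and struct
 * mISDNversion are the names handled by base_sock_ioctl() above.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/mISDNif.h>

int main(void)
{
	struct mISDNversion ver;
	int cnt, sk;

	/* base sockets are SOCK_RAW with protocol ISDN_P_BASE */
	sk = socket(PF_ISDN, SOCK_RAW, ISDN_P_BASE);
	if (sk < 0) {
		perror("socket(PF_ISDN)");
		return 1;
	}
	if (ioctl(sk, IMGETVERSION, &ver) == 0)
		printf("mISDN %d.%d.%d\n", ver.major, ver.minor, ver.release);
	if (ioctl(sk, IMGETCOUNT, &cnt) == 0)
		printf("%d device(s)\n", cnt);
	close(sk);
	return 0;
}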
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
new file mode 100644
index 000000000000..54cfddcc4784
--- /dev/null
+++ b/drivers/isdn/mISDN/stack.c
@@ -0,0 +1,674 @@
1/*
2 *
3 * Author Karsten Keil <kkeil@novell.com>
4 *
5 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/mISDNif.h>
19#include <linux/kthread.h>
20#include "core.h"
21
22static u_int *debug;
23
24static inline void
25_queue_message(struct mISDNstack *st, struct sk_buff *skb)
26{
27 struct mISDNhead *hh = mISDN_HEAD_P(skb);
28
29 if (*debug & DEBUG_QUEUE_FUNC)
30 printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
31 __func__, hh->prim, hh->id, skb);
32 skb_queue_tail(&st->msgq, skb);
33 if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) {
34 test_and_set_bit(mISDN_STACK_WORK, &st->status);
35 wake_up_interruptible(&st->workq);
36 }
37}
38
39int
40mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb)
41{
42 _queue_message(ch->st, skb);
43 return 0;
44}
45
46static struct mISDNchannel *
47get_channel4id(struct mISDNstack *st, u_int id)
48{
49 struct mISDNchannel *ch;
50
51 mutex_lock(&st->lmutex);
52 list_for_each_entry(ch, &st->layer2, list) {
53 if (id == ch->nr)
54 goto unlock;
55 }
56 ch = NULL;
57unlock:
58 mutex_unlock(&st->lmutex);
59 return ch;
60}
61
62static void
63send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
64{
65 struct hlist_node *node;
66 struct sock *sk;
67 struct sk_buff *cskb = NULL;
68
69 read_lock(&sl->lock);
70 sk_for_each(sk, node, &sl->head) {
71 if (sk->sk_state != MISDN_BOUND)
72 continue;
73 if (!cskb)
74 cskb = skb_copy(skb, GFP_KERNEL);
75 if (!cskb) {
76 printk(KERN_WARNING "%s no skb\n", __func__);
77 break;
78 }
79 if (!sock_queue_rcv_skb(sk, cskb))
80 cskb = NULL;
81 }
82 read_unlock(&sl->lock);
83 if (cskb)
84 dev_kfree_skb(cskb);
85}
86
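/*
 * Deliver a layer 2 bound message. MISDN_ID_ANY broadcasts to every
 * layer2 channel on the stack (the last one gets the original skb,
 * the others copies); otherwise the frame is routed by address, and
 * unmatched frames are offered to the TEI manager via CHECK_DATA.
 */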
87static void
88send_layer2(struct mISDNstack *st, struct sk_buff *skb)
89{
90 struct sk_buff *cskb;
91 struct mISDNhead *hh = mISDN_HEAD_P(skb);
92 struct mISDNchannel *ch;
93 int ret;
94
95 if (!st)
96 return;
97 mutex_lock(&st->lmutex);
98 if ((hh->id & MISDN_ID_ADDR_MASK) == MISDN_ID_ANY) { /* L2 for all */
99 list_for_each_entry(ch, &st->layer2, list) {
100 if (list_is_last(&ch->list, &st->layer2)) {
101 cskb = skb;
102 skb = NULL;
103 } else {
104 cskb = skb_copy(skb, GFP_KERNEL);
105 }
106 if (cskb) {
107 ret = ch->send(ch, cskb);
108 if (ret) {
109 if (*debug & DEBUG_SEND_ERR)
110 printk(KERN_DEBUG
111 "%s ch%d prim(%x) addr(%x)"
112 " err %d\n",
113 __func__, ch->nr,
114 hh->prim, ch->addr, ret);
115 dev_kfree_skb(cskb);
116 }
117 } else {
118 printk(KERN_WARNING "%s ch%d addr %x no mem\n",
119 __func__, ch->nr, ch->addr);
120 goto out;
121 }
122 }
123 } else {
124 list_for_each_entry(ch, &st->layer2, list) {
125 if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) {
126 ret = ch->send(ch, skb);
127 if (!ret)
128 skb = NULL;
129 goto out;
130 }
131 }
132 ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb);
133 if (!ret)
134 skb = NULL;
135 else if (*debug & DEBUG_SEND_ERR)
136 printk(KERN_DEBUG
137 "%s ch%d mgr prim(%x) addr(%x) err %d\n",
138 __func__, ch->nr, hh->prim, ch->addr, ret);
139 }
140out:
141 mutex_unlock(&st->lmutex);
142 if (skb)
143 dev_kfree_skb(skb);
144}
145
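/*
 * Dispatch one queued message by the layer mask of its primitive:
 * layer 1 goes to the hardware channel (with a copy to any bound
 * layer 1 sockets), layer 2 is fanned out by send_layer2(), and the
 * higher layers are routed by channel id.
 */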
146static inline int
147send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
148{
149 struct mISDNhead *hh = mISDN_HEAD_P(skb);
150 struct mISDNchannel *ch;
151 int lm;
152
153 lm = hh->prim & MISDN_LAYERMASK;
154 if (*debug & DEBUG_QUEUE_FUNC)
155 printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
156 __func__, hh->prim, hh->id, skb);
157 if (lm == 0x1) {
158 if (!hlist_empty(&st->l1sock.head)) {
159 __net_timestamp(skb);
160 send_socklist(&st->l1sock, skb);
161 }
162 return st->layer1->send(st->layer1, skb);
163 } else if (lm == 0x2) {
164 if (!hlist_empty(&st->l1sock.head))
165 send_socklist(&st->l1sock, skb);
166 send_layer2(st, skb);
167 return 0;
168 } else if (lm == 0x4) {
169 ch = get_channel4id(st, hh->id);
170 if (ch)
171 return ch->send(ch, skb);
172 else
173 printk(KERN_WARNING
174 "%s: dev(%s) prim(%x) id(%x) no channel\n",
175 __func__, st->dev->name, hh->prim, hh->id);
176 } else if (lm == 0x8) {
177 WARN_ON(lm == 0x8);
178 ch = get_channel4id(st, hh->id);
179 if (ch)
180 return ch->send(ch, skb);
181 else
182 printk(KERN_WARNING
183 "%s: dev(%s) prim(%x) id(%x) no channel\n",
184 __func__, st->dev->name, hh->prim, hh->id);
185 } else {
186 /* broadcast not handled yet */
187 printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
188 __func__, st->dev->name, hh->prim);
189 }
190 return -ESRCH;
191}
192
193static void
194do_clear_stack(struct mISDNstack *st)
195{
196}
197
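/*
 * Per-stack worker thread: drains st->msgq and pushes each message to
 * its destination layer, honouring the STOPPED/CLEARING/RESTART/ABORT
 * bits in st->status. It sleeps on st->workq until _queue_message()
 * signals new work.
 */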
198static int
199mISDNStackd(void *data)
200{
201 struct mISDNstack *st = data;
202 int err = 0;
203
204#ifdef CONFIG_SMP
205 lock_kernel();
206#endif
207 sigfillset(&current->blocked);
208#ifdef CONFIG_SMP
209 unlock_kernel();
210#endif
211 if (*debug & DEBUG_MSG_THREAD)
212 printk(KERN_DEBUG "mISDNStackd %s started\n", st->dev->name);
213
214 if (st->notify != NULL) {
215 complete(st->notify);
216 st->notify = NULL;
217 }
218
219 for (;;) {
220 struct sk_buff *skb;
221
222 if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) {
223 test_and_clear_bit(mISDN_STACK_WORK, &st->status);
224 test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
225 } else
226 test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
227 while (test_bit(mISDN_STACK_WORK, &st->status)) {
228 skb = skb_dequeue(&st->msgq);
229 if (!skb) {
230 test_and_clear_bit(mISDN_STACK_WORK,
231 &st->status);
232 /* test if a race happens */
233 skb = skb_dequeue(&st->msgq);
234 if (!skb)
235 continue;
236 test_and_set_bit(mISDN_STACK_WORK,
237 &st->status);
238 }
239#ifdef MISDN_MSG_STATS
240 st->msg_cnt++;
241#endif
242 err = send_msg_to_layer(st, skb);
243 if (unlikely(err)) {
244 if (*debug & DEBUG_SEND_ERR)
245 printk(KERN_DEBUG
246 "%s: %s prim(%x) id(%x) "
247 "send call(%d)\n",
248 __func__, st->dev->name,
249 mISDN_HEAD_PRIM(skb),
250 mISDN_HEAD_ID(skb), err);
251 dev_kfree_skb(skb);
252 continue;
253 }
254 if (unlikely(test_bit(mISDN_STACK_STOPPED,
255 &st->status))) {
256 test_and_clear_bit(mISDN_STACK_WORK,
257 &st->status);
258 test_and_clear_bit(mISDN_STACK_RUNNING,
259 &st->status);
260 break;
261 }
262 }
263 if (test_bit(mISDN_STACK_CLEARING, &st->status)) {
264 test_and_set_bit(mISDN_STACK_STOPPED, &st->status);
265 test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
266 do_clear_stack(st);
267 test_and_clear_bit(mISDN_STACK_CLEARING, &st->status);
268 test_and_set_bit(mISDN_STACK_RESTART, &st->status);
269 }
270 if (test_and_clear_bit(mISDN_STACK_RESTART, &st->status)) {
271 test_and_clear_bit(mISDN_STACK_STOPPED, &st->status);
272 test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
273 if (!skb_queue_empty(&st->msgq))
274 test_and_set_bit(mISDN_STACK_WORK,
275 &st->status);
276 }
277 if (test_bit(mISDN_STACK_ABORT, &st->status))
278 break;
279 if (st->notify != NULL) {
280 complete(st->notify);
281 st->notify = NULL;
282 }
283#ifdef MISDN_MSG_STATS
284 st->sleep_cnt++;
285#endif
286 test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
287 wait_event_interruptible(st->workq, (st->status &
288 mISDN_STACK_ACTION_MASK));
289 if (*debug & DEBUG_MSG_THREAD)
290 printk(KERN_DEBUG "%s: %s wake status %08lx\n",
291 __func__, st->dev->name, st->status);
292 test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
293
294 test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
295
296 if (test_bit(mISDN_STACK_STOPPED, &st->status)) {
297 test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
298#ifdef MISDN_MSG_STATS
299 st->stopped_cnt++;
300#endif
301 }
302 }
303#ifdef MISDN_MSG_STATS
304 printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
305 "msg %d sleep %d stopped\n",
306 st->dev->name, st->msg_cnt, st->sleep_cnt, st->stopped_cnt);
307 printk(KERN_DEBUG
308 "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
309 st->dev->name, st->thread->utime, st->thread->stime);
310 printk(KERN_DEBUG
311 "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
312 st->dev->name, st->thread->nvcsw, st->thread->nivcsw);
313 printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
314 st->dev->name);
315#endif
316 test_and_set_bit(mISDN_STACK_KILLED, &st->status);
317 test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
318 test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
319 test_and_clear_bit(mISDN_STACK_ABORT, &st->status);
320 skb_queue_purge(&st->msgq);
321 st->thread = NULL;
322 if (st->notify != NULL) {
323 complete(st->notify);
324 st->notify = NULL;
325 }
326 return 0;
327}
328
329static int
330l1_receive(struct mISDNchannel *ch, struct sk_buff *skb)
331{
332 if (!ch->st)
333 return -ENODEV;
334 __net_timestamp(skb);
335 _queue_message(ch->st, skb);
336 return 0;
337}
338
339void
340set_channel_address(struct mISDNchannel *ch, u_int sapi, u_int tei)
341{
342 ch->addr = sapi | (tei << 8);
343}
344
345void
346__add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
347{
348 list_add_tail(&ch->list, &st->layer2);
349}
350
351void
352add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
353{
354 mutex_lock(&st->lmutex);
355 __add_layer2(ch, st);
356 mutex_unlock(&st->lmutex);
357}
358
359static int
360st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
361{
 362	if (!ch->st || !ch->st->layer1)
363 return -EINVAL;
364 return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
365}
366
367int
368create_stack(struct mISDNdevice *dev)
369{
370 struct mISDNstack *newst;
371 int err;
372 DECLARE_COMPLETION_ONSTACK(done);
373
374 newst = kzalloc(sizeof(struct mISDNstack), GFP_KERNEL);
375 if (!newst) {
 376		printk(KERN_ERR "kzalloc mISDN_stack failed\n");
377 return -ENOMEM;
378 }
379 newst->dev = dev;
380 INIT_LIST_HEAD(&newst->layer2);
381 INIT_HLIST_HEAD(&newst->l1sock.head);
382 rwlock_init(&newst->l1sock.lock);
383 init_waitqueue_head(&newst->workq);
384 skb_queue_head_init(&newst->msgq);
385 mutex_init(&newst->lmutex);
386 dev->D.st = newst;
387 err = create_teimanager(dev);
388 if (err) {
389 printk(KERN_ERR "kmalloc teimanager failed\n");
390 kfree(newst);
391 return err;
392 }
393 dev->teimgr->peer = &newst->own;
394 dev->teimgr->recv = mISDN_queue_message;
395 dev->teimgr->st = newst;
396 newst->layer1 = &dev->D;
397 dev->D.recv = l1_receive;
398 dev->D.peer = &newst->own;
399 newst->own.st = newst;
400 newst->own.ctrl = st_own_ctrl;
401 newst->own.send = mISDN_queue_message;
402 newst->own.recv = mISDN_queue_message;
403 if (*debug & DEBUG_CORE_FUNC)
404 printk(KERN_DEBUG "%s: st(%s)\n", __func__, newst->dev->name);
405 newst->notify = &done;
406 newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
407 newst->dev->name);
408 if (IS_ERR(newst->thread)) {
409 err = PTR_ERR(newst->thread);
410 printk(KERN_ERR
411 "mISDN:cannot create kernel thread for %s (%d)\n",
412 newst->dev->name, err);
413 delete_teimanager(dev->teimgr);
414 kfree(newst);
415 } else
416 wait_for_completion(&done);
417 return err;
418}
419
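/*
 * Bind a raw layer 1 socket (ISDN_P_TE/NT_S0/E1) directly to the
 * D-channel: open the channel on the device and add the socket to the
 * stack's l1sock list so it receives copies of layer 1 traffic.
 */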
420int
421connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch,
422 u_int protocol, struct sockaddr_mISDN *adr)
423{
424 struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
425 struct channel_req rq;
426 int err;
427
428
429 if (*debug & DEBUG_CORE_FUNC)
430 printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
431 __func__, dev->name, protocol, adr->dev, adr->channel,
432 adr->sapi, adr->tei);
433 switch (protocol) {
434 case ISDN_P_NT_S0:
435 case ISDN_P_NT_E1:
436 case ISDN_P_TE_S0:
437 case ISDN_P_TE_E1:
438#ifdef PROTOCOL_CHECK
439 /* this should be enhanced */
440 if (!list_empty(&dev->D.st->layer2)
441 && dev->D.protocol != protocol)
442 return -EBUSY;
443 if (!hlist_empty(&dev->D.st->l1sock.head)
444 && dev->D.protocol != protocol)
445 return -EBUSY;
446#endif
447 ch->recv = mISDN_queue_message;
448 ch->peer = &dev->D.st->own;
449 ch->st = dev->D.st;
450 rq.protocol = protocol;
451 rq.adr.channel = 0;
452 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
453 printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
454 if (err)
455 return err;
456 write_lock_bh(&dev->D.st->l1sock.lock);
457 sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
458 write_unlock_bh(&dev->D.st->l1sock.lock);
459 break;
460 default:
461 return -ENOPROTOOPT;
462 }
463 return 0;
464}
465
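/*
 * Bind a B-channel socket. If the device implements the requested
 * protocol itself, the socket is wired directly to the hardware
 * channel; otherwise a software Bprotocol module (e.g. X75SLP) is
 * created and inserted between the socket and the hardware channel.
 */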
466int
467connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch,
468 u_int protocol, struct sockaddr_mISDN *adr)
469{
470 struct channel_req rq, rq2;
471 int pmask, err;
472 struct Bprotocol *bp;
473
474 if (*debug & DEBUG_CORE_FUNC)
475 printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
476 __func__, dev->name, protocol,
477 adr->dev, adr->channel, adr->sapi,
478 adr->tei);
479 ch->st = dev->D.st;
480 pmask = 1 << (protocol & ISDN_P_B_MASK);
481 if (pmask & dev->Bprotocols) {
482 rq.protocol = protocol;
483 rq.adr = *adr;
484 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
485 if (err)
486 return err;
487 ch->recv = rq.ch->send;
488 ch->peer = rq.ch;
489 rq.ch->recv = ch->send;
490 rq.ch->peer = ch;
491 rq.ch->st = dev->D.st;
492 } else {
493 bp = get_Bprotocol4mask(pmask);
494 if (!bp)
495 return -ENOPROTOOPT;
496 rq2.protocol = protocol;
497 rq2.adr = *adr;
498 rq2.ch = ch;
499 err = bp->create(&rq2);
500 if (err)
501 return err;
502 ch->recv = rq2.ch->send;
503 ch->peer = rq2.ch;
504 rq2.ch->st = dev->D.st;
505 rq.protocol = rq2.protocol;
506 rq.adr = *adr;
507 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
508 if (err) {
509 rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL);
510 return err;
511 }
512 rq2.ch->recv = rq.ch->send;
513 rq2.ch->peer = rq.ch;
514 rq.ch->recv = rq2.ch->send;
515 rq.ch->peer = rq2.ch;
516 rq.ch->st = dev->D.st;
517 }
518 ch->protocol = protocol;
519 ch->nr = rq.ch->nr;
520 return 0;
521}
522
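/*
 * Bind a LAPD socket: open the matching D-channel on the device and
 * ask the TEI manager to create the layer 2 entity. For ISDN_P_LAPD_NT
 * the TEI manager may return no channel yet, since TEIs are assigned
 * dynamically when terminals request them.
 */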
523int
524create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
525 u_int protocol, struct sockaddr_mISDN *adr)
526{
527 struct channel_req rq;
528 int err;
529
530 if (*debug & DEBUG_CORE_FUNC)
531 printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
532 __func__, dev->name, protocol,
533 adr->dev, adr->channel, adr->sapi,
534 adr->tei);
535 rq.protocol = ISDN_P_TE_S0;
536 if (dev->Dprotocols & (1 << ISDN_P_TE_E1))
537 rq.protocol = ISDN_P_TE_E1;
538 switch (protocol) {
539 case ISDN_P_LAPD_NT:
540 rq.protocol = ISDN_P_NT_S0;
541 if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
542 rq.protocol = ISDN_P_NT_E1;
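		/* fall through - the rest of the setup is shared with TE */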
543 case ISDN_P_LAPD_TE:
544#ifdef PROTOCOL_CHECK
545 /* this should be enhanced */
546 if (!list_empty(&dev->D.st->layer2)
547 && dev->D.protocol != protocol)
548 return -EBUSY;
549 if (!hlist_empty(&dev->D.st->l1sock.head)
550 && dev->D.protocol != protocol)
551 return -EBUSY;
552#endif
553 ch->recv = mISDN_queue_message;
554 ch->peer = &dev->D.st->own;
555 ch->st = dev->D.st;
556 rq.adr.channel = 0;
557 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
558 printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
559 if (err)
560 break;
561 rq.protocol = protocol;
562 rq.adr = *adr;
563 rq.ch = ch;
564 err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
565 printk(KERN_DEBUG "%s: ret 2 %d\n", __func__, err);
566 if (!err) {
567 if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
568 break;
569 add_layer2(rq.ch, dev->D.st);
570 rq.ch->recv = mISDN_queue_message;
571 rq.ch->peer = &dev->D.st->own;
572 rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */
573 }
574 break;
575 default:
576 err = -EPROTONOSUPPORT;
577 }
578 return err;
579}
580
581void
582delete_channel(struct mISDNchannel *ch)
583{
584 struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
585 struct mISDNchannel *pch;
586
587 if (!ch->st) {
588 printk(KERN_WARNING "%s: no stack\n", __func__);
589 return;
590 }
591 if (*debug & DEBUG_CORE_FUNC)
592 printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
593 ch->st->dev->name, ch->protocol);
594 if (ch->protocol >= ISDN_P_B_START) {
595 if (ch->peer) {
596 ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
597 ch->peer = NULL;
598 }
599 return;
600 }
601 switch (ch->protocol) {
602 case ISDN_P_NT_S0:
603 case ISDN_P_TE_S0:
604 case ISDN_P_NT_E1:
605 case ISDN_P_TE_E1:
606 write_lock_bh(&ch->st->l1sock.lock);
607 sk_del_node_init(&msk->sk);
608 write_unlock_bh(&ch->st->l1sock.lock);
609 ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL);
610 break;
611 case ISDN_P_LAPD_TE:
612 pch = get_channel4id(ch->st, ch->nr);
613 if (pch) {
614 mutex_lock(&ch->st->lmutex);
615 list_del(&pch->list);
616 mutex_unlock(&ch->st->lmutex);
617 pch->ctrl(pch, CLOSE_CHANNEL, NULL);
618 pch = ch->st->dev->teimgr;
619 pch->ctrl(pch, CLOSE_CHANNEL, NULL);
620 } else
621 printk(KERN_WARNING "%s: no l2 channel\n",
622 __func__);
623 break;
624 case ISDN_P_LAPD_NT:
625 pch = ch->st->dev->teimgr;
626 if (pch) {
627 pch->ctrl(pch, CLOSE_CHANNEL, NULL);
628 } else
629 printk(KERN_WARNING "%s: no l2 channel\n",
630 __func__);
631 break;
632 default:
633 break;
634 }
635 return;
636}
637
638void
639delete_stack(struct mISDNdevice *dev)
640{
641 struct mISDNstack *st = dev->D.st;
642 DECLARE_COMPLETION_ONSTACK(done);
643
644 if (*debug & DEBUG_CORE_FUNC)
645 printk(KERN_DEBUG "%s: st(%s)\n", __func__,
646 st->dev->name);
647 if (dev->teimgr)
648 delete_teimanager(dev->teimgr);
649 if (st->thread) {
650 if (st->notify) {
651 printk(KERN_WARNING "%s: notifier in use\n",
652 __func__);
653 complete(st->notify);
654 }
655 st->notify = &done;
656 test_and_set_bit(mISDN_STACK_ABORT, &st->status);
657 test_and_set_bit(mISDN_STACK_WAKEUP, &st->status);
658 wake_up_interruptible(&st->workq);
659 wait_for_completion(&done);
660 }
661 if (!list_empty(&st->layer2))
662 printk(KERN_WARNING "%s: layer2 list not empty\n",
663 __func__);
664 if (!hlist_empty(&st->l1sock.head))
665 printk(KERN_WARNING "%s: layer1 list not empty\n",
666 __func__);
667 kfree(st);
668}
669
670void
671mISDN_initstack(u_int *dp)
672{
673 debug = dp;
674}
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
new file mode 100644
index 000000000000..6fbae42127bf
--- /dev/null
+++ b/drivers/isdn/mISDN/tei.c
@@ -0,0 +1,1340 @@
1/*
2 *
3 * Author Karsten Keil <kkeil@novell.com>
4 *
5 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17#include "layer2.h"
18#include <linux/random.h>
19#include "core.h"
20
21#define ID_REQUEST 1
22#define ID_ASSIGNED 2
23#define ID_DENIED 3
24#define ID_CHK_REQ 4
25#define ID_CHK_RES 5
26#define ID_REMOVE 6
27#define ID_VERIFY 7
28
29#define TEI_ENTITY_ID 0xf
30
31#define MGR_PH_ACTIVE 16
32#define MGR_PH_NOTREADY 17
33
34#define DATIMER_VAL 10000
35
36static u_int *debug;
37
38static struct Fsm deactfsm = {NULL, 0, 0, NULL, NULL};
39static struct Fsm teifsmu = {NULL, 0, 0, NULL, NULL};
40static struct Fsm teifsmn = {NULL, 0, 0, NULL, NULL};
41
42enum {
43 ST_L1_DEACT,
44 ST_L1_DEACT_PENDING,
45 ST_L1_ACTIV,
46};
47#define DEACT_STATE_COUNT (ST_L1_ACTIV+1)
48
49static char *strDeactState[] =
50{
51 "ST_L1_DEACT",
52 "ST_L1_DEACT_PENDING",
53 "ST_L1_ACTIV",
54};
55
56enum {
57 EV_ACTIVATE,
58 EV_ACTIVATE_IND,
59 EV_DEACTIVATE,
60 EV_DEACTIVATE_IND,
61 EV_UI,
62 EV_DATIMER,
63};
64
65#define DEACT_EVENT_COUNT (EV_DATIMER+1)
66
67static char *strDeactEvent[] =
68{
69 "EV_ACTIVATE",
70 "EV_ACTIVATE_IND",
71 "EV_DEACTIVATE",
72 "EV_DEACTIVATE_IND",
73 "EV_UI",
74 "EV_DATIMER",
75};
76
77static void
78da_debug(struct FsmInst *fi, char *fmt, ...)
79{
80 struct manager *mgr = fi->userdata;
81 va_list va;
82
83 if (!(*debug & DEBUG_L2_TEIFSM))
84 return;
85 va_start(va, fmt);
86 printk(KERN_DEBUG "mgr(%d): ", mgr->ch.st->dev->id);
87 vprintk(fmt, va);
88 printk("\n");
89 va_end(va);
90}
91
92static void
93da_activate(struct FsmInst *fi, int event, void *arg)
94{
95 struct manager *mgr = fi->userdata;
96
97 if (fi->state == ST_L1_DEACT_PENDING)
98 mISDN_FsmDelTimer(&mgr->datimer, 1);
99 mISDN_FsmChangeState(fi, ST_L1_ACTIV);
100}
101
102static void
103da_deactivate_ind(struct FsmInst *fi, int event, void *arg)
104{
105 mISDN_FsmChangeState(fi, ST_L1_DEACT);
106}
107
108static void
109da_deactivate(struct FsmInst *fi, int event, void *arg)
110{
111 struct manager *mgr = fi->userdata;
112 struct layer2 *l2;
113 u_long flags;
114
115 read_lock_irqsave(&mgr->lock, flags);
116 list_for_each_entry(l2, &mgr->layer2, list) {
117 if (l2->l2m.state > ST_L2_4) {
 118			/* still have an active TEI */
119 read_unlock_irqrestore(&mgr->lock, flags);
120 return;
121 }
122 }
123 read_unlock_irqrestore(&mgr->lock, flags);
 124	/* All TEIs are inactive */
125 mISDN_FsmAddTimer(&mgr->datimer, DATIMER_VAL, EV_DATIMER, NULL, 1);
126 mISDN_FsmChangeState(fi, ST_L1_DEACT_PENDING);
127}
128
129static void
130da_ui(struct FsmInst *fi, int event, void *arg)
131{
132 struct manager *mgr = fi->userdata;
133
134 /* restart da timer */
135 mISDN_FsmDelTimer(&mgr->datimer, 2);
136 mISDN_FsmAddTimer(&mgr->datimer, DATIMER_VAL, EV_DATIMER, NULL, 2);
137
138}
139
140static void
141da_timer(struct FsmInst *fi, int event, void *arg)
142{
143 struct manager *mgr = fi->userdata;
144 struct layer2 *l2;
145 u_long flags;
146
147 /* check again */
148 read_lock_irqsave(&mgr->lock, flags);
149 list_for_each_entry(l2, &mgr->layer2, list) {
150 if (l2->l2m.state > ST_L2_4) {
 151			/* still have an active TEI */
152 read_unlock_irqrestore(&mgr->lock, flags);
153 mISDN_FsmChangeState(fi, ST_L1_ACTIV);
154 return;
155 }
156 }
157 read_unlock_irqrestore(&mgr->lock, flags);
 158	/* All TEIs are inactive */
159 mISDN_FsmChangeState(fi, ST_L1_DEACT);
160 _queue_data(&mgr->ch, PH_DEACTIVATE_REQ, MISDN_ID_ANY, 0, NULL,
161 GFP_ATOMIC);
162}
163
164static struct FsmNode DeactFnList[] =
165{
166 {ST_L1_DEACT, EV_ACTIVATE_IND, da_activate},
167 {ST_L1_ACTIV, EV_DEACTIVATE_IND, da_deactivate_ind},
168 {ST_L1_ACTIV, EV_DEACTIVATE, da_deactivate},
169 {ST_L1_DEACT_PENDING, EV_ACTIVATE, da_activate},
170 {ST_L1_DEACT_PENDING, EV_UI, da_ui},
171 {ST_L1_DEACT_PENDING, EV_DATIMER, da_timer},
172};
173
174enum {
175 ST_TEI_NOP,
176 ST_TEI_IDREQ,
177 ST_TEI_IDVERIFY,
178};
179
180#define TEI_STATE_COUNT (ST_TEI_IDVERIFY+1)
181
182static char *strTeiState[] =
183{
184 "ST_TEI_NOP",
185 "ST_TEI_IDREQ",
186 "ST_TEI_IDVERIFY",
187};
188
189enum {
190 EV_IDREQ,
191 EV_ASSIGN,
192 EV_ASSIGN_REQ,
193 EV_DENIED,
194 EV_CHKREQ,
195 EV_CHKRESP,
196 EV_REMOVE,
197 EV_VERIFY,
198 EV_TIMER,
199};
200
201#define TEI_EVENT_COUNT (EV_TIMER+1)
202
203static char *strTeiEvent[] =
204{
205 "EV_IDREQ",
206 "EV_ASSIGN",
207 "EV_ASSIGN_REQ",
208 "EV_DENIED",
209 "EV_CHKREQ",
210 "EV_CHKRESP",
211 "EV_REMOVE",
212 "EV_VERIFY",
213 "EV_TIMER",
214};
215
216static void
217tei_debug(struct FsmInst *fi, char *fmt, ...)
218{
219 struct teimgr *tm = fi->userdata;
220 va_list va;
221
222 if (!(*debug & DEBUG_L2_TEIFSM))
223 return;
224 va_start(va, fmt);
225 printk(KERN_DEBUG "tei(%d): ", tm->l2->tei);
226 vprintk(fmt, va);
227 printk("\n");
228 va_end(va);
229}
230
231
232
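/*
 * Pick a free channel number for a new layer 2 entity: the numbers of
 * all existing entities are collected in a 64 bit map and the first
 * unused id in 1..63 is returned.
 */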
233static int
234get_free_id(struct manager *mgr)
235{
236 u64 ids = 0;
237 int i;
238 struct layer2 *l2;
239
240 list_for_each_entry(l2, &mgr->layer2, list) {
241 if (l2->ch.nr > 63) {
242 printk(KERN_WARNING
 243			    "%s: more than 63 layer2 for one device\n",
244 __func__);
245 return -EBUSY;
246 }
247 test_and_set_bit(l2->ch.nr, (u_long *)&ids);
248 }
249 for (i = 1; i < 64; i++)
250 if (!test_bit(i, (u_long *)&ids))
251 return i;
 252	printk(KERN_WARNING "%s: more than 63 layer2 for one device\n",
253 __func__);
254 return -EBUSY;
255}
256
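/*
 * Pick a free TEI from the dynamic range (64 and up) by marking the
 * TEIs already in use in a 64 bit map.
 */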
257static int
258get_free_tei(struct manager *mgr)
259{
260 u64 ids = 0;
261 int i;
262 struct layer2 *l2;
263
264 list_for_each_entry(l2, &mgr->layer2, list) {
265 if (l2->ch.nr == 0)
266 continue;
267 if ((l2->ch.addr & 0xff) != 0)
268 continue;
269 i = l2->ch.addr >> 8;
270 if (i < 64)
271 continue;
272 i -= 64;
273
274 test_and_set_bit(i, (u_long *)&ids);
275 }
276 for (i = 0; i < 64; i++)
277 if (!test_bit(i, (u_long *)&ids))
278 return i + 64;
 279	printk(KERN_WARNING "%s: more than 63 dynamic tei for one device\n",
280 __func__);
281 return -1;
282}
283
284static void
285teiup_create(struct manager *mgr, u_int prim, int len, void *arg)
286{
287 struct sk_buff *skb;
288 struct mISDNhead *hh;
289 int err;
290
291 skb = mI_alloc_skb(len, GFP_ATOMIC);
292 if (!skb)
293 return;
294 hh = mISDN_HEAD_P(skb);
295 hh->prim = prim;
296 hh->id = (mgr->ch.nr << 16) | mgr->ch.addr;
297 if (len)
298 memcpy(skb_put(skb, len), arg, len);
299 err = mgr->up->send(mgr->up, skb);
300 if (err) {
301 printk(KERN_WARNING "%s: err=%d\n", __func__, err);
302 dev_kfree_skb(skb);
303 }
304}
305
306static u_int
307new_id(struct manager *mgr)
308{
309 u_int id;
310
311 id = mgr->nextid++;
312 if (id == 0x7fff)
313 mgr->nextid = 1;
314 id <<= 16;
315 id |= GROUP_TEI << 8;
316 id |= TEI_SAPI;
317 return id;
318}
319
320static void
321do_send(struct manager *mgr)
322{
323 if (!test_bit(MGR_PH_ACTIVE, &mgr->options))
324 return;
325
326 if (!test_and_set_bit(MGR_PH_NOTREADY, &mgr->options)) {
327 struct sk_buff *skb = skb_dequeue(&mgr->sendq);
328
329 if (!skb) {
330 test_and_clear_bit(MGR_PH_NOTREADY, &mgr->options);
331 return;
332 }
333 mgr->lastid = mISDN_HEAD_ID(skb);
334 mISDN_FsmEvent(&mgr->deact, EV_UI, NULL);
335 if (mgr->ch.recv(mgr->ch.peer, skb)) {
336 dev_kfree_skb(skb);
337 test_and_clear_bit(MGR_PH_NOTREADY, &mgr->options);
338 mgr->lastid = MISDN_ID_NONE;
339 }
340 }
341}
342
343static void
344do_ack(struct manager *mgr, u_int id)
345{
346 if (test_bit(MGR_PH_NOTREADY, &mgr->options)) {
347 if (id == mgr->lastid) {
348 if (test_bit(MGR_PH_ACTIVE, &mgr->options)) {
349 struct sk_buff *skb;
350
351 skb = skb_dequeue(&mgr->sendq);
352 if (skb) {
353 mgr->lastid = mISDN_HEAD_ID(skb);
354 if (!mgr->ch.recv(mgr->ch.peer, skb))
355 return;
356 dev_kfree_skb(skb);
357 }
358 }
359 mgr->lastid = MISDN_ID_NONE;
360 test_and_clear_bit(MGR_PH_NOTREADY, &mgr->options);
361 }
362 }
363}
364
365static void
366mgr_send_down(struct manager *mgr, struct sk_buff *skb)
367{
368 skb_queue_tail(&mgr->sendq, skb);
369 if (!test_bit(MGR_PH_ACTIVE, &mgr->options)) {
370 _queue_data(&mgr->ch, PH_ACTIVATE_REQ, MISDN_ID_ANY, 0,
371 NULL, GFP_KERNEL);
372 } else {
373 do_send(mgr);
374 }
375}
376
377static int
378dl_unit_data(struct manager *mgr, struct sk_buff *skb)
379{
380 if (!test_bit(MGR_OPT_NETWORK, &mgr->options)) /* only the network side sends UI */
381 return -EINVAL;
382 if (!test_bit(MGR_PH_ACTIVE, &mgr->options))
383 _queue_data(&mgr->ch, PH_ACTIVATE_REQ, MISDN_ID_ANY, 0,
384 NULL, GFP_KERNEL);
385 skb_push(skb, 3);
386 skb->data[0] = 0x02; /* SAPI 0 C/R = 1 */
387 skb->data[1] = 0xff; /* TEI 127 */
388 skb->data[2] = UI; /* UI frame */
389 mISDN_HEAD_PRIM(skb) = PH_DATA_REQ;
390 mISDN_HEAD_ID(skb) = new_id(mgr);
391 skb_queue_tail(&mgr->sendq, skb);
392 do_send(mgr);
393 return 0;
394}
395
396unsigned int
397random_ri(void)
398{
399 u16 x;
400
401 get_random_bytes(&x, sizeof(x));
402 return x;
403}
404
405static struct layer2 *
406findtei(struct manager *mgr, int tei)
407{
408 struct layer2 *l2;
409 u_long flags;
410
411 read_lock_irqsave(&mgr->lock, flags);
412 list_for_each_entry(l2, &mgr->layer2, list) {
413 if ((l2->sapi == 0) && (l2->tei > 0) &&
414 (l2->tei != GROUP_TEI) && (l2->tei == tei))
415 goto done;
416 }
417 l2 = NULL;
418done:
419 read_unlock_irqrestore(&mgr->lock, flags);
420 return l2;
421}
422
423static void
424put_tei_msg(struct manager *mgr, u_char m_id, unsigned int ri, u_char tei)
425{
426 struct sk_buff *skb;
427 u_char bp[8];
428
429 bp[0] = (TEI_SAPI << 2);
430 if (test_bit(MGR_OPT_NETWORK, &mgr->options))
431 bp[0] |= 2; /* CR:=1 for net command */
432 bp[1] = (GROUP_TEI << 1) | 0x1;
433 bp[2] = UI;
434 bp[3] = TEI_ENTITY_ID;
435 bp[4] = ri >> 8;
436 bp[5] = ri & 0xff;
437 bp[6] = m_id;
438 bp[7] = (tei << 1) | 1;
439 skb = _alloc_mISDN_skb(PH_DATA_REQ, new_id(mgr),
440 8, bp, GFP_ATOMIC);
441 if (!skb) {
442 printk(KERN_WARNING "%s: no skb for tei msg\n", __func__);
443 return;
444 }
445 mgr_send_down(mgr, skb);
446}
447
448static void
449tei_id_request(struct FsmInst *fi, int event, void *arg)
450{
451 struct teimgr *tm = fi->userdata;
452
453 if (tm->l2->tei != GROUP_TEI) {
454 tm->tei_m.printdebug(&tm->tei_m,
455 "assign request for already assigned tei %d",
456 tm->l2->tei);
457 return;
458 }
459 tm->ri = random_ri();
460 if (*debug & DEBUG_L2_TEI)
461 tm->tei_m.printdebug(&tm->tei_m,
462 "assign request ri %d", tm->ri);
463 put_tei_msg(tm->mgr, ID_REQUEST, tm->ri, GROUP_TEI);
464 mISDN_FsmChangeState(fi, ST_TEI_IDREQ);
465 mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 1);
466 tm->nval = 3;
467}
468
469static void
470tei_id_assign(struct FsmInst *fi, int event, void *arg)
471{
472 struct teimgr *tm = fi->userdata;
473 struct layer2 *l2;
474 u_char *dp = arg;
475 int ri, tei;
476
477 ri = ((unsigned int) *dp++ << 8);
478 ri += *dp++;
479 dp++;
480 tei = *dp >> 1;
481 if (*debug & DEBUG_L2_TEI)
482 tm->tei_m.printdebug(fi, "identity assign ri %d tei %d",
483 ri, tei);
484 l2 = findtei(tm->mgr, tei);
485 if (l2) { /* same tei is in use */
486 if (ri != l2->tm->ri) {
487 tm->tei_m.printdebug(fi,
488 "possible duplicate assignment tei %d", tei);
489 tei_l2(l2, MDL_ERROR_RSP, 0);
490 }
491 } else if (ri == tm->ri) {
492 mISDN_FsmDelTimer(&tm->timer, 1);
493 mISDN_FsmChangeState(fi, ST_TEI_NOP);
494 tei_l2(tm->l2, MDL_ASSIGN_REQ, tei);
495 }
496}
497
498static void
499tei_id_test_dup(struct FsmInst *fi, int event, void *arg)
500{
501 struct teimgr *tm = fi->userdata;
502 struct layer2 *l2;
503 u_char *dp = arg;
504 int tei, ri;
505
506 ri = ((unsigned int) *dp++ << 8);
507 ri += *dp++;
508 dp++;
509 tei = *dp >> 1;
510 if (*debug & DEBUG_L2_TEI)
511 tm->tei_m.printdebug(fi, "foreign identity assign ri %d tei %d",
512 ri, tei);
513 l2 = findtei(tm->mgr, tei);
514 if (l2) { /* same tei is in use */
515 if (ri != l2->tm->ri) { /* and it wasn't our request */
516 tm->tei_m.printdebug(fi,
517 "possible duplicate assignment tei %d", tei);
518 mISDN_FsmEvent(&l2->tm->tei_m, EV_VERIFY, NULL);
519 }
520 }
521}
522
523static void
524tei_id_denied(struct FsmInst *fi, int event, void *arg)
525{
526 struct teimgr *tm = fi->userdata;
527 u_char *dp = arg;
528 int ri, tei;
529
530 ri = ((unsigned int) *dp++ << 8);
531 ri += *dp++;
532 dp++;
533 tei = *dp >> 1;
534 if (*debug & DEBUG_L2_TEI)
535 tm->tei_m.printdebug(fi, "identity denied ri %d tei %d",
536 ri, tei);
537}
538
539static void
540tei_id_chk_req(struct FsmInst *fi, int event, void *arg)
541{
542 struct teimgr *tm = fi->userdata;
543 u_char *dp = arg;
544 int tei;
545
546 tei = *(dp+3) >> 1;
547 if (*debug & DEBUG_L2_TEI)
548 tm->tei_m.printdebug(fi, "identity check req tei %d", tei);
549 if ((tm->l2->tei != GROUP_TEI) && ((tei == GROUP_TEI) ||
550 (tei == tm->l2->tei))) {
551 mISDN_FsmDelTimer(&tm->timer, 4);
552 mISDN_FsmChangeState(&tm->tei_m, ST_TEI_NOP);
553 put_tei_msg(tm->mgr, ID_CHK_RES, random_ri(), tm->l2->tei);
554 }
555}
556
557static void
558tei_id_remove(struct FsmInst *fi, int event, void *arg)
559{
560 struct teimgr *tm = fi->userdata;
561 u_char *dp = arg;
562 int tei;
563
564 tei = *(dp+3) >> 1;
565 if (*debug & DEBUG_L2_TEI)
566 tm->tei_m.printdebug(fi, "identity remove tei %d", tei);
567 if ((tm->l2->tei != GROUP_TEI) &&
568 ((tei == GROUP_TEI) || (tei == tm->l2->tei))) {
569 mISDN_FsmDelTimer(&tm->timer, 5);
570 mISDN_FsmChangeState(&tm->tei_m, ST_TEI_NOP);
571 tei_l2(tm->l2, MDL_REMOVE_REQ, 0);
572 }
573}
574
575static void
576tei_id_verify(struct FsmInst *fi, int event, void *arg)
577{
578 struct teimgr *tm = fi->userdata;
579
580 if (*debug & DEBUG_L2_TEI)
581 tm->tei_m.printdebug(fi, "id verify request for tei %d",
582 tm->l2->tei);
583 put_tei_msg(tm->mgr, ID_VERIFY, 0, tm->l2->tei);
584 mISDN_FsmChangeState(&tm->tei_m, ST_TEI_IDVERIFY);
585 mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 2);
586 tm->nval = 2;
587}
588
589static void
590tei_id_req_tout(struct FsmInst *fi, int event, void *arg)
591{
592 struct teimgr *tm = fi->userdata;
593
594 if (--tm->nval) {
595 tm->ri = random_ri();
596 if (*debug & DEBUG_L2_TEI)
597 tm->tei_m.printdebug(fi, "assign req(%d) ri %d",
598 4 - tm->nval, tm->ri);
599 put_tei_msg(tm->mgr, ID_REQUEST, tm->ri, GROUP_TEI);
600 mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 3);
601 } else {
602 tm->tei_m.printdebug(fi, "assign req failed");
603 tei_l2(tm->l2, MDL_ERROR_RSP, 0);
604 mISDN_FsmChangeState(fi, ST_TEI_NOP);
605 }
606}
607
608static void
609tei_id_ver_tout(struct FsmInst *fi, int event, void *arg)
610{
611 struct teimgr *tm = fi->userdata;
612
613 if (--tm->nval) {
614 if (*debug & DEBUG_L2_TEI)
615 tm->tei_m.printdebug(fi,
616 "id verify req(%d) for tei %d",
617 3 - tm->nval, tm->l2->tei);
618 put_tei_msg(tm->mgr, ID_VERIFY, 0, tm->l2->tei);
619 mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 4);
620 } else {
621 tm->tei_m.printdebug(fi, "verify req for tei %d failed",
622 tm->l2->tei);
623 tei_l2(tm->l2, MDL_REMOVE_REQ, 0);
624 mISDN_FsmChangeState(fi, ST_TEI_NOP);
625 }
626}
627
628static struct FsmNode TeiFnListUser[] =
629{
630 {ST_TEI_NOP, EV_IDREQ, tei_id_request},
631 {ST_TEI_NOP, EV_ASSIGN, tei_id_test_dup},
632 {ST_TEI_NOP, EV_VERIFY, tei_id_verify},
633 {ST_TEI_NOP, EV_REMOVE, tei_id_remove},
634 {ST_TEI_NOP, EV_CHKREQ, tei_id_chk_req},
635 {ST_TEI_IDREQ, EV_TIMER, tei_id_req_tout},
636 {ST_TEI_IDREQ, EV_ASSIGN, tei_id_assign},
637 {ST_TEI_IDREQ, EV_DENIED, tei_id_denied},
638 {ST_TEI_IDVERIFY, EV_TIMER, tei_id_ver_tout},
639 {ST_TEI_IDVERIFY, EV_REMOVE, tei_id_remove},
640 {ST_TEI_IDVERIFY, EV_CHKREQ, tei_id_chk_req},
641};
642
643static void
644tei_l2remove(struct layer2 *l2)
645{
646 put_tei_msg(l2->tm->mgr, ID_REMOVE, 0, l2->tei);
647 tei_l2(l2, MDL_REMOVE_REQ, 0);
648 list_del(&l2->ch.list);
649 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
650}
651
652static void
653tei_assign_req(struct FsmInst *fi, int event, void *arg)
654{
655 struct teimgr *tm = fi->userdata;
656 u_char *dp = arg;
657
658 if (tm->l2->tei == GROUP_TEI) {
659 tm->tei_m.printdebug(&tm->tei_m,
660 "net tei assign request without tei");
661 return;
662 }
663 tm->ri = ((unsigned int) *dp++ << 8);
664 tm->ri += *dp++;
665 if (*debug & DEBUG_L2_TEI)
666 tm->tei_m.printdebug(&tm->tei_m,
667 "net assign request ri %d teim %d", tm->ri, *dp);
668 put_tei_msg(tm->mgr, ID_ASSIGNED, tm->ri, tm->l2->tei);
669 mISDN_FsmChangeState(fi, ST_TEI_NOP);
670}
671
672static void
673tei_id_chk_req_net(struct FsmInst *fi, int event, void *arg)
674{
675 struct teimgr *tm = fi->userdata;
676
677 if (*debug & DEBUG_L2_TEI)
678 tm->tei_m.printdebug(fi, "id check request for tei %d",
679 tm->l2->tei);
680 tm->rcnt = 0;
681 put_tei_msg(tm->mgr, ID_CHK_REQ, 0, tm->l2->tei);
682 mISDN_FsmChangeState(&tm->tei_m, ST_TEI_IDVERIFY);
683 mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 2);
684 tm->nval = 2;
685}
686
687static void
688tei_id_chk_resp(struct FsmInst *fi, int event, void *arg)
689{
690 struct teimgr *tm = fi->userdata;
691 u_char *dp = arg;
692 int tei;
693
694 tei = dp[3] >> 1;
695 if (*debug & DEBUG_L2_TEI)
696 tm->tei_m.printdebug(fi, "identity check resp tei %d", tei);
697 if (tei == tm->l2->tei)
698 tm->rcnt++;
699}
700
701static void
702tei_id_verify_net(struct FsmInst *fi, int event, void *arg)
703{
704 struct teimgr *tm = fi->userdata;
705 u_char *dp = arg;
706 int tei;
707
708 tei = dp[3] >> 1;
709 if (*debug & DEBUG_L2_TEI)
710 tm->tei_m.printdebug(fi, "identity verify req tei %d/%d",
711 tei, tm->l2->tei);
712 if (tei == tm->l2->tei)
713 tei_id_chk_req_net(fi, event, arg);
714}
715
716static void
717tei_id_ver_tout_net(struct FsmInst *fi, int event, void *arg)
718{
719 struct teimgr *tm = fi->userdata;
720
721 if (tm->rcnt == 1) {
722 if (*debug & DEBUG_L2_TEI)
723 tm->tei_m.printdebug(fi,
724 "check req for tei %d successful", tm->l2->tei);
725 mISDN_FsmChangeState(fi, ST_TEI_NOP);
726 } else if (tm->rcnt > 1) {
727 /* duplicate assignment; remove */
728 tei_l2remove(tm->l2);
729 } else if (--tm->nval) {
730 if (*debug & DEBUG_L2_TEI)
731 tm->tei_m.printdebug(fi,
732 "id check req(%d) for tei %d",
733 3 - tm->nval, tm->l2->tei);
734 put_tei_msg(tm->mgr, ID_CHK_REQ, 0, tm->l2->tei);
735 mISDN_FsmAddTimer(&tm->timer, tm->tval, EV_TIMER, NULL, 4);
736 } else {
737 tm->tei_m.printdebug(fi, "check req for tei %d failed",
738 tm->l2->tei);
739 mISDN_FsmChangeState(fi, ST_TEI_NOP);
740 tei_l2remove(tm->l2);
741 }
742}
743
744static struct FsmNode TeiFnListNet[] =
745{
746 {ST_TEI_NOP, EV_ASSIGN_REQ, tei_assign_req},
747 {ST_TEI_NOP, EV_VERIFY, tei_id_verify_net},
748 {ST_TEI_NOP, EV_CHKREQ, tei_id_chk_req_net},
749 {ST_TEI_IDVERIFY, EV_TIMER, tei_id_ver_tout_net},
750 {ST_TEI_IDVERIFY, EV_CHKRESP, tei_id_chk_resp},
751};
752
753static void
754tei_ph_data_ind(struct teimgr *tm, u_int mt, u_char *dp, int len)
755{
756 if (test_bit(FLG_FIXED_TEI, &tm->l2->flag))
757 return;
758 if (*debug & DEBUG_L2_TEI)
759 tm->tei_m.printdebug(&tm->tei_m, "tei handler mt %x", mt);
760 if (mt == ID_ASSIGNED)
761 mISDN_FsmEvent(&tm->tei_m, EV_ASSIGN, dp);
762 else if (mt == ID_DENIED)
763 mISDN_FsmEvent(&tm->tei_m, EV_DENIED, dp);
764 else if (mt == ID_CHK_REQ)
765 mISDN_FsmEvent(&tm->tei_m, EV_CHKREQ, dp);
766 else if (mt == ID_REMOVE)
767 mISDN_FsmEvent(&tm->tei_m, EV_REMOVE, dp);
768 else if (mt == ID_VERIFY)
769 mISDN_FsmEvent(&tm->tei_m, EV_VERIFY, dp);
770 else if (mt == ID_CHK_RES)
771 mISDN_FsmEvent(&tm->tei_m, EV_CHKRESP, dp);
772}
773
774static struct layer2 *
775create_new_tei(struct manager *mgr, int tei)
776{
777 u_long opt = 0;
778 u_long flags;
779 int id;
780 struct layer2 *l2;
781
782 if (!mgr->up)
783 return NULL;
784 if (tei < 64)
785 test_and_set_bit(OPTION_L2_FIXEDTEI, &opt);
786 if (mgr->ch.st->dev->Dprotocols
787 & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1)))
788 test_and_set_bit(OPTION_L2_PMX, &opt);
789 l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, (u_int)opt, (u_long)tei);
790 if (!l2) {
791 printk(KERN_WARNING "%s:no memory for layer2\n", __func__);
792 return NULL;
793 }
794 l2->tm = kzalloc(sizeof(struct teimgr), GFP_KERNEL);
795 if (!l2->tm) {
796 kfree(l2);
797 printk(KERN_WARNING "%s:no memory for teimgr\n", __func__);
798 return NULL;
799 }
800 l2->tm->mgr = mgr;
801 l2->tm->l2 = l2;
802 l2->tm->tei_m.debug = *debug & DEBUG_L2_TEIFSM;
803 l2->tm->tei_m.userdata = l2->tm;
804 l2->tm->tei_m.printdebug = tei_debug;
805 l2->tm->tei_m.fsm = &teifsmn;
806 l2->tm->tei_m.state = ST_TEI_NOP;
807 l2->tm->tval = 2000; /* T202 2 sec */
808 mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer);
809 write_lock_irqsave(&mgr->lock, flags);
810 id = get_free_id(mgr);
811 list_add_tail(&l2->list, &mgr->layer2);
812 write_unlock_irqrestore(&mgr->lock, flags);
813 if (id < 0) {
814 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
815 printk(KERN_WARNING "%s:no free id\n", __func__);
816 return NULL;
817 } else {
818 l2->ch.nr = id;
819 __add_layer2(&l2->ch, mgr->ch.st);
820 l2->ch.recv = mgr->ch.recv;
821 l2->ch.peer = mgr->ch.peer;
822 l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL);
823 }
824 return l2;
825}
826
827static void
828new_tei_req(struct manager *mgr, u_char *dp)
829{
830 int tei, ri;
831 struct layer2 *l2;
832
833 ri = dp[0] << 8;
834 ri += dp[1];
835 if (!mgr->up)
836 goto denied;
837 tei = get_free_tei(mgr);
838 if (tei < 0) {
839 printk(KERN_WARNING "%s:No free tei\n", __func__);
840 goto denied;
841 }
842 l2 = create_new_tei(mgr, tei);
843 if (!l2)
844 goto denied;
845 else
846 mISDN_FsmEvent(&l2->tm->tei_m, EV_ASSIGN_REQ, dp);
847 return;
848denied:
849 put_tei_msg(mgr, ID_DENIED, ri, GROUP_TEI);
850}
851
852static int
853ph_data_ind(struct manager *mgr, struct sk_buff *skb)
854{
855 int ret = -EINVAL;
856 struct layer2 *l2;
857 u_long flags;
858 u_char mt;
859
860 if (skb->len < 8) {
861 if (*debug & DEBUG_L2_TEI)
862 printk(KERN_DEBUG "%s: short mgr frame %d/8\n",
863 __func__, skb->len);
864 goto done;
865 }
866
867
868 if ((skb->data[0] >> 2) != TEI_SAPI) /* not for us */
869 goto done;
870 if (skb->data[0] & 1) /* EA0 formal error */
871 goto done;
872 if (!(skb->data[1] & 1)) /* EA1 formal error */
873 goto done;
874 if ((skb->data[1] >> 1) != GROUP_TEI) /* not for us */
875 goto done;
876 if ((skb->data[2] & 0xef) != UI) /* not UI */
877 goto done;
878 if (skb->data[3] != TEI_ENTITY_ID) /* not tei entity */
879 goto done;
880 mt = skb->data[6];
881 switch (mt) {
882 case ID_REQUEST:
883 case ID_CHK_RES:
884 case ID_VERIFY:
885 if (!test_bit(MGR_OPT_NETWORK, &mgr->options))
886 goto done;
887 break;
888 case ID_ASSIGNED:
889 case ID_DENIED:
890 case ID_CHK_REQ:
891 case ID_REMOVE:
892 if (test_bit(MGR_OPT_NETWORK, &mgr->options))
893 goto done;
894 break;
895 default:
896 goto done;
897 }
898 ret = 0;
899 if (mt == ID_REQUEST) {
900 new_tei_req(mgr, &skb->data[4]);
901 goto done;
902 }
903 read_lock_irqsave(&mgr->lock, flags);
904 list_for_each_entry(l2, &mgr->layer2, list) {
905 tei_ph_data_ind(l2->tm, mt, &skb->data[4], skb->len - 4);
906 }
907 read_unlock_irqrestore(&mgr->lock, flags);
908done:
909 return ret;
910}
911
912int
913l2_tei(struct layer2 *l2, u_int cmd, u_long arg)
914{
915 struct teimgr *tm = l2->tm;
916
917 if (test_bit(FLG_FIXED_TEI, &l2->flag))
918 return 0;
919 if (*debug & DEBUG_L2_TEI)
920 printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
921 switch (cmd) {
922 case MDL_ASSIGN_IND:
923 mISDN_FsmEvent(&tm->tei_m, EV_IDREQ, NULL);
924 break;
925 case MDL_ERROR_IND:
926 if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
927 mISDN_FsmEvent(&tm->tei_m, EV_CHKREQ, &l2->tei);
928 if (test_bit(MGR_OPT_USER, &tm->mgr->options))
929 mISDN_FsmEvent(&tm->tei_m, EV_VERIFY, NULL);
930 break;
931 case MDL_STATUS_UP_IND:
932 if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
933 mISDN_FsmEvent(&tm->mgr->deact, EV_ACTIVATE, NULL);
934 break;
935 case MDL_STATUS_DOWN_IND:
936 if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
937 mISDN_FsmEvent(&tm->mgr->deact, EV_DEACTIVATE, NULL);
938 break;
939 case MDL_STATUS_UI_IND:
940 if (test_bit(MGR_OPT_NETWORK, &tm->mgr->options))
941 mISDN_FsmEvent(&tm->mgr->deact, EV_UI, NULL);
942 break;
943 }
944 return 0;
945}
946
947void
948TEIrelease(struct layer2 *l2)
949{
950 struct teimgr *tm = l2->tm;
951 u_long flags;
952
953 mISDN_FsmDelTimer(&tm->timer, 1);
954 write_lock_irqsave(&tm->mgr->lock, flags);
955 list_del(&l2->list);
956 write_unlock_irqrestore(&tm->mgr->lock, flags);
957 l2->tm = NULL;
958 kfree(tm);
959}
960
961static int
962create_teimgr(struct manager *mgr, struct channel_req *crq)
963{
964 struct layer2 *l2;
965 u_long opt = 0;
966 u_long flags;
967 int id;
968
969 if (*debug & DEBUG_L2_TEI)
970 printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
971 __func__, mgr->ch.st->dev->name, crq->protocol,
972 crq->adr.dev, crq->adr.channel, crq->adr.sapi,
973 crq->adr.tei);
974 if (crq->adr.sapi != 0) /* not supported yet */
975 return -EINVAL;
976 if (crq->adr.tei > GROUP_TEI)
977 return -EINVAL;
978 if (crq->adr.tei < 64)
979 test_and_set_bit(OPTION_L2_FIXEDTEI, &opt);
980 if (crq->adr.tei == 0)
981 test_and_set_bit(OPTION_L2_PTP, &opt);
982 if (test_bit(MGR_OPT_NETWORK, &mgr->options)) {
983 if (crq->protocol == ISDN_P_LAPD_TE)
984 return -EPROTONOSUPPORT;
985 if ((crq->adr.tei != 0) && (crq->adr.tei != 127))
986 return -EINVAL;
987 if (mgr->up) {
988 printk(KERN_WARNING
989 "%s: only one network manager is allowed\n",
990 __func__);
991 return -EBUSY;
992 }
993 } else if (test_bit(MGR_OPT_USER, &mgr->options)) {
994 if (crq->protocol == ISDN_P_LAPD_NT)
995 return -EPROTONOSUPPORT;
996 if ((crq->adr.tei >= 64) && (crq->adr.tei < GROUP_TEI))
997 return -EINVAL; /* dyn tei */
998 } else {
999 if (crq->protocol == ISDN_P_LAPD_NT)
1000 test_and_set_bit(MGR_OPT_NETWORK, &mgr->options);
1001 if (crq->protocol == ISDN_P_LAPD_TE)
1002 test_and_set_bit(MGR_OPT_USER, &mgr->options);
1003 }
1004 if (mgr->ch.st->dev->Dprotocols
1005 & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1)))
1006 test_and_set_bit(OPTION_L2_PMX, &opt);
1007 if ((crq->protocol == ISDN_P_LAPD_NT) && (crq->adr.tei == 127)) {
1008 mgr->up = crq->ch;
1009 id = DL_INFO_L2_CONNECT;
1010 teiup_create(mgr, DL_INFORMATION_IND, sizeof(id), &id);
1011 crq->ch = NULL;
1012 if (!list_empty(&mgr->layer2)) {
1013 read_lock_irqsave(&mgr->lock, flags);
1014 list_for_each_entry(l2, &mgr->layer2, list) {
1015 l2->up = mgr->up;
1016 l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL);
1017 }
1018 read_unlock_irqrestore(&mgr->lock, flags);
1019 }
1020 return 0;
1021 }
1022 l2 = create_l2(crq->ch, crq->protocol, (u_int)opt,
1023 (u_long)crq->adr.tei);
1024 if (!l2)
1025 return -ENOMEM;
1026 l2->tm = kzalloc(sizeof(struct teimgr), GFP_KERNEL);
1027 if (!l2->tm) {
1028 kfree(l2);
1029 printk(KERN_ERR "kmalloc teimgr failed\n");
1030 return -ENOMEM;
1031 }
1032 l2->tm->mgr = mgr;
1033 l2->tm->l2 = l2;
1034 l2->tm->tei_m.debug = *debug & DEBUG_L2_TEIFSM;
1035 l2->tm->tei_m.userdata = l2->tm;
1036 l2->tm->tei_m.printdebug = tei_debug;
1037 if (crq->protocol == ISDN_P_LAPD_TE) {
1038 l2->tm->tei_m.fsm = &teifsmu;
1039 l2->tm->tei_m.state = ST_TEI_NOP;
1040 l2->tm->tval = 1000; /* T201 1 sec */
1041 } else {
1042 l2->tm->tei_m.fsm = &teifsmn;
1043 l2->tm->tei_m.state = ST_TEI_NOP;
1044 l2->tm->tval = 2000; /* T202 2 sec */
1045 }
1046 mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer);
1047 write_lock_irqsave(&mgr->lock, flags);
1048 id = get_free_id(mgr);
1049 list_add_tail(&l2->list, &mgr->layer2);
1050 write_unlock_irqrestore(&mgr->lock, flags);
1051 if (id < 0) {
1052 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
1053 } else {
1054 l2->ch.nr = id;
1055 l2->up->nr = id;
1056 crq->ch = &l2->ch;
1057 id = 0;
1058 }
1059 return id;
1060}
1061
1062static int
1063mgr_send(struct mISDNchannel *ch, struct sk_buff *skb)
1064{
1065 struct manager *mgr;
1066 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1067 int ret = -EINVAL;
1068
1069 mgr = container_of(ch, struct manager, ch);
1070 if (*debug & DEBUG_L2_RECV)
1071 printk(KERN_DEBUG "%s: prim(%x) id(%x)\n",
1072 __func__, hh->prim, hh->id);
1073 switch (hh->prim) {
1074 case PH_DATA_IND:
1075 mISDN_FsmEvent(&mgr->deact, EV_UI, NULL);
1076 ret = ph_data_ind(mgr, skb);
1077 break;
1078 case PH_DATA_CNF:
1079 do_ack(mgr, hh->id);
1080 ret = 0;
1081 break;
1082 case PH_ACTIVATE_IND:
1083 test_and_set_bit(MGR_PH_ACTIVE, &mgr->options);
1084 mISDN_FsmEvent(&mgr->deact, EV_ACTIVATE_IND, NULL);
1085 do_send(mgr);
1086 ret = 0;
1087 break;
1088 case PH_DEACTIVATE_IND:
1089 test_and_clear_bit(MGR_PH_ACTIVE, &mgr->options);
1090 mISDN_FsmEvent(&mgr->deact, EV_DEACTIVATE_IND, NULL);
1091 ret = 0;
1092 break;
1093 case DL_UNITDATA_REQ:
1094 return dl_unit_data(mgr, skb);
1095 }
1096 if (!ret)
1097 dev_kfree_skb(skb);
1098 return ret;
1099}
1100
1101static int
1102free_teimanager(struct manager *mgr)
1103{
1104 struct layer2 *l2, *nl2;
1105
1106 if (test_bit(MGR_OPT_NETWORK, &mgr->options)) {
1107 /* not locked; the lock is taken in release tei */
1108 mgr->up = NULL;
1109 if (test_bit(OPTION_L2_CLEANUP, &mgr->options)) {
1110 list_for_each_entry_safe(l2, nl2, &mgr->layer2, list) {
1111 put_tei_msg(mgr, ID_REMOVE, 0, l2->tei);
1112 mutex_lock(&mgr->ch.st->lmutex);
1113 list_del(&l2->ch.list);
1114 mutex_unlock(&mgr->ch.st->lmutex);
1115 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
1116 }
1117 test_and_clear_bit(MGR_OPT_NETWORK, &mgr->options);
1118 } else {
1119 list_for_each_entry_safe(l2, nl2, &mgr->layer2, list) {
1120 l2->up = NULL;
1121 }
1122 }
1123 }
1124 if (test_bit(MGR_OPT_USER, &mgr->options)) {
1125 if (list_empty(&mgr->layer2))
1126 test_and_clear_bit(MGR_OPT_USER, &mgr->options);
1127 }
1128 mgr->ch.st->dev->D.ctrl(&mgr->ch.st->dev->D, CLOSE_CHANNEL, NULL);
1129 return 0;
1130}
1131
1132static int
1133ctrl_teimanager(struct manager *mgr, void *arg)
1134{
1135 /* currently we only have one option */
1136 int clean = *((int *)arg);
1137
1138 if (clean)
1139 test_and_set_bit(OPTION_L2_CLEANUP, &mgr->options);
1140 else
1141 test_and_clear_bit(OPTION_L2_CLEANUP, &mgr->options);
1142 return 0;
1143}
1144
1145/* This function creates an L2 for a fixed TEI in NT mode */
1146static int
1147check_data(struct manager *mgr, struct sk_buff *skb)
1148{
1149 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1150 int ret, tei;
1151 struct layer2 *l2;
1152
1153 if (*debug & DEBUG_L2_CTRL)
1154 printk(KERN_DEBUG "%s: prim(%x) id(%x)\n",
1155 __func__, hh->prim, hh->id);
1156 if (test_bit(MGR_OPT_USER, &mgr->options))
1157 return -ENOTCONN;
1158 if (hh->prim != PH_DATA_IND)
1159 return -ENOTCONN;
1160 if (skb->len != 3)
1161 return -ENOTCONN;
1162 if (skb->data[0] != 0)
1163 /* only SAPI 0 command */
1164 return -ENOTCONN;
1165 if (!(skb->data[1] & 1)) /* invalid EA1 */
1166 return -EINVAL;
1167 tei = skb->data[1] >> 1;
1168 if (tei > 63) /* not a fixed tei */
1169 return -ENOTCONN;
1170 if ((skb->data[2] & ~0x10) != SABME)
1171 return -ENOTCONN;
1172 /* We got a SABME for a fixed TEI */
1173 l2 = create_new_tei(mgr, tei);
1174 if (!l2)
1175 return -ENOMEM;
1176 ret = l2->ch.send(&l2->ch, skb);
1177 return ret;
1178}
1179
1180void
1181delete_teimanager(struct mISDNchannel *ch)
1182{
1183 struct manager *mgr;
1184 struct layer2 *l2, *nl2;
1185
1186 mgr = container_of(ch, struct manager, ch);
1187 /* not locked; the lock is taken in release tei */
1188 list_for_each_entry_safe(l2, nl2, &mgr->layer2, list) {
1189 mutex_lock(&mgr->ch.st->lmutex);
1190 list_del(&l2->ch.list);
1191 mutex_unlock(&mgr->ch.st->lmutex);
1192 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
1193 }
1194 list_del(&mgr->ch.list);
1195 list_del(&mgr->bcast.list);
1196 skb_queue_purge(&mgr->sendq);
1197 kfree(mgr);
1198}
1199
1200static int
1201mgr_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1202{
1203 struct manager *mgr;
1204 int ret = -EINVAL;
1205
1206 mgr = container_of(ch, struct manager, ch);
1207 if (*debug & DEBUG_L2_CTRL)
1208 printk(KERN_DEBUG "%s(%x, %p)\n", __func__, cmd, arg);
1209 switch (cmd) {
1210 case OPEN_CHANNEL:
1211 ret = create_teimgr(mgr, arg);
1212 break;
1213 case CLOSE_CHANNEL:
1214 ret = free_teimanager(mgr);
1215 break;
1216 case CONTROL_CHANNEL:
1217 ret = ctrl_teimanager(mgr, arg);
1218 break;
1219 case CHECK_DATA:
1220 ret = check_data(mgr, arg);
1221 break;
1222 }
1223 return ret;
1224}
1225
1226static int
1227mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
1228{
1229 struct manager *mgr = container_of(ch, struct manager, bcast);
1230 struct mISDNhead *hh = mISDN_HEAD_P(skb);
1231 struct sk_buff *cskb = NULL;
1232 struct layer2 *l2;
1233 u_long flags;
1234 int ret;
1235
1236 read_lock_irqsave(&mgr->lock, flags);
1237 list_for_each_entry(l2, &mgr->layer2, list) {
1238 if ((hh->id & MISDN_ID_SAPI_MASK) ==
1239 (l2->ch.addr & MISDN_ID_SAPI_MASK)) {
1240 if (list_is_last(&l2->list, &mgr->layer2)) {
1241 cskb = skb;
1242 skb = NULL;
1243 } else {
1244 if (!cskb)
1245 cskb = skb_copy(skb, GFP_KERNEL);
1246 }
1247 if (cskb) {
1248 ret = l2->ch.send(&l2->ch, cskb);
1249 if (ret) {
1250 if (*debug & DEBUG_SEND_ERR)
1251 printk(KERN_DEBUG
1252 "%s ch%d prim(%x) addr(%x)"
1253 " err %d\n",
1254 __func__, l2->ch.nr,
1255 hh->prim, l2->ch.addr, ret);
1256 } else
1257 cskb = NULL;
1258 } else {
1259 printk(KERN_WARNING "%s ch%d addr %x no mem\n",
1260 __func__, ch->nr, ch->addr);
1261 goto out;
1262 }
1263 }
1264 }
1265out:
1266 read_unlock_irqrestore(&mgr->lock, flags);
1267 if (cskb)
1268 dev_kfree_skb(cskb);
1269 if (skb)
1270 dev_kfree_skb(skb);
1271 return 0;
1272}
1273
1274static int
1275mgr_bcast_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
1276{
1277
1278 return -EINVAL;
1279}
1280
1281int
1282create_teimanager(struct mISDNdevice *dev)
1283{
1284 struct manager *mgr;
1285
1286 mgr = kzalloc(sizeof(struct manager), GFP_KERNEL);
1287 if (!mgr)
1288 return -ENOMEM;
1289 INIT_LIST_HEAD(&mgr->layer2);
1290 mgr->lock = __RW_LOCK_UNLOCKED(mgr->lock);
1291 skb_queue_head_init(&mgr->sendq);
1292 mgr->nextid = 1;
1293 mgr->lastid = MISDN_ID_NONE;
1294 mgr->ch.send = mgr_send;
1295 mgr->ch.ctrl = mgr_ctrl;
1296 mgr->ch.st = dev->D.st;
1297 set_channel_address(&mgr->ch, TEI_SAPI, GROUP_TEI);
1298 add_layer2(&mgr->ch, dev->D.st);
1299 mgr->bcast.send = mgr_bcast;
1300 mgr->bcast.ctrl = mgr_bcast_ctrl;
1301 mgr->bcast.st = dev->D.st;
1302 set_channel_address(&mgr->bcast, 0, GROUP_TEI);
1303 add_layer2(&mgr->bcast, dev->D.st);
1304 mgr->deact.debug = *debug & DEBUG_MANAGER;
1305 mgr->deact.userdata = mgr;
1306 mgr->deact.printdebug = da_debug;
1307 mgr->deact.fsm = &deactfsm;
1308 mgr->deact.state = ST_L1_DEACT;
1309 mISDN_FsmInitTimer(&mgr->deact, &mgr->datimer);
1310 dev->teimgr = &mgr->ch;
1311 return 0;
1312}
1313
1314int TEIInit(u_int *deb)
1315{
1316 debug = deb;
1317 teifsmu.state_count = TEI_STATE_COUNT;
1318 teifsmu.event_count = TEI_EVENT_COUNT;
1319 teifsmu.strEvent = strTeiEvent;
1320 teifsmu.strState = strTeiState;
1321 mISDN_FsmNew(&teifsmu, TeiFnListUser, ARRAY_SIZE(TeiFnListUser));
1322 teifsmn.state_count = TEI_STATE_COUNT;
1323 teifsmn.event_count = TEI_EVENT_COUNT;
1324 teifsmn.strEvent = strTeiEvent;
1325 teifsmn.strState = strTeiState;
1326 mISDN_FsmNew(&teifsmn, TeiFnListNet, ARRAY_SIZE(TeiFnListNet));
1327 deactfsm.state_count = DEACT_STATE_COUNT;
1328 deactfsm.event_count = DEACT_EVENT_COUNT;
1329 deactfsm.strEvent = strDeactEvent;
1330 deactfsm.strState = strDeactState;
1331 mISDN_FsmNew(&deactfsm, DeactFnList, ARRAY_SIZE(DeactFnList));
1332 return 0;
1333}
1334
1335void TEIFree(void)
1336{
1337 mISDN_FsmFree(&teifsmu);
1338 mISDN_FsmFree(&teifsmn);
1339 mISDN_FsmFree(&deactfsm);
1340}
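
For readers following the TEI management code above: put_tei_msg() builds, and ph_data_ind() filters, a fixed 8-byte Q.921 TEI management PDU. The sketch below is a small standalone decoder that mirrors those field offsets and checks; the struct and function names are local to this example and are not part of the mISDN API.

#include <stdint.h>

/* Field layout of the 8-byte TEI management frame, as assembled in
 * put_tei_msg() and validated in ph_data_ind():
 *   [0] (SAPI << 2) | C/R | EA0    [1] (TEI << 1) | EA1
 *   [2] UI control field           [3] TEI management entity id
 *   [4] Ri high byte               [5] Ri low byte
 *   [6] message type               [7] (Ai << 1) | 1
 */
struct tei_mgmt_pdu {
	uint8_t  sapi;	/* 63 for TEI management */
	uint8_t  tei;	/* 127 (group TEI) on this SAP */
	uint16_t ri;	/* reference number */
	uint8_t  mt;	/* ID_REQUEST, ID_ASSIGNED, ID_DENIED, ... */
	uint8_t  ai;	/* action indicator: the TEI being negotiated */
};

static int tei_mgmt_decode(const uint8_t buf[8], struct tei_mgmt_pdu *p)
{
	if ((buf[0] & 1) || !(buf[1] & 1))	/* EA0 must be 0, EA1 must be 1 */
		return -1;
	p->sapi = buf[0] >> 2;
	p->tei  = buf[1] >> 1;
	p->ri   = ((uint16_t)buf[4] << 8) | buf[5];
	p->mt   = buf[6];
	p->ai   = buf[7] >> 1;
	return 0;
}
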
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
new file mode 100644
index 000000000000..b5fabc7019d8
--- /dev/null
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -0,0 +1,301 @@
1/*
2 *
3 * general timer device for use in ISDN stacks
4 *
5 * Author Karsten Keil <kkeil@novell.com>
6 *
7 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#include <linux/poll.h>
21#include <linux/vmalloc.h>
22#include <linux/timer.h>
23#include <linux/miscdevice.h>
24#include <linux/module.h>
25#include <linux/mISDNif.h>
26
27static int *debug;
28
29
30struct mISDNtimerdev {
31 int next_id;
32 struct list_head pending;
33 struct list_head expired;
34 wait_queue_head_t wait;
35 u_int work;
36 spinlock_t lock; /* protect lists */
37};
38
39struct mISDNtimer {
40 struct list_head list;
41 struct mISDNtimerdev *dev;
42 struct timer_list tl;
43 int id;
44};
45
46static int
47mISDN_open(struct inode *ino, struct file *filep)
48{
49 struct mISDNtimerdev *dev;
50
51 if (*debug & DEBUG_TIMER)
52 printk(KERN_DEBUG "%s(%p,%p)\n", __func__, ino, filep);
53 dev = kmalloc(sizeof(struct mISDNtimerdev), GFP_KERNEL);
54 if (!dev)
55 return -ENOMEM;
56 dev->next_id = 1;
57 INIT_LIST_HEAD(&dev->pending);
58 INIT_LIST_HEAD(&dev->expired);
59 spin_lock_init(&dev->lock);
60 dev->work = 0;
61 init_waitqueue_head(&dev->wait);
62 filep->private_data = dev;
63 __module_get(THIS_MODULE);
64 return 0;
65}
66
67static int
68mISDN_close(struct inode *ino, struct file *filep)
69{
70 struct mISDNtimerdev *dev = filep->private_data;
71 struct mISDNtimer *timer, *next;
72
73 if (*debug & DEBUG_TIMER)
74 printk(KERN_DEBUG "%s(%p,%p)\n", __func__, ino, filep);
75 list_for_each_entry_safe(timer, next, &dev->pending, list) {
76 del_timer(&timer->tl);
77 kfree(timer);
78 }
79 list_for_each_entry_safe(timer, next, &dev->expired, list) {
80 kfree(timer);
81 }
82 kfree(dev);
83 module_put(THIS_MODULE);
84 return 0;
85}
86
87static ssize_t
88mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off)
89{
90 struct mISDNtimerdev *dev = filep->private_data;
91 struct mISDNtimer *timer;
92 u_long flags;
93 int ret = 0;
94
95 if (*debug & DEBUG_TIMER)
96 printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__,
97 filep, buf, (int)count, off);
98 if (*off != filep->f_pos)
99 return -ESPIPE;
100
101 if (list_empty(&dev->expired) && (dev->work == 0)) {
102 if (filep->f_flags & O_NONBLOCK)
103 return -EAGAIN;
104 wait_event_interruptible(dev->wait, (dev->work ||
105 !list_empty(&dev->expired)));
106 if (signal_pending(current))
107 return -ERESTARTSYS;
108 }
109 if (count < sizeof(int))
110 return -ENOSPC;
111 if (dev->work)
112 dev->work = 0;
113 if (!list_empty(&dev->expired)) {
114 spin_lock_irqsave(&dev->lock, flags);
115 timer = (struct mISDNtimer *)dev->expired.next;
116 list_del(&timer->list);
117 spin_unlock_irqrestore(&dev->lock, flags);
118 if (put_user(timer->id, (int __user *)buf))
119 ret = -EFAULT;
120 else
121 ret = sizeof(int);
122 kfree(timer);
123 }
124 return ret;
125}
126
127static loff_t
128mISDN_llseek(struct file *filep, loff_t offset, int orig)
129{
130 return -ESPIPE;
131}
132
133static ssize_t
134mISDN_write(struct file *filep, const char __user *buf, size_t count, loff_t *off)
135{
136 return -EOPNOTSUPP;
137}
138
139static unsigned int
140mISDN_poll(struct file *filep, poll_table *wait)
141{
142 struct mISDNtimerdev *dev = filep->private_data;
143 unsigned int mask = POLLERR;
144
145 if (*debug & DEBUG_TIMER)
146 printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait);
147 if (dev) {
148 poll_wait(filep, &dev->wait, wait);
149 mask = 0;
150 if (dev->work || !list_empty(&dev->expired))
151 mask |= (POLLIN | POLLRDNORM);
152 if (*debug & DEBUG_TIMER)
153 printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__,
154 dev->work, list_empty(&dev->expired));
155 }
156 return mask;
157}
158
159static void
160dev_expire_timer(struct mISDNtimer *timer)
161{
162 u_long flags;
163
164 spin_lock_irqsave(&timer->dev->lock, flags);
165 list_del(&timer->list);
166 list_add_tail(&timer->list, &timer->dev->expired);
167 spin_unlock_irqrestore(&timer->dev->lock, flags);
168 wake_up_interruptible(&timer->dev->wait);
169}
170
171static int
172misdn_add_timer(struct mISDNtimerdev *dev, int timeout)
173{
174 int id;
175 u_long flags;
176 struct mISDNtimer *timer;
177
178 if (!timeout) {
179 dev->work = 1;
180 wake_up_interruptible(&dev->wait);
181 id = 0;
182 } else {
183 timer = kzalloc(sizeof(struct mISDNtimer), GFP_KERNEL);
184 if (!timer)
185 return -ENOMEM;
186 spin_lock_irqsave(&dev->lock, flags);
187 timer->id = dev->next_id++;
188 if (dev->next_id < 0)
189 dev->next_id = 1;
190 list_add_tail(&timer->list, &dev->pending);
191 spin_unlock_irqrestore(&dev->lock, flags);
192 timer->dev = dev;
193 timer->tl.data = (long)timer;
194 timer->tl.function = (void *) dev_expire_timer;
195 init_timer(&timer->tl);
196 timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000);
197 add_timer(&timer->tl);
198 id = timer->id;
199 }
200 return id;
201}
202
203static int
204misdn_del_timer(struct mISDNtimerdev *dev, int id)
205{
206 u_long flags;
207 struct mISDNtimer *timer;
208 int ret = 0;
209
210 spin_lock_irqsave(&dev->lock, flags);
211 list_for_each_entry(timer, &dev->pending, list) {
212 if (timer->id == id) {
213 list_del_init(&timer->list);
214 del_timer(&timer->tl);
215 ret = timer->id;
216 kfree(timer);
217 goto unlock;
218 }
219 }
220unlock:
221 spin_unlock_irqrestore(&dev->lock, flags);
222 return ret;
223}
224
225static int
226mISDN_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
227 unsigned long arg)
228{
229 struct mISDNtimerdev *dev = filep->private_data;
230 int id, tout, ret = 0;
231
232
233 if (*debug & DEBUG_TIMER)
234 printk(KERN_DEBUG "%s(%p, %x, %lx)\n", __func__,
235 filep, cmd, arg);
236 switch (cmd) {
237 case IMADDTIMER:
238 if (get_user(tout, (int __user *)arg)) {
239 ret = -EFAULT;
240 break;
241 }
242 id = misdn_add_timer(dev, tout);
243 if (*debug & DEBUG_TIMER)
244 printk(KERN_DEBUG "%s add %d id %d\n", __func__,
245 tout, id);
246 if (id < 0) {
247 ret = id;
248 break;
249 }
250 if (put_user(id, (int __user *)arg))
251 ret = -EFAULT;
252 break;
253 case IMDELTIMER:
254 if (get_user(id, (int __user *)arg)) {
255 ret = -EFAULT;
256 break;
257 }
258 if (*debug & DEBUG_TIMER)
259 printk(KERN_DEBUG "%s del id %d\n", __func__, id);
260 id = misdn_del_timer(dev, id);
261 if (put_user(id, (int __user *)arg))
262 ret = -EFAULT;
263 break;
264 default:
265 ret = -EINVAL;
266 }
267 return ret;
268}
269
270static struct file_operations mISDN_fops = {
271 .llseek = mISDN_llseek,
272 .read = mISDN_read,
273 .write = mISDN_write,
274 .poll = mISDN_poll,
275 .ioctl = mISDN_ioctl,
276 .open = mISDN_open,
277 .release = mISDN_close,
278};
279
280static struct miscdevice mISDNtimer = {
281 .minor = MISC_DYNAMIC_MINOR,
282 .name = "mISDNtimer",
283 .fops = &mISDN_fops,
284};
285
286int
287mISDN_inittimer(int *deb)
288{
289 int err;
290
291 debug = deb;
292 err = misc_register(&mISDNtimer);
293 if (err)
294 printk(KERN_WARNING "mISDN: Could not register timer device\n");
295 return err;
296}
297
298void mISDN_timer_cleanup(void)
299{
300 misc_deregister(&mISDNtimer);
301}
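
A minimal userspace sketch of how the timer device above is driven, assuming the misc device shows up as /dev/mISDNtimer and that the IMADDTIMER/IMDELTIMER ioctls are available from <linux/mISDNif.h>. Timeouts are passed in milliseconds; read() blocks until a timer expires and then returns its id.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mISDNif.h>

int main(void)
{
	int fd, id, expired;

	fd = open("/dev/mISDNtimer", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/mISDNtimer");
		return 1;
	}
	id = 500;				/* timeout in milliseconds */
	if (ioctl(fd, IMADDTIMER, &id) < 0) {	/* on return, id holds the timer id */
		perror("IMADDTIMER");
		close(fd);
		return 1;
	}
	/* blocks until a timer expires, then yields its id */
	if (read(fd, &expired, sizeof(expired)) == (ssize_t)sizeof(expired))
		printf("timer %d expired\n", expired);
	ioctl(fd, IMDELTIMER, &expired);	/* harmless if already expired */
	close(fd);
	return 0;
}
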
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index fea966d66f98..71dd65aa31b6 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -147,9 +147,12 @@ static struct priority_group *alloc_priority_group(void)
147static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) 147static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
148{ 148{
149 struct pgpath *pgpath, *tmp; 149 struct pgpath *pgpath, *tmp;
150 struct multipath *m = ti->private;
150 151
151 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { 152 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
152 list_del(&pgpath->list); 153 list_del(&pgpath->list);
154 if (m->hw_handler_name)
155 scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
153 dm_put_device(ti, pgpath->path.dev); 156 dm_put_device(ti, pgpath->path.dev);
154 free_pgpath(pgpath); 157 free_pgpath(pgpath);
155 } 158 }
@@ -548,6 +551,7 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
548{ 551{
549 int r; 552 int r;
550 struct pgpath *p; 553 struct pgpath *p;
554 struct multipath *m = ti->private;
551 555
552 /* we need at least a path arg */ 556 /* we need at least a path arg */
553 if (as->argc < 1) { 557 if (as->argc < 1) {
@@ -566,6 +570,15 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
566 goto bad; 570 goto bad;
567 } 571 }
568 572
573 if (m->hw_handler_name) {
574 r = scsi_dh_attach(bdev_get_queue(p->path.dev->bdev),
575 m->hw_handler_name);
576 if (r < 0) {
577 dm_put_device(ti, p->path.dev);
578 goto bad;
579 }
580 }
581
569 r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error); 582 r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
570 if (r) { 583 if (r) {
571 dm_put_device(ti, p->path.dev); 584 dm_put_device(ti, p->path.dev);
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 34402c47027e..d6a0074b9dc3 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -273,12 +273,12 @@ mpt_fault_reset_work(struct work_struct *work)
273 ioc_raw_state = mpt_GetIocState(ioc, 0); 273 ioc_raw_state = mpt_GetIocState(ioc, 0);
274 if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) { 274 if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
275 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n", 275 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
276 ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK); 276 ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
277 printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n", 277 printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
278 ioc->name, __FUNCTION__); 278 ioc->name, __func__);
279 rc = mpt_HardResetHandler(ioc, CAN_SLEEP); 279 rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
280 printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name, 280 printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
281 __FUNCTION__, (rc == 0) ? "success" : "failed"); 281 __func__, (rc == 0) ? "success" : "failed");
282 ioc_raw_state = mpt_GetIocState(ioc, 0); 282 ioc_raw_state = mpt_GetIocState(ioc, 0);
283 if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) 283 if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
284 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after " 284 printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
@@ -356,7 +356,7 @@ mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
356 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS || 356 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
357 MptCallbacks[cb_idx] == NULL) { 357 MptCallbacks[cb_idx] == NULL) {
358 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n", 358 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
359 __FUNCTION__, ioc->name, cb_idx); 359 __func__, ioc->name, cb_idx);
360 goto out; 360 goto out;
361 } 361 }
362 362
@@ -420,7 +420,7 @@ mpt_reply(MPT_ADAPTER *ioc, u32 pa)
420 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS || 420 if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
421 MptCallbacks[cb_idx] == NULL) { 421 MptCallbacks[cb_idx] == NULL) {
422 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n", 422 printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
423 __FUNCTION__, ioc->name, cb_idx); 423 __func__, ioc->name, cb_idx);
424 freeme = 0; 424 freeme = 0;
425 goto out; 425 goto out;
426 } 426 }
@@ -2434,7 +2434,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
2434 2434
2435 if (ioc->cached_fw != NULL) { 2435 if (ioc->cached_fw != NULL) {
2436 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto " 2436 ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto "
2437 "adapter\n", __FUNCTION__, ioc->name)); 2437 "adapter\n", __func__, ioc->name));
2438 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *) 2438 if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
2439 ioc->cached_fw, CAN_SLEEP)) < 0) { 2439 ioc->cached_fw, CAN_SLEEP)) < 0) {
2440 printk(MYIOC_s_WARN_FMT 2440 printk(MYIOC_s_WARN_FMT
@@ -3693,7 +3693,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
3693 3693
3694 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) { 3694 if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
3695 drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset " 3695 drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
3696 "address=%p\n", ioc->name, __FUNCTION__, 3696 "address=%p\n", ioc->name, __func__,
3697 &ioc->chip->Doorbell, &ioc->chip->Reset_1078)); 3697 &ioc->chip->Doorbell, &ioc->chip->Reset_1078));
3698 CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07); 3698 CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07);
3699 if (sleepFlag == CAN_SLEEP) 3699 if (sleepFlag == CAN_SLEEP)
@@ -4742,12 +4742,12 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4742 break; 4742 break;
4743 } 4743 }
4744 4744
4745 printk("%s: persist_opcode=%x\n",__FUNCTION__, persist_opcode); 4745 printk("%s: persist_opcode=%x\n",__func__, persist_opcode);
4746 4746
4747 /* Get a MF for this command. 4747 /* Get a MF for this command.
4748 */ 4748 */
4749 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 4749 if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
4750 printk("%s: no msg frames!\n",__FUNCTION__); 4750 printk("%s: no msg frames!\n",__func__);
4751 return -1; 4751 return -1;
4752 } 4752 }
4753 4753
@@ -4771,13 +4771,13 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
4771 (SasIoUnitControlReply_t *)ioc->persist_reply_frame; 4771 (SasIoUnitControlReply_t *)ioc->persist_reply_frame;
4772 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) { 4772 if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
4773 printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", 4773 printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
4774 __FUNCTION__, 4774 __func__,
4775 sasIoUnitCntrReply->IOCStatus, 4775 sasIoUnitCntrReply->IOCStatus,
4776 sasIoUnitCntrReply->IOCLogInfo); 4776 sasIoUnitCntrReply->IOCLogInfo);
4777 return -1; 4777 return -1;
4778 } 4778 }
4779 4779
4780 printk("%s: success\n",__FUNCTION__); 4780 printk("%s: success\n",__func__);
4781 return 0; 4781 return 0;
4782} 4782}
4783 4783
@@ -5784,7 +5784,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
5784 5784
5785 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { 5785 if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
5786 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 5786 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
5787 ioc->name,__FUNCTION__)); 5787 ioc->name,__func__));
5788 return -1; 5788 return -1;
5789 } 5789 }
5790 5790
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a5920423e2b2..f5233f3d9eff 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -505,7 +505,7 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
505 event = le32_to_cpu(pEvReply->Event) & 0xFF; 505 event = le32_to_cpu(pEvReply->Event) & 0xFF;
506 506
507 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n", 507 dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n",
508 ioc->name, __FUNCTION__)); 508 ioc->name, __func__));
509 if(async_queue == NULL) 509 if(async_queue == NULL)
510 return 1; 510 return 1;
511 511
@@ -2482,7 +2482,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
2482 */ 2482 */
2483 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { 2483 if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
2484 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", 2484 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
2485 ioc->name,__FUNCTION__)); 2485 ioc->name,__func__));
2486 goto out; 2486 goto out;
2487 } 2487 }
2488 2488
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index b36cae9ec6db..c3c24fdf9fb6 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -231,28 +231,28 @@ static int
231mptfc_abort(struct scsi_cmnd *SCpnt) 231mptfc_abort(struct scsi_cmnd *SCpnt)
232{ 232{
233 return 233 return
234 mptfc_block_error_handler(SCpnt, mptscsih_abort, __FUNCTION__); 234 mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__);
235} 235}
236 236
237static int 237static int
238mptfc_dev_reset(struct scsi_cmnd *SCpnt) 238mptfc_dev_reset(struct scsi_cmnd *SCpnt)
239{ 239{
240 return 240 return
241 mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __FUNCTION__); 241 mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__);
242} 242}
243 243
244static int 244static int
245mptfc_bus_reset(struct scsi_cmnd *SCpnt) 245mptfc_bus_reset(struct scsi_cmnd *SCpnt)
246{ 246{
247 return 247 return
248 mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __FUNCTION__); 248 mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__);
249} 249}
250 250
251static int 251static int
252mptfc_host_reset(struct scsi_cmnd *SCpnt) 252mptfc_host_reset(struct scsi_cmnd *SCpnt)
253{ 253{
254 return 254 return
255 mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __FUNCTION__); 255 mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__);
256} 256}
257 257
258static void 258static void
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index d709d92b7b30..a1abf95cf751 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -610,7 +610,7 @@ mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
610 610
611 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n", 611 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
612 IOC_AND_NETDEV_NAMES_s_s(dev), 612 IOC_AND_NETDEV_NAMES_s_s(dev),
613 __FUNCTION__, sent)); 613 __func__, sent));
614 614
615 priv->SendCtl[ctx].skb = NULL; 615 priv->SendCtl[ctx].skb = NULL;
616 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma, 616 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
@@ -676,7 +676,7 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
676 676
677 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n", 677 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
678 IOC_AND_NETDEV_NAMES_s_s(dev), 678 IOC_AND_NETDEV_NAMES_s_s(dev),
679 __FUNCTION__, sent)); 679 __func__, sent));
680 680
681 priv->SendCtl[ctx].skb = NULL; 681 priv->SendCtl[ctx].skb = NULL;
682 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma, 682 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
@@ -715,7 +715,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
715 u16 cur_naa = 0x1000; 715 u16 cur_naa = 0x1000;
716 716
717 dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n", 717 dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
718 __FUNCTION__, skb)); 718 __func__, skb));
719 719
720 spin_lock_irqsave(&priv->txfidx_lock, flags); 720 spin_lock_irqsave(&priv->txfidx_lock, flags);
721 if (priv->mpt_txfidx_tail < 0) { 721 if (priv->mpt_txfidx_tail < 0) {
@@ -723,7 +723,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
723 spin_unlock_irqrestore(&priv->txfidx_lock, flags); 723 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
724 724
725 printk (KERN_ERR "%s: no tx context available: %u\n", 725 printk (KERN_ERR "%s: no tx context available: %u\n",
726 __FUNCTION__, priv->mpt_txfidx_tail); 726 __func__, priv->mpt_txfidx_tail);
727 return 1; 727 return 1;
728 } 728 }
729 729
@@ -733,7 +733,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
733 spin_unlock_irqrestore(&priv->txfidx_lock, flags); 733 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
734 734
735 printk (KERN_ERR "%s: Unable to alloc request frame\n", 735 printk (KERN_ERR "%s: Unable to alloc request frame\n",
736 __FUNCTION__); 736 __func__);
737 return 1; 737 return 1;
738 } 738 }
739 739
@@ -1208,7 +1208,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1208 1208
1209 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n", 1209 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1210 IOC_AND_NETDEV_NAMES_s_s(dev), 1210 IOC_AND_NETDEV_NAMES_s_s(dev),
1211 __FUNCTION__, buckets, curr)); 1211 __func__, buckets, curr));
1212 1212
1213 max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) / 1213 max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1214 (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t)); 1214 (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
@@ -1217,9 +1217,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1217 mf = mpt_get_msg_frame(LanCtx, mpt_dev); 1217 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
1218 if (mf == NULL) { 1218 if (mf == NULL) {
1219 printk (KERN_ERR "%s: Unable to alloc request frame\n", 1219 printk (KERN_ERR "%s: Unable to alloc request frame\n",
1220 __FUNCTION__); 1220 __func__);
1221 dioprintk((KERN_ERR "%s: %u buckets remaining\n", 1221 dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1222 __FUNCTION__, buckets)); 1222 __func__, buckets));
1223 goto out; 1223 goto out;
1224 } 1224 }
1225 pRecvReq = (LANReceivePostRequest_t *) mf; 1225 pRecvReq = (LANReceivePostRequest_t *) mf;
@@ -1244,7 +1244,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1244 spin_lock_irqsave(&priv->rxfidx_lock, flags); 1244 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1245 if (priv->mpt_rxfidx_tail < 0) { 1245 if (priv->mpt_rxfidx_tail < 0) {
1246 printk (KERN_ERR "%s: Can't alloc context\n", 1246 printk (KERN_ERR "%s: Can't alloc context\n",
1247 __FUNCTION__); 1247 __func__);
1248 spin_unlock_irqrestore(&priv->rxfidx_lock, 1248 spin_unlock_irqrestore(&priv->rxfidx_lock,
1249 flags); 1249 flags);
1250 break; 1250 break;
@@ -1267,7 +1267,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1267 if (skb == NULL) { 1267 if (skb == NULL) {
1268 printk (KERN_WARNING 1268 printk (KERN_WARNING
1269 MYNAM "/%s: Can't alloc skb\n", 1269 MYNAM "/%s: Can't alloc skb\n",
1270 __FUNCTION__); 1270 __func__);
1271 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx; 1271 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1272 spin_unlock_irqrestore(&priv->rxfidx_lock, flags); 1272 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1273 break; 1273 break;
@@ -1305,7 +1305,7 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1305 1305
1306 if (pSimple == NULL) { 1306 if (pSimple == NULL) {
1307/**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n", 1307/**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1308/**/ __FUNCTION__); 1308/**/ __func__);
1309 mpt_free_msg_frame(mpt_dev, mf); 1309 mpt_free_msg_frame(mpt_dev, mf);
1310 goto out; 1310 goto out;
1311 } 1311 }
@@ -1329,9 +1329,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
1329 1329
1330out: 1330out:
1331 dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n", 1331 dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1332 __FUNCTION__, buckets, atomic_read(&priv->buckets_out))); 1332 __func__, buckets, atomic_read(&priv->buckets_out)));
1333 dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n", 1333 dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1334 __FUNCTION__, priv->total_posted, priv->total_received)); 1334 __func__, priv->total_posted, priv->total_received));
1335 1335
1336 clear_bit(0, &priv->post_buckets_active); 1336 clear_bit(0, &priv->post_buckets_active);
1337} 1337}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b1147aa7afde..12b732512e57 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -300,7 +300,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai
300 phy_info = port_info->phy_info; 300 phy_info = port_info->phy_info;
301 301
302 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: [%p]: num_phys=%02d " 302 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: [%p]: num_phys=%02d "
303 "bitmask=0x%016llX\n", ioc->name, __FUNCTION__, port_details, 303 "bitmask=0x%016llX\n", ioc->name, __func__, port_details,
304 port_details->num_phys, (unsigned long long) 304 port_details->num_phys, (unsigned long long)
305 port_details->phy_bitmask)); 305 port_details->phy_bitmask));
306 306
@@ -411,7 +411,7 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
411 */ 411 */
412 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT 412 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
413 "%s: [%p]: deleting phy = %d\n", 413 "%s: [%p]: deleting phy = %d\n",
414 ioc->name, __FUNCTION__, port_details, i)); 414 ioc->name, __func__, port_details, i));
415 port_details->num_phys--; 415 port_details->num_phys--;
416 port_details->phy_bitmask &= ~ (1 << phy_info->phy_id); 416 port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
417 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); 417 memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
@@ -497,7 +497,7 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
497 continue; 497 continue;
498 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT 498 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
499 "%s: [%p]: phy_id=%02d num_phys=%02d " 499 "%s: [%p]: phy_id=%02d num_phys=%02d "
500 "bitmask=0x%016llX\n", ioc->name, __FUNCTION__, 500 "bitmask=0x%016llX\n", ioc->name, __func__,
501 port_details, i, port_details->num_phys, 501 port_details, i, port_details->num_phys,
502 (unsigned long long)port_details->phy_bitmask)); 502 (unsigned long long)port_details->phy_bitmask));
503 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tport = %p rphy=%p\n", 503 dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tport = %p rphy=%p\n",
@@ -553,7 +553,7 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
553 553
554 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) { 554 if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
555 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n", 555 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
556 ioc->name,__FUNCTION__, __LINE__)); 556 ioc->name,__func__, __LINE__));
557 return 0; 557 return 0;
558 } 558 }
559 559
@@ -606,7 +606,7 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
606 GFP_ATOMIC); 606 GFP_ATOMIC);
607 if (!target_reset_list) { 607 if (!target_reset_list) {
608 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", 608 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
609 ioc->name,__FUNCTION__, __LINE__)); 609 ioc->name,__func__, __LINE__));
610 return; 610 return;
611 } 611 }
612 612
@@ -673,7 +673,7 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
673 ev = kzalloc(sizeof(*ev), GFP_ATOMIC); 673 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
674 if (!ev) { 674 if (!ev) {
675 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", 675 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
676 ioc->name,__FUNCTION__, __LINE__)); 676 ioc->name,__func__, __LINE__));
677 return; 677 return;
678 } 678 }
679 679
@@ -1183,7 +1183,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
1183 reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply; 1183 reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply;
1184 if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) { 1184 if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) {
1185 printk(MYIOC_s_INFO_FMT "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", 1185 printk(MYIOC_s_INFO_FMT "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
1186 ioc->name, __FUNCTION__, reply->IOCStatus, reply->IOCLogInfo); 1186 ioc->name, __func__, reply->IOCStatus, reply->IOCLogInfo);
1187 error = -ENXIO; 1187 error = -ENXIO;
1188 goto out_unlock; 1188 goto out_unlock;
1189 } 1189 }
@@ -1270,14 +1270,14 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1270 1270
1271 if (!rsp) { 1271 if (!rsp) {
1272 printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n", 1272 printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n",
1273 ioc->name, __FUNCTION__); 1273 ioc->name, __func__);
1274 return -EINVAL; 1274 return -EINVAL;
1275 } 1275 }
1276 1276
1277 /* do we need to support multiple segments? */ 1277 /* do we need to support multiple segments? */
1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", 1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
1280 ioc->name, __FUNCTION__, req->bio->bi_vcnt, req->data_len, 1280 ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
1281 rsp->bio->bi_vcnt, rsp->data_len); 1281 rsp->bio->bi_vcnt, rsp->data_len);
1282 return -EINVAL; 1282 return -EINVAL;
1283 } 1283 }
@@ -1343,7 +1343,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1343 1343
1344 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ); 1344 timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
1345 if (!timeleft) { 1345 if (!timeleft) {
1346 printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __FUNCTION__); 1346 printk(MYIOC_s_ERR_FMT "%s: smp timeout!\n", ioc->name, __func__);
1347 /* On timeout reset the board */ 1347 /* On timeout reset the board */
1348 mpt_HardResetHandler(ioc, CAN_SLEEP); 1348 mpt_HardResetHandler(ioc, CAN_SLEEP);
1349 ret = -ETIMEDOUT; 1349 ret = -ETIMEDOUT;
@@ -1361,7 +1361,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1361 rsp->data_len -= smprep->ResponseDataLength; 1361 rsp->data_len -= smprep->ResponseDataLength;
1362 } else { 1362 } else {
1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", 1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
1364 ioc->name, __FUNCTION__); 1364 ioc->name, __func__);
1365 ret = -ENXIO; 1365 ret = -ENXIO;
1366 } 1366 }
1367unmap: 1367unmap:
@@ -2006,7 +2006,7 @@ static int mptsas_probe_one_phy(struct device *dev,
2006 if (error) { 2006 if (error) {
2007 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2007 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2008 "%s: exit at line=%d\n", ioc->name, 2008 "%s: exit at line=%d\n", ioc->name,
2009 __FUNCTION__, __LINE__)); 2009 __func__, __LINE__));
2010 goto out; 2010 goto out;
2011 } 2011 }
2012 mptsas_set_port(ioc, phy_info, port); 2012 mptsas_set_port(ioc, phy_info, port);
@@ -2076,7 +2076,7 @@ static int mptsas_probe_one_phy(struct device *dev,
2076 if (!rphy) { 2076 if (!rphy) {
2077 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2077 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2078 "%s: exit at line=%d\n", ioc->name, 2078 "%s: exit at line=%d\n", ioc->name,
2079 __FUNCTION__, __LINE__)); 2079 __func__, __LINE__));
2080 goto out; 2080 goto out;
2081 } 2081 }
2082 2082
@@ -2085,7 +2085,7 @@ static int mptsas_probe_one_phy(struct device *dev,
2085 if (error) { 2085 if (error) {
2086 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2086 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2087 "%s: exit at line=%d\n", ioc->name, 2087 "%s: exit at line=%d\n", ioc->name,
2088 __FUNCTION__, __LINE__)); 2088 __func__, __LINE__));
2089 sas_rphy_free(rphy); 2089 sas_rphy_free(rphy);
2090 goto out; 2090 goto out;
2091 } 2091 }
@@ -2613,7 +2613,7 @@ mptsas_hotplug_work(struct work_struct *work)
2613 (ev->channel << 8) + ev->id)) { 2613 (ev->channel << 8) + ev->id)) {
2614 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2614 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2615 "%s: exit at line=%d\n", ioc->name, 2615 "%s: exit at line=%d\n", ioc->name,
2616 __FUNCTION__, __LINE__)); 2616 __func__, __LINE__));
2617 break; 2617 break;
2618 } 2618 }
2619 phy_info = mptsas_find_phyinfo_by_sas_address( 2619 phy_info = mptsas_find_phyinfo_by_sas_address(
@@ -2633,20 +2633,20 @@ mptsas_hotplug_work(struct work_struct *work)
2633 if (!phy_info){ 2633 if (!phy_info){
2634 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2634 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2635 "%s: exit at line=%d\n", ioc->name, 2635 "%s: exit at line=%d\n", ioc->name,
2636 __FUNCTION__, __LINE__)); 2636 __func__, __LINE__));
2637 break; 2637 break;
2638 } 2638 }
2639 if (!phy_info->port_details) { 2639 if (!phy_info->port_details) {
2640 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2640 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2641 "%s: exit at line=%d\n", ioc->name, 2641 "%s: exit at line=%d\n", ioc->name,
2642 __FUNCTION__, __LINE__)); 2642 __func__, __LINE__));
2643 break; 2643 break;
2644 } 2644 }
2645 rphy = mptsas_get_rphy(phy_info); 2645 rphy = mptsas_get_rphy(phy_info);
2646 if (!rphy) { 2646 if (!rphy) {
2647 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2647 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2648 "%s: exit at line=%d\n", ioc->name, 2648 "%s: exit at line=%d\n", ioc->name,
2649 __FUNCTION__, __LINE__)); 2649 __func__, __LINE__));
2650 break; 2650 break;
2651 } 2651 }
2652 2652
@@ -2654,7 +2654,7 @@ mptsas_hotplug_work(struct work_struct *work)
2654 if (!port) { 2654 if (!port) {
2655 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2655 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2656 "%s: exit at line=%d\n", ioc->name, 2656 "%s: exit at line=%d\n", ioc->name,
2657 __FUNCTION__, __LINE__)); 2657 __func__, __LINE__));
2658 break; 2658 break;
2659 } 2659 }
2660 2660
@@ -2665,7 +2665,7 @@ mptsas_hotplug_work(struct work_struct *work)
2665 if (!vtarget) { 2665 if (!vtarget) {
2666 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2666 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2667 "%s: exit at line=%d\n", ioc->name, 2667 "%s: exit at line=%d\n", ioc->name,
2668 __FUNCTION__, __LINE__)); 2668 __func__, __LINE__));
2669 break; 2669 break;
2670 } 2670 }
2671 2671
@@ -2720,7 +2720,7 @@ mptsas_hotplug_work(struct work_struct *work)
2720 (ev->channel << 8) + ev->id)) { 2720 (ev->channel << 8) + ev->id)) {
2721 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2721 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2722 "%s: exit at line=%d\n", ioc->name, 2722 "%s: exit at line=%d\n", ioc->name,
2723 __FUNCTION__, __LINE__)); 2723 __func__, __LINE__));
2724 break; 2724 break;
2725 } 2725 }
2726 2726
@@ -2732,7 +2732,7 @@ mptsas_hotplug_work(struct work_struct *work)
2732 if (!phy_info || !phy_info->port_details) { 2732 if (!phy_info || !phy_info->port_details) {
2733 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2733 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2734 "%s: exit at line=%d\n", ioc->name, 2734 "%s: exit at line=%d\n", ioc->name,
2735 __FUNCTION__, __LINE__)); 2735 __func__, __LINE__));
2736 break; 2736 break;
2737 } 2737 }
2738 2738
@@ -2744,7 +2744,7 @@ mptsas_hotplug_work(struct work_struct *work)
2744 if (!vtarget) { 2744 if (!vtarget) {
2745 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2745 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2746 "%s: exit at line=%d\n", ioc->name, 2746 "%s: exit at line=%d\n", ioc->name,
2747 __FUNCTION__, __LINE__)); 2747 __func__, __LINE__));
2748 break; 2748 break;
2749 } 2749 }
2750 /* 2750 /*
@@ -2767,7 +2767,7 @@ mptsas_hotplug_work(struct work_struct *work)
2767 if (mptsas_get_rphy(phy_info)) { 2767 if (mptsas_get_rphy(phy_info)) {
2768 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2768 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2769 "%s: exit at line=%d\n", ioc->name, 2769 "%s: exit at line=%d\n", ioc->name,
2770 __FUNCTION__, __LINE__)); 2770 __func__, __LINE__));
2771 if (ev->channel) printk("%d\n", __LINE__); 2771 if (ev->channel) printk("%d\n", __LINE__);
2772 break; 2772 break;
2773 } 2773 }
@@ -2776,7 +2776,7 @@ mptsas_hotplug_work(struct work_struct *work)
2776 if (!port) { 2776 if (!port) {
2777 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2777 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2778 "%s: exit at line=%d\n", ioc->name, 2778 "%s: exit at line=%d\n", ioc->name,
2779 __FUNCTION__, __LINE__)); 2779 __func__, __LINE__));
2780 break; 2780 break;
2781 } 2781 }
2782 memcpy(&phy_info->attached, &sas_device, 2782 memcpy(&phy_info->attached, &sas_device,
@@ -2801,7 +2801,7 @@ mptsas_hotplug_work(struct work_struct *work)
2801 if (!rphy) { 2801 if (!rphy) {
2802 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2802 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2803 "%s: exit at line=%d\n", ioc->name, 2803 "%s: exit at line=%d\n", ioc->name,
2804 __FUNCTION__, __LINE__)); 2804 __func__, __LINE__));
2805 break; /* non-fatal: an rphy can be added later */ 2805 break; /* non-fatal: an rphy can be added later */
2806 } 2806 }
2807 2807
@@ -2809,7 +2809,7 @@ mptsas_hotplug_work(struct work_struct *work)
2809 if (sas_rphy_add(rphy)) { 2809 if (sas_rphy_add(rphy)) {
2810 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT 2810 dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
2811 "%s: exit at line=%d\n", ioc->name, 2811 "%s: exit at line=%d\n", ioc->name,
2812 __FUNCTION__, __LINE__)); 2812 __func__, __LINE__));
2813 sas_rphy_free(rphy); 2813 sas_rphy_free(rphy);
2814 break; 2814 break;
2815 } 2815 }
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index d142b6b4b976..9f9354fd3516 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -461,7 +461,7 @@ mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
461 461
462 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) { 462 if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
463 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!!\n", 463 dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
464 ioc->name,__FUNCTION__)); 464 ioc->name,__func__));
465 return; 465 return;
466 } 466 }
467 467
@@ -2187,7 +2187,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *m
2187 (ioc->debug_level & MPT_DEBUG_TM )) 2187 (ioc->debug_level & MPT_DEBUG_TM ))
2188 printk("%s: ha=%d [%d:%d:0] task_type=0x%02X " 2188 printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
2189 "iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X " 2189 "iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
2190 "term_cmnds=%d\n", __FUNCTION__, ioc->id, pScsiTmReply->Bus, 2190 "term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
2191 pScsiTmReply->TargetID, pScsiTmReq->TaskType, 2191 pScsiTmReply->TargetID, pScsiTmReq->TaskType,
2192 le16_to_cpu(pScsiTmReply->IOCStatus), 2192 le16_to_cpu(pScsiTmReply->IOCStatus),
2193 le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode, 2193 le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,
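
Most of the fusion hunks above simply replace the GCC-specific __FUNCTION__ with the standard C99 predefined identifier __func__ in debug printks. A minimal userspace sketch of the same logging pattern; the dbg_print macro and probe_example function are invented for illustration and are not part of the fusion driver:

    #include <stdio.h>

    /* __func__ is standard C99; __FUNCTION__ is a GCC alias kept only
     * for backwards compatibility, which is why the driver switches. */
    #define dbg_print(fmt, ...) \
            printf("%s: " fmt, __func__, ##__VA_ARGS__)

    static void probe_example(void)
    {
            dbg_print("no msg frames at line %d!!\n", __LINE__);
    }

    int main(void)
    {
            probe_example();
            return 0;
    }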
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 321eb9134635..f5ade1904aad 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -360,7 +360,7 @@ config THINKPAD_ACPI_VIDEO
360 If you are not sure, say Y here. 360 If you are not sure, say Y here.
361 361
362config THINKPAD_ACPI_HOTKEY_POLL 362config THINKPAD_ACPI_HOTKEY_POLL
363 bool "Suport NVRAM polling for hot keys" 363 bool "Support NVRAM polling for hot keys"
364 depends on THINKPAD_ACPI 364 depends on THINKPAD_ACPI
365 default y 365 default y
366 ---help--- 366 ---help---
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index e171650766ce..bf5e4d065436 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -13,7 +13,6 @@
13#include <linux/clk.h> 13#include <linux/clk.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/list.h>
17#include <linux/spinlock.h> 16#include <linux/spinlock.h>
18#include <linux/atmel-ssc.h> 17#include <linux/atmel-ssc.h>
19 18
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 19a1a254a0c5..889e5f898f6f 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -12,3 +12,4 @@ mmc_core-y := core.o bus.o host.o \
12 sdio.o sdio_ops.o sdio_bus.o \ 12 sdio.o sdio_ops.o sdio_bus.o \
13 sdio_cis.o sdio_io.o sdio_irq.o 13 sdio_cis.o sdio_io.o sdio_irq.o
14 14
15mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index fd95b18e988b..0d9b2d6f9ebf 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -252,6 +252,10 @@ int mmc_add_card(struct mmc_card *card)
252 if (ret) 252 if (ret)
253 return ret; 253 return ret;
254 254
255#ifdef CONFIG_DEBUG_FS
256 mmc_add_card_debugfs(card);
257#endif
258
255 mmc_card_set_present(card); 259 mmc_card_set_present(card);
256 260
257 return 0; 261 return 0;
@@ -263,6 +267,10 @@ int mmc_add_card(struct mmc_card *card)
263 */ 267 */
264void mmc_remove_card(struct mmc_card *card) 268void mmc_remove_card(struct mmc_card *card)
265{ 269{
270#ifdef CONFIG_DEBUG_FS
271 mmc_remove_card_debugfs(card);
272#endif
273
266 if (mmc_card_present(card)) { 274 if (mmc_card_present(card)) {
267 if (mmc_host_is_spi(card->host)) { 275 if (mmc_host_is_spi(card->host)) {
268 printk(KERN_INFO "%s: SPI card removed\n", 276 printk(KERN_INFO "%s: SPI card removed\n",
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index cdb332b7dedc..c819effa1032 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -52,5 +52,12 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
52 52
53extern int use_spi_crc; 53extern int use_spi_crc;
54 54
55/* Debugfs information for hosts and cards */
56void mmc_add_host_debugfs(struct mmc_host *host);
57void mmc_remove_host_debugfs(struct mmc_host *host);
58
59void mmc_add_card_debugfs(struct mmc_card *card);
60void mmc_remove_card_debugfs(struct mmc_card *card);
61
55#endif 62#endif
56 63
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
new file mode 100644
index 000000000000..1237bb4c722b
--- /dev/null
+++ b/drivers/mmc/core/debugfs.c
@@ -0,0 +1,225 @@
1/*
2 * Debugfs support for hosts and cards
3 *
4 * Copyright (C) 2008 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/debugfs.h>
11#include <linux/fs.h>
12#include <linux/seq_file.h>
13#include <linux/stat.h>
14
15#include <linux/mmc/card.h>
16#include <linux/mmc/host.h>
17
18#include "core.h"
19#include "mmc_ops.h"
20
21/* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */
22static int mmc_ios_show(struct seq_file *s, void *data)
23{
24 static const char *vdd_str[] = {
25 [8] = "2.0",
26 [9] = "2.1",
27 [10] = "2.2",
28 [11] = "2.3",
29 [12] = "2.4",
30 [13] = "2.5",
31 [14] = "2.6",
32 [15] = "2.7",
33 [16] = "2.8",
34 [17] = "2.9",
35 [18] = "3.0",
36 [19] = "3.1",
37 [20] = "3.2",
38 [21] = "3.3",
39 [22] = "3.4",
40 [23] = "3.5",
41 [24] = "3.6",
42 };
43 struct mmc_host *host = s->private;
44 struct mmc_ios *ios = &host->ios;
45 const char *str;
46
47 seq_printf(s, "clock:\t\t%u Hz\n", ios->clock);
48 seq_printf(s, "vdd:\t\t%u ", ios->vdd);
49 if ((1 << ios->vdd) & MMC_VDD_165_195)
50 seq_printf(s, "(1.65 - 1.95 V)\n");
51 else if (ios->vdd < (ARRAY_SIZE(vdd_str) - 1)
52 && vdd_str[ios->vdd] && vdd_str[ios->vdd + 1])
53 seq_printf(s, "(%s ~ %s V)\n", vdd_str[ios->vdd],
54 vdd_str[ios->vdd + 1]);
55 else
56 seq_printf(s, "(invalid)\n");
57
58 switch (ios->bus_mode) {
59 case MMC_BUSMODE_OPENDRAIN:
60 str = "open drain";
61 break;
62 case MMC_BUSMODE_PUSHPULL:
63 str = "push-pull";
64 break;
65 default:
66 str = "invalid";
67 break;
68 }
69 seq_printf(s, "bus mode:\t%u (%s)\n", ios->bus_mode, str);
70
71 switch (ios->chip_select) {
72 case MMC_CS_DONTCARE:
73 str = "don't care";
74 break;
75 case MMC_CS_HIGH:
76 str = "active high";
77 break;
78 case MMC_CS_LOW:
79 str = "active low";
80 break;
81 default:
82 str = "invalid";
83 break;
84 }
85 seq_printf(s, "chip select:\t%u (%s)\n", ios->chip_select, str);
86
87 switch (ios->power_mode) {
88 case MMC_POWER_OFF:
89 str = "off";
90 break;
91 case MMC_POWER_UP:
92 str = "up";
93 break;
94 case MMC_POWER_ON:
95 str = "on";
96 break;
97 default:
98 str = "invalid";
99 break;
100 }
101 seq_printf(s, "power mode:\t%u (%s)\n", ios->power_mode, str);
102 seq_printf(s, "bus width:\t%u (%u bits)\n",
103 ios->bus_width, 1 << ios->bus_width);
104
105 switch (ios->timing) {
106 case MMC_TIMING_LEGACY:
107 str = "legacy";
108 break;
109 case MMC_TIMING_MMC_HS:
110 str = "mmc high-speed";
111 break;
112 case MMC_TIMING_SD_HS:
113 str = "sd high-speed";
114 break;
115 default:
116 str = "invalid";
117 break;
118 }
119 seq_printf(s, "timing spec:\t%u (%s)\n", ios->timing, str);
120
121 return 0;
122}
123
124static int mmc_ios_open(struct inode *inode, struct file *file)
125{
126 return single_open(file, mmc_ios_show, inode->i_private);
127}
128
129static const struct file_operations mmc_ios_fops = {
130 .open = mmc_ios_open,
131 .read = seq_read,
132 .llseek = seq_lseek,
133 .release = single_release,
134};
135
136void mmc_add_host_debugfs(struct mmc_host *host)
137{
138 struct dentry *root;
139
140 root = debugfs_create_dir(mmc_hostname(host), NULL);
141 if (IS_ERR(root))
142 /* Don't complain -- debugfs just isn't enabled */
143 return;
144 if (!root)
145 /* Complain -- debugfs is enabled, but it failed to
146 * create the directory. */
147 goto err_root;
148
149 host->debugfs_root = root;
150
151 if (!debugfs_create_file("ios", S_IRUSR, root, host, &mmc_ios_fops))
152 goto err_ios;
153
154 return;
155
156err_ios:
157 debugfs_remove_recursive(root);
158 host->debugfs_root = NULL;
159err_root:
160 dev_err(&host->class_dev, "failed to initialize debugfs\n");
161}
162
163void mmc_remove_host_debugfs(struct mmc_host *host)
164{
165 debugfs_remove_recursive(host->debugfs_root);
166}
167
168static int mmc_dbg_card_status_get(void *data, u64 *val)
169{
170 struct mmc_card *card = data;
171 u32 status;
172 int ret;
173
174 mmc_claim_host(card->host);
175
176 ret = mmc_send_status(data, &status);
177 if (!ret)
178 *val = status;
179
180 mmc_release_host(card->host);
181
182 return ret;
183}
184DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get,
185 NULL, "%08llx\n");
186
187void mmc_add_card_debugfs(struct mmc_card *card)
188{
189 struct mmc_host *host = card->host;
190 struct dentry *root;
191
192 if (!host->debugfs_root)
193 return;
194
195 root = debugfs_create_dir(mmc_card_id(card), host->debugfs_root);
196 if (IS_ERR(root))
197 /* Don't complain -- debugfs just isn't enabled */
198 return;
199 if (!root)
200 /* Complain -- debugfs is enabled, but it failed to
201 * create the directory. */
202 goto err;
203
204 card->debugfs_root = root;
205
206 if (!debugfs_create_x32("state", S_IRUSR, root, &card->state))
207 goto err;
208
209 if (mmc_card_mmc(card) || mmc_card_sd(card))
210 if (!debugfs_create_file("status", S_IRUSR, root, card,
211 &mmc_dbg_card_status_fops))
212 goto err;
213
214 return;
215
216err:
217 debugfs_remove_recursive(root);
218 card->debugfs_root = NULL;
219 dev_err(&card->dev, "failed to initialize debugfs\n");
220}
221
222void mmc_remove_card_debugfs(struct mmc_card *card)
223{
224 debugfs_remove_recursive(card->debugfs_root);
225}
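
The new core debugfs file leans on two common kernel idioms: seq_file-backed read-only files for formatted state (the "ios" file) and DEFINE_SIMPLE_ATTRIBUTE for single-value attributes (the card "status" file). A hedged sketch of the simple-attribute half follows; the "demo" directory, demo_value and the surrounding module are invented names, not part of the MMC core:

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/stat.h>

    /* 'demo_value' stands in for a driver-private value such as the
     * card status read back in mmc_dbg_card_status_get() above. */
    static u64 demo_value;
    static struct dentry *demo_root;

    static int demo_get(void *data, u64 *val)
    {
            *val = demo_value;
            return 0;
    }
    DEFINE_SIMPLE_ATTRIBUTE(demo_fops, demo_get, NULL, "%08llx\n");

    static int __init demo_init(void)
    {
            demo_root = debugfs_create_dir("demo", NULL);
            if (IS_ERR(demo_root) || !demo_root)
                    return 0;       /* debugfs disabled or failed; not fatal */

            if (!debugfs_create_file("value", S_IRUSR, demo_root, NULL,
                                     &demo_fops))
                    printk(KERN_WARNING "demo: failed to create debugfs file\n");
            return 0;
    }

    static void __exit demo_exit(void)
    {
            if (demo_root && !IS_ERR(demo_root))
                    debugfs_remove_recursive(demo_root);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

With debugfs mounted, the value would then appear under /sys/kernel/debug/demo/value, just as the card attributes above appear under the host's directory.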
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 1d795c5379b5..6da80fd4d974 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -127,6 +127,10 @@ int mmc_add_host(struct mmc_host *host)
127 if (err) 127 if (err)
128 return err; 128 return err;
129 129
130#ifdef CONFIG_DEBUG_FS
131 mmc_add_host_debugfs(host);
132#endif
133
130 mmc_start_host(host); 134 mmc_start_host(host);
131 135
132 return 0; 136 return 0;
@@ -146,6 +150,10 @@ void mmc_remove_host(struct mmc_host *host)
146{ 150{
147 mmc_stop_host(host); 151 mmc_stop_host(host);
148 152
153#ifdef CONFIG_DEBUG_FS
154 mmc_remove_host_debugfs(host);
155#endif
156
149 device_del(&host->class_dev); 157 device_del(&host->class_dev);
150 158
151 led_trigger_unregister_simple(host->led); 159 led_trigger_unregister_simple(host->led);
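
The declarations added to core.h have no !CONFIG_DEBUG_FS counterparts, which is why mmc_add_host()/mmc_remove_host() and the bus code wrap every call in #ifdef CONFIG_DEBUG_FS. A hedged sketch of the alternative header-stub idiom, shown only for comparison and not what this patch implements:

    /* Hypothetical alternative: no-op inline stubs when debugfs is
     * disabled, so callers need no #ifdef at the call site. */
    struct mmc_host;

    #ifdef CONFIG_DEBUG_FS
    void mmc_add_host_debugfs(struct mmc_host *host);
    void mmc_remove_host_debugfs(struct mmc_host *host);
    #else
    static inline void mmc_add_host_debugfs(struct mmc_host *host) { }
    static inline void mmc_remove_host_debugfs(struct mmc_host *host) { }
    #endif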
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index a9a5657706c6..26bd80e65031 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -82,6 +82,8 @@
82# define MCI_OVRE ( 1 << 30) /* RX Overrun Error */ 82# define MCI_OVRE ( 1 << 30) /* RX Overrun Error */
83# define MCI_UNRE ( 1 << 31) /* TX Underrun Error */ 83# define MCI_UNRE ( 1 << 31) /* TX Underrun Error */
84 84
85#define MCI_REGS_SIZE 0x100
86
85/* Register access macros */ 87/* Register access macros */
86#define mci_readl(port,reg) \ 88#define mci_readl(port,reg) \
87 __raw_readl((port)->regs + MCI_##reg) 89 __raw_readl((port)->regs + MCI_##reg)
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index cce873c5a149..992b4beb757c 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -9,13 +9,18 @@
9 */ 9 */
10#include <linux/blkdev.h> 10#include <linux/blkdev.h>
11#include <linux/clk.h> 11#include <linux/clk.h>
12#include <linux/debugfs.h>
12#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/err.h>
15#include <linux/gpio.h>
13#include <linux/init.h> 16#include <linux/init.h>
14#include <linux/interrupt.h> 17#include <linux/interrupt.h>
15#include <linux/ioport.h> 18#include <linux/ioport.h>
16#include <linux/module.h> 19#include <linux/module.h>
17#include <linux/platform_device.h> 20#include <linux/platform_device.h>
18#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
22#include <linux/seq_file.h>
23#include <linux/stat.h>
19 24
20#include <linux/mmc/host.h> 25#include <linux/mmc/host.h>
21 26
@@ -24,7 +29,6 @@
24#include <asm/unaligned.h> 29#include <asm/unaligned.h>
25 30
26#include <asm/arch/board.h> 31#include <asm/arch/board.h>
27#include <asm/arch/gpio.h>
28 32
29#include "atmel-mci-regs.h" 33#include "atmel-mci-regs.h"
30 34
@@ -88,6 +92,188 @@ struct atmel_mci {
88#define atmci_clear_pending(host, event) \ 92#define atmci_clear_pending(host, event) \
89 clear_bit(event, &host->pending_events) 93 clear_bit(event, &host->pending_events)
90 94
95/*
96 * The debugfs stuff below is mostly optimized away when
97 * CONFIG_DEBUG_FS is not set.
98 */
99static int atmci_req_show(struct seq_file *s, void *v)
100{
101 struct atmel_mci *host = s->private;
102 struct mmc_request *mrq = host->mrq;
103 struct mmc_command *cmd;
104 struct mmc_command *stop;
105 struct mmc_data *data;
106
107 /* Make sure we get a consistent snapshot */
108 spin_lock_irq(&host->mmc->lock);
109
110 if (mrq) {
111 cmd = mrq->cmd;
112 data = mrq->data;
113 stop = mrq->stop;
114
115 if (cmd)
116 seq_printf(s,
117 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
118 cmd->opcode, cmd->arg, cmd->flags,
119 cmd->resp[0], cmd->resp[1], cmd->resp[2],
120 cmd->resp[2], cmd->error);
121 if (data)
122 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
123 data->bytes_xfered, data->blocks,
124 data->blksz, data->flags, data->error);
125 if (stop)
126 seq_printf(s,
127 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
128 stop->opcode, stop->arg, stop->flags,
129 stop->resp[0], stop->resp[1], stop->resp[2],
 130 stop->resp[3], stop->error);
131 }
132
133 spin_unlock_irq(&host->mmc->lock);
134
135 return 0;
136}
137
138static int atmci_req_open(struct inode *inode, struct file *file)
139{
140 return single_open(file, atmci_req_show, inode->i_private);
141}
142
143static const struct file_operations atmci_req_fops = {
144 .owner = THIS_MODULE,
145 .open = atmci_req_open,
146 .read = seq_read,
147 .llseek = seq_lseek,
148 .release = single_release,
149};
150
151static void atmci_show_status_reg(struct seq_file *s,
152 const char *regname, u32 value)
153{
154 static const char *sr_bit[] = {
155 [0] = "CMDRDY",
156 [1] = "RXRDY",
157 [2] = "TXRDY",
158 [3] = "BLKE",
159 [4] = "DTIP",
160 [5] = "NOTBUSY",
161 [8] = "SDIOIRQA",
162 [9] = "SDIOIRQB",
163 [16] = "RINDE",
164 [17] = "RDIRE",
165 [18] = "RCRCE",
166 [19] = "RENDE",
167 [20] = "RTOE",
168 [21] = "DCRCE",
169 [22] = "DTOE",
170 [30] = "OVRE",
171 [31] = "UNRE",
172 };
173 unsigned int i;
174
175 seq_printf(s, "%s:\t0x%08x", regname, value);
176 for (i = 0; i < ARRAY_SIZE(sr_bit); i++) {
177 if (value & (1 << i)) {
178 if (sr_bit[i])
179 seq_printf(s, " %s", sr_bit[i]);
180 else
181 seq_puts(s, " UNKNOWN");
182 }
183 }
184 seq_putc(s, '\n');
185}
186
187static int atmci_regs_show(struct seq_file *s, void *v)
188{
189 struct atmel_mci *host = s->private;
190 u32 *buf;
191
192 buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL);
193 if (!buf)
194 return -ENOMEM;
195
196 /* Grab a more or less consistent snapshot */
197 spin_lock_irq(&host->mmc->lock);
198 memcpy_fromio(buf, host->regs, MCI_REGS_SIZE);
199 spin_unlock_irq(&host->mmc->lock);
200
201 seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
202 buf[MCI_MR / 4],
203 buf[MCI_MR / 4] & MCI_MR_RDPROOF ? " RDPROOF" : "",
204 buf[MCI_MR / 4] & MCI_MR_WRPROOF ? " WRPROOF" : "",
205 buf[MCI_MR / 4] & 0xff);
206 seq_printf(s, "DTOR:\t0x%08x\n", buf[MCI_DTOR / 4]);
207 seq_printf(s, "SDCR:\t0x%08x\n", buf[MCI_SDCR / 4]);
208 seq_printf(s, "ARGR:\t0x%08x\n", buf[MCI_ARGR / 4]);
209 seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n",
210 buf[MCI_BLKR / 4],
211 buf[MCI_BLKR / 4] & 0xffff,
212 (buf[MCI_BLKR / 4] >> 16) & 0xffff);
213
 214 /* Don't read RSPR and RDR; reading them consumes the data */
215
216 atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
217 atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
218
219 return 0;
220}
221
222static int atmci_regs_open(struct inode *inode, struct file *file)
223{
224 return single_open(file, atmci_regs_show, inode->i_private);
225}
226
227static const struct file_operations atmci_regs_fops = {
228 .owner = THIS_MODULE,
229 .open = atmci_regs_open,
230 .read = seq_read,
231 .llseek = seq_lseek,
232 .release = single_release,
233};
234
235static void atmci_init_debugfs(struct atmel_mci *host)
236{
237 struct mmc_host *mmc;
238 struct dentry *root;
239 struct dentry *node;
240 struct resource *res;
241
242 mmc = host->mmc;
243 root = mmc->debugfs_root;
244 if (!root)
245 return;
246
247 node = debugfs_create_file("regs", S_IRUSR, root, host,
248 &atmci_regs_fops);
249 if (IS_ERR(node))
250 return;
251 if (!node)
252 goto err;
253
254 res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
255 node->d_inode->i_size = res->end - res->start + 1;
256
257 node = debugfs_create_file("req", S_IRUSR, root, host, &atmci_req_fops);
258 if (!node)
259 goto err;
260
261 node = debugfs_create_x32("pending_events", S_IRUSR, root,
262 (u32 *)&host->pending_events);
263 if (!node)
264 goto err;
265
266 node = debugfs_create_x32("completed_events", S_IRUSR, root,
267 (u32 *)&host->completed_events);
268 if (!node)
269 goto err;
270
271 return;
272
273err:
274 dev_err(&host->pdev->dev,
275 "failed to initialize debugfs for controller\n");
276}
91 277
92static void atmci_enable(struct atmel_mci *host) 278static void atmci_enable(struct atmel_mci *host)
93{ 279{
@@ -388,7 +574,7 @@ static int atmci_get_ro(struct mmc_host *mmc)
388 int read_only = 0; 574 int read_only = 0;
389 struct atmel_mci *host = mmc_priv(mmc); 575 struct atmel_mci *host = mmc_priv(mmc);
390 576
391 if (host->wp_pin >= 0) { 577 if (gpio_is_valid(host->wp_pin)) {
392 read_only = gpio_get_value(host->wp_pin); 578 read_only = gpio_get_value(host->wp_pin);
393 dev_dbg(&mmc->class_dev, "card is %s\n", 579 dev_dbg(&mmc->class_dev, "card is %s\n",
394 read_only ? "read-only" : "read-write"); 580 read_only ? "read-only" : "read-write");
@@ -450,7 +636,7 @@ static void atmci_detect_change(unsigned long data)
450 * been freed. 636 * been freed.
451 */ 637 */
452 smp_rmb(); 638 smp_rmb();
453 if (host->detect_pin < 0) 639 if (!gpio_is_valid(host->detect_pin))
454 return; 640 return;
455 641
456 enable_irq(gpio_to_irq(host->detect_pin)); 642 enable_irq(gpio_to_irq(host->detect_pin));
@@ -865,7 +1051,7 @@ static int __init atmci_probe(struct platform_device *pdev)
865 1051
866 /* Assume card is present if we don't have a detect pin */ 1052 /* Assume card is present if we don't have a detect pin */
867 host->present = 1; 1053 host->present = 1;
868 if (host->detect_pin >= 0) { 1054 if (gpio_is_valid(host->detect_pin)) {
869 if (gpio_request(host->detect_pin, "mmc_detect")) { 1055 if (gpio_request(host->detect_pin, "mmc_detect")) {
870 dev_dbg(&mmc->class_dev, "no detect pin available\n"); 1056 dev_dbg(&mmc->class_dev, "no detect pin available\n");
871 host->detect_pin = -1; 1057 host->detect_pin = -1;
@@ -873,7 +1059,7 @@ static int __init atmci_probe(struct platform_device *pdev)
873 host->present = !gpio_get_value(host->detect_pin); 1059 host->present = !gpio_get_value(host->detect_pin);
874 } 1060 }
875 } 1061 }
876 if (host->wp_pin >= 0) { 1062 if (gpio_is_valid(host->wp_pin)) {
877 if (gpio_request(host->wp_pin, "mmc_wp")) { 1063 if (gpio_request(host->wp_pin, "mmc_wp")) {
878 dev_dbg(&mmc->class_dev, "no WP pin available\n"); 1064 dev_dbg(&mmc->class_dev, "no WP pin available\n");
879 host->wp_pin = -1; 1065 host->wp_pin = -1;
@@ -884,7 +1070,7 @@ static int __init atmci_probe(struct platform_device *pdev)
884 1070
885 mmc_add_host(mmc); 1071 mmc_add_host(mmc);
886 1072
887 if (host->detect_pin >= 0) { 1073 if (gpio_is_valid(host->detect_pin)) {
888 setup_timer(&host->detect_timer, atmci_detect_change, 1074 setup_timer(&host->detect_timer, atmci_detect_change,
889 (unsigned long)host); 1075 (unsigned long)host);
890 1076
@@ -905,6 +1091,8 @@ static int __init atmci_probe(struct platform_device *pdev)
905 "Atmel MCI controller at 0x%08lx irq %d\n", 1091 "Atmel MCI controller at 0x%08lx irq %d\n",
906 host->mapbase, irq); 1092 host->mapbase, irq);
907 1093
1094 atmci_init_debugfs(host);
1095
908 return 0; 1096 return 0;
909 1097
910err_request_irq: 1098err_request_irq:
@@ -923,7 +1111,9 @@ static int __exit atmci_remove(struct platform_device *pdev)
923 platform_set_drvdata(pdev, NULL); 1111 platform_set_drvdata(pdev, NULL);
924 1112
925 if (host) { 1113 if (host) {
926 if (host->detect_pin >= 0) { 1114 /* Debugfs stuff is cleaned up by mmc core */
1115
1116 if (gpio_is_valid(host->detect_pin)) {
927 int pin = host->detect_pin; 1117 int pin = host->detect_pin;
928 1118
929 /* Make sure the timer doesn't enable the interrupt */ 1119 /* Make sure the timer doesn't enable the interrupt */
@@ -943,7 +1133,7 @@ static int __exit atmci_remove(struct platform_device *pdev)
943 mci_readl(host, SR); 1133 mci_readl(host, SR);
944 clk_disable(host->mck); 1134 clk_disable(host->mck);
945 1135
946 if (host->wp_pin >= 0) 1136 if (gpio_is_valid(host->wp_pin))
947 gpio_free(host->wp_pin); 1137 gpio_free(host->wp_pin);
948 1138
949 free_irq(platform_get_irq(pdev, 0), host->mmc); 1139 free_irq(platform_get_irq(pdev, 0), host->mmc);
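
Several of the atmel-mci hunks above swap open-coded "pin >= 0" tests for gpio_is_valid(), which also rejects pin numbers beyond the last valid GPIO. A minimal hedged sketch of the request/read/free pattern under that convention; the helper name and the active-low board wiring are invented for illustration:

    #include <linux/errno.h>
    #include <linux/gpio.h>

    /* 'detect_pin' would come from platform data; any invalid number
     * (e.g. -1) means "no card-detect line wired up". */
    static int example_card_present(int detect_pin)
    {
            int present;

            if (!gpio_is_valid(detect_pin))
                    return 1;       /* no pin: assume the card is present */

            if (gpio_request(detect_pin, "mmc_detect"))
                    return -EBUSY;

            /* card detect assumed active-low on this imaginary board */
            present = !gpio_get_value(detect_pin);
            gpio_free(detect_pin);

            return present;
    }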
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
index 5e880c0f1349..f61406da65d2 100644
--- a/drivers/mmc/host/imxmmc.c
+++ b/drivers/mmc/host/imxmmc.c
@@ -26,12 +26,6 @@
26 * 26 *
27 */ 27 */
28 28
29#ifdef CONFIG_MMC_DEBUG
30#define DEBUG
31#else
32#undef DEBUG
33#endif
34
35#include <linux/module.h> 29#include <linux/module.h>
36#include <linux/init.h> 30#include <linux/init.h>
37#include <linux/ioport.h> 31#include <linux/ioport.h>
@@ -907,31 +901,12 @@ static const struct mmc_host_ops imxmci_ops = {
907 .get_ro = imxmci_get_ro, 901 .get_ro = imxmci_get_ro,
908}; 902};
909 903
910static struct resource *platform_device_resource(struct platform_device *dev, unsigned int mask, int nr)
911{
912 int i;
913
914 for (i = 0; i < dev->num_resources; i++)
915 if (dev->resource[i].flags == mask && nr-- == 0)
916 return &dev->resource[i];
917 return NULL;
918}
919
920static int platform_device_irq(struct platform_device *dev, int nr)
921{
922 int i;
923
924 for (i = 0; i < dev->num_resources; i++)
925 if (dev->resource[i].flags == IORESOURCE_IRQ && nr-- == 0)
926 return dev->resource[i].start;
927 return NO_IRQ;
928}
929
930static void imxmci_check_status(unsigned long data) 904static void imxmci_check_status(unsigned long data)
931{ 905{
932 struct imxmci_host *host = (struct imxmci_host *)data; 906 struct imxmci_host *host = (struct imxmci_host *)data;
933 907
934 if( host->pdata->card_present(mmc_dev(host->mmc)) != host->present ) { 908 if (host->pdata && host->pdata->card_present &&
909 host->pdata->card_present(mmc_dev(host->mmc)) != host->present) {
935 host->present ^= 1; 910 host->present ^= 1;
936 dev_info(mmc_dev(host->mmc), "card %s\n", 911 dev_info(mmc_dev(host->mmc), "card %s\n",
937 host->present ? "inserted" : "removed"); 912 host->present ? "inserted" : "removed");
@@ -962,13 +937,12 @@ static int imxmci_probe(struct platform_device *pdev)
962 937
963 printk(KERN_INFO "i.MX mmc driver\n"); 938 printk(KERN_INFO "i.MX mmc driver\n");
964 939
965 r = platform_device_resource(pdev, IORESOURCE_MEM, 0); 940 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
966 irq = platform_device_irq(pdev, 0); 941 irq = platform_get_irq(pdev, 0);
967 if (!r || irq == NO_IRQ) 942 if (!r || irq < 0)
968 return -ENXIO; 943 return -ENXIO;
969 944
970 r = request_mem_region(r->start, 0x100, "IMXMCI"); 945 if (!request_mem_region(r->start, 0x100, pdev->name))
971 if (!r)
972 return -EBUSY; 946 return -EBUSY;
973 947
974 mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev); 948 mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
@@ -995,6 +969,8 @@ static int imxmci_probe(struct platform_device *pdev)
995 host->mmc = mmc; 969 host->mmc = mmc;
996 host->dma_allocated = 0; 970 host->dma_allocated = 0;
997 host->pdata = pdev->dev.platform_data; 971 host->pdata = pdev->dev.platform_data;
972 if (!host->pdata)
973 dev_warn(&pdev->dev, "No platform data provided!\n");
998 974
999 spin_lock_init(&host->lock); 975 spin_lock_init(&host->lock);
1000 host->res = r; 976 host->res = r;
@@ -1047,7 +1023,11 @@ static int imxmci_probe(struct platform_device *pdev)
1047 if (ret) 1023 if (ret)
1048 goto out; 1024 goto out;
1049 1025
1050 host->present = host->pdata->card_present(mmc_dev(mmc)); 1026 if (host->pdata && host->pdata->card_present)
1027 host->present = host->pdata->card_present(mmc_dev(mmc));
 1028 else /* if there is no way to detect, assume the card is present */
1029 host->present = 1;
1030
1051 init_timer(&host->timer); 1031 init_timer(&host->timer);
1052 host->timer.data = (unsigned long)host; 1032 host->timer.data = (unsigned long)host;
1053 host->timer.function = imxmci_check_status; 1033 host->timer.function = imxmci_check_status;
@@ -1073,7 +1053,7 @@ out:
1073 } 1053 }
1074 if (mmc) 1054 if (mmc)
1075 mmc_free_host(mmc); 1055 mmc_free_host(mmc);
1076 release_resource(r); 1056 release_mem_region(r->start, 0x100);
1077 return ret; 1057 return ret;
1078} 1058}
1079 1059
@@ -1102,7 +1082,7 @@ static int imxmci_remove(struct platform_device *pdev)
1102 clk_disable(host->clk); 1082 clk_disable(host->clk);
1103 clk_put(host->clk); 1083 clk_put(host->clk);
1104 1084
1105 release_resource(host->res); 1085 release_mem_region(host->res->start, 0x100);
1106 1086
1107 mmc_free_host(mmc); 1087 mmc_free_host(mmc);
1108 } 1088 }
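
The imxmmc changes drop the driver's private platform_device_resource()/platform_device_irq() helpers and the release_resource() calls in favour of the standard platform_get_resource(), platform_get_irq() and release_mem_region() API. A hedged probe skeleton built on the same calls; the 0x100 register window mirrors the driver above, everything past the lookups is illustrative:

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct resource *r;
            void __iomem *regs;
            int irq;

            r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            irq = platform_get_irq(pdev, 0);
            if (!r || irq < 0)
                    return -ENXIO;

            if (!request_mem_region(r->start, 0x100, pdev->name))
                    return -EBUSY;

            regs = ioremap(r->start, 0x100);
            if (!regs) {
                    release_mem_region(r->start, 0x100);
                    return -ENOMEM;
            }

            /* ... register the host, request the IRQ, etc. ... */
            return 0;
    }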
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 41cc63360e43..7503b81374e0 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1076,6 +1076,7 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1076 */ 1076 */
1077 if (canpower && ios->power_mode == MMC_POWER_OFF) { 1077 if (canpower && ios->power_mode == MMC_POWER_OFF) {
1078 int mres; 1078 int mres;
1079 u8 nullbyte = 0;
1079 1080
1080 host->spi->mode &= ~(SPI_CPOL|SPI_CPHA); 1081 host->spi->mode &= ~(SPI_CPOL|SPI_CPHA);
1081 mres = spi_setup(host->spi); 1082 mres = spi_setup(host->spi);
@@ -1083,7 +1084,7 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1083 dev_dbg(&host->spi->dev, 1084 dev_dbg(&host->spi->dev,
1084 "switch to SPI mode 0 failed\n"); 1085 "switch to SPI mode 0 failed\n");
1085 1086
1086 if (spi_w8r8(host->spi, 0x00) < 0) 1087 if (spi_write(host->spi, &nullbyte, 1) < 0)
1087 dev_dbg(&host->spi->dev, 1088 dev_dbg(&host->spi->dev,
1088 "put spi signals to low failed\n"); 1089 "put spi signals to low failed\n");
1089 1090
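
The mmc_spi hunk replaces spi_w8r8() (a one-byte write followed by a one-byte read) with a plain single-byte spi_write() of 0x00, presumably so that only a write transfer is queued and the data line is held low while the card powers down, matching the "put spi signals to low" intent in the surrounding code. A minimal hedged sketch of the call; the helper name is invented and 'spi' is assumed to be an already-configured struct spi_device:

    #include <linux/spi/spi.h>

    static int drive_mosi_low(struct spi_device *spi)
    {
            u8 nullbyte = 0;

            /* spi_write() only clocks out the buffer contents */
            return spi_write(spi, &nullbyte, 1);
    }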
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index eed06d068fd1..14f11f8b9e5f 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -1,5 +1,3 @@
1# $Id: Kconfig,v 1.11 2005/11/07 11:14:19 gleixner Exp $
2
3menuconfig MTD 1menuconfig MTD
4 tristate "Memory Technology Device (MTD) support" 2 tristate "Memory Technology Device (MTD) support"
5 depends on HAS_IOMEM 3 depends on HAS_IOMEM
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 52d51eb91c16..d072ca5be689 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -21,8 +21,6 @@
21 This is access code for flashes using ARM's flash partitioning 21 This is access code for flashes using ARM's flash partitioning
22 standards. 22 standards.
23 23
24 $Id: afs.c,v 1.15 2005/11/07 11:14:19 gleixner Exp $
25
26======================================================================*/ 24======================================================================*/
27 25
28#include <linux/module.h> 26#include <linux/module.h>
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index fcd1aeccdf93..5f1b472137a0 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -4,8 +4,6 @@
4 * 4 *
5 * (C) 2000 Red Hat. GPL'd 5 * (C) 2000 Red Hat. GPL'd
6 * 6 *
7 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
8 *
9 * 7 *
10 * 10/10/2000 Nicolas Pitre <nico@cam.org> 8 * 10/10/2000 Nicolas Pitre <nico@cam.org>
11 * - completely revamped method functions so they are aware and 9 * - completely revamped method functions so they are aware and
@@ -50,6 +48,8 @@
50#define I82802AC 0x00ac 48#define I82802AC 0x00ac
51#define MANUFACTURER_ST 0x0020 49#define MANUFACTURER_ST 0x0020
52#define M50LPW080 0x002F 50#define M50LPW080 0x002F
51#define M50FLW080A 0x0080
52#define M50FLW080B 0x0081
53#define AT49BV640D 0x02de 53#define AT49BV640D 0x02de
54 54
55static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 55static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
@@ -204,7 +204,7 @@ static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
204{ 204{
205 struct map_info *map = mtd->priv; 205 struct map_info *map = mtd->priv;
206 struct cfi_private *cfi = map->fldrv_priv; 206 struct cfi_private *cfi = map->fldrv_priv;
207 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; 207 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
208 208
209 printk(KERN_WARNING "cfi_cmdset_0001: Suspend " 209 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
210 "erase on write disabled.\n"); 210 "erase on write disabled.\n");
@@ -301,6 +301,8 @@ static struct cfi_fixup jedec_fixup_table[] = {
301 { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, }, 301 { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
302 { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, }, 302 { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
303 { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, }, 303 { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
304 { MANUFACTURER_ST, M50FLW080A, fixup_use_fwh_lock, NULL, },
305 { MANUFACTURER_ST, M50FLW080B, fixup_use_fwh_lock, NULL, },
304 { 0, 0, NULL, NULL } 306 { 0, 0, NULL, NULL }
305}; 307};
306static struct cfi_fixup fixup_table[] = { 308static struct cfi_fixup fixup_table[] = {
@@ -1147,7 +1149,7 @@ static int inval_cache_and_wait_for_operation(
1147 struct cfi_private *cfi = map->fldrv_priv; 1149 struct cfi_private *cfi = map->fldrv_priv;
1148 map_word status, status_OK = CMD(0x80); 1150 map_word status, status_OK = CMD(0x80);
1149 int chip_state = chip->state; 1151 int chip_state = chip->state;
1150 unsigned int timeo, sleep_time; 1152 unsigned int timeo, sleep_time, reset_timeo;
1151 1153
1152 spin_unlock(chip->mutex); 1154 spin_unlock(chip->mutex);
1153 if (inval_len) 1155 if (inval_len)
@@ -1158,6 +1160,7 @@ static int inval_cache_and_wait_for_operation(
1158 timeo = chip_op_time * 8; 1160 timeo = chip_op_time * 8;
1159 if (!timeo) 1161 if (!timeo)
1160 timeo = 500000; 1162 timeo = 500000;
1163 reset_timeo = timeo;
1161 sleep_time = chip_op_time / 2; 1164 sleep_time = chip_op_time / 2;
1162 1165
1163 for (;;) { 1166 for (;;) {
@@ -1199,6 +1202,12 @@ static int inval_cache_and_wait_for_operation(
1199 remove_wait_queue(&chip->wq, &wait); 1202 remove_wait_queue(&chip->wq, &wait);
1200 spin_lock(chip->mutex); 1203 spin_lock(chip->mutex);
1201 } 1204 }
1205 if (chip->erase_suspended || chip->write_suspended) {
 1206 /* Suspend has occurred while sleeping: reset the timeout */
1207 timeo = reset_timeo;
1208 chip->erase_suspended = 0;
1209 chip->write_suspended = 0;
1210 }
1202 } 1211 }
1203 1212
1204 /* Done and happy. */ 1213 /* Done and happy. */
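
The cfi_cmdset_0001 hunk above keeps the original timeout in reset_timeo and restores it whenever an erase or write was suspended while the polling loop slept, so time spent suspended does not count against the operation. A self-contained userspace model of that pattern; the tick counts and the simulated suspend window are invented purely for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int timeo = 8;
            const unsigned int reset_timeo = timeo;
            unsigned int tick;

            for (tick = 0; tick < 30 && timeo; tick++) {
                    /* pretend the operation was suspended on ticks 3..5 */
                    int suspended = (tick >= 3 && tick <= 5);

                    if (suspended) {
                            /* time spent suspended must not count */
                            timeo = reset_timeo;
                            continue;
                    }
                    timeo--;
            }

            printf("%s after %u ticks\n",
                   timeo ? "still running" : "timed out", tick);
            return 0;
    }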
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index f7fcc6389533..a972cc6be436 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -16,9 +16,6 @@
16 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com 16 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
17 * 17 *
18 * This code is GPL 18 * This code is GPL
19 *
20 * $Id: cfi_cmdset_0002.c,v 1.122 2005/11/07 11:14:22 gleixner Exp $
21 *
22 */ 19 */
23 20
24#include <linux/module.h> 21#include <linux/module.h>
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 1b720cc571f3..d4714dd9f7ab 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -4,8 +4,6 @@
4 * 4 *
5 * (C) 2000 Red Hat. GPL'd 5 * (C) 2000 Red Hat. GPL'd
6 * 6 *
7 * $Id: cfi_cmdset_0020.c,v 1.22 2005/11/07 11:14:22 gleixner Exp $
8 *
9 * 10/10/2000 Nicolas Pitre <nico@cam.org> 7 * 10/10/2000 Nicolas Pitre <nico@cam.org>
10 * - completely revamped method functions so they are aware and 8 * - completely revamped method functions so they are aware and
11 * independent of the flash geometry (buswidth, interleave, etc.) 9 * independent of the flash geometry (buswidth, interleave, etc.)
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index a4463a91ce31..c418e92e1d92 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -1,7 +1,6 @@
1/* 1/*
2 Common Flash Interface probe code. 2 Common Flash Interface probe code.
3 (C) 2000 Red Hat. GPL'd. 3 (C) 2000 Red Hat. GPL'd.
4 $Id: cfi_probe.c,v 1.86 2005/11/29 14:48:31 gleixner Exp $
5*/ 4*/
6 5
7#include <linux/module.h> 6#include <linux/module.h>
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index 72e0022a47bf..0ee457018016 100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
@@ -6,9 +6,6 @@
6 * Copyright (C) 2003 STMicroelectronics Limited 6 * Copyright (C) 2003 STMicroelectronics Limited
7 * 7 *
8 * This code is covered by the GPL. 8 * This code is covered by the GPL.
9 *
10 * $Id: cfi_util.c,v 1.10 2005/11/07 11:14:23 gleixner Exp $
11 *
12 */ 9 */
13 10
14#include <linux/module.h> 11#include <linux/module.h>
diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c
index 2174c97549f0..c85760968227 100644
--- a/drivers/mtd/chips/chipreg.c
+++ b/drivers/mtd/chips/chipreg.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: chipreg.c,v 1.17 2004/11/16 18:29:00 dwmw2 Exp $
3 *
4 * Registration for chip drivers 2 * Registration for chip drivers
5 * 3 *
6 */ 4 */
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index d338b8c92780..f061885b2812 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -2,7 +2,6 @@
2 * Routines common to all CFI-type probes. 2 * Routines common to all CFI-type probes.
3 * (C) 2001-2003 Red Hat, Inc. 3 * (C) 2001-2003 Red Hat, Inc.
4 * GPL'd 4 * GPL'd
5 * $Id: gen_probe.c,v 1.24 2005/11/07 11:14:23 gleixner Exp $
6 */ 5 */
7 6
8#include <linux/kernel.h> 7#include <linux/kernel.h>
@@ -71,8 +70,8 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
71 interleave and device type, etc. */ 70 interleave and device type, etc. */
72 if (!genprobe_new_chip(map, cp, &cfi)) { 71 if (!genprobe_new_chip(map, cp, &cfi)) {
73 /* The probe didn't like it */ 72 /* The probe didn't like it */
74 printk(KERN_DEBUG "%s: Found no %s device at location zero\n", 73 pr_debug("%s: Found no %s device at location zero\n",
75 cp->name, map->name); 74 cp->name, map->name);
76 return NULL; 75 return NULL;
77 } 76 }
78 77
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index aa07575eb288..dbba5abf0db8 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1,7 +1,6 @@
1/* 1/*
2 Common Flash Interface probe code. 2 Common Flash Interface probe code.
3 (C) 2000 Red Hat. GPL'd. 3 (C) 2000 Red Hat. GPL'd.
4 $Id: jedec_probe.c,v 1.66 2005/11/07 11:14:23 gleixner Exp $
5 See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5) 4 See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
6 for the standard this probe goes back to. 5 for the standard this probe goes back to.
7 6
@@ -26,6 +25,7 @@
26/* Manufacturers */ 25/* Manufacturers */
27#define MANUFACTURER_AMD 0x0001 26#define MANUFACTURER_AMD 0x0001
28#define MANUFACTURER_ATMEL 0x001f 27#define MANUFACTURER_ATMEL 0x001f
28#define MANUFACTURER_EON 0x001c
29#define MANUFACTURER_FUJITSU 0x0004 29#define MANUFACTURER_FUJITSU 0x0004
30#define MANUFACTURER_HYUNDAI 0x00AD 30#define MANUFACTURER_HYUNDAI 0x00AD
31#define MANUFACTURER_INTEL 0x0089 31#define MANUFACTURER_INTEL 0x0089
@@ -37,6 +37,7 @@
37#define MANUFACTURER_ST 0x0020 37#define MANUFACTURER_ST 0x0020
38#define MANUFACTURER_TOSHIBA 0x0098 38#define MANUFACTURER_TOSHIBA 0x0098
39#define MANUFACTURER_WINBOND 0x00da 39#define MANUFACTURER_WINBOND 0x00da
40#define CONTINUATION_CODE 0x007f
40 41
41 42
42/* AMD */ 43/* AMD */
@@ -58,6 +59,8 @@
58#define AM29LV040B 0x004F 59#define AM29LV040B 0x004F
59#define AM29F032B 0x0041 60#define AM29F032B 0x0041
60#define AM29F002T 0x00B0 61#define AM29F002T 0x00B0
62#define AM29SL800DB 0x226B
63#define AM29SL800DT 0x22EA
61 64
62/* Atmel */ 65/* Atmel */
63#define AT49BV512 0x0003 66#define AT49BV512 0x0003
@@ -67,6 +70,10 @@
67#define AT49BV32X 0x00C8 70#define AT49BV32X 0x00C8
68#define AT49BV32XT 0x00C9 71#define AT49BV32XT 0x00C9
69 72
73/* Eon */
74#define EN29SL800BB 0x226B
75#define EN29SL800BT 0x22EA
76
70/* Fujitsu */ 77/* Fujitsu */
71#define MBM29F040C 0x00A4 78#define MBM29F040C 0x00A4
72#define MBM29F800BA 0x2258 79#define MBM29F800BA 0x2258
@@ -141,6 +148,8 @@
141#define M50FW080 0x002D 148#define M50FW080 0x002D
142#define M50FW016 0x002E 149#define M50FW016 0x002E
143#define M50LPW080 0x002F 150#define M50LPW080 0x002F
151#define M50FLW080A 0x0080
152#define M50FLW080B 0x0081
144 153
145/* SST */ 154/* SST */
146#define SST29EE020 0x0010 155#define SST29EE020 0x0010
@@ -191,6 +200,7 @@ enum uaddr {
191 MTD_UADDR_0x0555_0x0AAA, 200 MTD_UADDR_0x0555_0x0AAA,
192 MTD_UADDR_0x5555_0x2AAA, 201 MTD_UADDR_0x5555_0x2AAA,
193 MTD_UADDR_0x0AAA_0x0555, 202 MTD_UADDR_0x0AAA_0x0555,
203 MTD_UADDR_0xAAAA_0x5555,
194 MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */ 204 MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
195 MTD_UADDR_UNNECESSARY, /* Does not require any address */ 205 MTD_UADDR_UNNECESSARY, /* Does not require any address */
196}; 206};
@@ -238,6 +248,11 @@ static const struct unlock_addr unlock_addrs[] = {
238 .addr2 = 0x0555 248 .addr2 = 0x0555
239 }, 249 },
240 250
251 [MTD_UADDR_0xAAAA_0x5555] = {
252 .addr1 = 0xaaaa,
253 .addr2 = 0x5555
254 },
255
241 [MTD_UADDR_DONT_CARE] = { 256 [MTD_UADDR_DONT_CARE] = {
242 .addr1 = 0x0000, /* Doesn't matter which address */ 257 .addr1 = 0x0000, /* Doesn't matter which address */
243 .addr2 = 0x0000 /* is used - must be last entry */ 258 .addr2 = 0x0000 /* is used - must be last entry */
@@ -522,6 +537,36 @@ static const struct amd_flash_info jedec_table[] = {
522 ERASEINFO(0x04000,1), 537 ERASEINFO(0x04000,1),
523 } 538 }
524 }, { 539 }, {
540 .mfr_id = MANUFACTURER_AMD,
541 .dev_id = AM29SL800DT,
542 .name = "AMD AM29SL800DT",
543 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
544 .uaddr = MTD_UADDR_0x0AAA_0x0555,
545 .dev_size = SIZE_1MiB,
546 .cmd_set = P_ID_AMD_STD,
547 .nr_regions = 4,
548 .regions = {
549 ERASEINFO(0x10000,15),
550 ERASEINFO(0x08000,1),
551 ERASEINFO(0x02000,2),
552 ERASEINFO(0x04000,1),
553 }
554 }, {
555 .mfr_id = MANUFACTURER_AMD,
556 .dev_id = AM29SL800DB,
557 .name = "AMD AM29SL800DB",
558 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
559 .uaddr = MTD_UADDR_0x0AAA_0x0555,
560 .dev_size = SIZE_1MiB,
561 .cmd_set = P_ID_AMD_STD,
562 .nr_regions = 4,
563 .regions = {
564 ERASEINFO(0x04000,1),
565 ERASEINFO(0x02000,2),
566 ERASEINFO(0x08000,1),
567 ERASEINFO(0x10000,15),
568 }
569 }, {
525 .mfr_id = MANUFACTURER_ATMEL, 570 .mfr_id = MANUFACTURER_ATMEL,
526 .dev_id = AT49BV512, 571 .dev_id = AT49BV512,
527 .name = "Atmel AT49BV512", 572 .name = "Atmel AT49BV512",
@@ -599,6 +644,36 @@ static const struct amd_flash_info jedec_table[] = {
599 ERASEINFO(0x02000,8) 644 ERASEINFO(0x02000,8)
600 } 645 }
601 }, { 646 }, {
647 .mfr_id = MANUFACTURER_EON,
648 .dev_id = EN29SL800BT,
649 .name = "Eon EN29SL800BT",
650 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
651 .uaddr = MTD_UADDR_0x0AAA_0x0555,
652 .dev_size = SIZE_1MiB,
653 .cmd_set = P_ID_AMD_STD,
654 .nr_regions = 4,
655 .regions = {
656 ERASEINFO(0x10000,15),
657 ERASEINFO(0x08000,1),
658 ERASEINFO(0x02000,2),
659 ERASEINFO(0x04000,1),
660 }
661 }, {
662 .mfr_id = MANUFACTURER_EON,
663 .dev_id = EN29SL800BB,
664 .name = "Eon EN29SL800BB",
665 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
666 .uaddr = MTD_UADDR_0x0AAA_0x0555,
667 .dev_size = SIZE_1MiB,
668 .cmd_set = P_ID_AMD_STD,
669 .nr_regions = 4,
670 .regions = {
671 ERASEINFO(0x04000,1),
672 ERASEINFO(0x02000,2),
673 ERASEINFO(0x08000,1),
674 ERASEINFO(0x10000,15),
675 }
676 }, {
602 .mfr_id = MANUFACTURER_FUJITSU, 677 .mfr_id = MANUFACTURER_FUJITSU,
603 .dev_id = MBM29F040C, 678 .dev_id = MBM29F040C,
604 .name = "Fujitsu MBM29F040C", 679 .name = "Fujitsu MBM29F040C",
@@ -1392,8 +1467,8 @@ static const struct amd_flash_info jedec_table[] = {
1392 .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1467 .mfr_id = MANUFACTURER_SST, /* should be CFI */
1393 .dev_id = SST39LF160, 1468 .dev_id = SST39LF160,
1394 .name = "SST 39LF160", 1469 .name = "SST 39LF160",
1395 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1470 .devtypes = CFI_DEVICETYPE_X16,
1396 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ 1471 .uaddr = MTD_UADDR_0xAAAA_0x5555,
1397 .dev_size = SIZE_2MiB, 1472 .dev_size = SIZE_2MiB,
1398 .cmd_set = P_ID_AMD_STD, 1473 .cmd_set = P_ID_AMD_STD,
1399 .nr_regions = 2, 1474 .nr_regions = 2,
@@ -1405,8 +1480,8 @@ static const struct amd_flash_info jedec_table[] = {
1405 .mfr_id = MANUFACTURER_SST, /* should be CFI */ 1480 .mfr_id = MANUFACTURER_SST, /* should be CFI */
1406 .dev_id = SST39VF1601, 1481 .dev_id = SST39VF1601,
1407 .name = "SST 39VF1601", 1482 .name = "SST 39VF1601",
1408 .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, 1483 .devtypes = CFI_DEVICETYPE_X16,
1409 .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */ 1484 .uaddr = MTD_UADDR_0xAAAA_0x5555,
1410 .dev_size = SIZE_2MiB, 1485 .dev_size = SIZE_2MiB,
1411 .cmd_set = P_ID_AMD_STD, 1486 .cmd_set = P_ID_AMD_STD,
1412 .nr_regions = 2, 1487 .nr_regions = 2,
@@ -1590,6 +1665,36 @@ static const struct amd_flash_info jedec_table[] = {
1590 .nr_regions = 1, 1665 .nr_regions = 1,
1591 .regions = { 1666 .regions = {
1592 ERASEINFO(0x10000,16), 1667 ERASEINFO(0x10000,16),
1668 },
1669 }, {
1670 .mfr_id = MANUFACTURER_ST,
1671 .dev_id = M50FLW080A,
1672 .name = "ST M50FLW080A",
1673 .devtypes = CFI_DEVICETYPE_X8,
1674 .uaddr = MTD_UADDR_UNNECESSARY,
1675 .dev_size = SIZE_1MiB,
1676 .cmd_set = P_ID_INTEL_EXT,
1677 .nr_regions = 4,
1678 .regions = {
1679 ERASEINFO(0x1000,16),
1680 ERASEINFO(0x10000,13),
1681 ERASEINFO(0x1000,16),
1682 ERASEINFO(0x1000,16),
1683 }
1684 }, {
1685 .mfr_id = MANUFACTURER_ST,
1686 .dev_id = M50FLW080B,
1687 .name = "ST M50FLW080B",
1688 .devtypes = CFI_DEVICETYPE_X8,
1689 .uaddr = MTD_UADDR_UNNECESSARY,
1690 .dev_size = SIZE_1MiB,
1691 .cmd_set = P_ID_INTEL_EXT,
1692 .nr_regions = 4,
1693 .regions = {
1694 ERASEINFO(0x1000,16),
1695 ERASEINFO(0x1000,16),
1696 ERASEINFO(0x10000,13),
1697 ERASEINFO(0x1000,16),
1593 } 1698 }
1594 }, { 1699 }, {
1595 .mfr_id = MANUFACTURER_TOSHIBA, 1700 .mfr_id = MANUFACTURER_TOSHIBA,
@@ -1696,9 +1801,21 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
1696{ 1801{
1697 map_word result; 1802 map_word result;
1698 unsigned long mask; 1803 unsigned long mask;
1699 u32 ofs = cfi_build_cmd_addr(0, cfi_interleave(cfi), cfi->device_type); 1804 int bank = 0;
1700 mask = (1 << (cfi->device_type * 8)) -1; 1805
1701 result = map_read(map, base + ofs); 1806 /* According to JEDEC "Standard Manufacturer's Identification Code"
1807 * (http://www.jedec.org/download/search/jep106W.pdf)
 1808 * the first several banks can contain 0x7f instead of the actual ID
1809 */
1810 do {
1811 uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8),
1812 cfi_interleave(cfi),
1813 cfi->device_type);
1814 mask = (1 << (cfi->device_type * 8)) - 1;
1815 result = map_read(map, base + ofs);
1816 bank++;
1817 } while ((result.x[0] & mask) == CONTINUATION_CODE);
1818
1702 return result.x[0] & mask; 1819 return result.x[0] & mask;
1703} 1820}
1704 1821
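
The jedec_read_mfr() change steps through successive banks until it reads something other than the JEDEC continuation code 0x7f, since per JEP106 the real manufacturer ID can be preceded by one or more continuation bytes. A small userspace sketch of the same decoding; the byte values in 'banks' are made up for illustration:

    #include <stdio.h>

    #define CONTINUATION_CODE 0x7f

    int main(void)
    {
            /* Bank-by-bank reads: two continuation codes, then the ID. */
            const unsigned char banks[] = { 0x7f, 0x7f, 0x01 };
            unsigned int bank = 0;
            unsigned char mfr;

            do {
                    mfr = banks[bank++];
            } while (mfr == CONTINUATION_CODE);

            printf("manufacturer id 0x%02x found in bank %u\n",
                   mfr, bank - 1);
            return 0;
    }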
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index fc478c0f93f5..494d30d0631a 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * Common code to handle absent "placeholder" devices 2 * Common code to handle absent "placeholder" devices
3 * Copyright 2001 Resilience Corporation <ebrower@resilience.com> 3 * Copyright 2001 Resilience Corporation <ebrower@resilience.com>
4 * $Id: map_absent.c,v 1.6 2005/11/07 11:14:23 gleixner Exp $
5 * 4 *
6 * This map driver is used to allocate "placeholder" MTD 5 * This map driver is used to allocate "placeholder" MTD
7 * devices on systems that have socketed/removable media. 6 * devices on systems that have socketed/removable media.
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 5cb6d5263661..072dd8abf33a 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * Common code to handle map devices which are simple RAM 2 * Common code to handle map devices which are simple RAM
3 * (C) 2000 Red Hat. GPL'd. 3 * (C) 2000 Red Hat. GPL'd.
4 * $Id: map_ram.c,v 1.22 2005/01/05 18:05:12 dwmw2 Exp $
5 */ 4 */
6 5
7#include <linux/module.h> 6#include <linux/module.h>
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index cb27f855074c..821d0ed6bae3 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * Common code to handle map devices which are simple ROM 2 * Common code to handle map devices which are simple ROM
3 * (C) 2000 Red Hat. GPL'd. 3 * (C) 2000 Red Hat. GPL'd.
4 * $Id: map_rom.c,v 1.23 2005/01/05 18:05:12 dwmw2 Exp $
5 */ 4 */
6 5
7#include <linux/module.h> 6#include <linux/module.h>
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index e472a0e9de9d..71bc07f149b7 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: cmdlinepart.c,v 1.19 2005/11/07 11:14:19 gleixner Exp $
3 *
4 * Read flash partition table from command line 2 * Read flash partition table from command line
5 * 3 *
6 * Copyright 2002 SYSGO Real-Time Solutions GmbH 4 * Copyright 2002 SYSGO Real-Time Solutions GmbH
@@ -308,7 +306,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
308 unsigned long offset; 306 unsigned long offset;
309 int i; 307 int i;
310 struct cmdline_mtd_partition *part; 308 struct cmdline_mtd_partition *part;
311 char *mtd_id = master->name; 309 const char *mtd_id = master->name;
312 310
313 /* parse command line */ 311 /* parse command line */
314 if (!cmdline_parsed) 312 if (!cmdline_parsed)
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 35ed1103dbb2..9c613f06623c 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -1,5 +1,4 @@
1# drivers/mtd/maps/Kconfig 1# drivers/mtd/maps/Kconfig
2# $Id: Kconfig,v 1.18 2005/11/07 11:14:24 gleixner Exp $
3 2
4menu "Self-contained MTD device drivers" 3menu "Self-contained MTD device drivers"
5 depends on MTD!=n 4 depends on MTD!=n
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 0f788d5c4bf8..0993d5cf3923 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -1,7 +1,6 @@
1# 1#
2# linux/drivers/devices/Makefile 2# linux/drivers/devices/Makefile
3# 3#
4# $Id: Makefile.common,v 1.7 2004/12/22 17:51:15 joern Exp $
5 4
6obj-$(CONFIG_MTD_DOC2000) += doc2000.o 5obj-$(CONFIG_MTD_DOC2000) += doc2000.o
7obj-$(CONFIG_MTD_DOC2001) += doc2001.o 6obj-$(CONFIG_MTD_DOC2001) += doc2001.o
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 7b72a1b36115..91fbba767635 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: block2mtd.c,v 1.30 2005/11/29 14:48:32 gleixner Exp $
3 *
4 * block2mtd.c - create an mtd from a block device 2 * block2mtd.c - create an mtd from a block device
5 * 3 *
6 * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk> 4 * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
@@ -20,9 +18,6 @@
20#include <linux/mutex.h> 18#include <linux/mutex.h>
21#include <linux/mount.h> 19#include <linux/mount.h>
22 20
23#define VERSION "$Revision: 1.30 $"
24
25
26#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args) 21#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
27#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args) 22#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)
28 23
@@ -453,7 +448,6 @@ MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
453static int __init block2mtd_init(void) 448static int __init block2mtd_init(void)
454{ 449{
455 int ret = 0; 450 int ret = 0;
456 INFO("version " VERSION);
457 451
458#ifndef MODULE 452#ifndef MODULE
459 if (strlen(block2mtd_paramline)) 453 if (strlen(block2mtd_paramline))
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index 846989f292e3..50de839c77a9 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -3,8 +3,6 @@
3 * Linux driver for Disk-On-Chip 2000 and Millennium 3 * Linux driver for Disk-On-Chip 2000 and Millennium
4 * (c) 1999 Machine Vision Holdings, Inc. 4 * (c) 1999 Machine Vision Holdings, Inc.
5 * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org> 5 * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
6 *
7 * $Id: doc2000.c,v 1.67 2005/11/07 11:14:24 gleixner Exp $
8 */ 6 */
9 7
10#include <linux/kernel.h> 8#include <linux/kernel.h>
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 6413efc045e0..e32c568c1145 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -3,8 +3,6 @@
3 * Linux driver for Disk-On-Chip Millennium 3 * Linux driver for Disk-On-Chip Millennium
4 * (c) 1999 Machine Vision Holdings, Inc. 4 * (c) 1999 Machine Vision Holdings, Inc.
5 * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org> 5 * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
6 *
7 * $Id: doc2001.c,v 1.49 2005/11/07 11:14:24 gleixner Exp $
8 */ 6 */
9 7
10#include <linux/kernel.h> 8#include <linux/kernel.h>
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 83be3461658f..d853f891b586 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -6,8 +6,6 @@
6 * (c) 1999 Machine Vision Holdings, Inc. 6 * (c) 1999 Machine Vision Holdings, Inc.
7 * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org> 7 * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
8 * 8 *
9 * $Id: doc2001plus.c,v 1.14 2005/11/07 11:14:24 gleixner Exp $
10 *
11 * Released under GPL 9 * Released under GPL
12 */ 10 */
13 11
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index fd8a8daba3a8..874e51b110a2 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -7,8 +7,6 @@
7 * Author: Fabrice Bellard (fabrice.bellard@netgem.com) 7 * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
8 * Copyright (C) 2000 Netgem S.A. 8 * Copyright (C) 2000 Netgem S.A.
9 * 9 *
10 * $Id: docecc.c,v 1.7 2005/11/07 11:14:25 gleixner Exp $
11 *
12 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index d8cc94ec4e50..6e62922942b1 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -4,9 +4,6 @@
4/* (C) 1999 Machine Vision Holdings, Inc. */ 4/* (C) 1999 Machine Vision Holdings, Inc. */
5/* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> */ 5/* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> */
6 6
7/* $Id: docprobe.c,v 1.46 2005/11/07 11:14:25 gleixner Exp $ */
8
9
10 7
11/* DOC_PASSIVE_PROBE: 8/* DOC_PASSIVE_PROBE:
12 In order to ensure that the BIOS checksum is correct at boot time, and 9 In order to ensure that the BIOS checksum is correct at boot time, and
@@ -79,8 +76,6 @@ static unsigned long __initdata doc_locations[] = {
79 0xe0000, 0xe2000, 0xe4000, 0xe6000, 76 0xe0000, 0xe2000, 0xe4000, 0xe6000,
80 0xe8000, 0xea000, 0xec000, 0xee000, 77 0xe8000, 0xea000, 0xec000, 0xee000,
81#endif /* CONFIG_MTD_DOCPROBE_HIGH */ 78#endif /* CONFIG_MTD_DOCPROBE_HIGH */
82#elif defined(__PPC__)
83 0xe4000000,
84#else 79#else
85#warning Unknown architecture for DiskOnChip. No default probe locations defined 80#warning Unknown architecture for DiskOnChip. No default probe locations defined
86#endif 81#endif
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 1d324e5c412d..f4bda4cee495 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -2,8 +2,6 @@
2/* 2/*
3 * MTD driver for the 28F160F3 Flash Memory (non-CFI) on LART. 3 * MTD driver for the 28F160F3 Flash Memory (non-CFI) on LART.
4 * 4 *
5 * $Id: lart.c,v 1.9 2005/11/07 11:14:25 gleixner Exp $
6 *
7 * Author: Abraham vd Merwe <abraham@2d3d.co.za> 5 * Author: Abraham vd Merwe <abraham@2d3d.co.za>
8 * 6 *
9 * Copyright (c) 2001, 2d3D, Inc. 7 * Copyright (c) 2001, 2d3D, Inc.
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index b402269301f6..b35c3333e210 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -33,6 +33,7 @@
33/* Flash opcodes. */ 33/* Flash opcodes. */
34#define OPCODE_WREN 0x06 /* Write enable */ 34#define OPCODE_WREN 0x06 /* Write enable */
35#define OPCODE_RDSR 0x05 /* Read status register */ 35#define OPCODE_RDSR 0x05 /* Read status register */
36#define OPCODE_WRSR 0x01 /* Write status register 1 byte */
36#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */ 37#define OPCODE_NORM_READ 0x03 /* Read data bytes (low frequency) */
37#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */ 38#define OPCODE_FAST_READ 0x0b /* Read data bytes (high frequency) */
38#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */ 39#define OPCODE_PP 0x02 /* Page program (up to 256 bytes) */
@@ -112,6 +113,17 @@ static int read_sr(struct m25p *flash)
112 return val; 113 return val;
113} 114}
114 115
116/*
117 * Write status register 1 byte
118 * Returns negative if error occurred.
119 */
120static int write_sr(struct m25p *flash, u8 val)
121{
122 flash->command[0] = OPCODE_WRSR;
123 flash->command[1] = val;
124
125 return spi_write(flash->spi, flash->command, 2);
126}
115 127
116/* 128/*
117 * Set write enable latch with Write Enable command. 129 * Set write enable latch with Write Enable command.
@@ -589,6 +601,16 @@ static int __devinit m25p_probe(struct spi_device *spi)
589 mutex_init(&flash->lock); 601 mutex_init(&flash->lock);
590 dev_set_drvdata(&spi->dev, flash); 602 dev_set_drvdata(&spi->dev, flash);
591 603
604 /*
605 * Atmel serial flash tend to power up
606 * with the software protection bits set
607 */
608
609 if (info->jedec_id >> 16 == 0x1f) {
610 write_enable(flash);
611 write_sr(flash, 0);
612 }
613
592 if (data && data->name) 614 if (data && data->name)
593 flash->mtd.name = data->name; 615 flash->mtd.name = data->name;
594 else 616 else
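(Aside: a compact sketch of the unprotect sequence added to m25p_probe() above. spi_send() below is a hypothetical logging stand-in for the kernel SPI call, and the JEDEC ID in main() is only an example; the point is the order of operations. JEDEC manufacturer byte 0x1f identifies Atmel, and since those parts tend to power up with the status-register protection bits set, the probe issues Write Enable (0x06) and then Write Status Register (0x01) with a value of 0 to clear them.)

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define OPCODE_WREN 0x06	/* Write enable */
#define OPCODE_WRSR 0x01	/* Write status register, 1 data byte */

/* Hypothetical stand-in for the kernel's spi_write(): just logs the bytes. */
static int spi_send(const uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}

/* Clear the status register (and with it the block-protect bits). */
static int flash_unprotect(uint32_t jedec_id)
{
	uint8_t wren = OPCODE_WREN;
	uint8_t wrsr[2] = { OPCODE_WRSR, 0x00 };

	if ((jedec_id >> 16) != 0x1f)	/* manufacturer byte 0x1f == Atmel */
		return 0;

	if (spi_send(&wren, 1))		/* set the write-enable latch first */
		return -1;
	return spi_send(wrsr, 2);	/* WRSR opcode + status value of 0 */
}

int main(void)
{
	return flash_unprotect(0x1f4501);	/* example Atmel-style JEDEC ID */
}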
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 9cff119a2024..6a9a24a80a6d 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -5,8 +5,6 @@
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version. 7 * 2 of the License, or (at your option) any later version.
8 *
9 * $Id: ms02-nv.c,v 1.11 2005/11/14 13:41:47 macro Exp $
10 */ 8 */
11 9
12#include <linux/init.h> 10#include <linux/init.h>
diff --git a/drivers/mtd/devices/ms02-nv.h b/drivers/mtd/devices/ms02-nv.h
index 8a6eef7cfee3..04deafd3a771 100644
--- a/drivers/mtd/devices/ms02-nv.h
+++ b/drivers/mtd/devices/ms02-nv.h
@@ -9,8 +9,6 @@
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 *
13 * $Id: ms02-nv.h,v 1.3 2003/08/19 09:25:36 dwmw2 Exp $
14 */ 12 */
15 13
16#include <linux/ioport.h> 14#include <linux/ioport.h>
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index b35e4813a3a5..54e36bfc2c3b 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -82,7 +82,7 @@
82 82
83 83
84struct dataflash { 84struct dataflash {
85 u8 command[4]; 85 uint8_t command[4];
86 char name[24]; 86 char name[24];
87 87
88 unsigned partitioned:1; 88 unsigned partitioned:1;
@@ -150,7 +150,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
150 struct spi_transfer x = { .tx_dma = 0, }; 150 struct spi_transfer x = { .tx_dma = 0, };
151 struct spi_message msg; 151 struct spi_message msg;
152 unsigned blocksize = priv->page_size << 3; 152 unsigned blocksize = priv->page_size << 3;
153 u8 *command; 153 uint8_t *command;
154 154
155 DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n", 155 DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n",
156 spi->dev.bus_id, 156 spi->dev.bus_id,
@@ -182,8 +182,8 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
182 pageaddr = pageaddr << priv->page_offset; 182 pageaddr = pageaddr << priv->page_offset;
183 183
184 command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE; 184 command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE;
185 command[1] = (u8)(pageaddr >> 16); 185 command[1] = (uint8_t)(pageaddr >> 16);
186 command[2] = (u8)(pageaddr >> 8); 186 command[2] = (uint8_t)(pageaddr >> 8);
187 command[3] = 0; 187 command[3] = 0;
188 188
189 DEBUG(MTD_DEBUG_LEVEL3, "ERASE %s: (%x) %x %x %x [%i]\n", 189 DEBUG(MTD_DEBUG_LEVEL3, "ERASE %s: (%x) %x %x %x [%i]\n",
@@ -234,7 +234,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
234 struct spi_transfer x[2] = { { .tx_dma = 0, }, }; 234 struct spi_transfer x[2] = { { .tx_dma = 0, }, };
235 struct spi_message msg; 235 struct spi_message msg;
236 unsigned int addr; 236 unsigned int addr;
237 u8 *command; 237 uint8_t *command;
238 int status; 238 int status;
239 239
240 DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n", 240 DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n",
@@ -274,9 +274,9 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
274 * fewer "don't care" bytes. Both buffers stay unchanged. 274 * fewer "don't care" bytes. Both buffers stay unchanged.
275 */ 275 */
276 command[0] = OP_READ_CONTINUOUS; 276 command[0] = OP_READ_CONTINUOUS;
277 command[1] = (u8)(addr >> 16); 277 command[1] = (uint8_t)(addr >> 16);
278 command[2] = (u8)(addr >> 8); 278 command[2] = (uint8_t)(addr >> 8);
279 command[3] = (u8)(addr >> 0); 279 command[3] = (uint8_t)(addr >> 0);
280 /* plus 4 "don't care" bytes */ 280 /* plus 4 "don't care" bytes */
281 281
282 status = spi_sync(priv->spi, &msg); 282 status = spi_sync(priv->spi, &msg);
@@ -311,7 +311,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
311 size_t remaining = len; 311 size_t remaining = len;
312 u_char *writebuf = (u_char *) buf; 312 u_char *writebuf = (u_char *) buf;
313 int status = -EINVAL; 313 int status = -EINVAL;
314 u8 *command; 314 uint8_t *command;
315 315
316 DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n", 316 DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n",
317 spi->dev.bus_id, (unsigned)to, (unsigned)(to + len)); 317 spi->dev.bus_id, (unsigned)to, (unsigned)(to + len));
@@ -487,7 +487,9 @@ add_dataflash(struct spi_device *spi, char *name,
487 device->write = dataflash_write; 487 device->write = dataflash_write;
488 device->priv = priv; 488 device->priv = priv;
489 489
490 dev_info(&spi->dev, "%s (%d KBytes)\n", name, device->size/1024); 490 dev_info(&spi->dev, "%s (%d KBytes) pagesize %d bytes, "
491 "erasesize %d bytes\n", name, device->size/1024,
492 pagesize, pagesize * 8); /* 8 pages = 1 block */
491 dev_set_drvdata(&spi->dev, priv); 493 dev_set_drvdata(&spi->dev, priv);
492 494
493 if (mtd_has_partitions()) { 495 if (mtd_has_partitions()) {
@@ -521,7 +523,7 @@ add_dataflash(struct spi_device *spi, char *name,
521 * 523 *
522 * Device Density ID code #Pages PageSize Offset 524 * Device Density ID code #Pages PageSize Offset
523 * AT45DB011B 1Mbit (128K) xx0011xx (0x0c) 512 264 9 525 * AT45DB011B 1Mbit (128K) xx0011xx (0x0c) 512 264 9
524 * AT45DB021B 2Mbit (256K) xx0101xx (0x14) 1025 264 9 526 * AT45DB021B 2Mbit (256K) xx0101xx (0x14) 1024 264 9
525 * AT45DB041B 4Mbit (512K) xx0111xx (0x1c) 2048 264 9 527 * AT45DB041B 4Mbit (512K) xx0111xx (0x1c) 2048 264 9
526 * AT45DB081B 8Mbit (1M) xx1001xx (0x24) 4096 264 9 528 * AT45DB081B 8Mbit (1M) xx1001xx (0x24) 4096 264 9
527 * AT45DB0161B 16Mbit (2M) xx1011xx (0x2c) 4096 528 10 529 * AT45DB0161B 16Mbit (2M) xx1011xx (0x2c) 4096 528 10
@@ -529,9 +531,114 @@ add_dataflash(struct spi_device *spi, char *name,
529 * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11 531 * AT45DB0642 64Mbit (8M) xx111xxx (0x3c) 8192 1056 11
530 * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11 532 * AT45DB1282 128Mbit (16M) xx0100xx (0x10) 16384 1056 11
531 */ 533 */
534
535struct flash_info {
536 char *name;
537
538 /* JEDEC id zero means "no ID" (most older chips); otherwise it has
539 * a high byte of zero plus three data bytes: the manufacturer id,
540 * then a two byte device id.
541 */
542 uint32_t jedec_id;
543
544 /* The size listed here is what works with OPCODE_SE, which isn't
545 * necessarily called a "sector" by the vendor.
546 */
547 unsigned nr_pages;
548 uint16_t pagesize;
549 uint16_t pageoffset;
550
551 uint16_t flags;
552#define SUP_POW2PS 0x02
553#define IS_POW2PS 0x01
554};
555
556static struct flash_info __devinitdata dataflash_data [] = {
557
558 { "at45db011d", 0x1f2200, 512, 264, 9, SUP_POW2PS},
559 { "at45db011d", 0x1f2200, 512, 256, 8, SUP_POW2PS | IS_POW2PS},
560
561 { "at45db021d", 0x1f2300, 1024, 264, 9, SUP_POW2PS},
562 { "at45db021d", 0x1f2300, 1024, 256, 8, SUP_POW2PS | IS_POW2PS},
563
564 { "at45db041d", 0x1f2400, 2048, 264, 9, SUP_POW2PS},
565 { "at45db041d", 0x1f2400, 2048, 256, 8, SUP_POW2PS | IS_POW2PS},
566
567 { "at45db081d", 0x1f2500, 4096, 264, 9, SUP_POW2PS},
568 { "at45db081d", 0x1f2500, 4096, 256, 8, SUP_POW2PS | IS_POW2PS},
569
570 { "at45db161d", 0x1f2600, 4096, 528, 10, SUP_POW2PS},
571 { "at45db161d", 0x1f2600, 4096, 512, 9, SUP_POW2PS | IS_POW2PS},
572
573 { "at45db321c", 0x1f2700, 8192, 528, 10, },
574
575 { "at45db321d", 0x1f2701, 8192, 528, 10, SUP_POW2PS},
576 { "at45db321d", 0x1f2701, 8192, 512, 9, SUP_POW2PS | IS_POW2PS},
577
578 { "at45db641d", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
579 { "at45db641d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
580};
581
582static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
583{
584 int tmp;
585 uint8_t code = OP_READ_ID;
586 uint8_t id[3];
587 uint32_t jedec;
588 struct flash_info *info;
589 int status;
590
591
592 /* JEDEC also defines an optional "extended device information"
593 * string for after vendor-specific data, after the three bytes
594 * we use here. Supporting some chips might require using it.
595 */
596 tmp = spi_write_then_read(spi, &code, 1, id, 3);
597 if (tmp < 0) {
598 DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
599 spi->dev.bus_id, tmp);
600 return NULL;
601 }
602 jedec = id[0];
603 jedec = jedec << 8;
604 jedec |= id[1];
605 jedec = jedec << 8;
606 jedec |= id[2];
607
608 for (tmp = 0, info = dataflash_data;
609 tmp < ARRAY_SIZE(dataflash_data);
610 tmp++, info++) {
611 if (info->jedec_id == jedec) {
612 if (info->flags & SUP_POW2PS) {
613 status = dataflash_status(spi);
614 if (status & 0x1)
615 /* return power of 2 pagesize */
616 return ++info;
617 else
618 return info;
619 }
620 }
621 }
622 return NULL;
623}
624
532static int __devinit dataflash_probe(struct spi_device *spi) 625static int __devinit dataflash_probe(struct spi_device *spi)
533{ 626{
534 int status; 627 int status;
628 struct flash_info *info;
629
630 /*
631 * Try to detect dataflash by JEDEC ID.
632 * If it succeeds we know we have either a C or D part.
633 * D will support power of 2 pagesize option.
634 */
635
636 info = jedec_probe(spi);
637
638 if (info != NULL)
639 return add_dataflash(spi, info->name, info->nr_pages,
640 info->pagesize, info->pageoffset);
641
535 642
536 status = dataflash_status(spi); 643 status = dataflash_status(spi);
537 if (status <= 0 || status == 0xff) { 644 if (status <= 0 || status == 0xff) {
@@ -551,16 +658,16 @@ static int __devinit dataflash_probe(struct spi_device *spi)
551 status = add_dataflash(spi, "AT45DB011B", 512, 264, 9); 658 status = add_dataflash(spi, "AT45DB011B", 512, 264, 9);
552 break; 659 break;
553 case 0x14: /* 0 1 0 1 x x */ 660 case 0x14: /* 0 1 0 1 x x */
554 status = add_dataflash(spi, "AT45DB021B", 1025, 264, 9); 661 status = add_dataflash(spi, "AT45DB021B", 1024, 264, 9);
555 break; 662 break;
556 case 0x1c: /* 0 1 1 1 x x */ 663 case 0x1c: /* 0 1 1 1 x x */
557 status = add_dataflash(spi, "AT45DB041x", 2048, 264, 9); 664 status = add_dataflash(spi, "AT45DB041B", 2048, 264, 9);
558 break; 665 break;
559 case 0x24: /* 1 0 0 1 x x */ 666 case 0x24: /* 1 0 0 1 x x */
560 status = add_dataflash(spi, "AT45DB081B", 4096, 264, 9); 667 status = add_dataflash(spi, "AT45DB081B", 4096, 264, 9);
561 break; 668 break;
562 case 0x2c: /* 1 0 1 1 x x */ 669 case 0x2c: /* 1 0 1 1 x x */
563 status = add_dataflash(spi, "AT45DB161x", 4096, 528, 10); 670 status = add_dataflash(spi, "AT45DB161B", 4096, 528, 10);
564 break; 671 break;
565 case 0x34: /* 1 1 0 1 x x */ 672 case 0x34: /* 1 1 0 1 x x */
566 status = add_dataflash(spi, "AT45DB321x", 8192, 528, 10); 673 status = add_dataflash(spi, "AT45DB321x", 8192, 528, 10);
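(Aside: the dataflash_data[] table added above deliberately lists each "D" part twice, default geometry first and power-of-2 geometry immediately after, which is why jedec_probe() returns ++info when status bit 0 is set — the bit the probe tests with (status & 0x1). A standalone sketch of that lookup, using a two-entry excerpt of the table; the helper and variable names are illustrative only.)

#include <stdint.h>
#include <stdio.h>

#define SUP_POW2PS 0x02	/* chip can be configured for power-of-2 pages */
#define IS_POW2PS  0x01	/* entry describes the power-of-2 geometry */

struct df_info {
	const char *name;
	uint32_t jedec_id;
	unsigned nr_pages;
	uint16_t pagesize;
	uint16_t flags;
};

/* Two entries per "D" part: default geometry first, power-of-2 right after. */
static const struct df_info table[] = {
	{ "at45db161d", 0x1f2600, 4096, 528, SUP_POW2PS },
	{ "at45db161d", 0x1f2600, 4096, 512, SUP_POW2PS | IS_POW2PS },
};

/*
 * Pick the geometry for a probed JEDEC ID.  status_bit0 mirrors bit 0 of
 * the DataFlash status register: when set, the chip runs with power-of-2
 * pages, so the *next* table entry applies -- the same trick as the
 * "return ++info" in the hunk above.
 */
static const struct df_info *pick(uint32_t jedec, int status_bit0)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].jedec_id != jedec)
			continue;
		if ((table[i].flags & SUP_POW2PS) && status_bit0)
			return &table[i + 1];
		return &table[i];
	}
	return NULL;
}

int main(void)
{
	const struct df_info *info = pick(0x1f2600, 1);

	if (info)
		printf("%s: %u pages of %u bytes\n",
		       info->name, info->nr_pages, info->pagesize);
	return 0;
}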
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 0399be178620..3aaca88847d3 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -1,6 +1,5 @@
1/* 1/*
2 * mtdram - a test mtd device 2 * mtdram - a test mtd device
3 * $Id: mtdram.c,v 1.37 2005/04/21 03:42:11 joern Exp $
4 * Author: Alexander Larsson <alex@cendio.se> 3 * Author: Alexander Larsson <alex@cendio.se>
5 * 4 *
6 * Copyright (c) 1999 Alexander Larsson <alex@cendio.se> 5 * Copyright (c) 1999 Alexander Larsson <alex@cendio.se>
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index c7987b1c5e01..088fbb7595b5 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -1,6 +1,4 @@
1/** 1/**
2 * $Id: phram.c,v 1.16 2005/11/07 11:14:25 gleixner Exp $
3 *
4 * Copyright (c) ???? Jochen Schäuble <psionic@psionic.de> 2 * Copyright (c) ???? Jochen Schäuble <psionic@psionic.de>
5 * Copyright (c) 2003-2004 Joern Engel <joern@wh.fh-wedel.de> 3 * Copyright (c) 2003-2004 Joern Engel <joern@wh.fh-wedel.de>
6 * 4 *
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index bc9981749064..d38bca64bb15 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: pmc551.c,v 1.32 2005/11/07 11:14:25 gleixner Exp $
3 *
4 * PMC551 PCI Mezzanine Ram Device 2 * PMC551 PCI Mezzanine Ram Device
5 * 3 *
6 * Author: 4 * Author:
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index cb86db746f28..a425d09f35a0 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -1,7 +1,5 @@
1/*====================================================================== 1/*======================================================================
2 2
3 $Id: slram.c,v 1.36 2005/11/07 11:14:25 gleixner Exp $
4
5 This driver provides a method to access memory not used by the kernel 3 This driver provides a method to access memory not used by the kernel
6 itself (i.e. if the kernel commandline mem=xxx is used). To actually 4 itself (i.e. if the kernel commandline mem=xxx is used). To actually
7 use slram at least mtdblock or mtdchar is required (for block or 5 use slram at least mtdblock or mtdchar is required (for block or
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 5c29872184e6..f34f20c78911 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -1,5 +1,4 @@
1/* This version ported to the Linux-MTD system by dwmw2@infradead.org 1/* This version ported to the Linux-MTD system by dwmw2@infradead.org
2 * $Id: ftl.c,v 1.59 2005/11/29 14:48:31 gleixner Exp $
3 * 2 *
4 * Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br> 3 * Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5 * - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups 4 * - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
@@ -1078,8 +1077,6 @@ static struct mtd_blktrans_ops ftl_tr = {
1078 1077
1079static int init_ftl(void) 1078static int init_ftl(void)
1080{ 1079{
1081 DEBUG(0, "$Id: ftl.c,v 1.59 2005/11/29 14:48:31 gleixner Exp $\n");
1082
1083 return register_mtd_blktrans(&ftl_tr); 1080 return register_mtd_blktrans(&ftl_tr);
1084} 1081}
1085 1082
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index b0e396504e67..c4f9d3378b24 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -7,8 +7,6 @@
7 * (c) 1999 Machine Vision Holdings, Inc. 7 * (c) 1999 Machine Vision Holdings, Inc.
8 * Author: David Woodhouse <dwmw2@infradead.org> 8 * Author: David Woodhouse <dwmw2@infradead.org>
9 * 9 *
10 * $Id: inftlcore.c,v 1.19 2005/11/07 11:14:20 gleixner Exp $
11 *
12 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or 12 * the Free Software Foundation; either version 2 of the License, or
@@ -953,9 +951,6 @@ static struct mtd_blktrans_ops inftl_tr = {
953 951
954static int __init init_inftl(void) 952static int __init init_inftl(void)
955{ 953{
956 printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.19 $, "
957 "inftlmount.c %s\n", inftlmountrev);
958
959 return register_mtd_blktrans(&inftl_tr); 954 return register_mtd_blktrans(&inftl_tr);
960} 955}
961 956
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index c551d2f0779c..9113628ed1ef 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -8,8 +8,6 @@
8 * Author: Fabrice Bellard (fabrice.bellard@netgem.com) 8 * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
9 * Copyright (C) 2000 Netgem S.A. 9 * Copyright (C) 2000 Netgem S.A.
10 * 10 *
11 * $Id: inftlmount.c,v 1.18 2005/11/07 11:14:20 gleixner Exp $
12 *
13 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by 12 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or 13 * the Free Software Foundation; either version 2 of the License, or
@@ -39,8 +37,6 @@
39#include <linux/mtd/inftl.h> 37#include <linux/mtd/inftl.h>
40#include <linux/mtd/compatmac.h> 38#include <linux/mtd/compatmac.h>
41 39
42char inftlmountrev[]="$Revision: 1.18 $";
43
44/* 40/*
45 * find_boot_record: Find the INFTL Media Header and its Spare copy which 41 * find_boot_record: Find the INFTL Media Header and its Spare copy which
46 * contains the various device information of the INFTL partition and 42 * contains the various device information of the INFTL partition and
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index d2fbc2964523..df8e00bba07b 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -1,5 +1,4 @@
1# drivers/mtd/maps/Kconfig 1# drivers/mtd/maps/Kconfig
2# $Id: Kconfig,v 1.61 2005/11/07 11:14:26 gleixner Exp $
3 2
4menu "Mapping drivers for chip access" 3menu "Mapping drivers for chip access"
5 depends on MTD!=n 4 depends on MTD!=n
@@ -510,6 +509,17 @@ config MTD_PCMCIA_ANONYMOUS
510 509
511 If unsure, say N. 510 If unsure, say N.
512 511
512config MTD_BFIN_ASYNC
513 tristate "Blackfin BF533-STAMP Flash Chip Support"
514 depends on BFIN533_STAMP && MTD_CFI
515 select MTD_PARTITIONS
516 default y
517 help
518 Map driver which allows for simultaneous utilization of
519 ethernet and CFI parallel flash.
520
521 If compiled as a module, it will be called bfin-async-flash.
522
513config MTD_UCLINUX 523config MTD_UCLINUX
514 tristate "Generic uClinux RAM/ROM filesystem support" 524 tristate "Generic uClinux RAM/ROM filesystem support"
515 depends on MTD_PARTITIONS && !MMU 525 depends on MTD_PARTITIONS && !MMU
@@ -539,24 +549,6 @@ config MTD_DMV182
539 help 549 help
540 Map driver for Dy-4 SVME/DMV-182 board. 550 Map driver for Dy-4 SVME/DMV-182 board.
541 551
542config MTD_BAST
543 tristate "Map driver for Simtec BAST (EB2410ITX) or Thorcom VR1000"
544 depends on ARCH_BAST || MACH_VR1000
545 select MTD_PARTITIONS
546 select MTD_MAP_BANK_WIDTH_16
547 select MTD_JEDECPROBE
548 help
549 Map driver for NOR flash on the Simtec BAST (EB2410ITX), or the
550 Thorcom VR1000
551
552 Note, this driver *cannot* over-ride the WP link on the
553 board, or currently detect the state of the link.
554
555config MTD_BAST_MAXSIZE
556 int "Maximum size for BAST flash area (MiB)"
557 depends on MTD_BAST
558 default "4"
559
560config MTD_SHARP_SL 552config MTD_SHARP_SL
561 tristate "ROM mapped on Sharp SL Series" 553 tristate "ROM mapped on Sharp SL Series"
562 depends on ARCH_PXA 554 depends on ARCH_PXA
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index c6ce8673dab2..6cda6df973e5 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -1,7 +1,6 @@
1# 1#
2# linux/drivers/maps/Makefile 2# linux/drivers/maps/Makefile
3# 3#
4# $Id: Makefile.common,v 1.34 2005/11/07 11:14:26 gleixner Exp $
5 4
6ifeq ($(CONFIG_MTD_COMPLEX_MAPPINGS),y) 5ifeq ($(CONFIG_MTD_COMPLEX_MAPPINGS),y)
7obj-$(CONFIG_MTD) += map_funcs.o 6obj-$(CONFIG_MTD) += map_funcs.o
@@ -10,7 +9,6 @@ endif
10# Chip mappings 9# Chip mappings
11obj-$(CONFIG_MTD_CDB89712) += cdb89712.o 10obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
12obj-$(CONFIG_MTD_ARM_INTEGRATOR)+= integrator-flash.o 11obj-$(CONFIG_MTD_ARM_INTEGRATOR)+= integrator-flash.o
13obj-$(CONFIG_MTD_BAST) += bast-flash.o
14obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o 12obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
15obj-$(CONFIG_MTD_DC21285) += dc21285.o 13obj-$(CONFIG_MTD_DC21285) += dc21285.o
16obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o 14obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
@@ -66,3 +64,4 @@ obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o
66obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o 64obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
67obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o 65obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o
68obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o 66obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
67obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 728aed6ad722..948b86f35ef4 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -2,7 +2,6 @@
2 * amd76xrom.c 2 * amd76xrom.c
3 * 3 *
4 * Normal mappings of chips in physical memory 4 * Normal mappings of chips in physical memory
5 * $Id: amd76xrom.c,v 1.21 2005/11/07 11:14:26 gleixner Exp $
6 */ 5 */
7 6
8#include <linux/module.h> 7#include <linux/module.h>
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index 7ed3424dd959..cf32267263df 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -2,8 +2,6 @@
2 * NV-RAM memory access on autcpu12 2 * NV-RAM memory access on autcpu12
3 * (C) 2002 Thomas Gleixner (gleixner@autronix.de) 3 * (C) 2002 Thomas Gleixner (gleixner@autronix.de)
4 * 4 *
5 * $Id: autcpu12-nvram.c,v 1.9 2005/11/07 11:14:26 gleixner Exp $
6 *
7 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or 7 * the Free Software Foundation; either version 2 of the License, or
diff --git a/drivers/mtd/maps/bast-flash.c b/drivers/mtd/maps/bast-flash.c
deleted file mode 100644
index 1f492062f8ca..000000000000
--- a/drivers/mtd/maps/bast-flash.c
+++ /dev/null
@@ -1,226 +0,0 @@
1/* linux/drivers/mtd/maps/bast-flash.c
2 *
3 * Copyright (c) 2004-2005 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * Simtec Bast (EB2410ITX) NOR MTD Mapping driver
7 *
8 * Changelog:
9 * 20-Sep-2004 BJD Initial version
10 * 17-Jan-2005 BJD Add whole device if no partitions found
11 *
12 * $Id: bast-flash.c,v 1.5 2005/11/07 11:14:26 gleixner Exp $
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27*/
28
29#include <linux/module.h>
30#include <linux/types.h>
31#include <linux/init.h>
32#include <linux/kernel.h>
33#include <linux/string.h>
34#include <linux/ioport.h>
35#include <linux/device.h>
36#include <linux/slab.h>
37#include <linux/platform_device.h>
38#include <linux/mtd/mtd.h>
39#include <linux/mtd/map.h>
40#include <linux/mtd/partitions.h>
41
42#include <asm/io.h>
43#include <asm/mach/flash.h>
44
45#include <asm/arch/map.h>
46#include <asm/arch/bast-map.h>
47#include <asm/arch/bast-cpld.h>
48
49#ifdef CONFIG_MTD_BAST_MAXSIZE
50#define AREA_MAXSIZE (CONFIG_MTD_BAST_MAXSIZE * SZ_1M)
51#else
52#define AREA_MAXSIZE (32 * SZ_1M)
53#endif
54
55#define PFX "bast-flash: "
56
57struct bast_flash_info {
58 struct mtd_info *mtd;
59 struct map_info map;
60 struct mtd_partition *partitions;
61 struct resource *area;
62};
63
64static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
65
66static void bast_flash_setrw(int to)
67{
68 unsigned int val;
69 unsigned long flags;
70
71 local_irq_save(flags);
72 val = __raw_readb(BAST_VA_CTRL3);
73
74 if (to)
75 val |= BAST_CPLD_CTRL3_ROMWEN;
76 else
77 val &= ~BAST_CPLD_CTRL3_ROMWEN;
78
79 pr_debug("new cpld ctrl3=%02x\n", val);
80
81 __raw_writeb(val, BAST_VA_CTRL3);
82 local_irq_restore(flags);
83}
84
85static int bast_flash_remove(struct platform_device *pdev)
86{
87 struct bast_flash_info *info = platform_get_drvdata(pdev);
88
89 platform_set_drvdata(pdev, NULL);
90
91 if (info == NULL)
92 return 0;
93
94 if (info->map.virt != NULL)
95 iounmap(info->map.virt);
96
97 if (info->mtd) {
98 del_mtd_partitions(info->mtd);
99 map_destroy(info->mtd);
100 }
101
102 kfree(info->partitions);
103
104 if (info->area) {
105 release_resource(info->area);
106 kfree(info->area);
107 }
108
109 kfree(info);
110
111 return 0;
112}
113
114static int bast_flash_probe(struct platform_device *pdev)
115{
116 struct bast_flash_info *info;
117 struct resource *res;
118 int err = 0;
119
120 info = kmalloc(sizeof(*info), GFP_KERNEL);
121 if (info == NULL) {
122 printk(KERN_ERR PFX "no memory for flash info\n");
123 err = -ENOMEM;
124 goto exit_error;
125 }
126
127 memzero(info, sizeof(*info));
128 platform_set_drvdata(pdev, info);
129
130 res = pdev->resource; /* assume that the flash has one resource */
131
132 info->map.phys = res->start;
133 info->map.size = res->end - res->start + 1;
134 info->map.name = pdev->dev.bus_id;
135 info->map.bankwidth = 2;
136
137 if (info->map.size > AREA_MAXSIZE)
138 info->map.size = AREA_MAXSIZE;
139
140 pr_debug("%s: area %08lx, size %ld\n", __func__,
141 info->map.phys, info->map.size);
142
143 info->area = request_mem_region(res->start, info->map.size,
144 pdev->name);
145 if (info->area == NULL) {
146 printk(KERN_ERR PFX "cannot reserve flash memory region\n");
147 err = -ENOENT;
148 goto exit_error;
149 }
150
151 info->map.virt = ioremap(res->start, info->map.size);
152 pr_debug("%s: virt at %08x\n", __func__, (int)info->map.virt);
153
154 if (info->map.virt == 0) {
155 printk(KERN_ERR PFX "failed to ioremap() region\n");
156 err = -EIO;
157 goto exit_error;
158 }
159
160 simple_map_init(&info->map);
161
162 /* enable the write to the flash area */
163
164 bast_flash_setrw(1);
165
166 /* probe for the device(s) */
167
168 info->mtd = do_map_probe("jedec_probe", &info->map);
169 if (info->mtd == NULL)
170 info->mtd = do_map_probe("cfi_probe", &info->map);
171
172 if (info->mtd == NULL) {
173 printk(KERN_ERR PFX "map_probe() failed\n");
174 err = -ENXIO;
175 goto exit_error;
176 }
177
178 /* mark ourselves as the owner */
179 info->mtd->owner = THIS_MODULE;
180
181 err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
182 if (err > 0) {
183 err = add_mtd_partitions(info->mtd, info->partitions, err);
184 if (err)
185 printk(KERN_ERR PFX "cannot add/parse partitions\n");
186 } else {
187 err = add_mtd_device(info->mtd);
188 }
189
190 if (err == 0)
191 return 0;
192
193 /* fall through to exit error */
194
195 exit_error:
196 bast_flash_remove(pdev);
197 return err;
198}
199
200static struct platform_driver bast_flash_driver = {
201 .probe = bast_flash_probe,
202 .remove = bast_flash_remove,
203 .driver = {
204 .name = "bast-nor",
205 .owner = THIS_MODULE,
206 },
207};
208
209static int __init bast_flash_init(void)
210{
211 printk("BAST NOR-Flash Driver, (c) 2004 Simtec Electronics\n");
212 return platform_driver_register(&bast_flash_driver);
213}
214
215static void __exit bast_flash_exit(void)
216{
217 platform_driver_unregister(&bast_flash_driver);
218}
219
220module_init(bast_flash_init);
221module_exit(bast_flash_exit);
222
223MODULE_LICENSE("GPL");
224MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
225MODULE_DESCRIPTION("BAST MTD Map driver");
226MODULE_ALIAS("platform:bast-nor");
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
new file mode 100644
index 000000000000..6fec86aaed7e
--- /dev/null
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -0,0 +1,219 @@
1/*
2 * drivers/mtd/maps/bfin-async-flash.c
3 *
4 * Handle the case where flash memory and ethernet mac/phy are
5 * mapped onto the same async bank. The BF533-STAMP does this
6 * for example. All board-specific configuration goes in your
7 * board resources file.
8 *
9 * Copyright 2000 Nicolas Pitre <nico@cam.org>
10 * Copyright 2005-2008 Analog Devices Inc.
11 *
12 * Enter bugs at http://blackfin.uclinux.org/
13 *
14 * Licensed under the GPL-2 or later.
15 */
16
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/mtd/mtd.h>
21#include <linux/mtd/map.h>
22#include <linux/mtd/partitions.h>
23#include <linux/mtd/physmap.h>
24#include <linux/platform_device.h>
25#include <linux/types.h>
26
27#include <asm/blackfin.h>
28#include <linux/gpio.h>
29#include <linux/io.h>
30#include <asm/unaligned.h>
31
32#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
33
34#define DRIVER_NAME "bfin-async-flash"
35
36struct async_state {
37 struct mtd_info *mtd;
38 struct map_info map;
39 int enet_flash_pin;
40 uint32_t flash_ambctl0, flash_ambctl1;
41 uint32_t save_ambctl0, save_ambctl1;
42 unsigned long irq_flags;
43};
44
45static void switch_to_flash(struct async_state *state)
46{
47 local_irq_save(state->irq_flags);
48
49 gpio_set_value(state->enet_flash_pin, 0);
50
51 state->save_ambctl0 = bfin_read_EBIU_AMBCTL0();
52 state->save_ambctl1 = bfin_read_EBIU_AMBCTL1();
53 bfin_write_EBIU_AMBCTL0(state->flash_ambctl0);
54 bfin_write_EBIU_AMBCTL1(state->flash_ambctl1);
55 SSYNC();
56}
57
58static void switch_back(struct async_state *state)
59{
60 bfin_write_EBIU_AMBCTL0(state->save_ambctl0);
61 bfin_write_EBIU_AMBCTL1(state->save_ambctl1);
62 SSYNC();
63
64 gpio_set_value(state->enet_flash_pin, 1);
65
66 local_irq_restore(state->irq_flags);
67}
68
69static map_word bfin_read(struct map_info *map, unsigned long ofs)
70{
71 struct async_state *state = (struct async_state *)map->map_priv_1;
72 uint16_t word;
73 map_word test;
74
75 switch_to_flash(state);
76
77 word = readw(map->virt + ofs);
78
79 switch_back(state);
80
81 test.x[0] = word;
82 return test;
83}
84
85static void bfin_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
86{
87 struct async_state *state = (struct async_state *)map->map_priv_1;
88
89 switch_to_flash(state);
90
91 memcpy(to, map->virt + from, len);
92
93 switch_back(state);
94}
95
96static void bfin_write(struct map_info *map, map_word d1, unsigned long ofs)
97{
98 struct async_state *state = (struct async_state *)map->map_priv_1;
99 uint16_t d;
100
101 d = d1.x[0];
102
103 switch_to_flash(state);
104
105 writew(d, map->virt + ofs);
106 SSYNC();
107
108 switch_back(state);
109}
110
111static void bfin_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
112{
113 struct async_state *state = (struct async_state *)map->map_priv_1;
114
115 switch_to_flash(state);
116
117 memcpy(map->virt + to, from, len);
118 SSYNC();
119
120 switch_back(state);
121}
122
123#ifdef CONFIG_MTD_PARTITIONS
124static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
125#endif
126
127static int __devinit bfin_flash_probe(struct platform_device *pdev)
128{
129 int ret;
130 struct physmap_flash_data *pdata = pdev->dev.platform_data;
131 struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
132 struct resource *flash_ambctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
133 struct async_state *state;
134
135 state = kzalloc(sizeof(*state), GFP_KERNEL);
136 if (!state)
137 return -ENOMEM;
138
139 state->map.name = DRIVER_NAME;
140 state->map.read = bfin_read;
141 state->map.copy_from = bfin_copy_from;
142 state->map.write = bfin_write;
143 state->map.copy_to = bfin_copy_to;
144 state->map.bankwidth = pdata->width;
145 state->map.size = memory->end - memory->start + 1;
146 state->map.virt = (void __iomem *)memory->start;
147 state->map.phys = memory->start;
148 state->map.map_priv_1 = (unsigned long)state;
149 state->enet_flash_pin = platform_get_irq(pdev, 0);
150 state->flash_ambctl0 = flash_ambctl->start;
151 state->flash_ambctl1 = flash_ambctl->end;
152
153 if (gpio_request(state->enet_flash_pin, DRIVER_NAME)) {
154 pr_devinit(KERN_ERR DRIVER_NAME ": Failed to request gpio %d\n", state->enet_flash_pin);
155 return -EBUSY;
156 }
157 gpio_direction_output(state->enet_flash_pin, 1);
158
159 pr_devinit(KERN_NOTICE DRIVER_NAME ": probing %d-bit flash bus\n", state->map.bankwidth * 8);
160 state->mtd = do_map_probe(memory->name, &state->map);
161 if (!state->mtd)
162 return -ENXIO;
163
164#ifdef CONFIG_MTD_PARTITIONS
165 ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
166 if (ret > 0) {
167 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
168 add_mtd_partitions(state->mtd, pdata->parts, ret);
169
170 } else if (pdata->nr_parts) {
171 pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
172 add_mtd_partitions(state->mtd, pdata->parts, pdata->nr_parts);
173
174 } else
175#endif
176 {
177 pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
178 add_mtd_device(state->mtd);
179 }
180
181 platform_set_drvdata(pdev, state);
182
183 return 0;
184}
185
186static int __devexit bfin_flash_remove(struct platform_device *pdev)
187{
188 struct async_state *state = platform_get_drvdata(pdev);
189 gpio_free(state->enet_flash_pin);
190#ifdef CONFIG_MTD_PARTITIONS
191 del_mtd_partitions(state->mtd);
192#endif
193 map_destroy(state->mtd);
194 kfree(state);
195 return 0;
196}
197
198static struct platform_driver bfin_flash_driver = {
199 .probe = bfin_flash_probe,
200 .remove = __devexit_p(bfin_flash_remove),
201 .driver = {
202 .name = DRIVER_NAME,
203 },
204};
205
206static int __init bfin_flash_init(void)
207{
208 return platform_driver_register(&bfin_flash_driver);
209}
210module_init(bfin_flash_init);
211
212static void __exit bfin_flash_exit(void)
213{
214 platform_driver_unregister(&bfin_flash_driver);
215}
216module_exit(bfin_flash_exit);
217
218MODULE_LICENSE("GPL");
219MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
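(Aside: a hypothetical board-resources sketch showing how the probe above expects to be fed; the base address, AMBCTL timing values and GPIO number are made up for illustration, and a real board file supplies its own. MEM resource 0 is the flash window and its .name selects the map probe, since the driver calls do_map_probe(memory->name, ...); MEM resource 1 carries the two EBIU_AMBCTL values in .start/.end; the "IRQ" resource is really the GPIO that steers the async bank between ethernet and flash.)

#include <linux/kernel.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>

static struct physmap_flash_data board_flash_data = {
	.width	= 2,			/* 16-bit async bank */
};

static struct resource board_flash_resource[] = {
	{
		.name	= "cfi_probe",		/* passed to do_map_probe() */
		.start	= 0x20000000,		/* hypothetical bank base */
		.end	= 0x203fffff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 0x7bb07bb0,		/* hypothetical flash AMBCTL0 */
		.end	= 0x7bb07bb0,		/* hypothetical flash AMBCTL1 */
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 30,			/* hypothetical GPIO number */
		.end	= 30,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_flash_device = {
	.name		= "bfin-async-flash",
	.id		= 0,
	.dev		= { .platform_data = &board_flash_data },
	.num_resources	= ARRAY_SIZE(board_flash_resource),
	.resource	= board_flash_resource,
};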
diff --git a/drivers/mtd/maps/cdb89712.c b/drivers/mtd/maps/cdb89712.c
index 9f17bb6c5a9d..cb507da0a87d 100644
--- a/drivers/mtd/maps/cdb89712.c
+++ b/drivers/mtd/maps/cdb89712.c
@@ -1,7 +1,6 @@
1/* 1/*
2 * Flash on Cirrus CDB89712 2 * Flash on Cirrus CDB89712
3 * 3 *
4 * $Id: cdb89712.c,v 1.11 2005/11/07 11:14:26 gleixner Exp $
5 */ 4 */
6 5
7#include <linux/module.h> 6#include <linux/module.h>
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
index 629e6e2641a8..6464d487eb1a 100644
--- a/drivers/mtd/maps/ceiva.c
+++ b/drivers/mtd/maps/ceiva.c
@@ -11,7 +11,6 @@
11 * 11 *
12 * (C) 2000 Nicolas Pitre <nico@cam.org> 12 * (C) 2000 Nicolas Pitre <nico@cam.org>
13 * 13 *
14 * $Id: ceiva.c,v 1.11 2004/09/16 23:27:12 gleixner Exp $
15 */ 14 */
16 15
17#include <linux/module.h> 16#include <linux/module.h>
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index 65e5ee552010..0ecc3f6d735b 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <kd@flaga.is> 2 * Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <kd@flaga.is>
3 * 3 *
4 * $Id: cfi_flagadm.c,v 1.15 2005/11/07 11:14:26 gleixner Exp $
5 *
6 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 5 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your 6 * Free Software Foundation; either version 2 of the License, or (at your
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index 92a9c7fac993..e115667bf1d0 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: dbox2-flash.c,v 1.14 2005/11/07 11:14:26 gleixner Exp $
3 *
4 * D-Box 2 flash driver 2 * D-Box 2 flash driver
5 */ 3 */
6 4
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index b32bb9347d71..3aa018c092f8 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -4,8 +4,6 @@
4 * (C) 2000 Nicolas Pitre <nico@cam.org> 4 * (C) 2000 Nicolas Pitre <nico@cam.org>
5 * 5 *
6 * This code is GPL 6 * This code is GPL
7 *
8 * $Id: dc21285.c,v 1.24 2005/11/07 11:14:26 gleixner Exp $
9 */ 7 */
10#include <linux/module.h> 8#include <linux/module.h>
11#include <linux/types.h> 9#include <linux/types.h>
diff --git a/drivers/mtd/maps/dilnetpc.c b/drivers/mtd/maps/dilnetpc.c
index 1c3b34ad7325..0713e3a5a22c 100644
--- a/drivers/mtd/maps/dilnetpc.c
+++ b/drivers/mtd/maps/dilnetpc.c
@@ -14,8 +14,6 @@
14 * along with this program; if not, write to the Free Software 14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA 15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
16 * 16 *
17 * $Id: dilnetpc.c,v 1.20 2005/11/07 11:14:26 gleixner Exp $
18 *
19 * The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems 17 * The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems
20 * featuring the AMD Elan SC410 processor. There are two variants of this 18 * featuring the AMD Elan SC410 processor. There are two variants of this
21 * board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash 19 * board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash
diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c
index e0558b0b2fe6..d171674eb2ed 100644
--- a/drivers/mtd/maps/dmv182.c
+++ b/drivers/mtd/maps/dmv182.c
@@ -4,8 +4,6 @@
4 * 4 *
5 * Flash map driver for the Dy4 SVME182 board 5 * Flash map driver for the Dy4 SVME182 board
6 * 6 *
7 * $Id: dmv182.c,v 1.6 2005/11/07 11:14:26 gleixner Exp $
8 *
9 * Copyright 2003-2004, TimeSys Corporation 7 * Copyright 2003-2004, TimeSys Corporation
10 * 8 *
11 * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp. 9 * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp.
diff --git a/drivers/mtd/maps/ebony.c b/drivers/mtd/maps/ebony.c
index 1488bb92f26f..d92b7c70d3ed 100644
--- a/drivers/mtd/maps/ebony.c
+++ b/drivers/mtd/maps/ebony.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: ebony.c,v 1.16 2005/11/07 11:14:26 gleixner Exp $
3 *
4 * Mapping for Ebony user flash 2 * Mapping for Ebony user flash
5 * 3 *
6 * Matt Porter <mporter@kernel.crashing.org> 4 * Matt Porter <mporter@kernel.crashing.org>
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
index 1c5b97c89685..9433738c1664 100644
--- a/drivers/mtd/maps/edb7312.c
+++ b/drivers/mtd/maps/edb7312.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: edb7312.c,v 1.14 2005/11/07 11:14:27 gleixner Exp $
3 *
4 * Handle mapping of the NOR flash on Cogent EDB7312 boards 2 * Handle mapping of the NOR flash on Cogent EDB7312 boards
5 * 3 *
6 * Copyright 2002 SYSGO Real-Time Solutions GmbH 4 * Copyright 2002 SYSGO Real-Time Solutions GmbH
diff --git a/drivers/mtd/maps/fortunet.c b/drivers/mtd/maps/fortunet.c
index 7c50c271651c..a8e3fde4cbd5 100644
--- a/drivers/mtd/maps/fortunet.c
+++ b/drivers/mtd/maps/fortunet.c
@@ -1,6 +1,5 @@
1/* fortunet.c memory map 1/* fortunet.c memory map
2 * 2 *
3 * $Id: fortunet.c,v 1.11 2005/11/07 11:14:27 gleixner Exp $
4 */ 3 */
5 4
6#include <linux/module.h> 5#include <linux/module.h>
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 6dde3182d64a..ef8915474462 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -2,8 +2,6 @@
2 * Flash memory access on Hynix GMS30C7201/HMS30C7202 based 2 * Flash memory access on Hynix GMS30C7201/HMS30C7202 based
3 * evaluation boards 3 * evaluation boards
4 * 4 *
5 * $Id: h720x-flash.c,v 1.12 2005/11/07 11:14:27 gleixner Exp $
6 *
7 * (C) 2002 Jungjun Kim <jungjun.kim@hynix.com> 5 * (C) 2002 Jungjun Kim <jungjun.kim@hynix.com>
8 * 2003 Thomas Gleixner <tglx@linutronix.de> 6 * 2003 Thomas Gleixner <tglx@linutronix.de>
9 */ 7 */
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 2c884c49e84a..aeb6c916e23f 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -2,7 +2,6 @@
2 * ichxrom.c 2 * ichxrom.c
3 * 3 *
4 * Normal mappings of chips in physical memory 4 * Normal mappings of chips in physical memory
5 * $Id: ichxrom.c,v 1.19 2005/11/07 11:14:27 gleixner Exp $
6 */ 5 */
7 6
8#include <linux/module.h> 7#include <linux/module.h>
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index a0b4dc7155dc..2682ab51a367 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: impa7.c,v 1.14 2005/11/07 11:14:27 gleixner Exp $
3 *
4 * Handle mapping of the NOR flash on implementa A7 boards 2 * Handle mapping of the NOR flash on implementa A7 boards
5 * 3 *
6 * Copyright 2002 SYSGO Real-Time Solutions GmbH 4 * Copyright 2002 SYSGO Real-Time Solutions GmbH
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index 325c8880c437..ee361aaadb1e 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -22,8 +22,6 @@
22 This is access code for flashes using ARM's flash partitioning 22 This is access code for flashes using ARM's flash partitioning
23 standards. 23 standards.
24 24
25 $Id: integrator-flash.c,v 1.20 2005/11/07 11:14:27 gleixner Exp $
26
27======================================================================*/ 25======================================================================*/
28 26
29#include <linux/module.h> 27#include <linux/module.h>
diff --git a/drivers/mtd/maps/ipaq-flash.c b/drivers/mtd/maps/ipaq-flash.c
index f27c132794c3..a806119797e0 100644
--- a/drivers/mtd/maps/ipaq-flash.c
+++ b/drivers/mtd/maps/ipaq-flash.c
@@ -4,8 +4,6 @@
4 * (C) 2000 Nicolas Pitre <nico@cam.org> 4 * (C) 2000 Nicolas Pitre <nico@cam.org>
5 * (C) 2002 Hewlett-Packard Company <jamey.hicks@hp.com> 5 * (C) 2002 Hewlett-Packard Company <jamey.hicks@hp.com>
6 * (C) 2003 Christian Pellegrin <chri@ascensit.com>, <chri@infis.univ.ts.it>: concatenation of multiple flashes 6 * (C) 2003 Christian Pellegrin <chri@ascensit.com>, <chri@infis.univ.ts.it>: concatenation of multiple flashes
7 *
8 * $Id: ipaq-flash.c,v 1.5 2005/11/07 11:14:27 gleixner Exp $
9 */ 7 */
10 8
11#include <linux/module.h> 9#include <linux/module.h>
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index c8396b8574c4..c2264792a20b 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: ixp2000.c,v 1.9 2005/11/07 11:14:27 gleixner Exp $
3 *
4 * drivers/mtd/maps/ixp2000.c 2 * drivers/mtd/maps/ixp2000.c
5 * 3 *
6 * Mapping for the Intel XScale IXP2000 based systems 4 * Mapping for the Intel XScale IXP2000 based systems
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 01f19a4714b5..9c7a5fbd4e51 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: ixp4xx.c,v 1.13 2005/11/16 16:23:21 dvrabel Exp $
3 *
4 * drivers/mtd/maps/ixp4xx.c 2 * drivers/mtd/maps/ixp4xx.c
5 * 3 *
6 * MTD Map file for IXP4XX based systems. Please do not make per-board 4 * MTD Map file for IXP4XX based systems. Please do not make per-board
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c
index 67620adf4811..9e054503c4cf 100644
--- a/drivers/mtd/maps/l440gx.c
+++ b/drivers/mtd/maps/l440gx.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: l440gx.c,v 1.18 2005/11/07 11:14:27 gleixner Exp $
3 *
4 * BIOS Flash chip on Intel 440GX board. 2 * BIOS Flash chip on Intel 440GX board.
5 * 3 *
6 * Bugs this currently does not work under linuxBIOS. 4 * Bugs this currently does not work under linuxBIOS.
diff --git a/drivers/mtd/maps/map_funcs.c b/drivers/mtd/maps/map_funcs.c
index 9105e6ca0aa6..3f268370eeca 100644
--- a/drivers/mtd/maps/map_funcs.c
+++ b/drivers/mtd/maps/map_funcs.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: map_funcs.c,v 1.10 2005/06/06 23:04:36 tpoynor Exp $
3 *
4 * Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS 2 * Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS
5 * is enabled. 3 * is enabled.
6 */ 4 */
diff --git a/drivers/mtd/maps/mbx860.c b/drivers/mtd/maps/mbx860.c
index 06b118727846..706f67394b07 100644
--- a/drivers/mtd/maps/mbx860.c
+++ b/drivers/mtd/maps/mbx860.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: mbx860.c,v 1.9 2005/11/07 11:14:27 gleixner Exp $
3 *
4 * Handle mapping of the flash on MBX860 boards 2 * Handle mapping of the flash on MBX860 boards
5 * 3 *
6 * Author: Anton Todorov 4 * Author: Anton Todorov
diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c
index 95dcab2146ad..c0cb319b2b70 100644
--- a/drivers/mtd/maps/netsc520.c
+++ b/drivers/mtd/maps/netsc520.c
@@ -3,8 +3,6 @@
3 * Copyright (C) 2001 Mark Langsdorf (mark.langsdorf@amd.com) 3 * Copyright (C) 2001 Mark Langsdorf (mark.langsdorf@amd.com)
4 * based on sc520cdp.c by Sysgo Real-Time Solutions GmbH 4 * based on sc520cdp.c by Sysgo Real-Time Solutions GmbH
5 * 5 *
6 * $Id: netsc520.c,v 1.14 2005/11/07 11:14:27 gleixner Exp $
7 *
8 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or 8 * the Free Software Foundation; either version 2 of the License, or
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 0c9b305a72e0..965e6c6d6ab0 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * (C) Copyright 2000-2001, Greg Ungerer (gerg@snapgear.com) 6 * (C) Copyright 2000-2001, Greg Ungerer (gerg@snapgear.com)
7 * (C) Copyright 2001-2002, SnapGear (www.snapgear.com) 7 * (C) Copyright 2001-2002, SnapGear (www.snapgear.com)
8 *
9 * $Id: nettel.c,v 1.12 2005/11/29 14:30:00 gleixner Exp $
10 */ 8 */
11 9
12/****************************************************************************/ 10/****************************************************************************/
diff --git a/drivers/mtd/maps/octagon-5066.c b/drivers/mtd/maps/octagon-5066.c
index a6642db3d325..43e04c1d22a9 100644
--- a/drivers/mtd/maps/octagon-5066.c
+++ b/drivers/mtd/maps/octagon-5066.c
@@ -1,4 +1,3 @@
1// $Id: octagon-5066.c,v 1.28 2005/11/07 11:14:27 gleixner Exp $
2/* ###################################################################### 1/* ######################################################################
3 2
4 Octagon 5066 MTD Driver. 3 Octagon 5066 MTD Driver.
diff --git a/drivers/mtd/maps/omap-toto-flash.c b/drivers/mtd/maps/omap-toto-flash.c
index e6e391efbeb6..0a60ebbc2175 100644
--- a/drivers/mtd/maps/omap-toto-flash.c
+++ b/drivers/mtd/maps/omap-toto-flash.c
@@ -4,8 +4,6 @@
4 * jzhang@ti.com (C) 2003 Texas Instruments. 4 * jzhang@ti.com (C) 2003 Texas Instruments.
5 * 5 *
6 * (C) 2002 MontVista Software, Inc. 6 * (C) 2002 MontVista Software, Inc.
7 *
8 * $Id: omap-toto-flash.c,v 1.5 2005/11/07 11:14:27 gleixner Exp $
9 */ 7 */
10 8
11#include <linux/module.h> 9#include <linux/module.h>
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index d2ab1bae9c34..5c6a25c90380 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -7,8 +7,6 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 * 9 *
10 * $Id: pci.c,v 1.14 2005/11/17 08:20:27 dwmw2 Exp $
11 *
12 * Generic PCI memory map driver. We support the following boards: 10 * Generic PCI memory map driver. We support the following boards:
13 * - Intel IQ80310 ATU. 11 * - Intel IQ80310 ATU.
14 * - Intel EBSA285 (blank rom programming mode). Tested working 27/09/2001 12 * - Intel EBSA285 (blank rom programming mode). Tested working 27/09/2001
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 0cc31675aeb9..90924fb00481 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: pcmciamtd.c,v 1.55 2005/11/07 11:14:28 gleixner Exp $
3 *
4 * pcmciamtd.c - MTD driver for PCMCIA flash memory cards 2 * pcmciamtd.c - MTD driver for PCMCIA flash memory cards
5 * 3 *
6 * Author: Simon Evans <spse@secret.org.uk> 4 * Author: Simon Evans <spse@secret.org.uk>
@@ -48,7 +46,6 @@ static const int debug = 0;
48 46
49 47
50#define DRIVER_DESC "PCMCIA Flash memory card driver" 48#define DRIVER_DESC "PCMCIA Flash memory card driver"
51#define DRIVER_VERSION "$Revision: 1.55 $"
52 49
53/* Size of the PCMCIA address space: 26 bits = 64 MB */ 50/* Size of the PCMCIA address space: 26 bits = 64 MB */
54#define MAX_PCMCIA_ADDR 0x4000000 51#define MAX_PCMCIA_ADDR 0x4000000
@@ -785,7 +782,7 @@ static struct pcmcia_driver pcmciamtd_driver = {
785 782
786static int __init init_pcmciamtd(void) 783static int __init init_pcmciamtd(void)
787{ 784{
788 info(DRIVER_DESC " " DRIVER_VERSION); 785 info(DRIVER_DESC);
789 786
790 if(bankwidth && bankwidth != 1 && bankwidth != 2) { 787 if(bankwidth && bankwidth != 1 && bankwidth != 2) {
791 info("bad bankwidth (%d), using default", bankwidth); 788 info("bad bankwidth (%d), using default", bankwidth);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 183255fcfdcb..42d844f8f6bf 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: physmap.c,v 1.39 2005/11/29 14:49:36 gleixner Exp $
3 *
4 * Normal mappings of chips in physical memory 2 * Normal mappings of chips in physical memory
5 * 3 *
6 * Copyright (C) 2003 MontaVista Software Inc. 4 * Copyright (C) 2003 MontaVista Software Inc.
@@ -203,7 +201,19 @@ static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state
203 int i; 201 int i;
204 202
205 for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++) 203 for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
206 ret |= info->mtd[i]->suspend(info->mtd[i]); 204 if (info->mtd[i]->suspend) {
205 ret = info->mtd[i]->suspend(info->mtd[i]);
206 if (ret)
207 goto fail;
208 }
209
210 return 0;
211fail:
212 for (--i; i >= 0; --i)
213 if (info->mtd[i]->suspend) {
214 BUG_ON(!info->mtd[i]->resume);
215 info->mtd[i]->resume(info->mtd[i]);
216 }
207 217
208 return ret; 218 return ret;
209} 219}
@@ -214,7 +224,8 @@ static int physmap_flash_resume(struct platform_device *dev)
214 int i; 224 int i;
215 225
216 for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++) 226 for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
217 info->mtd[i]->resume(info->mtd[i]); 227 if (info->mtd[i]->resume)
228 info->mtd[i]->resume(info->mtd[i]);
218 229
219 return 0; 230 return 0;
220} 231}
@@ -225,8 +236,9 @@ static void physmap_flash_shutdown(struct platform_device *dev)
225 int i; 236 int i;
226 237
227 for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++) 238 for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
228 if (info->mtd[i]->suspend(info->mtd[i]) == 0) 239 if (info->mtd[i]->suspend && info->mtd[i]->resume)
229 info->mtd[i]->resume(info->mtd[i]); 240 if (info->mtd[i]->suspend(info->mtd[i]) == 0)
241 info->mtd[i]->resume(info->mtd[i]);
230} 242}
231#else 243#else
232#define physmap_flash_suspend NULL 244#define physmap_flash_suspend NULL
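The physmap_flash_suspend() change above stops OR-ing return codes together and instead treats ->suspend/->resume as optional hooks, unwinding on partial failure: if one chip refuses to suspend, everything already suspended is resumed again. A minimal stand-alone sketch of that rollback pattern, assuming an invented struct dev, suspend_all() and main() harness (none of which are part of the driver):

#include <stdio.h>

struct dev {
        const char *name;
        int (*suspend)(struct dev *);   /* optional, may be NULL */
        void (*resume)(struct dev *);   /* optional, may be NULL */
};

/* Suspend every device; on failure, resume the ones already suspended. */
static int suspend_all(struct dev **devs, int n)
{
        int i, ret = 0;

        for (i = 0; i < n; i++) {
                if (!devs[i]->suspend)
                        continue;
                ret = devs[i]->suspend(devs[i]);
                if (ret)
                        goto fail;
        }
        return 0;

fail:
        for (--i; i >= 0; --i)
                if (devs[i]->suspend && devs[i]->resume)
                        devs[i]->resume(devs[i]);
        return ret;
}

static int ok_suspend(struct dev *d)  { printf("%s suspended\n", d->name); return 0; }
static int bad_suspend(struct dev *d) { printf("%s refused\n", d->name); return -1; }
static void do_resume(struct dev *d)  { printf("%s resumed\n", d->name); }

int main(void)
{
        struct dev a = { "flash0", ok_suspend, do_resume };
        struct dev b = { "flash1", bad_suspend, do_resume };
        struct dev *devs[] = { &a, &b };

        return suspend_all(devs, 2) ? 1 : 0;
}

The shutdown path in the same hunk only pairs suspend with resume when both hooks exist, which is the same NULL-check discipline without the rollback.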
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 3eb2643b2328..e7dd9c8a965e 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -6,8 +6,6 @@
6 * 6 *
7 * Generic platfrom device based RAM map 7 * Generic platfrom device based RAM map
8 * 8 *
9 * $Id: plat-ram.c,v 1.7 2005/11/07 11:14:28 gleixner Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or 11 * the Free Software Foundation; either version 2 of the License, or
diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c
index 4d858b3d5f82..de002eb1a7fe 100644
--- a/drivers/mtd/maps/redwood.c
+++ b/drivers/mtd/maps/redwood.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: redwood.c,v 1.11 2005/11/07 11:14:28 gleixner Exp $
3 *
4 * drivers/mtd/maps/redwood.c 2 * drivers/mtd/maps/redwood.c
5 * 3 *
6 * FLASH map for the IBM Redwood 4/5/6 boards. 4 * FLASH map for the IBM Redwood 4/5/6 boards.
diff --git a/drivers/mtd/maps/rpxlite.c b/drivers/mtd/maps/rpxlite.c
index 809a0c8e7aaf..14d90edb4430 100644
--- a/drivers/mtd/maps/rpxlite.c
+++ b/drivers/mtd/maps/rpxlite.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: rpxlite.c,v 1.22 2004/11/04 13:24:15 gleixner Exp $
3 *
4 * Handle mapping of the flash on the RPX Lite and CLLF boards 2 * Handle mapping of the flash on the RPX Lite and CLLF boards
5 */ 3 */
6 4
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index c7d5a52a2d55..e177a43dfff0 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -2,8 +2,6 @@
2 * Flash memory access on SA11x0 based devices 2 * Flash memory access on SA11x0 based devices
3 * 3 *
4 * (C) 2000 Nicolas Pitre <nico@cam.org> 4 * (C) 2000 Nicolas Pitre <nico@cam.org>
5 *
6 * $Id: sa1100-flash.c,v 1.51 2005/11/07 11:14:28 gleixner Exp $
7 */ 5 */
8#include <linux/module.h> 6#include <linux/module.h>
9#include <linux/types.h> 7#include <linux/types.h>
diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c
index b8c1331b7a04..6e1e99cd2b59 100644
--- a/drivers/mtd/maps/sbc8240.c
+++ b/drivers/mtd/maps/sbc8240.c
@@ -4,9 +4,6 @@
4 * Carolyn Smith, Tektronix, Inc. 4 * Carolyn Smith, Tektronix, Inc.
5 * 5 *
6 * This code is GPLed 6 * This code is GPLed
7 *
8 * $Id: sbc8240.c,v 1.5 2005/11/07 11:14:28 gleixner Exp $
9 *
10 */ 7 */
11 8
12/* 9/*
diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c
index 7cc4041d096d..1b1c0b7e11ef 100644
--- a/drivers/mtd/maps/sbc_gxx.c
+++ b/drivers/mtd/maps/sbc_gxx.c
@@ -17,8 +17,6 @@
17 along with this program; if not, write to the Free Software 17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA 18 Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
19 19
20 $Id: sbc_gxx.c,v 1.35 2005/11/07 11:14:28 gleixner Exp $
21
22The SBC-MediaGX / SBC-GXx has up to 16 MiB of 20The SBC-MediaGX / SBC-GXx has up to 16 MiB of
23Intel StrataFlash (28F320/28F640) in x8 mode. 21Intel StrataFlash (28F320/28F640) in x8 mode.
24 22
diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c
index 4045e372b90d..85c1e56309ec 100644
--- a/drivers/mtd/maps/sc520cdp.c
+++ b/drivers/mtd/maps/sc520cdp.c
@@ -16,8 +16,6 @@
16 * along with this program; if not, write to the Free Software 16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA 17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
18 * 18 *
19 * $Id: sc520cdp.c,v 1.23 2005/11/17 08:20:27 dwmw2 Exp $
20 *
21 * 19 *
22 * The SC520CDP is an evaluation board for the Elan SC520 processor available 20 * The SC520CDP is an evaluation board for the Elan SC520 processor available
23 * from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size, 21 * from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size,
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 0fc5584324e3..21169e6d646c 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -1,6 +1,5 @@
1/* 1/*
2 * MTD map driver for BIOS Flash on Intel SCB2 boards 2 * MTD map driver for BIOS Flash on Intel SCB2 boards
3 * $Id: scb2_flash.c,v 1.12 2005/03/18 14:04:35 gleixner Exp $
4 * Copyright (C) 2002 Sun Microsystems, Inc. 3 * Copyright (C) 2002 Sun Microsystems, Inc.
5 * Tim Hockin <thockin@sun.com> 4 * Tim Hockin <thockin@sun.com>
6 * 5 *
diff --git a/drivers/mtd/maps/scx200_docflash.c b/drivers/mtd/maps/scx200_docflash.c
index 5e2bce22f37c..b5391ebb736e 100644
--- a/drivers/mtd/maps/scx200_docflash.c
+++ b/drivers/mtd/maps/scx200_docflash.c
@@ -2,8 +2,6 @@
2 2
3 Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> 3 Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
4 4
5 $Id: scx200_docflash.c,v 1.12 2005/11/07 11:14:28 gleixner Exp $
6
7 National Semiconductor SCx200 flash mapped with DOCCS 5 National Semiconductor SCx200 flash mapped with DOCCS
8*/ 6*/
9 7
diff --git a/drivers/mtd/maps/sharpsl-flash.c b/drivers/mtd/maps/sharpsl-flash.c
index 917dc778f24e..026eab028189 100644
--- a/drivers/mtd/maps/sharpsl-flash.c
+++ b/drivers/mtd/maps/sharpsl-flash.c
@@ -4,8 +4,6 @@
4 * Copyright (C) 2001 Lineo Japan, Inc. 4 * Copyright (C) 2001 Lineo Japan, Inc.
5 * Copyright (C) 2002 SHARP 5 * Copyright (C) 2002 SHARP
6 * 6 *
7 * $Id: sharpsl-flash.c,v 1.7 2005/11/07 11:14:28 gleixner Exp $
8 *
9 * based on rpxlite.c,v 1.15 2001/10/02 15:05:14 dwmw2 Exp 7 * based on rpxlite.c,v 1.15 2001/10/02 15:05:14 dwmw2 Exp
10 * Handle mapping of the flash on the RPX Lite and CLLF boards 8 * Handle mapping of the flash on the RPX Lite and CLLF boards
11 * 9 *
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index d76ceef453ce..0eb41d9c6786 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: solutionengine.c,v 1.15 2005/11/07 11:14:28 gleixner Exp $
3 *
4 * Flash and EPROM on Hitachi Solution Engine and similar boards. 2 * Flash and EPROM on Hitachi Solution Engine and similar boards.
5 * 3 *
6 * (C) 2001 Red Hat, Inc. 4 * (C) 2001 Red Hat, Inc.
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 001af7f7ddda..0d7c88396c88 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -1,4 +1,4 @@
1/* $Id: sun_uflash.c,v 1.13 2005/11/07 11:14:28 gleixner Exp $ 1/*
2 * 2 *
3 * sun_uflash - Driver implementation for user-programmable flash 3 * sun_uflash - Driver implementation for user-programmable flash
4 * present on many Sun Microsystems SME boardsets. 4 * present on many Sun Microsystems SME boardsets.
diff --git a/drivers/mtd/maps/tqm8xxl.c b/drivers/mtd/maps/tqm8xxl.c
index 521734057314..a5d3d8531faa 100644
--- a/drivers/mtd/maps/tqm8xxl.c
+++ b/drivers/mtd/maps/tqm8xxl.c
@@ -2,8 +2,6 @@
2 * Handle mapping of the flash memory access routines 2 * Handle mapping of the flash memory access routines
3 * on TQM8xxL based devices. 3 * on TQM8xxL based devices.
4 * 4 *
5 * $Id: tqm8xxl.c,v 1.15 2005/11/07 11:14:28 gleixner Exp $
6 *
7 * based on rpxlite.c 5 * based on rpxlite.c
8 * 6 *
9 * Copyright(C) 2001 Kirk Lee <kirk@hpc.ee.ntu.edu.tw> 7 * Copyright(C) 2001 Kirk Lee <kirk@hpc.ee.ntu.edu.tw>
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c
index b47270e850bc..e2147bf11c88 100644
--- a/drivers/mtd/maps/ts5500_flash.c
+++ b/drivers/mtd/maps/ts5500_flash.c
@@ -22,8 +22,6 @@
22 * - Drive A and B use the resident flash disk (RFD) flash translation layer. 22 * - Drive A and B use the resident flash disk (RFD) flash translation layer.
23 * - If you have created your own jffs file system and the bios overwrites 23 * - If you have created your own jffs file system and the bios overwrites
24 * it during boot, try disabling Drive A: and B: in the boot order. 24 * it during boot, try disabling Drive A: and B: in the boot order.
25 *
26 * $Id: ts5500_flash.c,v 1.5 2005/11/07 11:14:28 gleixner Exp $
27 */ 25 */
28 26
29#include <linux/init.h> 27#include <linux/init.h>
diff --git a/drivers/mtd/maps/tsunami_flash.c b/drivers/mtd/maps/tsunami_flash.c
index 0f915ac3102e..77a8bfc02577 100644
--- a/drivers/mtd/maps/tsunami_flash.c
+++ b/drivers/mtd/maps/tsunami_flash.c
@@ -2,7 +2,6 @@
2 * tsunami_flash.c 2 * tsunami_flash.c
3 * 3 *
4 * flash chip on alpha ds10... 4 * flash chip on alpha ds10...
5 * $Id: tsunami_flash.c,v 1.10 2005/11/07 11:14:29 gleixner Exp $
6 */ 5 */
7#include <asm/io.h> 6#include <asm/io.h>
8#include <asm/core_tsunami.h> 7#include <asm/core_tsunami.h>
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 3fcf92130aa4..0dc645f8152f 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -4,8 +4,6 @@
4 * uclinux.c -- generic memory mapped MTD driver for uclinux 4 * uclinux.c -- generic memory mapped MTD driver for uclinux
5 * 5 *
6 * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com) 6 * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
7 *
8 * $Id: uclinux.c,v 1.12 2005/11/07 11:14:29 gleixner Exp $
9 */ 7 */
10 8
11/****************************************************************************/ 9/****************************************************************************/
diff --git a/drivers/mtd/maps/vmax301.c b/drivers/mtd/maps/vmax301.c
index b3e487395435..5a0c9a353b0f 100644
--- a/drivers/mtd/maps/vmax301.c
+++ b/drivers/mtd/maps/vmax301.c
@@ -1,4 +1,3 @@
1// $Id: vmax301.c,v 1.32 2005/11/07 11:14:29 gleixner Exp $
2/* ###################################################################### 1/* ######################################################################
3 2
4 Tempustech VMAX SBC301 MTD Driver. 3 Tempustech VMAX SBC301 MTD Driver.
diff --git a/drivers/mtd/maps/walnut.c b/drivers/mtd/maps/walnut.c
index ca932122fb64..e243476c8171 100644
--- a/drivers/mtd/maps/walnut.c
+++ b/drivers/mtd/maps/walnut.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: walnut.c,v 1.3 2005/11/07 11:14:29 gleixner Exp $
3 *
4 * Mapping for Walnut flash 2 * Mapping for Walnut flash
5 * (used ebony.c as a "framework") 3 * (used ebony.c as a "framework")
6 * 4 *
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index ac5b8105b6ef..413b0cf9bbd2 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: wr_sbc82xx_flash.c,v 1.8 2005/11/07 11:14:29 gleixner Exp $
3 *
4 * Map for flash chips on Wind River PowerQUICC II SBC82xx board. 2 * Map for flash chips on Wind River PowerQUICC II SBC82xx board.
5 * 3 *
6 * Copyright (C) 2004 Red Hat, Inc. 4 * Copyright (C) 2004 Red Hat, Inc.
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 839eed8430a2..9ff007c4962c 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: mtd_blkdevs.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
3 *
4 * (C) 2003 David Woodhouse <dwmw2@infradead.org> 2 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
5 * 3 *
6 * Interface to Linux 2.5 block layer for MTD 'translation layers'. 4 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
@@ -212,7 +210,7 @@ static struct block_device_operations mtd_blktrans_ops = {
212int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) 210int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
213{ 211{
214 struct mtd_blktrans_ops *tr = new->tr; 212 struct mtd_blktrans_ops *tr = new->tr;
215 struct list_head *this; 213 struct mtd_blktrans_dev *d;
216 int last_devnum = -1; 214 int last_devnum = -1;
217 struct gendisk *gd; 215 struct gendisk *gd;
218 216
@@ -221,8 +219,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
221 BUG(); 219 BUG();
222 } 220 }
223 221
224 list_for_each(this, &tr->devs) { 222 list_for_each_entry(d, &tr->devs, list) {
225 struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
226 if (new->devnum == -1) { 223 if (new->devnum == -1) {
227 /* Use first free number */ 224 /* Use first free number */
228 if (d->devnum != last_devnum+1) { 225 if (d->devnum != last_devnum+1) {
@@ -309,33 +306,24 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
309 306
310static void blktrans_notify_remove(struct mtd_info *mtd) 307static void blktrans_notify_remove(struct mtd_info *mtd)
311{ 308{
312 struct list_head *this, *this2, *next; 309 struct mtd_blktrans_ops *tr;
313 310 struct mtd_blktrans_dev *dev, *next;
314 list_for_each(this, &blktrans_majors) {
315 struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
316
317 list_for_each_safe(this2, next, &tr->devs) {
318 struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);
319 311
312 list_for_each_entry(tr, &blktrans_majors, list)
313 list_for_each_entry_safe(dev, next, &tr->devs, list)
320 if (dev->mtd == mtd) 314 if (dev->mtd == mtd)
321 tr->remove_dev(dev); 315 tr->remove_dev(dev);
322 }
323 }
324} 316}
325 317
326static void blktrans_notify_add(struct mtd_info *mtd) 318static void blktrans_notify_add(struct mtd_info *mtd)
327{ 319{
328 struct list_head *this; 320 struct mtd_blktrans_ops *tr;
329 321
330 if (mtd->type == MTD_ABSENT) 322 if (mtd->type == MTD_ABSENT)
331 return; 323 return;
332 324
333 list_for_each(this, &blktrans_majors) { 325 list_for_each_entry(tr, &blktrans_majors, list)
334 struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
335
336 tr->add_mtd(tr, mtd); 326 tr->add_mtd(tr, mtd);
337 }
338
339} 327}
340 328
341static struct mtd_notifier blktrans_notifier = { 329static struct mtd_notifier blktrans_notifier = {
@@ -406,7 +394,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
406 394
407int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr) 395int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
408{ 396{
409 struct list_head *this, *next; 397 struct mtd_blktrans_dev *dev, *next;
410 398
411 mutex_lock(&mtd_table_mutex); 399 mutex_lock(&mtd_table_mutex);
412 400
@@ -416,10 +404,8 @@ int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
416 /* Remove it from the list of active majors */ 404 /* Remove it from the list of active majors */
417 list_del(&tr->list); 405 list_del(&tr->list);
418 406
419 list_for_each_safe(this, next, &tr->devs) { 407 list_for_each_entry_safe(dev, next, &tr->devs, list)
420 struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
421 tr->remove_dev(dev); 408 tr->remove_dev(dev);
422 }
423 409
424 blk_cleanup_queue(tr->blkcore_priv->rq); 410 blk_cleanup_queue(tr->blkcore_priv->rq);
425 unregister_blkdev(tr->major, tr->name); 411 unregister_blkdev(tr->major, tr->name);
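The mtd_blkdevs.c hunks above are a mechanical conversion from list_for_each() plus list_entry() to list_for_each_entry(), with the _safe variant wherever entries are removed while walking the list. The user-space sketch below imitates the idea with a simplified macro so it compiles on its own; the real macro lives in <linux/list.h> and infers the type with typeof(), so the extra type argument here is an artifact of the simplification, and struct blkdev and main() are invented.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *head) { head->next = head->prev = head; }

static void list_add_tail(struct list_head *item, struct list_head *head)
{
        item->prev = head->prev;
        item->next = head;
        head->prev->next = item;
        head->prev = item;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified: the kernel version needs no explicit type argument. */
#define list_for_each_entry(pos, head, type, member)                    \
        for (pos = container_of((head)->next, type, member);            \
             &(pos)->member != (head);                                  \
             pos = container_of((pos)->member.next, type, member))

struct blkdev {
        int devnum;
        struct list_head list;
};

int main(void)
{
        struct list_head devs;
        struct blkdev a = { .devnum = 0 }, b = { .devnum = 1 };
        struct blkdev *d;

        list_init(&devs);
        list_add_tail(&a.list, &devs);
        list_add_tail(&b.list, &devs);

        list_for_each_entry(d, &devs, struct blkdev, list)
                printf("mtdblock%d\n", d->devnum);

        return 0;
}

Folding the list_entry() step into the iterator is what lets each converted loop drop its braces and its local struct list_head cursor.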
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 952da30b1745..208c6faa0358 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -1,8 +1,6 @@
1/* 1/*
2 * Direct MTD block device access 2 * Direct MTD block device access
3 * 3 *
4 * $Id: mtdblock.c,v 1.68 2005/11/07 11:14:20 gleixner Exp $
5 *
6 * (C) 2000-2003 Nicolas Pitre <nico@cam.org> 4 * (C) 2000-2003 Nicolas Pitre <nico@cam.org>
7 * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> 5 * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
8 */ 6 */
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index f79dbb49b1a2..852165f8b1c3 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: mtdblock_ro.c,v 1.19 2004/11/16 18:28:59 dwmw2 Exp $
3 *
4 * (C) 2003 David Woodhouse <dwmw2@infradead.org> 2 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
5 * 3 *
6 * Simple read-only (writable only for RAM) mtdblock driver 4 * Simple read-only (writable only for RAM) mtdblock driver
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index aef9f4b687c9..d2f331876e4c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: mtdchar.c,v 1.76 2005/11/07 11:14:20 gleixner Exp $
3 *
4 * Character-device access to raw MTD devices. 2 * Character-device access to raw MTD devices.
5 * 3 *
6 */ 4 */
@@ -494,6 +492,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
494 { 492 {
495 struct mtd_oob_buf buf; 493 struct mtd_oob_buf buf;
496 struct mtd_oob_ops ops; 494 struct mtd_oob_ops ops;
495 struct mtd_oob_buf __user *user_buf = argp;
497 uint32_t retlen; 496 uint32_t retlen;
498 497
499 if(!(file->f_mode & 2)) 498 if(!(file->f_mode & 2))
@@ -537,8 +536,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
537 if (ops.oobretlen > 0xFFFFFFFFU) 536 if (ops.oobretlen > 0xFFFFFFFFU)
538 ret = -EOVERFLOW; 537 ret = -EOVERFLOW;
539 retlen = ops.oobretlen; 538 retlen = ops.oobretlen;
540 if (copy_to_user(&((struct mtd_oob_buf *)argp)->length, 539 if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
541 &retlen, sizeof(buf.length)))
542 ret = -EFAULT; 540 ret = -EFAULT;
543 541
544 kfree(ops.oobbuf); 542 kfree(ops.oobbuf);
@@ -592,29 +590,29 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
592 590
593 case MEMLOCK: 591 case MEMLOCK:
594 { 592 {
595 struct erase_info_user info; 593 struct erase_info_user einfo;
596 594
597 if (copy_from_user(&info, argp, sizeof(info))) 595 if (copy_from_user(&einfo, argp, sizeof(einfo)))
598 return -EFAULT; 596 return -EFAULT;
599 597
600 if (!mtd->lock) 598 if (!mtd->lock)
601 ret = -EOPNOTSUPP; 599 ret = -EOPNOTSUPP;
602 else 600 else
603 ret = mtd->lock(mtd, info.start, info.length); 601 ret = mtd->lock(mtd, einfo.start, einfo.length);
604 break; 602 break;
605 } 603 }
606 604
607 case MEMUNLOCK: 605 case MEMUNLOCK:
608 { 606 {
609 struct erase_info_user info; 607 struct erase_info_user einfo;
610 608
611 if (copy_from_user(&info, argp, sizeof(info))) 609 if (copy_from_user(&einfo, argp, sizeof(einfo)))
612 return -EFAULT; 610 return -EFAULT;
613 611
614 if (!mtd->unlock) 612 if (!mtd->unlock)
615 ret = -EOPNOTSUPP; 613 ret = -EOPNOTSUPP;
616 else 614 else
617 ret = mtd->unlock(mtd, info.start, info.length); 615 ret = mtd->unlock(mtd, einfo.start, einfo.length);
618 break; 616 break;
619 } 617 }
620 618
@@ -714,15 +712,15 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
714 712
715 case OTPLOCK: 713 case OTPLOCK:
716 { 714 {
717 struct otp_info info; 715 struct otp_info oinfo;
718 716
719 if (mfi->mode != MTD_MODE_OTP_USER) 717 if (mfi->mode != MTD_MODE_OTP_USER)
720 return -EINVAL; 718 return -EINVAL;
721 if (copy_from_user(&info, argp, sizeof(info))) 719 if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
722 return -EFAULT; 720 return -EFAULT;
723 if (!mtd->lock_user_prot_reg) 721 if (!mtd->lock_user_prot_reg)
724 return -EOPNOTSUPP; 722 return -EOPNOTSUPP;
725 ret = mtd->lock_user_prot_reg(mtd, info.start, info.length); 723 ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
726 break; 724 break;
727 } 725 }
728#endif 726#endif
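The OOB ioctl hunk in mtdchar.c is a readability fix: argp is converted once into a correctly typed __user pointer (user_buf), so writing the length back no longer needs a cast buried inside copy_to_user(), and the short-lived info locals in the MEMLOCK/MEMUNLOCK/OTPLOCK cases get distinct names (einfo, oinfo). A hedged user-space sketch of the "type the pointer once, then write a single member back" idea, with memcpy() standing in for copy_to_user() and struct oob_req invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct oob_req {
        uint64_t start;
        uint32_t length;        /* updated with the number of bytes handled */
};

static void report_done(void *argp, uint32_t done)
{
        struct oob_req *req = argp;     /* typed once, up front */

        /* Equivalent to the old &((struct oob_req *)argp)->length form. */
        memcpy(&req->length, &done, sizeof(req->length));
}

int main(void)
{
        struct oob_req req = { .start = 0, .length = 64 };

        report_done(&req, 48);
        printf("length now %u\n", (unsigned)req.length);
        return 0;
}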
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index d563dcd4b264..2972a5edb73d 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -6,8 +6,6 @@
6 * NAND support by Christian Gan <cgan@iders.ca> 6 * NAND support by Christian Gan <cgan@iders.ca>
7 * 7 *
8 * This code is GPL 8 * This code is GPL
9 *
10 * $Id: mtdconcat.c,v 1.11 2005/11/07 11:14:20 gleixner Exp $
11 */ 9 */
12 10
13#include <linux/kernel.h> 11#include <linux/kernel.h>
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index f7e7890e5bc6..a9d246949820 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: mtdcore.c,v 1.47 2005/11/07 11:14:20 gleixner Exp $
3 *
4 * Core registration and callback routines for MTD 2 * Core registration and callback routines for MTD
5 * drivers and users. 3 * drivers and users.
6 * 4 *
@@ -53,7 +51,7 @@ int add_mtd_device(struct mtd_info *mtd)
53 51
54 for (i=0; i < MAX_MTD_DEVICES; i++) 52 for (i=0; i < MAX_MTD_DEVICES; i++)
55 if (!mtd_table[i]) { 53 if (!mtd_table[i]) {
56 struct list_head *this; 54 struct mtd_notifier *not;
57 55
58 mtd_table[i] = mtd; 56 mtd_table[i] = mtd;
59 mtd->index = i; 57 mtd->index = i;
@@ -72,10 +70,8 @@ int add_mtd_device(struct mtd_info *mtd)
72 DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name); 70 DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
73 /* No need to get a refcount on the module containing 71 /* No need to get a refcount on the module containing
74 the notifier, since we hold the mtd_table_mutex */ 72 the notifier, since we hold the mtd_table_mutex */
75 list_for_each(this, &mtd_notifiers) { 73 list_for_each_entry(not, &mtd_notifiers, list)
76 struct mtd_notifier *not = list_entry(this, struct mtd_notifier, list);
77 not->add(mtd); 74 not->add(mtd);
78 }
79 75
80 mutex_unlock(&mtd_table_mutex); 76 mutex_unlock(&mtd_table_mutex);
81 /* We _know_ we aren't being removed, because 77 /* We _know_ we aren't being removed, because
@@ -113,14 +109,12 @@ int del_mtd_device (struct mtd_info *mtd)
113 mtd->index, mtd->name, mtd->usecount); 109 mtd->index, mtd->name, mtd->usecount);
114 ret = -EBUSY; 110 ret = -EBUSY;
115 } else { 111 } else {
116 struct list_head *this; 112 struct mtd_notifier *not;
117 113
118 /* No need to get a refcount on the module containing 114 /* No need to get a refcount on the module containing
119 the notifier, since we hold the mtd_table_mutex */ 115 the notifier, since we hold the mtd_table_mutex */
120 list_for_each(this, &mtd_notifiers) { 116 list_for_each_entry(not, &mtd_notifiers, list)
121 struct mtd_notifier *not = list_entry(this, struct mtd_notifier, list);
122 not->remove(mtd); 117 not->remove(mtd);
123 }
124 118
125 mtd_table[mtd->index] = NULL; 119 mtd_table[mtd->index] = NULL;
126 120
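add_mtd_device() and del_mtd_device() above get the same list_for_each_entry() treatment for the MTD notifier chain. The sketch below strips that notifier pattern down to user space: every registered notifier has its ->add() callback run when a new device appears. The singly linked list, the say_hello() callback and main() are invented for illustration; locking, module reference counting and the remove path are deliberately left out.

#include <stdio.h>

struct mtd_info { const char *name; };

struct mtd_notifier {
        void (*add)(struct mtd_info *mtd);
        void (*remove)(struct mtd_info *mtd);
        struct mtd_notifier *next;      /* simplified; the kernel uses a list_head */
};

static struct mtd_notifier *notifiers;

static void notify_add(struct mtd_info *mtd)
{
        struct mtd_notifier *not;

        for (not = notifiers; not; not = not->next)
                not->add(mtd);
}

static void say_hello(struct mtd_info *mtd)
{
        printf("new MTD device: %s\n", mtd->name);
}

int main(void)
{
        struct mtd_notifier n = { .add = say_hello };
        struct mtd_info flash = { .name = "physmap-flash.0" };

        n.next = notifiers;
        notifiers = &n;

        notify_add(&flash);
        return 0;
}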
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 07c701169344..edb90b58a9b1 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -5,8 +5,6 @@
5 * 5 *
6 * This code is GPL 6 * This code is GPL
7 * 7 *
8 * $Id: mtdpart.c,v 1.55 2005/11/07 11:14:20 gleixner Exp $
9 *
10 * 02-21-2002 Thomas Gleixner <gleixner@autronix.de> 8 * 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
11 * added support for read_oob, write_oob 9 * added support for read_oob, write_oob
12 */ 10 */
@@ -46,8 +44,8 @@ struct mtd_part {
46 * to the _real_ device. 44 * to the _real_ device.
47 */ 45 */
48 46
49static int part_read (struct mtd_info *mtd, loff_t from, size_t len, 47static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
50 size_t *retlen, u_char *buf) 48 size_t *retlen, u_char *buf)
51{ 49{
52 struct mtd_part *part = PART(mtd); 50 struct mtd_part *part = PART(mtd);
53 int res; 51 int res;
@@ -56,7 +54,7 @@ static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
56 len = 0; 54 len = 0;
57 else if (from + len > mtd->size) 55 else if (from + len > mtd->size)
58 len = mtd->size - from; 56 len = mtd->size - from;
59 res = part->master->read (part->master, from + part->offset, 57 res = part->master->read(part->master, from + part->offset,
60 len, retlen, buf); 58 len, retlen, buf);
61 if (unlikely(res)) { 59 if (unlikely(res)) {
62 if (res == -EUCLEAN) 60 if (res == -EUCLEAN)
@@ -67,8 +65,8 @@ static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
67 return res; 65 return res;
68} 66}
69 67
70static int part_point (struct mtd_info *mtd, loff_t from, size_t len, 68static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
71 size_t *retlen, void **virt, resource_size_t *phys) 69 size_t *retlen, void **virt, resource_size_t *phys)
72{ 70{
73 struct mtd_part *part = PART(mtd); 71 struct mtd_part *part = PART(mtd);
74 if (from >= mtd->size) 72 if (from >= mtd->size)
@@ -87,7 +85,7 @@ static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
87} 85}
88 86
89static int part_read_oob(struct mtd_info *mtd, loff_t from, 87static int part_read_oob(struct mtd_info *mtd, loff_t from,
90 struct mtd_oob_ops *ops) 88 struct mtd_oob_ops *ops)
91{ 89{
92 struct mtd_part *part = PART(mtd); 90 struct mtd_part *part = PART(mtd);
93 int res; 91 int res;
@@ -107,38 +105,38 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
107 return res; 105 return res;
108} 106}
109 107
110static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, 108static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
111 size_t *retlen, u_char *buf) 109 size_t len, size_t *retlen, u_char *buf)
112{ 110{
113 struct mtd_part *part = PART(mtd); 111 struct mtd_part *part = PART(mtd);
114 return part->master->read_user_prot_reg (part->master, from, 112 return part->master->read_user_prot_reg(part->master, from,
115 len, retlen, buf); 113 len, retlen, buf);
116} 114}
117 115
118static int part_get_user_prot_info (struct mtd_info *mtd, 116static int part_get_user_prot_info(struct mtd_info *mtd,
119 struct otp_info *buf, size_t len) 117 struct otp_info *buf, size_t len)
120{ 118{
121 struct mtd_part *part = PART(mtd); 119 struct mtd_part *part = PART(mtd);
122 return part->master->get_user_prot_info (part->master, buf, len); 120 return part->master->get_user_prot_info(part->master, buf, len);
123} 121}
124 122
125static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, 123static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
126 size_t *retlen, u_char *buf) 124 size_t len, size_t *retlen, u_char *buf)
127{ 125{
128 struct mtd_part *part = PART(mtd); 126 struct mtd_part *part = PART(mtd);
129 return part->master->read_fact_prot_reg (part->master, from, 127 return part->master->read_fact_prot_reg(part->master, from,
130 len, retlen, buf); 128 len, retlen, buf);
131} 129}
132 130
133static int part_get_fact_prot_info (struct mtd_info *mtd, 131static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
134 struct otp_info *buf, size_t len) 132 size_t len)
135{ 133{
136 struct mtd_part *part = PART(mtd); 134 struct mtd_part *part = PART(mtd);
137 return part->master->get_fact_prot_info (part->master, buf, len); 135 return part->master->get_fact_prot_info(part->master, buf, len);
138} 136}
139 137
140static int part_write (struct mtd_info *mtd, loff_t to, size_t len, 138static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
141 size_t *retlen, const u_char *buf) 139 size_t *retlen, const u_char *buf)
142{ 140{
143 struct mtd_part *part = PART(mtd); 141 struct mtd_part *part = PART(mtd);
144 if (!(mtd->flags & MTD_WRITEABLE)) 142 if (!(mtd->flags & MTD_WRITEABLE))
@@ -147,12 +145,12 @@ static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
147 len = 0; 145 len = 0;
148 else if (to + len > mtd->size) 146 else if (to + len > mtd->size)
149 len = mtd->size - to; 147 len = mtd->size - to;
150 return part->master->write (part->master, to + part->offset, 148 return part->master->write(part->master, to + part->offset,
151 len, retlen, buf); 149 len, retlen, buf);
152} 150}
153 151
154static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len, 152static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
155 size_t *retlen, const u_char *buf) 153 size_t *retlen, const u_char *buf)
156{ 154{
157 struct mtd_part *part = PART(mtd); 155 struct mtd_part *part = PART(mtd);
158 if (!(mtd->flags & MTD_WRITEABLE)) 156 if (!(mtd->flags & MTD_WRITEABLE))
@@ -161,12 +159,12 @@ static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
161 len = 0; 159 len = 0;
162 else if (to + len > mtd->size) 160 else if (to + len > mtd->size)
163 len = mtd->size - to; 161 len = mtd->size - to;
164 return part->master->panic_write (part->master, to + part->offset, 162 return part->master->panic_write(part->master, to + part->offset,
165 len, retlen, buf); 163 len, retlen, buf);
166} 164}
167 165
168static int part_write_oob(struct mtd_info *mtd, loff_t to, 166static int part_write_oob(struct mtd_info *mtd, loff_t to,
169 struct mtd_oob_ops *ops) 167 struct mtd_oob_ops *ops)
170{ 168{
171 struct mtd_part *part = PART(mtd); 169 struct mtd_part *part = PART(mtd);
172 170
@@ -180,31 +178,32 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
180 return part->master->write_oob(part->master, to + part->offset, ops); 178 return part->master->write_oob(part->master, to + part->offset, ops);
181} 179}
182 180
183static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, 181static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
184 size_t *retlen, u_char *buf) 182 size_t len, size_t *retlen, u_char *buf)
185{ 183{
186 struct mtd_part *part = PART(mtd); 184 struct mtd_part *part = PART(mtd);
187 return part->master->write_user_prot_reg (part->master, from, 185 return part->master->write_user_prot_reg(part->master, from,
188 len, retlen, buf); 186 len, retlen, buf);
189} 187}
190 188
191static int part_lock_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len) 189static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
190 size_t len)
192{ 191{
193 struct mtd_part *part = PART(mtd); 192 struct mtd_part *part = PART(mtd);
194 return part->master->lock_user_prot_reg (part->master, from, len); 193 return part->master->lock_user_prot_reg(part->master, from, len);
195} 194}
196 195
197static int part_writev (struct mtd_info *mtd, const struct kvec *vecs, 196static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
198 unsigned long count, loff_t to, size_t *retlen) 197 unsigned long count, loff_t to, size_t *retlen)
199{ 198{
200 struct mtd_part *part = PART(mtd); 199 struct mtd_part *part = PART(mtd);
201 if (!(mtd->flags & MTD_WRITEABLE)) 200 if (!(mtd->flags & MTD_WRITEABLE))
202 return -EROFS; 201 return -EROFS;
203 return part->master->writev (part->master, vecs, count, 202 return part->master->writev(part->master, vecs, count,
204 to + part->offset, retlen); 203 to + part->offset, retlen);
205} 204}
206 205
207static int part_erase (struct mtd_info *mtd, struct erase_info *instr) 206static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
208{ 207{
209 struct mtd_part *part = PART(mtd); 208 struct mtd_part *part = PART(mtd);
210 int ret; 209 int ret;
@@ -236,7 +235,7 @@ void mtd_erase_callback(struct erase_info *instr)
236} 235}
237EXPORT_SYMBOL_GPL(mtd_erase_callback); 236EXPORT_SYMBOL_GPL(mtd_erase_callback);
238 237
239static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len) 238static int part_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
240{ 239{
241 struct mtd_part *part = PART(mtd); 240 struct mtd_part *part = PART(mtd);
242 if ((len + ofs) > mtd->size) 241 if ((len + ofs) > mtd->size)
@@ -244,7 +243,7 @@ static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
244 return part->master->lock(part->master, ofs + part->offset, len); 243 return part->master->lock(part->master, ofs + part->offset, len);
245} 244}
246 245
247static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len) 246static int part_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
248{ 247{
249 struct mtd_part *part = PART(mtd); 248 struct mtd_part *part = PART(mtd);
250 if ((len + ofs) > mtd->size) 249 if ((len + ofs) > mtd->size)
@@ -270,7 +269,7 @@ static void part_resume(struct mtd_info *mtd)
270 part->master->resume(part->master); 269 part->master->resume(part->master);
271} 270}
272 271
273static int part_block_isbad (struct mtd_info *mtd, loff_t ofs) 272static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
274{ 273{
275 struct mtd_part *part = PART(mtd); 274 struct mtd_part *part = PART(mtd);
276 if (ofs >= mtd->size) 275 if (ofs >= mtd->size)
@@ -279,7 +278,7 @@ static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
279 return part->master->block_isbad(part->master, ofs); 278 return part->master->block_isbad(part->master, ofs);
280} 279}
281 280
282static int part_block_markbad (struct mtd_info *mtd, loff_t ofs) 281static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
283{ 282{
284 struct mtd_part *part = PART(mtd); 283 struct mtd_part *part = PART(mtd);
285 int res; 284 int res;
@@ -302,229 +301,237 @@ static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
302 301
303int del_mtd_partitions(struct mtd_info *master) 302int del_mtd_partitions(struct mtd_info *master)
304{ 303{
305 struct list_head *node; 304 struct mtd_part *slave, *next;
306 struct mtd_part *slave;
307 305
308 for (node = mtd_partitions.next; 306 list_for_each_entry_safe(slave, next, &mtd_partitions, list)
309 node != &mtd_partitions;
310 node = node->next) {
311 slave = list_entry(node, struct mtd_part, list);
312 if (slave->master == master) { 307 if (slave->master == master) {
313 struct list_head *prev = node->prev; 308 list_del(&slave->list);
314 __list_del(prev, node->next); 309 if (slave->registered)
315 if(slave->registered)
316 del_mtd_device(&slave->mtd); 310 del_mtd_device(&slave->mtd);
317 kfree(slave); 311 kfree(slave);
318 node = prev;
319 } 312 }
320 }
321 313
322 return 0; 314 return 0;
323} 315}
316EXPORT_SYMBOL(del_mtd_partitions);
324 317
325/* 318static struct mtd_part *add_one_partition(struct mtd_info *master,
326 * This function, given a master MTD object and a partition table, creates 319 const struct mtd_partition *part, int partno,
327 * and registers slave MTD objects which are bound to the master according to 320 u_int32_t cur_offset)
328 * the partition definitions.
329 * (Q: should we register the master MTD object as well?)
330 */
331
332int add_mtd_partitions(struct mtd_info *master,
333 const struct mtd_partition *parts,
334 int nbparts)
335{ 321{
336 struct mtd_part *slave; 322 struct mtd_part *slave;
337 u_int32_t cur_offset = 0;
338 int i;
339
340 printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
341
342 for (i = 0; i < nbparts; i++) {
343 323
344 /* allocate the partition structure */ 324 /* allocate the partition structure */
345 slave = kzalloc (sizeof(*slave), GFP_KERNEL); 325 slave = kzalloc(sizeof(*slave), GFP_KERNEL);
346 if (!slave) { 326 if (!slave) {
347 printk ("memory allocation error while creating partitions for \"%s\"\n", 327 printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
348 master->name); 328 master->name);
349 del_mtd_partitions(master); 329 del_mtd_partitions(master);
350 return -ENOMEM; 330 return NULL;
351 } 331 }
352 list_add(&slave->list, &mtd_partitions); 332 list_add(&slave->list, &mtd_partitions);
353 333
354 /* set up the MTD object for this partition */ 334 /* set up the MTD object for this partition */
355 slave->mtd.type = master->type; 335 slave->mtd.type = master->type;
356 slave->mtd.flags = master->flags & ~parts[i].mask_flags; 336 slave->mtd.flags = master->flags & ~part->mask_flags;
357 slave->mtd.size = parts[i].size; 337 slave->mtd.size = part->size;
358 slave->mtd.writesize = master->writesize; 338 slave->mtd.writesize = master->writesize;
359 slave->mtd.oobsize = master->oobsize; 339 slave->mtd.oobsize = master->oobsize;
360 slave->mtd.oobavail = master->oobavail; 340 slave->mtd.oobavail = master->oobavail;
361 slave->mtd.subpage_sft = master->subpage_sft; 341 slave->mtd.subpage_sft = master->subpage_sft;
362 342
363 slave->mtd.name = parts[i].name; 343 slave->mtd.name = part->name;
364 slave->mtd.owner = master->owner; 344 slave->mtd.owner = master->owner;
365 345
366 slave->mtd.read = part_read; 346 slave->mtd.read = part_read;
367 slave->mtd.write = part_write; 347 slave->mtd.write = part_write;
368 348
369 if (master->panic_write) 349 if (master->panic_write)
370 slave->mtd.panic_write = part_panic_write; 350 slave->mtd.panic_write = part_panic_write;
371 351
372 if(master->point && master->unpoint){ 352 if (master->point && master->unpoint) {
373 slave->mtd.point = part_point; 353 slave->mtd.point = part_point;
374 slave->mtd.unpoint = part_unpoint; 354 slave->mtd.unpoint = part_unpoint;
375 } 355 }
376 356
377 if (master->read_oob) 357 if (master->read_oob)
378 slave->mtd.read_oob = part_read_oob; 358 slave->mtd.read_oob = part_read_oob;
379 if (master->write_oob) 359 if (master->write_oob)
380 slave->mtd.write_oob = part_write_oob; 360 slave->mtd.write_oob = part_write_oob;
381 if(master->read_user_prot_reg) 361 if (master->read_user_prot_reg)
382 slave->mtd.read_user_prot_reg = part_read_user_prot_reg; 362 slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
383 if(master->read_fact_prot_reg) 363 if (master->read_fact_prot_reg)
384 slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg; 364 slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
385 if(master->write_user_prot_reg) 365 if (master->write_user_prot_reg)
386 slave->mtd.write_user_prot_reg = part_write_user_prot_reg; 366 slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
387 if(master->lock_user_prot_reg) 367 if (master->lock_user_prot_reg)
388 slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg; 368 slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
389 if(master->get_user_prot_info) 369 if (master->get_user_prot_info)
390 slave->mtd.get_user_prot_info = part_get_user_prot_info; 370 slave->mtd.get_user_prot_info = part_get_user_prot_info;
391 if(master->get_fact_prot_info) 371 if (master->get_fact_prot_info)
392 slave->mtd.get_fact_prot_info = part_get_fact_prot_info; 372 slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
393 if (master->sync) 373 if (master->sync)
394 slave->mtd.sync = part_sync; 374 slave->mtd.sync = part_sync;
395 if (!i && master->suspend && master->resume) { 375 if (!partno && master->suspend && master->resume) {
396 slave->mtd.suspend = part_suspend; 376 slave->mtd.suspend = part_suspend;
397 slave->mtd.resume = part_resume; 377 slave->mtd.resume = part_resume;
378 }
379 if (master->writev)
380 slave->mtd.writev = part_writev;
381 if (master->lock)
382 slave->mtd.lock = part_lock;
383 if (master->unlock)
384 slave->mtd.unlock = part_unlock;
385 if (master->block_isbad)
386 slave->mtd.block_isbad = part_block_isbad;
387 if (master->block_markbad)
388 slave->mtd.block_markbad = part_block_markbad;
389 slave->mtd.erase = part_erase;
390 slave->master = master;
391 slave->offset = part->offset;
392 slave->index = partno;
393
394 if (slave->offset == MTDPART_OFS_APPEND)
395 slave->offset = cur_offset;
396 if (slave->offset == MTDPART_OFS_NXTBLK) {
397 slave->offset = cur_offset;
398 if ((cur_offset % master->erasesize) != 0) {
399 /* Round up to next erasesize */
400 slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
401 printk(KERN_NOTICE "Moving partition %d: "
402 "0x%08x -> 0x%08x\n", partno,
403 cur_offset, slave->offset);
398 } 404 }
399 if (master->writev) 405 }
400 slave->mtd.writev = part_writev; 406 if (slave->mtd.size == MTDPART_SIZ_FULL)
401 if (master->lock) 407 slave->mtd.size = master->size - slave->offset;
402 slave->mtd.lock = part_lock; 408
403 if (master->unlock) 409 printk(KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
404 slave->mtd.unlock = part_unlock; 410 slave->offset + slave->mtd.size, slave->mtd.name);
405 if (master->block_isbad) 411
406 slave->mtd.block_isbad = part_block_isbad; 412 /* let's do some sanity checks */
407 if (master->block_markbad) 413 if (slave->offset >= master->size) {
408 slave->mtd.block_markbad = part_block_markbad; 414 /* let's register it anyway to preserve ordering */
409 slave->mtd.erase = part_erase; 415 slave->offset = 0;
410 slave->master = master; 416 slave->mtd.size = 0;
411 slave->offset = parts[i].offset; 417 printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
412 slave->index = i; 418 part->name);
413 419 goto out_register;
414 if (slave->offset == MTDPART_OFS_APPEND) 420 }
415 slave->offset = cur_offset; 421 if (slave->offset + slave->mtd.size > master->size) {
416 if (slave->offset == MTDPART_OFS_NXTBLK) { 422 slave->mtd.size = master->size - slave->offset;
417 slave->offset = cur_offset; 423 printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
418 if ((cur_offset % master->erasesize) != 0) { 424 part->name, master->name, slave->mtd.size);
419 /* Round up to next erasesize */ 425 }
420 slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize; 426 if (master->numeraseregions > 1) {
421 printk(KERN_NOTICE "Moving partition %d: " 427 /* Deal with variable erase size stuff */
422 "0x%08x -> 0x%08x\n", i, 428 int i, max = master->numeraseregions;
423 cur_offset, slave->offset); 429 u32 end = slave->offset + slave->mtd.size;
430 struct mtd_erase_region_info *regions = master->eraseregions;
431
432 /* Find the first erase regions which is part of this
433 * partition. */
434 for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
435 ;
436 /* The loop searched for the region _behind_ the first one */
437 i--;
438
439 /* Pick biggest erasesize */
440 for (; i < max && regions[i].offset < end; i++) {
441 if (slave->mtd.erasesize < regions[i].erasesize) {
442 slave->mtd.erasesize = regions[i].erasesize;
424 } 443 }
425 } 444 }
426 if (slave->mtd.size == MTDPART_SIZ_FULL) 445 BUG_ON(slave->mtd.erasesize == 0);
427 slave->mtd.size = master->size - slave->offset; 446 } else {
428 cur_offset = slave->offset + slave->mtd.size; 447 /* Single erase size */
448 slave->mtd.erasesize = master->erasesize;
449 }
429 450
430 printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset, 451 if ((slave->mtd.flags & MTD_WRITEABLE) &&
431 slave->offset + slave->mtd.size, slave->mtd.name); 452 (slave->offset % slave->mtd.erasesize)) {
453 /* Doesn't start on a boundary of major erase size */
454 /* FIXME: Let it be writable if it is on a boundary of
455 * _minor_ erase size though */
456 slave->mtd.flags &= ~MTD_WRITEABLE;
457 printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
458 part->name);
459 }
460 if ((slave->mtd.flags & MTD_WRITEABLE) &&
461 (slave->mtd.size % slave->mtd.erasesize)) {
462 slave->mtd.flags &= ~MTD_WRITEABLE;
463 printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
464 part->name);
465 }
432 466
433 /* let's do some sanity checks */ 467 slave->mtd.ecclayout = master->ecclayout;
434 if (slave->offset >= master->size) { 468 if (master->block_isbad) {
435 /* let's register it anyway to preserve ordering */ 469 uint32_t offs = 0;
436 slave->offset = 0;
437 slave->mtd.size = 0;
438 printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
439 parts[i].name);
440 }
441 if (slave->offset + slave->mtd.size > master->size) {
442 slave->mtd.size = master->size - slave->offset;
443 printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
444 parts[i].name, master->name, slave->mtd.size);
445 }
446 if (master->numeraseregions>1) {
447 /* Deal with variable erase size stuff */
448 int i;
449 struct mtd_erase_region_info *regions = master->eraseregions;
450
451 /* Find the first erase regions which is part of this partition. */
452 for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
453 ;
454
455 for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
456 if (slave->mtd.erasesize < regions[i].erasesize) {
457 slave->mtd.erasesize = regions[i].erasesize;
458 }
459 }
460 } else {
461 /* Single erase size */
462 slave->mtd.erasesize = master->erasesize;
463 }
464 470
465 if ((slave->mtd.flags & MTD_WRITEABLE) && 471 while (offs < slave->mtd.size) {
466 (slave->offset % slave->mtd.erasesize)) { 472 if (master->block_isbad(master,
467 /* Doesn't start on a boundary of major erase size */ 473 offs + slave->offset))
468 /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */ 474 slave->mtd.ecc_stats.badblocks++;
469 slave->mtd.flags &= ~MTD_WRITEABLE; 475 offs += slave->mtd.erasesize;
470 printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
471 parts[i].name);
472 }
473 if ((slave->mtd.flags & MTD_WRITEABLE) &&
474 (slave->mtd.size % slave->mtd.erasesize)) {
475 slave->mtd.flags &= ~MTD_WRITEABLE;
476 printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
477 parts[i].name);
478 } 476 }
477 }
479 478
480 slave->mtd.ecclayout = master->ecclayout; 479out_register:
481 if (master->block_isbad) { 480 if (part->mtdp) {
482 uint32_t offs = 0; 481 /* store the object pointer (caller may or may not register it*/
482 *part->mtdp = &slave->mtd;
483 slave->registered = 0;
484 } else {
485 /* register our partition */
486 add_mtd_device(&slave->mtd);
487 slave->registered = 1;
488 }
489 return slave;
490}
483 491
484 while(offs < slave->mtd.size) { 492/*
485 if (master->block_isbad(master, 493 * This function, given a master MTD object and a partition table, creates
486 offs + slave->offset)) 494 * and registers slave MTD objects which are bound to the master according to
487 slave->mtd.ecc_stats.badblocks++; 495 * the partition definitions.
488 offs += slave->mtd.erasesize; 496 * (Q: should we register the master MTD object as well?)
489 } 497 */
490 }
491 498
492 if(parts[i].mtdp) 499int add_mtd_partitions(struct mtd_info *master,
493 { /* store the object pointer (caller may or may not register it */ 500 const struct mtd_partition *parts,
494 *parts[i].mtdp = &slave->mtd; 501 int nbparts)
495 slave->registered = 0; 502{
496 } 503 struct mtd_part *slave;
497 else 504 u_int32_t cur_offset = 0;
498 { 505 int i;
499 /* register our partition */ 506
500 add_mtd_device(&slave->mtd); 507 printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
501 slave->registered = 1; 508
502 } 509 for (i = 0; i < nbparts; i++) {
510 slave = add_one_partition(master, parts + i, i, cur_offset);
511 if (!slave)
512 return -ENOMEM;
513 cur_offset = slave->offset + slave->mtd.size;
503 } 514 }
504 515
505 return 0; 516 return 0;
506} 517}
507
508EXPORT_SYMBOL(add_mtd_partitions); 518EXPORT_SYMBOL(add_mtd_partitions);
509EXPORT_SYMBOL(del_mtd_partitions);
510 519
511static DEFINE_SPINLOCK(part_parser_lock); 520static DEFINE_SPINLOCK(part_parser_lock);
512static LIST_HEAD(part_parsers); 521static LIST_HEAD(part_parsers);
513 522
514static struct mtd_part_parser *get_partition_parser(const char *name) 523static struct mtd_part_parser *get_partition_parser(const char *name)
515{ 524{
516 struct list_head *this; 525 struct mtd_part_parser *p, *ret = NULL;
517 void *ret = NULL;
518 spin_lock(&part_parser_lock);
519 526
520 list_for_each(this, &part_parsers) { 527 spin_lock(&part_parser_lock);
521 struct mtd_part_parser *p = list_entry(this, struct mtd_part_parser, list);
522 528
529 list_for_each_entry(p, &part_parsers, list)
523 if (!strcmp(p->name, name) && try_module_get(p->owner)) { 530 if (!strcmp(p->name, name) && try_module_get(p->owner)) {
524 ret = p; 531 ret = p;
525 break; 532 break;
526 } 533 }
527 } 534
528 spin_unlock(&part_parser_lock); 535 spin_unlock(&part_parser_lock);
529 536
530 return ret; 537 return ret;
@@ -538,6 +545,7 @@ int register_mtd_parser(struct mtd_part_parser *p)
538 545
539 return 0; 546 return 0;
540} 547}
548EXPORT_SYMBOL_GPL(register_mtd_parser);
541 549
542int deregister_mtd_parser(struct mtd_part_parser *p) 550int deregister_mtd_parser(struct mtd_part_parser *p)
543{ 551{
@@ -546,6 +554,7 @@ int deregister_mtd_parser(struct mtd_part_parser *p)
546 spin_unlock(&part_parser_lock); 554 spin_unlock(&part_parser_lock);
547 return 0; 555 return 0;
548} 556}
557EXPORT_SYMBOL_GPL(deregister_mtd_parser);
549 558
550int parse_mtd_partitions(struct mtd_info *master, const char **types, 559int parse_mtd_partitions(struct mtd_info *master, const char **types,
551 struct mtd_partition **pparts, unsigned long origin) 560 struct mtd_partition **pparts, unsigned long origin)
@@ -573,7 +582,4 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
573 } 582 }
574 return ret; 583 return ret;
575} 584}
576
577EXPORT_SYMBOL_GPL(parse_mtd_partitions); 585EXPORT_SYMBOL_GPL(parse_mtd_partitions);
578EXPORT_SYMBOL_GPL(register_mtd_parser);
579EXPORT_SYMBOL_GPL(deregister_mtd_parser);
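The mtdpart.c rework factors the body of add_mtd_partitions() out into add_one_partition(), converts del_mtd_partitions() to list_for_each_entry_safe() plus list_del(), and moves the EXPORT_SYMBOL*() lines next to the functions they export; the partition-offset arithmetic itself is carried over unchanged. The stand-alone sketch below replays that MTDPART_OFS_APPEND / MTDPART_OFS_NXTBLK resolution (continue at the current offset, or additionally round up to the next erase block). The constants are assumed to be -1 and -2 as in include/linux/mtd/partitions.h, and resolve_offset() with its main() harness is invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define MTDPART_OFS_APPEND      (-1)
#define MTDPART_OFS_NXTBLK      (-2)

static uint32_t resolve_offset(int64_t requested, uint32_t cur_offset,
                               uint32_t erasesize)
{
        if (requested == MTDPART_OFS_APPEND)
                return cur_offset;

        if (requested == MTDPART_OFS_NXTBLK) {
                if (cur_offset % erasesize)
                        /* Round up to the next erase block boundary. */
                        return ((cur_offset / erasesize) + 1) * erasesize;
                return cur_offset;
        }

        return (uint32_t)requested;
}

int main(void)
{
        /* 0x21000 is not 64 KiB aligned, so NXTBLK moves it up to 0x30000. */
        printf("append: 0x%08x\n",
               (unsigned)resolve_offset(MTDPART_OFS_APPEND, 0x21000, 0x10000));
        printf("nxtblk: 0x%08x\n",
               (unsigned)resolve_offset(MTDPART_OFS_NXTBLK, 0x21000, 0x10000));
        return 0;
}

cur_offset then advances by the partition size for the next iteration, which is exactly what the new add_mtd_partitions() loop does with the slave returned by add_one_partition().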
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5076faf9ca66..71406e517857 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,5 +1,4 @@
1# drivers/mtd/nand/Kconfig 1# drivers/mtd/nand/Kconfig
2# $Id: Kconfig,v 1.35 2005/11/07 11:14:30 gleixner Exp $
3 2
4menuconfig MTD_NAND 3menuconfig MTD_NAND
5 tristate "NAND Device Support" 4 tristate "NAND Device Support"
@@ -272,22 +271,23 @@ config MTD_NAND_CS553X
272 271
273 If you say "m", the module will be called "cs553x_nand.ko". 272 If you say "m", the module will be called "cs553x_nand.ko".
274 273
275config MTD_NAND_AT91 274config MTD_NAND_ATMEL
276 bool "Support for NAND Flash / SmartMedia on AT91" 275 tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32"
277 depends on ARCH_AT91 276 depends on ARCH_AT91 || AVR32
278 help 277 help
279 Enables support for NAND Flash / Smart Media Card interface 278 Enables support for NAND Flash / Smart Media Card interface
280 on Atmel AT91 processors. 279 on Atmel AT91 and AVR32 processors.
281choice 280choice
282 prompt "ECC management for NAND Flash / SmartMedia on AT91" 281 prompt "ECC management for NAND Flash / SmartMedia on AT91 / AVR32"
283 depends on MTD_NAND_AT91 282 depends on MTD_NAND_ATMEL
284 283
285config MTD_NAND_AT91_ECC_HW 284config MTD_NAND_ATMEL_ECC_HW
286 bool "Hardware ECC" 285 bool "Hardware ECC"
287 depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9260 286 depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9260 || AVR32
288 help 287 help
289 Uses hardware ECC provided by the at91sam9260/at91sam9263 chip 288 Use hardware ECC instead of software ECC when the chip
290 instead of software ECC. 289 supports it.
290
291 The hardware ECC controller is capable of single bit error 291 The hardware ECC controller is capable of single bit error
292 correction and 2-bit random detection per page. 292 correction and 2-bit random detection per page.
293 293
@@ -297,16 +297,16 @@ config MTD_NAND_AT91_ECC_HW
297 297
298 If unsure, say Y 298 If unsure, say Y
299 299
300config MTD_NAND_AT91_ECC_SOFT 300config MTD_NAND_ATMEL_ECC_SOFT
301 bool "Software ECC" 301 bool "Software ECC"
302 help 302 help
303 Uses software ECC. 303 Use software ECC.
304 304
305 NB : hardware and software ECC schemes are incompatible. 305 NB : hardware and software ECC schemes are incompatible.
306 If you switch from one to another, you'll have to erase your 306 If you switch from one to another, you'll have to erase your
307 mtd partition. 307 mtd partition.
308 308
309config MTD_NAND_AT91_ECC_NONE 309config MTD_NAND_ATMEL_ECC_NONE
310 bool "No ECC (testing only, DANGEROUS)" 310 bool "No ECC (testing only, DANGEROUS)"
311 depends on DEBUG_KERNEL 311 depends on DEBUG_KERNEL
312 help 312 help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index a6e74a46992a..d772581de573 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -1,7 +1,6 @@
1# 1#
2# linux/drivers/nand/Makefile 2# linux/drivers/nand/Makefile
3# 3#
4# $Id: Makefile.common,v 1.15 2004/11/26 12:28:22 dedekind Exp $
5 4
6obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o 5obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
7obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o 6obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
@@ -24,7 +23,7 @@ obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
24obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o 23obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
25obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o 24obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
26obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o 25obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
27obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o 26obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
28obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o 27obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
29obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o 28obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
30obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o 29obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/atmel_nand.c
index 0adb287027a2..99aec46e2145 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * drivers/mtd/nand/at91_nand.c
3 *
4 * Copyright (C) 2003 Rick Bronson 2 * Copyright (C) 2003 Rick Bronson
5 * 3 *
6 * Derived from drivers/mtd/nand/autcpu12.c 4 * Derived from drivers/mtd/nand/autcpu12.c
@@ -31,20 +29,19 @@
31#include <linux/mtd/nand.h> 29#include <linux/mtd/nand.h>
32#include <linux/mtd/partitions.h> 30#include <linux/mtd/partitions.h>
33 31
34#include <asm/io.h> 32#include <linux/gpio.h>
35#include <asm/sizes.h> 33#include <linux/io.h>
36 34
37#include <asm/hardware.h>
38#include <asm/arch/board.h> 35#include <asm/arch/board.h>
39#include <asm/arch/gpio.h> 36#include <asm/arch/cpu.h>
40 37
41#ifdef CONFIG_MTD_NAND_AT91_ECC_HW 38#ifdef CONFIG_MTD_NAND_ATMEL_ECC_HW
42#define hard_ecc 1 39#define hard_ecc 1
43#else 40#else
44#define hard_ecc 0 41#define hard_ecc 0
45#endif 42#endif
46 43
47#ifdef CONFIG_MTD_NAND_AT91_ECC_NONE 44#ifdef CONFIG_MTD_NAND_ATMEL_ECC_NONE
48#define no_ecc 1 45#define no_ecc 1
49#else 46#else
50#define no_ecc 0 47#define no_ecc 0
@@ -52,18 +49,18 @@
52 49
53/* Register access macros */ 50/* Register access macros */
54#define ecc_readl(add, reg) \ 51#define ecc_readl(add, reg) \
55 __raw_readl(add + AT91_ECC_##reg) 52 __raw_readl(add + ATMEL_ECC_##reg)
56#define ecc_writel(add, reg, value) \ 53#define ecc_writel(add, reg, value) \
57 __raw_writel((value), add + AT91_ECC_##reg) 54 __raw_writel((value), add + ATMEL_ECC_##reg)
58 55
59#include <asm/arch/at91_ecc.h> /* AT91SAM9260/3 ECC registers */ 56#include "atmel_nand_ecc.h" /* Hardware ECC registers */
60 57
61/* oob layout for large page size 58/* oob layout for large page size
62 * bad block info is on bytes 0 and 1 59 * bad block info is on bytes 0 and 1
63 * the bytes have to be consecutive to avoid 60 * the bytes have to be consecutive to avoid
64 * several NAND_CMD_RNDOUT during read 61 * several NAND_CMD_RNDOUT during read
65 */ 62 */
66static struct nand_ecclayout at91_oobinfo_large = { 63static struct nand_ecclayout atmel_oobinfo_large = {
67 .eccbytes = 4, 64 .eccbytes = 4,
68 .eccpos = {60, 61, 62, 63}, 65 .eccpos = {60, 61, 62, 63},
69 .oobfree = { 66 .oobfree = {
@@ -76,7 +73,7 @@ static struct nand_ecclayout at91_oobinfo_large = {
76 * the bytes have to be consecutive to avoid 73 * the bytes have to be consecutive to avoid
77 * several NAND_CMD_RNDOUT during read 74 * several NAND_CMD_RNDOUT during read
78 */ 75 */
79static struct nand_ecclayout at91_oobinfo_small = { 76static struct nand_ecclayout atmel_oobinfo_small = {
80 .eccbytes = 4, 77 .eccbytes = 4,
81 .eccpos = {0, 1, 2, 3}, 78 .eccpos = {0, 1, 2, 3},
82 .oobfree = { 79 .oobfree = {
@@ -84,11 +81,11 @@ static struct nand_ecclayout at91_oobinfo_small = {
84 }, 81 },
85}; 82};
86 83
87struct at91_nand_host { 84struct atmel_nand_host {
88 struct nand_chip nand_chip; 85 struct nand_chip nand_chip;
89 struct mtd_info mtd; 86 struct mtd_info mtd;
90 void __iomem *io_base; 87 void __iomem *io_base;
91 struct at91_nand_data *board; 88 struct atmel_nand_data *board;
92 struct device *dev; 89 struct device *dev;
93 void __iomem *ecc; 90 void __iomem *ecc;
94}; 91};
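A quick note on the ecc_readl()/ecc_writel() helpers renamed earlier in this hunk series: they are plain token-pasting macros, so after the rename a call resolves against the ATMEL_ECC_* register offsets. For example (illustration only, not part of the patch):

	/* ecc_readl(host->ecc, SR) expands to: */
	__raw_readl(host->ecc + ATMEL_ECC_SR);

	/* ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112) expands to: */
	__raw_writel(ATMEL_ECC_PAGESIZE_2112, host->ecc + ATMEL_ECC_MR);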
@@ -96,34 +93,34 @@ struct at91_nand_host {
96/* 93/*
97 * Enable NAND. 94 * Enable NAND.
98 */ 95 */
99static void at91_nand_enable(struct at91_nand_host *host) 96static void atmel_nand_enable(struct atmel_nand_host *host)
100{ 97{
101 if (host->board->enable_pin) 98 if (host->board->enable_pin)
102 at91_set_gpio_value(host->board->enable_pin, 0); 99 gpio_set_value(host->board->enable_pin, 0);
103} 100}
104 101
105/* 102/*
106 * Disable NAND. 103 * Disable NAND.
107 */ 104 */
108static void at91_nand_disable(struct at91_nand_host *host) 105static void atmel_nand_disable(struct atmel_nand_host *host)
109{ 106{
110 if (host->board->enable_pin) 107 if (host->board->enable_pin)
111 at91_set_gpio_value(host->board->enable_pin, 1); 108 gpio_set_value(host->board->enable_pin, 1);
112} 109}
113 110
114/* 111/*
115 * Hardware specific access to control-lines 112 * Hardware specific access to control-lines
116 */ 113 */
117static void at91_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 114static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
118{ 115{
119 struct nand_chip *nand_chip = mtd->priv; 116 struct nand_chip *nand_chip = mtd->priv;
120 struct at91_nand_host *host = nand_chip->priv; 117 struct atmel_nand_host *host = nand_chip->priv;
121 118
122 if (ctrl & NAND_CTRL_CHANGE) { 119 if (ctrl & NAND_CTRL_CHANGE) {
123 if (ctrl & NAND_NCE) 120 if (ctrl & NAND_NCE)
124 at91_nand_enable(host); 121 atmel_nand_enable(host);
125 else 122 else
126 at91_nand_disable(host); 123 atmel_nand_disable(host);
127 } 124 }
128 if (cmd == NAND_CMD_NONE) 125 if (cmd == NAND_CMD_NONE)
129 return; 126 return;
@@ -137,18 +134,49 @@ static void at91_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
137/* 134/*
138 * Read the Device Ready pin. 135 * Read the Device Ready pin.
139 */ 136 */
140static int at91_nand_device_ready(struct mtd_info *mtd) 137static int atmel_nand_device_ready(struct mtd_info *mtd)
141{ 138{
142 struct nand_chip *nand_chip = mtd->priv; 139 struct nand_chip *nand_chip = mtd->priv;
143 struct at91_nand_host *host = nand_chip->priv; 140 struct atmel_nand_host *host = nand_chip->priv;
144 141
145 return at91_get_gpio_value(host->board->rdy_pin); 142 return gpio_get_value(host->board->rdy_pin);
143}
144
145/*
146 * Minimal-overhead PIO for data access.
147 */
148static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
149{
150 struct nand_chip *nand_chip = mtd->priv;
151
152 __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
153}
154
155static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
156{
157 struct nand_chip *nand_chip = mtd->priv;
158
159 __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
160}
161
162static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
163{
164 struct nand_chip *nand_chip = mtd->priv;
165
166 __raw_writesb(nand_chip->IO_ADDR_W, buf, len);
167}
168
169static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
170{
171 struct nand_chip *nand_chip = mtd->priv;
172
173 __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
146} 174}
147 175
148/* 176/*
149 * write oob for small pages 177 * write oob for small pages
150 */ 178 */
151static int at91_nand_write_oob_512(struct mtd_info *mtd, 179static int atmel_nand_write_oob_512(struct mtd_info *mtd,
152 struct nand_chip *chip, int page) 180 struct nand_chip *chip, int page)
153{ 181{
154 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; 182 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
@@ -176,7 +204,7 @@ static int at91_nand_write_oob_512(struct mtd_info *mtd,
176/* 204/*
177 * read oob for small pages 205 * read oob for small pages
178 */ 206 */
179static int at91_nand_read_oob_512(struct mtd_info *mtd, 207static int atmel_nand_read_oob_512(struct mtd_info *mtd,
180 struct nand_chip *chip, int page, int sndcmd) 208 struct nand_chip *chip, int page, int sndcmd)
181{ 209{
182 if (sndcmd) { 210 if (sndcmd) {
@@ -196,11 +224,11 @@ static int at91_nand_read_oob_512(struct mtd_info *mtd,
196 * dat: raw data (unused) 224 * dat: raw data (unused)
197 * ecc_code: buffer for ECC 225 * ecc_code: buffer for ECC
198 */ 226 */
199static int at91_nand_calculate(struct mtd_info *mtd, 227static int atmel_nand_calculate(struct mtd_info *mtd,
200 const u_char *dat, unsigned char *ecc_code) 228 const u_char *dat, unsigned char *ecc_code)
201{ 229{
202 struct nand_chip *nand_chip = mtd->priv; 230 struct nand_chip *nand_chip = mtd->priv;
203 struct at91_nand_host *host = nand_chip->priv; 231 struct atmel_nand_host *host = nand_chip->priv;
204 uint32_t *eccpos = nand_chip->ecc.layout->eccpos; 232 uint32_t *eccpos = nand_chip->ecc.layout->eccpos;
205 unsigned int ecc_value; 233 unsigned int ecc_value;
206 234
@@ -211,7 +239,7 @@ static int at91_nand_calculate(struct mtd_info *mtd,
211 ecc_code[eccpos[1]] = (ecc_value >> 8) & 0xFF; 239 ecc_code[eccpos[1]] = (ecc_value >> 8) & 0xFF;
212 240
213 /* get the last 2 ECC bytes */ 241 /* get the last 2 ECC bytes */
214 ecc_value = ecc_readl(host->ecc, NPR) & AT91_ECC_NPARITY; 242 ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY;
215 243
216 ecc_code[eccpos[2]] = ecc_value & 0xFF; 244 ecc_code[eccpos[2]] = ecc_value & 0xFF;
217 ecc_code[eccpos[3]] = (ecc_value >> 8) & 0xFF; 245 ecc_code[eccpos[3]] = (ecc_value >> 8) & 0xFF;
@@ -226,7 +254,7 @@ static int at91_nand_calculate(struct mtd_info *mtd,
226 * chip: nand chip info structure 254 * chip: nand chip info structure
227 * buf: buffer to store read data 255 * buf: buffer to store read data
228 */ 256 */
229static int at91_nand_read_page(struct mtd_info *mtd, 257static int atmel_nand_read_page(struct mtd_info *mtd,
230 struct nand_chip *chip, uint8_t *buf) 258 struct nand_chip *chip, uint8_t *buf)
231{ 259{
232 int eccsize = chip->ecc.size; 260 int eccsize = chip->ecc.size;
@@ -237,6 +265,19 @@ static int at91_nand_read_page(struct mtd_info *mtd,
237 uint8_t *ecc_pos; 265 uint8_t *ecc_pos;
238 int stat; 266 int stat;
239 267
268 /*
269 * Errata: ALE is incorrectly wired up to the ECC controller
270 * on the AP7000, so it will include the address cycles in the
271 * ECC calculation.
272 *
273 * Workaround: Reset the parity registers before reading the
274 * actual data.
275 */
276 if (cpu_is_at32ap7000()) {
277 struct atmel_nand_host *host = chip->priv;
278 ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
279 }
280
240 /* read the page */ 281 /* read the page */
241 chip->read_buf(mtd, p, eccsize); 282 chip->read_buf(mtd, p, eccsize);
242 283
@@ -285,11 +326,11 @@ static int at91_nand_read_page(struct mtd_info *mtd,
285 * 326 *
286 * Detect and correct a 1 bit error for a page 327 * Detect and correct a 1 bit error for a page
287 */ 328 */
288static int at91_nand_correct(struct mtd_info *mtd, u_char *dat, 329static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
289 u_char *read_ecc, u_char *isnull) 330 u_char *read_ecc, u_char *isnull)
290{ 331{
291 struct nand_chip *nand_chip = mtd->priv; 332 struct nand_chip *nand_chip = mtd->priv;
292 struct at91_nand_host *host = nand_chip->priv; 333 struct atmel_nand_host *host = nand_chip->priv;
293 unsigned int ecc_status; 334 unsigned int ecc_status;
294 unsigned int ecc_word, ecc_bit; 335 unsigned int ecc_word, ecc_bit;
295 336
@@ -297,43 +338,43 @@ static int at91_nand_correct(struct mtd_info *mtd, u_char *dat,
297 ecc_status = ecc_readl(host->ecc, SR); 338 ecc_status = ecc_readl(host->ecc, SR);
298 339
299 /* if there's no error */ 340 /* if there's no error */
300 if (likely(!(ecc_status & AT91_ECC_RECERR))) 341 if (likely(!(ecc_status & ATMEL_ECC_RECERR)))
301 return 0; 342 return 0;
302 343
303 /* get error bit offset (4 bits) */ 344 /* get error bit offset (4 bits) */
304 ecc_bit = ecc_readl(host->ecc, PR) & AT91_ECC_BITADDR; 345 ecc_bit = ecc_readl(host->ecc, PR) & ATMEL_ECC_BITADDR;
305 /* get word address (12 bits) */ 346 /* get word address (12 bits) */
306 ecc_word = ecc_readl(host->ecc, PR) & AT91_ECC_WORDADDR; 347 ecc_word = ecc_readl(host->ecc, PR) & ATMEL_ECC_WORDADDR;
307 ecc_word >>= 4; 348 ecc_word >>= 4;
308 349
309 /* if there are multiple errors */ 350 /* if there are multiple errors */
310 if (ecc_status & AT91_ECC_MULERR) { 351 if (ecc_status & ATMEL_ECC_MULERR) {
311 /* check if it is a freshly erased block 352 /* check if it is a freshly erased block
312 * (filled with 0xff) */ 353 * (filled with 0xff) */
313 if ((ecc_bit == AT91_ECC_BITADDR) 354 if ((ecc_bit == ATMEL_ECC_BITADDR)
314 && (ecc_word == (AT91_ECC_WORDADDR >> 4))) { 355 && (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) {
315 /* the block has just been erased, return OK */ 356 /* the block has just been erased, return OK */
316 return 0; 357 return 0;
317 } 358 }
318 /* it doesn't seem to be a freshly 359
319 * erased block. 360 * erased block.
320 * We can't correct so many errors */ 361 * We can't correct so many errors */
321 dev_dbg(host->dev, "at91_nand : multiple errors detected." 362 dev_dbg(host->dev, "atmel_nand : multiple errors detected."
322 " Unable to correct.\n"); 363 " Unable to correct.\n");
323 return -EIO; 364 return -EIO;
324 } 365 }
325 366
326 /* if there's a single bit error : we can correct it */ 367 /* if there's a single bit error : we can correct it */
327 if (ecc_status & AT91_ECC_ECCERR) { 368 if (ecc_status & ATMEL_ECC_ECCERR) {
328 /* there's nothing much to do here. 369 /* there's nothing much to do here.
329 * the bit error is on the ECC itself. 370 * the bit error is on the ECC itself.
330 */ 371 */
331 dev_dbg(host->dev, "at91_nand : one bit error on ECC code." 372 dev_dbg(host->dev, "atmel_nand : one bit error on ECC code."
332 " Nothing to correct\n"); 373 " Nothing to correct\n");
333 return 0; 374 return 0;
334 } 375 }
335 376
336 dev_dbg(host->dev, "at91_nand : one bit error on data." 377 dev_dbg(host->dev, "atmel_nand : one bit error on data."
337 " (word offset in the page :" 378 " (word offset in the page :"
338 " 0x%x bit offset : 0x%x)\n", 379 " 0x%x bit offset : 0x%x)\n",
339 ecc_word, ecc_bit); 380 ecc_word, ecc_bit);
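A concrete decode of the parity register read above (value invented for illustration): if ATMEL_ECC_PR returned 0x0153, the failing bit offset would be 0x0153 & ATMEL_ECC_BITADDR = 0x3 and the failing word offset (0x0153 & ATMEL_ECC_WORDADDR) >> 4 = 0x15, so for an 8-bit bus the single-bit repair in the following hunk reduces to dat[0x15] ^= (1 << 3).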
@@ -345,14 +386,21 @@ static int at91_nand_correct(struct mtd_info *mtd, u_char *dat,
345 /* 8 bits words */ 386 /* 8 bits words */
346 dat[ecc_word] ^= (1 << ecc_bit); 387 dat[ecc_word] ^= (1 << ecc_bit);
347 } 388 }
348 dev_dbg(host->dev, "at91_nand : error corrected\n"); 389 dev_dbg(host->dev, "atmel_nand : error corrected\n");
349 return 1; 390 return 1;
350} 391}
351 392
352/* 393/*
353 * Enable HW ECC : unsused 394 * Enable HW ECC : unused on most chips
354 */ 395 */
355static void at91_nand_hwctl(struct mtd_info *mtd, int mode) { ; } 396static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
397{
398 if (cpu_is_at32ap7000()) {
399 struct nand_chip *nand_chip = mtd->priv;
400 struct atmel_nand_host *host = nand_chip->priv;
401 ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
402 }
403}
356 404
357#ifdef CONFIG_MTD_PARTITIONS 405#ifdef CONFIG_MTD_PARTITIONS
358static const char *part_probes[] = { "cmdlinepart", NULL }; 406static const char *part_probes[] = { "cmdlinepart", NULL };
@@ -361,9 +409,9 @@ static const char *part_probes[] = { "cmdlinepart", NULL };
361/* 409/*
362 * Probe for the NAND device. 410 * Probe for the NAND device.
363 */ 411 */
364static int __init at91_nand_probe(struct platform_device *pdev) 412static int __init atmel_nand_probe(struct platform_device *pdev)
365{ 413{
366 struct at91_nand_host *host; 414 struct atmel_nand_host *host;
367 struct mtd_info *mtd; 415 struct mtd_info *mtd;
368 struct nand_chip *nand_chip; 416 struct nand_chip *nand_chip;
369 struct resource *regs; 417 struct resource *regs;
@@ -375,24 +423,24 @@ static int __init at91_nand_probe(struct platform_device *pdev)
375 int num_partitions = 0; 423 int num_partitions = 0;
376#endif 424#endif
377 425
378 /* Allocate memory for the device structure (and zero it) */
379 host = kzalloc(sizeof(struct at91_nand_host), GFP_KERNEL);
380 if (!host) {
381 printk(KERN_ERR "at91_nand: failed to allocate device structure.\n");
382 return -ENOMEM;
383 }
384
385 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 426 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
386 if (!mem) { 427 if (!mem) {
387 printk(KERN_ERR "at91_nand: can't get I/O resource mem\n"); 428 printk(KERN_ERR "atmel_nand: can't get I/O resource mem\n");
388 return -ENXIO; 429 return -ENXIO;
389 } 430 }
390 431
432 /* Allocate memory for the device structure (and zero it) */
433 host = kzalloc(sizeof(struct atmel_nand_host), GFP_KERNEL);
434 if (!host) {
435 printk(KERN_ERR "atmel_nand: failed to allocate device structure.\n");
436 return -ENOMEM;
437 }
438
391 host->io_base = ioremap(mem->start, mem->end - mem->start + 1); 439 host->io_base = ioremap(mem->start, mem->end - mem->start + 1);
392 if (host->io_base == NULL) { 440 if (host->io_base == NULL) {
393 printk(KERN_ERR "at91_nand: ioremap failed\n"); 441 printk(KERN_ERR "atmel_nand: ioremap failed\n");
394 kfree(host); 442 res = -EIO;
395 return -EIO; 443 goto err_nand_ioremap;
396 } 444 }
397 445
398 mtd = &host->mtd; 446 mtd = &host->mtd;
@@ -407,14 +455,14 @@ static int __init at91_nand_probe(struct platform_device *pdev)
407 /* Set address of NAND IO lines */ 455 /* Set address of NAND IO lines */
408 nand_chip->IO_ADDR_R = host->io_base; 456 nand_chip->IO_ADDR_R = host->io_base;
409 nand_chip->IO_ADDR_W = host->io_base; 457 nand_chip->IO_ADDR_W = host->io_base;
410 nand_chip->cmd_ctrl = at91_nand_cmd_ctrl; 458 nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
411 459
412 if (host->board->rdy_pin) 460 if (host->board->rdy_pin)
413 nand_chip->dev_ready = at91_nand_device_ready; 461 nand_chip->dev_ready = atmel_nand_device_ready;
414 462
415 regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); 463 regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
416 if (!regs && hard_ecc) { 464 if (!regs && hard_ecc) {
417 printk(KERN_ERR "at91_nand: can't get I/O resource " 465 printk(KERN_ERR "atmel_nand: can't get I/O resource "
418 "regs\nFalling back on software ECC\n"); 466 "regs\nFalling back on software ECC\n");
419 } 467 }
420 468
@@ -424,15 +472,15 @@ static int __init at91_nand_probe(struct platform_device *pdev)
424 if (hard_ecc && regs) { 472 if (hard_ecc && regs) {
425 host->ecc = ioremap(regs->start, regs->end - regs->start + 1); 473 host->ecc = ioremap(regs->start, regs->end - regs->start + 1);
426 if (host->ecc == NULL) { 474 if (host->ecc == NULL) {
427 printk(KERN_ERR "at91_nand: ioremap failed\n"); 475 printk(KERN_ERR "atmel_nand: ioremap failed\n");
428 res = -EIO; 476 res = -EIO;
429 goto err_ecc_ioremap; 477 goto err_ecc_ioremap;
430 } 478 }
431 nand_chip->ecc.mode = NAND_ECC_HW_SYNDROME; 479 nand_chip->ecc.mode = NAND_ECC_HW_SYNDROME;
432 nand_chip->ecc.calculate = at91_nand_calculate; 480 nand_chip->ecc.calculate = atmel_nand_calculate;
433 nand_chip->ecc.correct = at91_nand_correct; 481 nand_chip->ecc.correct = atmel_nand_correct;
434 nand_chip->ecc.hwctl = at91_nand_hwctl; 482 nand_chip->ecc.hwctl = atmel_nand_hwctl;
435 nand_chip->ecc.read_page = at91_nand_read_page; 483 nand_chip->ecc.read_page = atmel_nand_read_page;
436 nand_chip->ecc.bytes = 4; 484 nand_chip->ecc.bytes = 4;
437 nand_chip->ecc.prepad = 0; 485 nand_chip->ecc.prepad = 0;
438 nand_chip->ecc.postpad = 0; 486 nand_chip->ecc.postpad = 0;
@@ -440,24 +488,30 @@ static int __init at91_nand_probe(struct platform_device *pdev)
440 488
441 nand_chip->chip_delay = 20; /* 20us command delay time */ 489 nand_chip->chip_delay = 20; /* 20us command delay time */
442 490
443 if (host->board->bus_width_16) /* 16-bit bus width */ 491 if (host->board->bus_width_16) { /* 16-bit bus width */
444 nand_chip->options |= NAND_BUSWIDTH_16; 492 nand_chip->options |= NAND_BUSWIDTH_16;
493 nand_chip->read_buf = atmel_read_buf16;
494 nand_chip->write_buf = atmel_write_buf16;
495 } else {
496 nand_chip->read_buf = atmel_read_buf;
497 nand_chip->write_buf = atmel_write_buf;
498 }
445 499
446 platform_set_drvdata(pdev, host); 500 platform_set_drvdata(pdev, host);
447 at91_nand_enable(host); 501 atmel_nand_enable(host);
448 502
449 if (host->board->det_pin) { 503 if (host->board->det_pin) {
450 if (at91_get_gpio_value(host->board->det_pin)) { 504 if (gpio_get_value(host->board->det_pin)) {
451 printk ("No SmartMedia card inserted.\n"); 505 printk("No SmartMedia card inserted.\n");
452 res = ENXIO; 506 res = ENXIO;
453 goto out; 507 goto err_no_card;
454 } 508 }
455 } 509 }
456 510
457 /* first scan to find the device and get the page size */ 511 /* first scan to find the device and get the page size */
458 if (nand_scan_ident(mtd, 1)) { 512 if (nand_scan_ident(mtd, 1)) {
459 res = -ENXIO; 513 res = -ENXIO;
460 goto out; 514 goto err_scan_ident;
461 } 515 }
462 516
463 if (nand_chip->ecc.mode == NAND_ECC_HW_SYNDROME) { 517 if (nand_chip->ecc.mode == NAND_ECC_HW_SYNDROME) {
@@ -467,22 +521,22 @@ static int __init at91_nand_probe(struct platform_device *pdev)
467 /* set ECC page size and oob layout */ 521 /* set ECC page size and oob layout */
468 switch (mtd->writesize) { 522 switch (mtd->writesize) {
469 case 512: 523 case 512:
470 nand_chip->ecc.layout = &at91_oobinfo_small; 524 nand_chip->ecc.layout = &atmel_oobinfo_small;
471 nand_chip->ecc.read_oob = at91_nand_read_oob_512; 525 nand_chip->ecc.read_oob = atmel_nand_read_oob_512;
472 nand_chip->ecc.write_oob = at91_nand_write_oob_512; 526 nand_chip->ecc.write_oob = atmel_nand_write_oob_512;
473 ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_528); 527 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
474 break; 528 break;
475 case 1024: 529 case 1024:
476 nand_chip->ecc.layout = &at91_oobinfo_large; 530 nand_chip->ecc.layout = &atmel_oobinfo_large;
477 ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_1056); 531 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
478 break; 532 break;
479 case 2048: 533 case 2048:
480 nand_chip->ecc.layout = &at91_oobinfo_large; 534 nand_chip->ecc.layout = &atmel_oobinfo_large;
481 ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_2112); 535 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
482 break; 536 break;
483 case 4096: 537 case 4096:
484 nand_chip->ecc.layout = &at91_oobinfo_large; 538 nand_chip->ecc.layout = &atmel_oobinfo_large;
485 ecc_writel(host->ecc, MR, AT91_ECC_PAGESIZE_4224); 539 ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
486 break; 540 break;
487 default: 541 default:
488 /* page size not handled by HW ECC */ 542 /* page size not handled by HW ECC */
@@ -502,12 +556,12 @@ static int __init at91_nand_probe(struct platform_device *pdev)
502 /* second phase scan */ 556 /* second phase scan */
503 if (nand_scan_tail(mtd)) { 557 if (nand_scan_tail(mtd)) {
504 res = -ENXIO; 558 res = -ENXIO;
505 goto out; 559 goto err_scan_tail;
506 } 560 }
507 561
508#ifdef CONFIG_MTD_PARTITIONS 562#ifdef CONFIG_MTD_PARTITIONS
509#ifdef CONFIG_MTD_CMDLINE_PARTS 563#ifdef CONFIG_MTD_CMDLINE_PARTS
510 mtd->name = "at91_nand"; 564 mtd->name = "atmel_nand";
511 num_partitions = parse_mtd_partitions(mtd, part_probes, 565 num_partitions = parse_mtd_partitions(mtd, part_probes,
512 &partitions, 0); 566 &partitions, 0);
513#endif 567#endif
@@ -516,9 +570,9 @@ static int __init at91_nand_probe(struct platform_device *pdev)
516 &num_partitions); 570 &num_partitions);
517 571
518 if ((!partitions) || (num_partitions == 0)) { 572 if ((!partitions) || (num_partitions == 0)) {
519 printk(KERN_ERR "at91_nand: No partitions defined, or unsupported device.\n"); 573 printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
520 res = ENXIO; 574 res = ENXIO;
521 goto release; 575 goto err_no_partitions;
522 } 576 }
523 577
524 res = add_mtd_partitions(mtd, partitions, num_partitions); 578 res = add_mtd_partitions(mtd, partitions, num_partitions);
@@ -530,17 +584,19 @@ static int __init at91_nand_probe(struct platform_device *pdev)
530 return res; 584 return res;
531 585
532#ifdef CONFIG_MTD_PARTITIONS 586#ifdef CONFIG_MTD_PARTITIONS
533release: 587err_no_partitions:
534#endif 588#endif
535 nand_release(mtd); 589 nand_release(mtd);
536 590err_scan_tail:
537out: 591err_scan_ident:
538 iounmap(host->ecc); 592err_no_card:
539 593 atmel_nand_disable(host);
540err_ecc_ioremap:
541 at91_nand_disable(host);
542 platform_set_drvdata(pdev, NULL); 594 platform_set_drvdata(pdev, NULL);
595 if (host->ecc)
596 iounmap(host->ecc);
597err_ecc_ioremap:
543 iounmap(host->io_base); 598 iounmap(host->io_base);
599err_nand_ioremap:
544 kfree(host); 600 kfree(host);
545 return res; 601 return res;
546} 602}
@@ -548,47 +604,47 @@ err_ecc_ioremap:
548/* 604/*
549 * Remove a NAND device. 605 * Remove a NAND device.
550 */ 606 */
551static int __devexit at91_nand_remove(struct platform_device *pdev) 607static int __exit atmel_nand_remove(struct platform_device *pdev)
552{ 608{
553 struct at91_nand_host *host = platform_get_drvdata(pdev); 609 struct atmel_nand_host *host = platform_get_drvdata(pdev);
554 struct mtd_info *mtd = &host->mtd; 610 struct mtd_info *mtd = &host->mtd;
555 611
556 nand_release(mtd); 612 nand_release(mtd);
557 613
558 at91_nand_disable(host); 614 atmel_nand_disable(host);
559 615
616 if (host->ecc)
617 iounmap(host->ecc);
560 iounmap(host->io_base); 618 iounmap(host->io_base);
561 iounmap(host->ecc);
562 kfree(host); 619 kfree(host);
563 620
564 return 0; 621 return 0;
565} 622}
566 623
567static struct platform_driver at91_nand_driver = { 624static struct platform_driver atmel_nand_driver = {
568 .probe = at91_nand_probe, 625 .remove = __exit_p(atmel_nand_remove),
569 .remove = at91_nand_remove,
570 .driver = { 626 .driver = {
571 .name = "at91_nand", 627 .name = "atmel_nand",
572 .owner = THIS_MODULE, 628 .owner = THIS_MODULE,
573 }, 629 },
574}; 630};
575 631
576static int __init at91_nand_init(void) 632static int __init atmel_nand_init(void)
577{ 633{
578 return platform_driver_register(&at91_nand_driver); 634 return platform_driver_probe(&atmel_nand_driver, atmel_nand_probe);
579} 635}
580 636
581 637
582static void __exit at91_nand_exit(void) 638static void __exit atmel_nand_exit(void)
583{ 639{
584 platform_driver_unregister(&at91_nand_driver); 640 platform_driver_unregister(&atmel_nand_driver);
585} 641}
586 642
587 643
588module_init(at91_nand_init); 644module_init(atmel_nand_init);
589module_exit(at91_nand_exit); 645module_exit(atmel_nand_exit);
590 646
591MODULE_LICENSE("GPL"); 647MODULE_LICENSE("GPL");
592MODULE_AUTHOR("Rick Bronson"); 648MODULE_AUTHOR("Rick Bronson");
593MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91RM9200 / AT91SAM9"); 649MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91 / AVR32");
594MODULE_ALIAS("platform:at91_nand"); 650MODULE_ALIAS("platform:atmel_nand");
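A note on the registration change above: platform_driver_probe() binds the driver to its device(s) once, at registration time, and never probes again later. That is what allows atmel_nand_probe() to stay in __init memory, the .probe member to be dropped from the platform_driver, and the remove hook to be wrapped in __exit_p(); the trade-off, acceptable for an on-SoC controller, is that a device registered later would not be bound.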
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
new file mode 100644
index 000000000000..1ee7f993db1c
--- /dev/null
+++ b/drivers/mtd/nand/atmel_nand_ecc.h
@@ -0,0 +1,36 @@
1/*
2 * Error Corrected Code Controller (ECC) - System peripheral registers.
3 * Based on AT91SAM9260 datasheet revision B.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef ATMEL_NAND_ECC_H
12#define ATMEL_NAND_ECC_H
13
14#define ATMEL_ECC_CR 0x00 /* Control register */
15#define ATMEL_ECC_RST (1 << 0) /* Reset parity */
16
17#define ATMEL_ECC_MR 0x04 /* Mode register */
18#define ATMEL_ECC_PAGESIZE (3 << 0) /* Page Size */
19#define ATMEL_ECC_PAGESIZE_528 (0)
20#define ATMEL_ECC_PAGESIZE_1056 (1)
21#define ATMEL_ECC_PAGESIZE_2112 (2)
22#define ATMEL_ECC_PAGESIZE_4224 (3)
23
24#define ATMEL_ECC_SR 0x08 /* Status register */
25#define ATMEL_ECC_RECERR (1 << 0) /* Recoverable Error */
26#define ATMEL_ECC_ECCERR (1 << 1) /* ECC Single Bit Error */
27#define ATMEL_ECC_MULERR (1 << 2) /* Multiple Errors */
28
29#define ATMEL_ECC_PR 0x0c /* Parity register */
30#define ATMEL_ECC_BITADDR (0xf << 0) /* Bit Error Address */
31#define ATMEL_ECC_WORDADDR (0xfff << 4) /* Word Error Address */
32
33#define ATMEL_ECC_NPR 0x10 /* NParity register */
34#define ATMEL_ECC_NPARITY (0xffff << 0) /* NParity */
35
36#endif
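A quick sanity check on the ATMEL_ECC_PAGESIZE_* encodings above: they name the page-plus-spare sizes the controller walks over, i.e. 528 = 512 + 16, 1056 = 1024 + 32, 2112 = 2048 + 64 and 4224 = 4096 + 128 bytes, which is why the probe code earlier in this patch selects them from mtd->writesize values of 512, 1024, 2048 and 4096.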
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 09e421a96893..761946ea45b1 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2004 Embedded Edge, LLC 4 * Copyright (C) 2004 Embedded Edge, LLC
5 * 5 *
6 * $Id: au1550nd.c,v 1.13 2005/11/07 11:14:30 gleixner Exp $
7 *
8 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
@@ -604,8 +602,6 @@ module_init(au1xxx_nand_init);
604 */ 602 */
605static void __exit au1550_cleanup(void) 603static void __exit au1550_cleanup(void)
606{ 604{
607 struct nand_chip *this = (struct nand_chip *)&au1550_mtd[1];
608
609 /* Release resources, unregister device */ 605 /* Release resources, unregister device */
610 nand_release(au1550_mtd); 606 nand_release(au1550_mtd);
611 607
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index dd38011ee0b7..553dd7e9b41c 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -6,8 +6,6 @@
6 * Derived from drivers/mtd/spia.c 6 * Derived from drivers/mtd/spia.c
7 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) 7 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
8 * 8 *
9 * $Id: autcpu12.c,v 1.23 2005/11/07 11:14:30 gleixner Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index da6ceaa80ba1..95345d051579 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -626,10 +626,12 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
626{ 626{
627 struct mtd_info *mtd; 627 struct mtd_info *mtd;
628 struct cafe_priv *cafe; 628 struct cafe_priv *cafe;
629 struct mtd_partition *parts;
630 uint32_t ctrl; 629 uint32_t ctrl;
631 int nr_parts;
632 int err = 0; 630 int err = 0;
631#ifdef CONFIG_MTD_PARTITIONS
632 struct mtd_partition *parts;
633 int nr_parts;
634#endif
633 635
634 /* Very old versions shared the same PCI ident for all three 636 /* Very old versions shared the same PCI ident for all three
635 functions on the chip. Verify the class too... */ 637 functions on the chip. Verify the class too... */
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 0e72153b3297..765d4f0f7c86 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -15,8 +15,6 @@
15 * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de> 15 * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
16 * 16 *
17 * Interface to generic NAND code for M-Systems DiskOnChip devices 17 * Interface to generic NAND code for M-Systems DiskOnChip devices
18 *
19 * $Id: diskonchip.c,v 1.55 2005/11/07 11:14:30 gleixner Exp $
20 */ 18 */
21 19
22#include <linux/kernel.h> 20#include <linux/kernel.h>
@@ -54,8 +52,6 @@ static unsigned long __initdata doc_locations[] = {
54 0xe0000, 0xe2000, 0xe4000, 0xe6000, 52 0xe0000, 0xe2000, 0xe4000, 0xe6000,
55 0xe8000, 0xea000, 0xec000, 0xee000, 53 0xe8000, 0xea000, 0xec000, 0xee000,
56#endif /* CONFIG_MTD_DOCPROBE_HIGH */ 54#endif /* CONFIG_MTD_DOCPROBE_HIGH */
57#elif defined(__PPC__)
58 0xe4000000,
59#else 55#else
60#warning Unknown architecture for DiskOnChip. No default probe locations defined 56#warning Unknown architecture for DiskOnChip. No default probe locations defined
61#endif 57#endif
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index ba67bbec20d3..387e4352903e 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -6,8 +6,6 @@
6 * Derived from drivers/mtd/nand/autcpu12.c 6 * Derived from drivers/mtd/nand/autcpu12.c
7 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) 7 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
8 * 8 *
9 * $Id: edb7312.c,v 1.12 2005/11/07 11:14:30 gleixner Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/excite_nandflash.c b/drivers/mtd/nand/excite_nandflash.c
index bed87290decc..ced14b5294d5 100644
--- a/drivers/mtd/nand/excite_nandflash.c
+++ b/drivers/mtd/nand/excite_nandflash.c
@@ -209,7 +209,7 @@ static int __init excite_nand_probe(struct device *dev)
209 if (likely(!scan_res)) { 209 if (likely(!scan_res)) {
210 DEBUG(MTD_DEBUG_LEVEL2, "%s: register partitions\n", module_id); 210 DEBUG(MTD_DEBUG_LEVEL2, "%s: register partitions\n", module_id);
211 add_mtd_partitions(&drvdata->board_mtd, partition_info, 211 add_mtd_partitions(&drvdata->board_mtd, partition_info,
212 sizeof partition_info / sizeof partition_info[0]); 212 ARRAY_SIZE(partition_info));
213 } else { 213 } else {
214 iounmap(drvdata->regs); 214 iounmap(drvdata->regs);
215 kfree(drvdata); 215 kfree(drvdata);
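The ARRAY_SIZE() conversion is behaviour-neutral: the kernel macro is essentially sizeof(arr) / sizeof((arr)[0]) plus a compile-time check that its argument really is an array, so it yields the same element count as the open-coded expression while refusing to silently accept a pointer.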
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 4b69aacdf5ca..9dff51351f4f 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -89,7 +89,6 @@ static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = {
89 .eccbytes = 3, 89 .eccbytes = 3,
90 .eccpos = {6, 7, 8}, 90 .eccpos = {6, 7, 8},
91 .oobfree = { {0, 5}, {9, 7} }, 91 .oobfree = { {0, 5}, {9, 7} },
92 .oobavail = 12,
93}; 92};
94 93
95/* Small Page FLASH with FMR[ECCM] = 1 */ 94/* Small Page FLASH with FMR[ECCM] = 1 */
@@ -97,7 +96,6 @@ static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = {
97 .eccbytes = 3, 96 .eccbytes = 3,
98 .eccpos = {8, 9, 10}, 97 .eccpos = {8, 9, 10},
99 .oobfree = { {0, 5}, {6, 2}, {11, 5} }, 98 .oobfree = { {0, 5}, {6, 2}, {11, 5} },
100 .oobavail = 12,
101}; 99};
102 100
103/* Large Page FLASH with FMR[ECCM] = 0 */ 101/* Large Page FLASH with FMR[ECCM] = 0 */
@@ -105,7 +103,6 @@ static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = {
105 .eccbytes = 12, 103 .eccbytes = 12,
106 .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56}, 104 .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56},
107 .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} }, 105 .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} },
108 .oobavail = 48,
109}; 106};
110 107
111/* Large Page FLASH with FMR[ECCM] = 1 */ 108/* Large Page FLASH with FMR[ECCM] = 1 */
@@ -113,7 +110,48 @@ static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
113 .eccbytes = 12, 110 .eccbytes = 12,
114 .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58}, 111 .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58},
115 .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} }, 112 .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} },
116 .oobavail = 48, 113};
114
115/*
116 * fsl_elbc_oob_lp_eccm* specify that the LP NAND's OOB free area starts at offset
117 * 1, so we have to adjust the bad block pattern. This pattern should be used for
118 * x8 chips only. So far the hardware does not support x16 chips anyway.
119 */
120static u8 scan_ff_pattern[] = { 0xff, };
121
122static struct nand_bbt_descr largepage_memorybased = {
123 .options = 0,
124 .offs = 0,
125 .len = 1,
126 .pattern = scan_ff_pattern,
127};
128
129/*
130 * ELBC may use HW ECC, so the OOB offsets that the NAND core uses for the BBT
131 * can interfere with the ECC positions; that's why we implement our own descriptors.
132 * OOB {11, 5} works for both SP and LP chips, with ECCM = 1 and ECCM = 0.
133 */
134static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
135static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
136
137static struct nand_bbt_descr bbt_main_descr = {
138 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
139 NAND_BBT_2BIT | NAND_BBT_VERSION,
140 .offs = 11,
141 .len = 4,
142 .veroffs = 15,
143 .maxblocks = 4,
144 .pattern = bbt_pattern,
145};
146
147static struct nand_bbt_descr bbt_mirror_descr = {
148 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
149 NAND_BBT_2BIT | NAND_BBT_VERSION,
150 .offs = 11,
151 .len = 4,
152 .veroffs = 15,
153 .maxblocks = 4,
154 .pattern = mirror_pattern,
117}; 155};
118 156
119/*=================================*/ 157/*=================================*/
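To make these descriptors concrete (chip size invented for illustration): with NAND_BBT_LASTBLOCK and maxblocks = 4, nand_scan_bbt() searches the out-of-band area of the last four erase blocks for the 4-byte marker "Bbt0" (mirror table: "1tbB") at OOB offset 11, with a one-byte table version at offset 15. NAND_BBT_2BIT stores two state bits per erase block, so a 1024-block device needs a 256-byte table. The NAND_USE_FLASH_BBT option added in a later hunk is what tells the core to create and maintain these on-flash tables instead of rescanning factory bad-block markers at every boot.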
@@ -687,8 +725,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
687 chip->ecc.layout = (priv->fmr & FMR_ECCM) ? 725 chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
688 &fsl_elbc_oob_lp_eccm1 : 726 &fsl_elbc_oob_lp_eccm1 :
689 &fsl_elbc_oob_lp_eccm0; 727 &fsl_elbc_oob_lp_eccm0;
690 mtd->ecclayout = chip->ecc.layout; 728 chip->badblock_pattern = &largepage_memorybased;
691 mtd->oobavail = chip->ecc.layout->oobavail;
692 } 729 }
693 } else { 730 } else {
694 dev_err(ctrl->dev, 731 dev_err(ctrl->dev,
@@ -752,8 +789,12 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
752 chip->cmdfunc = fsl_elbc_cmdfunc; 789 chip->cmdfunc = fsl_elbc_cmdfunc;
753 chip->waitfunc = fsl_elbc_wait; 790 chip->waitfunc = fsl_elbc_wait;
754 791
792 chip->bbt_td = &bbt_main_descr;
793 chip->bbt_md = &bbt_mirror_descr;
794
755 /* set up nand options */ 795 /* set up nand options */
756 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR; 796 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR |
797 NAND_USE_FLASH_BBT;
757 798
758 chip->controller = &ctrl->controller; 799 chip->controller = &ctrl->controller;
759 chip->priv = priv; 800 chip->priv = priv;
@@ -795,8 +836,8 @@ static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
795 return 0; 836 return 0;
796} 837}
797 838
798static int fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl, 839static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
799 struct device_node *node) 840 struct device_node *node)
800{ 841{
801 struct fsl_lbc_regs __iomem *lbc = ctrl->regs; 842 struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
802 struct fsl_elbc_mtd *priv; 843 struct fsl_elbc_mtd *priv;
@@ -917,7 +958,7 @@ static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl)
917 return 0; 958 return 0;
918} 959}
919 960
920static int __devexit fsl_elbc_ctrl_remove(struct of_device *ofdev) 961static int fsl_elbc_ctrl_remove(struct of_device *ofdev)
921{ 962{
922 struct fsl_elbc_ctrl *ctrl = dev_get_drvdata(&ofdev->dev); 963 struct fsl_elbc_ctrl *ctrl = dev_get_drvdata(&ofdev->dev);
923 int i; 964 int i;
@@ -1041,7 +1082,7 @@ static struct of_platform_driver fsl_elbc_ctrl_driver = {
1041 }, 1082 },
1042 .match_table = fsl_elbc_match, 1083 .match_table = fsl_elbc_match,
1043 .probe = fsl_elbc_ctrl_probe, 1084 .probe = fsl_elbc_ctrl_probe,
1044 .remove = __devexit_p(fsl_elbc_ctrl_remove), 1085 .remove = fsl_elbc_ctrl_remove,
1045}; 1086};
1046 1087
1047static int __init fsl_elbc_init(void) 1088static int __init fsl_elbc_init(void)
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 2d585d2d090c..9e59de501c2e 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -7,8 +7,6 @@
7 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de) 7 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
8 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) 8 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
9 * 9 *
10 * $Id: h1910.c,v 1.6 2005/11/07 11:14:30 gleixner Exp $
11 *
12 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ba1bdf787323..d1129bae6c27 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -798,6 +798,87 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
798} 798}
799 799
800/** 800/**
801 * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function
802 * @mtd: mtd info structure
803 * @chip: nand chip info structure
804 * @data_offs: offset of requested data within the page
805 * @readlen: data length
806 * @buf: buffer to store read data
807 */
808static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
809{
810 int start_step, end_step, num_steps;
811 uint32_t *eccpos = chip->ecc.layout->eccpos;
812 uint8_t *p;
813 int data_col_addr, i, gaps = 0;
814 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
815 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
816
817 /* Column address within the page, aligned to the ECC size (256 bytes). */
818 start_step = data_offs / chip->ecc.size;
819 end_step = (data_offs + readlen - 1) / chip->ecc.size;
820 num_steps = end_step - start_step + 1;
821
822 /* Data size aligned to ecc.size */
823 datafrag_len = num_steps * chip->ecc.size;
824 eccfrag_len = num_steps * chip->ecc.bytes;
825
826 data_col_addr = start_step * chip->ecc.size;
827 /* If the read does not start at column 0 of the page */
828 if (data_col_addr != 0)
829 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
830
831 p = bufpoi + data_col_addr;
832 chip->read_buf(mtd, p, datafrag_len);
833
834 /* Calculate ECC */
835 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
836 chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
837
838 /* The performance is better if the ECC bytes are read at offsets
839 according to ecc.pos. Make sure here that
840 there are no gaps in the ECC positions */
841 for (i = 0; i < eccfrag_len - 1; i++) {
842 if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
843 eccpos[i + start_step * chip->ecc.bytes + 1]) {
844 gaps = 1;
845 break;
846 }
847 }
848 if (gaps) {
849 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
850 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
851 } else {
852 /* send the command to read the particular ecc bytes */
853 /* take care about buswidth alignment in read_buf */
854 aligned_pos = eccpos[start_step * chip->ecc.bytes] & ~(busw - 1);
855 aligned_len = eccfrag_len;
856 if (eccpos[start_step * chip->ecc.bytes] & (busw - 1))
857 aligned_len++;
858 if (eccpos[(start_step + num_steps) * chip->ecc.bytes] & (busw - 1))
859 aligned_len++;
860
861 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize + aligned_pos, -1);
862 chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
863 }
864
865 for (i = 0; i < eccfrag_len; i++)
866 chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + start_step * chip->ecc.bytes]];
867
868 p = bufpoi + data_col_addr;
869 for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
870 int stat;
871
872 stat = chip->ecc.correct(mtd, p, &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
873 if (stat == -1)
874 mtd->ecc_stats.failed++;
875 else
876 mtd->ecc_stats.corrected += stat;
877 }
878 return 0;
879}
880
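Worked example for the subpage arithmetic above, assuming the usual software ECC step of 256 bytes with 3 ECC bytes per step: a request for 100 bytes at data_offs = 600 gives start_step = 600 / 256 = 2 and end_step = 699 / 256 = 2, hence num_steps = 1, datafrag_len = 256, eccfrag_len = 3 and data_col_addr = 512. Only that single 256-byte fragment and its 3 ECC bytes cross the bus instead of the whole page.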
881/**
801 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function 882
802 * @mtd: mtd info structure 883 * @mtd: mtd info structure
803 * @chip: nand chip info structure 884 * @chip: nand chip info structure
@@ -994,6 +1075,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
994 /* Now read the page into the buffer */ 1075 /* Now read the page into the buffer */
995 if (unlikely(ops->mode == MTD_OOB_RAW)) 1076 if (unlikely(ops->mode == MTD_OOB_RAW))
996 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi); 1077 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi);
1078 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
1079 ret = chip->ecc.read_subpage(mtd, chip, col, bytes, bufpoi);
997 else 1080 else
998 ret = chip->ecc.read_page(mtd, chip, bufpoi); 1081 ret = chip->ecc.read_page(mtd, chip, bufpoi);
999 if (ret < 0) 1082 if (ret < 0)
@@ -1001,7 +1084,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1001 1084
1002 /* Transfer not aligned data */ 1085 /* Transfer not aligned data */
1003 if (!aligned) { 1086 if (!aligned) {
1004 chip->pagebuf = realpage; 1087 if (!NAND_SUBPAGE_READ(chip) && !oob)
1088 chip->pagebuf = realpage;
1005 memcpy(buf, chip->buffers->databuf + col, bytes); 1089 memcpy(buf, chip->buffers->databuf + col, bytes);
1006 } 1090 }
1007 1091
@@ -2521,6 +2605,7 @@ int nand_scan_tail(struct mtd_info *mtd)
2521 chip->ecc.calculate = nand_calculate_ecc; 2605 chip->ecc.calculate = nand_calculate_ecc;
2522 chip->ecc.correct = nand_correct_data; 2606 chip->ecc.correct = nand_correct_data;
2523 chip->ecc.read_page = nand_read_page_swecc; 2607 chip->ecc.read_page = nand_read_page_swecc;
2608 chip->ecc.read_subpage = nand_read_subpage;
2524 chip->ecc.write_page = nand_write_page_swecc; 2609 chip->ecc.write_page = nand_write_page_swecc;
2525 chip->ecc.read_oob = nand_read_oob_std; 2610 chip->ecc.read_oob = nand_read_oob_std;
2526 chip->ecc.write_oob = nand_write_oob_std; 2611 chip->ecc.write_oob = nand_write_oob_std;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 5e121ceaa598..0b1c48595f12 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -6,8 +6,6 @@
6 * 6 *
7 * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) 7 * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
8 * 8 *
9 * $Id: nand_bbt.c,v 1.36 2005/11/07 11:14:30 gleixner Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 9003a135e050..918a806a8471 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -9,8 +9,6 @@
9 * 9 *
10 * Copyright (C) 2006 Thomas Gleixner <tglx@linutronix.de> 10 * Copyright (C) 2006 Thomas Gleixner <tglx@linutronix.de>
11 * 11 *
12 * $Id: nand_ecc.c,v 1.15 2005/11/07 11:14:30 gleixner Exp $
13 *
14 * This file is free software; you can redistribute it and/or modify it 12 * This file is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 or (at your option) any 14 * Free Software Foundation; either version 2 or (at your option) any
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index a3e3ab0185d5..69ee2c90eb0b 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de) 4 * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
5 * 5 *
6 * $Id: nand_ids.c,v 1.16 2005/11/07 11:14:31 gleixner Exp $
7 *
8 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index bb885d1fcab5..ecd70e2504f6 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -21,8 +21,6 @@
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software 22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA 23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
24 *
25 * $Id: nandsim.c,v 1.8 2005/03/19 15:33:56 dedekind Exp $
26 */ 24 */
27 25
28#include <linux/init.h> 26#include <linux/init.h>
@@ -39,6 +37,7 @@
39#include <linux/delay.h> 37#include <linux/delay.h>
40#include <linux/list.h> 38#include <linux/list.h>
41#include <linux/random.h> 39#include <linux/random.h>
40#include <asm/div64.h>
42 41
43/* Default simulator parameters values */ 42/* Default simulator parameters values */
44#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \ 43#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE) || \
@@ -298,11 +297,11 @@ struct nandsim {
298 297
299 /* NAND flash "geometry" */ 298 /* NAND flash "geometry" */
300 struct nandsin_geometry { 299 struct nandsin_geometry {
301 uint32_t totsz; /* total flash size, bytes */ 300 uint64_t totsz; /* total flash size, bytes */
302 uint32_t secsz; /* flash sector (erase block) size, bytes */ 301 uint32_t secsz; /* flash sector (erase block) size, bytes */
303 uint pgsz; /* NAND flash page size, bytes */ 302 uint pgsz; /* NAND flash page size, bytes */
304 uint oobsz; /* page OOB area size, bytes */ 303 uint oobsz; /* page OOB area size, bytes */
305 uint32_t totszoob; /* total flash size including OOB, bytes */ 304 uint64_t totszoob; /* total flash size including OOB, bytes */
306 uint pgszoob; /* page size including OOB , bytes*/ 305 uint pgszoob; /* page size including OOB , bytes*/
307 uint secszoob; /* sector size including OOB, bytes */ 306 uint secszoob; /* sector size including OOB, bytes */
308 uint pgnum; /* total number of pages */ 307 uint pgnum; /* total number of pages */
@@ -459,6 +458,12 @@ static char *get_partition_name(int i)
459 return kstrdup(buf, GFP_KERNEL); 458 return kstrdup(buf, GFP_KERNEL);
460} 459}
461 460
461static u_int64_t divide(u_int64_t n, u_int32_t d)
462{
463 do_div(n, d);
464 return n;
465}
466
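The divide() wrapper above exists because the geometry fields are now 64 bits wide and a plain '/' on u64 operands would drag in libgcc division helpers that 32-bit kernels do not provide; do_div() from asm/div64.h is the sanctioned replacement. A minimal sketch of its semantics (values invented for illustration, not part of the patch):

	u_int64_t total = 4400ULL << 20;	/* a 4.3 GiB simulated flash, too big for u32 */
	u_int32_t pgsz = 2048;
	u_int32_t rem;

	rem = do_div(total, pgsz);	/* 'total' now holds the quotient (page count),
					 * the return value is the remainder */

divide() wraps this pattern and discards the remainder, so call sites can stay as readable as ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz).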
462/* 467/*
463 * Initialize the nandsim structure. 468 * Initialize the nandsim structure.
464 * 469 *
@@ -469,8 +474,8 @@ static int init_nandsim(struct mtd_info *mtd)
469 struct nand_chip *chip = (struct nand_chip *)mtd->priv; 474 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
470 struct nandsim *ns = (struct nandsim *)(chip->priv); 475 struct nandsim *ns = (struct nandsim *)(chip->priv);
471 int i, ret = 0; 476 int i, ret = 0;
472 u_int32_t remains; 477 u_int64_t remains;
473 u_int32_t next_offset; 478 u_int64_t next_offset;
474 479
475 if (NS_IS_INITIALIZED(ns)) { 480 if (NS_IS_INITIALIZED(ns)) {
476 NS_ERR("init_nandsim: nandsim is already initialized\n"); 481 NS_ERR("init_nandsim: nandsim is already initialized\n");
@@ -487,8 +492,8 @@ static int init_nandsim(struct mtd_info *mtd)
487 ns->geom.oobsz = mtd->oobsize; 492 ns->geom.oobsz = mtd->oobsize;
488 ns->geom.secsz = mtd->erasesize; 493 ns->geom.secsz = mtd->erasesize;
489 ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz; 494 ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
490 ns->geom.pgnum = ns->geom.totsz / ns->geom.pgsz; 495 ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
491 ns->geom.totszoob = ns->geom.totsz + ns->geom.pgnum * ns->geom.oobsz; 496 ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
492 ns->geom.secshift = ffs(ns->geom.secsz) - 1; 497 ns->geom.secshift = ffs(ns->geom.secsz) - 1;
493 ns->geom.pgshift = chip->page_shift; 498 ns->geom.pgshift = chip->page_shift;
494 ns->geom.oobshift = ffs(ns->geom.oobsz) - 1; 499 ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
@@ -511,7 +516,7 @@ static int init_nandsim(struct mtd_info *mtd)
511 } 516 }
512 517
513 if (ns->options & OPT_SMALLPAGE) { 518 if (ns->options & OPT_SMALLPAGE) {
514 if (ns->geom.totsz < (32 << 20)) { 519 if (ns->geom.totsz <= (32 << 20)) {
515 ns->geom.pgaddrbytes = 3; 520 ns->geom.pgaddrbytes = 3;
516 ns->geom.secaddrbytes = 2; 521 ns->geom.secaddrbytes = 2;
517 } else { 522 } else {
@@ -537,15 +542,16 @@ static int init_nandsim(struct mtd_info *mtd)
537 remains = ns->geom.totsz; 542 remains = ns->geom.totsz;
538 next_offset = 0; 543 next_offset = 0;
539 for (i = 0; i < parts_num; ++i) { 544 for (i = 0; i < parts_num; ++i) {
540 unsigned long part = parts[i]; 545 u_int64_t part_sz = (u_int64_t)parts[i] * ns->geom.secsz;
541 if (!part || part > remains / ns->geom.secsz) { 546
547 if (!part_sz || part_sz > remains) {
542 NS_ERR("bad partition size.\n"); 548 NS_ERR("bad partition size.\n");
543 ret = -EINVAL; 549 ret = -EINVAL;
544 goto error; 550 goto error;
545 } 551 }
546 ns->partitions[i].name = get_partition_name(i); 552 ns->partitions[i].name = get_partition_name(i);
547 ns->partitions[i].offset = next_offset; 553 ns->partitions[i].offset = next_offset;
548 ns->partitions[i].size = part * ns->geom.secsz; 554 ns->partitions[i].size = part_sz;
549 next_offset += ns->partitions[i].size; 555 next_offset += ns->partitions[i].size;
550 remains -= ns->partitions[i].size; 556 remains -= ns->partitions[i].size;
551 } 557 }
@@ -573,7 +579,7 @@ static int init_nandsim(struct mtd_info *mtd)
573 if (ns->busw == 16) 579 if (ns->busw == 16)
574 NS_WARN("16-bit flashes support wasn't tested\n"); 580 NS_WARN("16-bit flashes support wasn't tested\n");
575 581
576 printk("flash size: %u MiB\n", ns->geom.totsz >> 20); 582 printk("flash size: %llu MiB\n", ns->geom.totsz >> 20);
577 printk("page size: %u bytes\n", ns->geom.pgsz); 583 printk("page size: %u bytes\n", ns->geom.pgsz);
578 printk("OOB area size: %u bytes\n", ns->geom.oobsz); 584 printk("OOB area size: %u bytes\n", ns->geom.oobsz);
579 printk("sector size: %u KiB\n", ns->geom.secsz >> 10); 585 printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
@@ -583,7 +589,7 @@ static int init_nandsim(struct mtd_info *mtd)
583 printk("bits in sector size: %u\n", ns->geom.secshift); 589 printk("bits in sector size: %u\n", ns->geom.secshift);
584 printk("bits in page size: %u\n", ns->geom.pgshift); 590 printk("bits in page size: %u\n", ns->geom.pgshift);
585 printk("bits in OOB size: %u\n", ns->geom.oobshift); 591 printk("bits in OOB size: %u\n", ns->geom.oobshift);
586 printk("flash size with OOB: %u KiB\n", ns->geom.totszoob >> 10); 592 printk("flash size with OOB: %llu KiB\n", ns->geom.totszoob >> 10);
587 printk("page address bytes: %u\n", ns->geom.pgaddrbytes); 593 printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
588 printk("sector address bytes: %u\n", ns->geom.secaddrbytes); 594 printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
589 printk("options: %#x\n", ns->options); 595 printk("options: %#x\n", ns->options);
@@ -825,7 +831,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
825 831
826 if (!rptwear) 832 if (!rptwear)
827 return 0; 833 return 0;
828 wear_eb_count = mtd->size / mtd->erasesize; 834 wear_eb_count = divide(mtd->size, mtd->erasesize);
829 mem = wear_eb_count * sizeof(unsigned long); 835 mem = wear_eb_count * sizeof(unsigned long);
830 if (mem / sizeof(unsigned long) != wear_eb_count) { 836 if (mem / sizeof(unsigned long) != wear_eb_count) {
831 NS_ERR("Too many erase blocks for wear reporting\n"); 837 NS_ERR("Too many erase blocks for wear reporting\n");
@@ -2013,7 +2019,7 @@ static int __init ns_init_module(void)
2013 } 2019 }
2014 2020
2015 if (overridesize) { 2021 if (overridesize) {
2016 u_int32_t new_size = nsmtd->erasesize << overridesize; 2022 u_int64_t new_size = (u_int64_t)nsmtd->erasesize << overridesize;
2017 if (new_size >> overridesize != nsmtd->erasesize) { 2023 if (new_size >> overridesize != nsmtd->erasesize) {
2018 NS_ERR("overridesize is too big\n"); 2024 NS_ERR("overridesize is too big\n");
2019 goto err_exit; 2025 goto err_exit;
@@ -2021,7 +2027,8 @@ static int __init ns_init_module(void)
2021 /* N.B. This relies on nand_scan not doing anything with the size before we change it */ 2027 /* N.B. This relies on nand_scan not doing anything with the size before we change it */
2022 nsmtd->size = new_size; 2028 nsmtd->size = new_size;
2023 chip->chipsize = new_size; 2029 chip->chipsize = new_size;
2024 chip->chip_shift = ffs(new_size) - 1; 2030 chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
2031 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
2025 } 2032 }
2026 2033
2027 if ((retval = setup_wear_reporting(nsmtd)) != 0) 2034 if ((retval = setup_wear_reporting(nsmtd)) != 0)
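Worked example for the new chip_shift formula (numbers invented for illustration): with a 16 KiB erase block, ffs(nsmtd->erasesize) is 15, so overridesize = 4 gives chip_shift = 15 + 4 - 1 = 18, matching new_size = 16 KiB << 4 = 256 KiB = 2^18. The old ffs(new_size) - 1 form computed the same value for small devices but cannot work once new_size is a 64-bit quantity exceeding the range of ffs()'s int argument, which is precisely what the u_int64_t change enables.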
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 082073acf20f..cc8658431851 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -6,8 +6,6 @@
6 * Derived from drivers/mtd/nand/edb7312.c 6 * Derived from drivers/mtd/nand/edb7312.c
7 * 7 *
8 * 8 *
9 * $Id: ppchameleonevb.c,v 1.7 2005/11/07 11:14:31 gleixner Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index 26f88215bc47..a033c4cd8e16 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -6,8 +6,6 @@
6 * Derived from drivers/mtd/nand/spia.c 6 * Derived from drivers/mtd/nand/spia.c
7 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) 7 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
8 * 8 *
9 * $Id: rtc_from4.c,v 1.10 2005/11/07 11:14:31 gleixner Exp $
10 *
11 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index b34a460ab679..556139ed1fdf 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -1,26 +1,10 @@
1/* linux/drivers/mtd/nand/s3c2410.c 1/* linux/drivers/mtd/nand/s3c2410.c
2 * 2 *
3 * Copyright (c) 2004,2005 Simtec Electronics 3 * Copyright © 2004-2008 Simtec Electronics
4 * http://www.simtec.co.uk/products/SWLINUX/ 4 * http://armlinux.simtec.co.uk/
5 * Ben Dooks <ben@simtec.co.uk> 5 * Ben Dooks <ben@simtec.co.uk>
6 * 6 *
7 * Samsung S3C2410/S3C240 NAND driver 7 * Samsung S3C2410/S3C2440/S3C2412 NAND driver
8 *
9 * Changelog:
10 * 21-Sep-2004 BJD Initial version
11 * 23-Sep-2004 BJD Multiple device support
12 * 28-Sep-2004 BJD Fixed ECC placement for Hardware mode
13 * 12-Oct-2004 BJD Fixed errors in use of platform data
14 * 18-Feb-2005 BJD Fix sparse errors
15 * 14-Mar-2005 BJD Applied tglx's code reduction patch
16 * 02-May-2005 BJD Fixed s3c2440 support
17 * 02-May-2005 BJD Reduced hwcontrol decode
18 * 20-Jun-2005 BJD Updated s3c2440 support, fixed timing bug
19 * 08-Jul-2005 BJD Fix OOPS when no platform data supplied
20 * 20-Oct-2005 BJD Fix timing calculation bug
21 * 14-Jan-2006 BJD Allow clock to be stopped when idle
22 *
23 * $Id: s3c2410.c,v 1.23 2006/04/01 18:06:29 bjd Exp $
24 * 8 *
25 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
26 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -52,6 +36,7 @@
52#include <linux/err.h> 36#include <linux/err.h>
53#include <linux/slab.h> 37#include <linux/slab.h>
54#include <linux/clk.h> 38#include <linux/clk.h>
39#include <linux/cpufreq.h>
55 40
56#include <linux/mtd/mtd.h> 41#include <linux/mtd/mtd.h>
57#include <linux/mtd/nand.h> 42#include <linux/mtd/nand.h>
@@ -120,8 +105,13 @@ struct s3c2410_nand_info {
120 int sel_bit; 105 int sel_bit;
121 int mtd_count; 106 int mtd_count;
122 unsigned long save_sel; 107 unsigned long save_sel;
108 unsigned long clk_rate;
123 109
124 enum s3c_cpu_type cpu_type; 110 enum s3c_cpu_type cpu_type;
111
112#ifdef CONFIG_CPU_FREQ
113 struct notifier_block freq_transition;
114#endif
125}; 115};
126 116
127/* conversion functions */ 117/* conversion functions */
@@ -179,17 +169,18 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
179 169
180/* controller setup */ 170/* controller setup */
181 171
182static int s3c2410_nand_inithw(struct s3c2410_nand_info *info, 172static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
183 struct platform_device *pdev)
184{ 173{
185 struct s3c2410_platform_nand *plat = to_nand_plat(pdev); 174 struct s3c2410_platform_nand *plat = info->platform;
186 unsigned long clkrate = clk_get_rate(info->clk);
187 int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4; 175 int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
188 int tacls, twrph0, twrph1; 176 int tacls, twrph0, twrph1;
189 unsigned long cfg = 0; 177 unsigned long clkrate = clk_get_rate(info->clk);
178 unsigned long set, cfg, mask;
179 unsigned long flags;
190 180
191 /* calculate the timing information for the controller */ 181 /* calculate the timing information for the controller */
192 182
183 info->clk_rate = clkrate;
193 clkrate /= 1000; /* turn clock into kHz for ease of use */ 184 clkrate /= 1000; /* turn clock into kHz for ease of use */
194 185
195 if (plat != NULL) { 186 if (plat != NULL) {
@@ -211,28 +202,69 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info,
211 dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", 202 dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
212 tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate)); 203 tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate));
213 204
205 switch (info->cpu_type) {
206 case TYPE_S3C2410:
207 mask = (S3C2410_NFCONF_TACLS(3) |
208 S3C2410_NFCONF_TWRPH0(7) |
209 S3C2410_NFCONF_TWRPH1(7));
210 set = S3C2410_NFCONF_EN;
211 set |= S3C2410_NFCONF_TACLS(tacls - 1);
212 set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
213 set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
214 break;
215
216 case TYPE_S3C2440:
217 case TYPE_S3C2412:
218 mask = (S3C2410_NFCONF_TACLS(tacls_max - 1) |
219 S3C2410_NFCONF_TWRPH0(7) |
220 S3C2410_NFCONF_TWRPH1(7));
221
222 set = S3C2440_NFCONF_TACLS(tacls - 1);
223 set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
224 set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
225 break;
226
227 default:
228 /* keep compiler happy */
229 mask = 0;
230 set = 0;
231 BUG();
232 }
233
234 dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
235
236 local_irq_save(flags);
237
238 cfg = readl(info->regs + S3C2410_NFCONF);
239 cfg &= ~mask;
240 cfg |= set;
241 writel(cfg, info->regs + S3C2410_NFCONF);
242
243 local_irq_restore(flags);
244
245 return 0;
246}
247
248static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
249{
250 int ret;
251
252 ret = s3c2410_nand_setrate(info);
253 if (ret < 0)
254 return ret;
255
214 switch (info->cpu_type) { 256 switch (info->cpu_type) {
215 case TYPE_S3C2410: 257 case TYPE_S3C2410:
216 cfg = S3C2410_NFCONF_EN; 258 default:
217 cfg |= S3C2410_NFCONF_TACLS(tacls - 1);
218 cfg |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
219 cfg |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
220 break; 259 break;
221 260
222 case TYPE_S3C2440: 261 case TYPE_S3C2440:
223 case TYPE_S3C2412: 262 case TYPE_S3C2412:
224 cfg = S3C2440_NFCONF_TACLS(tacls - 1);
225 cfg |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
226 cfg |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
227
228 /* enable the controller and de-assert nFCE */ 263 /* enable the controller and de-assert nFCE */
229 264
230 writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT); 265 writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
231 } 266 }
232 267
233 dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
234
235 writel(cfg, info->regs + S3C2410_NFCONF);
236 return 0; 268 return 0;
237} 269}
238 270
@@ -513,6 +545,52 @@ static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int
513 writesl(info->regs + S3C2440_NFDATA, buf, len / 4); 545 writesl(info->regs + S3C2440_NFDATA, buf, len / 4);
514} 546}
515 547
548/* cpufreq driver support */
549
550#ifdef CONFIG_CPU_FREQ
551
552static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb,
553 unsigned long val, void *data)
554{
555 struct s3c2410_nand_info *info;
556 unsigned long newclk;
557
558 info = container_of(nb, struct s3c2410_nand_info, freq_transition);
559 newclk = clk_get_rate(info->clk);
560
561 if ((val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate) ||
562 (val == CPUFREQ_PRECHANGE && newclk > info->clk_rate)) {
563 s3c2410_nand_setrate(info);
564 }
565
566 return 0;
567}
568
569static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
570{
571 info->freq_transition.notifier_call = s3c2410_nand_cpufreq_transition;
572
573 return cpufreq_register_notifier(&info->freq_transition,
574 CPUFREQ_TRANSITION_NOTIFIER);
575}
576
577static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
578{
579 cpufreq_unregister_notifier(&info->freq_transition,
580 CPUFREQ_TRANSITION_NOTIFIER);
581}
582
583#else
584static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
585{
586 return 0;
587}
588
589static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
590{
591}
592#endif
593
516/* device management functions */ 594/* device management functions */
517 595
518static int s3c2410_nand_remove(struct platform_device *pdev) 596static int s3c2410_nand_remove(struct platform_device *pdev)
@@ -524,9 +602,10 @@ static int s3c2410_nand_remove(struct platform_device *pdev)
524 if (info == NULL) 602 if (info == NULL)
525 return 0; 603 return 0;
526 604
527 /* first thing we need to do is release all our mtds 605 s3c2410_nand_cpufreq_deregister(info);
528 * and their partitions, then go through freeing the 606
529 * resources used 607 /* Release all our mtds and their partitions, then go through
608 * freeing the resources used
530 */ 609 */
531 610
532 if (info->mtds != NULL) { 611 if (info->mtds != NULL) {
@@ -691,7 +770,8 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
691{ 770{
692 struct nand_chip *chip = &nmtd->chip; 771 struct nand_chip *chip = &nmtd->chip;
693 772
694 printk("%s: chip %p: %d\n", __func__, chip, chip->page_shift); 773 dev_dbg(info->device, "chip %p => page shift %d\n",
774 chip, chip->page_shift);
695 775
696 if (hardware_ecc) { 776 if (hardware_ecc) {
 697 /* change the behaviour depending on whether we are using 777 /* change the behaviour depending on whether we are using
@@ -784,7 +864,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
784 864
785 /* initialise the hardware */ 865 /* initialise the hardware */
786 866
787 err = s3c2410_nand_inithw(info, pdev); 867 err = s3c2410_nand_inithw(info);
788 if (err != 0) 868 if (err != 0)
789 goto exit_error; 869 goto exit_error;
790 870
@@ -827,6 +907,12 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
827 sets++; 907 sets++;
828 } 908 }
829 909
910 err = s3c2410_nand_cpufreq_register(info);
911 if (err < 0) {
912 dev_err(&pdev->dev, "failed to init cpufreq support\n");
913 goto exit_error;
914 }
915
830 if (allow_clk_stop(info)) { 916 if (allow_clk_stop(info)) {
831 dev_info(&pdev->dev, "clock idle support enabled\n"); 917 dev_info(&pdev->dev, "clock idle support enabled\n");
832 clk_disable(info->clk); 918 clk_disable(info->clk);
@@ -874,7 +960,7 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
874 960
875 if (info) { 961 if (info) {
876 clk_enable(info->clk); 962 clk_enable(info->clk);
877 s3c2410_nand_inithw(info, dev); 963 s3c2410_nand_inithw(info);
878 964
879 /* Restore the state of the nFCE line. */ 965 /* Restore the state of the nFCE line. */
880 966
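
The s3c2410 changes above split the timing setup out of s3c2410_nand_inithw() into s3c2410_nand_setrate() so it can be re-run from a cpufreq transition notifier: timings are reprogrammed before the clock rises and after it falls, so the programmed values are always long enough for the faster of the two rates. A minimal sketch of that notifier pattern for an arbitrary driver; the foo_* names are hypothetical and not part of the patch:

#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/kernel.h>

struct foo_info {
        struct clk *clk;
        unsigned long clk_rate;                 /* rate the timings were computed for */
        struct notifier_block freq_transition;
};

static void foo_set_timings(struct foo_info *info)
{
        /* recompute and write the bus timing registers here */
        info->clk_rate = clk_get_rate(info->clk);
}

static int foo_cpufreq_transition(struct notifier_block *nb,
                                  unsigned long val, void *data)
{
        struct foo_info *info =
                container_of(nb, struct foo_info, freq_transition);
        unsigned long newclk = clk_get_rate(info->clk);

        /* retime before a rate increase and after a rate decrease */
        if ((val == CPUFREQ_PRECHANGE && newclk > info->clk_rate) ||
            (val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate))
                foo_set_timings(info);

        return 0;
}

static int foo_cpufreq_register(struct foo_info *info)
{
        info->freq_transition.notifier_call = foo_cpufreq_transition;
        return cpufreq_register_notifier(&info->freq_transition,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
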
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 033f8800b1e6..6dba2fb66ae5 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2004 Richard Purdie 4 * Copyright (C) 2004 Richard Purdie
5 * 5 *
6 * $Id: sharpsl.c,v 1.7 2005/11/07 11:14:31 gleixner Exp $
7 *
8 * Based on Sharp's NAND driver sharp_sl.c 6 * Based on Sharp's NAND driver sharp_sl.c
9 * 7 *
10 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
index 1f6d429b1583..0cc6d0acb8fe 100644
--- a/drivers/mtd/nand/spia.c
+++ b/drivers/mtd/nand/spia.c
@@ -8,8 +8,6 @@
8 * to controllines (due to change in nand.c) 8 * to controllines (due to change in nand.c)
9 * page_cache added 9 * page_cache added
10 * 10 *
11 * $Id: spia.c,v 1.25 2005/11/07 11:14:31 gleixner Exp $
12 *
13 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
diff --git a/drivers/mtd/nand/toto.c b/drivers/mtd/nand/toto.c
index f9e2d4a0ab8c..bbf492e6830d 100644
--- a/drivers/mtd/nand/toto.c
+++ b/drivers/mtd/nand/toto.c
@@ -14,8 +14,6 @@
14 * Overview: 14 * Overview:
15 * This is a device driver for the NAND flash device found on the 15 * This is a device driver for the NAND flash device found on the
16 * TI fido board. It supports 32MiB and 64MiB cards 16 * TI fido board. It supports 32MiB and 64MiB cards
17 *
18 * $Id: toto.c,v 1.5 2005/11/07 11:14:31 gleixner Exp $
19 */ 17 */
20 18
21#include <linux/slab.h> 19#include <linux/slab.h>
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
index f40081069ab2..807a72752eeb 100644
--- a/drivers/mtd/nand/ts7250.c
+++ b/drivers/mtd/nand/ts7250.c
@@ -9,8 +9,6 @@
9 * Derived from drivers/mtd/nand/autcpu12.c 9 * Derived from drivers/mtd/nand/autcpu12.c
10 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) 10 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
11 * 11 *
12 * $Id: ts7250.c,v 1.4 2004/12/30 22:02:07 joff Exp $
13 *
14 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation. 14 * published by the Free Software Foundation.
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index 0c9ce19ea27a..320b929abe79 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -1,7 +1,6 @@
1/* Linux driver for NAND Flash Translation Layer */ 1/* Linux driver for NAND Flash Translation Layer */
2/* (c) 1999 Machine Vision Holdings, Inc. */ 2/* (c) 1999 Machine Vision Holdings, Inc. */
3/* Author: David Woodhouse <dwmw2@infradead.org> */ 3/* Author: David Woodhouse <dwmw2@infradead.org> */
4/* $Id: nftlcore.c,v 1.98 2005/11/07 11:14:21 gleixner Exp $ */
5 4
6/* 5/*
7 The contents of this file are distributed under the GNU General 6 The contents of this file are distributed under the GNU General
@@ -803,12 +802,8 @@ static struct mtd_blktrans_ops nftl_tr = {
803 .owner = THIS_MODULE, 802 .owner = THIS_MODULE,
804}; 803};
805 804
806extern char nftlmountrev[];
807
808static int __init init_nftl(void) 805static int __init init_nftl(void)
809{ 806{
810 printk(KERN_INFO "NFTL driver: nftlcore.c $Revision: 1.98 $, nftlmount.c %s\n", nftlmountrev);
811
812 return register_mtd_blktrans(&nftl_tr); 807 return register_mtd_blktrans(&nftl_tr);
813} 808}
814 809
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 345e6eff89ce..ccc4f209fbb5 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -4,8 +4,6 @@
4 * Author: Fabrice Bellard (fabrice.bellard@netgem.com) 4 * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
5 * Copyright (C) 2000 Netgem S.A. 5 * Copyright (C) 2000 Netgem S.A.
6 * 6 *
7 * $Id: nftlmount.c,v 1.41 2005/11/07 11:14:21 gleixner Exp $
8 *
9 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or 9 * the Free Software Foundation; either version 2 of the License, or
@@ -31,8 +29,6 @@
31 29
32#define SECTORSIZE 512 30#define SECTORSIZE 512
33 31
34char nftlmountrev[]="$Revision: 1.41 $";
35
36/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the 32/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
37 * various device information of the NFTL partition and Bad Unit Table. Update 33 * various device information of the NFTL partition and Bad Unit Table. Update
 38 * the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[] 34 * the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[]
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 5d7965f7e9ce..926cf3a4135d 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -325,28 +325,11 @@ static int onenand_wait(struct mtd_info *mtd, int state)
325 325
326 ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); 326 ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
327 327
328 if (ctrl & ONENAND_CTRL_ERROR) { 328 /*
329 printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n", ctrl); 329 * In the Spec. it checks the controller status first
330 if (ctrl & ONENAND_CTRL_LOCK) 330 * However if you get the correct information in case of
331 printk(KERN_ERR "onenand_wait: it's locked error.\n"); 331 * power off recovery (POR) test, it should read ECC status first
332 if (state == FL_READING) { 332 */
333 /*
334 * A power loss while writing can result in a page
335 * becoming unreadable. When the device is mounted
336 * again, reading that page gives controller errors.
337 * Upper level software like JFFS2 treat -EIO as fatal,
338 * refusing to mount at all. That means it is necessary
339 * to treat the error as an ECC error to allow recovery.
340 * Note that typically in this case, the eraseblock can
341 * still be erased and rewritten i.e. it has not become
342 * a bad block.
343 */
344 mtd->ecc_stats.failed++;
345 return -EBADMSG;
346 }
347 return -EIO;
348 }
349
350 if (interrupt & ONENAND_INT_READ) { 333 if (interrupt & ONENAND_INT_READ) {
351 int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS); 334 int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
352 if (ecc) { 335 if (ecc) {
@@ -364,6 +347,15 @@ static int onenand_wait(struct mtd_info *mtd, int state)
364 return -EIO; 347 return -EIO;
365 } 348 }
366 349
350 /* If there's controller error, it's a real error */
351 if (ctrl & ONENAND_CTRL_ERROR) {
352 printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n",
353 ctrl);
354 if (ctrl & ONENAND_CTRL_LOCK)
355 printk(KERN_ERR "onenand_wait: it's locked error.\n");
356 return -EIO;
357 }
358
367 return 0; 359 return 0;
368} 360}
369 361
@@ -1135,22 +1127,26 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
1135 interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); 1127 interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
1136 ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); 1128 ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
1137 1129
1138 /* Initial bad block case: 0x2400 or 0x0400 */
1139 if (ctrl & ONENAND_CTRL_ERROR) {
1140 printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl);
1141 return ONENAND_BBT_READ_ERROR;
1142 }
1143
1144 if (interrupt & ONENAND_INT_READ) { 1130 if (interrupt & ONENAND_INT_READ) {
1145 int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS); 1131 int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
1146 if (ecc & ONENAND_ECC_2BIT_ALL) 1132 if (ecc & ONENAND_ECC_2BIT_ALL) {
1133 printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
1134 ", controller error 0x%04x\n", ecc, ctrl);
1147 return ONENAND_BBT_READ_ERROR; 1135 return ONENAND_BBT_READ_ERROR;
1136 }
1148 } else { 1137 } else {
1149 printk(KERN_ERR "onenand_bbt_wait: read timeout!" 1138 printk(KERN_ERR "onenand_bbt_wait: read timeout!"
1150 "ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt); 1139 "ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt);
1151 return ONENAND_BBT_READ_FATAL_ERROR; 1140 return ONENAND_BBT_READ_FATAL_ERROR;
1152 } 1141 }
1153 1142
1143 /* Initial bad block case: 0x2400 or 0x0400 */
1144 if (ctrl & ONENAND_CTRL_ERROR) {
1145 printk(KERN_DEBUG "onenand_bbt_wait: "
1146 "controller error = 0x%04x\n", ctrl);
1147 return ONENAND_BBT_READ_ERROR;
1148 }
1149
1154 return 0; 1150 return 0;
1155} 1151}
1156 1152
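
The onenand_wait()/onenand_bbt_wait() hunks above move the controller-status check below the ECC-status check, so that a page left half-written by a power cut is reported as an ECC failure rather than as a hard I/O error. A rough standalone sketch of the resulting check order; the register bits and the wait_done() helper are hypothetical, not the driver's API:

#include <errno.h>

#define INT_READ_DONE           0x0080  /* hypothetical interrupt bit */
#define ECC_UNCORRECTABLE       0x1010  /* hypothetical ECC status bits */
#define CTRL_ERROR              0x0400  /* hypothetical controller error bit */

static int wait_done(unsigned int interrupt, unsigned int ctrl,
                     unsigned int ecc_status)
{
        /* Look at the ECC status first: a page corrupted by power loss then
         * surfaces as -EBADMSG, which upper layers such as JFFS2 can recover
         * from by erasing and rewriting the block. */
        if ((interrupt & INT_READ_DONE) && (ecc_status & ECC_UNCORRECTABLE))
                return -EBADMSG;

        /* Only a controller error that remains after that is fatal. */
        if (ctrl & CTRL_ERROR)
                return -EIO;

        return 0;
}
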
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index c5030f94f04e..2d600a1bf2aa 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * $Id: redboot.c,v 1.21 2006/03/30 18:34:37 bjd Exp $
3 *
4 * Parse RedBoot-style Flash Image System (FIS) tables and 2 * Parse RedBoot-style Flash Image System (FIS) tables and
5 * produce a Linux partition array to match. 3 * produce a Linux partition array to match.
6 */ 4 */
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index c84e45465499..e538c0a72abb 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -3,8 +3,6 @@
3 * 3 *
4 * Copyright (C) 2005 Sean Young <sean@mess.org> 4 * Copyright (C) 2005 Sean Young <sean@mess.org>
5 * 5 *
6 * $Id: rfd_ftl.c,v 1.8 2006/01/15 12:51:44 sean Exp $
7 *
8 * This type of flash translation layer (FTL) is used by the Embedded BIOS 6 * This type of flash translation layer (FTL) is used by the Embedded BIOS
9 * by General Software. It is known as the Resident Flash Disk (RFD), see: 7 * by General Software. It is known as the Resident Flash Disk (RFD), see:
10 * 8 *
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index c7cc760a1777..af251a5df844 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -814,7 +814,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
814 } 814 }
815 815
816 /* release skb */ 816 /* release skb */
817 BUG_TRAP(skb); 817 WARN_ON(!skb);
818 dev_kfree_skb(skb); 818 dev_kfree_skb(skb);
819 tx_buf->first_bd = 0; 819 tx_buf->first_bd = 0;
820 tx_buf->skb = NULL; 820 tx_buf->skb = NULL;
@@ -837,9 +837,9 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
837 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; 837 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
838 838
839#ifdef BNX2X_STOP_ON_ERROR 839#ifdef BNX2X_STOP_ON_ERROR
840 BUG_TRAP(used >= 0); 840 WARN_ON(used < 0);
841 BUG_TRAP(used <= fp->bp->tx_ring_size); 841 WARN_ON(used > fp->bp->tx_ring_size);
842 BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL); 842 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
843#endif 843#endif
844 844
845 return (s16)(fp->bp->tx_ring_size) - used; 845 return (s16)(fp->bp->tx_ring_size) - used;
@@ -4374,7 +4374,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4374 } 4374 }
4375 ring_prod = NEXT_RX_IDX(ring_prod); 4375 ring_prod = NEXT_RX_IDX(ring_prod);
4376 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod); 4376 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4377 BUG_TRAP(ring_prod > i); 4377 WARN_ON(ring_prod <= i);
4378 } 4378 }
4379 4379
4380 fp->rx_bd_prod = ring_prod; 4380 fp->rx_bd_prod = ring_prod;
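
The bnx2x hunks above convert BUG_TRAP() calls to WARN_ON(). BUG_TRAP(x) complained when x was false, while WARN_ON(x) complains when x is true, so every call site inverts its condition. A throwaway sketch of the rule, written with plain assert() and hypothetical ring values:

#include <assert.h>

static void check_tx_ring(int used, int ring_size, int max_avail)
{
        /* BUG_TRAP(used >= 0)          becomes  WARN_ON(used < 0)         */
        assert(!(used < 0));
        /* BUG_TRAP(used <= ring_size)  becomes  WARN_ON(used > ring_size) */
        assert(!(used > ring_size));
        /* BUG_TRAP(ring_size - used <= max_avail)
         *                              becomes  WARN_ON(ring_size - used > max_avail) */
        assert(!(ring_size - used > max_avail));
}

int main(void)
{
        check_tx_ring(10, 4096, 4096);  /* hypothetical values */
        return 0;
}
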
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index f9d6b4dca180..096bca54bcf7 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index aa9528779044..f094ee00c416 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. 2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/cmd.c b/drivers/net/mlx4/cmd.c
index 04d5bc69a6f8..2845a0560b84 100644
--- a/drivers/net/mlx4/cmd.c
+++ b/drivers/net/mlx4/cmd.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 * 5 *
6 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 95e87a2f8896..9bb50e3f8974 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -2,7 +2,7 @@
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
7 * 7 *
8 * This software is available to you under a choice of one of two 8 * This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 7df928d3a3d8..8a8b56135a58 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 2 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 57278224ba1e..7e32955da982 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 * 5 *
6 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index fbf0e22be122..decbb5c2ad41 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved. 4 * Copyright (c) 2006, 2007 Cisco Systems. All rights reserved.
5 * 5 *
6 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 2a5bef6388fe..baf4bf66062c 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 2 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index 6c44edf35847..ab56a2f89b65 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 2 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 * 4 *
5 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 4a6c4d526f1b..0e7eb1038f9f 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 8e1d24cda1b0..1252a919de2e 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 * 6 *
7 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index b4b57870ddfd..c83f88ce0736 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 78038499cff5..5337e3ac3e78 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -2,7 +2,7 @@
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
5 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 6 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
7 * 7 *
8 * This software is available to you under a choice of one of two 8 * This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index a3c04c5f12c2..62071d9c4a55 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 4 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 * 5 *
6 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index ee5484c44a18..c49a86044bf7 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved. 2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved. 5 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
6 * 6 *
7 * This software is available to you under a choice of one of two 7 * This software is available to you under a choice of one of two
diff --git a/drivers/net/mlx4/reset.c b/drivers/net/mlx4/reset.c
index e199715fabd0..3951b884c0fb 100644
--- a/drivers/net/mlx4/reset.c
+++ b/drivers/net/mlx4/reset.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index d23f46d692ef..533eb6db24b3 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
3 * 4 *
4 * This software is available to you under a choice of one of two 5 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 6 * licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 739b3ab7bccc..ddccc074a76a 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -581,12 +581,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
581 if (file == ppp->owner) 581 if (file == ppp->owner)
582 ppp_shutdown_interface(ppp); 582 ppp_shutdown_interface(ppp);
583 } 583 }
584 if (atomic_read(&file->f_count) <= 2) { 584 if (atomic_long_read(&file->f_count) <= 2) {
585 ppp_release(NULL, file); 585 ppp_release(NULL, file);
586 err = 0; 586 err = 0;
587 } else 587 } else
588 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%d\n", 588 printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
589 atomic_read(&file->f_count)); 589 atomic_long_read(&file->f_count));
590 unlock_kernel(); 590 unlock_kernel();
591 return err; 591 return err;
592 } 592 }
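
The ppp_generic hunk follows struct file's reference count becoming an atomic_long_t: reads go through atomic_long_read() and the printk format switches from %d to %ld. A one-function sketch of the access pattern; show_refcount() is a hypothetical helper, not part of the driver:

#include <linux/fs.h>
#include <linux/kernel.h>

static void show_refcount(struct file *file)
{
        /* f_count is an atomic_long_t, so read it with the _long helper
         * and print it with %ld rather than %d. */
        long refs = atomic_long_read(&file->f_count);

        printk(KERN_DEBUG "file %p has %ld references\n", file, refs);
}
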
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile
index 4a5ec39f9ca6..0815690ac1e0 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/kvm/Makefile
@@ -6,4 +6,4 @@
6# it under the terms of the GNU General Public License (version 2 only) 6# it under the terms of the GNU General Public License (version 2 only)
7# as published by the Free Software Foundation. 7# as published by the Free Software Foundation.
8 8
9obj-$(CONFIG_VIRTIO) += kvm_virtio.o 9obj-$(CONFIG_S390_GUEST) += kvm_virtio.o
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c3ad89e302bd..cebb25e36e82 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3321,7 +3321,7 @@ int qeth_change_mtu(struct net_device *dev, int new_mtu)
3321 struct qeth_card *card; 3321 struct qeth_card *card;
3322 char dbf_text[15]; 3322 char dbf_text[15];
3323 3323
3324 card = netdev_priv(dev); 3324 card = dev->ml_priv;
3325 3325
3326 QETH_DBF_TEXT(TRACE, 4, "chgmtu"); 3326 QETH_DBF_TEXT(TRACE, 4, "chgmtu");
3327 sprintf(dbf_text, "%8x", new_mtu); 3327 sprintf(dbf_text, "%8x", new_mtu);
@@ -3343,7 +3343,7 @@ struct net_device_stats *qeth_get_stats(struct net_device *dev)
3343{ 3343{
3344 struct qeth_card *card; 3344 struct qeth_card *card;
3345 3345
3346 card = netdev_priv(dev); 3346 card = dev->ml_priv;
3347 3347
3348 QETH_DBF_TEXT(TRACE, 5, "getstat"); 3348 QETH_DBF_TEXT(TRACE, 5, "getstat");
3349 3349
@@ -3395,7 +3395,7 @@ void qeth_tx_timeout(struct net_device *dev)
3395{ 3395{
3396 struct qeth_card *card; 3396 struct qeth_card *card;
3397 3397
3398 card = netdev_priv(dev); 3398 card = dev->ml_priv;
3399 card->stats.tx_errors++; 3399 card->stats.tx_errors++;
3400 qeth_schedule_recovery(card); 3400 qeth_schedule_recovery(card);
3401} 3401}
@@ -3403,7 +3403,7 @@ EXPORT_SYMBOL_GPL(qeth_tx_timeout);
3403 3403
3404int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) 3404int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
3405{ 3405{
3406 struct qeth_card *card = netdev_priv(dev); 3406 struct qeth_card *card = dev->ml_priv;
3407 int rc = 0; 3407 int rc = 0;
3408 3408
3409 switch (regnum) { 3409 switch (regnum) {
@@ -4253,7 +4253,7 @@ EXPORT_SYMBOL_GPL(qeth_core_get_stats_count);
4253void qeth_core_get_ethtool_stats(struct net_device *dev, 4253void qeth_core_get_ethtool_stats(struct net_device *dev,
4254 struct ethtool_stats *stats, u64 *data) 4254 struct ethtool_stats *stats, u64 *data)
4255{ 4255{
4256 struct qeth_card *card = netdev_priv(dev); 4256 struct qeth_card *card = dev->ml_priv;
4257 data[0] = card->stats.rx_packets - 4257 data[0] = card->stats.rx_packets -
4258 card->perf_stats.initial_rx_packets; 4258 card->perf_stats.initial_rx_packets;
4259 data[1] = card->perf_stats.bufs_rec; 4259 data[1] = card->perf_stats.bufs_rec;
@@ -4313,7 +4313,7 @@ EXPORT_SYMBOL_GPL(qeth_core_get_strings);
4313void qeth_core_get_drvinfo(struct net_device *dev, 4313void qeth_core_get_drvinfo(struct net_device *dev,
4314 struct ethtool_drvinfo *info) 4314 struct ethtool_drvinfo *info)
4315{ 4315{
4316 struct qeth_card *card = netdev_priv(dev); 4316 struct qeth_card *card = dev->ml_priv;
4317 if (card->options.layer2) 4317 if (card->options.layer2)
4318 strcpy(info->driver, "qeth_l2"); 4318 strcpy(info->driver, "qeth_l2");
4319 else 4319 else
@@ -4331,7 +4331,7 @@ EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
4331int qeth_core_ethtool_get_settings(struct net_device *netdev, 4331int qeth_core_ethtool_get_settings(struct net_device *netdev,
4332 struct ethtool_cmd *ecmd) 4332 struct ethtool_cmd *ecmd)
4333{ 4333{
4334 struct qeth_card *card = netdev_priv(netdev); 4334 struct qeth_card *card = netdev->ml_priv;
4335 enum qeth_link_types link_type; 4335 enum qeth_link_types link_type;
4336 4336
4337 if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) 4337 if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
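
The qeth changes above swap netdev_priv(dev) for dev->ml_priv throughout. netdev_priv() only returns the private area embedded in the net_device allocation; the qeth card structure is allocated separately, so its pointer is kept in the mid-layer private field instead. A small sketch of the pattern with a hypothetical foo_card:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

struct foo_card {                       /* hypothetical driver state */
        struct net_device *dev;
};

static struct foo_card *foo_card_create(void)
{
        struct foo_card *card = kzalloc(sizeof(*card), GFP_KERNEL);

        if (!card)
                return NULL;

        /* No embedded private area is requested here, so netdev_priv()
         * would not point at the card; stash the pointer in ml_priv and
         * read it back later as dev->ml_priv. */
        card->dev = alloc_etherdev(0);
        if (!card->dev) {
                kfree(card);
                return NULL;
        }
        card->dev->ml_priv = card;

        return card;
}
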
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 3fbc3bdec0c5..a8b069cd9a4c 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -35,7 +35,7 @@ static int qeth_l2_recover(void *);
35 35
36static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 36static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
37{ 37{
38 struct qeth_card *card = netdev_priv(dev); 38 struct qeth_card *card = dev->ml_priv;
39 struct mii_ioctl_data *mii_data; 39 struct mii_ioctl_data *mii_data;
40 int rc = 0; 40 int rc = 0;
41 41
@@ -317,7 +317,7 @@ static void qeth_l2_process_vlans(struct qeth_card *card, int clear)
317 317
318static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 318static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
319{ 319{
320 struct qeth_card *card = netdev_priv(dev); 320 struct qeth_card *card = dev->ml_priv;
321 struct qeth_vlan_vid *id; 321 struct qeth_vlan_vid *id;
322 322
323 QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid); 323 QETH_DBF_TEXT_(TRACE, 4, "aid:%d", vid);
@@ -334,7 +334,7 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
334static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 334static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
335{ 335{
336 struct qeth_vlan_vid *id, *tmpid = NULL; 336 struct qeth_vlan_vid *id, *tmpid = NULL;
337 struct qeth_card *card = netdev_priv(dev); 337 struct qeth_card *card = dev->ml_priv;
338 338
339 QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); 339 QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
340 spin_lock_bh(&card->vlanlock); 340 spin_lock_bh(&card->vlanlock);
@@ -566,7 +566,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
566static int qeth_l2_set_mac_address(struct net_device *dev, void *p) 566static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
567{ 567{
568 struct sockaddr *addr = p; 568 struct sockaddr *addr = p;
569 struct qeth_card *card = netdev_priv(dev); 569 struct qeth_card *card = dev->ml_priv;
570 int rc = 0; 570 int rc = 0;
571 571
572 QETH_DBF_TEXT(TRACE, 3, "setmac"); 572 QETH_DBF_TEXT(TRACE, 3, "setmac");
@@ -590,7 +590,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
590 590
591static void qeth_l2_set_multicast_list(struct net_device *dev) 591static void qeth_l2_set_multicast_list(struct net_device *dev)
592{ 592{
593 struct qeth_card *card = netdev_priv(dev); 593 struct qeth_card *card = dev->ml_priv;
594 struct dev_mc_list *dm; 594 struct dev_mc_list *dm;
595 595
596 if (card->info.type == QETH_CARD_TYPE_OSN) 596 if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -612,7 +612,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
612 int rc; 612 int rc;
613 struct qeth_hdr *hdr = NULL; 613 struct qeth_hdr *hdr = NULL;
614 int elements = 0; 614 int elements = 0;
615 struct qeth_card *card = netdev_priv(dev); 615 struct qeth_card *card = dev->ml_priv;
616 struct sk_buff *new_skb = skb; 616 struct sk_buff *new_skb = skb;
617 int ipv = qeth_get_ip_version(skb); 617 int ipv = qeth_get_ip_version(skb);
618 int cast_type = qeth_get_cast_type(card, skb); 618 int cast_type = qeth_get_cast_type(card, skb);
@@ -767,7 +767,7 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
767 767
768static int qeth_l2_open(struct net_device *dev) 768static int qeth_l2_open(struct net_device *dev)
769{ 769{
770 struct qeth_card *card = netdev_priv(dev); 770 struct qeth_card *card = dev->ml_priv;
771 771
772 QETH_DBF_TEXT(TRACE, 4, "qethopen"); 772 QETH_DBF_TEXT(TRACE, 4, "qethopen");
773 if (card->state != CARD_STATE_SOFTSETUP) 773 if (card->state != CARD_STATE_SOFTSETUP)
@@ -791,7 +791,7 @@ static int qeth_l2_open(struct net_device *dev)
791 791
792static int qeth_l2_stop(struct net_device *dev) 792static int qeth_l2_stop(struct net_device *dev)
793{ 793{
794 struct qeth_card *card = netdev_priv(dev); 794 struct qeth_card *card = dev->ml_priv;
795 795
796 QETH_DBF_TEXT(TRACE, 4, "qethstop"); 796 QETH_DBF_TEXT(TRACE, 4, "qethstop");
797 netif_tx_disable(dev); 797 netif_tx_disable(dev);
@@ -838,7 +838,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
838 838
839static int qeth_l2_ethtool_set_tso(struct net_device *dev, u32 data) 839static int qeth_l2_ethtool_set_tso(struct net_device *dev, u32 data)
840{ 840{
841 struct qeth_card *card = netdev_priv(dev); 841 struct qeth_card *card = dev->ml_priv;
842 842
843 if (data) { 843 if (data) {
844 if (card->options.large_send == QETH_LARGE_SEND_NO) { 844 if (card->options.large_send == QETH_LARGE_SEND_NO) {
@@ -894,7 +894,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
894 if (!card->dev) 894 if (!card->dev)
895 return -ENODEV; 895 return -ENODEV;
896 896
897 card->dev->priv = card; 897 card->dev->ml_priv = card;
898 card->dev->tx_timeout = &qeth_tx_timeout; 898 card->dev->tx_timeout = &qeth_tx_timeout;
899 card->dev->watchdog_timeo = QETH_TX_TIMEOUT; 899 card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
900 card->dev->open = qeth_l2_open; 900 card->dev->open = qeth_l2_open;
@@ -1178,7 +1178,7 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len)
1178 QETH_DBF_TEXT(TRACE, 2, "osnsdmc"); 1178 QETH_DBF_TEXT(TRACE, 2, "osnsdmc");
1179 if (!dev) 1179 if (!dev)
1180 return -ENODEV; 1180 return -ENODEV;
1181 card = netdev_priv(dev); 1181 card = dev->ml_priv;
1182 if (!card) 1182 if (!card)
1183 return -ENODEV; 1183 return -ENODEV;
1184 if ((card->state != CARD_STATE_UP) && 1184 if ((card->state != CARD_STATE_UP) &&
@@ -1201,7 +1201,7 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
1201 *dev = qeth_l2_netdev_by_devno(read_dev_no); 1201 *dev = qeth_l2_netdev_by_devno(read_dev_no);
1202 if (*dev == NULL) 1202 if (*dev == NULL)
1203 return -ENODEV; 1203 return -ENODEV;
1204 card = netdev_priv(*dev); 1204 card = (*dev)->ml_priv;
1205 if (!card) 1205 if (!card)
1206 return -ENODEV; 1206 return -ENODEV;
1207 if ((assist_cb == NULL) || (data_cb == NULL)) 1207 if ((assist_cb == NULL) || (data_cb == NULL))
@@ -1219,7 +1219,7 @@ void qeth_osn_deregister(struct net_device *dev)
1219 QETH_DBF_TEXT(TRACE, 2, "osndereg"); 1219 QETH_DBF_TEXT(TRACE, 2, "osndereg");
1220 if (!dev) 1220 if (!dev)
1221 return; 1221 return;
1222 card = netdev_priv(dev); 1222 card = dev->ml_priv;
1223 if (!card) 1223 if (!card)
1224 return; 1224 return;
1225 card->osn_info.assist_cb = NULL; 1225 card->osn_info.assist_cb = NULL;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 38de31b55708..3e1d13857350 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1813,7 +1813,7 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
1813static void qeth_l3_vlan_rx_register(struct net_device *dev, 1813static void qeth_l3_vlan_rx_register(struct net_device *dev,
1814 struct vlan_group *grp) 1814 struct vlan_group *grp)
1815{ 1815{
1816 struct qeth_card *card = netdev_priv(dev); 1816 struct qeth_card *card = dev->ml_priv;
1817 unsigned long flags; 1817 unsigned long flags;
1818 1818
1819 QETH_DBF_TEXT(TRACE, 4, "vlanreg"); 1819 QETH_DBF_TEXT(TRACE, 4, "vlanreg");
@@ -1825,7 +1825,7 @@ static void qeth_l3_vlan_rx_register(struct net_device *dev,
1825static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 1825static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1826{ 1826{
1827 struct net_device *vlandev; 1827 struct net_device *vlandev;
1828 struct qeth_card *card = (struct qeth_card *) dev->priv; 1828 struct qeth_card *card = dev->ml_priv;
1829 struct in_device *in_dev; 1829 struct in_device *in_dev;
1830 1830
1831 if (card->info.type == QETH_CARD_TYPE_IQD) 1831 if (card->info.type == QETH_CARD_TYPE_IQD)
@@ -1851,7 +1851,7 @@ static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1851 1851
1852static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) 1852static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1853{ 1853{
1854 struct qeth_card *card = netdev_priv(dev); 1854 struct qeth_card *card = dev->ml_priv;
1855 unsigned long flags; 1855 unsigned long flags;
1856 1856
1857 QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid); 1857 QETH_DBF_TEXT_(TRACE, 4, "kid:%d", vid);
@@ -2013,7 +2013,7 @@ static int qeth_l3_verify_vlan_dev(struct net_device *dev,
2013 } 2013 }
2014 } 2014 }
2015 2015
2016 if (rc && !(netdev_priv(vlan_dev_real_dev(dev)) == (void *)card)) 2016 if (rc && !(vlan_dev_real_dev(dev)->ml_priv == (void *)card))
2017 return 0; 2017 return 0;
2018 2018
2019 return rc; 2019 return rc;
@@ -2047,9 +2047,9 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
2047 2047
2048 rc = qeth_l3_verify_dev(dev); 2048 rc = qeth_l3_verify_dev(dev);
2049 if (rc == QETH_REAL_CARD) 2049 if (rc == QETH_REAL_CARD)
2050 card = netdev_priv(dev); 2050 card = dev->ml_priv;
2051 else if (rc == QETH_VLAN_CARD) 2051 else if (rc == QETH_VLAN_CARD)
2052 card = netdev_priv(vlan_dev_real_dev(dev)); 2052 card = vlan_dev_real_dev(dev)->ml_priv;
2053 if (card && card->options.layer2) 2053 if (card && card->options.layer2)
2054 card = NULL; 2054 card = NULL;
2055 QETH_DBF_TEXT_(TRACE, 4, "%d", rc); 2055 QETH_DBF_TEXT_(TRACE, 4, "%d", rc);
@@ -2110,7 +2110,7 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
2110 2110
2111static void qeth_l3_set_multicast_list(struct net_device *dev) 2111static void qeth_l3_set_multicast_list(struct net_device *dev)
2112{ 2112{
2113 struct qeth_card *card = netdev_priv(dev); 2113 struct qeth_card *card = dev->ml_priv;
2114 2114
2115 QETH_DBF_TEXT(TRACE, 3, "setmulti"); 2115 QETH_DBF_TEXT(TRACE, 3, "setmulti");
2116 qeth_l3_delete_mc_addresses(card); 2116 qeth_l3_delete_mc_addresses(card);
@@ -2438,7 +2438,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
2438 2438
2439static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2439static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2440{ 2440{
2441 struct qeth_card *card = netdev_priv(dev); 2441 struct qeth_card *card = dev->ml_priv;
2442 struct qeth_arp_cache_entry arp_entry; 2442 struct qeth_arp_cache_entry arp_entry;
2443 struct mii_ioctl_data *mii_data; 2443 struct mii_ioctl_data *mii_data;
2444 int rc = 0; 2444 int rc = 0;
@@ -2595,7 +2595,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2595 u16 *tag; 2595 u16 *tag;
2596 struct qeth_hdr *hdr = NULL; 2596 struct qeth_hdr *hdr = NULL;
2597 int elements_needed = 0; 2597 int elements_needed = 0;
2598 struct qeth_card *card = netdev_priv(dev); 2598 struct qeth_card *card = dev->ml_priv;
2599 struct sk_buff *new_skb = NULL; 2599 struct sk_buff *new_skb = NULL;
2600 int ipv = qeth_get_ip_version(skb); 2600 int ipv = qeth_get_ip_version(skb);
2601 int cast_type = qeth_get_cast_type(card, skb); 2601 int cast_type = qeth_get_cast_type(card, skb);
@@ -2763,7 +2763,7 @@ tx_drop:
2763 2763
2764static int qeth_l3_open(struct net_device *dev) 2764static int qeth_l3_open(struct net_device *dev)
2765{ 2765{
2766 struct qeth_card *card = netdev_priv(dev); 2766 struct qeth_card *card = dev->ml_priv;
2767 2767
2768 QETH_DBF_TEXT(TRACE, 4, "qethopen"); 2768 QETH_DBF_TEXT(TRACE, 4, "qethopen");
2769 if (card->state != CARD_STATE_SOFTSETUP) 2769 if (card->state != CARD_STATE_SOFTSETUP)
@@ -2780,7 +2780,7 @@ static int qeth_l3_open(struct net_device *dev)
2780 2780
2781static int qeth_l3_stop(struct net_device *dev) 2781static int qeth_l3_stop(struct net_device *dev)
2782{ 2782{
2783 struct qeth_card *card = netdev_priv(dev); 2783 struct qeth_card *card = dev->ml_priv;
2784 2784
2785 QETH_DBF_TEXT(TRACE, 4, "qethstop"); 2785 QETH_DBF_TEXT(TRACE, 4, "qethstop");
2786 netif_tx_disable(dev); 2786 netif_tx_disable(dev);
@@ -2792,14 +2792,14 @@ static int qeth_l3_stop(struct net_device *dev)
2792 2792
2793static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev) 2793static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
2794{ 2794{
2795 struct qeth_card *card = netdev_priv(dev); 2795 struct qeth_card *card = dev->ml_priv;
2796 2796
2797 return (card->options.checksum_type == HW_CHECKSUMMING); 2797 return (card->options.checksum_type == HW_CHECKSUMMING);
2798} 2798}
2799 2799
2800static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data) 2800static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
2801{ 2801{
2802 struct qeth_card *card = netdev_priv(dev); 2802 struct qeth_card *card = dev->ml_priv;
2803 enum qeth_card_states old_state; 2803 enum qeth_card_states old_state;
2804 enum qeth_checksum_types csum_type; 2804 enum qeth_checksum_types csum_type;
2805 2805
@@ -2825,7 +2825,7 @@ static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
2825 2825
2826static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data) 2826static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
2827{ 2827{
2828 struct qeth_card *card = netdev_priv(dev); 2828 struct qeth_card *card = dev->ml_priv;
2829 2829
2830 if (data) { 2830 if (data) {
2831 if (card->options.large_send == QETH_LARGE_SEND_NO) { 2831 if (card->options.large_send == QETH_LARGE_SEND_NO) {
@@ -2915,7 +2915,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
2915 return -ENODEV; 2915 return -ENODEV;
2916 2916
2917 card->dev->hard_start_xmit = qeth_l3_hard_start_xmit; 2917 card->dev->hard_start_xmit = qeth_l3_hard_start_xmit;
2918 card->dev->priv = card; 2918 card->dev->ml_priv = card;
2919 card->dev->tx_timeout = &qeth_tx_timeout; 2919 card->dev->tx_timeout = &qeth_tx_timeout;
2920 card->dev->watchdog_timeo = QETH_TX_TIMEOUT; 2920 card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
2921 card->dev->open = qeth_l3_open; 2921 card->dev->open = qeth_l3_open;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 7045511f9ad2..b92c19bb6876 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -4,7 +4,7 @@
4 Written By: Adam Radford <linuxraid@amcc.com> 4 Written By: Adam Radford <linuxraid@amcc.com>
5 Modifications By: Tom Couch <linuxraid@amcc.com> 5 Modifications By: Tom Couch <linuxraid@amcc.com>
6 6
7 Copyright (C) 2004-2007 Applied Micro Circuits Corporation. 7 Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
8 8
9 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by 10 it under the terms of the GNU General Public License as published by
@@ -71,6 +71,10 @@
71 Add support for 9650SE controllers. 71 Add support for 9650SE controllers.
72 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails. 72 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
73 2.26.02.010 - Add support for 9690SA controllers. 73 2.26.02.010 - Add support for 9690SA controllers.
74 2.26.02.011 - Increase max AENs drained to 256.
75 Add MSI support and "use_msi" module parameter.
76 Fix bug in twa_get_param() on 4GB+.
77 Use pci_resource_len() for ioremap().
74*/ 78*/
75 79
76#include <linux/module.h> 80#include <linux/module.h>
@@ -95,7 +99,7 @@
95#include "3w-9xxx.h" 99#include "3w-9xxx.h"
96 100
97/* Globals */ 101/* Globals */
98#define TW_DRIVER_VERSION "2.26.02.010" 102#define TW_DRIVER_VERSION "2.26.02.011"
99static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; 103static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
100static unsigned int twa_device_extension_count; 104static unsigned int twa_device_extension_count;
101static int twa_major = -1; 105static int twa_major = -1;
@@ -107,6 +111,10 @@ MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
107MODULE_LICENSE("GPL"); 111MODULE_LICENSE("GPL");
108MODULE_VERSION(TW_DRIVER_VERSION); 112MODULE_VERSION(TW_DRIVER_VERSION);
109 113
114static int use_msi = 0;
115module_param(use_msi, int, S_IRUGO);
116MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
117
110/* Function prototypes */ 118/* Function prototypes */
111static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header); 119static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
112static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id); 120static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
@@ -1038,7 +1046,6 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
1038 TW_Command_Full *full_command_packet; 1046 TW_Command_Full *full_command_packet;
1039 TW_Command *command_packet; 1047 TW_Command *command_packet;
1040 TW_Param_Apache *param; 1048 TW_Param_Apache *param;
1041 unsigned long param_value;
1042 void *retval = NULL; 1049 void *retval = NULL;
1043 1050
1044 /* Setup the command packet */ 1051 /* Setup the command packet */
@@ -1057,9 +1064,8 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
1057 param->table_id = cpu_to_le16(table_id | 0x8000); 1064 param->table_id = cpu_to_le16(table_id | 0x8000);
1058 param->parameter_id = cpu_to_le16(parameter_id); 1065 param->parameter_id = cpu_to_le16(parameter_id);
1059 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes); 1066 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1060 param_value = tw_dev->generic_buffer_phys[request_id];
1061 1067
1062 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(param_value); 1068 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1063 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE); 1069 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1064 1070
1065 /* Post the command packet to the board */ 1071 /* Post the command packet to the board */
@@ -2000,7 +2006,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
2000{ 2006{
2001 struct Scsi_Host *host = NULL; 2007 struct Scsi_Host *host = NULL;
2002 TW_Device_Extension *tw_dev; 2008 TW_Device_Extension *tw_dev;
2003 u32 mem_addr; 2009 unsigned long mem_addr, mem_len;
2004 int retval = -ENODEV; 2010 int retval = -ENODEV;
2005 2011
2006 retval = pci_enable_device(pdev); 2012 retval = pci_enable_device(pdev);
@@ -2045,13 +2051,16 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
2045 goto out_free_device_extension; 2051 goto out_free_device_extension;
2046 } 2052 }
2047 2053
2048 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) 2054 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2049 mem_addr = pci_resource_start(pdev, 1); 2055 mem_addr = pci_resource_start(pdev, 1);
2050 else 2056 mem_len = pci_resource_len(pdev, 1);
2057 } else {
2051 mem_addr = pci_resource_start(pdev, 2); 2058 mem_addr = pci_resource_start(pdev, 2);
2059 mem_len = pci_resource_len(pdev, 2);
2060 }
2052 2061
2053 /* Save base address */ 2062 /* Save base address */
2054 tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE); 2063 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2055 if (!tw_dev->base_addr) { 2064 if (!tw_dev->base_addr) {
2056 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap"); 2065 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2057 goto out_release_mem_region; 2066 goto out_release_mem_region;
@@ -2086,7 +2095,7 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
2086 2095
2087 pci_set_drvdata(pdev, host); 2096 pci_set_drvdata(pdev, host);
2088 2097
2089 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%x, IRQ: %d.\n", 2098 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2090 host->host_no, mem_addr, pdev->irq); 2099 host->host_no, mem_addr, pdev->irq);
2091 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n", 2100 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2092 host->host_no, 2101 host->host_no,
@@ -2097,6 +2106,11 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
2097 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE, 2106 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2098 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH))); 2107 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2099 2108
2109 /* Try to enable MSI */
2110 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2111 !pci_enable_msi(pdev))
2112 set_bit(TW_USING_MSI, &tw_dev->flags);
2113
2100 /* Now setup the interrupt handler */ 2114 /* Now setup the interrupt handler */
2101 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev); 2115 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2102 if (retval) { 2116 if (retval) {
@@ -2120,6 +2134,8 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
2120 return 0; 2134 return 0;
2121 2135
2122out_remove_host: 2136out_remove_host:
2137 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2138 pci_disable_msi(pdev);
2123 scsi_remove_host(host); 2139 scsi_remove_host(host);
2124out_iounmap: 2140out_iounmap:
2125 iounmap(tw_dev->base_addr); 2141 iounmap(tw_dev->base_addr);
@@ -2151,6 +2167,10 @@ static void twa_remove(struct pci_dev *pdev)
2151 /* Shutdown the card */ 2167 /* Shutdown the card */
2152 __twa_shutdown(tw_dev); 2168 __twa_shutdown(tw_dev);
2153 2169
2170 /* Disable MSI if enabled */
2171 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2172 pci_disable_msi(pdev);
2173
2154 /* Free IO remapping */ 2174 /* Free IO remapping */
2155 iounmap(tw_dev->base_addr); 2175 iounmap(tw_dev->base_addr);
2156 2176
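Taken together, the 3w-9xxx probe changes map the full BAR length obtained from pci_resource_len() instead of a fixed PAGE_SIZE window, and enable MSI only when the new use_msi parameter is set, falling back to legacy INTx when pci_enable_msi() fails. A condensed sketch of that pattern with hypothetical names (request_irq() and error unwinding omitted):

#include <linux/pci.h>
#include <linux/io.h>
#include <linux/bitops.h>

/* Hypothetical per-device state used only for this sketch. */
struct example_dev {
        void __iomem *base;
        unsigned long flags;
#define EXAMPLE_USING_MSI 0
};

static int example_map_and_msi(struct pci_dev *pdev, struct example_dev *ed,
                               int bar, int want_msi)
{
        unsigned long mem_addr = pci_resource_start(pdev, bar);
        unsigned long mem_len = pci_resource_len(pdev, bar);

        /* Map the whole BAR rather than assuming one page is enough. */
        ed->base = ioremap(mem_addr, mem_len);
        if (!ed->base)
                return -ENOMEM;

        /* MSI is opt-in; silently keep legacy INTx if it cannot be enabled. */
        if (want_msi && !pci_enable_msi(pdev))
                set_bit(EXAMPLE_USING_MSI, &ed->flags);

        return 0;
}

The teardown side mirrors this: both the probe error path and twa_remove() call pci_disable_msi() whenever the TW_USING_MSI flag was set.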
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index d14a9479e389..1729a8785fea 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -4,7 +4,7 @@
4 Written By: Adam Radford <linuxraid@amcc.com> 4 Written By: Adam Radford <linuxraid@amcc.com>
5 Modifications By: Tom Couch <linuxraid@amcc.com> 5 Modifications By: Tom Couch <linuxraid@amcc.com>
6 6
7 Copyright (C) 2004-2007 Applied Micro Circuits Corporation. 7 Copyright (C) 2004-2008 Applied Micro Circuits Corporation.
8 8
9 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by 10 it under the terms of the GNU General Public License as published by
@@ -319,8 +319,8 @@ static twa_message_type twa_error_table[] = {
319 319
320/* Compatibility defines */ 320/* Compatibility defines */
321#define TW_9000_ARCH_ID 0x5 321#define TW_9000_ARCH_ID 0x5
322#define TW_CURRENT_DRIVER_SRL 30 322#define TW_CURRENT_DRIVER_SRL 35
323#define TW_CURRENT_DRIVER_BUILD 80 323#define TW_CURRENT_DRIVER_BUILD 0
324#define TW_CURRENT_DRIVER_BRANCH 0 324#define TW_CURRENT_DRIVER_BRANCH 0
325 325
326/* Phase defines */ 326/* Phase defines */
@@ -352,8 +352,9 @@ static twa_message_type twa_error_table[] = {
352#define TW_MAX_RESET_TRIES 2 352#define TW_MAX_RESET_TRIES 2
353#define TW_MAX_CMDS_PER_LUN 254 353#define TW_MAX_CMDS_PER_LUN 254
354#define TW_MAX_RESPONSE_DRAIN 256 354#define TW_MAX_RESPONSE_DRAIN 256
355#define TW_MAX_AEN_DRAIN 40 355#define TW_MAX_AEN_DRAIN 255
356#define TW_IN_RESET 2 356#define TW_IN_RESET 2
357#define TW_USING_MSI 3
357#define TW_IN_ATTENTION_LOOP 4 358#define TW_IN_ATTENTION_LOOP 4
358#define TW_MAX_SECTORS 256 359#define TW_MAX_SECTORS 256
359#define TW_AEN_WAIT_TIME 1000 360#define TW_AEN_WAIT_TIME 1000
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 26be540d1dd3..c7f06298bd3c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -63,6 +63,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
63config BLK_DEV_SD 63config BLK_DEV_SD
64 tristate "SCSI disk support" 64 tristate "SCSI disk support"
65 depends on SCSI 65 depends on SCSI
66 select CRC_T10DIF
66 ---help--- 67 ---help---
67 If you want to use SCSI hard disks, Fibre Channel disks, 68 If you want to use SCSI hard disks, Fibre Channel disks,
68 Serial ATA (SATA) or Parallel ATA (PATA) hard disks, 69 Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index a8149677de23..72fd5043cfa1 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -151,6 +151,8 @@ scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
151scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o 151scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
152 152
153sd_mod-objs := sd.o 153sd_mod-objs := sd.o
154sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
155
154sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o 156sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
155ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \ 157ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
156 := -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \ 158 := -DCONFIG_NCR53C8XX_PREFETCH -DSCSI_NCR_BIG_ENDIAN \
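The Kconfig and Makefile hunks above wire data-integrity support into the sd driver: BLK_DEV_SD now selects the CRC_T10DIF library, and sd_dif.o is built when CONFIG_BLK_DEV_INTEGRITY is enabled. As a rough standalone illustration of what that library provides, the helper below computes the 16-bit T10 DIF guard tag over a data block; sd_dif itself registers generate/verify callbacks with the block-layer integrity framework rather than open-coding calls like this:

#include <linux/crc-t10dif.h>

/* T10 DIF guard tag: CRC16 (polynomial 0x8BB7) over the sector payload. */
static __u16 example_dif_guard(const unsigned char *sector, size_t len)
{
        return crc_t10dif(sector, len);
}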
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 8591585e5cc5..218777bfc143 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2278,7 +2278,7 @@ do { \
2278#define ASC_DBG(lvl, format, arg...) { \ 2278#define ASC_DBG(lvl, format, arg...) { \
2279 if (asc_dbglvl >= (lvl)) \ 2279 if (asc_dbglvl >= (lvl)) \
2280 printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \ 2280 printk(KERN_DEBUG "%s: %s: " format, DRV_NAME, \
2281 __FUNCTION__ , ## arg); \ 2281 __func__ , ## arg); \
2282} 2282}
2283 2283
2284#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \ 2284#define ASC_DBG_PRT_SCSI_HOST(lvl, s) \
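This hunk, and the aha152x, aic94xx and fas216 hunks that follow, are a mechanical substitution of the GCC-specific __FUNCTION__ with the standard C99 __func__ identifier; the generated output is unchanged. The resulting debug-macro shape, sketched with a hypothetical macro name:

#include <linux/kernel.h>

/* __func__ is a predefined identifier, not a preprocessor macro, so it is
 * passed as an ordinary "%s" argument rather than pasted into the format. */
#define EXAMPLE_DBG(fmt, ...) \
        printk(KERN_DEBUG "%s: " fmt, __func__, ##__VA_ARGS__)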
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 0899cb61e3dd..b5a868d85eb4 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -288,20 +288,20 @@ static LIST_HEAD(aha152x_host_list);
288#define DO_LOCK(flags) \ 288#define DO_LOCK(flags) \
289 do { \ 289 do { \
290 if(spin_is_locked(&QLOCK)) { \ 290 if(spin_is_locked(&QLOCK)) { \
291 DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \ 291 DPRINTK(debug_intr, DEBUG_LEAD "(%s:%d) already locked at %s:%d\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
292 } \ 292 } \
293 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \ 293 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locking\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
294 spin_lock_irqsave(&QLOCK,flags); \ 294 spin_lock_irqsave(&QLOCK,flags); \
295 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \ 295 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) locked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
296 QLOCKER=__FUNCTION__; \ 296 QLOCKER=__func__; \
297 QLOCKERL=__LINE__; \ 297 QLOCKERL=__LINE__; \
298 } while(0) 298 } while(0)
299 299
300#define DO_UNLOCK(flags) \ 300#define DO_UNLOCK(flags) \
301 do { \ 301 do { \
302 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__, QLOCKER, QLOCKERL); \ 302 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocking (locked at %s:%d)\n", CMDINFO(CURRENT_SC), __func__, __LINE__, QLOCKER, QLOCKERL); \
303 spin_unlock_irqrestore(&QLOCK,flags); \ 303 spin_unlock_irqrestore(&QLOCK,flags); \
304 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __FUNCTION__, __LINE__); \ 304 DPRINTK(debug_locking, DEBUG_LEAD "(%s:%d) unlocked\n", CMDINFO(CURRENT_SC), __func__, __LINE__); \
305 QLOCKER="(not locked)"; \ 305 QLOCKER="(not locked)"; \
306 QLOCKERL=0; \ 306 QLOCKERL=0; \
307 } while(0) 307 } while(0)
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 2ef459e9cda1..2863a9d22851 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -39,9 +39,9 @@
39 39
40#ifdef ASD_ENTER_EXIT 40#ifdef ASD_ENTER_EXIT
41#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \ 41#define ENTER printk(KERN_NOTICE "%s: ENTER %s\n", ASD_DRIVER_NAME, \
42 __FUNCTION__) 42 __func__)
43#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \ 43#define EXIT printk(KERN_NOTICE "%s: --EXIT %s\n", ASD_DRIVER_NAME, \
44 __FUNCTION__) 44 __func__)
45#else 45#else
46#define ENTER 46#define ENTER
47#define EXIT 47#define EXIT
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 83a78222896d..eb9dc3195fdf 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1359,7 +1359,7 @@ int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask)
1359 struct asd_ascb *ascb_list; 1359 struct asd_ascb *ascb_list;
1360 1360
1361 if (!phy_mask) { 1361 if (!phy_mask) {
1362 asd_printk("%s called with phy_mask of 0!?\n", __FUNCTION__); 1362 asd_printk("%s called with phy_mask of 0!?\n", __func__);
1363 return 0; 1363 return 0;
1364 } 1364 }
1365 1365
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 46643319c520..ca55013b6ae5 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -211,7 +211,7 @@ static void asd_form_port(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
211 phy->asd_port = port; 211 phy->asd_port = port;
212 } 212 }
213 ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n", 213 ASD_DPRINTK("%s: updating phy_mask 0x%x for phy%d\n",
214 __FUNCTION__, phy->asd_port->phy_mask, sas_phy->id); 214 __func__, phy->asd_port->phy_mask, sas_phy->id);
215 asd_update_port_links(asd_ha, phy); 215 asd_update_port_links(asd_ha, phy);
216 spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags); 216 spin_unlock_irqrestore(&asd_ha->asd_ports_lock, flags);
217} 217}
@@ -294,7 +294,7 @@ static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
294 struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num, 294 struct asd_ascb *cp = asd_ascb_alloc_list(ascb->ha, &num,
295 GFP_ATOMIC); 295 GFP_ATOMIC);
296 if (!cp) { 296 if (!cp) {
297 asd_printk("%s: out of memory\n", __FUNCTION__); 297 asd_printk("%s: out of memory\n", __func__);
298 goto out; 298 goto out;
299 } 299 }
300 ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n", 300 ASD_DPRINTK("phy%d: retries:0 performing link reset seq\n",
@@ -446,7 +446,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
446 struct domain_device *failed_dev = NULL; 446 struct domain_device *failed_dev = NULL;
447 447
448 ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n", 448 ASD_DPRINTK("%s: REQ_TASK_ABORT, reason=0x%X\n",
449 __FUNCTION__, dl->status_block[3]); 449 __func__, dl->status_block[3]);
450 450
451 /* 451 /*
452 * Find the task that caused the abort and abort it first. 452 * Find the task that caused the abort and abort it first.
@@ -474,7 +474,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
474 474
475 if (!failed_dev) { 475 if (!failed_dev) {
476 ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n", 476 ASD_DPRINTK("%s: Can't find task (tc=%d) to abort!\n",
477 __FUNCTION__, tc_abort); 477 __func__, tc_abort);
478 goto out; 478 goto out;
479 } 479 }
480 480
@@ -502,7 +502,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
502 conn_handle = *((u16*)(&dl->status_block[1])); 502 conn_handle = *((u16*)(&dl->status_block[1]));
503 conn_handle = le16_to_cpu(conn_handle); 503 conn_handle = le16_to_cpu(conn_handle);
504 504
505 ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __FUNCTION__, 505 ASD_DPRINTK("%s: REQ_DEVICE_RESET, reason=0x%X\n", __func__,
506 dl->status_block[3]); 506 dl->status_block[3]);
507 507
508 /* Find the last pending task for the device... */ 508 /* Find the last pending task for the device... */
@@ -522,7 +522,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
522 522
523 if (!last_dev_task) { 523 if (!last_dev_task) {
524 ASD_DPRINTK("%s: Device reset for idle device %d?\n", 524 ASD_DPRINTK("%s: Device reset for idle device %d?\n",
525 __FUNCTION__, conn_handle); 525 __func__, conn_handle);
526 goto out; 526 goto out;
527 } 527 }
528 528
@@ -549,10 +549,10 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
549 goto out; 549 goto out;
550 } 550 }
551 case SIGNAL_NCQ_ERROR: 551 case SIGNAL_NCQ_ERROR:
552 ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __FUNCTION__); 552 ASD_DPRINTK("%s: SIGNAL_NCQ_ERROR\n", __func__);
553 goto out; 553 goto out;
554 case CLEAR_NCQ_ERROR: 554 case CLEAR_NCQ_ERROR:
555 ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __FUNCTION__); 555 ASD_DPRINTK("%s: CLEAR_NCQ_ERROR\n", __func__);
556 goto out; 556 goto out;
557 } 557 }
558 558
@@ -560,26 +560,26 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
560 560
561 switch (sb_opcode) { 561 switch (sb_opcode) {
562 case BYTES_DMAED: 562 case BYTES_DMAED:
563 ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __FUNCTION__, phy_id); 563 ASD_DPRINTK("%s: phy%d: BYTES_DMAED\n", __func__, phy_id);
564 asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id); 564 asd_bytes_dmaed_tasklet(ascb, dl, edb, phy_id);
565 break; 565 break;
566 case PRIMITIVE_RECVD: 566 case PRIMITIVE_RECVD:
567 ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __FUNCTION__, 567 ASD_DPRINTK("%s: phy%d: PRIMITIVE_RECVD\n", __func__,
568 phy_id); 568 phy_id);
569 asd_primitive_rcvd_tasklet(ascb, dl, phy_id); 569 asd_primitive_rcvd_tasklet(ascb, dl, phy_id);
570 break; 570 break;
571 case PHY_EVENT: 571 case PHY_EVENT:
572 ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __FUNCTION__, phy_id); 572 ASD_DPRINTK("%s: phy%d: PHY_EVENT\n", __func__, phy_id);
573 asd_phy_event_tasklet(ascb, dl); 573 asd_phy_event_tasklet(ascb, dl);
574 break; 574 break;
575 case LINK_RESET_ERROR: 575 case LINK_RESET_ERROR:
576 ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __FUNCTION__, 576 ASD_DPRINTK("%s: phy%d: LINK_RESET_ERROR\n", __func__,
577 phy_id); 577 phy_id);
578 asd_link_reset_err_tasklet(ascb, dl, phy_id); 578 asd_link_reset_err_tasklet(ascb, dl, phy_id);
579 break; 579 break;
580 case TIMER_EVENT: 580 case TIMER_EVENT:
581 ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n", 581 ASD_DPRINTK("%s: phy%d: TIMER_EVENT, lost dw sync\n",
582 __FUNCTION__, phy_id); 582 __func__, phy_id);
583 asd_turn_led(asd_ha, phy_id, 0); 583 asd_turn_led(asd_ha, phy_id, 0);
584 /* the device is gone */ 584 /* the device is gone */
585 sas_phy_disconnected(sas_phy); 585 sas_phy_disconnected(sas_phy);
@@ -587,7 +587,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
587 sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT); 587 sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
588 break; 588 break;
589 default: 589 default:
590 ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __FUNCTION__, 590 ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
591 phy_id, sb_opcode); 591 phy_id, sb_opcode);
592 ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n", 592 ASD_DPRINTK("edb is 0x%x! dl->opcode is 0x%x\n",
593 edb, dl->opcode); 593 edb, dl->opcode);
@@ -654,7 +654,7 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
654 654
655 if (status != 0) { 655 if (status != 0) {
656 ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n", 656 ASD_DPRINTK("%s: phy%d status block opcode:0x%x\n",
657 __FUNCTION__, phy_id, status); 657 __func__, phy_id, status);
658 goto out; 658 goto out;
659 } 659 }
660 660
@@ -663,7 +663,7 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
663 asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id); 663 asd_ha->hw_prof.enabled_phys &= ~(1 << phy_id);
664 asd_turn_led(asd_ha, phy_id, 0); 664 asd_turn_led(asd_ha, phy_id, 0);
665 asd_control_led(asd_ha, phy_id, 0); 665 asd_control_led(asd_ha, phy_id, 0);
666 ASD_DPRINTK("%s: disable phy%d\n", __FUNCTION__, phy_id); 666 ASD_DPRINTK("%s: disable phy%d\n", __func__, phy_id);
667 break; 667 break;
668 668
669 case ENABLE_PHY: 669 case ENABLE_PHY:
@@ -673,40 +673,40 @@ static void control_phy_tasklet_complete(struct asd_ascb *ascb,
673 get_lrate_mode(phy, oob_mode); 673 get_lrate_mode(phy, oob_mode);
674 asd_turn_led(asd_ha, phy_id, 1); 674 asd_turn_led(asd_ha, phy_id, 1);
675 ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n", 675 ASD_DPRINTK("%s: phy%d, lrate:0x%x, proto:0x%x\n",
676 __FUNCTION__, phy_id,phy->sas_phy.linkrate, 676 __func__, phy_id,phy->sas_phy.linkrate,
677 phy->sas_phy.iproto); 677 phy->sas_phy.iproto);
678 } else if (oob_status & CURRENT_SPINUP_HOLD) { 678 } else if (oob_status & CURRENT_SPINUP_HOLD) {
679 asd_ha->hw_prof.enabled_phys |= (1 << phy_id); 679 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
680 asd_turn_led(asd_ha, phy_id, 1); 680 asd_turn_led(asd_ha, phy_id, 1);
681 ASD_DPRINTK("%s: phy%d, spinup hold\n", __FUNCTION__, 681 ASD_DPRINTK("%s: phy%d, spinup hold\n", __func__,
682 phy_id); 682 phy_id);
683 } else if (oob_status & CURRENT_ERR_MASK) { 683 } else if (oob_status & CURRENT_ERR_MASK) {
684 asd_turn_led(asd_ha, phy_id, 0); 684 asd_turn_led(asd_ha, phy_id, 0);
685 ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n", 685 ASD_DPRINTK("%s: phy%d: error: oob status:0x%02x\n",
686 __FUNCTION__, phy_id, oob_status); 686 __func__, phy_id, oob_status);
687 } else if (oob_status & (CURRENT_HOT_PLUG_CNCT 687 } else if (oob_status & (CURRENT_HOT_PLUG_CNCT
688 | CURRENT_DEVICE_PRESENT)) { 688 | CURRENT_DEVICE_PRESENT)) {
689 asd_ha->hw_prof.enabled_phys |= (1 << phy_id); 689 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
690 asd_turn_led(asd_ha, phy_id, 1); 690 asd_turn_led(asd_ha, phy_id, 1);
691 ASD_DPRINTK("%s: phy%d: hot plug or device present\n", 691 ASD_DPRINTK("%s: phy%d: hot plug or device present\n",
692 __FUNCTION__, phy_id); 692 __func__, phy_id);
693 } else { 693 } else {
694 asd_ha->hw_prof.enabled_phys |= (1 << phy_id); 694 asd_ha->hw_prof.enabled_phys |= (1 << phy_id);
695 asd_turn_led(asd_ha, phy_id, 0); 695 asd_turn_led(asd_ha, phy_id, 0);
696 ASD_DPRINTK("%s: phy%d: no device present: " 696 ASD_DPRINTK("%s: phy%d: no device present: "
697 "oob_status:0x%x\n", 697 "oob_status:0x%x\n",
698 __FUNCTION__, phy_id, oob_status); 698 __func__, phy_id, oob_status);
699 } 699 }
700 break; 700 break;
701 case RELEASE_SPINUP_HOLD: 701 case RELEASE_SPINUP_HOLD:
702 case PHY_NO_OP: 702 case PHY_NO_OP:
703 case EXECUTE_HARD_RESET: 703 case EXECUTE_HARD_RESET:
704 ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __FUNCTION__, 704 ASD_DPRINTK("%s: phy%d: sub_func:0x%x\n", __func__,
705 phy_id, control_phy->sub_func); 705 phy_id, control_phy->sub_func);
706 /* XXX finish */ 706 /* XXX finish */
707 break; 707 break;
708 default: 708 default:
709 ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __FUNCTION__, 709 ASD_DPRINTK("%s: phy%d: sub_func:0x%x?\n", __func__,
710 phy_id, control_phy->sub_func); 710 phy_id, control_phy->sub_func);
711 break; 711 break;
712 } 712 }
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 326765c9caf8..75d20f72501f 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -320,7 +320,7 @@ Again:
320 case TC_RESUME: 320 case TC_RESUME:
321 case TC_PARTIAL_SG_LIST: 321 case TC_PARTIAL_SG_LIST:
322 default: 322 default:
323 ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __FUNCTION__, opcode); 323 ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode);
324 break; 324 break;
325 } 325 }
326 326
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 633ff40c736a..d4640ef6d44f 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -75,12 +75,12 @@ static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
75 struct done_list_struct *dl) 75 struct done_list_struct *dl)
76{ 76{
77 struct tasklet_completion_status *tcs = ascb->uldd_task; 77 struct tasklet_completion_status *tcs = ascb->uldd_task;
78 ASD_DPRINTK("%s: here\n", __FUNCTION__); 78 ASD_DPRINTK("%s: here\n", __func__);
79 if (!del_timer(&ascb->timer)) { 79 if (!del_timer(&ascb->timer)) {
80 ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); 80 ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
81 return; 81 return;
82 } 82 }
83 ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); 83 ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
84 tcs->dl_opcode = dl->opcode; 84 tcs->dl_opcode = dl->opcode;
85 complete(ascb->completion); 85 complete(ascb->completion);
86 asd_ascb_free(ascb); 86 asd_ascb_free(ascb);
@@ -91,7 +91,7 @@ static void asd_clear_nexus_timedout(unsigned long data)
91 struct asd_ascb *ascb = (void *)data; 91 struct asd_ascb *ascb = (void *)data;
92 struct tasklet_completion_status *tcs = ascb->uldd_task; 92 struct tasklet_completion_status *tcs = ascb->uldd_task;
93 93
94 ASD_DPRINTK("%s: here\n", __FUNCTION__); 94 ASD_DPRINTK("%s: here\n", __func__);
95 tcs->dl_opcode = TMF_RESP_FUNC_FAILED; 95 tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
96 complete(ascb->completion); 96 complete(ascb->completion);
97} 97}
@@ -103,7 +103,7 @@ static void asd_clear_nexus_timedout(unsigned long data)
103 DECLARE_COMPLETION_ONSTACK(completion); \ 103 DECLARE_COMPLETION_ONSTACK(completion); \
104 DECLARE_TCS(tcs); \ 104 DECLARE_TCS(tcs); \
105 \ 105 \
106 ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ 106 ASD_DPRINTK("%s: PRE\n", __func__); \
107 res = 1; \ 107 res = 1; \
108 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ 108 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
109 if (!ascb) \ 109 if (!ascb) \
@@ -115,12 +115,12 @@ static void asd_clear_nexus_timedout(unsigned long data)
115 scb->header.opcode = CLEAR_NEXUS 115 scb->header.opcode = CLEAR_NEXUS
116 116
117#define CLEAR_NEXUS_POST \ 117#define CLEAR_NEXUS_POST \
118 ASD_DPRINTK("%s: POST\n", __FUNCTION__); \ 118 ASD_DPRINTK("%s: POST\n", __func__); \
119 res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \ 119 res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
120 asd_clear_nexus_timedout); \ 120 asd_clear_nexus_timedout); \
121 if (res) \ 121 if (res) \
122 goto out_err; \ 122 goto out_err; \
123 ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ 123 ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
124 wait_for_completion(&completion); \ 124 wait_for_completion(&completion); \
125 res = tcs.dl_opcode; \ 125 res = tcs.dl_opcode; \
126 if (res == TC_NO_ERROR) \ 126 if (res == TC_NO_ERROR) \
@@ -417,7 +417,7 @@ int asd_abort_task(struct sas_task *task)
417 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 417 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
418 spin_unlock_irqrestore(&task->task_state_lock, flags); 418 spin_unlock_irqrestore(&task->task_state_lock, flags);
419 res = TMF_RESP_FUNC_COMPLETE; 419 res = TMF_RESP_FUNC_COMPLETE;
420 ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task); 420 ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
421 goto out_done; 421 goto out_done;
422 } 422 }
423 spin_unlock_irqrestore(&task->task_state_lock, flags); 423 spin_unlock_irqrestore(&task->task_state_lock, flags);
@@ -481,7 +481,7 @@ int asd_abort_task(struct sas_task *task)
481 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 481 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
482 spin_unlock_irqrestore(&task->task_state_lock, flags); 482 spin_unlock_irqrestore(&task->task_state_lock, flags);
483 res = TMF_RESP_FUNC_COMPLETE; 483 res = TMF_RESP_FUNC_COMPLETE;
484 ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task); 484 ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
485 goto out_done; 485 goto out_done;
486 } 486 }
487 spin_unlock_irqrestore(&task->task_state_lock, flags); 487 spin_unlock_irqrestore(&task->task_state_lock, flags);
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index a715632e19d4..477542602284 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -240,7 +240,7 @@ static void __fas216_checkmagic(FAS216_Info *info, const char *func)
240 panic("scsi memory space corrupted in %s", func); 240 panic("scsi memory space corrupted in %s", func);
241 } 241 }
242} 242}
243#define fas216_checkmagic(info) __fas216_checkmagic((info), __FUNCTION__) 243#define fas216_checkmagic(info) __fas216_checkmagic((info), __func__)
244#else 244#else
245#define fas216_checkmagic(info) 245#define fas216_checkmagic(info)
246#endif 246#endif
@@ -2658,7 +2658,7 @@ int fas216_eh_host_reset(struct scsi_cmnd *SCpnt)
2658 fas216_checkmagic(info); 2658 fas216_checkmagic(info);
2659 2659
2660 printk("scsi%d.%c: %s: resetting host\n", 2660 printk("scsi%d.%c: %s: resetting host\n",
2661 info->host->host_no, '0' + SCpnt->device->id, __FUNCTION__); 2661 info->host->host_no, '0' + SCpnt->device->id, __func__);
2662 2662
2663 /* 2663 /*
2664 * Reset the SCSI chip. 2664 * Reset the SCSI chip.
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index aa2011b64683..3c257fe0893e 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -930,6 +930,7 @@ static int ch_probe(struct device *dev)
930 if (init) 930 if (init)
931 ch_init_elem(ch); 931 ch_init_elem(ch);
932 932
933 dev_set_drvdata(dev, ch);
933 sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name); 934 sdev_printk(KERN_INFO, sd, "Attached scsi changer %s\n", ch->name);
934 935
935 return 0; 936 return 0;
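The ch.c hunk stores the changer state with dev_set_drvdata() before the attach message is printed, so later device callbacks can find it again. A minimal sketch of the matching lookup on the release side, with illustrative names rather than the driver's own:

#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical per-changer state for this sketch. */
struct example_changer {
        char name[16];
};

static int example_remove(struct device *dev)
{
        /* Fetch whatever the probe routine stored with dev_set_drvdata(). */
        struct example_changer *ch = dev_get_drvdata(dev);

        kfree(ch);              /* hypothetical cleanup */
        return 0;
}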
diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
index 2adc0f666b68..67070257919f 100644
--- a/drivers/scsi/device_handler/Kconfig
+++ b/drivers/scsi/device_handler/Kconfig
@@ -30,3 +30,11 @@ config SCSI_DH_EMC
30 depends on SCSI_DH 30 depends on SCSI_DH
31 help 31 help
32 If you have a EMC CLARiiON select y. Otherwise, say N. 32 If you have a EMC CLARiiON select y. Otherwise, say N.
33
34config SCSI_DH_ALUA
35 tristate "SPC-3 ALUA Device Handler (EXPERIMENTAL)"
36 depends on SCSI_DH && EXPERIMENTAL
37 help
38 SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
39 Access (ALUA).
40
diff --git a/drivers/scsi/device_handler/Makefile b/drivers/scsi/device_handler/Makefile
index 35272e93b1c8..e1d2ea083e15 100644
--- a/drivers/scsi/device_handler/Makefile
+++ b/drivers/scsi/device_handler/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_SCSI_DH) += scsi_dh.o
5obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o 5obj-$(CONFIG_SCSI_DH_RDAC) += scsi_dh_rdac.o
6obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o 6obj-$(CONFIG_SCSI_DH_HP_SW) += scsi_dh_hp_sw.o
7obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o 7obj-$(CONFIG_SCSI_DH_EMC) += scsi_dh_emc.o
8obj-$(CONFIG_SCSI_DH_ALUA) += scsi_dh_alua.o
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index ab6c21cd9689..a518f2eff19a 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -24,8 +24,16 @@
24#include <scsi/scsi_dh.h> 24#include <scsi/scsi_dh.h>
25#include "../scsi_priv.h" 25#include "../scsi_priv.h"
26 26
27struct scsi_dh_devinfo_list {
28 struct list_head node;
29 char vendor[9];
30 char model[17];
31 struct scsi_device_handler *handler;
32};
33
27static DEFINE_SPINLOCK(list_lock); 34static DEFINE_SPINLOCK(list_lock);
28static LIST_HEAD(scsi_dh_list); 35static LIST_HEAD(scsi_dh_list);
36static LIST_HEAD(scsi_dh_dev_list);
29 37
30static struct scsi_device_handler *get_device_handler(const char *name) 38static struct scsi_device_handler *get_device_handler(const char *name)
31{ 39{
@@ -33,7 +41,7 @@ static struct scsi_device_handler *get_device_handler(const char *name)
33 41
34 spin_lock(&list_lock); 42 spin_lock(&list_lock);
35 list_for_each_entry(tmp, &scsi_dh_list, list) { 43 list_for_each_entry(tmp, &scsi_dh_list, list) {
36 if (!strcmp(tmp->name, name)) { 44 if (!strncmp(tmp->name, name, strlen(tmp->name))) {
37 found = tmp; 45 found = tmp;
38 break; 46 break;
39 } 47 }
@@ -42,11 +50,307 @@ static struct scsi_device_handler *get_device_handler(const char *name)
42 return found; 50 return found;
43} 51}
44 52
53
54static struct scsi_device_handler *
55scsi_dh_cache_lookup(struct scsi_device *sdev)
56{
57 struct scsi_dh_devinfo_list *tmp;
58 struct scsi_device_handler *found_dh = NULL;
59
60 spin_lock(&list_lock);
61 list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
62 if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
63 !strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
64 found_dh = tmp->handler;
65 break;
66 }
67 }
68 spin_unlock(&list_lock);
69
70 return found_dh;
71}
72
73static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
74 struct scsi_device *sdev)
75{
76 int i, found = 0;
77
78 for(i = 0; scsi_dh->devlist[i].vendor; i++) {
79 if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
80 strlen(scsi_dh->devlist[i].vendor)) &&
81 !strncmp(sdev->model, scsi_dh->devlist[i].model,
82 strlen(scsi_dh->devlist[i].model))) {
83 found = 1;
84 break;
85 }
86 }
87 return found;
88}
89
90/*
91 * device_handler_match - Attach a device handler to a device
92 * @scsi_dh - The device handler to match against or NULL
93 * @sdev - SCSI device to be tested against @scsi_dh
94 *
95 * Tests @sdev against the device handler @scsi_dh or against
96 * all registered device_handler if @scsi_dh == NULL.
97 * Returns the found device handler or NULL if not found.
98 */
99static struct scsi_device_handler *
100device_handler_match(struct scsi_device_handler *scsi_dh,
101 struct scsi_device *sdev)
102{
103 struct scsi_device_handler *found_dh = NULL;
104 struct scsi_dh_devinfo_list *tmp;
105
106 found_dh = scsi_dh_cache_lookup(sdev);
107 if (found_dh)
108 return found_dh;
109
110 if (scsi_dh) {
111 if (scsi_dh_handler_lookup(scsi_dh, sdev))
112 found_dh = scsi_dh;
113 } else {
114 struct scsi_device_handler *tmp_dh;
115
116 spin_lock(&list_lock);
117 list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
118 if (scsi_dh_handler_lookup(tmp_dh, sdev))
119 found_dh = tmp_dh;
120 }
121 spin_unlock(&list_lock);
122 }
123
124 if (found_dh) { /* If device is found, add it to the cache */
125 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
126 if (tmp) {
127 strncpy(tmp->vendor, sdev->vendor, 8);
128 strncpy(tmp->model, sdev->model, 16);
129 tmp->vendor[8] = '\0';
130 tmp->model[16] = '\0';
131 tmp->handler = found_dh;
132 spin_lock(&list_lock);
133 list_add(&tmp->node, &scsi_dh_dev_list);
134 spin_unlock(&list_lock);
135 } else {
136 found_dh = NULL;
137 }
138 }
139
140 return found_dh;
141}
142
143/*
144 * scsi_dh_handler_attach - Attach a device handler to a device
145 * @sdev - SCSI device the device handler should attach to
146 * @scsi_dh - The device handler to attach
147 */
148static int scsi_dh_handler_attach(struct scsi_device *sdev,
149 struct scsi_device_handler *scsi_dh)
150{
151 int err = 0;
152
153 if (sdev->scsi_dh_data) {
154 if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
155 err = -EBUSY;
156 } else if (scsi_dh->attach)
157 err = scsi_dh->attach(sdev);
158
159 return err;
160}
161
162/*
163 * scsi_dh_handler_detach - Detach a device handler from a device
164 * @sdev - SCSI device the device handler should be detached from
165 * @scsi_dh - Device handler to be detached
166 *
167 * Detach from a device handler. If a device handler is specified,
168 * only detach if the currently attached handler matches @scsi_dh.
169 */
170static void scsi_dh_handler_detach(struct scsi_device *sdev,
171 struct scsi_device_handler *scsi_dh)
172{
173 if (!sdev->scsi_dh_data)
174 return;
175
176 if (scsi_dh && scsi_dh != sdev->scsi_dh_data->scsi_dh)
177 return;
178
179 if (!scsi_dh)
180 scsi_dh = sdev->scsi_dh_data->scsi_dh;
181
182 if (scsi_dh && scsi_dh->detach)
183 scsi_dh->detach(sdev);
184}
185
186/*
187 * Functions for sysfs attribute 'dh_state'
188 */
189static ssize_t
190store_dh_state(struct device *dev, struct device_attribute *attr,
191 const char *buf, size_t count)
192{
193 struct scsi_device *sdev = to_scsi_device(dev);
194 struct scsi_device_handler *scsi_dh;
195 int err = -EINVAL;
196
197 if (!sdev->scsi_dh_data) {
198 /*
199 * Attach to a device handler
200 */
201 if (!(scsi_dh = get_device_handler(buf)))
202 return err;
203 err = scsi_dh_handler_attach(sdev, scsi_dh);
204 } else {
205 scsi_dh = sdev->scsi_dh_data->scsi_dh;
206 if (!strncmp(buf, "detach", 6)) {
207 /*
208 * Detach from a device handler
209 */
210 scsi_dh_handler_detach(sdev, scsi_dh);
211 err = 0;
212 } else if (!strncmp(buf, "activate", 8)) {
213 /*
214 * Activate a device handler
215 */
216 if (scsi_dh->activate)
217 err = scsi_dh->activate(sdev);
218 else
219 err = 0;
220 }
221 }
222
223 return err<0?err:count;
224}
225
226static ssize_t
227show_dh_state(struct device *dev, struct device_attribute *attr, char *buf)
228{
229 struct scsi_device *sdev = to_scsi_device(dev);
230
231 if (!sdev->scsi_dh_data)
232 return snprintf(buf, 20, "detached\n");
233
234 return snprintf(buf, 20, "%s\n", sdev->scsi_dh_data->scsi_dh->name);
235}
236
237static struct device_attribute scsi_dh_state_attr =
238 __ATTR(dh_state, S_IRUGO | S_IWUSR, show_dh_state,
239 store_dh_state);
240
241/*
242 * scsi_dh_sysfs_attr_add - Callback for scsi_init_dh
243 */
244static int scsi_dh_sysfs_attr_add(struct device *dev, void *data)
245{
246 struct scsi_device *sdev;
247 int err;
248
249 if (!scsi_is_sdev_device(dev))
250 return 0;
251
252 sdev = to_scsi_device(dev);
253
254 err = device_create_file(&sdev->sdev_gendev,
255 &scsi_dh_state_attr);
256
257 return 0;
258}
259
260/*
261 * scsi_dh_sysfs_attr_remove - Callback for scsi_exit_dh
262 */
263static int scsi_dh_sysfs_attr_remove(struct device *dev, void *data)
264{
265 struct scsi_device *sdev;
266
267 if (!scsi_is_sdev_device(dev))
268 return 0;
269
270 sdev = to_scsi_device(dev);
271
272 device_remove_file(&sdev->sdev_gendev,
273 &scsi_dh_state_attr);
274
275 return 0;
276}
277
278/*
279 * scsi_dh_notifier - notifier chain callback
280 */
281static int scsi_dh_notifier(struct notifier_block *nb,
282 unsigned long action, void *data)
283{
284 struct device *dev = data;
285 struct scsi_device *sdev;
286 int err = 0;
287 struct scsi_device_handler *devinfo = NULL;
288
289 if (!scsi_is_sdev_device(dev))
290 return 0;
291
292 sdev = to_scsi_device(dev);
293
294 if (action == BUS_NOTIFY_ADD_DEVICE) {
295 devinfo = device_handler_match(NULL, sdev);
296 if (!devinfo)
297 goto out;
298
299 err = scsi_dh_handler_attach(sdev, devinfo);
300 if (!err)
301 err = device_create_file(dev, &scsi_dh_state_attr);
302 } else if (action == BUS_NOTIFY_DEL_DEVICE) {
303 device_remove_file(dev, &scsi_dh_state_attr);
304 scsi_dh_handler_detach(sdev, NULL);
305 }
306out:
307 return err;
308}
309
310/*
311 * scsi_dh_notifier_add - Callback for scsi_register_device_handler
312 */
45static int scsi_dh_notifier_add(struct device *dev, void *data) 313static int scsi_dh_notifier_add(struct device *dev, void *data)
46{ 314{
47 struct scsi_device_handler *scsi_dh = data; 315 struct scsi_device_handler *scsi_dh = data;
316 struct scsi_device *sdev;
317
318 if (!scsi_is_sdev_device(dev))
319 return 0;
320
321 if (!get_device(dev))
322 return 0;
323
324 sdev = to_scsi_device(dev);
325
326 if (device_handler_match(scsi_dh, sdev))
327 scsi_dh_handler_attach(sdev, scsi_dh);
328
329 put_device(dev);
330
331 return 0;
332}
333
334/*
335 * scsi_dh_notifier_remove - Callback for scsi_unregister_device_handler
336 */
337static int scsi_dh_notifier_remove(struct device *dev, void *data)
338{
339 struct scsi_device_handler *scsi_dh = data;
340 struct scsi_device *sdev;
341
342 if (!scsi_is_sdev_device(dev))
343 return 0;
344
345 if (!get_device(dev))
346 return 0;
347
348 sdev = to_scsi_device(dev);
349
350 scsi_dh_handler_detach(sdev, scsi_dh);
351
352 put_device(dev);
48 353
49 scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_ADD_DEVICE, dev);
50 return 0; 354 return 0;
51} 355}
52 356
@@ -59,33 +363,19 @@ static int scsi_dh_notifier_add(struct device *dev, void *data)
59 */ 363 */
60int scsi_register_device_handler(struct scsi_device_handler *scsi_dh) 364int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
61{ 365{
62 int ret = -EBUSY; 366 if (get_device_handler(scsi_dh->name))
63 struct scsi_device_handler *tmp; 367 return -EBUSY;
64 368
65 tmp = get_device_handler(scsi_dh->name);
66 if (tmp)
67 goto done;
68
69 ret = bus_register_notifier(&scsi_bus_type, &scsi_dh->nb);
70
71 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
72 spin_lock(&list_lock); 369 spin_lock(&list_lock);
73 list_add(&scsi_dh->list, &scsi_dh_list); 370 list_add(&scsi_dh->list, &scsi_dh_list);
74 spin_unlock(&list_lock); 371 spin_unlock(&list_lock);
372 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
373 printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
75 374
76done: 375 return SCSI_DH_OK;
77 return ret;
78} 376}
79EXPORT_SYMBOL_GPL(scsi_register_device_handler); 377EXPORT_SYMBOL_GPL(scsi_register_device_handler);
80 378
81static int scsi_dh_notifier_remove(struct device *dev, void *data)
82{
83 struct scsi_device_handler *scsi_dh = data;
84
85 scsi_dh->nb.notifier_call(&scsi_dh->nb, BUS_NOTIFY_DEL_DEVICE, dev);
86 return 0;
87}
88
89/* 379/*
90 * scsi_unregister_device_handler - unregister a device handler personality 380 * scsi_unregister_device_handler - unregister a device handler personality
91 * module. 381 * module.
@@ -95,23 +385,26 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
95 */ 385 */
96int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) 386int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
97{ 387{
98 int ret = -ENODEV; 388 struct scsi_dh_devinfo_list *tmp, *pos;
99 struct scsi_device_handler *tmp;
100
101 tmp = get_device_handler(scsi_dh->name);
102 if (!tmp)
103 goto done;
104 389
105 ret = bus_unregister_notifier(&scsi_bus_type, &scsi_dh->nb); 390 if (!get_device_handler(scsi_dh->name))
391 return -ENODEV;
106 392
107 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, 393 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
108 scsi_dh_notifier_remove); 394 scsi_dh_notifier_remove);
395
109 spin_lock(&list_lock); 396 spin_lock(&list_lock);
110 list_del(&scsi_dh->list); 397 list_del(&scsi_dh->list);
398 list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
399 if (pos->handler == scsi_dh) {
400 list_del(&pos->node);
401 kfree(pos);
402 }
403 }
111 spin_unlock(&list_lock); 404 spin_unlock(&list_lock);
405 printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
112 406
113done: 407 return SCSI_DH_OK;
114 return ret;
115} 408}
116EXPORT_SYMBOL_GPL(scsi_unregister_device_handler); 409EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
117 410
@@ -157,6 +450,97 @@ int scsi_dh_handler_exist(const char *name)
157} 450}
158EXPORT_SYMBOL_GPL(scsi_dh_handler_exist); 451EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
159 452
453/*
454 * scsi_dh_attach - Attach a device handler by name
455 * @q - Request queue of the scsi_device the handler should be attached to
456 * @name - name of the handler to attach
457 */
458int scsi_dh_attach(struct request_queue *q, const char *name)
459{
460 unsigned long flags;
461 struct scsi_device *sdev;
462 struct scsi_device_handler *scsi_dh;
463 int err = 0;
464
465 scsi_dh = get_device_handler(name);
466 if (!scsi_dh)
467 return -EINVAL;
468
469 spin_lock_irqsave(q->queue_lock, flags);
470 sdev = q->queuedata;
471 if (!sdev || !get_device(&sdev->sdev_gendev))
472 err = -ENODEV;
473 spin_unlock_irqrestore(q->queue_lock, flags);
474
475 if (!err) {
476 err = scsi_dh_handler_attach(sdev, scsi_dh);
477
478 put_device(&sdev->sdev_gendev);
479 }
480 return err;
481}
482EXPORT_SYMBOL_GPL(scsi_dh_attach);
483
484/*
485 * scsi_dh_detach - Detach device handler
486 * @q - Request queue of the scsi_device the handler should be detached from
487 *
488 * This function will detach the device handler only
489 * if the sdev is not part of the internal list, ie
490 * if it has been attached manually.
491 */
492void scsi_dh_detach(struct request_queue *q)
493{
494 unsigned long flags;
495 struct scsi_device *sdev;
496 struct scsi_device_handler *scsi_dh = NULL;
497
498 spin_lock_irqsave(q->queue_lock, flags);
499 sdev = q->queuedata;
500 if (!sdev || !get_device(&sdev->sdev_gendev))
501 sdev = NULL;
502 spin_unlock_irqrestore(q->queue_lock, flags);
503
504 if (!sdev)
505 return;
506
507 if (sdev->scsi_dh_data) {
508 /* if sdev is not on internal list, detach */
509 scsi_dh = sdev->scsi_dh_data->scsi_dh;
510 if (!device_handler_match(scsi_dh, sdev))
511 scsi_dh_handler_detach(sdev, scsi_dh);
512 }
513 put_device(&sdev->sdev_gendev);
514}
515EXPORT_SYMBOL_GPL(scsi_dh_detach);
516
517static struct notifier_block scsi_dh_nb = {
518 .notifier_call = scsi_dh_notifier
519};
520
521static int __init scsi_dh_init(void)
522{
523 int r;
524
525 r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
526
527 if (!r)
528 bus_for_each_dev(&scsi_bus_type, NULL, NULL,
529 scsi_dh_sysfs_attr_add);
530
531 return r;
532}
533
534static void __exit scsi_dh_exit(void)
535{
536 bus_for_each_dev(&scsi_bus_type, NULL, NULL,
537 scsi_dh_sysfs_attr_remove);
538 bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
539}
540
541module_init(scsi_dh_init);
542module_exit(scsi_dh_exit);
543
160MODULE_DESCRIPTION("SCSI device handler"); 544MODULE_DESCRIPTION("SCSI device handler");
161MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>"); 545MODULE_AUTHOR("Chandra Seetharaman <sekharan@us.ibm.com>");
162MODULE_LICENSE("GPL"); 546MODULE_LICENSE("GPL");
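With the rework above, a handler can be bound in three ways: automatically at BUS_NOTIFY_ADD_DEVICE through the new vendor/model cache, from user space via the new dh_state sysfs attribute (write a handler name to attach, "detach" to detach, "activate" to run the handler's activate method), or programmatically through the exported scsi_dh_attach()/scsi_dh_detach() pair. A minimal sketch of the programmatic path as a dm-multipath-style caller might use it; the handler name is whatever the module registered, "alua" here to match the handler added below:

#include <linux/blkdev.h>
#include <scsi/scsi_dh.h>

/* Bind the ALUA handler to the scsi_device behind a request queue, then
 * release it; per the code above, scsi_dh_detach() only detaches when the
 * binding was made explicitly rather than via the automatic device match. */
static int example_bind_alua(struct request_queue *q)
{
        int err = scsi_dh_attach(q, "alua");

        if (err)
                return err;
        /* ... send path-checker I/O, activate on failover, ... */
        scsi_dh_detach(q);
        return 0;
}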
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
new file mode 100644
index 000000000000..fcdd73f25625
--- /dev/null
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -0,0 +1,802 @@
1/*
2 * Generic SCSI-3 ALUA SCSI Device Handler
3 *
4 * Copyright (C) 2007, 2008 Hannes Reinecke, SUSE Linux Products GmbH.
5 * All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 *
21 */
22#include <scsi/scsi.h>
23#include <scsi/scsi_eh.h>
24#include <scsi/scsi_dh.h>
25
26#define ALUA_DH_NAME "alua"
27#define ALUA_DH_VER "1.2"
28
29#define TPGS_STATE_OPTIMIZED 0x0
30#define TPGS_STATE_NONOPTIMIZED 0x1
31#define TPGS_STATE_STANDBY 0x2
32#define TPGS_STATE_UNAVAILABLE 0x3
33#define TPGS_STATE_OFFLINE 0xe
34#define TPGS_STATE_TRANSITIONING 0xf
35
36#define TPGS_SUPPORT_NONE 0x00
37#define TPGS_SUPPORT_OPTIMIZED 0x01
38#define TPGS_SUPPORT_NONOPTIMIZED 0x02
39#define TPGS_SUPPORT_STANDBY 0x04
40#define TPGS_SUPPORT_UNAVAILABLE 0x08
41#define TPGS_SUPPORT_OFFLINE 0x40
42#define TPGS_SUPPORT_TRANSITION 0x80
43
44#define TPGS_MODE_UNINITIALIZED -1
45#define TPGS_MODE_NONE 0x0
46#define TPGS_MODE_IMPLICIT 0x1
47#define TPGS_MODE_EXPLICIT 0x2
48
49#define ALUA_INQUIRY_SIZE 36
50#define ALUA_FAILOVER_TIMEOUT (60 * HZ)
51#define ALUA_FAILOVER_RETRIES 5
52
53struct alua_dh_data {
54 int group_id;
55 int rel_port;
56 int tpgs;
57 int state;
58 unsigned char inq[ALUA_INQUIRY_SIZE];
59 unsigned char *buff;
60 int bufflen;
61 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
62 int senselen;
63};
64
65#define ALUA_POLICY_SWITCH_CURRENT 0
66#define ALUA_POLICY_SWITCH_ALL 1
67
68static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev)
69{
70 struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
71 BUG_ON(scsi_dh_data == NULL);
72 return ((struct alua_dh_data *) scsi_dh_data->buf);
73}
74
75static int realloc_buffer(struct alua_dh_data *h, unsigned len)
76{
77 if (h->buff && h->buff != h->inq)
78 kfree(h->buff);
79
80 h->buff = kmalloc(len, GFP_NOIO);
81 if (!h->buff) {
82 h->buff = h->inq;
83 h->bufflen = ALUA_INQUIRY_SIZE;
84 return 1;
85 }
86 h->bufflen = len;
87 return 0;
88}
89
90static struct request *get_alua_req(struct scsi_device *sdev,
91 void *buffer, unsigned buflen, int rw)
92{
93 struct request *rq;
94 struct request_queue *q = sdev->request_queue;
95
96 rq = blk_get_request(q, rw, GFP_NOIO);
97
98 if (!rq) {
99 sdev_printk(KERN_INFO, sdev,
100 "%s: blk_get_request failed\n", __func__);
101 return NULL;
102 }
103
104 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
105 blk_put_request(rq);
106 sdev_printk(KERN_INFO, sdev,
107 "%s: blk_rq_map_kern failed\n", __func__);
108 return NULL;
109 }
110
111 rq->cmd_type = REQ_TYPE_BLOCK_PC;
112 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
113 rq->retries = ALUA_FAILOVER_RETRIES;
114 rq->timeout = ALUA_FAILOVER_TIMEOUT;
115
116 return rq;
117}
118
119/*
120 * submit_std_inquiry - Issue a standard INQUIRY command
121 * @sdev: sdev the command should be sent to
122 */
123static int submit_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
124{
125 struct request *rq;
126 int err = SCSI_DH_RES_TEMP_UNAVAIL;
127
128 rq = get_alua_req(sdev, h->inq, ALUA_INQUIRY_SIZE, READ);
129 if (!rq)
130 goto done;
131
132 /* Prepare the command. */
133 rq->cmd[0] = INQUIRY;
134 rq->cmd[1] = 0;
135 rq->cmd[2] = 0;
136 rq->cmd[4] = ALUA_INQUIRY_SIZE;
137 rq->cmd_len = COMMAND_SIZE(INQUIRY);
138
139 rq->sense = h->sense;
140 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
141 rq->sense_len = h->senselen = 0;
142
143 err = blk_execute_rq(rq->q, NULL, rq, 1);
144 if (err == -EIO) {
145 sdev_printk(KERN_INFO, sdev,
146 "%s: std inquiry failed with %x\n",
147 ALUA_DH_NAME, rq->errors);
148 h->senselen = rq->sense_len;
149 err = SCSI_DH_IO;
150 }
151 blk_put_request(rq);
152done:
153 return err;
154}
155
156/*
157 * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command
158 * @sdev: sdev the command should be sent to
159 */
160static int submit_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
161{
162 struct request *rq;
163 int err = SCSI_DH_RES_TEMP_UNAVAIL;
164
165 rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
166 if (!rq)
167 goto done;
168
169 /* Prepare the command. */
170 rq->cmd[0] = INQUIRY;
171 rq->cmd[1] = 1;
172 rq->cmd[2] = 0x83;
173 rq->cmd[4] = h->bufflen;
174 rq->cmd_len = COMMAND_SIZE(INQUIRY);
175
176 rq->sense = h->sense;
177 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
178 rq->sense_len = h->senselen = 0;
179
180 err = blk_execute_rq(rq->q, NULL, rq, 1);
181 if (err == -EIO) {
182 sdev_printk(KERN_INFO, sdev,
183 "%s: evpd inquiry failed with %x\n",
184 ALUA_DH_NAME, rq->errors);
185 h->senselen = rq->sense_len;
186 err = SCSI_DH_IO;
187 }
188 blk_put_request(rq);
189done:
190 return err;
191}
192
193/*
194 * submit_rtpg - Issue a REPORT TARGET GROUP STATES command
195 * @sdev: sdev the command should be sent to
196 */
197static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
198{
199 struct request *rq;
200 int err = SCSI_DH_RES_TEMP_UNAVAIL;
201
202 rq = get_alua_req(sdev, h->buff, h->bufflen, READ);
203 if (!rq)
204 goto done;
205
206 /* Prepare the command. */
207 rq->cmd[0] = MAINTENANCE_IN;
208 rq->cmd[1] = MI_REPORT_TARGET_PGS;
209 rq->cmd[6] = (h->bufflen >> 24) & 0xff;
210 rq->cmd[7] = (h->bufflen >> 16) & 0xff;
211 rq->cmd[8] = (h->bufflen >> 8) & 0xff;
212 rq->cmd[9] = h->bufflen & 0xff;
213 rq->cmd_len = COMMAND_SIZE(MAINTENANCE_IN);
214
215 rq->sense = h->sense;
216 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
217 rq->sense_len = h->senselen = 0;
218
219 err = blk_execute_rq(rq->q, NULL, rq, 1);
220 if (err == -EIO) {
221 sdev_printk(KERN_INFO, sdev,
222 "%s: rtpg failed with %x\n",
223 ALUA_DH_NAME, rq->errors);
224 h->senselen = rq->sense_len;
225 err = SCSI_DH_IO;
226 }
227 blk_put_request(rq);
228done:
229 return err;
230}
231
232/*
233 * submit_stpg - Issue a SET TARGET GROUP STATES command
234 * @sdev: sdev the command should be sent to
235 *
236 * Currently we're only setting the current target port group state
237 * to 'active/optimized' and let the array firmware figure out
238 * the states of the remaining groups.
239 */
240static unsigned submit_stpg(struct scsi_device *sdev, struct alua_dh_data *h)
241{
242 struct request *rq;
243 int err = SCSI_DH_RES_TEMP_UNAVAIL;
244 int stpg_len = 8;
245
246 /* Prepare the data buffer */
247 memset(h->buff, 0, stpg_len);
248 h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f;
249 h->buff[6] = (h->group_id >> 8) & 0x0f;
250 h->buff[7] = h->group_id & 0x0f;
251
252 rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
253 if (!rq)
254 goto done;
255
256 /* Prepare the command. */
257 rq->cmd[0] = MAINTENANCE_OUT;
258 rq->cmd[1] = MO_SET_TARGET_PGS;
259 rq->cmd[6] = (stpg_len >> 24) & 0xff;
260 rq->cmd[7] = (stpg_len >> 16) & 0xff;
261 rq->cmd[8] = (stpg_len >> 8) & 0xff;
262 rq->cmd[9] = stpg_len & 0xff;
263 rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT);
264
265 rq->sense = h->sense;
266 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
267 rq->sense_len = h->senselen = 0;
268
269 err = blk_execute_rq(rq->q, NULL, rq, 1);
270 if (err == -EIO) {
271 sdev_printk(KERN_INFO, sdev,
272 "%s: stpg failed with %x\n",
273 ALUA_DH_NAME, rq->errors);
274 h->senselen = rq->sense_len;
275 err = SCSI_DH_IO;
276 }
277 blk_put_request(rq);
278done:
279 return err;
280}
281
282/*
283 * alua_std_inquiry - Evaluate standard INQUIRY command
284 * @sdev: device to be checked
285 *
286 * Just extract the TPGS setting to find out if ALUA
287 * is supported.
288 */
289static int alua_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
290{
291 int err;
292
293 err = submit_std_inquiry(sdev, h);
294
295 if (err != SCSI_DH_OK)
296 return err;
297
298 /* Check TPGS setting */
299 h->tpgs = (h->inq[5] >> 4) & 0x3;
300 switch (h->tpgs) {
301 case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
302 sdev_printk(KERN_INFO, sdev,
303 "%s: supports implicit and explicit TPGS\n",
304 ALUA_DH_NAME);
305 break;
306 case TPGS_MODE_EXPLICIT:
307 sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
308 ALUA_DH_NAME);
309 break;
310 case TPGS_MODE_IMPLICIT:
311 sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
312 ALUA_DH_NAME);
313 break;
314 default:
315 h->tpgs = TPGS_MODE_NONE;
316 sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
317 ALUA_DH_NAME);
318 err = SCSI_DH_DEV_UNSUPP;
319 break;
320 }
321
322 return err;
323}
324
325/*
326 * alua_vpd_inquiry - Evaluate INQUIRY vpd page 0x83
327 * @sdev: device to be checked
328 *
329 * Extract the relative target port and the target port group
330 * descriptor from the list of identifiers.
331 */
332static int alua_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
333{
334 int len;
335 unsigned err;
336 unsigned char *d;
337
338 retry:
339 err = submit_vpd_inquiry(sdev, h);
340
341 if (err != SCSI_DH_OK)
342 return err;
343
344 /* Check if vpd page exceeds initial buffer */
345 len = (h->buff[2] << 8) + h->buff[3] + 4;
346 if (len > h->bufflen) {
347 /* Resubmit with the correct length */
348 if (realloc_buffer(h, len)) {
349 sdev_printk(KERN_WARNING, sdev,
350 "%s: kmalloc buffer failed\n",
351 ALUA_DH_NAME);
352 /* Temporary failure, bypass */
353 return SCSI_DH_DEV_TEMP_BUSY;
354 }
355 goto retry;
356 }
357
358 /*
359 * Now look for the correct descriptor.
360 */
361 d = h->buff + 4;
362 while (d < h->buff + len) {
363 switch (d[1] & 0xf) {
364 case 0x4:
365 /* Relative target port */
366 h->rel_port = (d[6] << 8) + d[7];
367 break;
368 case 0x5:
369 /* Target port group */
370 h->group_id = (d[6] << 8) + d[7];
371 break;
372 default:
373 break;
374 }
375 d += d[3] + 4;
376 }
377
378 if (h->group_id == -1) {
379 /*
380 * Internal error; TPGS supported but required
381 * VPD identification descriptors not present.
382 * Disable ALUA support
383 */
384 sdev_printk(KERN_INFO, sdev,
385 "%s: No target port descriptors found\n",
386 ALUA_DH_NAME);
387 h->state = TPGS_STATE_OPTIMIZED;
388 h->tpgs = TPGS_MODE_NONE;
389 err = SCSI_DH_DEV_UNSUPP;
390 } else {
391 sdev_printk(KERN_INFO, sdev,
392 "%s: port group %02x rel port %02x\n",
393 ALUA_DH_NAME, h->group_id, h->rel_port);
394 }
395
396 return err;
397}
398
399static char print_alua_state(int state)
400{
401 switch (state) {
402 case TPGS_STATE_OPTIMIZED:
403 return 'A';
404 case TPGS_STATE_NONOPTIMIZED:
405 return 'N';
406 case TPGS_STATE_STANDBY:
407 return 'S';
408 case TPGS_STATE_UNAVAILABLE:
409 return 'U';
410 case TPGS_STATE_OFFLINE:
411 return 'O';
412 case TPGS_STATE_TRANSITIONING:
413 return 'T';
414 default:
415 return 'X';
416 }
417}
418
419static int alua_check_sense(struct scsi_device *sdev,
420 struct scsi_sense_hdr *sense_hdr)
421{
422 switch (sense_hdr->sense_key) {
423 case NOT_READY:
424 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a)
425 /*
426 * LUN Not Accessible - ALUA state transition
427 */
428 return NEEDS_RETRY;
429 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0b)
430 /*
431 * LUN Not Accessible -- Target port in standby state
432 */
433 return SUCCESS;
434 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0c)
435 /*
436 * LUN Not Accessible -- Target port in unavailable state
437 */
438 return SUCCESS;
439 if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x12)
440 /*
441 * LUN Not Ready -- Offline
442 */
443 return SUCCESS;
444 break;
445 case UNIT_ATTENTION:
446 if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
447 /*
448 * Power On, Reset, or Bus Device Reset, just retry.
449 */
450 return NEEDS_RETRY;
451 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
452 /*
453 * ALUA state changed
454 */
455 return NEEDS_RETRY;
456 }
457 if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
458 /*
459 * Implicit ALUA state transition failed
460 */
461 return NEEDS_RETRY;
462 }
463 break;
464 }
465
466 return SCSI_RETURN_NOT_HANDLED;
467}
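
For quick reference, the sense combinations that the switch above treats as ALUA events can be collected into a small table; this standalone sketch simply restates them (sense key 0x02 is NOT_READY, 0x06 is UNIT_ATTENTION) and is not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* Compact restatement of the key/ASC/ASCQ pairs handled by
 * alua_check_sense() above; meant only as a quick reference. */
struct alua_sense {
        uint8_t key, asc, ascq;
        const char *meaning;
};

static const struct alua_sense alua_sense_tbl[] = {
        { 0x02, 0x04, 0x0a, "ALUA state transition, retry" },
        { 0x02, 0x04, 0x0b, "target port in standby state" },
        { 0x02, 0x04, 0x0c, "target port in unavailable state" },
        { 0x02, 0x04, 0x12, "LUN offline" },
        { 0x06, 0x29, 0x00, "power on / reset, retry" },
        { 0x06, 0x2a, 0x06, "ALUA state changed, retry" },
        { 0x06, 0x2a, 0x07, "implicit ALUA transition failed, retry" },
};

int main(void)
{
        size_t i;

        for (i = 0; i < sizeof(alua_sense_tbl) / sizeof(alua_sense_tbl[0]); i++)
                printf("%02x/%02x/%02x: %s\n", alua_sense_tbl[i].key,
                       alua_sense_tbl[i].asc, alua_sense_tbl[i].ascq,
                       alua_sense_tbl[i].meaning);
        return 0;
}
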
468
469/*
 470 * alua_stpg - Issue a SET TARGET PORT GROUPS command
 471 * @sdev: the device to be evaluated
 472 * @state: the new target port group state
 473 *
 474 * Send a SET TARGET PORT GROUPS (STPG) command to the device.
 475 * We only have to check here whether the command should be resubmitted;
 476 * any other error is treated as a failure.
477 */
478static int alua_stpg(struct scsi_device *sdev, int state,
479 struct alua_dh_data *h)
480{
481 struct scsi_sense_hdr sense_hdr;
482 unsigned err;
483 int retry = ALUA_FAILOVER_RETRIES;
484
485 retry:
486 err = submit_stpg(sdev, h);
487 if (err == SCSI_DH_IO && h->senselen > 0) {
488 err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
489 &sense_hdr);
490 if (!err)
491 return SCSI_DH_IO;
492 err = alua_check_sense(sdev, &sense_hdr);
493 if (retry > 0 && err == NEEDS_RETRY) {
494 retry--;
495 goto retry;
496 }
497 sdev_printk(KERN_INFO, sdev,
498 "%s: stpg sense code: %02x/%02x/%02x\n",
499 ALUA_DH_NAME, sense_hdr.sense_key,
500 sense_hdr.asc, sense_hdr.ascq);
501 err = SCSI_DH_IO;
502 }
503 if (err == SCSI_DH_OK) {
504 h->state = state;
505 sdev_printk(KERN_INFO, sdev,
506 "%s: port group %02x switched to state %c\n",
507 ALUA_DH_NAME, h->group_id,
508 print_alua_state(h->state) );
509 }
510 return err;
511}
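
submit_stpg() itself is defined earlier in this file and is not shown here. Assuming the SPC-3 layout for the SET TARGET PORT GROUPS parameter list (a 4-byte reserved header followed by 4-byte descriptors carrying the desired asymmetric access state in the low nibble of byte 0 and the target port group in big-endian bytes 2-3), a standalone sketch of building a single-descriptor parameter block could look like this; the helper name and the example values are illustrative only, not the kernel's.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Build the parameter data for a single-descriptor SET TARGET PORT GROUPS
 * command, assuming the SPC-3 layout: 4 reserved bytes, then 4-byte
 * descriptors with the access state in the low nibble of byte 0 and the
 * target port group in big-endian bytes 2-3. Example values only. */
static int build_stpg(uint8_t *buf, size_t len, int state, int group_id)
{
        if (len < 8)
                return -1;
        memset(buf, 0, 8);
        buf[4] = state & 0x0f;
        buf[6] = (group_id >> 8) & 0xff;
        buf[7] = group_id & 0xff;
        return 8;
}

int main(void)
{
        uint8_t buf[8];
        int i, n = build_stpg(buf, sizeof(buf), 0x0 /* active/optimized */, 7);

        for (i = 0; i < n; i++)
                printf("%02x ", buf[i]);
        printf("\n");
        return 0;
}
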
512
513/*
 514 * alua_rtpg - Evaluate REPORT TARGET PORT GROUPS
515 * @sdev: the device to be evaluated.
516 *
517 * Evaluate the Target Port Group State.
518 * Returns SCSI_DH_DEV_OFFLINED if the path is
 519 * found to be unusable.
520 */
521static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
522{
523 struct scsi_sense_hdr sense_hdr;
524 int len, k, off, valid_states = 0;
525 char *ucp;
526 unsigned err;
527
528 retry:
529 err = submit_rtpg(sdev, h);
530
531 if (err == SCSI_DH_IO && h->senselen > 0) {
532 err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
533 &sense_hdr);
534 if (!err)
535 return SCSI_DH_IO;
536
537 err = alua_check_sense(sdev, &sense_hdr);
538 if (err == NEEDS_RETRY)
539 goto retry;
540 sdev_printk(KERN_INFO, sdev,
541 "%s: rtpg sense code %02x/%02x/%02x\n",
542 ALUA_DH_NAME, sense_hdr.sense_key,
543 sense_hdr.asc, sense_hdr.ascq);
544 err = SCSI_DH_IO;
545 }
546 if (err != SCSI_DH_OK)
547 return err;
548
549 len = (h->buff[0] << 24) + (h->buff[1] << 16) +
550 (h->buff[2] << 8) + h->buff[3] + 4;
551
552 if (len > h->bufflen) {
553 /* Resubmit with the correct length */
554 if (realloc_buffer(h, len)) {
555 sdev_printk(KERN_WARNING, sdev,
556 "%s: kmalloc buffer failed\n",__func__);
557 /* Temporary failure, bypass */
558 return SCSI_DH_DEV_TEMP_BUSY;
559 }
560 goto retry;
561 }
562
563 for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) {
564 if (h->group_id == (ucp[2] << 8) + ucp[3]) {
565 h->state = ucp[0] & 0x0f;
566 valid_states = ucp[1];
567 }
568 off = 8 + (ucp[7] * 4);
569 }
570
571 sdev_printk(KERN_INFO, sdev,
572 "%s: port group %02x state %c supports %c%c%c%c%c%c\n",
573 ALUA_DH_NAME, h->group_id, print_alua_state(h->state),
574 valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
575 valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
576 valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
577 valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
578 valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
579 valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');
580
581 if (h->tpgs & TPGS_MODE_EXPLICIT) {
582 switch (h->state) {
583 case TPGS_STATE_TRANSITIONING:
584 /* State transition, retry */
585 goto retry;
586 break;
587 case TPGS_STATE_OFFLINE:
588 /* Path is offline, fail */
589 err = SCSI_DH_DEV_OFFLINED;
590 break;
591 default:
592 break;
593 }
594 } else {
595 /* Only Implicit ALUA support */
596 if (h->state == TPGS_STATE_OPTIMIZED ||
597 h->state == TPGS_STATE_NONOPTIMIZED ||
598 h->state == TPGS_STATE_STANDBY)
 599 /* Usable path if active */
600 err = SCSI_DH_OK;
601 else
 602 /* Path unusable for unavailable/offline */
603 err = SCSI_DH_DEV_OFFLINED;
604 }
605 return err;
606}
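
The length and descriptor arithmetic above follows the REPORT TARGET PORT GROUPS response format: a 4-byte big-endian return data length, then one descriptor per port group, each 8 bytes plus 4 bytes per relative target port counted in byte 7. Below is a small self-contained sketch that decodes a hand-built response the same way; it is an illustration, not driver code.

#include <stdio.h>
#include <stdint.h>

/* Decode a REPORT TARGET PORT GROUPS response the same way alua_rtpg()
 * does. The buffer is a hand-built example. */
int main(void)
{
        uint8_t buf[] = {
                0x00, 0x00, 0x00, 0x0c,         /* 12 descriptor bytes follow */
                /* group 7: active/optimized, supports A/N/S, one port */
                0x00, 0x07, 0x00, 0x07, 0x00, 0x00, 0x00, 0x01,
                /* relative target port identifier 2 */
                0x00, 0x00, 0x00, 0x02,
        };
        int len = (buf[0] << 24) + (buf[1] << 16) + (buf[2] << 8) + buf[3] + 4;
        int k, off;

        for (k = 4; k < len; k += off) {
                uint8_t *ucp = buf + k;

                printf("group %d state %d supports 0x%02x ports %d\n",
                       (ucp[2] << 8) + ucp[3], ucp[0] & 0x0f, ucp[1], ucp[7]);
                off = 8 + ucp[7] * 4;
        }
        return 0;
}
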
607
608/*
609 * alua_initialize - Initialize ALUA state
610 * @sdev: the device to be initialized
611 *
612 * For the prep_fn to work correctly we have
613 * to initialize the ALUA state for the device.
614 */
615static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
616{
617 int err;
618
619 err = alua_std_inquiry(sdev, h);
620 if (err != SCSI_DH_OK)
621 goto out;
622
623 err = alua_vpd_inquiry(sdev, h);
624 if (err != SCSI_DH_OK)
625 goto out;
626
627 err = alua_rtpg(sdev, h);
628 if (err != SCSI_DH_OK)
629 goto out;
630
631out:
632 return err;
633}
634
635/*
636 * alua_activate - activate a path
637 * @sdev: device on the path to be activated
638 *
 639 * We currently switch only the port group to be activated and
 640 * let the array figure out the rest.
 641 * There may be other arrays which require us to switch all port groups
 642 * based on a certain policy, but until we actually encounter them this
 643 * should be fine.
644 */
645static int alua_activate(struct scsi_device *sdev)
646{
647 struct alua_dh_data *h = get_alua_data(sdev);
648 int err = SCSI_DH_OK;
649
650 if (h->group_id != -1) {
651 err = alua_rtpg(sdev, h);
652 if (err != SCSI_DH_OK)
653 goto out;
654 }
655
656 if (h->tpgs == TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED)
657 err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h);
658
659out:
660 return err;
661}
662
663/*
664 * alua_prep_fn - request callback
665 *
666 * Fail I/O to all paths not in state
667 * active/optimized or active/non-optimized.
668 */
669static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
670{
671 struct alua_dh_data *h = get_alua_data(sdev);
672 int ret = BLKPREP_OK;
673
674 if (h->state != TPGS_STATE_OPTIMIZED &&
675 h->state != TPGS_STATE_NONOPTIMIZED) {
676 ret = BLKPREP_KILL;
677 req->cmd_flags |= REQ_QUIET;
678 }
679 return ret;
680
681}
682
683const struct scsi_dh_devlist alua_dev_list[] = {
684 {"HP", "MSA VOLUME" },
685 {"HP", "HSV101" },
686 {"HP", "HSV111" },
687 {"HP", "HSV200" },
688 {"HP", "HSV210" },
689 {"HP", "HSV300" },
690 {"IBM", "2107900" },
691 {"IBM", "2145" },
692 {"Pillar", "Axiom" },
693 {NULL, NULL}
694};
695
696static int alua_bus_attach(struct scsi_device *sdev);
697static void alua_bus_detach(struct scsi_device *sdev);
698
699static struct scsi_device_handler alua_dh = {
700 .name = ALUA_DH_NAME,
701 .module = THIS_MODULE,
702 .devlist = alua_dev_list,
703 .attach = alua_bus_attach,
704 .detach = alua_bus_detach,
705 .prep_fn = alua_prep_fn,
706 .check_sense = alua_check_sense,
707 .activate = alua_activate,
708};
709
710/*
711 * alua_bus_attach - Attach device handler
712 * @sdev: device to be attached to
713 */
714static int alua_bus_attach(struct scsi_device *sdev)
715{
716 struct scsi_dh_data *scsi_dh_data;
717 struct alua_dh_data *h;
718 unsigned long flags;
719 int err = SCSI_DH_OK;
720
721 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
722 + sizeof(*h) , GFP_KERNEL);
723 if (!scsi_dh_data) {
724 sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
725 ALUA_DH_NAME);
726 return -ENOMEM;
727 }
728
729 scsi_dh_data->scsi_dh = &alua_dh;
730 h = (struct alua_dh_data *) scsi_dh_data->buf;
731 h->tpgs = TPGS_MODE_UNINITIALIZED;
732 h->state = TPGS_STATE_OPTIMIZED;
733 h->group_id = -1;
734 h->rel_port = -1;
735 h->buff = h->inq;
736 h->bufflen = ALUA_INQUIRY_SIZE;
737
738 err = alua_initialize(sdev, h);
739 if (err != SCSI_DH_OK)
740 goto failed;
741
742 if (!try_module_get(THIS_MODULE))
743 goto failed;
744
745 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
746 sdev->scsi_dh_data = scsi_dh_data;
747 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
748
749 return 0;
750
751failed:
752 kfree(scsi_dh_data);
753 sdev_printk(KERN_ERR, sdev, "%s: not attached\n", ALUA_DH_NAME);
754 return -EINVAL;
755}
756
757/*
758 * alua_bus_detach - Detach device handler
759 * @sdev: device to be detached from
760 */
761static void alua_bus_detach(struct scsi_device *sdev)
762{
763 struct scsi_dh_data *scsi_dh_data;
764 struct alua_dh_data *h;
765 unsigned long flags;
766
767 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
768 scsi_dh_data = sdev->scsi_dh_data;
769 sdev->scsi_dh_data = NULL;
770 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
771
772 h = (struct alua_dh_data *) scsi_dh_data->buf;
773 if (h->buff && h->inq != h->buff)
774 kfree(h->buff);
775 kfree(scsi_dh_data);
776 module_put(THIS_MODULE);
777 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", ALUA_DH_NAME);
778}
779
780static int __init alua_init(void)
781{
782 int r;
783
784 r = scsi_register_device_handler(&alua_dh);
785 if (r != 0)
 786 printk(KERN_ERR "%s: Failed to register scsi device handler\n",
787 ALUA_DH_NAME);
788 return r;
789}
790
791static void __exit alua_exit(void)
792{
793 scsi_unregister_device_handler(&alua_dh);
794}
795
796module_init(alua_init);
797module_exit(alua_exit);
798
799MODULE_DESCRIPTION("DM Multipath ALUA support");
800MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
801MODULE_LICENSE("GPL");
802MODULE_VERSION(ALUA_DH_VER);
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index f2467e936e55..aa46b131b20e 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -25,28 +25,31 @@
25#include <scsi/scsi_dh.h> 25#include <scsi/scsi_dh.h>
26#include <scsi/scsi_device.h> 26#include <scsi/scsi_device.h>
27 27
28#define CLARIION_NAME "emc_clariion" 28#define CLARIION_NAME "emc"
29 29
30#define CLARIION_TRESPASS_PAGE 0x22 30#define CLARIION_TRESPASS_PAGE 0x22
31#define CLARIION_BUFFER_SIZE 0x80 31#define CLARIION_BUFFER_SIZE 0xFC
32#define CLARIION_TIMEOUT (60 * HZ) 32#define CLARIION_TIMEOUT (60 * HZ)
33#define CLARIION_RETRIES 3 33#define CLARIION_RETRIES 3
34#define CLARIION_UNBOUND_LU -1 34#define CLARIION_UNBOUND_LU -1
35#define CLARIION_SP_A 0
36#define CLARIION_SP_B 1
35 37
36static unsigned char long_trespass[] = { 38/* Flags */
37 0, 0, 0, 0, 39#define CLARIION_SHORT_TRESPASS 1
38 CLARIION_TRESPASS_PAGE, /* Page code */ 40#define CLARIION_HONOR_RESERVATIONS 2
39 0x09, /* Page length - 2 */
40 0x81, /* Trespass code + Honor reservation bit */
41 0xff, 0xff, /* Trespass target */
42 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
43};
44 41
45static unsigned char long_trespass_hr[] = { 42/* LUN states */
46 0, 0, 0, 0, 43#define CLARIION_LUN_UNINITIALIZED -1
44#define CLARIION_LUN_UNBOUND 0
45#define CLARIION_LUN_BOUND 1
46#define CLARIION_LUN_OWNED 2
47
48static unsigned char long_trespass[] = {
49 0, 0, 0, 0, 0, 0, 0, 0,
47 CLARIION_TRESPASS_PAGE, /* Page code */ 50 CLARIION_TRESPASS_PAGE, /* Page code */
48 0x09, /* Page length - 2 */ 51 0x09, /* Page length - 2 */
49 0x01, /* Trespass code + Honor reservation bit */ 52 0x01, /* Trespass code */
50 0xff, 0xff, /* Trespass target */ 53 0xff, 0xff, /* Trespass target */
51 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */ 54 0, 0, 0, 0, 0, 0 /* Reserved bytes / unknown */
52}; 55};
@@ -55,39 +58,56 @@ static unsigned char short_trespass[] = {
55 0, 0, 0, 0, 58 0, 0, 0, 0,
56 CLARIION_TRESPASS_PAGE, /* Page code */ 59 CLARIION_TRESPASS_PAGE, /* Page code */
57 0x02, /* Page length - 2 */ 60 0x02, /* Page length - 2 */
58 0x81, /* Trespass code + Honor reservation bit */ 61 0x01, /* Trespass code */
59 0xff, /* Trespass target */ 62 0xff, /* Trespass target */
60}; 63};
61 64
62static unsigned char short_trespass_hr[] = { 65static const char * lun_state[] =
63 0, 0, 0, 0, 66{
64 CLARIION_TRESPASS_PAGE, /* Page code */ 67 "not bound",
65 0x02, /* Page length - 2 */ 68 "bound",
66 0x01, /* Trespass code + Honor reservation bit */ 69 "owned",
67 0xff, /* Trespass target */
68}; 70};
69 71
70struct clariion_dh_data { 72struct clariion_dh_data {
71 /* 73 /*
74 * Flags:
75 * CLARIION_SHORT_TRESPASS
72 * Use short trespass command (FC-series) or the long version 76 * Use short trespass command (FC-series) or the long version
73 * (default for AX/CX CLARiiON arrays). 77 * (default for AX/CX CLARiiON arrays).
74 */ 78 *
75 unsigned short_trespass; 79 * CLARIION_HONOR_RESERVATIONS
76 /*
77 * Whether or not (default) to honor SCSI reservations when 80 * Whether or not (default) to honor SCSI reservations when
78 * initiating a switch-over. 81 * initiating a switch-over.
79 */ 82 */
80 unsigned hr; 83 unsigned flags;
81 /* I/O buffer for both MODE_SELECT and INQUIRY commands. */ 84 /*
85 * I/O buffer for both MODE_SELECT and INQUIRY commands.
86 */
82 char buffer[CLARIION_BUFFER_SIZE]; 87 char buffer[CLARIION_BUFFER_SIZE];
83 /* 88 /*
84 * SCSI sense buffer for commands -- assumes serial issuance 89 * SCSI sense buffer for commands -- assumes serial issuance
85 * and completion sequence of all commands for same multipath. 90 * and completion sequence of all commands for same multipath.
86 */ 91 */
87 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 92 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
88 /* which SP (A=0,B=1,UNBOUND=-1) is dflt SP for path's mapped dev */ 93 unsigned int senselen;
94 /*
95 * LUN state
96 */
97 int lun_state;
98 /*
99 * SP Port number
100 */
101 int port;
102 /*
103 * which SP (A=0,B=1,UNBOUND=-1) is the default SP for this
104 * path's mapped LUN
105 */
89 int default_sp; 106 int default_sp;
90 /* which SP (A=0,B=1,UNBOUND=-1) is active for path's mapped dev */ 107 /*
108 * which SP (A=0,B=1,UNBOUND=-1) is the active SP for this
109 * path's mapped LUN
110 */
91 int current_sp; 111 int current_sp;
92}; 112};
93 113
@@ -102,19 +122,16 @@ static inline struct clariion_dh_data
102/* 122/*
103 * Parse MODE_SELECT cmd reply. 123 * Parse MODE_SELECT cmd reply.
104 */ 124 */
105static int trespass_endio(struct scsi_device *sdev, int result) 125static int trespass_endio(struct scsi_device *sdev, char *sense)
106{ 126{
107 int err = SCSI_DH_OK; 127 int err = SCSI_DH_IO;
108 struct scsi_sense_hdr sshdr; 128 struct scsi_sense_hdr sshdr;
109 struct clariion_dh_data *csdev = get_clariion_data(sdev);
110 char *sense = csdev->sense;
111 129
112 if (status_byte(result) == CHECK_CONDITION && 130 if (!scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) {
113 scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)) { 131 sdev_printk(KERN_ERR, sdev, "%s: Found valid sense data 0x%2x, "
114 sdev_printk(KERN_ERR, sdev, "Found valid sense data 0x%2x, "
115 "0x%2x, 0x%2x while sending CLARiiON trespass " 132 "0x%2x, 0x%2x while sending CLARiiON trespass "
116 "command.\n", sshdr.sense_key, sshdr.asc, 133 "command.\n", CLARIION_NAME, sshdr.sense_key,
117 sshdr.ascq); 134 sshdr.asc, sshdr.ascq);
118 135
119 if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) && 136 if ((sshdr.sense_key == 0x05) && (sshdr.asc == 0x04) &&
120 (sshdr.ascq == 0x00)) { 137 (sshdr.ascq == 0x00)) {
@@ -122,9 +139,9 @@ static int trespass_endio(struct scsi_device *sdev, int result)
122 * Array based copy in progress -- do not send 139 * Array based copy in progress -- do not send
123 * mode_select or copy will be aborted mid-stream. 140 * mode_select or copy will be aborted mid-stream.
124 */ 141 */
125 sdev_printk(KERN_INFO, sdev, "Array Based Copy in " 142 sdev_printk(KERN_INFO, sdev, "%s: Array Based Copy in "
126 "progress while sending CLARiiON trespass " 143 "progress while sending CLARiiON trespass "
127 "command.\n"); 144 "command.\n", CLARIION_NAME);
128 err = SCSI_DH_DEV_TEMP_BUSY; 145 err = SCSI_DH_DEV_TEMP_BUSY;
129 } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) && 146 } else if ((sshdr.sense_key == 0x02) && (sshdr.asc == 0x04) &&
130 (sshdr.ascq == 0x03)) { 147 (sshdr.ascq == 0x03)) {
@@ -132,160 +149,153 @@ static int trespass_endio(struct scsi_device *sdev, int result)
132 * LUN Not Ready - Manual Intervention Required 149 * LUN Not Ready - Manual Intervention Required
133 * indicates in-progress ucode upgrade (NDU). 150 * indicates in-progress ucode upgrade (NDU).
134 */ 151 */
135 sdev_printk(KERN_INFO, sdev, "Detected in-progress " 152 sdev_printk(KERN_INFO, sdev, "%s: Detected in-progress "
136 "ucode upgrade NDU operation while sending " 153 "ucode upgrade NDU operation while sending "
137 "CLARiiON trespass command.\n"); 154 "CLARiiON trespass command.\n", CLARIION_NAME);
138 err = SCSI_DH_DEV_TEMP_BUSY; 155 err = SCSI_DH_DEV_TEMP_BUSY;
139 } else 156 } else
140 err = SCSI_DH_DEV_FAILED; 157 err = SCSI_DH_DEV_FAILED;
141 } else if (result) { 158 } else {
142 sdev_printk(KERN_ERR, sdev, "Error 0x%x while sending " 159 sdev_printk(KERN_INFO, sdev,
143 "CLARiiON trespass command.\n", result); 160 "%s: failed to send MODE SELECT, no sense available\n",
144 err = SCSI_DH_IO; 161 CLARIION_NAME);
145 } 162 }
146
147 return err; 163 return err;
148} 164}
149 165
150static int parse_sp_info_reply(struct scsi_device *sdev, int result, 166static int parse_sp_info_reply(struct scsi_device *sdev,
151 int *default_sp, int *current_sp, int *new_current_sp) 167 struct clariion_dh_data *csdev)
152{ 168{
153 int err = SCSI_DH_OK; 169 int err = SCSI_DH_OK;
154 struct clariion_dh_data *csdev = get_clariion_data(sdev);
155 170
156 if (result == 0) { 171 /* check for in-progress ucode upgrade (NDU) */
157 /* check for in-progress ucode upgrade (NDU) */ 172 if (csdev->buffer[48] != 0) {
158 if (csdev->buffer[48] != 0) { 173 sdev_printk(KERN_NOTICE, sdev, "%s: Detected in-progress "
159 sdev_printk(KERN_NOTICE, sdev, "Detected in-progress " 174 "ucode upgrade NDU operation while finding "
160 "ucode upgrade NDU operation while finding " 175 "current active SP.", CLARIION_NAME);
161 "current active SP."); 176 err = SCSI_DH_DEV_TEMP_BUSY;
162 err = SCSI_DH_DEV_TEMP_BUSY; 177 goto out;
163 } else { 178 }
164 *default_sp = csdev->buffer[5]; 179 if (csdev->buffer[4] < 0 || csdev->buffer[4] > 2) {
165 180 /* Invalid buffer format */
166 if (csdev->buffer[4] == 2) 181 sdev_printk(KERN_NOTICE, sdev,
167 /* SP for path is current */ 182 "%s: invalid VPD page 0xC0 format\n",
168 *current_sp = csdev->buffer[8]; 183 CLARIION_NAME);
169 else { 184 err = SCSI_DH_NOSYS;
170 if (csdev->buffer[4] == 1) 185 goto out;
171 /* SP for this path is NOT current */ 186 }
172 if (csdev->buffer[8] == 0) 187 switch (csdev->buffer[28] & 0x0f) {
173 *current_sp = 1; 188 case 6:
174 else 189 sdev_printk(KERN_NOTICE, sdev,
175 *current_sp = 0; 190 "%s: ALUA failover mode detected\n",
176 else 191 CLARIION_NAME);
177 /* unbound LU or LUNZ */ 192 break;
178 *current_sp = CLARIION_UNBOUND_LU; 193 case 4:
179 } 194 /* Linux failover */
180 *new_current_sp = csdev->buffer[8]; 195 break;
181 } 196 default:
182 } else { 197 sdev_printk(KERN_WARNING, sdev,
183 struct scsi_sense_hdr sshdr; 198 "%s: Invalid failover mode %d\n",
184 199 CLARIION_NAME, csdev->buffer[28] & 0x0f);
185 err = SCSI_DH_IO; 200 err = SCSI_DH_NOSYS;
186 201 goto out;
187 if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
188 &sshdr))
189 sdev_printk(KERN_ERR, sdev, "Found valid sense data "
190 "0x%2x, 0x%2x, 0x%2x while finding current "
191 "active SP.", sshdr.sense_key, sshdr.asc,
192 sshdr.ascq);
193 else
194 sdev_printk(KERN_ERR, sdev, "Error 0x%x finding "
195 "current active SP.", result);
196 } 202 }
197 203
204 csdev->default_sp = csdev->buffer[5];
205 csdev->lun_state = csdev->buffer[4];
206 csdev->current_sp = csdev->buffer[8];
207 csdev->port = csdev->buffer[7];
208
209out:
198 return err; 210 return err;
199} 211}
200 212
201static int sp_info_endio(struct scsi_device *sdev, int result, 213#define emc_default_str "FC (Legacy)"
202 int mode_select_sent, int *done) 214
215static char * parse_sp_model(struct scsi_device *sdev, unsigned char *buffer)
203{ 216{
204 struct clariion_dh_data *csdev = get_clariion_data(sdev); 217 unsigned char len = buffer[4] + 5;
205 int err_flags, default_sp, current_sp, new_current_sp; 218 char *sp_model = NULL;
219 unsigned char sp_len, serial_len;
220
221 if (len < 160) {
222 sdev_printk(KERN_WARNING, sdev,
223 "%s: Invalid information section length %d\n",
224 CLARIION_NAME, len);
225 /* Check for old FC arrays */
226 if (!strncmp(buffer + 8, "DGC", 3)) {
227 /* Old FC array, not supporting extended information */
228 sp_model = emc_default_str;
229 }
230 goto out;
231 }
206 232
207 err_flags = parse_sp_info_reply(sdev, result, &default_sp, 233 /*
208 &current_sp, &new_current_sp); 234 * Parse extended information for SP model number
235 */
236 serial_len = buffer[160];
237 if (serial_len == 0 || serial_len + 161 > len) {
238 sdev_printk(KERN_WARNING, sdev,
239 "%s: Invalid array serial number length %d\n",
240 CLARIION_NAME, serial_len);
241 goto out;
242 }
243 sp_len = buffer[99];
244 if (sp_len == 0 || serial_len + sp_len + 161 > len) {
245 sdev_printk(KERN_WARNING, sdev,
246 "%s: Invalid model number length %d\n",
247 CLARIION_NAME, sp_len);
248 goto out;
249 }
250 sp_model = &buffer[serial_len + 161];
251 /* Strip whitespace at the end */
252 while (sp_len > 1 && sp_model[sp_len - 1] == ' ')
253 sp_len--;
209 254
210 if (err_flags != SCSI_DH_OK) 255 sp_model[sp_len] = '\0';
211 goto done;
212 256
213 if (mode_select_sent) { 257out:
214 csdev->default_sp = default_sp; 258 return sp_model;
215 csdev->current_sp = current_sp;
216 } else {
217 /*
218 * Issue the actual module_selec request IFF either
219 * (1) we do not know the identity of the current SP OR
220 * (2) what we think we know is actually correct.
221 */
222 if ((current_sp != CLARIION_UNBOUND_LU) &&
223 (new_current_sp != current_sp)) {
224
225 csdev->default_sp = default_sp;
226 csdev->current_sp = current_sp;
227
228 sdev_printk(KERN_INFO, sdev, "Ignoring path group "
229 "switch-over command for CLARiiON SP%s since "
230 " mapped device is already initialized.",
231 current_sp ? "B" : "A");
232 if (done)
233 *done = 1; /* as good as doing it */
234 }
235 }
236done:
237 return err_flags;
238} 259}
239 260
240/* 261/*
241* Get block request for REQ_BLOCK_PC command issued to path. Currently 262 * Get block request for REQ_BLOCK_PC command issued to path. Currently
242* limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands. 263 * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
243* 264 *
244* Uses data and sense buffers in hardware handler context structure and 265 * Uses data and sense buffers in hardware handler context structure and
245* assumes serial servicing of commands, both issuance and completion. 266 * assumes serial servicing of commands, both issuance and completion.
246*/ 267 */
247static struct request *get_req(struct scsi_device *sdev, int cmd) 268static struct request *get_req(struct scsi_device *sdev, int cmd,
269 unsigned char *buffer)
248{ 270{
249 struct clariion_dh_data *csdev = get_clariion_data(sdev);
250 struct request *rq; 271 struct request *rq;
251 unsigned char *page22;
252 int len = 0; 272 int len = 0;
253 273
254 rq = blk_get_request(sdev->request_queue, 274 rq = blk_get_request(sdev->request_queue,
255 (cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC); 275 (cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO);
256 if (!rq) { 276 if (!rq) {
257 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed"); 277 sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
258 return NULL; 278 return NULL;
259 } 279 }
260 280
261 memset(&rq->cmd, 0, BLK_MAX_CDB); 281 memset(rq->cmd, 0, BLK_MAX_CDB);
282 rq->cmd_len = COMMAND_SIZE(cmd);
262 rq->cmd[0] = cmd; 283 rq->cmd[0] = cmd;
263 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
264 284
265 switch (cmd) { 285 switch (cmd) {
266 case MODE_SELECT: 286 case MODE_SELECT:
267 if (csdev->short_trespass) { 287 len = sizeof(short_trespass);
268 page22 = csdev->hr ? short_trespass_hr : short_trespass; 288 rq->cmd_flags |= REQ_RW;
269 len = sizeof(short_trespass); 289 rq->cmd[1] = 0x10;
270 } else { 290 break;
271 page22 = csdev->hr ? long_trespass_hr : long_trespass; 291 case MODE_SELECT_10:
272 len = sizeof(long_trespass); 292 len = sizeof(long_trespass);
273 }
274 /*
275 * Can't DMA from kernel BSS -- must copy selected trespass
276 * command mode page contents to context buffer which is
277 * allocated by kmalloc.
278 */
279 BUG_ON((len > CLARIION_BUFFER_SIZE));
280 memcpy(csdev->buffer, page22, len);
281 rq->cmd_flags |= REQ_RW; 293 rq->cmd_flags |= REQ_RW;
282 rq->cmd[1] = 0x10; 294 rq->cmd[1] = 0x10;
283 break; 295 break;
284 case INQUIRY: 296 case INQUIRY:
285 rq->cmd[1] = 0x1;
286 rq->cmd[2] = 0xC0;
287 len = CLARIION_BUFFER_SIZE; 297 len = CLARIION_BUFFER_SIZE;
288 memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE); 298 memset(buffer, 0, len);
289 break; 299 break;
290 default: 300 default:
291 BUG_ON(1); 301 BUG_ON(1);
@@ -298,47 +308,94 @@ static struct request *get_req(struct scsi_device *sdev, int cmd)
298 rq->timeout = CLARIION_TIMEOUT; 308 rq->timeout = CLARIION_TIMEOUT;
299 rq->retries = CLARIION_RETRIES; 309 rq->retries = CLARIION_RETRIES;
300 310
301 rq->sense = csdev->sense; 311 if (blk_rq_map_kern(rq->q, rq, buffer, len, GFP_NOIO)) {
302 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); 312 blk_put_request(rq);
303 rq->sense_len = 0;
304
305 if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
306 len, GFP_ATOMIC)) {
307 __blk_put_request(rq->q, rq);
308 return NULL; 313 return NULL;
309 } 314 }
310 315
311 return rq; 316 return rq;
312} 317}
313 318
314static int send_cmd(struct scsi_device *sdev, int cmd) 319static int send_inquiry_cmd(struct scsi_device *sdev, int page,
320 struct clariion_dh_data *csdev)
315{ 321{
316 struct request *rq = get_req(sdev, cmd); 322 struct request *rq = get_req(sdev, INQUIRY, csdev->buffer);
323 int err;
317 324
318 if (!rq) 325 if (!rq)
319 return SCSI_DH_RES_TEMP_UNAVAIL; 326 return SCSI_DH_RES_TEMP_UNAVAIL;
320 327
321 return blk_execute_rq(sdev->request_queue, NULL, rq, 1); 328 rq->sense = csdev->sense;
329 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
330 rq->sense_len = csdev->senselen = 0;
331
332 rq->cmd[0] = INQUIRY;
333 if (page != 0) {
334 rq->cmd[1] = 1;
335 rq->cmd[2] = page;
336 }
337 err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
338 if (err == -EIO) {
339 sdev_printk(KERN_INFO, sdev,
340 "%s: failed to send %s INQUIRY: %x\n",
341 CLARIION_NAME, page?"EVPD":"standard",
342 rq->errors);
343 csdev->senselen = rq->sense_len;
344 err = SCSI_DH_IO;
345 }
346
347 blk_put_request(rq);
348
349 return err;
322} 350}
323 351
324static int clariion_activate(struct scsi_device *sdev) 352static int send_trespass_cmd(struct scsi_device *sdev,
353 struct clariion_dh_data *csdev)
325{ 354{
326 int result, done = 0; 355 struct request *rq;
356 unsigned char *page22;
357 int err, len, cmd;
358
359 if (csdev->flags & CLARIION_SHORT_TRESPASS) {
360 page22 = short_trespass;
361 if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
362 /* Set Honor Reservations bit */
363 page22[6] |= 0x80;
364 len = sizeof(short_trespass);
365 cmd = MODE_SELECT;
366 } else {
367 page22 = long_trespass;
368 if (!(csdev->flags & CLARIION_HONOR_RESERVATIONS))
369 /* Set Honor Reservations bit */
370 page22[10] |= 0x80;
371 len = sizeof(long_trespass);
372 cmd = MODE_SELECT_10;
373 }
374 BUG_ON((len > CLARIION_BUFFER_SIZE));
375 memcpy(csdev->buffer, page22, len);
327 376
328 result = send_cmd(sdev, INQUIRY); 377 rq = get_req(sdev, cmd, csdev->buffer);
329 result = sp_info_endio(sdev, result, 0, &done); 378 if (!rq)
330 if (result || done) 379 return SCSI_DH_RES_TEMP_UNAVAIL;
331 goto done;
332 380
333 result = send_cmd(sdev, MODE_SELECT); 381 rq->sense = csdev->sense;
334 result = trespass_endio(sdev, result); 382 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
335 if (result) 383 rq->sense_len = csdev->senselen = 0;
336 goto done;
337 384
338 result = send_cmd(sdev, INQUIRY); 385 err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);
339 result = sp_info_endio(sdev, result, 1, NULL); 386 if (err == -EIO) {
340done: 387 if (rq->sense_len) {
341 return result; 388 err = trespass_endio(sdev, csdev->sense);
389 } else {
390 sdev_printk(KERN_INFO, sdev,
391 "%s: failed to send MODE SELECT: %x\n",
392 CLARIION_NAME, rq->errors);
393 }
394 }
395
396 blk_put_request(rq);
397
398 return err;
342} 399}
343 400
344static int clariion_check_sense(struct scsi_device *sdev, 401static int clariion_check_sense(struct scsi_device *sdev,
@@ -386,99 +443,215 @@ static int clariion_check_sense(struct scsi_device *sdev,
386 break; 443 break;
387 } 444 }
388 445
389 /* success just means we do not care what scsi-ml does */ 446 return SCSI_RETURN_NOT_HANDLED;
390 return SUCCESS; 447}
448
449static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
450{
451 struct clariion_dh_data *h = get_clariion_data(sdev);
452 int ret = BLKPREP_OK;
453
454 if (h->lun_state != CLARIION_LUN_OWNED) {
455 ret = BLKPREP_KILL;
456 req->cmd_flags |= REQ_QUIET;
457 }
458 return ret;
459
460}
461
462static int clariion_std_inquiry(struct scsi_device *sdev,
463 struct clariion_dh_data *csdev)
464{
465 int err;
466 char *sp_model;
467
468 err = send_inquiry_cmd(sdev, 0, csdev);
469 if (err != SCSI_DH_OK && csdev->senselen) {
470 struct scsi_sense_hdr sshdr;
471
472 if (scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
473 &sshdr)) {
474 sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
475 "%02x/%02x/%02x\n", CLARIION_NAME,
476 sshdr.sense_key, sshdr.asc, sshdr.ascq);
477 }
478 err = SCSI_DH_IO;
479 goto out;
480 }
481
482 sp_model = parse_sp_model(sdev, csdev->buffer);
483 if (!sp_model) {
484 err = SCSI_DH_DEV_UNSUPP;
485 goto out;
486 }
487
488 /*
489 * FC Series arrays do not support long trespass
490 */
491 if (!strlen(sp_model) || !strncmp(sp_model, "FC",2))
492 csdev->flags |= CLARIION_SHORT_TRESPASS;
493
494 sdev_printk(KERN_INFO, sdev,
495 "%s: detected Clariion %s, flags %x\n",
496 CLARIION_NAME, sp_model, csdev->flags);
497out:
498 return err;
391} 499}
392 500
393static const struct { 501static int clariion_send_inquiry(struct scsi_device *sdev,
394 char *vendor; 502 struct clariion_dh_data *csdev)
395 char *model; 503{
396} clariion_dev_list[] = { 504 int err, retry = CLARIION_RETRIES;
505
506retry:
507 err = send_inquiry_cmd(sdev, 0xC0, csdev);
508 if (err != SCSI_DH_OK && csdev->senselen) {
509 struct scsi_sense_hdr sshdr;
510
511 err = scsi_normalize_sense(csdev->sense, SCSI_SENSE_BUFFERSIZE,
512 &sshdr);
513 if (!err)
514 return SCSI_DH_IO;
515
516 err = clariion_check_sense(sdev, &sshdr);
517 if (retry > 0 && err == NEEDS_RETRY) {
518 retry--;
519 goto retry;
520 }
521 sdev_printk(KERN_ERR, sdev, "%s: INQUIRY sense code "
522 "%02x/%02x/%02x\n", CLARIION_NAME,
523 sshdr.sense_key, sshdr.asc, sshdr.ascq);
524 err = SCSI_DH_IO;
525 } else {
526 err = parse_sp_info_reply(sdev, csdev);
527 }
528 return err;
529}
530
531static int clariion_activate(struct scsi_device *sdev)
532{
533 struct clariion_dh_data *csdev = get_clariion_data(sdev);
534 int result;
535
536 result = clariion_send_inquiry(sdev, csdev);
537 if (result != SCSI_DH_OK)
538 goto done;
539
540 if (csdev->lun_state == CLARIION_LUN_OWNED)
541 goto done;
542
543 result = send_trespass_cmd(sdev, csdev);
544 if (result != SCSI_DH_OK)
545 goto done;
546 sdev_printk(KERN_INFO, sdev,"%s: %s trespass command sent\n",
547 CLARIION_NAME,
548 csdev->flags&CLARIION_SHORT_TRESPASS?"short":"long" );
549
550 /* Update status */
551 result = clariion_send_inquiry(sdev, csdev);
552 if (result != SCSI_DH_OK)
553 goto done;
554
555done:
556 sdev_printk(KERN_INFO, sdev,
557 "%s: at SP %c Port %d (%s, default SP %c)\n",
558 CLARIION_NAME, csdev->current_sp + 'A',
559 csdev->port, lun_state[csdev->lun_state],
560 csdev->default_sp + 'A');
561
562 return result;
563}
564
565const struct scsi_dh_devlist clariion_dev_list[] = {
397 {"DGC", "RAID"}, 566 {"DGC", "RAID"},
398 {"DGC", "DISK"}, 567 {"DGC", "DISK"},
568 {"DGC", "VRAID"},
399 {NULL, NULL}, 569 {NULL, NULL},
400}; 570};
401 571
402static int clariion_bus_notify(struct notifier_block *, unsigned long, void *); 572static int clariion_bus_attach(struct scsi_device *sdev);
573static void clariion_bus_detach(struct scsi_device *sdev);
403 574
404static struct scsi_device_handler clariion_dh = { 575static struct scsi_device_handler clariion_dh = {
405 .name = CLARIION_NAME, 576 .name = CLARIION_NAME,
406 .module = THIS_MODULE, 577 .module = THIS_MODULE,
407 .nb.notifier_call = clariion_bus_notify, 578 .devlist = clariion_dev_list,
579 .attach = clariion_bus_attach,
580 .detach = clariion_bus_detach,
408 .check_sense = clariion_check_sense, 581 .check_sense = clariion_check_sense,
409 .activate = clariion_activate, 582 .activate = clariion_activate,
583 .prep_fn = clariion_prep_fn,
410}; 584};
411 585
412/* 586/*
413 * TODO: need some interface so we can set trespass values 587 * TODO: need some interface so we can set trespass values
414 */ 588 */
415static int clariion_bus_notify(struct notifier_block *nb, 589static int clariion_bus_attach(struct scsi_device *sdev)
416 unsigned long action, void *data)
417{ 590{
418 struct device *dev = data;
419 struct scsi_device *sdev;
420 struct scsi_dh_data *scsi_dh_data; 591 struct scsi_dh_data *scsi_dh_data;
421 struct clariion_dh_data *h; 592 struct clariion_dh_data *h;
422 int i, found = 0;
423 unsigned long flags; 593 unsigned long flags;
594 int err;
424 595
425 if (!scsi_is_sdev_device(dev)) 596 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
426 return 0; 597 + sizeof(*h) , GFP_KERNEL);
598 if (!scsi_dh_data) {
599 sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
600 CLARIION_NAME);
601 return -ENOMEM;
602 }
427 603
428 sdev = to_scsi_device(dev); 604 scsi_dh_data->scsi_dh = &clariion_dh;
605 h = (struct clariion_dh_data *) scsi_dh_data->buf;
606 h->lun_state = CLARIION_LUN_UNINITIALIZED;
607 h->default_sp = CLARIION_UNBOUND_LU;
608 h->current_sp = CLARIION_UNBOUND_LU;
429 609
430 if (action == BUS_NOTIFY_ADD_DEVICE) { 610 err = clariion_std_inquiry(sdev, h);
431 for (i = 0; clariion_dev_list[i].vendor; i++) { 611 if (err != SCSI_DH_OK)
432 if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor, 612 goto failed;
433 strlen(clariion_dev_list[i].vendor)) &&
434 !strncmp(sdev->model, clariion_dev_list[i].model,
435 strlen(clariion_dev_list[i].model))) {
436 found = 1;
437 break;
438 }
439 }
440 if (!found)
441 goto out;
442
443 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
444 + sizeof(*h) , GFP_KERNEL);
445 if (!scsi_dh_data) {
446 sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
447 CLARIION_NAME);
448 goto out;
449 }
450 613
451 scsi_dh_data->scsi_dh = &clariion_dh; 614 err = clariion_send_inquiry(sdev, h);
452 h = (struct clariion_dh_data *) scsi_dh_data->buf; 615 if (err != SCSI_DH_OK)
453 h->default_sp = CLARIION_UNBOUND_LU; 616 goto failed;
454 h->current_sp = CLARIION_UNBOUND_LU;
455 617
456 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 618 if (!try_module_get(THIS_MODULE))
457 sdev->scsi_dh_data = scsi_dh_data; 619 goto failed;
458 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
459 620
460 sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", CLARIION_NAME); 621 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
461 try_module_get(THIS_MODULE); 622 sdev->scsi_dh_data = scsi_dh_data;
623 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
462 624
463 } else if (action == BUS_NOTIFY_DEL_DEVICE) { 625 sdev_printk(KERN_INFO, sdev,
464 if (sdev->scsi_dh_data == NULL || 626 "%s: connected to SP %c Port %d (%s, default SP %c)\n",
465 sdev->scsi_dh_data->scsi_dh != &clariion_dh) 627 CLARIION_NAME, h->current_sp + 'A',
466 goto out; 628 h->port, lun_state[h->lun_state],
629 h->default_sp + 'A');
467 630
468 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 631 return 0;
469 scsi_dh_data = sdev->scsi_dh_data;
470 sdev->scsi_dh_data = NULL;
471 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
472 632
473 sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", 633failed:
474 CLARIION_NAME); 634 kfree(scsi_dh_data);
635 sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
636 CLARIION_NAME);
637 return -EINVAL;
638}
475 639
476 kfree(scsi_dh_data); 640static void clariion_bus_detach(struct scsi_device *sdev)
477 module_put(THIS_MODULE); 641{
478 } 642 struct scsi_dh_data *scsi_dh_data;
643 unsigned long flags;
479 644
480out: 645 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
481 return 0; 646 scsi_dh_data = sdev->scsi_dh_data;
647 sdev->scsi_dh_data = NULL;
648 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
649
650 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n",
651 CLARIION_NAME);
652
653 kfree(scsi_dh_data);
654 module_put(THIS_MODULE);
482} 655}
483 656
484static int __init clariion_init(void) 657static int __init clariion_init(void)
@@ -487,7 +660,8 @@ static int __init clariion_init(void)
487 660
488 r = scsi_register_device_handler(&clariion_dh); 661 r = scsi_register_device_handler(&clariion_dh);
489 if (r != 0) 662 if (r != 0)
490 printk(KERN_ERR "Failed to register scsi device handler."); 663 printk(KERN_ERR "%s: Failed to register scsi device handler.",
664 CLARIION_NAME);
491 return r; 665 return r;
492} 666}
493 667
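
The reworked send_trespass_cmd() in the hunk above starts from the static trespass mode page and sets the top bit of the trespass-code byte (offset 6 for the short page, offset 10 for the long one) when reservations are not to be honored. The following standalone sketch mirrors that construction for the short page; the flag name and the copy-then-modify order are illustrative, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CLARIION_TRESPASS_PAGE  0x22
#define HONOR_RESERVATIONS      1       /* illustrative flag, not the kernel one */

/* Short trespass mode page template, as in the hunk above. */
static const uint8_t short_trespass[] = {
        0, 0, 0, 0,
        CLARIION_TRESPASS_PAGE, /* Page code */
        0x02,                   /* Page length - 2 */
        0x01,                   /* Trespass code */
        0xff,                   /* Trespass target */
};

int main(void)
{
        uint8_t buf[sizeof(short_trespass)];
        unsigned flags = 0;     /* try with and without HONOR_RESERVATIONS */
        size_t i;

        memcpy(buf, short_trespass, sizeof(buf));
        /* mirror send_trespass_cmd(): set top bit when not honoring reservations */
        if (!(flags & HONOR_RESERVATIONS))
                buf[6] |= 0x80;

        for (i = 0; i < sizeof(buf); i++)
                printf("%02x ", buf[i]);
        printf("\n");
        return 0;
}
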
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index ae6be87d6a83..9c7a1f8ebb72 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -4,6 +4,7 @@
4 * 4 *
5 * Copyright (C) 2006 Red Hat, Inc. All rights reserved. 5 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
6 * Copyright (C) 2006 Mike Christie 6 * Copyright (C) 2006 Mike Christie
7 * Copyright (C) 2008 Hannes Reinecke <hare@suse.de>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -25,13 +26,18 @@
25#include <scsi/scsi_eh.h> 26#include <scsi/scsi_eh.h>
26#include <scsi/scsi_dh.h> 27#include <scsi/scsi_dh.h>
27 28
28#define HP_SW_NAME "hp_sw" 29#define HP_SW_NAME "hp_sw"
29 30
30#define HP_SW_TIMEOUT (60 * HZ) 31#define HP_SW_TIMEOUT (60 * HZ)
31#define HP_SW_RETRIES 3 32#define HP_SW_RETRIES 3
33
34#define HP_SW_PATH_UNINITIALIZED -1
35#define HP_SW_PATH_ACTIVE 0
36#define HP_SW_PATH_PASSIVE 1
32 37
33struct hp_sw_dh_data { 38struct hp_sw_dh_data {
34 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 39 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
40 int path_state;
35 int retries; 41 int retries;
36}; 42};
37 43
@@ -42,51 +48,161 @@ static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev)
42 return ((struct hp_sw_dh_data *) scsi_dh_data->buf); 48 return ((struct hp_sw_dh_data *) scsi_dh_data->buf);
43} 49}
44 50
45static int hp_sw_done(struct scsi_device *sdev) 51/*
52 * tur_done - Handle TEST UNIT READY return status
53 * @sdev: sdev the command has been sent to
54 * @errors: blk error code
55 *
56 * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path
57 */
58static int tur_done(struct scsi_device *sdev, unsigned char *sense)
46{ 59{
47 struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
48 struct scsi_sense_hdr sshdr; 60 struct scsi_sense_hdr sshdr;
49 int rc; 61 int ret;
50
51 sdev_printk(KERN_INFO, sdev, "hp_sw_done\n");
52 62
53 rc = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sshdr); 63 ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
54 if (!rc) 64 if (!ret) {
65 sdev_printk(KERN_WARNING, sdev,
66 "%s: sending tur failed, no sense available\n",
67 HP_SW_NAME);
68 ret = SCSI_DH_IO;
55 goto done; 69 goto done;
70 }
56 switch (sshdr.sense_key) { 71 switch (sshdr.sense_key) {
72 case UNIT_ATTENTION:
73 ret = SCSI_DH_IMM_RETRY;
74 break;
57 case NOT_READY: 75 case NOT_READY:
58 if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) { 76 if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) {
59 rc = SCSI_DH_RETRY; 77 /*
60 h->retries++; 78 * LUN not ready - Initialization command required
79 *
80 * This is the passive path
81 */
82 ret = SCSI_DH_DEV_OFFLINED;
61 break; 83 break;
62 } 84 }
63 /* fall through */ 85 /* Fallthrough */
64 default: 86 default:
65 h->retries++; 87 sdev_printk(KERN_WARNING, sdev,
66 rc = SCSI_DH_IMM_RETRY; 88 "%s: sending tur failed, sense %x/%x/%x\n",
89 HP_SW_NAME, sshdr.sense_key, sshdr.asc,
90 sshdr.ascq);
91 break;
67 } 92 }
68 93
69done: 94done:
70 if (rc == SCSI_DH_OK || rc == SCSI_DH_IO) 95 return ret;
71 h->retries = 0; 96}
72 else if (h->retries > HP_SW_RETRIES) { 97
73 h->retries = 0; 98/*
99 * hp_sw_tur - Send TEST UNIT READY
100 * @sdev: sdev command should be sent to
101 *
102 * Use the TEST UNIT READY command to determine
103 * the path state.
104 */
105static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
106{
107 struct request *req;
108 int ret;
109
110 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
111 if (!req)
112 return SCSI_DH_RES_TEMP_UNAVAIL;
113
114 req->cmd_type = REQ_TYPE_BLOCK_PC;
115 req->cmd_flags |= REQ_FAILFAST;
116 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
117 memset(req->cmd, 0, MAX_COMMAND_SIZE);
118 req->cmd[0] = TEST_UNIT_READY;
119 req->timeout = HP_SW_TIMEOUT;
120 req->sense = h->sense;
121 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
122 req->sense_len = 0;
123
124retry:
125 ret = blk_execute_rq(req->q, NULL, req, 1);
126 if (ret == -EIO) {
127 if (req->sense_len > 0) {
128 ret = tur_done(sdev, h->sense);
129 } else {
130 sdev_printk(KERN_WARNING, sdev,
131 "%s: sending tur failed with %x\n",
132 HP_SW_NAME, req->errors);
133 ret = SCSI_DH_IO;
134 }
135 } else {
136 h->path_state = HP_SW_PATH_ACTIVE;
137 ret = SCSI_DH_OK;
138 }
139 if (ret == SCSI_DH_IMM_RETRY)
140 goto retry;
141 if (ret == SCSI_DH_DEV_OFFLINED) {
142 h->path_state = HP_SW_PATH_PASSIVE;
143 ret = SCSI_DH_OK;
144 }
145
146 blk_put_request(req);
147
148 return ret;
149}
150
151/*
152 * start_done - Handle START STOP UNIT return status
153 * @sdev: sdev the command has been sent to
154 * @errors: blk error code
155 */
156static int start_done(struct scsi_device *sdev, unsigned char *sense)
157{
158 struct scsi_sense_hdr sshdr;
159 int rc;
160
161 rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr);
162 if (!rc) {
163 sdev_printk(KERN_WARNING, sdev,
164 "%s: sending start_stop_unit failed, "
165 "no sense available\n",
166 HP_SW_NAME);
167 return SCSI_DH_IO;
168 }
169 switch (sshdr.sense_key) {
170 case NOT_READY:
171 if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) {
172 /*
173 * LUN not ready - manual intervention required
174 *
175 * Switch-over in progress, retry.
176 */
177 rc = SCSI_DH_RETRY;
178 break;
179 }
180 /* fall through */
181 default:
182 sdev_printk(KERN_WARNING, sdev,
183 "%s: sending start_stop_unit failed, sense %x/%x/%x\n",
184 HP_SW_NAME, sshdr.sense_key, sshdr.asc,
185 sshdr.ascq);
74 rc = SCSI_DH_IO; 186 rc = SCSI_DH_IO;
75 } 187 }
188
76 return rc; 189 return rc;
77} 190}
78 191
79static int hp_sw_activate(struct scsi_device *sdev) 192/*
193 * hp_sw_start_stop - Send START STOP UNIT command
194 * @sdev: sdev command should be sent to
195 *
196 * Sending START STOP UNIT activates the SP.
197 */
198static int hp_sw_start_stop(struct scsi_device *sdev, struct hp_sw_dh_data *h)
80{ 199{
81 struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
82 struct request *req; 200 struct request *req;
83 int ret = SCSI_DH_RES_TEMP_UNAVAIL; 201 int ret, retry;
84 202
85 req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC); 203 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
86 if (!req) 204 if (!req)
87 goto done; 205 return SCSI_DH_RES_TEMP_UNAVAIL;
88
89 sdev_printk(KERN_INFO, sdev, "sending START_STOP.");
90 206
91 req->cmd_type = REQ_TYPE_BLOCK_PC; 207 req->cmd_type = REQ_TYPE_BLOCK_PC;
92 req->cmd_flags |= REQ_FAILFAST; 208 req->cmd_flags |= REQ_FAILFAST;
@@ -98,95 +214,153 @@ static int hp_sw_activate(struct scsi_device *sdev)
98 req->sense = h->sense; 214 req->sense = h->sense;
99 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); 215 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
100 req->sense_len = 0; 216 req->sense_len = 0;
217 retry = h->retries;
101 218
219retry:
102 ret = blk_execute_rq(req->q, NULL, req, 1); 220 ret = blk_execute_rq(req->q, NULL, req, 1);
103 if (!ret) /* SUCCESS */ 221 if (ret == -EIO) {
104 ret = hp_sw_done(sdev); 222 if (req->sense_len > 0) {
105 else 223 ret = start_done(sdev, h->sense);
224 } else {
225 sdev_printk(KERN_WARNING, sdev,
226 "%s: sending start_stop_unit failed with %x\n",
227 HP_SW_NAME, req->errors);
228 ret = SCSI_DH_IO;
229 }
230 } else
231 ret = SCSI_DH_OK;
232
233 if (ret == SCSI_DH_RETRY) {
234 if (--retry)
235 goto retry;
106 ret = SCSI_DH_IO; 236 ret = SCSI_DH_IO;
107done: 237 }
238
239 blk_put_request(req);
240
241 return ret;
242}
243
244static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
245{
246 struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
247 int ret = BLKPREP_OK;
248
249 if (h->path_state != HP_SW_PATH_ACTIVE) {
250 ret = BLKPREP_KILL;
251 req->cmd_flags |= REQ_QUIET;
252 }
253 return ret;
254
255}
256
257/*
258 * hp_sw_activate - Activate a path
259 * @sdev: sdev on the path to be activated
260 *
261 * The HP Active/Passive firmware is pretty simple;
262 * the passive path reports NOT READY with sense codes
263 * 0x04/0x02; a START STOP UNIT command will then
264 * activate the passive path (and deactivate the
265 * previously active one).
266 */
267static int hp_sw_activate(struct scsi_device *sdev)
268{
269 int ret = SCSI_DH_OK;
270 struct hp_sw_dh_data *h = get_hp_sw_data(sdev);
271
272 ret = hp_sw_tur(sdev, h);
273
274 if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) {
275 ret = hp_sw_start_stop(sdev, h);
276 if (ret == SCSI_DH_OK)
277 sdev_printk(KERN_INFO, sdev,
278 "%s: activated path\n",
279 HP_SW_NAME);
280 }
281
108 return ret; 282 return ret;
109} 283}
110 284
111static const struct { 285const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
112 char *vendor; 286 {"COMPAQ", "MSA1000 VOLUME"},
113 char *model; 287 {"COMPAQ", "HSV110"},
114} hp_sw_dh_data_list[] = { 288 {"HP", "HSV100"},
115 {"COMPAQ", "MSA"},
116 {"HP", "HSV"},
117 {"DEC", "HSG80"}, 289 {"DEC", "HSG80"},
118 {NULL, NULL}, 290 {NULL, NULL},
119}; 291};
120 292
121static int hp_sw_bus_notify(struct notifier_block *, unsigned long, void *); 293static int hp_sw_bus_attach(struct scsi_device *sdev);
294static void hp_sw_bus_detach(struct scsi_device *sdev);
122 295
123static struct scsi_device_handler hp_sw_dh = { 296static struct scsi_device_handler hp_sw_dh = {
124 .name = HP_SW_NAME, 297 .name = HP_SW_NAME,
125 .module = THIS_MODULE, 298 .module = THIS_MODULE,
126 .nb.notifier_call = hp_sw_bus_notify, 299 .devlist = hp_sw_dh_data_list,
300 .attach = hp_sw_bus_attach,
301 .detach = hp_sw_bus_detach,
127 .activate = hp_sw_activate, 302 .activate = hp_sw_activate,
303 .prep_fn = hp_sw_prep_fn,
128}; 304};
129 305
130static int hp_sw_bus_notify(struct notifier_block *nb, 306static int hp_sw_bus_attach(struct scsi_device *sdev)
131 unsigned long action, void *data)
132{ 307{
133 struct device *dev = data;
134 struct scsi_device *sdev;
135 struct scsi_dh_data *scsi_dh_data; 308 struct scsi_dh_data *scsi_dh_data;
136 int i, found = 0; 309 struct hp_sw_dh_data *h;
137 unsigned long flags; 310 unsigned long flags;
311 int ret;
138 312
139 if (!scsi_is_sdev_device(dev)) 313 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
314 + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
315 if (!scsi_dh_data) {
316 sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
317 HP_SW_NAME);
140 return 0; 318 return 0;
319 }
141 320
142 sdev = to_scsi_device(dev); 321 scsi_dh_data->scsi_dh = &hp_sw_dh;
143 322 h = (struct hp_sw_dh_data *) scsi_dh_data->buf;
144 if (action == BUS_NOTIFY_ADD_DEVICE) { 323 h->path_state = HP_SW_PATH_UNINITIALIZED;
145 for (i = 0; hp_sw_dh_data_list[i].vendor; i++) { 324 h->retries = HP_SW_RETRIES;
146 if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
147 strlen(hp_sw_dh_data_list[i].vendor)) &&
148 !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
149 strlen(hp_sw_dh_data_list[i].model))) {
150 found = 1;
151 break;
152 }
153 }
154 if (!found)
155 goto out;
156 325
157 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) 326 ret = hp_sw_tur(sdev, h);
158 + sizeof(struct hp_sw_dh_data) , GFP_KERNEL); 327 if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED)
159 if (!scsi_dh_data) { 328 goto failed;
160 sdev_printk(KERN_ERR, sdev, "Attach Failed %s.\n",
161 HP_SW_NAME);
162 goto out;
163 }
164 329
165 scsi_dh_data->scsi_dh = &hp_sw_dh; 330 if (!try_module_get(THIS_MODULE))
166 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 331 goto failed;
167 sdev->scsi_dh_data = scsi_dh_data;
168 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
169 try_module_get(THIS_MODULE);
170 332
171 sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", HP_SW_NAME); 333 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
172 } else if (action == BUS_NOTIFY_DEL_DEVICE) { 334 sdev->scsi_dh_data = scsi_dh_data;
173 if (sdev->scsi_dh_data == NULL || 335 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
174 sdev->scsi_dh_data->scsi_dh != &hp_sw_dh)
175 goto out;
176 336
177 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 337 sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n",
178 scsi_dh_data = sdev->scsi_dh_data; 338 HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE?
179 sdev->scsi_dh_data = NULL; 339 "active":"passive");
180 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
181 module_put(THIS_MODULE);
182 340
183 sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", HP_SW_NAME); 341 return 0;
184 342
185 kfree(scsi_dh_data); 343failed:
186 } 344 kfree(scsi_dh_data);
345 sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
346 HP_SW_NAME);
347 return -EINVAL;
348}
187 349
188out: 350static void hp_sw_bus_detach( struct scsi_device *sdev )
189 return 0; 351{
352 struct scsi_dh_data *scsi_dh_data;
353 unsigned long flags;
354
355 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
356 scsi_dh_data = sdev->scsi_dh_data;
357 sdev->scsi_dh_data = NULL;
358 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
359 module_put(THIS_MODULE);
360
361 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", HP_SW_NAME);
362
363 kfree(scsi_dh_data);
190} 364}
191 365
192static int __init hp_sw_init(void) 366static int __init hp_sw_init(void)
@@ -202,6 +376,6 @@ static void __exit hp_sw_exit(void)
202module_init(hp_sw_init); 376module_init(hp_sw_init);
203module_exit(hp_sw_exit); 377module_exit(hp_sw_exit);
204 378
205MODULE_DESCRIPTION("HP MSA 1000"); 379MODULE_DESCRIPTION("HP Active/Passive driver");
206MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu"); 380MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu");
207MODULE_LICENSE("GPL"); 381MODULE_LICENSE("GPL");
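
The new hp_sw code probes a path with TEST UNIT READY and lets tur_done() classify the result: NOT READY with ASC/ASCQ 0x04/0x02 marks the passive path, UNIT ATTENTION asks for an immediate retry, and a clean completion means the active path. A self-contained sketch of that classification over a hand-built fixed-format sense buffer (sense key in the low nibble of byte 2, ASC in byte 12, ASCQ in byte 13) is given below; the enum and function names are illustrative, not part of the driver.

#include <stdio.h>
#include <stdint.h>

enum path_state { PATH_ACTIVE, PATH_PASSIVE, PATH_RETRY, PATH_ERROR };

/* Classify a failed TEST UNIT READY from its fixed-format sense data,
 * the way tur_done() in the hunk above does. A TUR that completes
 * without sense would indicate the active path. */
static enum path_state classify_tur_sense(const uint8_t *sense)
{
        uint8_t key = sense[2] & 0x0f;
        uint8_t asc = sense[12], ascq = sense[13];

        if (key == 0x06)                        /* UNIT ATTENTION */
                return PATH_RETRY;
        if (key == 0x02 && asc == 0x04 && ascq == 0x02)
                return PATH_PASSIVE;            /* LUN not ready, init required */
        return PATH_ERROR;
}

int main(void)
{
        uint8_t sense[18] = { 0x70, 0, 0x02, 0, 0, 0, 0, 10,
                              0, 0, 0, 0, 0x04, 0x02 };

        printf("state %d\n", classify_tur_sense(sense));
        return 0;
}
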
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index fdf34b0ec6e1..b093a501f8ae 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -173,6 +173,11 @@ struct rdac_dh_data {
173#define RDAC_STATE_ACTIVE 0 173#define RDAC_STATE_ACTIVE 0
174#define RDAC_STATE_PASSIVE 1 174#define RDAC_STATE_PASSIVE 1
175 unsigned char state; 175 unsigned char state;
176
177#define RDAC_LUN_UNOWNED 0
178#define RDAC_LUN_OWNED 1
179#define RDAC_LUN_AVT 2
180 char lun_state;
176 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 181 unsigned char sense[SCSI_SENSE_BUFFERSIZE];
177 union { 182 union {
178 struct c2_inquiry c2; 183 struct c2_inquiry c2;
@@ -182,6 +187,13 @@ struct rdac_dh_data {
182 } inq; 187 } inq;
183}; 188};
184 189
190static const char *lun_state[] =
191{
192 "unowned",
193 "owned",
194 "owned (AVT mode)",
195};
196
185static LIST_HEAD(ctlr_list); 197static LIST_HEAD(ctlr_list);
186static DEFINE_SPINLOCK(list_lock); 198static DEFINE_SPINLOCK(list_lock);
187 199
@@ -197,9 +209,8 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
197{ 209{
198 struct request *rq; 210 struct request *rq;
199 struct request_queue *q = sdev->request_queue; 211 struct request_queue *q = sdev->request_queue;
200 struct rdac_dh_data *h = get_rdac_data(sdev);
201 212
202 rq = blk_get_request(q, rw, GFP_KERNEL); 213 rq = blk_get_request(q, rw, GFP_NOIO);
203 214
204 if (!rq) { 215 if (!rq) {
205 sdev_printk(KERN_INFO, sdev, 216 sdev_printk(KERN_INFO, sdev,
@@ -207,17 +218,14 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
207 return NULL; 218 return NULL;
208 } 219 }
209 220
210 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_KERNEL)) { 221 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
211 blk_put_request(rq); 222 blk_put_request(rq);
212 sdev_printk(KERN_INFO, sdev, 223 sdev_printk(KERN_INFO, sdev,
213 "get_rdac_req: blk_rq_map_kern failed.\n"); 224 "get_rdac_req: blk_rq_map_kern failed.\n");
214 return NULL; 225 return NULL;
215 } 226 }
216 227
217 memset(&rq->cmd, 0, BLK_MAX_CDB); 228 memset(rq->cmd, 0, BLK_MAX_CDB);
218 rq->sense = h->sense;
219 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
220 rq->sense_len = 0;
221 229
222 rq->cmd_type = REQ_TYPE_BLOCK_PC; 230 rq->cmd_type = REQ_TYPE_BLOCK_PC;
223 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE; 231 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
@@ -227,12 +235,12 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
227 return rq; 235 return rq;
228} 236}
229 237
230static struct request *rdac_failover_get(struct scsi_device *sdev) 238static struct request *rdac_failover_get(struct scsi_device *sdev,
239 struct rdac_dh_data *h)
231{ 240{
232 struct request *rq; 241 struct request *rq;
233 struct rdac_mode_common *common; 242 struct rdac_mode_common *common;
234 unsigned data_size; 243 unsigned data_size;
235 struct rdac_dh_data *h = get_rdac_data(sdev);
236 244
237 if (h->ctlr->use_ms10) { 245 if (h->ctlr->use_ms10) {
238 struct rdac_pg_expanded *rdac_pg; 246 struct rdac_pg_expanded *rdac_pg;
@@ -277,6 +285,10 @@ static struct request *rdac_failover_get(struct scsi_device *sdev)
277 } 285 }
278 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); 286 rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
279 287
288 rq->sense = h->sense;
289 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
290 rq->sense_len = 0;
291
280 return rq; 292 return rq;
281} 293}
282 294
@@ -321,11 +333,10 @@ done:
321} 333}
322 334
323static int submit_inquiry(struct scsi_device *sdev, int page_code, 335static int submit_inquiry(struct scsi_device *sdev, int page_code,
324 unsigned int len) 336 unsigned int len, struct rdac_dh_data *h)
325{ 337{
326 struct request *rq; 338 struct request *rq;
327 struct request_queue *q = sdev->request_queue; 339 struct request_queue *q = sdev->request_queue;
328 struct rdac_dh_data *h = get_rdac_data(sdev);
329 int err = SCSI_DH_RES_TEMP_UNAVAIL; 340 int err = SCSI_DH_RES_TEMP_UNAVAIL;
330 341
331 rq = get_rdac_req(sdev, &h->inq, len, READ); 342 rq = get_rdac_req(sdev, &h->inq, len, READ);
@@ -338,59 +349,68 @@ static int submit_inquiry(struct scsi_device *sdev, int page_code,
338 rq->cmd[2] = page_code; 349 rq->cmd[2] = page_code;
339 rq->cmd[4] = len; 350 rq->cmd[4] = len;
340 rq->cmd_len = COMMAND_SIZE(INQUIRY); 351 rq->cmd_len = COMMAND_SIZE(INQUIRY);
352
353 rq->sense = h->sense;
354 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
355 rq->sense_len = 0;
356
341 err = blk_execute_rq(q, NULL, rq, 1); 357 err = blk_execute_rq(q, NULL, rq, 1);
342 if (err == -EIO) 358 if (err == -EIO)
343 err = SCSI_DH_IO; 359 err = SCSI_DH_IO;
360
361 blk_put_request(rq);
344done: 362done:
345 return err; 363 return err;
346} 364}
347 365
348static int get_lun(struct scsi_device *sdev) 366static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
349{ 367{
350 int err; 368 int err;
351 struct c8_inquiry *inqp; 369 struct c8_inquiry *inqp;
352 struct rdac_dh_data *h = get_rdac_data(sdev);
353 370
354 err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry)); 371 err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
355 if (err == SCSI_DH_OK) { 372 if (err == SCSI_DH_OK) {
356 inqp = &h->inq.c8; 373 inqp = &h->inq.c8;
357 h->lun = inqp->lun[7]; /* currently it uses only one byte */ 374 if (inqp->page_code != 0xc8)
375 return SCSI_DH_NOSYS;
376 if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
377 inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
378 return SCSI_DH_NOSYS;
379 h->lun = scsilun_to_int((struct scsi_lun *)inqp->lun);
358 } 380 }
359 return err; 381 return err;
360} 382}
361 383
362#define RDAC_OWNED 0 384static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
363#define RDAC_UNOWNED 1
364#define RDAC_FAILED 2
365static int check_ownership(struct scsi_device *sdev)
366{ 385{
367 int err; 386 int err;
368 struct c9_inquiry *inqp; 387 struct c9_inquiry *inqp;
369 struct rdac_dh_data *h = get_rdac_data(sdev);
370 388
371 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry)); 389 err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
372 if (err == SCSI_DH_OK) { 390 if (err == SCSI_DH_OK) {
373 err = RDAC_UNOWNED;
374 inqp = &h->inq.c9; 391 inqp = &h->inq.c9;
375 /* 392 if ((inqp->avte_cvp >> 7) == 0x1) {
376 * If in AVT mode or if the path already owns the LUN, 393 /* LUN in AVT mode */
377 * return RDAC_OWNED; 394 sdev_printk(KERN_NOTICE, sdev,
378 */ 395 "%s: AVT mode detected\n",
379 if (((inqp->avte_cvp >> 7) == 0x1) || 396 RDAC_NAME);
380 ((inqp->avte_cvp & 0x1) != 0)) 397 h->lun_state = RDAC_LUN_AVT;
381 err = RDAC_OWNED; 398 } else if ((inqp->avte_cvp & 0x1) != 0) {
382 } else 399 /* LUN was owned by the controller */
383 err = RDAC_FAILED; 400 h->lun_state = RDAC_LUN_OWNED;
401 }
402 }
403
384 return err; 404 return err;
385} 405}
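The rewritten check_ownership() above classifies the LUN by testing two bits of the avte_cvp byte returned in the 0xC9 inquiry page: bit 7 flags AVT mode, bit 0 flags ownership by this controller. A minimal standalone C sketch of that decode, using invented sample values rather than real inquiry data (the enum names are illustrative, not the driver's):

#include <stdio.h>

enum lun_state { LUN_UNOWNED, LUN_OWNED, LUN_AVT };	/* simplified stand-ins */

static enum lun_state classify(unsigned char avte_cvp)
{
	if ((avte_cvp >> 7) == 0x1)	/* bit 7 set: LUN in AVT mode */
		return LUN_AVT;
	if ((avte_cvp & 0x1) != 0)	/* bit 0 set: this controller owns the LUN */
		return LUN_OWNED;
	return LUN_UNOWNED;		/* neither bit set: unowned */
}

int main(void)
{
	unsigned char samples[] = { 0x00, 0x01, 0x80, 0x81 };	/* hypothetical bytes */
	for (unsigned i = 0; i < sizeof(samples); i++)
		printf("avte_cvp=0x%02x -> state %d\n", samples[i], classify(samples[i]));
	return 0;
}

The payoff shows up in the rdac_activate() hunk further down, where the mode select is only sent when the state comes back unowned.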
386 406
387static int initialize_controller(struct scsi_device *sdev) 407static int initialize_controller(struct scsi_device *sdev,
408 struct rdac_dh_data *h)
388{ 409{
389 int err; 410 int err;
390 struct c4_inquiry *inqp; 411 struct c4_inquiry *inqp;
391 struct rdac_dh_data *h = get_rdac_data(sdev);
392 412
393 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry)); 413 err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
394 if (err == SCSI_DH_OK) { 414 if (err == SCSI_DH_OK) {
395 inqp = &h->inq.c4; 415 inqp = &h->inq.c4;
396 h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id); 416 h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
@@ -400,13 +420,12 @@ static int initialize_controller(struct scsi_device *sdev)
400 return err; 420 return err;
401} 421}
402 422
403static int set_mode_select(struct scsi_device *sdev) 423static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
404{ 424{
405 int err; 425 int err;
406 struct c2_inquiry *inqp; 426 struct c2_inquiry *inqp;
407 struct rdac_dh_data *h = get_rdac_data(sdev);
408 427
409 err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry)); 428 err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
410 if (err == SCSI_DH_OK) { 429 if (err == SCSI_DH_OK) {
411 inqp = &h->inq.c2; 430 inqp = &h->inq.c2;
412 /* 431 /*
@@ -421,13 +440,13 @@ static int set_mode_select(struct scsi_device *sdev)
421 return err; 440 return err;
422} 441}
423 442
424static int mode_select_handle_sense(struct scsi_device *sdev) 443static int mode_select_handle_sense(struct scsi_device *sdev,
444 unsigned char *sensebuf)
425{ 445{
426 struct scsi_sense_hdr sense_hdr; 446 struct scsi_sense_hdr sense_hdr;
427 struct rdac_dh_data *h = get_rdac_data(sdev);
428 int sense, err = SCSI_DH_IO, ret; 447 int sense, err = SCSI_DH_IO, ret;
429 448
430 ret = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr); 449 ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
431 if (!ret) 450 if (!ret)
432 goto done; 451 goto done;
433 452
@@ -451,14 +470,13 @@ done:
451 return err; 470 return err;
452} 471}
453 472
454static int send_mode_select(struct scsi_device *sdev) 473static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
455{ 474{
456 struct request *rq; 475 struct request *rq;
457 struct request_queue *q = sdev->request_queue; 476 struct request_queue *q = sdev->request_queue;
458 struct rdac_dh_data *h = get_rdac_data(sdev);
459 int err = SCSI_DH_RES_TEMP_UNAVAIL; 477 int err = SCSI_DH_RES_TEMP_UNAVAIL;
460 478
461 rq = rdac_failover_get(sdev); 479 rq = rdac_failover_get(sdev, h);
462 if (!rq) 480 if (!rq)
463 goto done; 481 goto done;
464 482
@@ -466,9 +484,11 @@ static int send_mode_select(struct scsi_device *sdev)
466 484
467 err = blk_execute_rq(q, NULL, rq, 1); 485 err = blk_execute_rq(q, NULL, rq, 1);
468 if (err != SCSI_DH_OK) 486 if (err != SCSI_DH_OK)
469 err = mode_select_handle_sense(sdev); 487 err = mode_select_handle_sense(sdev, h->sense);
470 if (err == SCSI_DH_OK) 488 if (err == SCSI_DH_OK)
471 h->state = RDAC_STATE_ACTIVE; 489 h->state = RDAC_STATE_ACTIVE;
490
491 blk_put_request(rq);
472done: 492done:
473 return err; 493 return err;
474} 494}
@@ -478,38 +498,23 @@ static int rdac_activate(struct scsi_device *sdev)
478 struct rdac_dh_data *h = get_rdac_data(sdev); 498 struct rdac_dh_data *h = get_rdac_data(sdev);
479 int err = SCSI_DH_OK; 499 int err = SCSI_DH_OK;
480 500
481 if (h->lun == UNINITIALIZED_LUN) { 501 err = check_ownership(sdev, h);
482 err = get_lun(sdev); 502 if (err != SCSI_DH_OK)
483 if (err != SCSI_DH_OK)
484 goto done;
485 }
486
487 err = check_ownership(sdev);
488 switch (err) {
489 case RDAC_UNOWNED:
490 break;
491 case RDAC_OWNED:
492 err = SCSI_DH_OK;
493 goto done;
494 case RDAC_FAILED:
495 default:
496 err = SCSI_DH_IO;
497 goto done; 503 goto done;
498 }
499 504
500 if (!h->ctlr) { 505 if (!h->ctlr) {
501 err = initialize_controller(sdev); 506 err = initialize_controller(sdev, h);
502 if (err != SCSI_DH_OK) 507 if (err != SCSI_DH_OK)
503 goto done; 508 goto done;
504 } 509 }
505 510
506 if (h->ctlr->use_ms10 == -1) { 511 if (h->ctlr->use_ms10 == -1) {
507 err = set_mode_select(sdev); 512 err = set_mode_select(sdev, h);
508 if (err != SCSI_DH_OK) 513 if (err != SCSI_DH_OK)
509 goto done; 514 goto done;
510 } 515 }
511 516 if (h->lun_state == RDAC_LUN_UNOWNED)
512 err = send_mode_select(sdev); 517 err = send_mode_select(sdev, h);
513done: 518done:
514 return err; 519 return err;
515} 520}
@@ -569,10 +574,7 @@ static int rdac_check_sense(struct scsi_device *sdev,
569 return SCSI_RETURN_NOT_HANDLED; 574 return SCSI_RETURN_NOT_HANDLED;
570} 575}
571 576
572static const struct { 577const struct scsi_dh_devlist rdac_dev_list[] = {
573 char *vendor;
574 char *model;
575} rdac_dev_list[] = {
576 {"IBM", "1722"}, 578 {"IBM", "1722"},
577 {"IBM", "1724"}, 579 {"IBM", "1724"},
578 {"IBM", "1726"}, 580 {"IBM", "1726"},
@@ -590,89 +592,89 @@ static const struct {
590 {NULL, NULL}, 592 {NULL, NULL},
591}; 593};
592 594
593static int rdac_bus_notify(struct notifier_block *, unsigned long, void *); 595static int rdac_bus_attach(struct scsi_device *sdev);
596static void rdac_bus_detach(struct scsi_device *sdev);
594 597
595static struct scsi_device_handler rdac_dh = { 598static struct scsi_device_handler rdac_dh = {
596 .name = RDAC_NAME, 599 .name = RDAC_NAME,
597 .module = THIS_MODULE, 600 .module = THIS_MODULE,
598 .nb.notifier_call = rdac_bus_notify, 601 .devlist = rdac_dev_list,
599 .prep_fn = rdac_prep_fn, 602 .prep_fn = rdac_prep_fn,
600 .check_sense = rdac_check_sense, 603 .check_sense = rdac_check_sense,
604 .attach = rdac_bus_attach,
605 .detach = rdac_bus_detach,
601 .activate = rdac_activate, 606 .activate = rdac_activate,
602}; 607};
603 608
604/* 609static int rdac_bus_attach(struct scsi_device *sdev)
605 * TODO: need some interface so we can set trespass values
606 */
607static int rdac_bus_notify(struct notifier_block *nb,
608 unsigned long action, void *data)
609{ 610{
610 struct device *dev = data;
611 struct scsi_device *sdev;
612 struct scsi_dh_data *scsi_dh_data; 611 struct scsi_dh_data *scsi_dh_data;
613 struct rdac_dh_data *h; 612 struct rdac_dh_data *h;
614 int i, found = 0;
615 unsigned long flags; 613 unsigned long flags;
614 int err;
616 615
617 if (!scsi_is_sdev_device(dev)) 616 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
617 + sizeof(*h) , GFP_KERNEL);
618 if (!scsi_dh_data) {
619 sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
620 RDAC_NAME);
618 return 0; 621 return 0;
622 }
619 623
620 sdev = to_scsi_device(dev); 624 scsi_dh_data->scsi_dh = &rdac_dh;
621 625 h = (struct rdac_dh_data *) scsi_dh_data->buf;
622 if (action == BUS_NOTIFY_ADD_DEVICE) { 626 h->lun = UNINITIALIZED_LUN;
623 for (i = 0; rdac_dev_list[i].vendor; i++) { 627 h->state = RDAC_STATE_ACTIVE;
624 if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
625 strlen(rdac_dev_list[i].vendor)) &&
626 !strncmp(sdev->model, rdac_dev_list[i].model,
627 strlen(rdac_dev_list[i].model))) {
628 found = 1;
629 break;
630 }
631 }
632 if (!found)
633 goto out;
634 628
635 scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) 629 err = get_lun(sdev, h);
636 + sizeof(*h) , GFP_KERNEL); 630 if (err != SCSI_DH_OK)
637 if (!scsi_dh_data) { 631 goto failed;
638 sdev_printk(KERN_ERR, sdev, "Attach failed %s.\n",
639 RDAC_NAME);
640 goto out;
641 }
642 632
643 scsi_dh_data->scsi_dh = &rdac_dh; 633 err = check_ownership(sdev, h);
644 h = (struct rdac_dh_data *) scsi_dh_data->buf; 634 if (err != SCSI_DH_OK)
645 h->lun = UNINITIALIZED_LUN; 635 goto failed;
646 h->state = RDAC_STATE_ACTIVE; 636
647 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 637 if (!try_module_get(THIS_MODULE))
648 sdev->scsi_dh_data = scsi_dh_data; 638 goto failed;
649 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); 639
650 try_module_get(THIS_MODULE); 640 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
651 641 sdev->scsi_dh_data = scsi_dh_data;
652 sdev_printk(KERN_NOTICE, sdev, "Attached %s.\n", RDAC_NAME); 642 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
653 643
654 } else if (action == BUS_NOTIFY_DEL_DEVICE) { 644 sdev_printk(KERN_NOTICE, sdev,
655 if (sdev->scsi_dh_data == NULL || 645 "%s: LUN %d (%s)\n",
656 sdev->scsi_dh_data->scsi_dh != &rdac_dh) 646 RDAC_NAME, h->lun, lun_state[(int)h->lun_state]);
657 goto out;
658
659 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
660 scsi_dh_data = sdev->scsi_dh_data;
661 sdev->scsi_dh_data = NULL;
662 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
663
664 h = (struct rdac_dh_data *) scsi_dh_data->buf;
665 if (h->ctlr)
666 kref_put(&h->ctlr->kref, release_controller);
667 kfree(scsi_dh_data);
668 module_put(THIS_MODULE);
669 sdev_printk(KERN_NOTICE, sdev, "Dettached %s.\n", RDAC_NAME);
670 }
671 647
672out:
673 return 0; 648 return 0;
649
650failed:
651 kfree(scsi_dh_data);
652 sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
653 RDAC_NAME);
654 return -EINVAL;
655}
656
657static void rdac_bus_detach( struct scsi_device *sdev )
658{
659 struct scsi_dh_data *scsi_dh_data;
660 struct rdac_dh_data *h;
661 unsigned long flags;
662
663 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
664 scsi_dh_data = sdev->scsi_dh_data;
665 sdev->scsi_dh_data = NULL;
666 spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
667
668 h = (struct rdac_dh_data *) scsi_dh_data->buf;
669 if (h->ctlr)
670 kref_put(&h->ctlr->kref, release_controller);
671 kfree(scsi_dh_data);
672 module_put(THIS_MODULE);
673 sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
674} 674}
675 675
676
677
676static int __init rdac_init(void) 678static int __init rdac_init(void)
677{ 679{
678 int r; 680 int r;
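The attach/detach pair that replaces the old bus notifier allocates the generic scsi_dh_data header and the RDAC-private rdac_dh_data in a single kzalloc(), publishes the pointer under the queue lock, and takes a module reference; detach reverses those steps. A simplified userspace model of the single-allocation "header plus private payload in buf" idiom (plain calloc and made-up struct names stand in for the kernel types):

#include <stdio.h>
#include <stdlib.h>

struct handler;				/* stand-in for struct scsi_device_handler */

struct dh_data {			/* stand-in for struct scsi_dh_data */
	struct handler *scsi_dh;
	char buf[];			/* per-device private payload follows the header */
};

struct rdac_priv {			/* stand-in for struct rdac_dh_data */
	int lun;
	int state;
};

int main(void)
{
	/* attach: one allocation covers the header and the private payload */
	struct dh_data *d = calloc(1, sizeof(*d) + sizeof(struct rdac_priv));
	if (!d)
		return 1;

	struct rdac_priv *h = (struct rdac_priv *)d->buf;
	h->lun = -1;			/* equivalent of UNINITIALIZED_LUN */
	h->state = 0;

	printf("header %p, payload %p, total %zu bytes\n",
	       (void *)d, (void *)h, sizeof(*d) + sizeof(*h));

	/* detach: one free releases both parts */
	free(d);
	return 0;
}

Keeping the private data inside the same allocation is what lets rdac_bus_detach() drop everything with a single kfree() after clearing sdev->scsi_dh_data under the queue lock.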
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 61f8fdea2d96..ae560bc04f9d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -521,9 +521,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
521static void ibmvfc_reinit_host(struct ibmvfc_host *vhost) 521static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
522{ 522{
523 if (vhost->action == IBMVFC_HOST_ACTION_NONE) { 523 if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
524 scsi_block_requests(vhost->host); 524 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
525 ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING); 525 scsi_block_requests(vhost->host);
526 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); 526 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
527 }
527 } else 528 } else
528 vhost->reinit = 1; 529 vhost->reinit = 1;
529 530
@@ -854,39 +855,41 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
854} 855}
855 856
856/** 857/**
857 * __ibmvfc_find_target - Find the specified scsi_target (no locking) 858 * __ibmvfc_get_target - Find the specified scsi_target (no locking)
858 * @starget: scsi target struct 859 * @starget: scsi target struct
859 * 860 *
860 * Return value: 861 * Return value:
861 * ibmvfc_target struct / NULL if not found 862 * ibmvfc_target struct / NULL if not found
862 **/ 863 **/
863static struct ibmvfc_target *__ibmvfc_find_target(struct scsi_target *starget) 864static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
864{ 865{
865 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 866 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
866 struct ibmvfc_host *vhost = shost_priv(shost); 867 struct ibmvfc_host *vhost = shost_priv(shost);
867 struct ibmvfc_target *tgt; 868 struct ibmvfc_target *tgt;
868 869
869 list_for_each_entry(tgt, &vhost->targets, queue) 870 list_for_each_entry(tgt, &vhost->targets, queue)
870 if (tgt->target_id == starget->id) 871 if (tgt->target_id == starget->id) {
872 kref_get(&tgt->kref);
871 return tgt; 873 return tgt;
874 }
872 return NULL; 875 return NULL;
873} 876}
874 877
875/** 878/**
876 * ibmvfc_find_target - Find the specified scsi_target 879 * ibmvfc_get_target - Find the specified scsi_target
877 * @starget: scsi target struct 880 * @starget: scsi target struct
878 * 881 *
879 * Return value: 882 * Return value:
880 * ibmvfc_target struct / NULL if not found 883 * ibmvfc_target struct / NULL if not found
881 **/ 884 **/
882static struct ibmvfc_target *ibmvfc_find_target(struct scsi_target *starget) 885static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
883{ 886{
884 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 887 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
885 struct ibmvfc_target *tgt; 888 struct ibmvfc_target *tgt;
886 unsigned long flags; 889 unsigned long flags;
887 890
888 spin_lock_irqsave(shost->host_lock, flags); 891 spin_lock_irqsave(shost->host_lock, flags);
889 tgt = __ibmvfc_find_target(starget); 892 tgt = __ibmvfc_get_target(starget);
890 spin_unlock_irqrestore(shost->host_lock, flags); 893 spin_unlock_irqrestore(shost->host_lock, flags);
891 return tgt; 894 return tgt;
892} 895}
@@ -963,6 +966,9 @@ static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
963 case IBMVFC_HALTED: 966 case IBMVFC_HALTED:
964 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED; 967 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
965 break; 968 break;
969 case IBMVFC_NO_CRQ:
970 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
971 break;
966 default: 972 default:
967 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state); 973 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
968 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN; 974 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
@@ -988,6 +994,17 @@ static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
988} 994}
989 995
990/** 996/**
997 * ibmvfc_release_tgt - Free memory allocated for a target
998 * @kref: kref struct
999 *
1000 **/
1001static void ibmvfc_release_tgt(struct kref *kref)
1002{
1003 struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1004 kfree(tgt);
1005}
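ibmvfc_release_tgt() is the kref release callback: it recovers the enclosing ibmvfc_target from the embedded kref with container_of() and frees it, which is what allows __ibmvfc_get_target() to hand out pointers safely by taking a reference first. A small userspace model of that idiom, with a plain counter and a hand-rolled container_of standing in for the kernel kref API (names are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ref { int count; };		/* stand-in for struct kref */

struct target {
	unsigned long scsi_id;
	struct ref kref;		/* embedded refcount, as in ibmvfc_target */
};

static void target_release(struct ref *r)
{
	struct target *tgt = container_of(r, struct target, kref);
	printf("last reference dropped, freeing target %lx\n", tgt->scsi_id);
	free(tgt);
}

static void ref_get(struct ref *r)
{
	r->count++;
}

static void ref_put(struct ref *r, void (*release)(struct ref *))
{
	if (--r->count == 0)
		release(r);
}

int main(void)
{
	struct target *tgt = calloc(1, sizeof(*tgt));
	if (!tgt)
		return 1;
	tgt->scsi_id = 0x10500;		/* made-up id */
	tgt->kref.count = 1;		/* reference held by the owning list */

	ref_get(&tgt->kref);			/* lookup takes a reference ... */
	ref_put(&tgt->kref, target_release);	/* ... caller drops it when done */

	ref_put(&tgt->kref, target_release);	/* owner's final put frees the target */
	return 0;
}

The starget attribute getters in the following hunks use exactly this shape: ibmvfc_get_target() takes the reference under host_lock and the caller puts it back once the field has been copied out.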
1006
1007/**
991 * ibmvfc_get_starget_node_name - Get SCSI target's node name 1008 * ibmvfc_get_starget_node_name - Get SCSI target's node name
992 * @starget: scsi target struct 1009 * @starget: scsi target struct
993 * 1010 *
@@ -996,8 +1013,10 @@ static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
996 **/ 1013 **/
997static void ibmvfc_get_starget_node_name(struct scsi_target *starget) 1014static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
998{ 1015{
999 struct ibmvfc_target *tgt = ibmvfc_find_target(starget); 1016 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1000 fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0; 1017 fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
1018 if (tgt)
1019 kref_put(&tgt->kref, ibmvfc_release_tgt);
1001} 1020}
1002 1021
1003/** 1022/**
@@ -1009,8 +1028,10 @@ static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1009 **/ 1028 **/
1010static void ibmvfc_get_starget_port_name(struct scsi_target *starget) 1029static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1011{ 1030{
1012 struct ibmvfc_target *tgt = ibmvfc_find_target(starget); 1031 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1013 fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0; 1032 fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1033 if (tgt)
1034 kref_put(&tgt->kref, ibmvfc_release_tgt);
1014} 1035}
1015 1036
1016/** 1037/**
@@ -1022,8 +1043,10 @@ static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1022 **/ 1043 **/
1023static void ibmvfc_get_starget_port_id(struct scsi_target *starget) 1044static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1024{ 1045{
1025 struct ibmvfc_target *tgt = ibmvfc_find_target(starget); 1046 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1026 fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1; 1047 fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1048 if (tgt)
1049 kref_put(&tgt->kref, ibmvfc_release_tgt);
1027} 1050}
1028 1051
1029/** 1052/**
@@ -1113,7 +1136,7 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1113 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; 1136 login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1114 login_info->capabilities = IBMVFC_CAN_MIGRATE; 1137 login_info->capabilities = IBMVFC_CAN_MIGRATE;
1115 login_info->async.va = vhost->async_crq.msg_token; 1138 login_info->async.va = vhost->async_crq.msg_token;
1116 login_info->async.len = vhost->async_crq.size; 1139 login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
1117 strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME); 1140 strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1118 strncpy(login_info->device_name, 1141 strncpy(login_info->device_name,
1119 vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME); 1142 vhost->host->shost_gendev.bus_id, IBMVFC_MAX_NAME);
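The async.len change in the hunk above is a units fix: the field carries the length of the async CRQ in bytes, so the entry count is multiplied by the size of one entry instead of being passed through directly. A tiny illustration of the count-versus-bytes distinction, with an invented entry layout rather than the real CRQ format:

#include <stdio.h>

struct async_entry {			/* hypothetical 16-byte queue entry */
	unsigned long long words[2];
};

int main(void)
{
	struct async_entry queue[64];
	size_t count = sizeof(queue) / sizeof(queue[0]);	/* 64 entries */
	size_t bytes = count * sizeof(queue[0]);		/* 1024 bytes */

	printf("entries=%zu bytes=%zu\n", count, bytes);
	return 0;
}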
@@ -1404,7 +1427,7 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
1404 err = cmd_status[index].name; 1427 err = cmd_status[index].name;
1405 } 1428 }
1406 1429
1407 if (!logerr && (vhost->log_level <= IBMVFC_DEFAULT_LOG_LEVEL)) 1430 if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1408 return; 1431 return;
1409 1432
1410 if (rsp->flags & FCP_RSP_LEN_VALID) 1433 if (rsp->flags & FCP_RSP_LEN_VALID)
@@ -2054,7 +2077,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2054{ 2077{
2055 const char *desc = ibmvfc_get_ae_desc(crq->event); 2078 const char *desc = ibmvfc_get_ae_desc(crq->event);
2056 2079
2057 ibmvfc_log(vhost, 2, "%s event received\n", desc); 2080 ibmvfc_log(vhost, 3, "%s event received\n", desc);
2058 2081
2059 switch (crq->event) { 2082 switch (crq->event) {
2060 case IBMVFC_AE_LINK_UP: 2083 case IBMVFC_AE_LINK_UP:
@@ -2648,17 +2671,6 @@ static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
2648} 2671}
2649 2672
2650/** 2673/**
2651 * ibmvfc_release_tgt - Free memory allocated for a target
2652 * @kref: kref struct
2653 *
2654 **/
2655static void ibmvfc_release_tgt(struct kref *kref)
2656{
2657 struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
2658 kfree(tgt);
2659}
2660
2661/**
2662 * ibmvfc_tgt_prli_done - Completion handler for Process Login 2674 * ibmvfc_tgt_prli_done - Completion handler for Process Login
2663 * @evt: ibmvfc event struct 2675 * @evt: ibmvfc event struct
2664 * 2676 *
@@ -2902,6 +2914,139 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
2902} 2914}
2903 2915
2904/** 2916/**
2917 * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
2918 * @mad: ibmvfc passthru mad struct
2919 * @tgt: ibmvfc target struct
2920 *
2921 * Returns:
2922 * 1 if PLOGI needed / 0 if PLOGI not needed
2923 **/
2924static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
2925 struct ibmvfc_target *tgt)
2926{
2927 if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
2928 sizeof(tgt->ids.port_name)))
2929 return 1;
2930 if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
2931 sizeof(tgt->ids.node_name)))
2932 return 1;
2933 if (mad->fc_iu.response[6] != tgt->scsi_id)
2934 return 1;
2935 return 0;
2936}
2937
2938/**
2939 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
2940 * @evt: ibmvfc event struct
2941 *
2942 **/
2943static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
2944{
2945 struct ibmvfc_target *tgt = evt->tgt;
2946 struct ibmvfc_host *vhost = evt->vhost;
2947 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
2948 u32 status = mad->common.status;
2949 u8 fc_reason, fc_explain;
2950
2951 vhost->discovery_threads--;
2952 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
2953
2954 switch (status) {
2955 case IBMVFC_MAD_SUCCESS:
2956 tgt_dbg(tgt, "ADISC succeeded\n");
2957 if (ibmvfc_adisc_needs_plogi(mad, tgt))
2958 tgt->need_login = 1;
2959 break;
2960 case IBMVFC_MAD_DRIVER_FAILED:
2961 break;
2962 case IBMVFC_MAD_FAILED:
2963 default:
2964 tgt->need_login = 1;
2965 fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
2966 fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
2967 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
2968 ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error),
2969 mad->iu.status, mad->iu.error,
2970 ibmvfc_get_fc_type(fc_reason), fc_reason,
2971 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
2972 break;
2973 };
2974
2975 kref_put(&tgt->kref, ibmvfc_release_tgt);
2976 ibmvfc_free_event(evt);
2977 wake_up(&vhost->work_wait_q);
2978}
2979
2980/**
2981 * ibmvfc_init_passthru - Initialize an event struct for FC passthru
2982 * @evt: ibmvfc event struct
2983 *
2984 **/
2985static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
2986{
2987 struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
2988
2989 memset(mad, 0, sizeof(*mad));
2990 mad->common.version = 1;
2991 mad->common.opcode = IBMVFC_PASSTHRU;
2992 mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
2993 mad->cmd_ioba.va = (u64)evt->crq.ioba +
2994 offsetof(struct ibmvfc_passthru_mad, iu);
2995 mad->cmd_ioba.len = sizeof(mad->iu);
2996 mad->iu.cmd_len = sizeof(mad->fc_iu.payload);
2997 mad->iu.rsp_len = sizeof(mad->fc_iu.response);
2998 mad->iu.cmd.va = (u64)evt->crq.ioba +
2999 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3000 offsetof(struct ibmvfc_passthru_fc_iu, payload);
3001 mad->iu.cmd.len = sizeof(mad->fc_iu.payload);
3002 mad->iu.rsp.va = (u64)evt->crq.ioba +
3003 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3004 offsetof(struct ibmvfc_passthru_fc_iu, response);
3005 mad->iu.rsp.len = sizeof(mad->fc_iu.response);
3006}
3007
3008/**
3009 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
3010 * @tgt: ibmvfc target struct
3011 *
3012 **/
3013static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
3014{
3015 struct ibmvfc_passthru_mad *mad;
3016 struct ibmvfc_host *vhost = tgt->vhost;
3017 struct ibmvfc_event *evt;
3018
3019 if (vhost->discovery_threads >= disc_threads)
3020 return;
3021
3022 kref_get(&tgt->kref);
3023 evt = ibmvfc_get_event(vhost);
3024 vhost->discovery_threads++;
3025 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
3026 evt->tgt = tgt;
3027
3028 ibmvfc_init_passthru(evt);
3029 mad = &evt->iu.passthru;
3030 mad->iu.flags = IBMVFC_FC_ELS;
3031 mad->iu.scsi_id = tgt->scsi_id;
3032
3033 mad->fc_iu.payload[0] = IBMVFC_ADISC;
3034 memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
3035 sizeof(vhost->login_buf->resp.port_name));
3036 memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
3037 sizeof(vhost->login_buf->resp.node_name));
3038 mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;
3039
3040 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3041 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3042 vhost->discovery_threads--;
3043 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3044 kref_put(&tgt->kref, ibmvfc_release_tgt);
3045 } else
3046 tgt_dbg(tgt, "Sent ADISC\n");
3047}
3048
3049/**
2905 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD 3050 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
2906 * @evt: ibmvfc event struct 3051 * @evt: ibmvfc event struct
2907 * 3052 *
@@ -2921,6 +3066,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
2921 tgt->new_scsi_id = rsp->scsi_id; 3066 tgt->new_scsi_id = rsp->scsi_id;
2922 if (rsp->scsi_id != tgt->scsi_id) 3067 if (rsp->scsi_id != tgt->scsi_id)
2923 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); 3068 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
3069 else
3070 ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
2924 break; 3071 break;
2925 case IBMVFC_MAD_DRIVER_FAILED: 3072 case IBMVFC_MAD_DRIVER_FAILED:
2926 break; 3073 break;
@@ -3336,6 +3483,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
3336 tgt_dbg(tgt, "rport add succeeded\n"); 3483 tgt_dbg(tgt, "rport add succeeded\n");
3337 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; 3484 rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
3338 rport->supported_classes = 0; 3485 rport->supported_classes = 0;
3486 tgt->target_id = rport->scsi_target_id;
3339 if (tgt->service_parms.class1_parms[0] & 0x80000000) 3487 if (tgt->service_parms.class1_parms[0] & 0x80000000)
3340 rport->supported_classes |= FC_COS_CLASS1; 3488 rport->supported_classes |= FC_COS_CLASS1;
3341 if (tgt->service_parms.class2_parms[0] & 0x80000000) 3489 if (tgt->service_parms.class2_parms[0] & 0x80000000)
@@ -3800,10 +3948,12 @@ static int ibmvfc_remove(struct vio_dev *vdev)
3800 3948
3801 ENTER; 3949 ENTER;
3802 ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr); 3950 ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
3951 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
3952 ibmvfc_wait_while_resetting(vhost);
3953 ibmvfc_release_crq_queue(vhost);
3803 kthread_stop(vhost->work_thread); 3954 kthread_stop(vhost->work_thread);
3804 fc_remove_host(vhost->host); 3955 fc_remove_host(vhost->host);
3805 scsi_remove_host(vhost->host); 3956 scsi_remove_host(vhost->host);
3806 ibmvfc_release_crq_queue(vhost);
3807 3957
3808 spin_lock_irqsave(vhost->host->host_lock, flags); 3958 spin_lock_irqsave(vhost->host->host_lock, flags);
3809 ibmvfc_purge_requests(vhost, DID_ERROR); 3959 ibmvfc_purge_requests(vhost, DID_ERROR);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 057f3c01ed61..4bf6e374f076 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -29,8 +29,8 @@
29#include "viosrp.h" 29#include "viosrp.h"
30 30
31#define IBMVFC_NAME "ibmvfc" 31#define IBMVFC_NAME "ibmvfc"
32#define IBMVFC_DRIVER_VERSION "1.0.0" 32#define IBMVFC_DRIVER_VERSION "1.0.1"
33#define IBMVFC_DRIVER_DATE "(July 1, 2008)" 33#define IBMVFC_DRIVER_DATE "(July 11, 2008)"
34 34
35#define IBMVFC_DEFAULT_TIMEOUT 15 35#define IBMVFC_DEFAULT_TIMEOUT 15
36#define IBMVFC_INIT_TIMEOUT 30 36#define IBMVFC_INIT_TIMEOUT 30
@@ -119,6 +119,7 @@ enum ibmvfc_mad_types {
119 IBMVFC_PROCESS_LOGIN = 0x0008, 119 IBMVFC_PROCESS_LOGIN = 0x0008,
120 IBMVFC_QUERY_TARGET = 0x0010, 120 IBMVFC_QUERY_TARGET = 0x0010,
121 IBMVFC_IMPLICIT_LOGOUT = 0x0040, 121 IBMVFC_IMPLICIT_LOGOUT = 0x0040,
122 IBMVFC_PASSTHRU = 0x0200,
122 IBMVFC_TMF_MAD = 0x0100, 123 IBMVFC_TMF_MAD = 0x0100,
123}; 124};
124 125
@@ -439,6 +440,37 @@ struct ibmvfc_cmd {
439 struct ibmvfc_fcp_rsp rsp; 440 struct ibmvfc_fcp_rsp rsp;
440}__attribute__((packed, aligned (8))); 441}__attribute__((packed, aligned (8)));
441 442
443struct ibmvfc_passthru_fc_iu {
444 u32 payload[7];
445#define IBMVFC_ADISC 0x52000000
446 u32 response[7];
447};
448
449struct ibmvfc_passthru_iu {
450 u64 task_tag;
451 u32 cmd_len;
452 u32 rsp_len;
453 u16 status;
454 u16 error;
455 u32 flags;
456#define IBMVFC_FC_ELS 0x01
457 u32 cancel_key;
458 u32 reserved;
459 struct srp_direct_buf cmd;
460 struct srp_direct_buf rsp;
461 u64 correlation;
462 u64 scsi_id;
463 u64 tag;
464 u64 reserved2[2];
465}__attribute__((packed, aligned (8)));
466
467struct ibmvfc_passthru_mad {
468 struct ibmvfc_mad_common common;
469 struct srp_direct_buf cmd_ioba;
470 struct ibmvfc_passthru_iu iu;
471 struct ibmvfc_passthru_fc_iu fc_iu;
472}__attribute__((packed, aligned (8)));
473
442struct ibmvfc_trace_start_entry { 474struct ibmvfc_trace_start_entry {
443 u32 xfer_len; 475 u32 xfer_len;
444}__attribute__((packed)); 476}__attribute__((packed));
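ibmvfc_init_passthru() in the .c hunk earlier builds the cmd/rsp descriptors by adding offsetof() values for these packed structures to the event's I/O address. A compact, self-contained demonstration of taking such offsets on packed structs; the field sizes below are placeholders, not the firmware layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct fc_iu {				/* modelled on ibmvfc_passthru_fc_iu */
	uint32_t payload[7];
	uint32_t response[7];
} __attribute__((packed));

struct passthru_mad {			/* placeholder sizes for the other members */
	uint8_t common[16];
	uint8_t cmd_ioba[16];
	uint8_t iu[88];
	struct fc_iu fc_iu;
} __attribute__((packed));

int main(void)
{
	size_t cmd_off = offsetof(struct passthru_mad, fc_iu) +
			 offsetof(struct fc_iu, payload);
	size_t rsp_off = offsetof(struct passthru_mad, fc_iu) +
			 offsetof(struct fc_iu, response);

	printf("payload at +%zu (%zu bytes), response at +%zu (%zu bytes)\n",
	       cmd_off, sizeof(((struct fc_iu *)0)->payload),
	       rsp_off, sizeof(((struct fc_iu *)0)->response));
	return 0;
}

Packing removes compiler-inserted padding, which is why the driver can describe sub-fields of one DMA-mapped buffer to the firmware purely by offset.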
@@ -531,6 +563,7 @@ union ibmvfc_iu {
531 struct ibmvfc_implicit_logout implicit_logout; 563 struct ibmvfc_implicit_logout implicit_logout;
532 struct ibmvfc_tmf tmf; 564 struct ibmvfc_tmf tmf;
533 struct ibmvfc_cmd cmd; 565 struct ibmvfc_cmd cmd;
566 struct ibmvfc_passthru_mad passthru;
534}__attribute__((packed, aligned (8))); 567}__attribute__((packed, aligned (8)));
535 568
536enum ibmvfc_target_action { 569enum ibmvfc_target_action {
@@ -656,6 +689,9 @@ struct ibmvfc_host {
656#define tgt_dbg(t, fmt, ...) \ 689#define tgt_dbg(t, fmt, ...) \
657 DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)) 690 DBG_CMD(dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__))
658 691
692#define tgt_info(t, fmt, ...) \
693 dev_info((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
694
659#define tgt_err(t, fmt, ...) \ 695#define tgt_err(t, fmt, ...) \
660 dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__) 696 dev_err((t)->vhost->dev, "%lX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
661 697
@@ -668,8 +704,8 @@ struct ibmvfc_host {
668 dev_err((vhost)->dev, ##__VA_ARGS__); \ 704 dev_err((vhost)->dev, ##__VA_ARGS__); \
669 } while (0) 705 } while (0)
670 706
671#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __FUNCTION__)) 707#define ENTER DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Entering %s\n", __func__))
672#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __FUNCTION__)) 708#define LEAVE DBG_CMD(printk(KERN_INFO IBMVFC_NAME": Leaving %s\n", __func__))
673 709
674#ifdef CONFIG_SCSI_IBMVFC_TRACE 710#ifdef CONFIG_SCSI_IBMVFC_TRACE
675#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr) 711#define ibmvfc_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index 2e13ec00172a..2a5b29d12172 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -55,7 +55,7 @@
55/* tmp - will replace with SCSI logging stuff */ 55/* tmp - will replace with SCSI logging stuff */
56#define eprintk(fmt, args...) \ 56#define eprintk(fmt, args...) \
57do { \ 57do { \
58 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \ 58 printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
59} while (0) 59} while (0)
60/* #define dprintk eprintk */ 60/* #define dprintk eprintk */
61#define dprintk(fmt, args...) 61#define dprintk(fmt, args...)
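The remaining hunks in this series are mechanical: the gcc-specific __FUNCTION__ is replaced with __func__, the predefined identifier standardised in C99, which expands to the name of the enclosing function. A minimal standalone example of the same debug-macro pattern (hypothetical function and message, GNU named-variadic macro as in the code above):

#include <stdio.h>

#define eprintk(fmt, args...) \
	printf("%s(%d) " fmt, __func__, __LINE__, ##args)

static void probe_device(int id)
{
	eprintk("probing device %d\n", id);	/* prints: probe_device(<line>) probing device 3 */
}

int main(void)
{
	probe_device(3);
	return 0;
}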
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index f97d172844be..c2a9a13d788f 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -163,7 +163,7 @@ static int imm_proc_info(struct Scsi_Host *host, char *buffer, char **start,
163 163
164#if IMM_DEBUG > 0 164#if IMM_DEBUG > 0
165#define imm_fail(x,y) printk("imm: imm_fail(%i) from %s at line %d\n",\ 165#define imm_fail(x,y) printk("imm: imm_fail(%i) from %s at line %d\n",\
166 y, __FUNCTION__, __LINE__); imm_fail_func(x,y); 166 y, __func__, __LINE__); imm_fail_func(x,y);
167static inline void 167static inline void
168imm_fail_func(imm_struct *dev, int error_code) 168imm_fail_func(imm_struct *dev, int error_code)
169#else 169#else
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index d93156671e93..4871dd1f2582 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1403,10 +1403,10 @@ struct ipr_ucode_image_header {
1403} 1403}
1404 1404
1405#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ 1405#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
1406 __FILE__, __FUNCTION__, __LINE__) 1406 __FILE__, __func__, __LINE__)
1407 1407
1408#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __FUNCTION__)) 1408#define ENTER IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Entering %s\n", __func__))
1409#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __FUNCTION__)) 1409#define LEAVE IPR_DBG_CMD(printk(KERN_INFO IPR_NAME": Leaving %s\n", __func__))
1410 1410
1411#define ipr_err_separator \ 1411#define ipr_err_separator \
1412ipr_err("----------------------------------------------------------\n") 1412ipr_err("----------------------------------------------------------\n")
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 744f06d04a36..48ee8c7f5bdd 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -74,7 +74,7 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
74 case SAS_OPEN_TO: 74 case SAS_OPEN_TO:
75 case SAS_OPEN_REJECT: 75 case SAS_OPEN_REJECT:
76 SAS_DPRINTK("%s: Saw error %d. What to do?\n", 76 SAS_DPRINTK("%s: Saw error %d. What to do?\n",
77 __FUNCTION__, ts->stat); 77 __func__, ts->stat);
78 return AC_ERR_OTHER; 78 return AC_ERR_OTHER;
79 79
80 case SAS_ABORTED_TASK: 80 case SAS_ABORTED_TASK:
@@ -115,7 +115,7 @@ static void sas_ata_task_done(struct sas_task *task)
115 } else if (stat->stat != SAM_STAT_GOOD) { 115 } else if (stat->stat != SAM_STAT_GOOD) {
116 ac = sas_to_ata_err(stat); 116 ac = sas_to_ata_err(stat);
117 if (ac) { 117 if (ac) {
118 SAS_DPRINTK("%s: SAS error %x\n", __FUNCTION__, 118 SAS_DPRINTK("%s: SAS error %x\n", __func__,
119 stat->stat); 119 stat->stat);
120 /* We saw a SAS error. Send a vague error. */ 120 /* We saw a SAS error. Send a vague error. */
121 qc->err_mask = ac; 121 qc->err_mask = ac;
@@ -244,20 +244,20 @@ static void sas_ata_phy_reset(struct ata_port *ap)
244 res = i->dft->lldd_I_T_nexus_reset(dev); 244 res = i->dft->lldd_I_T_nexus_reset(dev);
245 245
246 if (res != TMF_RESP_FUNC_COMPLETE) 246 if (res != TMF_RESP_FUNC_COMPLETE)
247 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__); 247 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__);
248 248
249 switch (dev->sata_dev.command_set) { 249 switch (dev->sata_dev.command_set) {
250 case ATA_COMMAND_SET: 250 case ATA_COMMAND_SET:
251 SAS_DPRINTK("%s: Found ATA device.\n", __FUNCTION__); 251 SAS_DPRINTK("%s: Found ATA device.\n", __func__);
252 ap->link.device[0].class = ATA_DEV_ATA; 252 ap->link.device[0].class = ATA_DEV_ATA;
253 break; 253 break;
254 case ATAPI_COMMAND_SET: 254 case ATAPI_COMMAND_SET:
255 SAS_DPRINTK("%s: Found ATAPI device.\n", __FUNCTION__); 255 SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
256 ap->link.device[0].class = ATA_DEV_ATAPI; 256 ap->link.device[0].class = ATA_DEV_ATAPI;
257 break; 257 break;
258 default: 258 default:
259 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n", 259 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
260 __FUNCTION__, 260 __func__,
261 dev->sata_dev.command_set); 261 dev->sata_dev.command_set);
262 ap->link.device[0].class = ATA_DEV_UNKNOWN; 262 ap->link.device[0].class = ATA_DEV_UNKNOWN;
263 break; 263 break;
@@ -299,7 +299,7 @@ static int sas_ata_scr_write(struct ata_port *ap, unsigned int sc_reg_in,
299{ 299{
300 struct domain_device *dev = ap->private_data; 300 struct domain_device *dev = ap->private_data;
301 301
302 SAS_DPRINTK("STUB %s\n", __FUNCTION__); 302 SAS_DPRINTK("STUB %s\n", __func__);
303 switch (sc_reg_in) { 303 switch (sc_reg_in) {
304 case SCR_STATUS: 304 case SCR_STATUS:
305 dev->sata_dev.sstatus = val; 305 dev->sata_dev.sstatus = val;
@@ -324,7 +324,7 @@ static int sas_ata_scr_read(struct ata_port *ap, unsigned int sc_reg_in,
324{ 324{
325 struct domain_device *dev = ap->private_data; 325 struct domain_device *dev = ap->private_data;
326 326
327 SAS_DPRINTK("STUB %s\n", __FUNCTION__); 327 SAS_DPRINTK("STUB %s\n", __func__);
328 switch (sc_reg_in) { 328 switch (sc_reg_in) {
329 case SCR_STATUS: 329 case SCR_STATUS:
330 *val = dev->sata_dev.sstatus; 330 *val = dev->sata_dev.sstatus;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index aefd865a5788..3da02e436788 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -121,7 +121,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
121 break; 121 break;
122 } else { 122 } else {
123 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " 123 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
124 "status 0x%x\n", __FUNCTION__, 124 "status 0x%x\n", __func__,
125 SAS_ADDR(dev->sas_addr), 125 SAS_ADDR(dev->sas_addr),
126 task->task_status.resp, 126 task->task_status.resp,
127 task->task_status.stat); 127 task->task_status.stat);
@@ -1279,7 +1279,7 @@ static int sas_configure_present(struct domain_device *dev, int phy_id,
1279 goto out; 1279 goto out;
1280 } else if (res != SMP_RESP_FUNC_ACC) { 1280 } else if (res != SMP_RESP_FUNC_ACC) {
1281 SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x " 1281 SAS_DPRINTK("%s: dev %016llx phy 0x%x index 0x%x "
1282 "result 0x%x\n", __FUNCTION__, 1282 "result 0x%x\n", __func__,
1283 SAS_ADDR(dev->sas_addr), phy_id, i, res); 1283 SAS_ADDR(dev->sas_addr), phy_id, i, res);
1284 goto out; 1284 goto out;
1285 } 1285 }
@@ -1901,7 +1901,7 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1901 1901
1902 if (!rsp) { 1902 if (!rsp) {
1903 printk("%s: space for a smp response is missing\n", 1903 printk("%s: space for a smp response is missing\n",
1904 __FUNCTION__); 1904 __func__);
1905 return -EINVAL; 1905 return -EINVAL;
1906 } 1906 }
1907 1907
@@ -1914,20 +1914,20 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1914 if (type != SAS_EDGE_EXPANDER_DEVICE && 1914 if (type != SAS_EDGE_EXPANDER_DEVICE &&
1915 type != SAS_FANOUT_EXPANDER_DEVICE) { 1915 type != SAS_FANOUT_EXPANDER_DEVICE) {
1916 printk("%s: can we send a smp request to a device?\n", 1916 printk("%s: can we send a smp request to a device?\n",
1917 __FUNCTION__); 1917 __func__);
1918 return -EINVAL; 1918 return -EINVAL;
1919 } 1919 }
1920 1920
1921 dev = sas_find_dev_by_rphy(rphy); 1921 dev = sas_find_dev_by_rphy(rphy);
1922 if (!dev) { 1922 if (!dev) {
1923 printk("%s: fail to find a domain_device?\n", __FUNCTION__); 1923 printk("%s: fail to find a domain_device?\n", __func__);
1924 return -EINVAL; 1924 return -EINVAL;
1925 } 1925 }
1926 1926
1927 /* do we need to support multiple segments? */ 1927 /* do we need to support multiple segments? */
1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
1929 printk("%s: multiple segments req %u %u, rsp %u %u\n", 1929 printk("%s: multiple segments req %u %u, rsp %u %u\n",
1930 __FUNCTION__, req->bio->bi_vcnt, req->data_len, 1930 __func__, req->bio->bi_vcnt, req->data_len,
1931 rsp->bio->bi_vcnt, rsp->data_len); 1931 rsp->bio->bi_vcnt, rsp->data_len);
1932 return -EINVAL; 1932 return -EINVAL;
1933 } 1933 }
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 39ae68a3b0ef..139935a121b4 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -50,7 +50,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
50 sas_deform_port(phy); 50 sas_deform_port(phy);
51 else { 51 else {
52 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n", 52 SAS_DPRINTK("%s: phy%d belongs to port%d already(%d)!\n",
53 __FUNCTION__, phy->id, phy->port->id, 53 __func__, phy->id, phy->port->id,
54 phy->port->num_phys); 54 phy->port->num_phys);
55 return; 55 return;
56 } 56 }
@@ -78,7 +78,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
78 78
79 if (i >= sas_ha->num_phys) { 79 if (i >= sas_ha->num_phys) {
80 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n", 80 printk(KERN_NOTICE "%s: couldn't find a free port, bug?\n",
81 __FUNCTION__); 81 __func__);
82 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); 82 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
83 return; 83 return;
84 } 84 }
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 601ec5b6a7f6..a8e3ef309070 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -343,7 +343,7 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
343 flags); 343 flags);
344 SAS_DPRINTK("%s: task 0x%p aborted from " 344 SAS_DPRINTK("%s: task 0x%p aborted from "
345 "task_queue\n", 345 "task_queue\n",
346 __FUNCTION__, task); 346 __func__, task);
347 return TASK_IS_ABORTED; 347 return TASK_IS_ABORTED;
348 } 348 }
349 } 349 }
@@ -351,13 +351,13 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
351 } 351 }
352 352
353 for (i = 0; i < 5; i++) { 353 for (i = 0; i < 5; i++) {
354 SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__, task); 354 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
355 res = si->dft->lldd_abort_task(task); 355 res = si->dft->lldd_abort_task(task);
356 356
357 spin_lock_irqsave(&task->task_state_lock, flags); 357 spin_lock_irqsave(&task->task_state_lock, flags);
358 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 358 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
359 spin_unlock_irqrestore(&task->task_state_lock, flags); 359 spin_unlock_irqrestore(&task->task_state_lock, flags);
360 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, 360 SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
361 task); 361 task);
362 return TASK_IS_DONE; 362 return TASK_IS_DONE;
363 } 363 }
@@ -365,24 +365,24 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
365 365
366 if (res == TMF_RESP_FUNC_COMPLETE) { 366 if (res == TMF_RESP_FUNC_COMPLETE) {
367 SAS_DPRINTK("%s: task 0x%p is aborted\n", 367 SAS_DPRINTK("%s: task 0x%p is aborted\n",
368 __FUNCTION__, task); 368 __func__, task);
369 return TASK_IS_ABORTED; 369 return TASK_IS_ABORTED;
370 } else if (si->dft->lldd_query_task) { 370 } else if (si->dft->lldd_query_task) {
371 SAS_DPRINTK("%s: querying task 0x%p\n", 371 SAS_DPRINTK("%s: querying task 0x%p\n",
372 __FUNCTION__, task); 372 __func__, task);
373 res = si->dft->lldd_query_task(task); 373 res = si->dft->lldd_query_task(task);
374 switch (res) { 374 switch (res) {
375 case TMF_RESP_FUNC_SUCC: 375 case TMF_RESP_FUNC_SUCC:
376 SAS_DPRINTK("%s: task 0x%p at LU\n", 376 SAS_DPRINTK("%s: task 0x%p at LU\n",
377 __FUNCTION__, task); 377 __func__, task);
378 return TASK_IS_AT_LU; 378 return TASK_IS_AT_LU;
379 case TMF_RESP_FUNC_COMPLETE: 379 case TMF_RESP_FUNC_COMPLETE:
380 SAS_DPRINTK("%s: task 0x%p not at LU\n", 380 SAS_DPRINTK("%s: task 0x%p not at LU\n",
381 __FUNCTION__, task); 381 __func__, task);
382 return TASK_IS_NOT_AT_LU; 382 return TASK_IS_NOT_AT_LU;
383 case TMF_RESP_FUNC_FAILED: 383 case TMF_RESP_FUNC_FAILED:
384 SAS_DPRINTK("%s: task 0x%p failed to abort\n", 384 SAS_DPRINTK("%s: task 0x%p failed to abort\n",
385 __FUNCTION__, task); 385 __func__, task);
386 return TASK_ABORT_FAILED; 386 return TASK_ABORT_FAILED;
387 } 387 }
388 388
@@ -545,7 +545,7 @@ Again:
545 545
546 if (need_reset) { 546 if (need_reset) {
547 SAS_DPRINTK("%s: task 0x%p requests reset\n", 547 SAS_DPRINTK("%s: task 0x%p requests reset\n",
548 __FUNCTION__, task); 548 __func__, task);
549 goto reset; 549 goto reset;
550 } 550 }
551 551
@@ -556,13 +556,13 @@ Again:
556 556
557 switch (res) { 557 switch (res) {
558 case TASK_IS_DONE: 558 case TASK_IS_DONE:
559 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, 559 SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
560 task); 560 task);
561 sas_eh_finish_cmd(cmd); 561 sas_eh_finish_cmd(cmd);
562 continue; 562 continue;
563 case TASK_IS_ABORTED: 563 case TASK_IS_ABORTED:
564 SAS_DPRINTK("%s: task 0x%p is aborted\n", 564 SAS_DPRINTK("%s: task 0x%p is aborted\n",
565 __FUNCTION__, task); 565 __func__, task);
566 sas_eh_finish_cmd(cmd); 566 sas_eh_finish_cmd(cmd);
567 continue; 567 continue;
568 case TASK_IS_AT_LU: 568 case TASK_IS_AT_LU:
@@ -633,7 +633,7 @@ Again:
633 } 633 }
634 return list_empty(work_q); 634 return list_empty(work_q);
635clear_q: 635clear_q:
636 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__); 636 SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
637 list_for_each_entry_safe(cmd, n, work_q, eh_entry) 637 list_for_each_entry_safe(cmd, n, work_q, eh_entry)
638 sas_eh_finish_cmd(cmd); 638 sas_eh_finish_cmd(cmd);
639 639
@@ -650,7 +650,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
650 list_splice_init(&shost->eh_cmd_q, &eh_work_q); 650 list_splice_init(&shost->eh_cmd_q, &eh_work_q);
651 spin_unlock_irqrestore(shost->host_lock, flags); 651 spin_unlock_irqrestore(shost->host_lock, flags);
652 652
653 SAS_DPRINTK("Enter %s\n", __FUNCTION__); 653 SAS_DPRINTK("Enter %s\n", __func__);
654 /* 654 /*
655 * Deal with commands that still have SAS tasks (i.e. they didn't 655 * Deal with commands that still have SAS tasks (i.e. they didn't
656 * complete via the normal sas_task completion mechanism) 656 * complete via the normal sas_task completion mechanism)
@@ -669,7 +669,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
669 669
670out: 670out:
671 scsi_eh_flush_done_q(&ha->eh_done_q); 671 scsi_eh_flush_done_q(&ha->eh_done_q);
672 SAS_DPRINTK("--- Exit %s\n", __FUNCTION__); 672 SAS_DPRINTK("--- Exit %s\n", __func__);
673 return; 673 return;
674} 674}
675 675
@@ -990,7 +990,7 @@ int __sas_task_abort(struct sas_task *task)
990 if (task->task_state_flags & SAS_TASK_STATE_ABORTED || 990 if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
991 task->task_state_flags & SAS_TASK_STATE_DONE) { 991 task->task_state_flags & SAS_TASK_STATE_DONE) {
992 spin_unlock_irqrestore(&task->task_state_lock, flags); 992 spin_unlock_irqrestore(&task->task_state_lock, flags);
993 SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__, 993 SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
994 task); 994 task);
995 return 0; 995 return 0;
996 } 996 }
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 6d6a76e65a6c..15e2d132e8b9 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -39,7 +39,7 @@ enum srp_task_attributes {
39/* tmp - will replace with SCSI logging stuff */ 39/* tmp - will replace with SCSI logging stuff */
40#define eprintk(fmt, args...) \ 40#define eprintk(fmt, args...) \
41do { \ 41do { \
42 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \ 42 printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
43} while (0) 43} while (0)
44/* #define dprintk eprintk */ 44/* #define dprintk eprintk */
45#define dprintk(fmt, args...) 45#define dprintk(fmt, args...)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 5b6e5395c8eb..d51a2a4b43eb 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2083,7 +2083,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2083 if (iocbq_entry == NULL) { 2083 if (iocbq_entry == NULL) {
2084 printk(KERN_ERR "%s: only allocated %d iocbs of " 2084 printk(KERN_ERR "%s: only allocated %d iocbs of "
2085 "expected %d count. Unloading driver.\n", 2085 "expected %d count. Unloading driver.\n",
2086 __FUNCTION__, i, LPFC_IOCB_LIST_CNT); 2086 __func__, i, LPFC_IOCB_LIST_CNT);
2087 error = -ENOMEM; 2087 error = -ENOMEM;
2088 goto out_free_iocbq; 2088 goto out_free_iocbq;
2089 } 2089 }
@@ -2093,7 +2093,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2093 kfree (iocbq_entry); 2093 kfree (iocbq_entry);
2094 printk(KERN_ERR "%s: failed to allocate IOTAG. " 2094 printk(KERN_ERR "%s: failed to allocate IOTAG. "
2095 "Unloading driver.\n", 2095 "Unloading driver.\n",
2096 __FUNCTION__); 2096 __func__);
2097 error = -ENOMEM; 2097 error = -ENOMEM;
2098 goto out_free_iocbq; 2098 goto out_free_iocbq;
2099 } 2099 }
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c94da4f2b8a6..1bcebbd3dfac 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -341,7 +341,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
341 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { 341 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
342 printk(KERN_ERR "%s: Too many sg segments from " 342 printk(KERN_ERR "%s: Too many sg segments from "
343 "dma_map_sg. Config %d, seg_cnt %d", 343 "dma_map_sg. Config %d, seg_cnt %d",
344 __FUNCTION__, phba->cfg_sg_seg_cnt, 344 __func__, phba->cfg_sg_seg_cnt,
345 lpfc_cmd->seg_cnt); 345 lpfc_cmd->seg_cnt);
346 scsi_dma_unmap(scsi_cmnd); 346 scsi_dma_unmap(scsi_cmnd);
347 return 1; 347 return 1;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f40aa7b905f7..50fe07646738 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -219,7 +219,7 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
219 case CMD_IOCB_LOGENTRY_CN: 219 case CMD_IOCB_LOGENTRY_CN:
220 case CMD_IOCB_LOGENTRY_ASYNC_CN: 220 case CMD_IOCB_LOGENTRY_ASYNC_CN:
221 printk("%s - Unhandled SLI-3 Command x%x\n", 221 printk("%s - Unhandled SLI-3 Command x%x\n",
222 __FUNCTION__, iocb_cmnd); 222 __func__, iocb_cmnd);
223 type = LPFC_UNKNOWN_IOCB; 223 type = LPFC_UNKNOWN_IOCB;
224 break; 224 break;
225 default: 225 default:
@@ -1715,7 +1715,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
1715 rspiocbp = __lpfc_sli_get_iocbq(phba); 1715 rspiocbp = __lpfc_sli_get_iocbq(phba);
1716 if (rspiocbp == NULL) { 1716 if (rspiocbp == NULL) {
1717 printk(KERN_ERR "%s: out of buffers! Failing " 1717 printk(KERN_ERR "%s: out of buffers! Failing "
1718 "completion.\n", __FUNCTION__); 1718 "completion.\n", __func__);
1719 break; 1719 break;
1720 } 1720 }
1721 1721
@@ -3793,7 +3793,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3793 break; 3793 break;
3794 default: 3794 default:
3795 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 3795 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
3796 __FUNCTION__, ctx_cmd); 3796 __func__, ctx_cmd);
3797 break; 3797 break;
3798 } 3798 }
3799 3799
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index f62ed468ada0..5ead1283a844 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -265,7 +265,7 @@ typedef struct {
265#define ASSERT(expression) \ 265#define ASSERT(expression) \
266 if (!(expression)) { \ 266 if (!(expression)) { \
267 ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \ 267 ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \
268 #expression, __FILE__, __LINE__, __FUNCTION__); \ 268 #expression, __FILE__, __LINE__, __func__); \
269 } 269 }
270#else 270#else
271#define ASSERT(expression) 271#define ASSERT(expression)
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 70a0f11f48b2..805bb61dde18 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -458,7 +458,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
458 458
459 if (adapter == NULL) { 459 if (adapter == NULL) {
460 con_log(CL_ANN, (KERN_WARNING 460 con_log(CL_ANN, (KERN_WARNING
461 "megaraid: out of memory, %s %d.\n", __FUNCTION__, __LINE__)); 461 "megaraid: out of memory, %s %d.\n", __func__, __LINE__));
462 462
463 goto out_probe_one; 463 goto out_probe_one;
464 } 464 }
@@ -1002,7 +1002,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
1002 1002
1003 if (!raid_dev->una_mbox64) { 1003 if (!raid_dev->una_mbox64) {
1004 con_log(CL_ANN, (KERN_WARNING 1004 con_log(CL_ANN, (KERN_WARNING
1005 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1005 "megaraid: out of memory, %s %d\n", __func__,
1006 __LINE__)); 1006 __LINE__));
1007 return -1; 1007 return -1;
1008 } 1008 }
@@ -1030,7 +1030,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
1030 if (!adapter->ibuf) { 1030 if (!adapter->ibuf) {
1031 1031
1032 con_log(CL_ANN, (KERN_WARNING 1032 con_log(CL_ANN, (KERN_WARNING
1033 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1033 "megaraid: out of memory, %s %d\n", __func__,
1034 __LINE__)); 1034 __LINE__));
1035 1035
1036 goto out_free_common_mbox; 1036 goto out_free_common_mbox;
@@ -1052,7 +1052,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
1052 1052
1053 if (adapter->kscb_list == NULL) { 1053 if (adapter->kscb_list == NULL) {
1054 con_log(CL_ANN, (KERN_WARNING 1054 con_log(CL_ANN, (KERN_WARNING
1055 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1055 "megaraid: out of memory, %s %d\n", __func__,
1056 __LINE__)); 1056 __LINE__));
1057 goto out_free_ibuf; 1057 goto out_free_ibuf;
1058 } 1058 }
@@ -1060,7 +1060,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
1060 // memory allocation for our command packets 1060 // memory allocation for our command packets
1061 if (megaraid_mbox_setup_dma_pools(adapter) != 0) { 1061 if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
1062 con_log(CL_ANN, (KERN_WARNING 1062 con_log(CL_ANN, (KERN_WARNING
1063 "megaraid: out of memory, %s %d\n", __FUNCTION__, 1063 "megaraid: out of memory, %s %d\n", __func__,
1064 __LINE__)); 1064 __LINE__));
1065 goto out_free_scb_list; 1065 goto out_free_scb_list;
1066 } 1066 }
@@ -2981,7 +2981,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
2981 2981
2982 if (pinfo == NULL) { 2982 if (pinfo == NULL) {
2983 con_log(CL_ANN, (KERN_WARNING 2983 con_log(CL_ANN, (KERN_WARNING
2984 "megaraid: out of memory, %s %d\n", __FUNCTION__, 2984 "megaraid: out of memory, %s %d\n", __func__,
2985 __LINE__)); 2985 __LINE__));
2986 2986
2987 return -1; 2987 return -1;
@@ -3508,7 +3508,7 @@ megaraid_cmm_register(adapter_t *adapter)
3508 3508
3509 if (adapter->uscb_list == NULL) { 3509 if (adapter->uscb_list == NULL) {
3510 con_log(CL_ANN, (KERN_WARNING 3510 con_log(CL_ANN, (KERN_WARNING
3511 "megaraid: out of memory, %s %d\n", __FUNCTION__, 3511 "megaraid: out of memory, %s %d\n", __func__,
3512 __LINE__)); 3512 __LINE__));
3513 return -1; 3513 return -1;
3514 } 3514 }
@@ -3879,7 +3879,7 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
3879 !raid_dev->sysfs_buffer) { 3879 !raid_dev->sysfs_buffer) {
3880 3880
3881 con_log(CL_ANN, (KERN_WARNING 3881 con_log(CL_ANN, (KERN_WARNING
3882 "megaraid: out of memory, %s %d\n", __FUNCTION__, 3882 "megaraid: out of memory, %s %d\n", __func__,
3883 __LINE__)); 3883 __LINE__));
3884 3884
3885 rval = -ENOMEM; 3885 rval = -ENOMEM;
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index ac3b280c2a72..f680561d2c6f 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -929,7 +929,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
929 !adapter->pthru_dma_pool) { 929 !adapter->pthru_dma_pool) {
930 930
931 con_log(CL_ANN, (KERN_WARNING 931 con_log(CL_ANN, (KERN_WARNING
932 "megaraid cmm: out of memory, %s %d\n", __FUNCTION__, 932 "megaraid cmm: out of memory, %s %d\n", __func__,
933 __LINE__)); 933 __LINE__));
934 934
935 rval = (-ENOMEM); 935 rval = (-ENOMEM);
@@ -957,7 +957,7 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
957 957
958 con_log(CL_ANN, (KERN_WARNING 958 con_log(CL_ANN, (KERN_WARNING
959 "megaraid cmm: out of memory, %s %d\n", 959 "megaraid cmm: out of memory, %s %d\n",
960 __FUNCTION__, __LINE__)); 960 __func__, __LINE__));
961 961
962 rval = (-ENOMEM); 962 rval = (-ENOMEM);
963 963
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 7fed35372150..edf9fdb3cb3c 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -299,9 +299,9 @@ static struct scsi_host_template nsp32_template = {
299#else 299#else
300# define NSP32_DEBUG_MASK 0xffffff 300# define NSP32_DEBUG_MASK 0xffffff
301# define nsp32_msg(type, args...) \ 301# define nsp32_msg(type, args...) \
302 nsp32_message (__FUNCTION__, __LINE__, (type), args) 302 nsp32_message (__func__, __LINE__, (type), args)
303# define nsp32_dbg(mask, args...) \ 303# define nsp32_dbg(mask, args...) \
304 nsp32_dmessage(__FUNCTION__, __LINE__, (mask), args) 304 nsp32_dmessage(__func__, __LINE__, (mask), args)
305#endif 305#endif
306 306
307#define NSP32_DEBUG_QUEUECOMMAND BIT(0) 307#define NSP32_DEBUG_QUEUECOMMAND BIT(0)
diff --git a/drivers/scsi/nsp32_debug.c b/drivers/scsi/nsp32_debug.c
index ef3c59cbcff6..2fb3fb58858d 100644
--- a/drivers/scsi/nsp32_debug.c
+++ b/drivers/scsi/nsp32_debug.c
@@ -88,7 +88,7 @@ static void print_commandk (unsigned char *command)
88 int i,s; 88 int i,s;
89// printk(KERN_DEBUG); 89// printk(KERN_DEBUG);
90 print_opcodek(command[0]); 90 print_opcodek(command[0]);
91 /*printk(KERN_DEBUG "%s ", __FUNCTION__);*/ 91 /*printk(KERN_DEBUG "%s ", __func__);*/
92 if ((command[0] >> 5) == 6 || 92 if ((command[0] >> 5) == 6 ||
93 (command[0] >> 5) == 7 ) { 93 (command[0] >> 5) == 7 ) {
94 s = 12; /* vender specific */ 94 s = 12; /* vender specific */
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 5082ca3c6876..a221b6ef9fa9 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -107,9 +107,9 @@ static nsp_hw_data nsp_data_base; /* attach <-> detect glue */
107#else 107#else
108# define NSP_DEBUG_MASK 0xffffff 108# define NSP_DEBUG_MASK 0xffffff
109# define nsp_msg(type, args...) \ 109# define nsp_msg(type, args...) \
110 nsp_cs_message (__FUNCTION__, __LINE__, (type), args) 110 nsp_cs_message (__func__, __LINE__, (type), args)
111# define nsp_dbg(mask, args...) \ 111# define nsp_dbg(mask, args...) \
112 nsp_cs_dmessage(__FUNCTION__, __LINE__, (mask), args) 112 nsp_cs_dmessage(__func__, __LINE__, (mask), args)
113#endif 113#endif
114 114
115#define NSP_DEBUG_QUEUECOMMAND BIT(0) 115#define NSP_DEBUG_QUEUECOMMAND BIT(0)
diff --git a/drivers/scsi/pcmcia/nsp_debug.c b/drivers/scsi/pcmcia/nsp_debug.c
index 2f75fe6e35a7..3c6ef64fcbff 100644
--- a/drivers/scsi/pcmcia/nsp_debug.c
+++ b/drivers/scsi/pcmcia/nsp_debug.c
@@ -90,7 +90,7 @@ static void print_commandk (unsigned char *command)
90 int i, s; 90 int i, s;
91 printk(KERN_DEBUG); 91 printk(KERN_DEBUG);
92 print_opcodek(command[0]); 92 print_opcodek(command[0]);
93 /*printk(KERN_DEBUG "%s ", __FUNCTION__);*/ 93 /*printk(KERN_DEBUG "%s ", __func__);*/
94 if ((command[0] >> 5) == 6 || 94 if ((command[0] >> 5) == 6 ||
95 (command[0] >> 5) == 7 ) { 95 (command[0] >> 5) == 7 ) {
96 s = 12; /* vender specific */ 96 s = 12; /* vender specific */
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index f655ae320b48..8aa0bd987e29 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -171,7 +171,7 @@ static int device_check(ppa_struct *dev);
171 171
172#if PPA_DEBUG > 0 172#if PPA_DEBUG > 0
173#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\ 173#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
174 y, __FUNCTION__, __LINE__); ppa_fail_func(x,y); 174 y, __func__, __LINE__); ppa_fail_func(x,y);
175static inline void ppa_fail_func(ppa_struct *dev, int error_code) 175static inline void ppa_fail_func(ppa_struct *dev, int error_code)
176#else 176#else
177static inline void ppa_fail(ppa_struct *dev, int error_code) 177static inline void ppa_fail(ppa_struct *dev, int error_code)
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 3754ab87f89a..37f9ba0cd798 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1695,7 +1695,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1695 risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen; 1695 risc_code_size = *ql1280_board_tbl[ha->devnum].fwlen;
1696 1696
1697 dprintk(1, "%s: DMA RISC code (%i) words\n", 1697 dprintk(1, "%s: DMA RISC code (%i) words\n",
1698 __FUNCTION__, risc_code_size); 1698 __func__, risc_code_size);
1699 1699
1700 num = 0; 1700 num = 0;
1701 while (risc_code_size > 0) { 1701 while (risc_code_size > 0) {
@@ -1721,7 +1721,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1721 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff; 1721 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1722 mb[6] = pci_dma_hi32(ha->request_dma) >> 16; 1722 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1723 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n", 1723 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1724 __FUNCTION__, mb[0], 1724 __func__, mb[0],
1725 (void *)(long)ha->request_dma, 1725 (void *)(long)ha->request_dma,
1726 mb[6], mb[7], mb[2], mb[3]); 1726 mb[6], mb[7], mb[2], mb[3]);
1727 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | 1727 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
@@ -1753,10 +1753,10 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1753 if (tbuf[i] != sp[i] && warn++ < 10) { 1753 if (tbuf[i] != sp[i] && warn++ < 10) {
1754 printk(KERN_ERR "%s: FW compare error @ " 1754 printk(KERN_ERR "%s: FW compare error @ "
1755 "byte(0x%x) loop#=%x\n", 1755 "byte(0x%x) loop#=%x\n",
1756 __FUNCTION__, i, num); 1756 __func__, i, num);
1757 printk(KERN_ERR "%s: FWbyte=%x " 1757 printk(KERN_ERR "%s: FWbyte=%x "
1758 "FWfromChip=%x\n", 1758 "FWfromChip=%x\n",
1759 __FUNCTION__, sp[i], tbuf[i]); 1759 __func__, sp[i], tbuf[i]);
1760 /*break; */ 1760 /*break; */
1761 } 1761 }
1762 } 1762 }
@@ -1781,7 +1781,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
1781 int err; 1781 int err;
1782 1782
1783 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n", 1783 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1784 __FUNCTION__); 1784 __func__);
1785 1785
1786 /* Verify checksum of loaded RISC code. */ 1786 /* Verify checksum of loaded RISC code. */
1787 mb[0] = MBC_VERIFY_CHECKSUM; 1787 mb[0] = MBC_VERIFY_CHECKSUM;
@@ -1794,7 +1794,7 @@ qla1280_start_firmware(struct scsi_qla_host *ha)
1794 } 1794 }
1795 1795
1796 /* Start firmware execution. */ 1796 /* Start firmware execution. */
1797 dprintk(1, "%s: start firmware running.\n", __FUNCTION__); 1797 dprintk(1, "%s: start firmware running.\n", __func__);
1798 mb[0] = MBC_EXECUTE_FIRMWARE; 1798 mb[0] = MBC_EXECUTE_FIRMWARE;
1799 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart; 1799 mb[1] = *ql1280_board_tbl[ha->devnum].fwstart;
1800 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); 1800 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
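The megaraid, nsp32, nsp32_debug, nsp_cs, nsp_debug, ppa and qla1280 hunks above are one mechanical conversion: the GCC-specific __FUNCTION__ is replaced by __func__, the predefined identifier standardized in C99, which expands to a static const char array holding the enclosing function's name. GCC keeps __FUNCTION__ as an alias, so the generated output is identical; the change only drops the non-standard spelling. A minimal, self-contained C illustration (plain userspace code, not taken from any of the drivers above):

    #include <stdio.h>

    /* __func__ is a C99 predefined identifier: a static const char
     * array holding the name of the enclosing function.  __FUNCTION__
     * is the older GCC spelling kept only as an alias for it. */
    static void report_oom(void)
    {
            printf("out of memory, %s %d\n", __func__, __LINE__);
    }

    int main(void)
    {
            report_oom();   /* prints: out of memory, report_oom <line> */
            return 0;
    }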
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 8dd88fc1244a..7a4409ab30ea 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -20,18 +20,12 @@ qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
20{ 20{
21 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 21 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
22 struct device, kobj))); 22 struct device, kobj)));
23 char *rbuf = (char *)ha->fw_dump;
24 23
25 if (ha->fw_dump_reading == 0) 24 if (ha->fw_dump_reading == 0)
26 return 0; 25 return 0;
27 if (off > ha->fw_dump_len)
28 return 0;
29 if (off + count > ha->fw_dump_len)
30 count = ha->fw_dump_len - off;
31 26
32 memcpy(buf, &rbuf[off], count); 27 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
33 28 ha->fw_dump_len);
34 return (count);
35} 29}
36 30
37static ssize_t 31static ssize_t
@@ -94,20 +88,13 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
94{ 88{
95 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 89 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
96 struct device, kobj))); 90 struct device, kobj)));
97 int size = ha->nvram_size;
98 char *nvram_cache = ha->nvram;
99 91
100 if (!capable(CAP_SYS_ADMIN) || off > size || count == 0) 92 if (!capable(CAP_SYS_ADMIN))
101 return 0; 93 return 0;
102 if (off + count > size) {
103 size -= off;
104 count = size;
105 }
106 94
107 /* Read NVRAM data from cache. */ 95 /* Read NVRAM data from cache. */
108 memcpy(buf, &nvram_cache[off], count); 96 return memory_read_from_buffer(buf, count, &off, ha->nvram,
109 97 ha->nvram_size);
110 return count;
111} 98}
112 99
113static ssize_t 100static ssize_t
@@ -175,14 +162,9 @@ qla2x00_sysfs_read_optrom(struct kobject *kobj,
175 162
176 if (ha->optrom_state != QLA_SREADING) 163 if (ha->optrom_state != QLA_SREADING)
177 return 0; 164 return 0;
178 if (off > ha->optrom_region_size)
179 return 0;
180 if (off + count > ha->optrom_region_size)
181 count = ha->optrom_region_size - off;
182
183 memcpy(buf, &ha->optrom_buffer[off], count);
184 165
185 return count; 166 return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
167 ha->optrom_region_size);
186} 168}
187 169
188static ssize_t 170static ssize_t
@@ -374,20 +356,12 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
374{ 356{
375 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj, 357 struct scsi_qla_host *ha = shost_priv(dev_to_shost(container_of(kobj,
376 struct device, kobj))); 358 struct device, kobj)));
377 int size = ha->vpd_size;
378 char *vpd_cache = ha->vpd;
379 359
380 if (!capable(CAP_SYS_ADMIN) || off > size || count == 0) 360 if (!capable(CAP_SYS_ADMIN))
381 return 0; 361 return 0;
382 if (off + count > size) {
383 size -= off;
384 count = size;
385 }
386 362
387 /* Read NVRAM data from cache. */ 363 /* Read NVRAM data from cache. */
388 memcpy(buf, &vpd_cache[off], count); 364 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
389
390 return count;
391} 365}
392 366
393static ssize_t 367static ssize_t
@@ -557,8 +531,10 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
557 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev)); 531 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
558 uint32_t sn; 532 uint32_t sn;
559 533
560 if (IS_FWI2_CAPABLE(ha)) 534 if (IS_FWI2_CAPABLE(ha)) {
561 return snprintf(buf, PAGE_SIZE, "\n"); 535 qla2xxx_get_vpd_field(ha, "SN", buf, PAGE_SIZE);
536 return snprintf(buf, PAGE_SIZE, "%s\n", buf);
537 }
562 538
563 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; 539 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
564 return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, 540 return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
@@ -809,6 +785,16 @@ qla2x00_optrom_fw_version_show(struct device *dev,
809 ha->fw_revision[3]); 785 ha->fw_revision[3]);
810} 786}
811 787
788static ssize_t
789qla2x00_total_isp_aborts_show(struct device *dev,
790 struct device_attribute *attr, char *buf)
791{
792 scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
793
794 return snprintf(buf, PAGE_SIZE, "%d\n",
795 ha->qla_stats.total_isp_aborts);
796}
797
812static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); 798static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
813static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); 799static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
814static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); 800static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -831,6 +817,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
831 qla2x00_optrom_fcode_version_show, NULL); 817 qla2x00_optrom_fcode_version_show, NULL);
832static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show, 818static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
833 NULL); 819 NULL);
820static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
821 NULL);
834 822
835struct device_attribute *qla2x00_host_attrs[] = { 823struct device_attribute *qla2x00_host_attrs[] = {
836 &dev_attr_driver_version, 824 &dev_attr_driver_version,
@@ -849,6 +837,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
849 &dev_attr_optrom_efi_version, 837 &dev_attr_optrom_efi_version,
850 &dev_attr_optrom_fcode_version, 838 &dev_attr_optrom_fcode_version,
851 &dev_attr_optrom_fw_version, 839 &dev_attr_optrom_fw_version,
840 &dev_attr_total_isp_aborts,
852 NULL, 841 NULL,
853}; 842};
854 843
@@ -972,26 +961,39 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
972} 961}
973 962
974static void 963static void
975qla2x00_get_rport_loss_tmo(struct fc_rport *rport) 964qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
976{ 965{
977 struct Scsi_Host *host = rport_to_shost(rport); 966 if (timeout)
978 scsi_qla_host_t *ha = shost_priv(host); 967 rport->dev_loss_tmo = timeout;
979 968 else
980 rport->dev_loss_tmo = ha->port_down_retry_count + 5; 969 rport->dev_loss_tmo = 1;
981} 970}
982 971
983static void 972static void
984qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) 973qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
985{ 974{
986 struct Scsi_Host *host = rport_to_shost(rport); 975 struct Scsi_Host *host = rport_to_shost(rport);
987 scsi_qla_host_t *ha = shost_priv(host); 976 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
977
978 qla2x00_abort_fcport_cmds(fcport);
979
980 /*
981 * Transport has effectively 'deleted' the rport, clear
982 * all local references.
983 */
984 spin_lock_irq(host->host_lock);
985 fcport->rport = NULL;
986 *((fc_port_t **)rport->dd_data) = NULL;
987 spin_unlock_irq(host->host_lock);
988}
988 989
989 if (timeout) 990static void
990 ha->port_down_retry_count = timeout; 991qla2x00_terminate_rport_io(struct fc_rport *rport)
991 else 992{
992 ha->port_down_retry_count = 1; 993 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
993 994
994 rport->dev_loss_tmo = ha->port_down_retry_count + 5; 995 qla2x00_abort_fcport_cmds(fcport);
996 scsi_target_unblock(&rport->dev);
995} 997}
996 998
997static int 999static int
@@ -1045,6 +1047,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1045 pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt; 1047 pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1046 pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt; 1048 pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1047 if (IS_FWI2_CAPABLE(ha)) { 1049 if (IS_FWI2_CAPABLE(ha)) {
1050 pfc_host_stat->lip_count = stats->lip_cnt;
1048 pfc_host_stat->tx_frames = stats->tx_frames; 1051 pfc_host_stat->tx_frames = stats->tx_frames;
1049 pfc_host_stat->rx_frames = stats->rx_frames; 1052 pfc_host_stat->rx_frames = stats->rx_frames;
1050 pfc_host_stat->dumped_frames = stats->dumped_frames; 1053 pfc_host_stat->dumped_frames = stats->dumped_frames;
@@ -1173,17 +1176,16 @@ vport_create_failed_2:
1173static int 1176static int
1174qla24xx_vport_delete(struct fc_vport *fc_vport) 1177qla24xx_vport_delete(struct fc_vport *fc_vport)
1175{ 1178{
1176 scsi_qla_host_t *ha = shost_priv(fc_vport->shost);
1177 scsi_qla_host_t *vha = fc_vport->dd_data; 1179 scsi_qla_host_t *vha = fc_vport->dd_data;
1180 scsi_qla_host_t *pha = to_qla_parent(vha);
1181
1182 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1183 test_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags))
1184 msleep(1000);
1178 1185
1179 qla24xx_disable_vp(vha); 1186 qla24xx_disable_vp(vha);
1180 qla24xx_deallocate_vp_id(vha); 1187 qla24xx_deallocate_vp_id(vha);
1181 1188
1182 mutex_lock(&ha->vport_lock);
1183 ha->cur_vport_count--;
1184 clear_bit(vha->vp_idx, ha->vp_idx_map);
1185 mutex_unlock(&ha->vport_lock);
1186
1187 kfree(vha->node_name); 1189 kfree(vha->node_name);
1188 kfree(vha->port_name); 1190 kfree(vha->port_name);
1189 1191
@@ -1248,11 +1250,12 @@ struct fc_function_template qla2xxx_transport_functions = {
1248 .get_starget_port_id = qla2x00_get_starget_port_id, 1250 .get_starget_port_id = qla2x00_get_starget_port_id,
1249 .show_starget_port_id = 1, 1251 .show_starget_port_id = 1,
1250 1252
1251 .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
1252 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, 1253 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1253 .show_rport_dev_loss_tmo = 1, 1254 .show_rport_dev_loss_tmo = 1,
1254 1255
1255 .issue_fc_host_lip = qla2x00_issue_lip, 1256 .issue_fc_host_lip = qla2x00_issue_lip,
1257 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1258 .terminate_rport_io = qla2x00_terminate_rport_io,
1256 .get_fc_host_stats = qla2x00_get_fc_host_stats, 1259 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1257 1260
1258 .vport_create = qla24xx_vport_create, 1261 .vport_create = qla24xx_vport_create,
@@ -1291,11 +1294,12 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
1291 .get_starget_port_id = qla2x00_get_starget_port_id, 1294 .get_starget_port_id = qla2x00_get_starget_port_id,
1292 .show_starget_port_id = 1, 1295 .show_starget_port_id = 1,
1293 1296
1294 .get_rport_dev_loss_tmo = qla2x00_get_rport_loss_tmo,
1295 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo, 1297 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
1296 .show_rport_dev_loss_tmo = 1, 1298 .show_rport_dev_loss_tmo = 1,
1297 1299
1298 .issue_fc_host_lip = qla2x00_issue_lip, 1300 .issue_fc_host_lip = qla2x00_issue_lip,
1301 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
1302 .terminate_rport_io = qla2x00_terminate_rport_io,
1299 .get_fc_host_stats = qla2x00_get_fc_host_stats, 1303 .get_fc_host_stats = qla2x00_get_fc_host_stats,
1300}; 1304};
1301 1305
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cbef785765cf..510ba64bc286 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -216,7 +216,7 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
216 216
217static int 217static int
218qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram, 218qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
219 uint16_t ram_words, void **nxt) 219 uint32_t ram_words, void **nxt)
220{ 220{
221 int rval; 221 int rval;
222 uint32_t cnt, stat, timer, words, idx; 222 uint32_t cnt, stat, timer, words, idx;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 8dd600013bd1..6da31ba94404 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -864,7 +864,8 @@ struct link_statistics {
864 uint32_t prim_seq_err_cnt; 864 uint32_t prim_seq_err_cnt;
865 uint32_t inval_xmit_word_cnt; 865 uint32_t inval_xmit_word_cnt;
866 uint32_t inval_crc_cnt; 866 uint32_t inval_crc_cnt;
867 uint32_t unused1[0x1b]; 867 uint32_t lip_cnt;
868 uint32_t unused1[0x1a];
868 uint32_t tx_frames; 869 uint32_t tx_frames;
869 uint32_t rx_frames; 870 uint32_t rx_frames;
870 uint32_t dumped_frames; 871 uint32_t dumped_frames;
@@ -1544,7 +1545,6 @@ typedef struct fc_port {
1544 int login_retry; 1545 int login_retry;
1545 atomic_t port_down_timer; 1546 atomic_t port_down_timer;
1546 1547
1547 spinlock_t rport_lock;
1548 struct fc_rport *rport, *drport; 1548 struct fc_rport *rport, *drport;
1549 u32 supported_classes; 1549 u32 supported_classes;
1550 1550
@@ -2155,6 +2155,10 @@ struct qla_chip_state_84xx {
2155 uint32_t gold_fw_version; 2155 uint32_t gold_fw_version;
2156}; 2156};
2157 2157
2158struct qla_statistics {
2159 uint32_t total_isp_aborts;
2160};
2161
2158/* 2162/*
2159 * Linux Host Adapter structure 2163 * Linux Host Adapter structure
2160 */ 2164 */
@@ -2166,7 +2170,6 @@ typedef struct scsi_qla_host {
2166 struct pci_dev *pdev; 2170 struct pci_dev *pdev;
2167 2171
2168 unsigned long host_no; 2172 unsigned long host_no;
2169 unsigned long instance;
2170 2173
2171 volatile struct { 2174 volatile struct {
2172 uint32_t init_done :1; 2175 uint32_t init_done :1;
@@ -2515,7 +2518,7 @@ typedef struct scsi_qla_host {
2515 2518
2516 uint8_t model_number[16+1]; 2519 uint8_t model_number[16+1];
2517#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" 2520#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
2518 char *model_desc; 2521 char model_desc[80];
2519 uint8_t adapter_id[16+1]; 2522 uint8_t adapter_id[16+1];
2520 2523
2521 uint8_t *node_name; 2524 uint8_t *node_name;
@@ -2596,6 +2599,7 @@ typedef struct scsi_qla_host {
2596 int cur_vport_count; 2599 int cur_vport_count;
2597 2600
2598 struct qla_chip_state_84xx *cs84xx; 2601 struct qla_chip_state_84xx *cs84xx;
2602 struct qla_statistics qla_stats;
2599} scsi_qla_host_t; 2603} scsi_qla_host_t;
2600 2604
2601 2605
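In the struct link_statistics change above, the new lip_cnt counter takes over the first of the previously reserved dwords and the padding shrinks from 0x1b to 0x1a entries, so the firmware-defined offsets of tx_frames, rx_frames and dumped_frames are unchanged. A standalone check of that arithmetic (hypothetical struct copies modelling only the tail of the real structure, for illustration):

    #include <stddef.h>
    #include <assert.h>

    struct tail_old {
            unsigned int inval_crc_cnt;
            unsigned int unused1[0x1b];
            unsigned int tx_frames;
    };

    struct tail_new {
            unsigned int inval_crc_cnt;
            unsigned int lip_cnt;
            unsigned int unused1[0x1a];
            unsigned int tx_frames;
    };

    int main(void)
    {
            /* 1 + 0x1a == 0x1b, so tx_frames stays at the same offset */
            assert(offsetof(struct tail_new, tx_frames) ==
                   offsetof(struct tail_old, tx_frames));
            return 0;
    }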
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9b4bebee6879..0b156735e9a6 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -62,7 +62,7 @@ extern int ql2xfdmienable;
62extern int ql2xallocfwdump; 62extern int ql2xallocfwdump;
63extern int ql2xextended_error_logging; 63extern int ql2xextended_error_logging;
64extern int ql2xqfullrampup; 64extern int ql2xqfullrampup;
65extern int num_hosts; 65extern int ql2xiidmaenable;
66 66
67extern int qla2x00_loop_reset(scsi_qla_host_t *); 67extern int qla2x00_loop_reset(scsi_qla_host_t *);
68extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 68extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -71,6 +71,8 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
71extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t, 71extern int qla2x00_post_hwe_work(struct scsi_qla_host *, uint16_t , uint16_t,
72 uint16_t, uint16_t); 72 uint16_t, uint16_t);
73 73
74extern void qla2x00_abort_fcport_cmds(fc_port_t *);
75
74/* 76/*
75 * Global Functions in qla_mid.c source file. 77 * Global Functions in qla_mid.c source file.
76 */ 78 */
@@ -312,6 +314,7 @@ extern int qla2xxx_hw_event_log(scsi_qla_host_t *, uint16_t , uint16_t,
312 uint16_t, uint16_t); 314 uint16_t, uint16_t);
313 315
314extern void qla2xxx_get_flash_info(scsi_qla_host_t *); 316extern void qla2xxx_get_flash_info(scsi_qla_host_t *);
317extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
315 318
316/* 319/*
317 * Global Function Prototypes in qla_dbg.c source file. 320 * Global Function Prototypes in qla_dbg.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4cb80b476c85..c2a4bfbcb05b 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1661,6 +1661,12 @@ qla2x00_fdmi_register(scsi_qla_host_t *ha)
1661{ 1661{
1662 int rval; 1662 int rval;
1663 1663
1664 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
1665 DEBUG2(printk("scsi(%ld): FDMI unsupported on "
1666 "ISP2100/ISP2200.\n", ha->host_no));
1667 return QLA_SUCCESS;
1668 }
1669
1664 rval = qla2x00_mgmt_svr_login(ha); 1670 rval = qla2x00_mgmt_svr_login(ha);
1665 if (rval) 1671 if (rval)
1666 return rval; 1672 return rval;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index bbbc5a632a1d..601a6b29750c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -334,6 +334,8 @@ static int
334qla2x00_isp_firmware(scsi_qla_host_t *ha) 334qla2x00_isp_firmware(scsi_qla_host_t *ha)
335{ 335{
336 int rval; 336 int rval;
337 uint16_t loop_id, topo, sw_cap;
338 uint8_t domain, area, al_pa;
337 339
338 /* Assume loading risc code */ 340 /* Assume loading risc code */
339 rval = QLA_FUNCTION_FAILED; 341 rval = QLA_FUNCTION_FAILED;
@@ -345,6 +347,11 @@ qla2x00_isp_firmware(scsi_qla_host_t *ha)
345 347
346 /* Verify checksum of loaded RISC code. */ 348 /* Verify checksum of loaded RISC code. */
347 rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address); 349 rval = qla2x00_verify_checksum(ha, ha->fw_srisc_address);
350 if (rval == QLA_SUCCESS) {
351 /* And, verify we are not in ROM code. */
352 rval = qla2x00_get_adapter_id(ha, &loop_id, &al_pa,
353 &area, &domain, &topo, &sw_cap);
354 }
348 } 355 }
349 356
350 if (rval) { 357 if (rval) {
@@ -722,7 +729,7 @@ qla24xx_chip_diag(scsi_qla_host_t *ha)
722 /* Perform RISC reset. */ 729 /* Perform RISC reset. */
723 qla24xx_reset_risc(ha); 730 qla24xx_reset_risc(ha);
724 731
725 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024; 732 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * ha->request_q_length;
726 733
727 rval = qla2x00_mbx_reg_test(ha); 734 rval = qla2x00_mbx_reg_test(ha);
728 if (rval) { 735 if (rval) {
@@ -768,42 +775,16 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
768 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 775 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
769 sizeof(uint32_t); 776 sizeof(uint32_t);
770 777
771 /* Allocate memory for Extended Trace Buffer. */
772 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
773 GFP_KERNEL);
774 if (!tc) {
775 qla_printk(KERN_WARNING, ha, "Unable to allocate "
776 "(%d KB) for EFT.\n", EFT_SIZE / 1024);
777 goto cont_alloc;
778 }
779
780 memset(tc, 0, EFT_SIZE);
781 rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
782 if (rval) {
783 qla_printk(KERN_WARNING, ha, "Unable to initialize "
784 "EFT (%d).\n", rval);
785 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
786 tc_dma);
787 goto cont_alloc;
788 }
789
790 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
791 EFT_SIZE / 1024);
792
793 eft_size = EFT_SIZE;
794 ha->eft_dma = tc_dma;
795 ha->eft = tc;
796
797 /* Allocate memory for Fibre Channel Event Buffer. */ 778 /* Allocate memory for Fibre Channel Event Buffer. */
798 if (!IS_QLA25XX(ha)) 779 if (!IS_QLA25XX(ha))
799 goto cont_alloc; 780 goto try_eft;
800 781
801 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 782 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
802 GFP_KERNEL); 783 GFP_KERNEL);
803 if (!tc) { 784 if (!tc) {
804 qla_printk(KERN_WARNING, ha, "Unable to allocate " 785 qla_printk(KERN_WARNING, ha, "Unable to allocate "
805 "(%d KB) for FCE.\n", FCE_SIZE / 1024); 786 "(%d KB) for FCE.\n", FCE_SIZE / 1024);
806 goto cont_alloc; 787 goto try_eft;
807 } 788 }
808 789
809 memset(tc, 0, FCE_SIZE); 790 memset(tc, 0, FCE_SIZE);
@@ -815,7 +796,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
815 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, 796 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc,
816 tc_dma); 797 tc_dma);
817 ha->flags.fce_enabled = 0; 798 ha->flags.fce_enabled = 0;
818 goto cont_alloc; 799 goto try_eft;
819 } 800 }
820 801
821 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n", 802 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for FCE...\n",
@@ -825,6 +806,32 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *ha)
825 ha->flags.fce_enabled = 1; 806 ha->flags.fce_enabled = 1;
826 ha->fce_dma = tc_dma; 807 ha->fce_dma = tc_dma;
827 ha->fce = tc; 808 ha->fce = tc;
809try_eft:
810 /* Allocate memory for Extended Trace Buffer. */
811 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
812 GFP_KERNEL);
813 if (!tc) {
814 qla_printk(KERN_WARNING, ha, "Unable to allocate "
815 "(%d KB) for EFT.\n", EFT_SIZE / 1024);
816 goto cont_alloc;
817 }
818
819 memset(tc, 0, EFT_SIZE);
820 rval = qla2x00_enable_eft_trace(ha, tc_dma, EFT_NUM_BUFFERS);
821 if (rval) {
822 qla_printk(KERN_WARNING, ha, "Unable to initialize "
823 "EFT (%d).\n", rval);
824 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
825 tc_dma);
826 goto cont_alloc;
827 }
828
829 qla_printk(KERN_INFO, ha, "Allocated (%d KB) for EFT...\n",
830 EFT_SIZE / 1024);
831
832 eft_size = EFT_SIZE;
833 ha->eft_dma = tc_dma;
834 ha->eft = tc;
828 } 835 }
829cont_alloc: 836cont_alloc:
830 req_q_size = ha->request_q_length * sizeof(request_t); 837 req_q_size = ha->request_q_length * sizeof(request_t);
@@ -1501,18 +1508,25 @@ qla2x00_set_model_info(scsi_qla_host_t *ha, uint8_t *model, size_t len, char *de
1501 index = (ha->pdev->subsystem_device & 0xff); 1508 index = (ha->pdev->subsystem_device & 0xff);
1502 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1509 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1503 index < QLA_MODEL_NAMES) 1510 index < QLA_MODEL_NAMES)
1504 ha->model_desc = qla2x00_model_name[index * 2 + 1]; 1511 strncpy(ha->model_desc,
1512 qla2x00_model_name[index * 2 + 1],
1513 sizeof(ha->model_desc) - 1);
1505 } else { 1514 } else {
1506 index = (ha->pdev->subsystem_device & 0xff); 1515 index = (ha->pdev->subsystem_device & 0xff);
1507 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && 1516 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
1508 index < QLA_MODEL_NAMES) { 1517 index < QLA_MODEL_NAMES) {
1509 strcpy(ha->model_number, 1518 strcpy(ha->model_number,
1510 qla2x00_model_name[index * 2]); 1519 qla2x00_model_name[index * 2]);
1511 ha->model_desc = qla2x00_model_name[index * 2 + 1]; 1520 strncpy(ha->model_desc,
1521 qla2x00_model_name[index * 2 + 1],
1522 sizeof(ha->model_desc) - 1);
1512 } else { 1523 } else {
1513 strcpy(ha->model_number, def); 1524 strcpy(ha->model_number, def);
1514 } 1525 }
1515 } 1526 }
1527 if (IS_FWI2_CAPABLE(ha))
1528 qla2xxx_get_vpd_field(ha, "\x82", ha->model_desc,
1529 sizeof(ha->model_desc));
1516} 1530}
1517 1531
1518/* On sparc systems, obtain port and node WWN from firmware 1532/* On sparc systems, obtain port and node WWN from firmware
@@ -1864,12 +1878,11 @@ qla2x00_rport_del(void *data)
1864{ 1878{
1865 fc_port_t *fcport = data; 1879 fc_port_t *fcport = data;
1866 struct fc_rport *rport; 1880 struct fc_rport *rport;
1867 unsigned long flags;
1868 1881
1869 spin_lock_irqsave(&fcport->rport_lock, flags); 1882 spin_lock_irq(fcport->ha->host->host_lock);
1870 rport = fcport->drport; 1883 rport = fcport->drport;
1871 fcport->drport = NULL; 1884 fcport->drport = NULL;
1872 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1885 spin_unlock_irq(fcport->ha->host->host_lock);
1873 if (rport) 1886 if (rport)
1874 fc_remote_port_delete(rport); 1887 fc_remote_port_delete(rport);
1875} 1888}
@@ -1898,7 +1911,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags)
1898 atomic_set(&fcport->state, FCS_UNCONFIGURED); 1911 atomic_set(&fcport->state, FCS_UNCONFIGURED);
1899 fcport->flags = FCF_RLC_SUPPORT; 1912 fcport->flags = FCF_RLC_SUPPORT;
1900 fcport->supported_classes = FC_COS_UNSPECIFIED; 1913 fcport->supported_classes = FC_COS_UNSPECIFIED;
1901 spin_lock_init(&fcport->rport_lock);
1902 1914
1903 return fcport; 1915 return fcport;
1904} 1916}
@@ -2007,8 +2019,10 @@ qla2x00_configure_loop(scsi_qla_host_t *ha)
2007 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) { 2019 if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) {
2008 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) 2020 if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
2009 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); 2021 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
2010 if (test_bit(RSCN_UPDATE, &save_flags)) 2022 if (test_bit(RSCN_UPDATE, &save_flags)) {
2023 ha->flags.rscn_queue_overflow = 1;
2011 set_bit(RSCN_UPDATE, &ha->dpc_flags); 2024 set_bit(RSCN_UPDATE, &ha->dpc_flags);
2025 }
2012 } 2026 }
2013 2027
2014 return (rval); 2028 return (rval);
@@ -2243,28 +2257,24 @@ qla2x00_reg_remote_port(scsi_qla_host_t *ha, fc_port_t *fcport)
2243{ 2257{
2244 struct fc_rport_identifiers rport_ids; 2258 struct fc_rport_identifiers rport_ids;
2245 struct fc_rport *rport; 2259 struct fc_rport *rport;
2246 unsigned long flags;
2247 2260
2248 if (fcport->drport) 2261 if (fcport->drport)
2249 qla2x00_rport_del(fcport); 2262 qla2x00_rport_del(fcport);
2250 if (fcport->rport)
2251 return;
2252 2263
2253 rport_ids.node_name = wwn_to_u64(fcport->node_name); 2264 rport_ids.node_name = wwn_to_u64(fcport->node_name);
2254 rport_ids.port_name = wwn_to_u64(fcport->port_name); 2265 rport_ids.port_name = wwn_to_u64(fcport->port_name);
2255 rport_ids.port_id = fcport->d_id.b.domain << 16 | 2266 rport_ids.port_id = fcport->d_id.b.domain << 16 |
2256 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa; 2267 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
2257 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2268 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
2258 rport = fc_remote_port_add(ha->host, 0, &rport_ids); 2269 fcport->rport = rport = fc_remote_port_add(ha->host, 0, &rport_ids);
2259 if (!rport) { 2270 if (!rport) {
2260 qla_printk(KERN_WARNING, ha, 2271 qla_printk(KERN_WARNING, ha,
2261 "Unable to allocate fc remote port!\n"); 2272 "Unable to allocate fc remote port!\n");
2262 return; 2273 return;
2263 } 2274 }
2264 spin_lock_irqsave(&fcport->rport_lock, flags); 2275 spin_lock_irq(fcport->ha->host->host_lock);
2265 fcport->rport = rport;
2266 *((fc_port_t **)rport->dd_data) = fcport; 2276 *((fc_port_t **)rport->dd_data) = fcport;
2267 spin_unlock_irqrestore(&fcport->rport_lock, flags); 2277 spin_unlock_irq(fcport->ha->host->host_lock);
2268 2278
2269 rport->supported_classes = fcport->supported_classes; 2279 rport->supported_classes = fcport->supported_classes;
2270 2280
@@ -2565,7 +2575,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *ha, struct list_head *new_fcports)
2565 } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) { 2575 } else if (qla2x00_gnn_id(ha, swl) != QLA_SUCCESS) {
2566 kfree(swl); 2576 kfree(swl);
2567 swl = NULL; 2577 swl = NULL;
2568 } else if (qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) { 2578 } else if (ql2xiidmaenable &&
2579 qla2x00_gfpn_id(ha, swl) == QLA_SUCCESS) {
2569 qla2x00_gpsc(ha, swl); 2580 qla2x00_gpsc(ha, swl);
2570 } 2581 }
2571 } 2582 }
@@ -3220,7 +3231,8 @@ qla2x00_update_fcports(scsi_qla_host_t *ha)
3220 3231
3221 /* Go with deferred removal of rport references. */ 3232 /* Go with deferred removal of rport references. */
3222 list_for_each_entry(fcport, &ha->fcports, list) 3233 list_for_each_entry(fcport, &ha->fcports, list)
3223 if (fcport->drport) 3234 if (fcport->drport &&
3235 atomic_read(&fcport->state) != FCS_UNCONFIGURED)
3224 qla2x00_rport_del(fcport); 3236 qla2x00_rport_del(fcport);
3225} 3237}
3226 3238
@@ -3243,6 +3255,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3243 if (ha->flags.online) { 3255 if (ha->flags.online) {
3244 ha->flags.online = 0; 3256 ha->flags.online = 0;
3245 clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); 3257 clear_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
3258 ha->qla_stats.total_isp_aborts++;
3246 3259
3247 qla_printk(KERN_INFO, ha, 3260 qla_printk(KERN_INFO, ha,
3248 "Performing ISP error recovery - ha= %p.\n", ha); 3261 "Performing ISP error recovery - ha= %p.\n", ha);
@@ -3283,17 +3296,6 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3283 ha->isp_abort_cnt = 0; 3296 ha->isp_abort_cnt = 0;
3284 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags); 3297 clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
3285 3298
3286 if (ha->eft) {
3287 memset(ha->eft, 0, EFT_SIZE);
3288 rval = qla2x00_enable_eft_trace(ha,
3289 ha->eft_dma, EFT_NUM_BUFFERS);
3290 if (rval) {
3291 qla_printk(KERN_WARNING, ha,
3292 "Unable to reinitialize EFT "
3293 "(%d).\n", rval);
3294 }
3295 }
3296
3297 if (ha->fce) { 3299 if (ha->fce) {
3298 ha->flags.fce_enabled = 1; 3300 ha->flags.fce_enabled = 1;
3299 memset(ha->fce, 0, 3301 memset(ha->fce, 0,
@@ -3308,6 +3310,17 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
3308 ha->flags.fce_enabled = 0; 3310 ha->flags.fce_enabled = 0;
3309 } 3311 }
3310 } 3312 }
3313
3314 if (ha->eft) {
3315 memset(ha->eft, 0, EFT_SIZE);
3316 rval = qla2x00_enable_eft_trace(ha,
3317 ha->eft_dma, EFT_NUM_BUFFERS);
3318 if (rval) {
3319 qla_printk(KERN_WARNING, ha,
3320 "Unable to reinitialize EFT "
3321 "(%d).\n", rval);
3322 }
3323 }
3311 } else { /* failed the ISP abort */ 3324 } else { /* failed the ISP abort */
3312 ha->flags.online = 1; 3325 ha->flags.online = 1;
3313 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) { 3326 if (test_bit(ISP_ABORT_RETRY, &ha->dpc_flags)) {
@@ -4026,8 +4039,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha)
4026 ret = qla2x00_stop_firmware(ha); 4039 ret = qla2x00_stop_firmware(ha);
4027 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && 4040 for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
4028 retries ; retries--) { 4041 retries ; retries--) {
4029 qla2x00_reset_chip(ha); 4042 ha->isp_ops->reset_chip(ha);
4030 if (qla2x00_chip_diag(ha) != QLA_SUCCESS) 4043 if (ha->isp_ops->chip_diag(ha) != QLA_SUCCESS)
4031 continue; 4044 continue;
4032 if (qla2x00_setup_chip(ha) != QLA_SUCCESS) 4045 if (qla2x00_setup_chip(ha) != QLA_SUCCESS)
4033 continue; 4046 continue;
@@ -4049,7 +4062,7 @@ qla24xx_configure_vhba(scsi_qla_host_t *ha)
4049 rval = qla2x00_fw_ready(ha->parent); 4062 rval = qla2x00_fw_ready(ha->parent);
4050 if (rval == QLA_SUCCESS) { 4063 if (rval == QLA_SUCCESS) {
4051 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); 4064 clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
4052 qla2x00_marker(ha->parent, 0, 0, MK_SYNC_ALL); 4065 qla2x00_marker(ha, 0, 0, MK_SYNC_ALL);
4053 } 4066 }
4054 4067
4055 ha->flags.management_server_logged_in = 0; 4068 ha->flags.management_server_logged_in = 0;
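Two of the qla_init.c changes above deserve a note. First, qla2x00_alloc_fw_dump() now sets up the FCE buffer before the EFT buffer, with the FCE failure paths falling through to the new try_eft label, so a failed EFT allocation or enable no longer causes the FCE setup to be skipped; qla2x00_abort_isp() re-enables the two buffers in the same order. Second, model_desc changes from a pointer into the static qla2x00_model_name[] table to an 80-byte array inside scsi_qla_host_t, so it can also be overwritten from the VPD identifier string (via qla2xxx_get_vpd_field(), added in the qla_sup.c hunk further down); the copies use strncpy() with sizeof - 1, which terminates correctly only because the destination starts out zeroed. A minimal standalone illustration of that idiom (the model string below is made up, not taken from the driver):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* stands in for ha->model_desc; the real field lives in the
             * zero-allocated scsi_qla_host_t, so its last byte is '\0' */
            char model_desc[80] = "";
            /* hypothetical identifier string, for illustration only */
            const char *vpd_id = "Example Fibre Channel Adapter";

            /* strncpy() does not NUL-terminate when the source is longer
             * than the limit; copying at most sizeof - 1 bytes into an
             * already-zeroed array keeps the terminator in place. */
            strncpy(model_desc, vpd_id, sizeof(model_desc) - 1);
            printf("%s\n", model_desc);
            return 0;
    }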
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 5489d5024673..d57669aa4615 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -454,10 +454,11 @@ qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
454{ 454{
455 int ret; 455 int ret;
456 unsigned long flags = 0; 456 unsigned long flags = 0;
457 scsi_qla_host_t *pha = to_qla_parent(ha);
457 458
458 spin_lock_irqsave(&ha->hardware_lock, flags); 459 spin_lock_irqsave(&pha->hardware_lock, flags);
459 ret = __qla2x00_marker(ha, loop_id, lun, type); 460 ret = __qla2x00_marker(ha, loop_id, lun, type);
460 spin_unlock_irqrestore(&ha->hardware_lock, flags); 461 spin_unlock_irqrestore(&pha->hardware_lock, flags);
461 462
462 return (ret); 463 return (ret);
463} 464}
@@ -672,7 +673,7 @@ qla24xx_start_scsi(srb_t *sp)
672{ 673{
673 int ret, nseg; 674 int ret, nseg;
674 unsigned long flags; 675 unsigned long flags;
675 scsi_qla_host_t *ha; 676 scsi_qla_host_t *ha, *pha;
676 struct scsi_cmnd *cmd; 677 struct scsi_cmnd *cmd;
677 uint32_t *clr_ptr; 678 uint32_t *clr_ptr;
678 uint32_t index; 679 uint32_t index;
@@ -686,6 +687,7 @@ qla24xx_start_scsi(srb_t *sp)
686 /* Setup device pointers. */ 687 /* Setup device pointers. */
687 ret = 0; 688 ret = 0;
688 ha = sp->ha; 689 ha = sp->ha;
690 pha = to_qla_parent(ha);
689 reg = &ha->iobase->isp24; 691 reg = &ha->iobase->isp24;
690 cmd = sp->cmd; 692 cmd = sp->cmd;
691 /* So we know we haven't pci_map'ed anything yet */ 693 /* So we know we haven't pci_map'ed anything yet */
@@ -700,7 +702,7 @@ qla24xx_start_scsi(srb_t *sp)
700 } 702 }
701 703
702 /* Acquire ring specific lock */ 704 /* Acquire ring specific lock */
703 spin_lock_irqsave(&ha->hardware_lock, flags); 705 spin_lock_irqsave(&pha->hardware_lock, flags);
704 706
705 /* Check for room in outstanding command list. */ 707 /* Check for room in outstanding command list. */
706 handle = ha->current_outstanding_cmd; 708 handle = ha->current_outstanding_cmd;
@@ -795,14 +797,14 @@ qla24xx_start_scsi(srb_t *sp)
795 ha->response_ring_ptr->signature != RESPONSE_PROCESSED) 797 ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
796 qla24xx_process_response_queue(ha); 798 qla24xx_process_response_queue(ha);
797 799
798 spin_unlock_irqrestore(&ha->hardware_lock, flags); 800 spin_unlock_irqrestore(&pha->hardware_lock, flags);
799 return QLA_SUCCESS; 801 return QLA_SUCCESS;
800 802
801queuing_error: 803queuing_error:
802 if (tot_dsds) 804 if (tot_dsds)
803 scsi_dma_unmap(cmd); 805 scsi_dma_unmap(cmd);
804 806
805 spin_unlock_irqrestore(&ha->hardware_lock, flags); 807 spin_unlock_irqrestore(&pha->hardware_lock, flags);
806 808
807 return QLA_FUNCTION_FAILED; 809 return QLA_FUNCTION_FAILED;
808} 810}
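The qla_iocb.c change above, and the qla_mbx.c and qla_os.c changes below, all follow one rule: an NPIV vport shares the physical port's request ring and outstanding_cmds[] table, so any code touching them must take the parent's hardware_lock. to_qla_parent() is the driver's existing helper for that; as used throughout these hunks, it resolves a vport to its physical parent HBA and returns the physical port unchanged. A short sketch of the idiom, under that assumption (kernel-style fragment, not complete code):

    scsi_qla_host_t *pha = to_qla_parent(ha);   /* physical HBA */
    unsigned long flags;

    spin_lock_irqsave(&pha->hardware_lock, flags);
    /* ... index pha->outstanding_cmds[] or post to the shared ring ... */
    spin_unlock_irqrestore(&pha->hardware_lock, flags);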
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ec63b79f900a..874d802edb7d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -542,10 +542,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
542 break; 542 break;
543 543
544 case MBA_PORT_UPDATE: /* Port database update */ 544 case MBA_PORT_UPDATE: /* Port database update */
545 /* Only handle SCNs for our Vport index. */
546 if (ha->parent && ha->vp_idx != (mb[3] & 0xff))
547 break;
548
549 /* 545 /*
550 * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET 546 * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET
551 * event etc. earlier indicating loop is down) then process 547 * event etc. earlier indicating loop is down) then process
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 250d2f604397..bc90d6b8d0a0 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -918,6 +918,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *ha, uint16_t *id, uint8_t *al_pa,
918 rval = qla2x00_mailbox_command(ha, mcp); 918 rval = qla2x00_mailbox_command(ha, mcp);
919 if (mcp->mb[0] == MBS_COMMAND_ERROR) 919 if (mcp->mb[0] == MBS_COMMAND_ERROR)
920 rval = QLA_COMMAND_ERROR; 920 rval = QLA_COMMAND_ERROR;
921 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
922 rval = QLA_INVALID_COMMAND;
921 923
922 /* Return data. */ 924 /* Return data. */
923 *id = mcp->mb[1]; 925 *id = mcp->mb[1];
@@ -2161,17 +2163,18 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
2161 struct abort_entry_24xx *abt; 2163 struct abort_entry_24xx *abt;
2162 dma_addr_t abt_dma; 2164 dma_addr_t abt_dma;
2163 uint32_t handle; 2165 uint32_t handle;
2166 scsi_qla_host_t *pha = to_qla_parent(ha);
2164 2167
2165 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no)); 2168 DEBUG11(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2166 2169
2167 fcport = sp->fcport; 2170 fcport = sp->fcport;
2168 2171
2169 spin_lock_irqsave(&ha->hardware_lock, flags); 2172 spin_lock_irqsave(&pha->hardware_lock, flags);
2170 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2173 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2171 if (ha->outstanding_cmds[handle] == sp) 2174 if (pha->outstanding_cmds[handle] == sp)
2172 break; 2175 break;
2173 } 2176 }
2174 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2177 spin_unlock_irqrestore(&pha->hardware_lock, flags);
2175 if (handle == MAX_OUTSTANDING_COMMANDS) { 2178 if (handle == MAX_OUTSTANDING_COMMANDS) {
2176 /* Command not found. */ 2179 /* Command not found. */
2177 return QLA_FUNCTION_FAILED; 2180 return QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 62a3ad6e8ecb..50baf6a1d67c 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -43,6 +43,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
43 43
44 set_bit(vp_id, ha->vp_idx_map); 44 set_bit(vp_id, ha->vp_idx_map);
45 ha->num_vhosts++; 45 ha->num_vhosts++;
46 ha->cur_vport_count++;
46 vha->vp_idx = vp_id; 47 vha->vp_idx = vp_id;
47 list_add_tail(&vha->vp_list, &ha->vp_list); 48 list_add_tail(&vha->vp_list, &ha->vp_list);
48 mutex_unlock(&ha->vport_lock); 49 mutex_unlock(&ha->vport_lock);
@@ -58,6 +59,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
58 mutex_lock(&ha->vport_lock); 59 mutex_lock(&ha->vport_lock);
59 vp_id = vha->vp_idx; 60 vp_id = vha->vp_idx;
60 ha->num_vhosts--; 61 ha->num_vhosts--;
62 ha->cur_vport_count--;
61 clear_bit(vp_id, ha->vp_idx_map); 63 clear_bit(vp_id, ha->vp_idx_map);
62 list_del(&vha->vp_list); 64 list_del(&vha->vp_list);
63 mutex_unlock(&ha->vport_lock); 65 mutex_unlock(&ha->vport_lock);
@@ -103,8 +105,8 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
103 "loop_id=0x%04x :%x\n", 105 "loop_id=0x%04x :%x\n",
104 vha->host_no, fcport->loop_id, fcport->vp_idx)); 106 vha->host_no, fcport->loop_id, fcport->vp_idx));
105 107
106 atomic_set(&fcport->state, FCS_DEVICE_DEAD);
107 qla2x00_mark_device_lost(vha, fcport, 0, 0); 108 qla2x00_mark_device_lost(vha, fcport, 0, 0);
109 atomic_set(&fcport->state, FCS_UNCONFIGURED);
108 } 110 }
109} 111}
110 112
@@ -276,7 +278,8 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
276 clear_bit(RESET_ACTIVE, &vha->dpc_flags); 278 clear_bit(RESET_ACTIVE, &vha->dpc_flags);
277 } 279 }
278 280
279 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { 281 if (atomic_read(&vha->vp_state) == VP_ACTIVE &&
282 test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
280 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { 283 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
281 qla2x00_loop_resync(vha); 284 qla2x00_loop_resync(vha);
282 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); 285 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
@@ -390,7 +393,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
390 vha->parent = ha; 393 vha->parent = ha;
391 vha->fc_vport = fc_vport; 394 vha->fc_vport = fc_vport;
392 vha->device_flags = 0; 395 vha->device_flags = 0;
393 vha->instance = num_hosts;
394 vha->vp_idx = qla24xx_allocate_vp_id(vha); 396 vha->vp_idx = qla24xx_allocate_vp_id(vha);
395 if (vha->vp_idx > ha->max_npiv_vports) { 397 if (vha->vp_idx > ha->max_npiv_vports) {
396 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n", 398 DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
@@ -428,7 +430,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
428 host->max_cmd_len = MAX_CMDSZ; 430 host->max_cmd_len = MAX_CMDSZ;
429 host->max_channel = MAX_BUSES - 1; 431 host->max_channel = MAX_BUSES - 1;
430 host->max_lun = MAX_LUNS; 432 host->max_lun = MAX_LUNS;
431 host->unique_id = vha->instance; 433 host->unique_id = host->host_no;
432 host->max_id = MAX_TARGETS_2200; 434 host->max_id = MAX_TARGETS_2200;
433 host->transportt = qla2xxx_transport_vport_template; 435 host->transportt = qla2xxx_transport_vport_template;
434 436
@@ -436,12 +438,6 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
436 vha->host_no, vha)); 438 vha->host_no, vha));
437 439
438 vha->flags.init_done = 1; 440 vha->flags.init_done = 1;
439 num_hosts++;
440
441 mutex_lock(&ha->vport_lock);
442 set_bit(vha->vp_idx, ha->vp_idx_map);
443 ha->cur_vport_count++;
444 mutex_unlock(&ha->vport_lock);
445 441
446 return vha; 442 return vha;
447 443
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 48eaa3bb5433..7c8af7ed2a5d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -27,7 +27,6 @@ char qla2x00_version_str[40];
27 */ 27 */
28static struct kmem_cache *srb_cachep; 28static struct kmem_cache *srb_cachep;
29 29
30int num_hosts;
31int ql2xlogintimeout = 20; 30int ql2xlogintimeout = 20;
32module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR); 31module_param(ql2xlogintimeout, int, S_IRUGO|S_IRUSR);
33MODULE_PARM_DESC(ql2xlogintimeout, 32MODULE_PARM_DESC(ql2xlogintimeout,
@@ -87,6 +86,13 @@ MODULE_PARM_DESC(ql2xqfullrampup,
87 "depth for a device after a queue-full condition has been " 86 "depth for a device after a queue-full condition has been "
88 "detected. Default is 120 seconds."); 87 "detected. Default is 120 seconds.");
89 88
89int ql2xiidmaenable=1;
90module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
91MODULE_PARM_DESC(ql2xiidmaenable,
92 "Enables iIDMA settings "
93 "Default is 1 - perform iIDMA. 0 - no iIDMA.");
94
95
90/* 96/*
91 * SCSI host template entry points 97 * SCSI host template entry points
92 */ 98 */
@@ -388,7 +394,7 @@ qla2x00_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
388 } 394 }
389 395
390 /* Close window on fcport/rport state-transitioning. */ 396 /* Close window on fcport/rport state-transitioning. */
391 if (!*(fc_port_t **)rport->dd_data) { 397 if (fcport->drport) {
392 cmd->result = DID_IMM_RETRY << 16; 398 cmd->result = DID_IMM_RETRY << 16;
393 goto qc_fail_command; 399 goto qc_fail_command;
394 } 400 }
@@ -443,7 +449,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
443 int rval; 449 int rval;
444 scsi_qla_host_t *pha = to_qla_parent(ha); 450 scsi_qla_host_t *pha = to_qla_parent(ha);
445 451
446 if (unlikely(pci_channel_offline(ha->pdev))) { 452 if (unlikely(pci_channel_offline(pha->pdev))) {
447 cmd->result = DID_REQUEUE << 16; 453 cmd->result = DID_REQUEUE << 16;
448 goto qc24_fail_command; 454 goto qc24_fail_command;
449 } 455 }
@@ -455,7 +461,7 @@ qla24xx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
455 } 461 }
456 462
457 /* Close window on fcport/rport state-transitioning. */ 463 /* Close window on fcport/rport state-transitioning. */
458 if (!*(fc_port_t **)rport->dd_data) { 464 if (fcport->drport) {
459 cmd->result = DID_IMM_RETRY << 16; 465 cmd->result = DID_IMM_RETRY << 16;
460 goto qc24_fail_command; 466 goto qc24_fail_command;
461 } 467 }
@@ -617,6 +623,40 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha)
617 return (return_status); 623 return (return_status);
618} 624}
619 625
626void
627qla2x00_abort_fcport_cmds(fc_port_t *fcport)
628{
629 int cnt;
630 unsigned long flags;
631 srb_t *sp;
632 scsi_qla_host_t *ha = fcport->ha;
633 scsi_qla_host_t *pha = to_qla_parent(ha);
634
635 spin_lock_irqsave(&pha->hardware_lock, flags);
636 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
637 sp = pha->outstanding_cmds[cnt];
638 if (!sp)
639 continue;
640 if (sp->fcport != fcport)
641 continue;
642
643 spin_unlock_irqrestore(&pha->hardware_lock, flags);
644 if (ha->isp_ops->abort_command(ha, sp)) {
645 DEBUG2(qla_printk(KERN_WARNING, ha,
646 "Abort failed -- %lx\n", sp->cmd->serial_number));
647 } else {
648 if (qla2x00_eh_wait_on_command(ha, sp->cmd) !=
649 QLA_SUCCESS)
650 DEBUG2(qla_printk(KERN_WARNING, ha,
651 "Abort failed while waiting -- %lx\n",
652 sp->cmd->serial_number));
653
654 }
655 spin_lock_irqsave(&pha->hardware_lock, flags);
656 }
657 spin_unlock_irqrestore(&pha->hardware_lock, flags);
658}
659
620static void 660static void
621qla2x00_block_error_handler(struct scsi_cmnd *cmnd) 661qla2x00_block_error_handler(struct scsi_cmnd *cmnd)
622{ 662{
@@ -1073,7 +1113,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
1073 else 1113 else
1074 scsi_deactivate_tcq(sdev, ha->max_q_depth); 1114 scsi_deactivate_tcq(sdev, ha->max_q_depth);
1075 1115
1076 rport->dev_loss_tmo = ha->port_down_retry_count + 5; 1116 rport->dev_loss_tmo = ha->port_down_retry_count;
1077 1117
1078 return 0; 1118 return 0;
1079} 1119}
@@ -1629,9 +1669,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1629 } 1669 }
1630 host->can_queue = ha->request_q_length + 128; 1670 host->can_queue = ha->request_q_length + 128;
1631 1671
1632 /* load the F/W, read paramaters, and init the H/W */
1633 ha->instance = num_hosts;
1634
1635 mutex_init(&ha->vport_lock); 1672 mutex_init(&ha->vport_lock);
1636 init_completion(&ha->mbx_cmd_comp); 1673 init_completion(&ha->mbx_cmd_comp);
1637 complete(&ha->mbx_cmd_comp); 1674 complete(&ha->mbx_cmd_comp);
@@ -1679,7 +1716,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1679 1716
1680 host->this_id = 255; 1717 host->this_id = 255;
1681 host->cmd_per_lun = 3; 1718 host->cmd_per_lun = 3;
1682 host->unique_id = ha->instance; 1719 host->unique_id = host->host_no;
1683 host->max_cmd_len = MAX_CMDSZ; 1720 host->max_cmd_len = MAX_CMDSZ;
1684 host->max_channel = MAX_BUSES - 1; 1721 host->max_channel = MAX_BUSES - 1;
1685 host->max_lun = MAX_LUNS; 1722 host->max_lun = MAX_LUNS;
@@ -1700,8 +1737,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1700 ha->flags.init_done = 1; 1737 ha->flags.init_done = 1;
1701 ha->flags.online = 1; 1738 ha->flags.online = 1;
1702 1739
1703 num_hosts++;
1704
1705 ret = scsi_add_host(host, &pdev->dev); 1740 ret = scsi_add_host(host, &pdev->dev);
1706 if (ret) 1741 if (ret)
1707 goto probe_failed; 1742 goto probe_failed;
@@ -1813,27 +1848,21 @@ static inline void
1813qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport, 1848qla2x00_schedule_rport_del(struct scsi_qla_host *ha, fc_port_t *fcport,
1814 int defer) 1849 int defer)
1815{ 1850{
1816 unsigned long flags;
1817 struct fc_rport *rport; 1851 struct fc_rport *rport;
1852 scsi_qla_host_t *pha = to_qla_parent(ha);
1818 1853
1819 if (!fcport->rport) 1854 if (!fcport->rport)
1820 return; 1855 return;
1821 1856
1822 rport = fcport->rport; 1857 rport = fcport->rport;
1823 if (defer) { 1858 if (defer) {
1824 spin_lock_irqsave(&fcport->rport_lock, flags); 1859 spin_lock_irq(ha->host->host_lock);
1825 fcport->drport = rport; 1860 fcport->drport = rport;
1826 fcport->rport = NULL; 1861 spin_unlock_irq(ha->host->host_lock);
1827 *(fc_port_t **)rport->dd_data = NULL; 1862 set_bit(FCPORT_UPDATE_NEEDED, &pha->dpc_flags);
1828 spin_unlock_irqrestore(&fcport->rport_lock, flags); 1863 qla2xxx_wake_dpc(pha);
1829 set_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags); 1864 } else
1830 } else {
1831 spin_lock_irqsave(&fcport->rport_lock, flags);
1832 fcport->rport = NULL;
1833 *(fc_port_t **)rport->dd_data = NULL;
1834 spin_unlock_irqrestore(&fcport->rport_lock, flags);
1835 fc_remote_port_delete(rport); 1865 fc_remote_port_delete(rport);
1836 }
1837} 1866}
1838 1867
1839/* 1868/*
@@ -1903,7 +1932,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1903 scsi_qla_host_t *pha = to_qla_parent(ha); 1932 scsi_qla_host_t *pha = to_qla_parent(ha);
1904 1933
1905 list_for_each_entry(fcport, &pha->fcports, list) { 1934 list_for_each_entry(fcport, &pha->fcports, list) {
1906 if (ha->vp_idx != 0 && ha->vp_idx != fcport->vp_idx) 1935 if (ha->vp_idx != fcport->vp_idx)
1907 continue; 1936 continue;
1908 /* 1937 /*
1909 * No point in marking the device as lost, if the device is 1938 * No point in marking the device as lost, if the device is
@@ -1911,17 +1940,10 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
1911 */ 1940 */
1912 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) 1941 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
1913 continue; 1942 continue;
1914 if (atomic_read(&fcport->state) == FCS_ONLINE) { 1943 if (atomic_read(&fcport->state) == FCS_ONLINE)
1915 if (defer) 1944 qla2x00_schedule_rport_del(ha, fcport, defer);
1916 qla2x00_schedule_rport_del(ha, fcport, defer);
1917 else if (ha->vp_idx == fcport->vp_idx)
1918 qla2x00_schedule_rport_del(ha, fcport, defer);
1919 }
1920 atomic_set(&fcport->state, FCS_DEVICE_LOST); 1945 atomic_set(&fcport->state, FCS_DEVICE_LOST);
1921 } 1946 }
1922
1923 if (defer)
1924 qla2xxx_wake_dpc(ha);
1925} 1947}
1926 1948
1927/* 1949/*
@@ -2156,7 +2178,7 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
2156static int 2178static int
2157qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked) 2179qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
2158{ 2180{
2159 unsigned long flags; 2181 unsigned long uninitialized_var(flags);
2160 scsi_qla_host_t *pha = to_qla_parent(ha); 2182 scsi_qla_host_t *pha = to_qla_parent(ha);
2161 2183
2162 if (!locked) 2184 if (!locked)
@@ -2313,8 +2335,10 @@ qla2x00_do_dpc(void *data)
2313 ha->host_no)); 2335 ha->host_no));
2314 } 2336 }
2315 2337
2316 if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) 2338 if (test_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags)) {
2317 qla2x00_update_fcports(ha); 2339 qla2x00_update_fcports(ha);
2340 clear_bit(FCPORT_UPDATE_NEEDED, &ha->dpc_flags);
2341 }
2318 2342
2319 if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) && 2343 if (test_and_clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) &&
2320 (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) { 2344 (!(test_and_set_bit(RESET_ACTIVE, &ha->dpc_flags)))) {
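Two smaller points in the qla_os.c hunks above. The new qla2x00_abort_fcport_cmds() walks the parent's outstanding_cmds[] under hardware_lock but releases the lock around the abort and the subsequent qla2x00_eh_wait_on_command() wait, neither of which can run under a spinlock: as the qla_mbx.c hunk shows, the qla24xx abort path takes the same hardware_lock itself and issues a mailbox command. And qla2x00_post_work() wraps its flags variable in uninitialized_var(), the kernel annotation for silencing a false "may be used uninitialized" warning when, as here, the variable is written and read only under the same !locked condition. In kernels of this era the annotation is roughly:

    /* self-assignment convinces gcc the variable is initialized without
     * generating extra code (illustrative definition, not quoted source) */
    #define uninitialized_var(x) x = x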
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 1728ab3ccb20..1bca74474935 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -869,11 +869,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
869 uint32_t i; 869 uint32_t i;
870 uint32_t *dwptr; 870 uint32_t *dwptr;
871 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 871 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
872 unsigned long flags;
873 872
874 ret = QLA_SUCCESS; 873 ret = QLA_SUCCESS;
875 874
876 spin_lock_irqsave(&ha->hardware_lock, flags);
877 /* Enable flash write. */ 875 /* Enable flash write. */
878 WRT_REG_DWORD(&reg->ctrl_status, 876 WRT_REG_DWORD(&reg->ctrl_status,
879 RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE); 877 RD_REG_DWORD(&reg->ctrl_status) | CSRX_FLASH_ENABLE);
@@ -907,7 +905,6 @@ qla24xx_write_nvram_data(scsi_qla_host_t *ha, uint8_t *buf, uint32_t naddr,
907 WRT_REG_DWORD(&reg->ctrl_status, 905 WRT_REG_DWORD(&reg->ctrl_status,
908 RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE); 906 RD_REG_DWORD(&reg->ctrl_status) & ~CSRX_FLASH_ENABLE);
909 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */ 907 RD_REG_DWORD(&reg->ctrl_status); /* PCI Posting. */
910 spin_unlock_irqrestore(&ha->hardware_lock, flags);
911 908
912 return ret; 909 return ret;
913} 910}
@@ -2306,6 +2303,51 @@ qla24xx_get_flash_version(scsi_qla_host_t *ha, void *mbuf)
2306} 2303}
2307 2304
2308static int 2305static int
2306qla2xxx_is_vpd_valid(uint8_t *pos, uint8_t *end)
2307{
2308 if (pos >= end || *pos != 0x82)
2309 return 0;
2310
2311 pos += 3 + pos[1];
2312 if (pos >= end || *pos != 0x90)
2313 return 0;
2314
2315 pos += 3 + pos[1];
2316 if (pos >= end || *pos != 0x78)
2317 return 0;
2318
2319 return 1;
2320}
2321
2322int
2323qla2xxx_get_vpd_field(scsi_qla_host_t *ha, char *key, char *str, size_t size)
2324{
2325 uint8_t *pos = ha->vpd;
2326 uint8_t *end = pos + ha->vpd_size;
2327 int len = 0;
2328
2329 if (!IS_FWI2_CAPABLE(ha) || !qla2xxx_is_vpd_valid(pos, end))
2330 return 0;
2331
2332 while (pos < end && *pos != 0x78) {
2333 len = (*pos == 0x82) ? pos[1] : pos[2];
2334
2335 if (!strncmp(pos, key, strlen(key)))
2336 break;
2337
2338 if (*pos != 0x90 && *pos != 0x91)
2339 pos += len;
2340
2341 pos += 3;
2342 }
2343
2344 if (pos < end - len && *pos != 0x78)
2345 return snprintf(str, size, "%.*s", len, pos + 3);
2346
2347 return 0;
2348}
2349
2350static int
2309qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata) 2351qla2xxx_hw_event_store(scsi_qla_host_t *ha, uint32_t *fdata)
2310{ 2352{
2311 uint32_t d[2], faddr; 2353 uint32_t d[2], faddr;
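The new qla2xxx_get_vpd_field() above walks the adapter's cached PCI VPD image by resource tag: 0x82 is the large-resource Identifier String tag, 0x90 the read-only keyword section (VPD-R), 0x91 the writable section (VPD-W), and 0x78 the small-resource End tag; qla2xxx_is_vpd_valid() simply checks that those appear in that order. The callers added earlier in the series pass the key "SN" to pull the serial-number keyword for qla2x00_serial_num_show() and the key "\x82" to copy the identifier string into model_desc. A sketch of the byte layout the parser expects (hypothetical values, for illustration only):

    /* hypothetical VPD image accepted by qla2xxx_is_vpd_valid() */
    static const unsigned char vpd_example[] = {
            0x82, 0x04, 0x00, 'Q', 'L', 'A', '2',      /* Identifier String, len 4 */
            0x90, 0x07, 0x00,                          /* VPD-R section, len 7     */
                  'S', 'N', 0x04, '1', '2', '3', '4',  /* keyword "SN", len 4      */
            0x78                                       /* End tag                  */
    };

On this image, qla2xxx_get_vpd_field(ha, "SN", ...) would return "1234" and the "\x82" key would return "QLA2", matching how the sysfs and model-info callers use it.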
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index d058c8862b35..676c390db354 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.02.01-k4" 10#define QLA2XXX_VERSION "8.02.01-k6"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 2 13#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 5822dd595826..88bebb13bc52 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -46,6 +46,8 @@ MODULE_PARM_DESC(ql4xextended_error_logging,
46 46
47int ql4_mod_unload = 0; 47int ql4_mod_unload = 0;
48 48
49#define QL4_DEF_QDEPTH 32
50
49/* 51/*
50 * SCSI host template entry points 52 * SCSI host template entry points
51 */ 53 */
@@ -1387,7 +1389,7 @@ static int qla4xxx_slave_alloc(struct scsi_device *sdev)
1387 1389
1388 sdev->hostdata = ddb; 1390 sdev->hostdata = ddb;
1389 sdev->tagged_supported = 1; 1391 sdev->tagged_supported = 1;
1390 scsi_activate_tcq(sdev, sdev->host->can_queue); 1392 scsi_activate_tcq(sdev, QL4_DEF_QDEPTH);
1391 return 0; 1393 return 0;
1392} 1394}
1393 1395
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 36c92f961e15..ee6be596503d 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -197,11 +197,43 @@ static void
197scsi_pool_free_command(struct scsi_host_cmd_pool *pool, 197scsi_pool_free_command(struct scsi_host_cmd_pool *pool,
198 struct scsi_cmnd *cmd) 198 struct scsi_cmnd *cmd)
199{ 199{
200 if (cmd->prot_sdb)
201 kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
202
200 kmem_cache_free(pool->sense_slab, cmd->sense_buffer); 203 kmem_cache_free(pool->sense_slab, cmd->sense_buffer);
201 kmem_cache_free(pool->cmd_slab, cmd); 204 kmem_cache_free(pool->cmd_slab, cmd);
202} 205}
203 206
204/** 207/**
208 * scsi_host_alloc_command - internal function to allocate command
209 * @shost: SCSI host whose pool to allocate from
210 * @gfp_mask: mask for the allocation
211 *
212 * Returns a fully allocated command with sense buffer and protection
213 * data buffer (where applicable) or NULL on failure
214 */
215static struct scsi_cmnd *
216scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)
217{
218 struct scsi_cmnd *cmd;
219
220 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask);
221 if (!cmd)
222 return NULL;
223
224 if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
225 cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask);
226
227 if (!cmd->prot_sdb) {
228 scsi_pool_free_command(shost->cmd_pool, cmd);
229 return NULL;
230 }
231 }
232
233 return cmd;
234}
235
236/**
205 * __scsi_get_command - Allocate a struct scsi_cmnd 237 * __scsi_get_command - Allocate a struct scsi_cmnd
206 * @shost: host to transmit command 238 * @shost: host to transmit command
207 * @gfp_mask: allocation mask 239 * @gfp_mask: allocation mask
@@ -214,7 +246,7 @@ struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
214 struct scsi_cmnd *cmd; 246 struct scsi_cmnd *cmd;
215 unsigned char *buf; 247 unsigned char *buf;
216 248
217 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask); 249 cmd = scsi_host_alloc_command(shost, gfp_mask);
218 250
219 if (unlikely(!cmd)) { 251 if (unlikely(!cmd)) {
220 unsigned long flags; 252 unsigned long flags;
@@ -457,7 +489,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)
457 /* 489 /*
458 * Get one backup command for this host. 490 * Get one backup command for this host.
459 */ 491 */
460 cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask); 492 cmd = scsi_host_alloc_command(shost, gfp_mask);
461 if (!cmd) { 493 if (!cmd) {
462 scsi_put_host_cmd_pool(gfp_mask); 494 scsi_put_host_cmd_pool(gfp_mask);
463 shost->cmd_pool = NULL; 495 shost->cmd_pool = NULL;
@@ -902,11 +934,20 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
902 934
903 spin_lock_irqsave(sdev->request_queue->queue_lock, flags); 935 spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
904 936
905 /* Check to see if the queue is managed by the block layer. 937 /*
906 * If it is, and we fail to adjust the depth, exit. */ 938 * Check to see if the queue is managed by the block layer.
907 if (blk_queue_tagged(sdev->request_queue) && 939 * If it is, and we fail to adjust the depth, exit.
908 blk_queue_resize_tags(sdev->request_queue, tags) != 0) 940 *
 909 goto out; 941 * Do not resize the tag map if it is a host-wide shared bqt,
 942 * because the size should be the host's can_queue. If there
 943 * is more IO than the LLD's can_queue (so there are not enough
 944 * tags) request_fn's host queue ready check will handle it.
945 */
946 if (!sdev->host->bqt) {
947 if (blk_queue_tagged(sdev->request_queue) &&
948 blk_queue_resize_tags(sdev->request_queue, tags) != 0)
949 goto out;
950 }
910 951
911 sdev->queue_depth = tags; 952 sdev->queue_depth = tags;
912 switch (tagged) { 953 switch (tagged) {
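The scsi_host_alloc_command hunk above pairs the base command allocation with an optional protection-data buffer and unwinds what it already holds if the second allocation fails. A minimal user-space sketch of that allocate-then-roll-back shape, using made-up sizes and struct fields rather than the midlayer's scsi_cmnd:

#include <stdio.h>
#include <stdlib.h>

struct cmd {
	unsigned char *sense_buf;	/* always present            */
	unsigned char *prot_buf;	/* only when DIX is enabled   */
};

static struct cmd *alloc_cmd(int dix_enabled)
{
	struct cmd *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	c->sense_buf = calloc(1, 96);
	if (!c->sense_buf)
		goto err_cmd;

	if (dix_enabled) {
		c->prot_buf = calloc(1, 512);
		if (!c->prot_buf)
			goto err_sense;	/* roll back what we already hold */
	}
	return c;

err_sense:
	free(c->sense_buf);
err_cmd:
	free(c);
	return NULL;
}

static void free_cmd(struct cmd *c)
{
	if (!c)
		return;
	free(c->prot_buf);	/* free(NULL) is a no-op */
	free(c->sense_buf);
	free(c);
}

int main(void)
{
	struct cmd *c = alloc_cmd(1);

	printf("allocated: %s\n", c ? "yes" : "no");
	free_cmd(c);
	return 0;
}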
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 01d11a01ffbf..27c633f55794 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1753,7 +1753,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
1753 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC); 1753 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
1754 if (!open_devip) { 1754 if (!open_devip) {
1755 printk(KERN_ERR "%s: out of memory at line %d\n", 1755 printk(KERN_ERR "%s: out of memory at line %d\n",
1756 __FUNCTION__, __LINE__); 1756 __func__, __LINE__);
1757 return NULL; 1757 return NULL;
1758 } 1758 }
1759 } 1759 }
@@ -2656,7 +2656,7 @@ static int sdebug_add_adapter(void)
2656 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL); 2656 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
2657 if (NULL == sdbg_host) { 2657 if (NULL == sdbg_host) {
2658 printk(KERN_ERR "%s: out of memory at line %d\n", 2658 printk(KERN_ERR "%s: out of memory at line %d\n",
2659 __FUNCTION__, __LINE__); 2659 __func__, __LINE__);
2660 return -ENOMEM; 2660 return -ENOMEM;
2661 } 2661 }
2662 2662
@@ -2667,7 +2667,7 @@ static int sdebug_add_adapter(void)
2667 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL); 2667 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
2668 if (!sdbg_devinfo) { 2668 if (!sdbg_devinfo) {
2669 printk(KERN_ERR "%s: out of memory at line %d\n", 2669 printk(KERN_ERR "%s: out of memory at line %d\n",
2670 __FUNCTION__, __LINE__); 2670 __func__, __LINE__);
2671 error = -ENOMEM; 2671 error = -ENOMEM;
2672 goto clean; 2672 goto clean;
2673 } 2673 }
@@ -2987,7 +2987,7 @@ static int sdebug_driver_probe(struct device * dev)
2987 2987
2988 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host)); 2988 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
2989 if (NULL == hpnt) { 2989 if (NULL == hpnt) {
2990 printk(KERN_ERR "%s: scsi_register failed\n", __FUNCTION__); 2990 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
2991 error = -ENODEV; 2991 error = -ENODEV;
2992 return error; 2992 return error;
2993 } 2993 }
@@ -3002,7 +3002,7 @@ static int sdebug_driver_probe(struct device * dev)
3002 3002
3003 error = scsi_add_host(hpnt, &sdbg_host->dev); 3003 error = scsi_add_host(hpnt, &sdbg_host->dev);
3004 if (error) { 3004 if (error) {
3005 printk(KERN_ERR "%s: scsi_add_host failed\n", __FUNCTION__); 3005 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
3006 error = -ENODEV; 3006 error = -ENODEV;
3007 scsi_host_put(hpnt); 3007 scsi_host_put(hpnt);
3008 } else 3008 } else
@@ -3021,7 +3021,7 @@ static int sdebug_driver_remove(struct device * dev)
3021 3021
3022 if (!sdbg_host) { 3022 if (!sdbg_host) {
3023 printk(KERN_ERR "%s: Unable to locate host info\n", 3023 printk(KERN_ERR "%s: Unable to locate host info\n",
3024 __FUNCTION__); 3024 __func__);
3025 return -ENODEV; 3025 return -ENODEV;
3026 } 3026 }
3027 3027
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index a235802f2981..4969e4ec75ea 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -272,7 +272,7 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
272 } 272 }
273 if (from_length > to_length) 273 if (from_length > to_length)
274 printk(KERN_WARNING "%s: %s string '%s' is too long\n", 274 printk(KERN_WARNING "%s: %s string '%s' is too long\n",
275 __FUNCTION__, name, from); 275 __func__, name, from);
276} 276}
277 277
278/** 278/**
@@ -298,7 +298,7 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
298 298
299 devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL); 299 devinfo = kmalloc(sizeof(*devinfo), GFP_KERNEL);
300 if (!devinfo) { 300 if (!devinfo) {
301 printk(KERN_ERR "%s: no memory\n", __FUNCTION__); 301 printk(KERN_ERR "%s: no memory\n", __func__);
302 return -ENOMEM; 302 return -ENOMEM;
303 } 303 }
304 304
@@ -363,7 +363,7 @@ static int scsi_dev_info_list_add_str(char *dev_list)
363 strflags = strsep(&next, next_check); 363 strflags = strsep(&next, next_check);
364 if (!model || !strflags) { 364 if (!model || !strflags) {
365 printk(KERN_ERR "%s: bad dev info string '%s' '%s'" 365 printk(KERN_ERR "%s: bad dev info string '%s' '%s'"
366 " '%s'\n", __FUNCTION__, vendor, model, 366 " '%s'\n", __func__, vendor, model,
367 strflags); 367 strflags);
368 res = -EINVAL; 368 res = -EINVAL;
369 } else 369 } else
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 006a95916f72..880051c89bde 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -139,7 +139,7 @@ void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
139 scmd->eh_timeout.function = (void (*)(unsigned long)) complete; 139 scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
140 140
141 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:" 141 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
142 " %d, (%p)\n", __FUNCTION__, 142 " %d, (%p)\n", __func__,
143 scmd, timeout, complete)); 143 scmd, timeout, complete));
144 144
145 add_timer(&scmd->eh_timeout); 145 add_timer(&scmd->eh_timeout);
@@ -163,7 +163,7 @@ int scsi_delete_timer(struct scsi_cmnd *scmd)
163 rtn = del_timer(&scmd->eh_timeout); 163 rtn = del_timer(&scmd->eh_timeout);
164 164
165 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p," 165 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
166 " rtn: %d\n", __FUNCTION__, 166 " rtn: %d\n", __func__,
167 scmd, rtn)); 167 scmd, rtn));
168 168
169 scmd->eh_timeout.data = (unsigned long)NULL; 169 scmd->eh_timeout.data = (unsigned long)NULL;
@@ -233,7 +233,7 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
233 233
234 online = scsi_device_online(sdev); 234 online = scsi_device_online(sdev);
235 235
236 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__, 236 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __func__,
237 online)); 237 online));
238 238
239 return online; 239 return online;
@@ -271,7 +271,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
271 SCSI_LOG_ERROR_RECOVERY(3, 271 SCSI_LOG_ERROR_RECOVERY(3,
272 sdev_printk(KERN_INFO, sdev, 272 sdev_printk(KERN_INFO, sdev,
273 "%s: cmds failed: %d, cancel: %d\n", 273 "%s: cmds failed: %d, cancel: %d\n",
274 __FUNCTION__, cmd_failed, 274 __func__, cmd_failed,
275 cmd_cancel)); 275 cmd_cancel));
276 cmd_cancel = 0; 276 cmd_cancel = 0;
277 cmd_failed = 0; 277 cmd_failed = 0;
@@ -344,6 +344,9 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
344 return /* soft_error */ SUCCESS; 344 return /* soft_error */ SUCCESS;
345 345
346 case ABORTED_COMMAND: 346 case ABORTED_COMMAND:
347 if (sshdr.asc == 0x10) /* DIF */
348 return SUCCESS;
349
347 return NEEDS_RETRY; 350 return NEEDS_RETRY;
348 case NOT_READY: 351 case NOT_READY:
349 case UNIT_ATTENTION: 352 case UNIT_ATTENTION:
@@ -470,7 +473,7 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
470 473
471 SCSI_LOG_ERROR_RECOVERY(3, 474 SCSI_LOG_ERROR_RECOVERY(3,
472 printk("%s scmd: %p result: %x\n", 475 printk("%s scmd: %p result: %x\n",
473 __FUNCTION__, scmd, scmd->result)); 476 __func__, scmd, scmd->result));
474 477
475 eh_action = scmd->device->host->eh_action; 478 eh_action = scmd->device->host->eh_action;
476 if (eh_action) 479 if (eh_action)
@@ -487,7 +490,7 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd)
487 int rtn; 490 int rtn;
488 491
489 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n", 492 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
490 __FUNCTION__)); 493 __func__));
491 494
492 if (!scmd->device->host->hostt->eh_host_reset_handler) 495 if (!scmd->device->host->hostt->eh_host_reset_handler)
493 return FAILED; 496 return FAILED;
@@ -516,7 +519,7 @@ static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
516 int rtn; 519 int rtn;
517 520
518 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n", 521 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
519 __FUNCTION__)); 522 __func__));
520 523
521 if (!scmd->device->host->hostt->eh_bus_reset_handler) 524 if (!scmd->device->host->hostt->eh_bus_reset_handler)
522 return FAILED; 525 return FAILED;
@@ -664,7 +667,10 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
664 ses->sdb = scmd->sdb; 667 ses->sdb = scmd->sdb;
665 ses->next_rq = scmd->request->next_rq; 668 ses->next_rq = scmd->request->next_rq;
666 ses->result = scmd->result; 669 ses->result = scmd->result;
670 ses->underflow = scmd->underflow;
671 ses->prot_op = scmd->prot_op;
667 672
673 scmd->prot_op = SCSI_PROT_NORMAL;
668 scmd->cmnd = ses->eh_cmnd; 674 scmd->cmnd = ses->eh_cmnd;
669 memset(scmd->cmnd, 0, BLK_MAX_CDB); 675 memset(scmd->cmnd, 0, BLK_MAX_CDB);
670 memset(&scmd->sdb, 0, sizeof(scmd->sdb)); 676 memset(&scmd->sdb, 0, sizeof(scmd->sdb));
@@ -722,6 +728,8 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
722 scmd->sdb = ses->sdb; 728 scmd->sdb = ses->sdb;
723 scmd->request->next_rq = ses->next_rq; 729 scmd->request->next_rq = ses->next_rq;
724 scmd->result = ses->result; 730 scmd->result = ses->result;
731 scmd->underflow = ses->underflow;
732 scmd->prot_op = ses->prot_op;
725} 733}
726EXPORT_SYMBOL(scsi_eh_restore_cmnd); 734EXPORT_SYMBOL(scsi_eh_restore_cmnd);
727 735
@@ -766,7 +774,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
766 774
767 SCSI_LOG_ERROR_RECOVERY(3, 775 SCSI_LOG_ERROR_RECOVERY(3,
768 printk("%s: scmd: %p, timeleft: %ld\n", 776 printk("%s: scmd: %p, timeleft: %ld\n",
769 __FUNCTION__, scmd, timeleft)); 777 __func__, scmd, timeleft));
770 778
771 /* 779 /*
772 * If there is time left scsi_eh_done got called, and we will 780 * If there is time left scsi_eh_done got called, and we will
@@ -778,7 +786,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
778 rtn = scsi_eh_completed_normally(scmd); 786 rtn = scsi_eh_completed_normally(scmd);
779 SCSI_LOG_ERROR_RECOVERY(3, 787 SCSI_LOG_ERROR_RECOVERY(3,
780 printk("%s: scsi_eh_completed_normally %x\n", 788 printk("%s: scsi_eh_completed_normally %x\n",
781 __FUNCTION__, rtn)); 789 __func__, rtn));
782 790
783 switch (rtn) { 791 switch (rtn) {
784 case SUCCESS: 792 case SUCCESS:
@@ -913,7 +921,7 @@ retry_tur:
913 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0); 921 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6, SENSE_TIMEOUT, 0);
914 922
915 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n", 923 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
916 __FUNCTION__, scmd, rtn)); 924 __func__, scmd, rtn));
917 925
918 switch (rtn) { 926 switch (rtn) {
919 case NEEDS_RETRY: 927 case NEEDS_RETRY:
@@ -1296,7 +1304,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1296 if (!scsi_device_online(scmd->device)) { 1304 if (!scsi_device_online(scmd->device)) {
1297 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report" 1305 SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
1298 " as SUCCESS\n", 1306 " as SUCCESS\n",
1299 __FUNCTION__)); 1307 __func__));
1300 return SUCCESS; 1308 return SUCCESS;
1301 } 1309 }
1302 1310
@@ -1511,7 +1519,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
1511 * ioctls to queued block devices. 1519 * ioctls to queued block devices.
1512 */ 1520 */
1513 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", 1521 SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
1514 __FUNCTION__)); 1522 __func__));
1515 1523
1516 spin_lock_irqsave(shost->host_lock, flags); 1524 spin_lock_irqsave(shost->host_lock, flags);
1517 if (scsi_host_set_state(shost, SHOST_RUNNING)) 1525 if (scsi_host_set_state(shost, SHOST_RUNNING))
@@ -1835,7 +1843,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
1835 */ 1843 */
1836 SCSI_LOG_ERROR_RECOVERY(3, 1844 SCSI_LOG_ERROR_RECOVERY(3,
1837 printk("%s: waking up host to restart after TMF\n", 1845 printk("%s: waking up host to restart after TMF\n",
1838 __FUNCTION__)); 1846 __func__));
1839 1847
1840 wake_up(&shost->host_wait); 1848 wake_up(&shost->host_wait);
1841 1849
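The scsi_check_sense change above stops retrying ABORTED COMMAND when the additional sense code is 0x10, i.e. the target rejected the transfer because a DIF guard/application/reference tag check failed; resending the same bad protection data would not help. A toy classifier showing that decision, with invented return values rather than the midlayer's SUCCESS/NEEDS_RETRY constants:

#include <stdio.h>

enum disposition { RETRY, FAIL_FAST };

static enum disposition classify(unsigned char sense_key, unsigned char asc)
{
	/* ABORTED COMMAND (0x0b) with ASC 0x10: the protection tags did
	 * not check out, so the data itself is bad and retrying is useless. */
	if (sense_key == 0x0b && asc == 0x10)
		return FAIL_FAST;

	/* Default policy for this sketch: retry everything else. */
	return RETRY;
}

int main(void)
{
	printf("aborted, DIF asc 0x10 : %s\n",
	       classify(0x0b, 0x10) == FAIL_FAST ? "fail fast" : "retry");
	printf("aborted, other asc    : %s\n",
	       classify(0x0b, 0x29) == FAIL_FAST ? "fail fast" : "retry");
	return 0;
}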
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 88d1b5f44e59..ff5d56b3ee4d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -65,7 +65,7 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
65}; 65};
66#undef SP 66#undef SP
67 67
68static struct kmem_cache *scsi_sdb_cache; 68struct kmem_cache *scsi_sdb_cache;
69 69
70static void scsi_run_queue(struct request_queue *q); 70static void scsi_run_queue(struct request_queue *q);
71 71
@@ -787,6 +787,9 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
787 kmem_cache_free(scsi_sdb_cache, bidi_sdb); 787 kmem_cache_free(scsi_sdb_cache, bidi_sdb);
788 cmd->request->next_rq->special = NULL; 788 cmd->request->next_rq->special = NULL;
789 } 789 }
790
791 if (scsi_prot_sg_count(cmd))
792 scsi_free_sgtable(cmd->prot_sdb);
790} 793}
791EXPORT_SYMBOL(scsi_release_buffers); 794EXPORT_SYMBOL(scsi_release_buffers);
792 795
@@ -947,9 +950,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
947 * 6-byte command. 950 * 6-byte command.
948 */ 951 */
949 scsi_requeue_command(q, cmd); 952 scsi_requeue_command(q, cmd);
950 return; 953 } else if (sshdr.asc == 0x10) /* DIX */
951 } else { 954 scsi_end_request(cmd, -EIO, this_count, 0);
955 else
952 scsi_end_request(cmd, -EIO, this_count, 1); 956 scsi_end_request(cmd, -EIO, this_count, 1);
957 return;
958 case ABORTED_COMMAND:
959 if (sshdr.asc == 0x10) { /* DIF */
960 scsi_end_request(cmd, -EIO, this_count, 0);
953 return; 961 return;
954 } 962 }
955 break; 963 break;
@@ -1072,6 +1080,26 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
1072 goto err_exit; 1080 goto err_exit;
1073 } 1081 }
1074 1082
1083 if (blk_integrity_rq(cmd->request)) {
1084 struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
1085 int ivecs, count;
1086
1087 BUG_ON(prot_sdb == NULL);
1088 ivecs = blk_rq_count_integrity_sg(cmd->request);
1089
1090 if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
1091 error = BLKPREP_DEFER;
1092 goto err_exit;
1093 }
1094
1095 count = blk_rq_map_integrity_sg(cmd->request,
1096 prot_sdb->table.sgl);
1097 BUG_ON(unlikely(count > ivecs));
1098
1099 cmd->prot_sdb = prot_sdb;
1100 cmd->prot_sdb->table.nents = count;
1101 }
1102
1075 return BLKPREP_OK ; 1103 return BLKPREP_OK ;
1076 1104
1077err_exit: 1105err_exit:
@@ -1367,7 +1395,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1367 1395
1368 if (unlikely(cmd == NULL)) { 1396 if (unlikely(cmd == NULL)) {
1369 printk(KERN_CRIT "impossible request in %s.\n", 1397 printk(KERN_CRIT "impossible request in %s.\n",
1370 __FUNCTION__); 1398 __func__);
1371 BUG(); 1399 BUG();
1372 } 1400 }
1373 1401
@@ -1491,12 +1519,27 @@ static void scsi_request_fn(struct request_queue *q)
1491 printk(KERN_CRIT "impossible request in %s.\n" 1519 printk(KERN_CRIT "impossible request in %s.\n"
1492 "please mail a stack trace to " 1520 "please mail a stack trace to "
1493 "linux-scsi@vger.kernel.org\n", 1521 "linux-scsi@vger.kernel.org\n",
1494 __FUNCTION__); 1522 __func__);
1495 blk_dump_rq_flags(req, "foo"); 1523 blk_dump_rq_flags(req, "foo");
1496 BUG(); 1524 BUG();
1497 } 1525 }
1498 spin_lock(shost->host_lock); 1526 spin_lock(shost->host_lock);
1499 1527
1528 /*
1529 * We hit this when the driver is using a host wide
1530 * tag map. For device level tag maps the queue_depth check
1531 * in the device ready fn would prevent us from trying
1532 * to allocate a tag. Since the map is a shared host resource
1533 * we add the dev to the starved list so it eventually gets
1534 * a run when a tag is freed.
1535 */
1536 if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1537 if (list_empty(&sdev->starved_entry))
1538 list_add_tail(&sdev->starved_entry,
1539 &shost->starved_list);
1540 goto not_ready;
1541 }
1542
1500 if (!scsi_host_queue_ready(q, shost, sdev)) 1543 if (!scsi_host_queue_ready(q, shost, sdev))
1501 goto not_ready; 1544 goto not_ready;
1502 if (scsi_target(sdev)->single_lun) { 1545 if (scsi_target(sdev)->single_lun) {
@@ -2486,7 +2529,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2486 if (unlikely(i == sg_count)) { 2529 if (unlikely(i == sg_count)) {
2487 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " 2530 printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2488 "elements %d\n", 2531 "elements %d\n",
2489 __FUNCTION__, sg_len, *offset, sg_count); 2532 __func__, sg_len, *offset, sg_count);
2490 WARN_ON(1); 2533 WARN_ON(1);
2491 return NULL; 2534 return NULL;
2492 } 2535 }
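The new scsi_request_fn check above covers host-wide (shared) tag maps: if the queue is tagged but this request could not get a tag, the device is parked on the host's starved list and skipped until a tag is freed. Below is a toy model of that decision using a plain array as the starved list; none of the names are block-layer APIs.

#include <stdio.h>

#define MAX_DEVS 4

struct sdev { int id; int starved; };

static struct sdev *starved_list[MAX_DEVS];
static int starved_count;

/* Returns 1 if the request may be dispatched now, 0 if the device
 * must wait for a shared tag to become free. */
static int may_dispatch(struct sdev *dev, int queue_is_tagged, int rq_has_tag)
{
	if (queue_is_tagged && !rq_has_tag) {
		if (!dev->starved) {	/* add once, like the list_empty() check */
			dev->starved = 1;
			starved_list[starved_count++] = dev;
		}
		return 0;
	}
	return 1;
}

int main(void)
{
	struct sdev a = { .id = 0 }, b = { .id = 1 };

	printf("dev0, tagged rq : %d\n", may_dispatch(&a, 1, 1));
	printf("dev1, no tag    : %d\n", may_dispatch(&b, 1, 0));
	printf("starved devices : %d\n", starved_count);
	return 0;
}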
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 370c78cc1cb5..ae7ed9a22662 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -55,7 +55,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
55 if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || 55 if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) ||
56 (skb->len < nlh->nlmsg_len)) { 56 (skb->len < nlh->nlmsg_len)) {
57 printk(KERN_WARNING "%s: discarding partial skb\n", 57 printk(KERN_WARNING "%s: discarding partial skb\n",
58 __FUNCTION__); 58 __func__);
59 return; 59 return;
60 } 60 }
61 61
@@ -82,7 +82,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
82 82
83 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) { 83 if (nlh->nlmsg_len < (sizeof(*nlh) + hdr->msglen)) {
84 printk(KERN_WARNING "%s: discarding partial message\n", 84 printk(KERN_WARNING "%s: discarding partial message\n",
85 __FUNCTION__); 85 __func__);
86 return; 86 return;
87 } 87 }
88 88
@@ -139,7 +139,7 @@ scsi_netlink_init(void)
139 error = netlink_register_notifier(&scsi_netlink_notifier); 139 error = netlink_register_notifier(&scsi_netlink_notifier);
140 if (error) { 140 if (error) {
141 printk(KERN_ERR "%s: register of event handler failed - %d\n", 141 printk(KERN_ERR "%s: register of event handler failed - %d\n",
142 __FUNCTION__, error); 142 __func__, error);
143 return; 143 return;
144 } 144 }
145 145
@@ -148,7 +148,7 @@ scsi_netlink_init(void)
148 THIS_MODULE); 148 THIS_MODULE);
149 if (!scsi_nl_sock) { 149 if (!scsi_nl_sock) {
150 printk(KERN_ERR "%s: register of recieve handler failed\n", 150 printk(KERN_ERR "%s: register of recieve handler failed\n",
151 __FUNCTION__); 151 __func__);
152 netlink_unregister_notifier(&scsi_netlink_notifier); 152 netlink_unregister_notifier(&scsi_netlink_notifier);
153 } 153 }
154 154
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index b33e72516ef8..79f0f7511204 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -77,6 +77,7 @@ extern void scsi_exit_queue(void);
77struct request_queue; 77struct request_queue;
78struct request; 78struct request;
79extern int scsi_prep_fn(struct request_queue *, struct request *); 79extern int scsi_prep_fn(struct request_queue *, struct request *);
80extern struct kmem_cache *scsi_sdb_cache;
80 81
81/* scsi_proc.c */ 82/* scsi_proc.c */
82#ifdef CONFIG_SCSI_PROC_FS 83#ifdef CONFIG_SCSI_PROC_FS
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index e4a0d2f9b357..c6a904a45bf9 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -114,7 +114,7 @@ void scsi_proc_hostdir_add(struct scsi_host_template *sht)
114 sht->proc_dir = proc_mkdir(sht->proc_name, proc_scsi); 114 sht->proc_dir = proc_mkdir(sht->proc_name, proc_scsi);
115 if (!sht->proc_dir) 115 if (!sht->proc_dir)
116 printk(KERN_ERR "%s: proc_mkdir failed for %s\n", 116 printk(KERN_ERR "%s: proc_mkdir failed for %s\n",
117 __FUNCTION__, sht->proc_name); 117 __func__, sht->proc_name);
118 else 118 else
119 sht->proc_dir->owner = sht->module; 119 sht->proc_dir->owner = sht->module;
120 } 120 }
@@ -157,7 +157,7 @@ void scsi_proc_host_add(struct Scsi_Host *shost)
157 sht->proc_dir, proc_scsi_read, shost); 157 sht->proc_dir, proc_scsi_read, shost);
158 if (!p) { 158 if (!p) {
159 printk(KERN_ERR "%s: Failed to register host %d in" 159 printk(KERN_ERR "%s: Failed to register host %d in"
160 "%s\n", __FUNCTION__, shost->host_no, 160 "%s\n", __func__, shost->host_no,
161 sht->proc_name); 161 sht->proc_name);
162 return; 162 return;
163 } 163 }
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 196fe3af0d5e..84b4879cff11 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -318,7 +318,7 @@ out_device_destroy:
318 put_device(&sdev->sdev_gendev); 318 put_device(&sdev->sdev_gendev);
319out: 319out:
320 if (display_failure_msg) 320 if (display_failure_msg)
321 printk(ALLOC_FAILURE_MSG, __FUNCTION__); 321 printk(ALLOC_FAILURE_MSG, __func__);
322 return NULL; 322 return NULL;
323} 323}
324 324
@@ -404,7 +404,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
404 404
405 starget = kzalloc(size, GFP_KERNEL); 405 starget = kzalloc(size, GFP_KERNEL);
406 if (!starget) { 406 if (!starget) {
407 printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__); 407 printk(KERN_ERR "%s: allocation failure\n", __func__);
408 return NULL; 408 return NULL;
409 } 409 }
410 dev = &starget->dev; 410 dev = &starget->dev;
@@ -1337,7 +1337,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
1337 lun_data = kmalloc(length, GFP_ATOMIC | 1337 lun_data = kmalloc(length, GFP_ATOMIC |
1338 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); 1338 (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0));
1339 if (!lun_data) { 1339 if (!lun_data) {
1340 printk(ALLOC_FAILURE_MSG, __FUNCTION__); 1340 printk(ALLOC_FAILURE_MSG, __func__);
1341 goto out; 1341 goto out;
1342 } 1342 }
1343 1343
@@ -1649,7 +1649,7 @@ int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1649{ 1649{
1650 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, 1650 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
1651 "%s: <%u:%u:%u>\n", 1651 "%s: <%u:%u:%u>\n",
1652 __FUNCTION__, channel, id, lun)); 1652 __func__, channel, id, lun));
1653 1653
1654 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || 1654 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1655 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || 1655 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
@@ -1703,7 +1703,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1703 return NULL; 1703 return NULL;
1704 1704
1705 if (shost->async_scan) { 1705 if (shost->async_scan) {
1706 printk("%s called twice for host %d", __FUNCTION__, 1706 printk("%s called twice for host %d", __func__,
1707 shost->host_no); 1707 shost->host_no);
1708 dump_stack(); 1708 dump_stack();
1709 return NULL; 1709 return NULL;
@@ -1757,9 +1757,10 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
1757 mutex_lock(&shost->scan_mutex); 1757 mutex_lock(&shost->scan_mutex);
1758 1758
1759 if (!shost->async_scan) { 1759 if (!shost->async_scan) {
1760 printk("%s called twice for host %d", __FUNCTION__, 1760 printk("%s called twice for host %d", __func__,
1761 shost->host_no); 1761 shost->host_no);
1762 dump_stack(); 1762 dump_stack();
1763 mutex_unlock(&shost->scan_mutex);
1763 return; 1764 return;
1764 } 1765 }
1765 1766
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b6e561059779..ab3c71869be5 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -249,6 +249,8 @@ shost_rd_attr(cmd_per_lun, "%hd\n");
249shost_rd_attr(can_queue, "%hd\n"); 249shost_rd_attr(can_queue, "%hd\n");
250shost_rd_attr(sg_tablesize, "%hu\n"); 250shost_rd_attr(sg_tablesize, "%hu\n");
251shost_rd_attr(unchecked_isa_dma, "%d\n"); 251shost_rd_attr(unchecked_isa_dma, "%d\n");
252shost_rd_attr(prot_capabilities, "%u\n");
253shost_rd_attr(prot_guard_type, "%hd\n");
252shost_rd_attr2(proc_name, hostt->proc_name, "%s\n"); 254shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
253 255
254static struct attribute *scsi_sysfs_shost_attrs[] = { 256static struct attribute *scsi_sysfs_shost_attrs[] = {
@@ -263,6 +265,8 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
263 &dev_attr_hstate.attr, 265 &dev_attr_hstate.attr,
264 &dev_attr_supported_mode.attr, 266 &dev_attr_supported_mode.attr,
265 &dev_attr_active_mode.attr, 267 &dev_attr_active_mode.attr,
268 &dev_attr_prot_capabilities.attr,
269 &dev_attr_prot_guard_type.attr,
266 NULL 270 NULL
267}; 271};
268 272
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
index cb92888948f9..fe4c62177f78 100644
--- a/drivers/scsi/scsi_tgt_priv.h
+++ b/drivers/scsi/scsi_tgt_priv.h
@@ -6,7 +6,7 @@ struct task_struct;
6/* tmp - will replace with SCSI logging stuff */ 6/* tmp - will replace with SCSI logging stuff */
7#define eprintk(fmt, args...) \ 7#define eprintk(fmt, args...) \
8do { \ 8do { \
9 printk("%s(%d) " fmt, __FUNCTION__, __LINE__, ##args); \ 9 printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
10} while (0) 10} while (0)
11 11
12#define dprintk(fmt, args...) 12#define dprintk(fmt, args...)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a272b9a2c869..56823fd1fb84 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -571,7 +571,7 @@ send_fail:
571 name = get_fc_host_event_code_name(event_code); 571 name = get_fc_host_event_code_name(event_code);
572 printk(KERN_WARNING 572 printk(KERN_WARNING
573 "%s: Dropped Event : host %d %s data 0x%08x - err %d\n", 573 "%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
574 __FUNCTION__, shost->host_no, 574 __func__, shost->host_no,
575 (name) ? name : "<unknown>", event_data, err); 575 (name) ? name : "<unknown>", event_data, err);
576 return; 576 return;
577} 577}
@@ -644,7 +644,7 @@ send_vendor_fail_skb:
644send_vendor_fail: 644send_vendor_fail:
645 printk(KERN_WARNING 645 printk(KERN_WARNING
646 "%s: Dropped Event : host %d vendor_unique - err %d\n", 646 "%s: Dropped Event : host %d vendor_unique - err %d\n",
647 __FUNCTION__, shost->host_no, err); 647 __func__, shost->host_no, err);
648 return; 648 return;
649} 649}
650EXPORT_SYMBOL(fc_host_post_vendor_event); 650EXPORT_SYMBOL(fc_host_post_vendor_event);
@@ -2464,7 +2464,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
2464 size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size); 2464 size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
2465 rport = kzalloc(size, GFP_KERNEL); 2465 rport = kzalloc(size, GFP_KERNEL);
2466 if (unlikely(!rport)) { 2466 if (unlikely(!rport)) {
2467 printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__); 2467 printk(KERN_ERR "%s: allocation failure\n", __func__);
2468 return NULL; 2468 return NULL;
2469 } 2469 }
2470 2470
@@ -3137,7 +3137,7 @@ fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
3137 size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size); 3137 size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
3138 vport = kzalloc(size, GFP_KERNEL); 3138 vport = kzalloc(size, GFP_KERNEL);
3139 if (unlikely(!vport)) { 3139 if (unlikely(!vport)) {
3140 printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__); 3140 printk(KERN_ERR "%s: allocation failure\n", __func__);
3141 return -ENOMEM; 3141 return -ENOMEM;
3142 } 3142 }
3143 3143
@@ -3201,7 +3201,7 @@ fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
3201 printk(KERN_ERR 3201 printk(KERN_ERR
3202 "%s: Cannot create vport symlinks for " 3202 "%s: Cannot create vport symlinks for "
3203 "%s, err=%d\n", 3203 "%s, err=%d\n",
3204 __FUNCTION__, dev->bus_id, error); 3204 __func__, dev->bus_id, error);
3205 } 3205 }
3206 spin_lock_irqsave(shost->host_lock, flags); 3206 spin_lock_irqsave(shost->host_lock, flags);
3207 vport->flags &= ~FC_VPORT_CREATING; 3207 vport->flags &= ~FC_VPORT_CREATING;
@@ -3314,7 +3314,7 @@ fc_vport_sched_delete(struct work_struct *work)
3314 if (stat) 3314 if (stat)
3315 dev_printk(KERN_ERR, vport->dev.parent, 3315 dev_printk(KERN_ERR, vport->dev.parent,
3316 "%s: %s could not be deleted created via " 3316 "%s: %s could not be deleted created via "
3317 "shost%d channel %d - error %d\n", __FUNCTION__, 3317 "shost%d channel %d - error %d\n", __func__,
3318 vport->dev.bus_id, vport->shost->host_no, 3318 vport->dev.bus_id, vport->shost->host_no,
3319 vport->channel, stat); 3319 vport->channel, stat);
3320} 3320}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index f4461d35ffb9..366609386be1 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -779,7 +779,7 @@ static void sas_port_create_link(struct sas_port *port,
779 return; 779 return;
780err: 780err:
781 printk(KERN_ERR "%s: Cannot create port links, err=%d\n", 781 printk(KERN_ERR "%s: Cannot create port links, err=%d\n",
782 __FUNCTION__, res); 782 __func__, res);
783} 783}
784 784
785static void sas_port_delete_link(struct sas_port *port, 785static void sas_port_delete_link(struct sas_port *port,
@@ -1029,7 +1029,7 @@ void sas_port_mark_backlink(struct sas_port *port)
1029 return; 1029 return;
1030err: 1030err:
1031 printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n", 1031 printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n",
1032 __FUNCTION__, res); 1032 __func__, res);
1033 1033
1034} 1034}
1035EXPORT_SYMBOL(sas_port_mark_backlink); 1035EXPORT_SYMBOL(sas_port_mark_backlink);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 0c63947d8a9d..e5e7d7856454 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -99,8 +99,7 @@ static void scsi_disk_release(struct device *cdev);
99static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); 99static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
100static void sd_print_result(struct scsi_disk *, int); 100static void sd_print_result(struct scsi_disk *, int);
101 101
102static DEFINE_IDR(sd_index_idr); 102static DEFINE_IDA(sd_index_ida);
103static DEFINE_SPINLOCK(sd_index_lock);
104 103
105/* This semaphore is used to mediate the 0->1 reference get in the 104/* This semaphore is used to mediate the 0->1 reference get in the
106 * face of object destruction (i.e. we can't allow a get on an 105 * face of object destruction (i.e. we can't allow a get on an
@@ -234,6 +233,24 @@ sd_show_allow_restart(struct device *dev, struct device_attribute *attr,
234 return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart); 233 return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
235} 234}
236 235
236static ssize_t
237sd_show_protection_type(struct device *dev, struct device_attribute *attr,
238 char *buf)
239{
240 struct scsi_disk *sdkp = to_scsi_disk(dev);
241
242 return snprintf(buf, 20, "%u\n", sdkp->protection_type);
243}
244
245static ssize_t
246sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
247 char *buf)
248{
249 struct scsi_disk *sdkp = to_scsi_disk(dev);
250
251 return snprintf(buf, 20, "%u\n", sdkp->ATO);
252}
253
237static struct device_attribute sd_disk_attrs[] = { 254static struct device_attribute sd_disk_attrs[] = {
238 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type, 255 __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
239 sd_store_cache_type), 256 sd_store_cache_type),
@@ -242,6 +259,8 @@ static struct device_attribute sd_disk_attrs[] = {
242 sd_store_allow_restart), 259 sd_store_allow_restart),
243 __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop, 260 __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
244 sd_store_manage_start_stop), 261 sd_store_manage_start_stop),
262 __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
263 __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
245 __ATTR_NULL, 264 __ATTR_NULL,
246}; 265};
247 266
@@ -354,7 +373,9 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
354 struct scsi_cmnd *SCpnt; 373 struct scsi_cmnd *SCpnt;
355 struct scsi_device *sdp = q->queuedata; 374 struct scsi_device *sdp = q->queuedata;
356 struct gendisk *disk = rq->rq_disk; 375 struct gendisk *disk = rq->rq_disk;
376 struct scsi_disk *sdkp;
357 sector_t block = rq->sector; 377 sector_t block = rq->sector;
378 sector_t threshold;
358 unsigned int this_count = rq->nr_sectors; 379 unsigned int this_count = rq->nr_sectors;
359 unsigned int timeout = sdp->timeout; 380 unsigned int timeout = sdp->timeout;
360 int ret; 381 int ret;
@@ -370,6 +391,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
370 if (ret != BLKPREP_OK) 391 if (ret != BLKPREP_OK)
371 goto out; 392 goto out;
372 SCpnt = rq->special; 393 SCpnt = rq->special;
394 sdkp = scsi_disk(disk);
373 395
374 /* from here on until we're complete, any goto out 396 /* from here on until we're complete, any goto out
375 * is used for a killable error condition */ 397 * is used for a killable error condition */
@@ -401,13 +423,21 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
401 } 423 }
402 424
403 /* 425 /*
404 * Some devices (some sdcards for one) don't like it if the 426 * Some SD card readers can't handle multi-sector accesses which touch
405 * last sector gets read in a larger then 1 sector read. 427 * the last one or two hardware sectors. Split accesses as needed.
406 */ 428 */
407 if (unlikely(sdp->last_sector_bug && 429 threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS *
408 rq->nr_sectors > sdp->sector_size / 512 && 430 (sdp->sector_size / 512);
409 block + this_count == get_capacity(disk))) 431
410 this_count -= sdp->sector_size / 512; 432 if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) {
433 if (block < threshold) {
434 /* Access up to the threshold but not beyond */
435 this_count = threshold - block;
436 } else {
437 /* Access only a single hardware sector */
438 this_count = sdp->sector_size / 512;
439 }
440 }
411 441
412 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n", 442 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n",
413 (unsigned long long)block)); 443 (unsigned long long)block));
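The reworked last_sector_bug handling above computes a threshold SD_LAST_BUGGY_SECTORS hardware sectors below the end of the medium (expressed in 512-byte units) and either clips the transfer at the threshold or, when the transfer already starts past it, shrinks it to a single hardware sector. A stand-alone sketch of the same arithmetic with made-up capacity and sector-size values:

#include <stdio.h>

#define SD_LAST_BUGGY_SECTORS 8

/* block, count and capacity are in 512-byte units; returns the
 * possibly reduced transfer length. */
static unsigned int clamp_last_sectors(unsigned long long block,
				       unsigned int count,
				       unsigned long long capacity,
				       unsigned int hw_sector_size)
{
	unsigned int hw_in_512 = hw_sector_size / 512;
	unsigned long long threshold =
		capacity - (unsigned long long)SD_LAST_BUGGY_SECTORS * hw_in_512;

	if (block + count > threshold) {
		if (block < threshold)
			count = threshold - block;	/* stop at the threshold  */
		else
			count = hw_in_512;		/* single hardware sector */
	}
	return count;
}

int main(void)
{
	unsigned long long cap = 1000;	/* 1000 x 512-byte sectors, made up */

	printf("%u\n", clamp_last_sectors(900, 50, cap, 512));	/* untouched: 50 */
	printf("%u\n", clamp_last_sectors(980, 30, cap, 512));	/* clipped: 12   */
	printf("%u\n", clamp_last_sectors(995, 4, cap, 512));	/* single: 1     */
	return 0;
}

With a 1000-sector disk and 512-byte hardware sectors the three calls print 50, 12 and 1.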
@@ -459,6 +489,11 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
459 } 489 }
460 SCpnt->cmnd[0] = WRITE_6; 490 SCpnt->cmnd[0] = WRITE_6;
461 SCpnt->sc_data_direction = DMA_TO_DEVICE; 491 SCpnt->sc_data_direction = DMA_TO_DEVICE;
492
493 if (blk_integrity_rq(rq) &&
494 sd_dif_prepare(rq, block, sdp->sector_size) == -EIO)
495 goto out;
496
462 } else if (rq_data_dir(rq) == READ) { 497 } else if (rq_data_dir(rq) == READ) {
463 SCpnt->cmnd[0] = READ_6; 498 SCpnt->cmnd[0] = READ_6;
464 SCpnt->sc_data_direction = DMA_FROM_DEVICE; 499 SCpnt->sc_data_direction = DMA_FROM_DEVICE;
@@ -473,8 +508,12 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
473 "writing" : "reading", this_count, 508 "writing" : "reading", this_count,
474 rq->nr_sectors)); 509 rq->nr_sectors));
475 510
476 SCpnt->cmnd[1] = 0; 511 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
477 512 if (scsi_host_dif_capable(sdp->host, sdkp->protection_type))
513 SCpnt->cmnd[1] = 1 << 5;
514 else
515 SCpnt->cmnd[1] = 0;
516
478 if (block > 0xffffffff) { 517 if (block > 0xffffffff) {
479 SCpnt->cmnd[0] += READ_16 - READ_6; 518 SCpnt->cmnd[0] += READ_16 - READ_6;
480 SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0; 519 SCpnt->cmnd[1] |= blk_fua_rq(rq) ? 0x8 : 0;
@@ -492,6 +531,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
492 SCpnt->cmnd[13] = (unsigned char) this_count & 0xff; 531 SCpnt->cmnd[13] = (unsigned char) this_count & 0xff;
493 SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0; 532 SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0;
494 } else if ((this_count > 0xff) || (block > 0x1fffff) || 533 } else if ((this_count > 0xff) || (block > 0x1fffff) ||
534 scsi_device_protection(SCpnt->device) ||
495 SCpnt->device->use_10_for_rw) { 535 SCpnt->device->use_10_for_rw) {
496 if (this_count > 0xffff) 536 if (this_count > 0xffff)
497 this_count = 0xffff; 537 this_count = 0xffff;
@@ -526,6 +566,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
526 } 566 }
527 SCpnt->sdb.length = this_count * sdp->sector_size; 567 SCpnt->sdb.length = this_count * sdp->sector_size;
528 568
569 /* If DIF or DIX is enabled, tell HBA how to handle request */
570 if (sdkp->protection_type || scsi_prot_sg_count(SCpnt))
571 sd_dif_op(SCpnt, sdkp->protection_type, scsi_prot_sg_count(SCpnt));
572
529 /* 573 /*
530 * We shouldn't disconnect in the middle of a sector, so with a dumb 574 * We shouldn't disconnect in the middle of a sector, so with a dumb
531 * host adapter, it's safe to assume that we can at least transfer 575 * host adapter, it's safe to assume that we can at least transfer
@@ -920,6 +964,48 @@ static struct block_device_operations sd_fops = {
920 .revalidate_disk = sd_revalidate_disk, 964 .revalidate_disk = sd_revalidate_disk,
921}; 965};
922 966
967static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
968{
969 u64 start_lba = scmd->request->sector;
970 u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
971 u64 bad_lba;
972 int info_valid;
973
974 if (!blk_fs_request(scmd->request))
975 return 0;
976
977 info_valid = scsi_get_sense_info_fld(scmd->sense_buffer,
978 SCSI_SENSE_BUFFERSIZE,
979 &bad_lba);
980 if (!info_valid)
981 return 0;
982
983 if (scsi_bufflen(scmd) <= scmd->device->sector_size)
984 return 0;
985
986 if (scmd->device->sector_size < 512) {
987 /* only legitimate sector_size here is 256 */
988 start_lba <<= 1;
989 end_lba <<= 1;
990 } else {
991 /* be careful ... don't want any overflows */
992 u64 factor = scmd->device->sector_size / 512;
993 do_div(start_lba, factor);
994 do_div(end_lba, factor);
995 }
996
 997 /* The bad lba was reported incorrectly; we have no idea where
998 * the error is.
999 */
1000 if (bad_lba < start_lba || bad_lba >= end_lba)
1001 return 0;
1002
1003 /* This computation should always be done in terms of
1004 * the resolution of the device's medium.
1005 */
1006 return (bad_lba - start_lba) * scmd->device->sector_size;
1007}
1008
923/** 1009/**
924 * sd_done - bottom half handler: called when the lower level 1010 * sd_done - bottom half handler: called when the lower level
925 * driver has completed (successfully or otherwise) a scsi command. 1011 * driver has completed (successfully or otherwise) a scsi command.
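sd_completed_bytes, added above, turns the sense information field (the first bad LBA) into the number of bytes that completed before the failure by scaling the request's start and end to the device's logical block size. A user-space version of that computation; the helper name and the sample request are illustrative, and the special case for 256-byte sectors is omitted.

#include <stdio.h>

/* Returns the number of bytes that transferred cleanly before bad_lba,
 * or 0 if bad_lba does not fall inside the request at all. */
static unsigned long long good_bytes(unsigned long long start_sector_512,
				     unsigned int bufflen,
				     unsigned int sector_size,
				     unsigned long long bad_lba)
{
	unsigned long long factor = sector_size / 512;
	unsigned long long start_lba = start_sector_512 / factor;
	unsigned long long end_lba =
		(start_sector_512 + bufflen / 512) / factor;

	if (bad_lba < start_lba || bad_lba >= end_lba)
		return 0;	/* reported LBA is outside this request */

	return (bad_lba - start_lba) * sector_size;
}

int main(void)
{
	/* 64 KiB read at 512-byte sector 1000 on a 4 KiB-block device;
	 * the device reports the 5th logical block of the request as bad. */
	printf("%llu\n", good_bytes(1000, 65536, 4096, 1000 / 8 + 5));
	return 0;
}

The example prints 20480: five 4 KiB blocks completed before the reported bad LBA.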
@@ -930,15 +1016,10 @@ static struct block_device_operations sd_fops = {
930static int sd_done(struct scsi_cmnd *SCpnt) 1016static int sd_done(struct scsi_cmnd *SCpnt)
931{ 1017{
932 int result = SCpnt->result; 1018 int result = SCpnt->result;
933 unsigned int xfer_size = scsi_bufflen(SCpnt); 1019 unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
934 unsigned int good_bytes = result ? 0 : xfer_size;
935 u64 start_lba = SCpnt->request->sector;
936 u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
937 u64 bad_lba;
938 struct scsi_sense_hdr sshdr; 1020 struct scsi_sense_hdr sshdr;
939 int sense_valid = 0; 1021 int sense_valid = 0;
940 int sense_deferred = 0; 1022 int sense_deferred = 0;
941 int info_valid;
942 1023
943 if (result) { 1024 if (result) {
944 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); 1025 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
@@ -963,36 +1044,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
963 switch (sshdr.sense_key) { 1044 switch (sshdr.sense_key) {
964 case HARDWARE_ERROR: 1045 case HARDWARE_ERROR:
965 case MEDIUM_ERROR: 1046 case MEDIUM_ERROR:
966 if (!blk_fs_request(SCpnt->request)) 1047 good_bytes = sd_completed_bytes(SCpnt);
967 goto out;
968 info_valid = scsi_get_sense_info_fld(SCpnt->sense_buffer,
969 SCSI_SENSE_BUFFERSIZE,
970 &bad_lba);
971 if (!info_valid)
972 goto out;
973 if (xfer_size <= SCpnt->device->sector_size)
974 goto out;
975 if (SCpnt->device->sector_size < 512) {
976 /* only legitimate sector_size here is 256 */
977 start_lba <<= 1;
978 end_lba <<= 1;
979 } else {
980 /* be careful ... don't want any overflows */
981 u64 factor = SCpnt->device->sector_size / 512;
982 do_div(start_lba, factor);
983 do_div(end_lba, factor);
984 }
985
986 if (bad_lba < start_lba || bad_lba >= end_lba)
987 /* the bad lba was reported incorrectly, we have
988 * no idea where the error is
989 */
990 goto out;
991
992 /* This computation should always be done in terms of
993 * the resolution of the device's medium.
994 */
995 good_bytes = (bad_lba - start_lba)*SCpnt->device->sector_size;
996 break; 1048 break;
997 case RECOVERED_ERROR: 1049 case RECOVERED_ERROR:
998 case NO_SENSE: 1050 case NO_SENSE:
@@ -1002,10 +1054,23 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1002 scsi_print_sense("sd", SCpnt); 1054 scsi_print_sense("sd", SCpnt);
1003 SCpnt->result = 0; 1055 SCpnt->result = 0;
1004 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1056 memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1005 good_bytes = xfer_size; 1057 good_bytes = scsi_bufflen(SCpnt);
1058 break;
1059 case ABORTED_COMMAND:
1060 if (sshdr.asc == 0x10) { /* DIF: Disk detected corruption */
1061 scsi_print_result(SCpnt);
1062 scsi_print_sense("sd", SCpnt);
1063 good_bytes = sd_completed_bytes(SCpnt);
1064 }
1006 break; 1065 break;
1007 case ILLEGAL_REQUEST: 1066 case ILLEGAL_REQUEST:
1008 if (SCpnt->device->use_10_for_rw && 1067 if (sshdr.asc == 0x10) { /* DIX: HBA detected corruption */
1068 scsi_print_result(SCpnt);
1069 scsi_print_sense("sd", SCpnt);
1070 good_bytes = sd_completed_bytes(SCpnt);
1071 }
1072 if (!scsi_device_protection(SCpnt->device) &&
1073 SCpnt->device->use_10_for_rw &&
1009 (SCpnt->cmnd[0] == READ_10 || 1074 (SCpnt->cmnd[0] == READ_10 ||
1010 SCpnt->cmnd[0] == WRITE_10)) 1075 SCpnt->cmnd[0] == WRITE_10))
1011 SCpnt->device->use_10_for_rw = 0; 1076 SCpnt->device->use_10_for_rw = 0;
@@ -1018,6 +1083,9 @@ static int sd_done(struct scsi_cmnd *SCpnt)
1018 break; 1083 break;
1019 } 1084 }
1020 out: 1085 out:
1086 if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
1087 sd_dif_complete(SCpnt, good_bytes);
1088
1021 return good_bytes; 1089 return good_bytes;
1022} 1090}
1023 1091
@@ -1165,6 +1233,49 @@ sd_spinup_disk(struct scsi_disk *sdkp)
1165 } 1233 }
1166} 1234}
1167 1235
1236
1237/*
1238 * Determine whether disk supports Data Integrity Field.
1239 */
1240void sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
1241{
1242 struct scsi_device *sdp = sdkp->device;
1243 u8 type;
1244
1245 if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
1246 type = 0;
1247 else
1248 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
1249
1250 switch (type) {
1251 case SD_DIF_TYPE0_PROTECTION:
1252 sdkp->protection_type = 0;
1253 break;
1254
1255 case SD_DIF_TYPE1_PROTECTION:
1256 case SD_DIF_TYPE3_PROTECTION:
1257 sdkp->protection_type = type;
1258 break;
1259
1260 case SD_DIF_TYPE2_PROTECTION:
1261 sd_printk(KERN_ERR, sdkp, "formatted with DIF Type 2 " \
1262 "protection which is currently unsupported. " \
1263 "Disabling disk!\n");
1264 goto disable;
1265
1266 default:
1267 sd_printk(KERN_ERR, sdkp, "formatted with unknown " \
1268 "protection type %d. Disabling disk!\n", type);
1269 goto disable;
1270 }
1271
1272 return;
1273
1274disable:
1275 sdkp->protection_type = 0;
1276 sdkp->capacity = 0;
1277}
1278
1168/* 1279/*
1169 * read disk capacity 1280 * read disk capacity
1170 */ 1281 */
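sd_read_protection_type, added above, inspects byte 12 of the READ CAPACITY(16) response: bit 0 (PROT_EN) says whether the format carries protection information at all, and bits 1..3 (P_TYPE) encode the DIF type minus one. A small decoder with fabricated response bytes:

#include <stdio.h>

/* Returns 0 for an unprotected disk, otherwise the DIF type (1..3).
 * byte12 is byte 12 of a READ CAPACITY(16) response. */
static unsigned int dif_type(unsigned char byte12)
{
	if ((byte12 & 0x01) == 0)		/* PROT_EN clear */
		return 0;
	return ((byte12 >> 1) & 0x7) + 1;	/* P_TYPE 0 => Type 1 */
}

int main(void)
{
	printf("0x00 -> type %u\n", dif_type(0x00));	/* unprotected */
	printf("0x01 -> type %u\n", dif_type(0x01));	/* Type 1      */
	printf("0x05 -> type %u\n", dif_type(0x05));	/* Type 3      */
	return 0;
}

The driver additionally refuses Type 2 disks, since 32-byte commands are not supported here.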
@@ -1174,7 +1285,8 @@ sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
1174 unsigned char cmd[16]; 1285 unsigned char cmd[16];
1175 int the_result, retries; 1286 int the_result, retries;
1176 int sector_size = 0; 1287 int sector_size = 0;
1177 int longrc = 0; 1288 /* Force READ CAPACITY(16) when PROTECT=1 */
1289 int longrc = scsi_device_protection(sdkp->device) ? 1 : 0;
1178 struct scsi_sense_hdr sshdr; 1290 struct scsi_sense_hdr sshdr;
1179 int sense_valid = 0; 1291 int sense_valid = 0;
1180 struct scsi_device *sdp = sdkp->device; 1292 struct scsi_device *sdp = sdkp->device;
@@ -1186,8 +1298,8 @@ repeat:
1186 memset((void *) cmd, 0, 16); 1298 memset((void *) cmd, 0, 16);
1187 cmd[0] = SERVICE_ACTION_IN; 1299 cmd[0] = SERVICE_ACTION_IN;
1188 cmd[1] = SAI_READ_CAPACITY_16; 1300 cmd[1] = SAI_READ_CAPACITY_16;
1189 cmd[13] = 12; 1301 cmd[13] = 13;
1190 memset((void *) buffer, 0, 12); 1302 memset((void *) buffer, 0, 13);
1191 } else { 1303 } else {
1192 cmd[0] = READ_CAPACITY; 1304 cmd[0] = READ_CAPACITY;
1193 memset((void *) &cmd[1], 0, 9); 1305 memset((void *) &cmd[1], 0, 9);
@@ -1195,7 +1307,7 @@ repeat:
1195 } 1307 }
1196 1308
1197 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE, 1309 the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
1198 buffer, longrc ? 12 : 8, &sshdr, 1310 buffer, longrc ? 13 : 8, &sshdr,
1199 SD_TIMEOUT, SD_MAX_RETRIES); 1311 SD_TIMEOUT, SD_MAX_RETRIES);
1200 1312
1201 if (media_not_present(sdkp, &sshdr)) 1313 if (media_not_present(sdkp, &sshdr))
@@ -1270,6 +1382,8 @@ repeat:
1270 1382
1271 sector_size = (buffer[8] << 24) | 1383 sector_size = (buffer[8] << 24) |
1272 (buffer[9] << 16) | (buffer[10] << 8) | buffer[11]; 1384 (buffer[9] << 16) | (buffer[10] << 8) | buffer[11];
1385
1386 sd_read_protection_type(sdkp, buffer);
1273 } 1387 }
1274 1388
1275 /* Some devices return the total number of sectors, not the 1389 /* Some devices return the total number of sectors, not the
@@ -1531,6 +1645,52 @@ defaults:
1531 sdkp->DPOFUA = 0; 1645 sdkp->DPOFUA = 0;
1532} 1646}
1533 1647
1648/*
1649 * The ATO bit indicates whether the DIF application tag is available
1650 * for use by the operating system.
1651 */
1652void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
1653{
1654 int res, offset;
1655 struct scsi_device *sdp = sdkp->device;
1656 struct scsi_mode_data data;
1657 struct scsi_sense_hdr sshdr;
1658
1659 if (sdp->type != TYPE_DISK)
1660 return;
1661
1662 if (sdkp->protection_type == 0)
1663 return;
1664
1665 res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
1666 SD_MAX_RETRIES, &data, &sshdr);
1667
1668 if (!scsi_status_is_good(res) || !data.header_length ||
1669 data.length < 6) {
1670 sd_printk(KERN_WARNING, sdkp,
1671 "getting Control mode page failed, assume no ATO\n");
1672
1673 if (scsi_sense_valid(&sshdr))
1674 sd_print_sense_hdr(sdkp, &sshdr);
1675
1676 return;
1677 }
1678
1679 offset = data.header_length + data.block_descriptor_length;
1680
1681 if ((buffer[offset] & 0x3f) != 0x0a) {
1682 sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
1683 return;
1684 }
1685
1686 if ((buffer[offset + 5] & 0x80) == 0)
1687 return;
1688
1689 sdkp->ATO = 1;
1690
1691 return;
1692}
1693
1534/** 1694/**
1535 * sd_revalidate_disk - called the first time a new disk is seen, 1695 * sd_revalidate_disk - called the first time a new disk is seen,
1536 * performs disk spin up, read_capacity, etc. 1696 * performs disk spin up, read_capacity, etc.
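sd_read_app_tag_own, added above, reads the Control mode page (0x0A), skips the mode parameter header and any block descriptors, checks the page code and then tests bit 7 of byte 5 of the page (the ATO bit). The sketch below performs the same parse on a hand-built buffer; the 4-byte header and zero block-descriptor length mimic a MODE SENSE(6) reply and are assumptions, not the driver's scsi_mode_sense() plumbing.

#include <stdio.h>

/* Returns 1 if the ATO bit is set in the Control mode page, 0 otherwise.
 * buf points at a MODE SENSE reply; hdr_len and bdesc_len come from the
 * mode parameter header. */
static int ato_from_mode_sense(const unsigned char *buf, unsigned int buflen,
			       unsigned int hdr_len, unsigned int bdesc_len)
{
	unsigned int off = hdr_len + bdesc_len;

	if (off + 6 > buflen)
		return 0;
	if ((buf[off] & 0x3f) != 0x0a)	/* not the Control mode page */
		return 0;
	return (buf[off + 5] & 0x80) ? 1 : 0;
}

int main(void)
{
	/* Fabricated MODE SENSE(6) reply: 4-byte header, no block
	 * descriptors, Control mode page with ATO set. */
	unsigned char buf[16] = { 0 };

	buf[4] = 0x0a;		/* page code 0x0A          */
	buf[5] = 0x0a;		/* page length             */
	buf[4 + 5] = 0x80;	/* byte 5 of page: ATO = 1 */

	printf("ATO = %d\n", ato_from_mode_sense(buf, sizeof(buf), 4, 0));
	return 0;
}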
@@ -1567,6 +1727,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
1567 sdkp->write_prot = 0; 1727 sdkp->write_prot = 0;
1568 sdkp->WCE = 0; 1728 sdkp->WCE = 0;
1569 sdkp->RCD = 0; 1729 sdkp->RCD = 0;
1730 sdkp->ATO = 0;
1570 1731
1571 sd_spinup_disk(sdkp); 1732 sd_spinup_disk(sdkp);
1572 1733
@@ -1578,6 +1739,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
1578 sd_read_capacity(sdkp, buffer); 1739 sd_read_capacity(sdkp, buffer);
1579 sd_read_write_protect_flag(sdkp, buffer); 1740 sd_read_write_protect_flag(sdkp, buffer);
1580 sd_read_cache_type(sdkp, buffer); 1741 sd_read_cache_type(sdkp, buffer);
1742 sd_read_app_tag_own(sdkp, buffer);
1581 } 1743 }
1582 1744
1583 /* 1745 /*
@@ -1643,18 +1805,20 @@ static int sd_probe(struct device *dev)
1643 if (!gd) 1805 if (!gd)
1644 goto out_free; 1806 goto out_free;
1645 1807
1646 if (!idr_pre_get(&sd_index_idr, GFP_KERNEL)) 1808 do {
1647 goto out_put; 1809 if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
1810 goto out_put;
1648 1811
1649 spin_lock(&sd_index_lock); 1812 error = ida_get_new(&sd_index_ida, &index);
1650 error = idr_get_new(&sd_index_idr, NULL, &index); 1813 } while (error == -EAGAIN);
1651 spin_unlock(&sd_index_lock);
1652 1814
1653 if (index >= SD_MAX_DISKS)
1654 error = -EBUSY;
1655 if (error) 1815 if (error)
1656 goto out_put; 1816 goto out_put;
1657 1817
1818 error = -EBUSY;
1819 if (index >= SD_MAX_DISKS)
1820 goto out_free_index;
1821
1658 sdkp->device = sdp; 1822 sdkp->device = sdp;
1659 sdkp->driver = &sd_template; 1823 sdkp->driver = &sd_template;
1660 sdkp->disk = gd; 1824 sdkp->disk = gd;
@@ -1675,7 +1839,7 @@ static int sd_probe(struct device *dev)
1675 strncpy(sdkp->dev.bus_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE); 1839 strncpy(sdkp->dev.bus_id, sdp->sdev_gendev.bus_id, BUS_ID_SIZE);
1676 1840
1677 if (device_add(&sdkp->dev)) 1841 if (device_add(&sdkp->dev))
1678 goto out_put; 1842 goto out_free_index;
1679 1843
1680 get_device(&sdp->sdev_gendev); 1844 get_device(&sdp->sdev_gendev);
1681 1845
@@ -1711,12 +1875,15 @@ static int sd_probe(struct device *dev)
1711 1875
1712 dev_set_drvdata(dev, sdkp); 1876 dev_set_drvdata(dev, sdkp);
1713 add_disk(gd); 1877 add_disk(gd);
1878 sd_dif_config_host(sdkp);
1714 1879
1715 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", 1880 sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
1716 sdp->removable ? "removable " : ""); 1881 sdp->removable ? "removable " : "");
1717 1882
1718 return 0; 1883 return 0;
1719 1884
1885 out_free_index:
1886 ida_remove(&sd_index_ida, index);
1720 out_put: 1887 out_put:
1721 put_disk(gd); 1888 put_disk(gd);
1722 out_free: 1889 out_free:
@@ -1766,9 +1933,7 @@ static void scsi_disk_release(struct device *dev)
1766 struct scsi_disk *sdkp = to_scsi_disk(dev); 1933 struct scsi_disk *sdkp = to_scsi_disk(dev);
1767 struct gendisk *disk = sdkp->disk; 1934 struct gendisk *disk = sdkp->disk;
1768 1935
1769 spin_lock(&sd_index_lock); 1936 ida_remove(&sd_index_ida, sdkp->index);
1770 idr_remove(&sd_index_idr, sdkp->index);
1771 spin_unlock(&sd_index_lock);
1772 1937
1773 disk->private_data = NULL; 1938 disk->private_data = NULL;
1774 put_disk(disk); 1939 put_disk(disk);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 03a3d45cfa42..95b9f06534d5 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -31,6 +31,12 @@
31 */ 31 */
32#define SD_BUF_SIZE 512 32#define SD_BUF_SIZE 512
33 33
34/*
35 * Number of sectors at the end of the device that multi-sector
36 * accesses should avoid when last_sector_bug is set
37 */
38#define SD_LAST_BUGGY_SECTORS 8
39
34struct scsi_disk { 40struct scsi_disk {
35 struct scsi_driver *driver; /* always &sd_template */ 41 struct scsi_driver *driver; /* always &sd_template */
36 struct scsi_device *device; 42 struct scsi_device *device;
@@ -41,7 +47,9 @@ struct scsi_disk {
41 u32 index; 47 u32 index;
42 u8 media_present; 48 u8 media_present;
43 u8 write_prot; 49 u8 write_prot;
50 u8 protection_type;/* Data Integrity Field */
44 unsigned previous_state : 1; 51 unsigned previous_state : 1;
52 unsigned ATO : 1; /* state of disk ATO bit */
45 unsigned WCE : 1; /* state of disk WCE bit */ 53 unsigned WCE : 1; /* state of disk WCE bit */
46 unsigned RCD : 1; /* state of disk RCD bit, unused */ 54 unsigned RCD : 1; /* state of disk RCD bit, unused */
47 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ 55 unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
@@ -59,4 +67,50 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
59 (sdsk)->disk->disk_name, ##a) : \ 67 (sdsk)->disk->disk_name, ##a) : \
60 sdev_printk(prefix, (sdsk)->device, fmt, ##a) 68 sdev_printk(prefix, (sdsk)->device, fmt, ##a)
61 69
70/*
71 * A DIF-capable target device can be formatted with different
72 * protection schemes. Currently 0 through 3 are defined:
73 *
74 * Type 0 is regular (unprotected) I/O
75 *
76 * Type 1 defines the contents of the guard and reference tags
77 *
78 * Type 2 defines the contents of the guard and reference tags and
79 * uses 32-byte commands to seed the latter
80 *
81 * Type 3 defines the contents of the guard tag only
82 */
83
84enum sd_dif_target_protection_types {
85 SD_DIF_TYPE0_PROTECTION = 0x0,
86 SD_DIF_TYPE1_PROTECTION = 0x1,
87 SD_DIF_TYPE2_PROTECTION = 0x2,
88 SD_DIF_TYPE3_PROTECTION = 0x3,
89};
90
91/*
92 * Data Integrity Field tuple.
93 */
94struct sd_dif_tuple {
95 __be16 guard_tag; /* Checksum */
96 __be16 app_tag; /* Opaque storage */
97 __be32 ref_tag; /* Target LBA or indirect LBA */
98};
99
100#if defined(CONFIG_BLK_DEV_INTEGRITY)
101
102extern void sd_dif_op(struct scsi_cmnd *, unsigned int, unsigned int);
103extern void sd_dif_config_host(struct scsi_disk *);
104extern int sd_dif_prepare(struct request *rq, sector_t, unsigned int);
105extern void sd_dif_complete(struct scsi_cmnd *, unsigned int);
106
107#else /* CONFIG_BLK_DEV_INTEGRITY */
108
109#define sd_dif_op(a, b, c) do { } while (0)
110#define sd_dif_config_host(a) do { } while (0)
111#define sd_dif_prepare(a, b, c) (0)
112#define sd_dif_complete(a, b) (0)
113
114#endif /* CONFIG_BLK_DEV_INTEGRITY */
115
62#endif /* _SCSI_DISK_H */ 116#endif /* _SCSI_DISK_H */
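The scsi_disk additions above (protection_type, the ATO flag) and the sd_dif_tuple definition describe the 8 bytes of protection information that accompany each 512-byte block on a DIF-formatted drive. Below is a minimal user-space sketch of that layout, assuming plain fixed-width fields in place of the kernel's __be16/__be32 types; the protected_sector struct only visualizes the 520-byte on-media format, since in the DIX exchange implemented in sd_dif.c below the data and protection buffers travel separately (data_buf vs. prot_buf).

#include <stdint.h>

/* Mirror of the 8-byte tuple declared above, with host-order field types. */
struct dif_tuple {
	uint16_t guard_tag;	/* checksum of the 512 data bytes */
	uint16_t app_tag;	/* opaque application storage */
	uint32_t ref_tag;	/* low 32 bits of the target LBA (Type 1/2) */
};

/* On media, a DIF-formatted 512-byte sector is followed by its tuple,
 * giving 520 bytes per logical block. */
struct protected_sector {
	uint8_t		 data[512];
	struct dif_tuple pi;
};

/* Compile-time checks: the tuple packs to 8 bytes, the sector to 520. */
typedef char assert_tuple_size[sizeof(struct dif_tuple) == 8 ? 1 : -1];
typedef char assert_sector_size[sizeof(struct protected_sector) == 520 ? 1 : -1];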
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
new file mode 100644
index 000000000000..4d17f3d35aac
--- /dev/null
+++ b/drivers/scsi/sd_dif.c
@@ -0,0 +1,538 @@
1/*
2 * sd_dif.c - SCSI Data Integrity Field
3 *
4 * Copyright (C) 2007, 2008 Oracle Corporation
5 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
19 * USA.
20 *
21 */
22
23#include <linux/blkdev.h>
24#include <linux/crc-t10dif.h>
25
26#include <scsi/scsi.h>
27#include <scsi/scsi_cmnd.h>
28#include <scsi/scsi_dbg.h>
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_driver.h>
31#include <scsi/scsi_eh.h>
32#include <scsi/scsi_host.h>
33#include <scsi/scsi_ioctl.h>
34#include <scsi/scsicam.h>
35
36#include <net/checksum.h>
37
38#include "sd.h"
39
40typedef __u16 (csum_fn) (void *, unsigned int);
41
42static __u16 sd_dif_crc_fn(void *data, unsigned int len)
43{
44 return cpu_to_be16(crc_t10dif(data, len));
45}
46
47static __u16 sd_dif_ip_fn(void *data, unsigned int len)
48{
49 return ip_compute_csum(data, len);
50}
51
52/*
53 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
54 * 16 bit app tag, 32 bit reference tag.
55 */
56static void sd_dif_type1_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
57{
58 void *buf = bix->data_buf;
59 struct sd_dif_tuple *sdt = bix->prot_buf;
60 sector_t sector = bix->sector;
61 unsigned int i;
62
63 for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
64 sdt->guard_tag = fn(buf, bix->sector_size);
65 sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
66 sdt->app_tag = 0;
67
68 buf += bix->sector_size;
69 sector++;
70 }
71}
72
73static void sd_dif_type1_generate_crc(struct blk_integrity_exchg *bix)
74{
75 sd_dif_type1_generate(bix, sd_dif_crc_fn);
76}
77
78static void sd_dif_type1_generate_ip(struct blk_integrity_exchg *bix)
79{
80 sd_dif_type1_generate(bix, sd_dif_ip_fn);
81}
82
83static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
84{
85 void *buf = bix->data_buf;
86 struct sd_dif_tuple *sdt = bix->prot_buf;
87 sector_t sector = bix->sector;
88 unsigned int i;
89 __u16 csum;
90
91 for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
92 /* Unwritten sectors */
93 if (sdt->app_tag == 0xffff)
94 return 0;
95
96 /* Bad ref tag received from disk */
97 if (sdt->ref_tag == 0xffffffff) {
98 printk(KERN_ERR
99 "%s: bad phys ref tag on sector %lu\n",
100 bix->disk_name, (unsigned long)sector);
101 return -EIO;
102 }
103
104 if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
105 printk(KERN_ERR
106 "%s: ref tag error on sector %lu (rcvd %u)\n",
107 bix->disk_name, (unsigned long)sector,
108 be32_to_cpu(sdt->ref_tag));
109 return -EIO;
110 }
111
112 csum = fn(buf, bix->sector_size);
113
114 if (sdt->guard_tag != csum) {
115 printk(KERN_ERR "%s: guard tag error on sector %lu " \
116 "(rcvd %04x, data %04x)\n", bix->disk_name,
117 (unsigned long)sector,
118 be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
119 return -EIO;
120 }
121
122 buf += bix->sector_size;
123 sector++;
124 }
125
126 return 0;
127}
128
129static int sd_dif_type1_verify_crc(struct blk_integrity_exchg *bix)
130{
131 return sd_dif_type1_verify(bix, sd_dif_crc_fn);
132}
133
134static int sd_dif_type1_verify_ip(struct blk_integrity_exchg *bix)
135{
136 return sd_dif_type1_verify(bix, sd_dif_ip_fn);
137}
138
139/*
140 * Functions for interleaving and deinterleaving application tags
141 */
142static void sd_dif_type1_set_tag(void *prot, void *tag_buf, unsigned int sectors)
143{
144 struct sd_dif_tuple *sdt = prot;
145 char *tag = tag_buf;
146 unsigned int i, j;
147
148 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
149 sdt->app_tag = tag[j] << 8 | tag[j+1];
150 BUG_ON(sdt->app_tag == 0xffff);
151 }
152}
153
154static void sd_dif_type1_get_tag(void *prot, void *tag_buf, unsigned int sectors)
155{
156 struct sd_dif_tuple *sdt = prot;
157 char *tag = tag_buf;
158 unsigned int i, j;
159
160 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
161 tag[j] = (sdt->app_tag & 0xff00) >> 8;
162 tag[j+1] = sdt->app_tag & 0xff;
163 }
164}
165
166static struct blk_integrity dif_type1_integrity_crc = {
167 .name = "T10-DIF-TYPE1-CRC",
168 .generate_fn = sd_dif_type1_generate_crc,
169 .verify_fn = sd_dif_type1_verify_crc,
170 .get_tag_fn = sd_dif_type1_get_tag,
171 .set_tag_fn = sd_dif_type1_set_tag,
172 .tuple_size = sizeof(struct sd_dif_tuple),
173 .tag_size = 0,
174};
175
176static struct blk_integrity dif_type1_integrity_ip = {
177 .name = "T10-DIF-TYPE1-IP",
178 .generate_fn = sd_dif_type1_generate_ip,
179 .verify_fn = sd_dif_type1_verify_ip,
180 .get_tag_fn = sd_dif_type1_get_tag,
181 .set_tag_fn = sd_dif_type1_set_tag,
182 .tuple_size = sizeof(struct sd_dif_tuple),
183 .tag_size = 0,
184};
185
186
187/*
188 * Type 3 protection has a 16-bit guard tag and 16 + 32 bits of opaque
189 * tag space.
190 */
191static void sd_dif_type3_generate(struct blk_integrity_exchg *bix, csum_fn *fn)
192{
193 void *buf = bix->data_buf;
194 struct sd_dif_tuple *sdt = bix->prot_buf;
195 unsigned int i;
196
197 for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
198 sdt->guard_tag = fn(buf, bix->sector_size);
199 sdt->ref_tag = 0;
200 sdt->app_tag = 0;
201
202 buf += bix->sector_size;
203 }
204}
205
206static void sd_dif_type3_generate_crc(struct blk_integrity_exchg *bix)
207{
208 sd_dif_type3_generate(bix, sd_dif_crc_fn);
209}
210
211static void sd_dif_type3_generate_ip(struct blk_integrity_exchg *bix)
212{
213 sd_dif_type3_generate(bix, sd_dif_ip_fn);
214}
215
216static int sd_dif_type3_verify(struct blk_integrity_exchg *bix, csum_fn *fn)
217{
218 void *buf = bix->data_buf;
219 struct sd_dif_tuple *sdt = bix->prot_buf;
220 sector_t sector = bix->sector;
221 unsigned int i;
222 __u16 csum;
223
224 for (i = 0 ; i < bix->data_size ; i += bix->sector_size, sdt++) {
225 /* Unwritten sectors */
226 if (sdt->app_tag == 0xffff && sdt->ref_tag == 0xffffffff)
227 return 0;
228
229 csum = fn(buf, bix->sector_size);
230
231 if (sdt->guard_tag != csum) {
232 printk(KERN_ERR "%s: guard tag error on sector %lu " \
233 "(rcvd %04x, data %04x)\n", bix->disk_name,
234 (unsigned long)sector,
235 be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
236 return -EIO;
237 }
238
239 buf += bix->sector_size;
240 sector++;
241 }
242
243 return 0;
244}
245
246static int sd_dif_type3_verify_crc(struct blk_integrity_exchg *bix)
247{
248 return sd_dif_type3_verify(bix, sd_dif_crc_fn);
249}
250
251static int sd_dif_type3_verify_ip(struct blk_integrity_exchg *bix)
252{
253 return sd_dif_type3_verify(bix, sd_dif_ip_fn);
254}
255
256static void sd_dif_type3_set_tag(void *prot, void *tag_buf, unsigned int sectors)
257{
258 struct sd_dif_tuple *sdt = prot;
259 char *tag = tag_buf;
260 unsigned int i, j;
261
262 for (i = 0, j = 0 ; i < sectors ; i++, j += 6, sdt++) {
263 sdt->app_tag = tag[j] << 8 | tag[j+1];
264 sdt->ref_tag = tag[j+2] << 24 | tag[j+3] << 16 |
265 tag[j+4] << 8 | tag[j+5];
266 }
267}
268
269static void sd_dif_type3_get_tag(void *prot, void *tag_buf, unsigned int sectors)
270{
271 struct sd_dif_tuple *sdt = prot;
272 char *tag = tag_buf;
273 unsigned int i, j;
274
275 for (i = 0, j = 0 ; i < sectors ; i++, j += 2, sdt++) {
276 tag[j] = (sdt->app_tag & 0xff00) >> 8;
277 tag[j+1] = sdt->app_tag & 0xff;
278 tag[j+2] = (sdt->ref_tag & 0xff000000) >> 24;
279 tag[j+3] = (sdt->ref_tag & 0xff0000) >> 16;
280 tag[j+4] = (sdt->ref_tag & 0xff00) >> 8;
281 tag[j+5] = sdt->ref_tag & 0xff;
282 BUG_ON(sdt->app_tag == 0xffff || sdt->ref_tag == 0xffffffff);
283 }
284}
285
286static struct blk_integrity dif_type3_integrity_crc = {
287 .name = "T10-DIF-TYPE3-CRC",
288 .generate_fn = sd_dif_type3_generate_crc,
289 .verify_fn = sd_dif_type3_verify_crc,
290 .get_tag_fn = sd_dif_type3_get_tag,
291 .set_tag_fn = sd_dif_type3_set_tag,
292 .tuple_size = sizeof(struct sd_dif_tuple),
293 .tag_size = 0,
294};
295
296static struct blk_integrity dif_type3_integrity_ip = {
297 .name = "T10-DIF-TYPE3-IP",
298 .generate_fn = sd_dif_type3_generate_ip,
299 .verify_fn = sd_dif_type3_verify_ip,
300 .get_tag_fn = sd_dif_type3_get_tag,
301 .set_tag_fn = sd_dif_type3_set_tag,
302 .tuple_size = sizeof(struct sd_dif_tuple),
303 .tag_size = 0,
304};
305
306/*
307 * Configure exchange of protection information between OS and HBA.
308 */
309void sd_dif_config_host(struct scsi_disk *sdkp)
310{
311 struct scsi_device *sdp = sdkp->device;
312 struct gendisk *disk = sdkp->disk;
313 u8 type = sdkp->protection_type;
314
315 /* If this HBA doesn't support DIX, resort to normal I/O or DIF */
316 if (scsi_host_dix_capable(sdp->host, type) == 0) {
317
318 if (type == SD_DIF_TYPE0_PROTECTION)
319 return;
320
321 if (scsi_host_dif_capable(sdp->host, type) == 0) {
322 sd_printk(KERN_INFO, sdkp, "Type %d protection " \
323 "unsupported by HBA. Disabling DIF.\n", type);
324 sdkp->protection_type = 0;
325 return;
326 }
327
328 sd_printk(KERN_INFO, sdkp, "Enabling DIF Type %d protection\n",
329 type);
330
331 return;
332 }
333
334 /* Enable DMA of protection information */
335 if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP)
336 if (type == SD_DIF_TYPE3_PROTECTION)
337 blk_integrity_register(disk, &dif_type3_integrity_ip);
338 else
339 blk_integrity_register(disk, &dif_type1_integrity_ip);
340 else
341 if (type == SD_DIF_TYPE3_PROTECTION)
342 blk_integrity_register(disk, &dif_type3_integrity_crc);
343 else
344 blk_integrity_register(disk, &dif_type1_integrity_crc);
345
346 sd_printk(KERN_INFO, sdkp,
347 "Enabling %s integrity protection\n", disk->integrity->name);
348
349 /* Signal to block layer that we support sector tagging */
350 if (type && sdkp->ATO) {
351 if (type == SD_DIF_TYPE3_PROTECTION)
352 disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
353 else
354 disk->integrity->tag_size = sizeof(u16);
355
356 sd_printk(KERN_INFO, sdkp, "DIF application tag size %u\n",
357 disk->integrity->tag_size);
358 }
359}
360
361/*
362 * DIF DMA operation magic decoder ring.
363 */
364void sd_dif_op(struct scsi_cmnd *scmd, unsigned int dif, unsigned int dix)
365{
366 int csum_convert, prot_op;
367
368 prot_op = 0;
369
370 /* Convert checksum? */
371 if (scsi_host_get_guard(scmd->device->host) != SHOST_DIX_GUARD_CRC)
372 csum_convert = 1;
373 else
374 csum_convert = 0;
375
376 switch (scmd->cmnd[0]) {
377 case READ_10:
378 case READ_12:
379 case READ_16:
380 if (dif && dix)
381 if (csum_convert)
382 prot_op = SCSI_PROT_READ_CONVERT;
383 else
384 prot_op = SCSI_PROT_READ_PASS;
385 else if (dif && !dix)
386 prot_op = SCSI_PROT_READ_STRIP;
387 else if (!dif && dix)
388 prot_op = SCSI_PROT_READ_INSERT;
389
390 break;
391
392 case WRITE_10:
393 case WRITE_12:
394 case WRITE_16:
395 if (dif && dix)
396 if (csum_convert)
397 prot_op = SCSI_PROT_WRITE_CONVERT;
398 else
399 prot_op = SCSI_PROT_WRITE_PASS;
400 else if (dif && !dix)
401 prot_op = SCSI_PROT_WRITE_INSERT;
402 else if (!dif && dix)
403 prot_op = SCSI_PROT_WRITE_STRIP;
404
405 break;
406 }
407
408 scsi_set_prot_op(scmd, prot_op);
409 scsi_set_prot_type(scmd, dif);
410}
411
412/*
413 * The virtual start sector is the one that was originally submitted
414 * by the block layer. Due to partitioning, MD/DM cloning, etc. the
415 * actual physical start sector is likely to be different. Remap
416 * protection information to match the physical LBA.
417 *
418 * From a protocol perspective there's a slight difference between
419 * Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
420 * reference tag is seeded in the CDB. This gives us the potential to
421 * avoid virt->phys remapping during write. However, at read time we
422 * don't know whether the virt sector is the same as when we wrote it
423 * (we could be reading from real disk as opposed to MD/DM device. So
424 * we always remap Type 2 making it identical to Type 1.
425 *
426 * Type 3 does not have a reference tag so no remapping is required.
427 */
428int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_sz)
429{
430 const int tuple_sz = sizeof(struct sd_dif_tuple);
431 struct bio *bio;
432 struct scsi_disk *sdkp;
433 struct sd_dif_tuple *sdt;
434 unsigned int i, j;
435 u32 phys, virt;
436
437 /* Already remapped? */
438 if (rq->cmd_flags & REQ_INTEGRITY)
439 return 0;
440
441 sdkp = rq->bio->bi_bdev->bd_disk->private_data;
442
443 if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
444 return 0;
445
446 rq->cmd_flags |= REQ_INTEGRITY;
447 phys = hw_sector & 0xffffffff;
448
449 __rq_for_each_bio(bio, rq) {
450 struct bio_vec *iv;
451
452 virt = bio->bi_integrity->bip_sector & 0xffffffff;
453
454 bip_for_each_vec(iv, bio->bi_integrity, i) {
455 sdt = kmap_atomic(iv->bv_page, KM_USER0)
456 + iv->bv_offset;
457
458 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
459
460 if (be32_to_cpu(sdt->ref_tag) != virt)
461 goto error;
462
463 sdt->ref_tag = cpu_to_be32(phys);
464 virt++;
465 phys++;
466 }
467
468 kunmap_atomic(sdt, KM_USER0);
469 }
470 }
471
472 return 0;
473
474error:
475 kunmap_atomic(sdt, KM_USER0);
476 sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u\n",
477 __func__, virt, phys, be32_to_cpu(sdt->ref_tag));
478
479 return -EIO;
480}
481
482/*
483 * Remap physical sector values in the reference tag to the virtual
484 * values expected by the block layer.
485 */
486void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
487{
488 const int tuple_sz = sizeof(struct sd_dif_tuple);
489 struct scsi_disk *sdkp;
490 struct bio *bio;
491 struct sd_dif_tuple *sdt;
492 unsigned int i, j, sectors, sector_sz;
493 u32 phys, virt;
494
495 sdkp = scsi_disk(scmd->request->rq_disk);
496
497 if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
498 return;
499
500 sector_sz = scmd->device->sector_size;
501 sectors = good_bytes / sector_sz;
502
503 phys = scmd->request->sector & 0xffffffff;
504 if (sector_sz == 4096)
505 phys >>= 3;
506
507 __rq_for_each_bio(bio, scmd->request) {
508 struct bio_vec *iv;
509
510 virt = bio->bi_integrity->bip_sector & 0xffffffff;
511
512 bip_for_each_vec(iv, bio->bi_integrity, i) {
513 sdt = kmap_atomic(iv->bv_page, KM_USER0)
514 + iv->bv_offset;
515
516 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
517
518 if (sectors == 0) {
519 kunmap_atomic(sdt, KM_USER0);
520 return;
521 }
522
523 if (be32_to_cpu(sdt->ref_tag) != phys &&
524 sdt->app_tag != 0xffff)
525 sdt->ref_tag = 0xffffffff; /* Bad ref */
526 else
527 sdt->ref_tag = cpu_to_be32(virt);
528
529 virt++;
530 phys++;
531 sectors--;
532 }
533
534 kunmap_atomic(sdt, KM_USER0);
535 }
536 }
537}
538
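sd_dif.c above generates and verifies Type 1/Type 3 tuples through the block layer's integrity hooks. The following stand-alone sketch mirrors the Type 1 generate/verify logic in user space so it can be exercised without a DIF-capable HBA: crc_t10dif_sw() is a bit-by-bit reimplementation of the T10 CRC (polynomial 0x8BB7) standing in for the kernel's crc_t10dif(), htobe*/be*toh replace cpu_to_be*/be*_to_cpu, and all names here are illustrative rather than kernel interfaces.

#include <stdint.h>
#include <stddef.h>
#include <endian.h>	/* htobe16()/htobe32()/be16toh()/be32toh() */

struct dif_tuple {
	uint16_t guard_tag;
	uint16_t app_tag;
	uint32_t ref_tag;
};

/* Bit-by-bit CRC-16/T10-DIF (poly 0x8bb7, init 0, no reflection). */
static uint16_t crc_t10dif_sw(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;

	while (len--) {
		crc ^= (uint16_t)*buf++ << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
	}
	return crc;
}

/* Type 1: guard = CRC of the sector, ref = low 32 bits of the LBA. */
static void type1_generate(const uint8_t *data, unsigned int sectors,
			   uint32_t lba, struct dif_tuple *pi)
{
	for (unsigned int i = 0; i < sectors; i++) {
		pi[i].guard_tag = htobe16(crc_t10dif_sw(data + i * 512, 512));
		pi[i].app_tag = 0;
		pi[i].ref_tag = htobe32(lba + i);
	}
}

/* Same checks as the verify path above: skip unwritten sectors, then
 * compare reference and guard tags. Returns 0 on success, -1 on error. */
static int type1_verify(const uint8_t *data, unsigned int sectors,
			uint32_t lba, const struct dif_tuple *pi)
{
	for (unsigned int i = 0; i < sectors; i++) {
		if (pi[i].app_tag == 0xffff)		/* unwritten sector */
			continue;
		if (be32toh(pi[i].ref_tag) != lba + i)
			return -1;
		if (be16toh(pi[i].guard_tag) !=
		    crc_t10dif_sw(data + i * 512, 512))
			return -1;
	}
	return 0;
}

Running a buffer through type1_generate(), flipping a single data byte, and then calling type1_verify() reproduces the mismatch that sd_dif_type1_verify() reports as a guard tag error.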
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 4684cc716aa4..c2bb53e3d941 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -17,7 +17,7 @@
17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support 17 Last modified: 18-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
18 */ 18 */
19 19
20static const char *verstr = "20080224"; 20static const char *verstr = "20080504";
21 21
22#include <linux/module.h> 22#include <linux/module.h>
23 23
@@ -631,7 +631,7 @@ static int cross_eof(struct scsi_tape * STp, int forward)
631/* Flush the write buffer (never need to write if variable blocksize). */ 631/* Flush the write buffer (never need to write if variable blocksize). */
632static int st_flush_write_buffer(struct scsi_tape * STp) 632static int st_flush_write_buffer(struct scsi_tape * STp)
633{ 633{
634 int offset, transfer, blks; 634 int transfer, blks;
635 int result; 635 int result;
636 unsigned char cmd[MAX_COMMAND_SIZE]; 636 unsigned char cmd[MAX_COMMAND_SIZE];
637 struct st_request *SRpnt; 637 struct st_request *SRpnt;
@@ -644,14 +644,10 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
644 result = 0; 644 result = 0;
645 if (STp->dirty == 1) { 645 if (STp->dirty == 1) {
646 646
647 offset = (STp->buffer)->buffer_bytes; 647 transfer = STp->buffer->buffer_bytes;
648 transfer = ((offset + STp->block_size - 1) /
649 STp->block_size) * STp->block_size;
650 DEBC(printk(ST_DEB_MSG "%s: Flushing %d bytes.\n", 648 DEBC(printk(ST_DEB_MSG "%s: Flushing %d bytes.\n",
651 tape_name(STp), transfer)); 649 tape_name(STp), transfer));
652 650
653 memset((STp->buffer)->b_data + offset, 0, transfer - offset);
654
655 memset(cmd, 0, MAX_COMMAND_SIZE); 651 memset(cmd, 0, MAX_COMMAND_SIZE);
656 cmd[0] = WRITE_6; 652 cmd[0] = WRITE_6;
657 cmd[1] = 1; 653 cmd[1] = 1;
@@ -1670,6 +1666,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
1670 if (undone <= do_count) { 1666 if (undone <= do_count) {
1671 /* Only data from this write is not written */ 1667 /* Only data from this write is not written */
1672 count += undone; 1668 count += undone;
1669 b_point -= undone;
1673 do_count -= undone; 1670 do_count -= undone;
1674 if (STp->block_size) 1671 if (STp->block_size)
1675 blks = (transfer - undone) / STp->block_size; 1672 blks = (transfer - undone) / STp->block_size;
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index f308a0308829..3790906a77d1 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -467,7 +467,7 @@ stex_slave_alloc(struct scsi_device *sdev)
467 /* Cheat: usually extracted from Inquiry data */ 467 /* Cheat: usually extracted from Inquiry data */
468 sdev->tagged_supported = 1; 468 sdev->tagged_supported = 1;
469 469
470 scsi_activate_tcq(sdev, sdev->host->can_queue); 470 scsi_activate_tcq(sdev, ST_CMD_PER_LUN);
471 471
472 return 0; 472 return 0;
473} 473}
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 22a6aae78699..98df1651404f 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -5741,6 +5741,8 @@ void sym_hcb_free(struct sym_hcb *np)
5741 5741
5742 for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) { 5742 for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
5743 tp = &np->target[target]; 5743 tp = &np->target[target];
5744 if (tp->luntbl)
5745 sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
5744#if SYM_CONF_MAX_LUN > 1 5746#if SYM_CONF_MAX_LUN > 1
5745 kfree(tp->lunmp); 5747 kfree(tp->lunmp);
5746#endif 5748#endif
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index 5b04ddfed26c..1723d71cbf3f 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -452,7 +452,7 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
452 /* TODO: error handling */ 452 /* TODO: error handling */
453 if (pSRB->SGcount != 1) 453 if (pSRB->SGcount != 1)
454 error = 1; 454 error = 1;
455 DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __FUNCTION__, pcmd->sense_buffer, cmdp->saved_dma_handle)); 455 DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __func__, pcmd->sense_buffer, cmdp->saved_dma_handle));
456 /* Map SG list */ 456 /* Map SG list */
457 } else if (scsi_sg_count(pcmd)) { 457 } else if (scsi_sg_count(pcmd)) {
458 int nseg; 458 int nseg;
@@ -466,7 +466,7 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
466 if (nseg < 0) 466 if (nseg < 0)
467 error = 1; 467 error = 1;
468 DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\ 468 DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
469 __FUNCTION__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd))); 469 __func__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
470 /* Map single segment */ 470 /* Map single segment */
471 } else 471 } else
472 pSRB->SGcount = 0; 472 pSRB->SGcount = 0;
@@ -483,11 +483,11 @@ static void dc390_pci_unmap (struct dc390_srb* pSRB)
483 483
484 if (pSRB->SRBFlag) { 484 if (pSRB->SRBFlag) {
485 pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE); 485 pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
486 DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle)); 486 DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __func__, cmdp->saved_dma_handle));
487 } else { 487 } else {
488 scsi_dma_unmap(pcmd); 488 scsi_dma_unmap(pcmd);
489 DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n", 489 DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
490 __FUNCTION__, scsi_sglist(pcmd), scsi_sg_count(pcmd))); 490 __func__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
491 } 491 }
492} 492}
493 493
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index c975c01b3a02..d4c13561f4a6 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -148,7 +148,7 @@
148 * 148 *
149 * 2002/10/04 - Alan Cox <alan@redhat.com> 149 * 2002/10/04 - Alan Cox <alan@redhat.com>
150 * 150 *
151 * Use dev_id for interrupts, kill __FUNCTION__ pasting 151 * Use dev_id for interrupts, kill __func__ pasting
152 * Add a lock for the scb pool, clean up all other cli/sti usage stuff 152 * Add a lock for the scb pool, clean up all other cli/sti usage stuff
153 * Use the adapter lock for the other places we had the cli's 153 * Use the adapter lock for the other places we had the cli's
154 * 154 *
@@ -640,12 +640,12 @@ static int __init wd7000_setup(char *str)
640 (void) get_options(str, ARRAY_SIZE(ints), ints); 640 (void) get_options(str, ARRAY_SIZE(ints), ints);
641 641
642 if (wd7000_card_num >= NUM_CONFIGS) { 642 if (wd7000_card_num >= NUM_CONFIGS) {
643 printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __FUNCTION__); 643 printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __func__);
644 return 0; 644 return 0;
645 } 645 }
646 646
647 if ((ints[0] < 3) || (ints[0] > 5)) { 647 if ((ints[0] < 3) || (ints[0] > 5)) {
648 printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __FUNCTION__); 648 printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __func__);
649 } else { 649 } else {
650 for (i = 0; i < NUM_IRQS; i++) 650 for (i = 0; i < NUM_IRQS; i++)
651 if (ints[1] == wd7000_irq[i]) 651 if (ints[1] == wd7000_irq[i])
@@ -1642,7 +1642,7 @@ static int wd7000_biosparam(struct scsi_device *sdev,
1642 ip[2] = info[2]; 1642 ip[2] = info[2];
1643 1643
1644 if (info[0] == 255) 1644 if (info[0] == 255)
1645 printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __FUNCTION__); 1645 printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __func__);
1646 } 1646 }
1647 } 1647 }
1648 1648
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 4b5f908d35c3..3c4a300494a4 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -68,11 +68,11 @@ lasi_scsi_clock(void * hpa, int defaultclock)
68 if (status == PDC_RET_OK) { 68 if (status == PDC_RET_OK) {
69 clock = (int) pdc_result[16]; 69 clock = (int) pdc_result[16];
70 } else { 70 } else {
71 printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __FUNCTION__, status); 71 printk(KERN_WARNING "%s: pdc_iodc_read returned %d\n", __func__, status);
72 clock = defaultclock; 72 clock = defaultclock;
73 } 73 }
74 74
75 printk(KERN_DEBUG "%s: SCSI clock %d\n", __FUNCTION__, clock); 75 printk(KERN_DEBUG "%s: SCSI clock %d\n", __func__, clock);
76 return clock; 76 return clock;
77} 77}
78#endif 78#endif
@@ -108,13 +108,13 @@ zalon_probe(struct parisc_device *dev)
108 */ 108 */
109 dev->irq = gsc_alloc_irq(&gsc_irq); 109 dev->irq = gsc_alloc_irq(&gsc_irq);
110 110
111 printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __FUNCTION__, 111 printk(KERN_INFO "%s: Zalon version %d, IRQ %d\n", __func__,
112 zalon_vers, dev->irq); 112 zalon_vers, dev->irq);
113 113
114 __raw_writel(gsc_irq.txn_addr | gsc_irq.txn_data, zalon + IO_MODULE_EIM); 114 __raw_writel(gsc_irq.txn_addr | gsc_irq.txn_data, zalon + IO_MODULE_EIM);
115 115
116 if (zalon_vers == 0) 116 if (zalon_vers == 0)
117 printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __FUNCTION__); 117 printk(KERN_WARNING "%s: Zalon 1.1 or earlier\n", __func__);
118 118
119 memset(&device, 0, sizeof(struct ncr_device)); 119 memset(&device, 0, sizeof(struct ncr_device));
120 120
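The tmscsim, wd7000, and zalon hunks above replace the GCC-specific __FUNCTION__ with the standard C99 predefined identifier __func__, which evaluates to the enclosing function's name. A trivial stand-alone illustration of the idiom, with a hypothetical function name:

#include <stdio.h>

static void example_probe(void)
{
	/* __func__ is defined by C99; __FUNCTION__ was the older GCC
	 * spelling with the same value. */
	printf("%s: probing\n", __func__);
}

int main(void)
{
	example_probe();
	return 0;
}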
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 832a5a4f3cb3..cd9a2e138c8b 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -651,15 +651,17 @@ static int ipaq_open(struct tty_struct *tty,
651 */ 651 */
652 652
653 kfree(port->bulk_in_buffer); 653 kfree(port->bulk_in_buffer);
654 kfree(port->bulk_out_buffer);
655 /* make sure the generic serial code knows */
656 port->bulk_out_buffer = NULL;
657
654 port->bulk_in_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL); 658 port->bulk_in_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL);
655 if (port->bulk_in_buffer == NULL) { 659 if (port->bulk_in_buffer == NULL)
656 port->bulk_out_buffer = NULL; /* prevent double free */
657 goto enomem; 660 goto enomem;
658 }
659 661
660 kfree(port->bulk_out_buffer);
661 port->bulk_out_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL); 662 port->bulk_out_buffer = kmalloc(URBDATA_SIZE, GFP_KERNEL);
662 if (port->bulk_out_buffer == NULL) { 663 if (port->bulk_out_buffer == NULL) {
664 /* the buffer is useless, free it */
663 kfree(port->bulk_in_buffer); 665 kfree(port->bulk_in_buffer);
664 port->bulk_in_buffer = NULL; 666 port->bulk_in_buffer = NULL;
665 goto enomem; 667 goto enomem;