Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/osl.c | 64
-rw-r--r--  drivers/acpi/processor_core.c | 2
-rw-r--r--  drivers/acpi/tables.c | 3
-rw-r--r--  drivers/block/Kconfig | 15
-rw-r--r--  drivers/block/aoe/aoeblk.c | 4
-rw-r--r--  drivers/block/floppy.c | 23
-rw-r--r--  drivers/block/loop.c | 4
-rw-r--r--  drivers/block/pktcdvd.c | 26
-rw-r--r--  drivers/char/Kconfig | 1
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c | 48
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 554
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 1057
-rw-r--r--  drivers/char/ipmi/ipmi_si_sm.h | 3
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 2
-rw-r--r--  drivers/char/mem.c | 18
-rw-r--r--  drivers/char/tlclk.c | 1
-rw-r--r--  drivers/edac/Kconfig | 10
-rw-r--r--  drivers/edac/amd76x_edac.c | 126
-rw-r--r--  drivers/edac/e752x_edac.c | 354
-rw-r--r--  drivers/edac/e7xxx_edac.c | 228
-rw-r--r--  drivers/edac/edac_mc.c | 808
-rw-r--r--  drivers/edac/edac_mc.h | 133
-rw-r--r--  drivers/edac/i82860_edac.c | 127
-rw-r--r--  drivers/edac/i82875p_edac.c | 208
-rw-r--r--  drivers/edac/r82600_edac.c | 140
-rw-r--r--  drivers/firmware/efivars.c | 28
-rw-r--r--  drivers/firmware/pcdp.c | 19
-rw-r--r--  drivers/ieee1394/highlevel.c | 3
-rw-r--r--  drivers/input/touchscreen/ads7846.c | 2
-rw-r--r--  drivers/isdn/Makefile | 1
-rw-r--r--  drivers/isdn/gigaset/Kconfig | 42
-rw-r--r--  drivers/isdn/gigaset/Makefile | 6
-rw-r--r--  drivers/isdn/gigaset/asyncdata.c | 597
-rw-r--r--  drivers/isdn/gigaset/bas-gigaset.c | 2365
-rw-r--r--  drivers/isdn/gigaset/common.c | 1203
-rw-r--r--  drivers/isdn/gigaset/ev-layer.c | 1983
-rw-r--r--  drivers/isdn/gigaset/gigaset.h | 938
-rw-r--r--  drivers/isdn/gigaset/i4l.c | 567
-rw-r--r--  drivers/isdn/gigaset/interface.c | 718
-rw-r--r--  drivers/isdn/gigaset/isocdata.c | 1009
-rw-r--r--  drivers/isdn/gigaset/proc.c | 81
-rw-r--r--  drivers/isdn/gigaset/usb-gigaset.c | 1008
-rw-r--r--  drivers/isdn/hardware/avm/avmcard.h | 4
-rw-r--r--  drivers/isdn/i4l/Kconfig | 1
-rw-r--r--  drivers/macintosh/smu.c | 9
-rw-r--r--  drivers/md/bitmap.c | 14
-rw-r--r--  drivers/md/dm-crypt.c | 20
-rw-r--r--  drivers/md/dm-io.c | 13
-rw-r--r--  drivers/md/dm-mpath.c | 3
-rw-r--r--  drivers/md/dm-raid1.c | 14
-rw-r--r--  drivers/md/dm-snap.c | 3
-rw-r--r--  drivers/md/dm.c | 6
-rw-r--r--  drivers/md/kcopyd.c | 19
-rw-r--r--  drivers/md/multipath.c | 17
-rw-r--r--  drivers/message/i2o/i2o_block.c | 7
-rw-r--r--  drivers/net/3c59x.c | 245
-rw-r--r--  drivers/net/sis900.c | 4
-rw-r--r--  drivers/net/wan/dscc4.c | 7
-rw-r--r--  drivers/parport/share.c | 19
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c | 12
-rw-r--r--  drivers/pci/hotplug/sgi_hotplug.c | 19
-rw-r--r--  drivers/pnp/isapnp/core.c | 7
-rw-r--r--  drivers/s390/char/raw3270.c | 39
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c | 60
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 22
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 3
-rw-r--r--  drivers/scsi/scsi_lib.c | 5
-rw-r--r--  drivers/telephony/phonedev.c | 21
70 files changed, 12979 insertions, 2149 deletions
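
Several of the changes below (drivers/block/aoe/aoeblk.c and drivers/block/pktcdvd.c) drop open-coded mempool alloc/free callbacks in favour of the mempool_create_kmalloc_pool()/mempool_create_slab_pool() helpers. A minimal sketch of the before/after pattern, not code from this patch; struct my_node, MIN_NR, my_alloc, my_free and my_cache are placeholder names:

#include <linux/mempool.h>
#include <linux/slab.h>

struct my_node { int dummy; };          /* placeholder payload */
#define MIN_NR 4                        /* placeholder minimum pool size */

/* Old style: every caller supplied its own trivial kmalloc/kfree callbacks. */
static void *my_alloc(gfp_t gfp_mask, void *pool_data)
{
        return kmalloc(sizeof(struct my_node), gfp_mask);
}

static void my_free(void *element, void *pool_data)
{
        kfree(element);
}

static mempool_t *make_pool_old(void)
{
        return mempool_create(MIN_NR, my_alloc, my_free, NULL);
}

/* New style: the common kmalloc-backed case becomes a one-liner... */
static mempool_t *make_pool_new(void)
{
        return mempool_create_kmalloc_pool(MIN_NR, sizeof(struct my_node));
}

/* ...and slab-backed pools pass their kmem_cache directly. */
static mempool_t *make_pool_slab(kmem_cache_t *my_cache)
{
        return mempool_create_slab_pool(MIN_NR, my_cache);
}
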
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index ac5bbaedac1b..13b5fd5854a8 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -156,12 +156,10 @@ acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr)
156{ 156{
157 if (efi_enabled) { 157 if (efi_enabled) {
158 addr->pointer_type = ACPI_PHYSICAL_POINTER; 158 addr->pointer_type = ACPI_PHYSICAL_POINTER;
159 if (efi.acpi20) 159 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
160 addr->pointer.physical = 160 addr->pointer.physical = efi.acpi20;
161 (acpi_physical_address) virt_to_phys(efi.acpi20); 161 else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
162 else if (efi.acpi) 162 addr->pointer.physical = efi.acpi;
163 addr->pointer.physical =
164 (acpi_physical_address) virt_to_phys(efi.acpi);
165 else { 163 else {
166 printk(KERN_ERR PREFIX 164 printk(KERN_ERR PREFIX
167 "System description tables not found\n"); 165 "System description tables not found\n");
@@ -182,22 +180,14 @@ acpi_status
182acpi_os_map_memory(acpi_physical_address phys, acpi_size size, 180acpi_os_map_memory(acpi_physical_address phys, acpi_size size,
183 void __iomem ** virt) 181 void __iomem ** virt)
184{ 182{
185 if (efi_enabled) { 183 if (phys > ULONG_MAX) {
186 if (EFI_MEMORY_WB & efi_mem_attributes(phys)) { 184 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
187 *virt = (void __iomem *)phys_to_virt(phys); 185 return AE_BAD_PARAMETER;
188 } else {
189 *virt = ioremap(phys, size);
190 }
191 } else {
192 if (phys > ULONG_MAX) {
193 printk(KERN_ERR PREFIX "Cannot map memory that high\n");
194 return AE_BAD_PARAMETER;
195 }
196 /*
197 * ioremap checks to ensure this is in reserved space
198 */
199 *virt = ioremap((unsigned long)phys, size);
200 } 186 }
187 /*
188 * ioremap checks to ensure this is in reserved space
189 */
190 *virt = ioremap((unsigned long)phys, size);
201 191
202 if (!*virt) 192 if (!*virt)
203 return AE_NO_MEMORY; 193 return AE_NO_MEMORY;
@@ -409,18 +399,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
409{ 399{
410 u32 dummy; 400 u32 dummy;
411 void __iomem *virt_addr; 401 void __iomem *virt_addr;
412 int iomem = 0;
413 402
414 if (efi_enabled) { 403 virt_addr = ioremap(phys_addr, width);
415 if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
416 /* HACK ALERT! We can use readb/w/l on real memory too.. */
417 virt_addr = (void __iomem *)phys_to_virt(phys_addr);
418 } else {
419 iomem = 1;
420 virt_addr = ioremap(phys_addr, width);
421 }
422 } else
423 virt_addr = (void __iomem *)phys_to_virt(phys_addr);
424 if (!value) 404 if (!value)
425 value = &dummy; 405 value = &dummy;
426 406
@@ -438,10 +418,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
438 BUG(); 418 BUG();
439 } 419 }
440 420
441 if (efi_enabled) { 421 iounmap(virt_addr);
442 if (iomem)
443 iounmap(virt_addr);
444 }
445 422
446 return AE_OK; 423 return AE_OK;
447} 424}
@@ -450,18 +427,8 @@ acpi_status
450acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) 427acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
451{ 428{
452 void __iomem *virt_addr; 429 void __iomem *virt_addr;
453 int iomem = 0;
454 430
455 if (efi_enabled) { 431 virt_addr = ioremap(phys_addr, width);
456 if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) {
457 /* HACK ALERT! We can use writeb/w/l on real memory too */
458 virt_addr = (void __iomem *)phys_to_virt(phys_addr);
459 } else {
460 iomem = 1;
461 virt_addr = ioremap(phys_addr, width);
462 }
463 } else
464 virt_addr = (void __iomem *)phys_to_virt(phys_addr);
465 432
466 switch (width) { 433 switch (width) {
467 case 8: 434 case 8:
@@ -477,8 +444,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
477 BUG(); 444 BUG();
478 } 445 }
479 446
480 if (iomem) 447 iounmap(virt_addr);
481 iounmap(virt_addr);
482 448
483 return AE_OK; 449 return AE_OK;
484} 450}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 99a3a28594da..713b763884a9 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -246,7 +246,7 @@ static int acpi_processor_errata(struct acpi_processor *pr)
246} 246}
247 247
248/* -------------------------------------------------------------------------- 248/* --------------------------------------------------------------------------
249 Common ACPI processor fucntions 249 Common ACPI processor functions
250 -------------------------------------------------------------------------- */ 250 -------------------------------------------------------------------------- */
251 251
252/* 252/*
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 31d4f3ffc265..7f37c7cc5ef1 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -587,7 +587,8 @@ int __init acpi_table_init(void)
587 return -ENODEV; 587 return -ENODEV;
588 } 588 }
589 589
590 rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys); 590 rsdp = (struct acpi_table_rsdp *)__acpi_map_table(rsdp_phys,
591 sizeof(struct acpi_table_rsdp));
591 if (!rsdp) { 592 if (!rsdp) {
592 printk(KERN_WARNING PREFIX "Unable to map RSDP\n"); 593 printk(KERN_WARNING PREFIX "Unable to map RSDP\n");
593 return -ENODEV; 594 return -ENODEV;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e57ac5a43246..875ae7699025 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -400,13 +400,16 @@ config BLK_DEV_RAM_SIZE
400 8192. 400 8192.
401 401
402config BLK_DEV_INITRD 402config BLK_DEV_INITRD
403 bool "Initial RAM disk (initrd) support" 403 bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support"
404 help 404 help
405 The initial RAM disk is a RAM disk that is loaded by the boot loader 405 The initial RAM filesystem is a ramfs which is loaded by the
406 (loadlin or lilo) and that is mounted as root before the normal boot 406 boot loader (loadlin or lilo) and that is mounted as root
407 procedure. It is typically used to load modules needed to mount the 407 before the normal boot procedure. It is typically used to
408 "real" root file system, etc. See <file:Documentation/initrd.txt> 408 load modules needed to mount the "real" root file system,
409 for details. 409 etc. See <file:Documentation/initrd.txt> for details.
410
411 If RAM disk support (BLK_DEV_RAM) is also included, this
412 also enables initial RAM disk (initrd) support.
410 413
411 414
412config CDROM_PKTCDVD 415config CDROM_PKTCDVD
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 32fea55fac48..393b86a3dbf8 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -211,9 +211,7 @@ aoeblk_gdalloc(void *vp)
211 return; 211 return;
212 } 212 }
213 213
214 d->bufpool = mempool_create(MIN_BUFS, 214 d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
215 mempool_alloc_slab, mempool_free_slab,
216 buf_pool_cache);
217 if (d->bufpool == NULL) { 215 if (d->bufpool == NULL) {
218 printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool " 216 printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool "
219 "for %ld.%ld\n", d->aoemajor, d->aoeminor); 217 "for %ld.%ld\n", d->aoemajor, d->aoeminor);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 840919bba76c..d3ad9081697e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -250,6 +250,18 @@ static int irqdma_allocated;
250#include <linux/cdrom.h> /* for the compatibility eject ioctl */ 250#include <linux/cdrom.h> /* for the compatibility eject ioctl */
251#include <linux/completion.h> 251#include <linux/completion.h>
252 252
253/*
254 * Interrupt freeing also means /proc VFS work - dont do it
255 * from interrupt context. We push this work into keventd:
256 */
257static void fd_free_irq_fn(void *data)
258{
259 fd_free_irq();
260}
261
262static DECLARE_WORK(fd_free_irq_work, fd_free_irq_fn, NULL);
263
264
253static struct request *current_req; 265static struct request *current_req;
254static struct request_queue *floppy_queue; 266static struct request_queue *floppy_queue;
255static void do_fd_request(request_queue_t * q); 267static void do_fd_request(request_queue_t * q);
@@ -4433,6 +4445,13 @@ static int floppy_grab_irq_and_dma(void)
4433 return 0; 4445 return 0;
4434 } 4446 }
4435 spin_unlock_irqrestore(&floppy_usage_lock, flags); 4447 spin_unlock_irqrestore(&floppy_usage_lock, flags);
4448
4449 /*
4450 * We might have scheduled a free_irq(), wait it to
4451 * drain first:
4452 */
4453 flush_scheduled_work();
4454
4436 if (fd_request_irq()) { 4455 if (fd_request_irq()) {
4437 DPRINT("Unable to grab IRQ%d for the floppy driver\n", 4456 DPRINT("Unable to grab IRQ%d for the floppy driver\n",
4438 FLOPPY_IRQ); 4457 FLOPPY_IRQ);
@@ -4522,7 +4541,7 @@ static void floppy_release_irq_and_dma(void)
4522 if (irqdma_allocated) { 4541 if (irqdma_allocated) {
4523 fd_disable_dma(); 4542 fd_disable_dma();
4524 fd_free_dma(); 4543 fd_free_dma();
4525 fd_free_irq(); 4544 schedule_work(&fd_free_irq_work);
4526 irqdma_allocated = 0; 4545 irqdma_allocated = 0;
4527 } 4546 }
4528 set_dor(0, ~0, 8); 4547 set_dor(0, ~0, 8);
@@ -4633,6 +4652,8 @@ void cleanup_module(void)
4633 /* eject disk, if any */ 4652 /* eject disk, if any */
4634 fd_eject(0); 4653 fd_eject(0);
4635 4654
4655 flush_scheduled_work(); /* fd_free_irq() might be pending */
4656
4636 wait_for_completion(&device_release); 4657 wait_for_completion(&device_release);
4637} 4658}
4638 4659
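
The floppy change above defers fd_free_irq() to keventd because freeing an IRQ also does /proc VFS work and may sleep, which the releasing context cannot do. A minimal, era-appropriate sketch of that deferral pattern, using the three-argument DECLARE_WORK() form of 2.6.16-era kernels; MY_IRQ, my_dev_id, my_interrupt, my_release and my_reacquire are placeholder names, not identifiers from this patch:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

#define MY_IRQ 6                        /* placeholder IRQ number */
static void *my_dev_id;                 /* placeholder dev_id cookie */

static irqreturn_t my_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        return IRQ_HANDLED;
}

/* free_irq() may sleep, so push it into process context via keventd. */
static void my_free_irq_fn(void *data)
{
        free_irq(MY_IRQ, my_dev_id);
}
static DECLARE_WORK(my_free_irq_work, my_free_irq_fn, NULL);

static void my_release(void)
{
        /* safe from contexts that must not sleep: just queue the work */
        schedule_work(&my_free_irq_work);
}

static int my_reacquire(void)
{
        /* drain any pending free_irq() before grabbing the IRQ again */
        flush_scheduled_work();
        return request_irq(MY_IRQ, my_interrupt, SA_INTERRUPT,
                           "my_dev", my_dev_id);
}
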
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 74bf0255e98f..9c3b94e8f03b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -839,7 +839,9 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
839 839
840 set_blocksize(bdev, lo_blocksize); 840 set_blocksize(bdev, lo_blocksize);
841 841
842 kernel_thread(loop_thread, lo, CLONE_KERNEL); 842 error = kernel_thread(loop_thread, lo, CLONE_KERNEL);
843 if (error < 0)
844 goto out_putf;
843 wait_for_completion(&lo->lo_done); 845 wait_for_completion(&lo->lo_done);
844 return 0; 846 return 0;
845 847
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 1d261f985f31..a04f60693c39 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -230,16 +230,6 @@ static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
230 return 1; 230 return 1;
231} 231}
232 232
233static void *pkt_rb_alloc(gfp_t gfp_mask, void *data)
234{
235 return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
236}
237
238static void pkt_rb_free(void *ptr, void *data)
239{
240 kfree(ptr);
241}
242
243static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node) 233static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
244{ 234{
245 struct rb_node *n = rb_next(&node->rb_node); 235 struct rb_node *n = rb_next(&node->rb_node);
@@ -2073,16 +2063,6 @@ static int pkt_close(struct inode *inode, struct file *file)
2073} 2063}
2074 2064
2075 2065
2076static void *psd_pool_alloc(gfp_t gfp_mask, void *data)
2077{
2078 return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
2079}
2080
2081static void psd_pool_free(void *ptr, void *data)
2082{
2083 kfree(ptr);
2084}
2085
2086static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err) 2066static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
2087{ 2067{
2088 struct packet_stacked_data *psd = bio->bi_private; 2068 struct packet_stacked_data *psd = bio->bi_private;
@@ -2475,7 +2455,8 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
2475 if (!pd) 2455 if (!pd)
2476 return ret; 2456 return ret;
2477 2457
2478 pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL); 2458 pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
2459 sizeof(struct pkt_rb_node));
2479 if (!pd->rb_pool) 2460 if (!pd->rb_pool)
2480 goto out_mem; 2461 goto out_mem;
2481 2462
@@ -2639,7 +2620,8 @@ static int __init pkt_init(void)
2639{ 2620{
2640 int ret; 2621 int ret;
2641 2622
2642 psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL); 2623 psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
2624 sizeof(struct packet_stacked_data));
2643 if (!psd_pool) 2625 if (!psd_pool)
2644 return -ENOMEM; 2626 return -ENOMEM;
2645 2627
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 5980f3e886fc..facc3f1d9e37 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -187,6 +187,7 @@ config MOXA_SMARTIO
187config ISI 187config ISI
188 tristate "Multi-Tech multiport card support (EXPERIMENTAL)" 188 tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
189 depends on SERIAL_NONSTANDARD 189 depends on SERIAL_NONSTANDARD
190 select FW_LOADER
190 help 191 help
191 This is a driver for the Multi-Tech cards which provide several 192 This is a driver for the Multi-Tech cards which provide several
192 serial ports. The driver is experimental and can currently only be 193 serial ports. The driver is experimental and can currently only be
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 7c0684deea06..932feedda262 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -90,7 +90,7 @@ static unsigned int ipmi_poll(struct file *file, poll_table *wait)
90 90
91 spin_lock_irqsave(&priv->recv_msg_lock, flags); 91 spin_lock_irqsave(&priv->recv_msg_lock, flags);
92 92
93 if (! list_empty(&(priv->recv_msgs))) 93 if (!list_empty(&(priv->recv_msgs)))
94 mask |= (POLLIN | POLLRDNORM); 94 mask |= (POLLIN | POLLRDNORM);
95 95
96 spin_unlock_irqrestore(&priv->recv_msg_lock, flags); 96 spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
@@ -789,21 +789,53 @@ MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
789 " interface. Other values will set the major device number" 789 " interface. Other values will set the major device number"
790 " to that value."); 790 " to that value.");
791 791
792/* Keep track of the devices that are registered. */
793struct ipmi_reg_list {
794 dev_t dev;
795 struct list_head link;
796};
797static LIST_HEAD(reg_list);
798static DEFINE_MUTEX(reg_list_mutex);
799
792static struct class *ipmi_class; 800static struct class *ipmi_class;
793 801
794static void ipmi_new_smi(int if_num) 802static void ipmi_new_smi(int if_num, struct device *device)
795{ 803{
796 dev_t dev = MKDEV(ipmi_major, if_num); 804 dev_t dev = MKDEV(ipmi_major, if_num);
805 struct ipmi_reg_list *entry;
797 806
798 devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR, 807 devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR,
799 "ipmidev/%d", if_num); 808 "ipmidev/%d", if_num);
800 809
801 class_device_create(ipmi_class, NULL, dev, NULL, "ipmi%d", if_num); 810 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
811 if (!entry) {
812 printk(KERN_ERR "ipmi_devintf: Unable to create the"
813 " ipmi class device link\n");
814 return;
815 }
816 entry->dev = dev;
817
818 mutex_lock(&reg_list_mutex);
819 class_device_create(ipmi_class, NULL, dev, device, "ipmi%d", if_num);
820 list_add(&entry->link, &reg_list);
821 mutex_unlock(&reg_list_mutex);
802} 822}
803 823
804static void ipmi_smi_gone(int if_num) 824static void ipmi_smi_gone(int if_num)
805{ 825{
806 class_device_destroy(ipmi_class, MKDEV(ipmi_major, if_num)); 826 dev_t dev = MKDEV(ipmi_major, if_num);
827 struct ipmi_reg_list *entry;
828
829 mutex_lock(&reg_list_mutex);
830 list_for_each_entry(entry, &reg_list, link) {
831 if (entry->dev == dev) {
832 list_del(&entry->link);
833 kfree(entry);
834 break;
835 }
836 }
837 class_device_destroy(ipmi_class, dev);
838 mutex_unlock(&reg_list_mutex);
807 devfs_remove("ipmidev/%d", if_num); 839 devfs_remove("ipmidev/%d", if_num);
808} 840}
809 841
@@ -856,6 +888,14 @@ module_init(init_ipmi_devintf);
856 888
857static __exit void cleanup_ipmi(void) 889static __exit void cleanup_ipmi(void)
858{ 890{
891 struct ipmi_reg_list *entry, *entry2;
892 mutex_lock(&reg_list_mutex);
893 list_for_each_entry_safe(entry, entry2, &reg_list, link) {
894 list_del(&entry->link);
895 class_device_destroy(ipmi_class, entry->dev);
896 kfree(entry);
897 }
898 mutex_unlock(&reg_list_mutex);
859 class_destroy(ipmi_class); 899 class_destroy(ipmi_class);
860 ipmi_smi_watcher_unregister(&smi_watcher); 900 ipmi_smi_watcher_unregister(&smi_watcher);
861 devfs_remove(DEVICE_NAME); 901 devfs_remove(DEVICE_NAME);
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index abd4c5118a1b..b8fb87c6c29f 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -48,7 +48,7 @@
48 48
49#define PFX "IPMI message handler: " 49#define PFX "IPMI message handler: "
50 50
51#define IPMI_DRIVER_VERSION "38.0" 51#define IPMI_DRIVER_VERSION "39.0"
52 52
53static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); 53static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54static int ipmi_init_msghandler(void); 54static int ipmi_init_msghandler(void);
@@ -162,6 +162,28 @@ struct ipmi_proc_entry
162}; 162};
163#endif 163#endif
164 164
165struct bmc_device
166{
167 struct platform_device *dev;
168 struct ipmi_device_id id;
169 unsigned char guid[16];
170 int guid_set;
171
172 struct kref refcount;
173
174 /* bmc device attributes */
175 struct device_attribute device_id_attr;
176 struct device_attribute provides_dev_sdrs_attr;
177 struct device_attribute revision_attr;
178 struct device_attribute firmware_rev_attr;
179 struct device_attribute version_attr;
180 struct device_attribute add_dev_support_attr;
181 struct device_attribute manufacturer_id_attr;
182 struct device_attribute product_id_attr;
183 struct device_attribute guid_attr;
184 struct device_attribute aux_firmware_rev_attr;
185};
186
165#define IPMI_IPMB_NUM_SEQ 64 187#define IPMI_IPMB_NUM_SEQ 64
166#define IPMI_MAX_CHANNELS 16 188#define IPMI_MAX_CHANNELS 16
167struct ipmi_smi 189struct ipmi_smi
@@ -178,9 +200,8 @@ struct ipmi_smi
178 /* Used for wake ups at startup. */ 200 /* Used for wake ups at startup. */
179 wait_queue_head_t waitq; 201 wait_queue_head_t waitq;
180 202
181 /* The IPMI version of the BMC on the other end. */ 203 struct bmc_device *bmc;
182 unsigned char version_major; 204 char *my_dev_name;
183 unsigned char version_minor;
184 205
185 /* This is the lower-layer's sender routine. */ 206 /* This is the lower-layer's sender routine. */
186 struct ipmi_smi_handlers *handlers; 207 struct ipmi_smi_handlers *handlers;
@@ -194,6 +215,9 @@ struct ipmi_smi
194 struct ipmi_proc_entry *proc_entries; 215 struct ipmi_proc_entry *proc_entries;
195#endif 216#endif
196 217
218 /* Driver-model device for the system interface. */
219 struct device *si_dev;
220
197 /* A table of sequence numbers for this interface. We use the 221 /* A table of sequence numbers for this interface. We use the
198 sequence numbers for IPMB messages that go out of the 222 sequence numbers for IPMB messages that go out of the
199 interface to match them up with their responses. A routine 223 interface to match them up with their responses. A routine
@@ -312,6 +336,7 @@ struct ipmi_smi
312 /* Events that were received with the proper format. */ 336 /* Events that were received with the proper format. */
313 unsigned int events; 337 unsigned int events;
314}; 338};
339#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
315 340
316/* Used to mark an interface entry that cannot be used but is not a 341/* Used to mark an interface entry that cannot be used but is not a
317 * free entry, either, primarily used at creation and deletion time so 342 * free entry, either, primarily used at creation and deletion time so
@@ -320,6 +345,15 @@ struct ipmi_smi
320#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \ 345#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
321 || (i == IPMI_INVALID_INTERFACE_ENTRY)) 346 || (i == IPMI_INVALID_INTERFACE_ENTRY))
322 347
348/**
349 * The driver model view of the IPMI messaging driver.
350 */
351static struct device_driver ipmidriver = {
352 .name = "ipmi",
353 .bus = &platform_bus_type
354};
355static DEFINE_MUTEX(ipmidriver_mutex);
356
323#define MAX_IPMI_INTERFACES 4 357#define MAX_IPMI_INTERFACES 4
324static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES]; 358static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
325 359
@@ -393,7 +427,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
393 if (IPMI_INVALID_INTERFACE(intf)) 427 if (IPMI_INVALID_INTERFACE(intf))
394 continue; 428 continue;
395 spin_unlock_irqrestore(&interfaces_lock, flags); 429 spin_unlock_irqrestore(&interfaces_lock, flags);
396 watcher->new_smi(i); 430 watcher->new_smi(i, intf->si_dev);
397 spin_lock_irqsave(&interfaces_lock, flags); 431 spin_lock_irqsave(&interfaces_lock, flags);
398 } 432 }
399 spin_unlock_irqrestore(&interfaces_lock, flags); 433 spin_unlock_irqrestore(&interfaces_lock, flags);
@@ -409,14 +443,14 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
409} 443}
410 444
411static void 445static void
412call_smi_watchers(int i) 446call_smi_watchers(int i, struct device *dev)
413{ 447{
414 struct ipmi_smi_watcher *w; 448 struct ipmi_smi_watcher *w;
415 449
416 down_read(&smi_watchers_sem); 450 down_read(&smi_watchers_sem);
417 list_for_each_entry(w, &smi_watchers, link) { 451 list_for_each_entry(w, &smi_watchers, link) {
418 if (try_module_get(w->owner)) { 452 if (try_module_get(w->owner)) {
419 w->new_smi(i); 453 w->new_smi(i, dev);
420 module_put(w->owner); 454 module_put(w->owner);
421 } 455 }
422 } 456 }
@@ -844,8 +878,8 @@ void ipmi_get_version(ipmi_user_t user,
844 unsigned char *major, 878 unsigned char *major,
845 unsigned char *minor) 879 unsigned char *minor)
846{ 880{
847 *major = user->intf->version_major; 881 *major = ipmi_version_major(&user->intf->bmc->id);
848 *minor = user->intf->version_minor; 882 *minor = ipmi_version_minor(&user->intf->bmc->id);
849} 883}
850 884
851int ipmi_set_my_address(ipmi_user_t user, 885int ipmi_set_my_address(ipmi_user_t user,
@@ -1553,7 +1587,8 @@ static int version_file_read_proc(char *page, char **start, off_t off,
1553 ipmi_smi_t intf = data; 1587 ipmi_smi_t intf = data;
1554 1588
1555 return sprintf(out, "%d.%d\n", 1589 return sprintf(out, "%d.%d\n",
1556 intf->version_major, intf->version_minor); 1590 ipmi_version_major(&intf->bmc->id),
1591 ipmi_version_minor(&intf->bmc->id));
1557} 1592}
1558 1593
1559static int stat_file_read_proc(char *page, char **start, off_t off, 1594static int stat_file_read_proc(char *page, char **start, off_t off,
@@ -1712,6 +1747,470 @@ static void remove_proc_entries(ipmi_smi_t smi)
1712#endif /* CONFIG_PROC_FS */ 1747#endif /* CONFIG_PROC_FS */
1713} 1748}
1714 1749
1750static int __find_bmc_guid(struct device *dev, void *data)
1751{
1752 unsigned char *id = data;
1753 struct bmc_device *bmc = dev_get_drvdata(dev);
1754 return memcmp(bmc->guid, id, 16) == 0;
1755}
1756
1757static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1758 unsigned char *guid)
1759{
1760 struct device *dev;
1761
1762 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1763 if (dev)
1764 return dev_get_drvdata(dev);
1765 else
1766 return NULL;
1767}
1768
1769struct prod_dev_id {
1770 unsigned int product_id;
1771 unsigned char device_id;
1772};
1773
1774static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1775{
1776 struct prod_dev_id *id = data;
1777 struct bmc_device *bmc = dev_get_drvdata(dev);
1778
1779 return (bmc->id.product_id == id->product_id
1780 && bmc->id.product_id == id->product_id
1781 && bmc->id.device_id == id->device_id);
1782}
1783
1784static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1785 struct device_driver *drv,
1786 unsigned char product_id, unsigned char device_id)
1787{
1788 struct prod_dev_id id = {
1789 .product_id = product_id,
1790 .device_id = device_id,
1791 };
1792 struct device *dev;
1793
1794 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
1795 if (dev)
1796 return dev_get_drvdata(dev);
1797 else
1798 return NULL;
1799}
1800
1801static ssize_t device_id_show(struct device *dev,
1802 struct device_attribute *attr,
1803 char *buf)
1804{
1805 struct bmc_device *bmc = dev_get_drvdata(dev);
1806
1807 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
1808}
1809
1810static ssize_t provides_dev_sdrs_show(struct device *dev,
1811 struct device_attribute *attr,
1812 char *buf)
1813{
1814 struct bmc_device *bmc = dev_get_drvdata(dev);
1815
1816 return snprintf(buf, 10, "%u\n",
1817 bmc->id.device_revision && 0x80 >> 7);
1818}
1819
1820static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
1821 char *buf)
1822{
1823 struct bmc_device *bmc = dev_get_drvdata(dev);
1824
1825 return snprintf(buf, 20, "%u\n",
1826 bmc->id.device_revision && 0x0F);
1827}
1828
1829static ssize_t firmware_rev_show(struct device *dev,
1830 struct device_attribute *attr,
1831 char *buf)
1832{
1833 struct bmc_device *bmc = dev_get_drvdata(dev);
1834
1835 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
1836 bmc->id.firmware_revision_2);
1837}
1838
1839static ssize_t ipmi_version_show(struct device *dev,
1840 struct device_attribute *attr,
1841 char *buf)
1842{
1843 struct bmc_device *bmc = dev_get_drvdata(dev);
1844
1845 return snprintf(buf, 20, "%u.%u\n",
1846 ipmi_version_major(&bmc->id),
1847 ipmi_version_minor(&bmc->id));
1848}
1849
1850static ssize_t add_dev_support_show(struct device *dev,
1851 struct device_attribute *attr,
1852 char *buf)
1853{
1854 struct bmc_device *bmc = dev_get_drvdata(dev);
1855
1856 return snprintf(buf, 10, "0x%02x\n",
1857 bmc->id.additional_device_support);
1858}
1859
1860static ssize_t manufacturer_id_show(struct device *dev,
1861 struct device_attribute *attr,
1862 char *buf)
1863{
1864 struct bmc_device *bmc = dev_get_drvdata(dev);
1865
1866 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
1867}
1868
1869static ssize_t product_id_show(struct device *dev,
1870 struct device_attribute *attr,
1871 char *buf)
1872{
1873 struct bmc_device *bmc = dev_get_drvdata(dev);
1874
1875 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
1876}
1877
1878static ssize_t aux_firmware_rev_show(struct device *dev,
1879 struct device_attribute *attr,
1880 char *buf)
1881{
1882 struct bmc_device *bmc = dev_get_drvdata(dev);
1883
1884 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
1885 bmc->id.aux_firmware_revision[3],
1886 bmc->id.aux_firmware_revision[2],
1887 bmc->id.aux_firmware_revision[1],
1888 bmc->id.aux_firmware_revision[0]);
1889}
1890
1891static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
1892 char *buf)
1893{
1894 struct bmc_device *bmc = dev_get_drvdata(dev);
1895
1896 return snprintf(buf, 100, "%Lx%Lx\n",
1897 (long long) bmc->guid[0],
1898 (long long) bmc->guid[8]);
1899}
1900
1901static void
1902cleanup_bmc_device(struct kref *ref)
1903{
1904 struct bmc_device *bmc;
1905
1906 bmc = container_of(ref, struct bmc_device, refcount);
1907
1908 device_remove_file(&bmc->dev->dev,
1909 &bmc->device_id_attr);
1910 device_remove_file(&bmc->dev->dev,
1911 &bmc->provides_dev_sdrs_attr);
1912 device_remove_file(&bmc->dev->dev,
1913 &bmc->revision_attr);
1914 device_remove_file(&bmc->dev->dev,
1915 &bmc->firmware_rev_attr);
1916 device_remove_file(&bmc->dev->dev,
1917 &bmc->version_attr);
1918 device_remove_file(&bmc->dev->dev,
1919 &bmc->add_dev_support_attr);
1920 device_remove_file(&bmc->dev->dev,
1921 &bmc->manufacturer_id_attr);
1922 device_remove_file(&bmc->dev->dev,
1923 &bmc->product_id_attr);
1924 if (bmc->id.aux_firmware_revision_set)
1925 device_remove_file(&bmc->dev->dev,
1926 &bmc->aux_firmware_rev_attr);
1927 if (bmc->guid_set)
1928 device_remove_file(&bmc->dev->dev,
1929 &bmc->guid_attr);
1930 platform_device_unregister(bmc->dev);
1931 kfree(bmc);
1932}
1933
1934static void ipmi_bmc_unregister(ipmi_smi_t intf)
1935{
1936 struct bmc_device *bmc = intf->bmc;
1937
1938 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
1939 if (intf->my_dev_name) {
1940 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
1941 kfree(intf->my_dev_name);
1942 intf->my_dev_name = NULL;
1943 }
1944
1945 mutex_lock(&ipmidriver_mutex);
1946 kref_put(&bmc->refcount, cleanup_bmc_device);
1947 mutex_unlock(&ipmidriver_mutex);
1948}
1949
1950static int ipmi_bmc_register(ipmi_smi_t intf)
1951{
1952 int rv;
1953 struct bmc_device *bmc = intf->bmc;
1954 struct bmc_device *old_bmc;
1955 int size;
1956 char dummy[1];
1957
1958 mutex_lock(&ipmidriver_mutex);
1959
1960 /*
1961 * Try to find if there is an bmc_device struct
1962 * representing the interfaced BMC already
1963 */
1964 if (bmc->guid_set)
1965 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
1966 else
1967 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
1968 bmc->id.product_id,
1969 bmc->id.device_id);
1970
1971 /*
1972 * If there is already an bmc_device, free the new one,
1973 * otherwise register the new BMC device
1974 */
1975 if (old_bmc) {
1976 kfree(bmc);
1977 intf->bmc = old_bmc;
1978 bmc = old_bmc;
1979
1980 kref_get(&bmc->refcount);
1981 mutex_unlock(&ipmidriver_mutex);
1982
1983 printk(KERN_INFO
1984 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
1985 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
1986 bmc->id.manufacturer_id,
1987 bmc->id.product_id,
1988 bmc->id.device_id);
1989 } else {
1990 bmc->dev = platform_device_alloc("ipmi_bmc",
1991 bmc->id.device_id);
1992 if (! bmc->dev) {
1993 printk(KERN_ERR
1994 "ipmi_msghandler:"
1995 " Unable to allocate platform device\n");
1996 return -ENOMEM;
1997 }
1998 bmc->dev->dev.driver = &ipmidriver;
1999 dev_set_drvdata(&bmc->dev->dev, bmc);
2000 kref_init(&bmc->refcount);
2001
2002 rv = platform_device_register(bmc->dev);
2003 mutex_unlock(&ipmidriver_mutex);
2004 if (rv) {
2005 printk(KERN_ERR
2006 "ipmi_msghandler:"
2007 " Unable to register bmc device: %d\n",
2008 rv);
2009 /* Don't go to out_err, you can only do that if
2010 the device is registered already. */
2011 return rv;
2012 }
2013
2014 bmc->device_id_attr.attr.name = "device_id";
2015 bmc->device_id_attr.attr.owner = THIS_MODULE;
2016 bmc->device_id_attr.attr.mode = S_IRUGO;
2017 bmc->device_id_attr.show = device_id_show;
2018
2019 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2020 bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE;
2021 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2022 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2023
2024
2025 bmc->revision_attr.attr.name = "revision";
2026 bmc->revision_attr.attr.owner = THIS_MODULE;
2027 bmc->revision_attr.attr.mode = S_IRUGO;
2028 bmc->revision_attr.show = revision_show;
2029
2030 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2031 bmc->firmware_rev_attr.attr.owner = THIS_MODULE;
2032 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2033 bmc->firmware_rev_attr.show = firmware_rev_show;
2034
2035 bmc->version_attr.attr.name = "ipmi_version";
2036 bmc->version_attr.attr.owner = THIS_MODULE;
2037 bmc->version_attr.attr.mode = S_IRUGO;
2038 bmc->version_attr.show = ipmi_version_show;
2039
2040 bmc->add_dev_support_attr.attr.name
2041 = "additional_device_support";
2042 bmc->add_dev_support_attr.attr.owner = THIS_MODULE;
2043 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2044 bmc->add_dev_support_attr.show = add_dev_support_show;
2045
2046 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2047 bmc->manufacturer_id_attr.attr.owner = THIS_MODULE;
2048 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2049 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2050
2051 bmc->product_id_attr.attr.name = "product_id";
2052 bmc->product_id_attr.attr.owner = THIS_MODULE;
2053 bmc->product_id_attr.attr.mode = S_IRUGO;
2054 bmc->product_id_attr.show = product_id_show;
2055
2056 bmc->guid_attr.attr.name = "guid";
2057 bmc->guid_attr.attr.owner = THIS_MODULE;
2058 bmc->guid_attr.attr.mode = S_IRUGO;
2059 bmc->guid_attr.show = guid_show;
2060
2061 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2062 bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE;
2063 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2064 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2065
2066 device_create_file(&bmc->dev->dev,
2067 &bmc->device_id_attr);
2068 device_create_file(&bmc->dev->dev,
2069 &bmc->provides_dev_sdrs_attr);
2070 device_create_file(&bmc->dev->dev,
2071 &bmc->revision_attr);
2072 device_create_file(&bmc->dev->dev,
2073 &bmc->firmware_rev_attr);
2074 device_create_file(&bmc->dev->dev,
2075 &bmc->version_attr);
2076 device_create_file(&bmc->dev->dev,
2077 &bmc->add_dev_support_attr);
2078 device_create_file(&bmc->dev->dev,
2079 &bmc->manufacturer_id_attr);
2080 device_create_file(&bmc->dev->dev,
2081 &bmc->product_id_attr);
2082 if (bmc->id.aux_firmware_revision_set)
2083 device_create_file(&bmc->dev->dev,
2084 &bmc->aux_firmware_rev_attr);
2085 if (bmc->guid_set)
2086 device_create_file(&bmc->dev->dev,
2087 &bmc->guid_attr);
2088
2089 printk(KERN_INFO
2090 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2091 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2092 bmc->id.manufacturer_id,
2093 bmc->id.product_id,
2094 bmc->id.device_id);
2095 }
2096
2097 /*
2098 * create symlink from system interface device to bmc device
2099 * and back.
2100 */
2101 rv = sysfs_create_link(&intf->si_dev->kobj,
2102 &bmc->dev->dev.kobj, "bmc");
2103 if (rv) {
2104 printk(KERN_ERR
2105 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2106 rv);
2107 goto out_err;
2108 }
2109
2110 size = snprintf(dummy, 0, "ipmi%d", intf->intf_num);
2111 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2112 if (!intf->my_dev_name) {
2113 rv = -ENOMEM;
2114 printk(KERN_ERR
2115 "ipmi_msghandler: allocate link from BMC: %d\n",
2116 rv);
2117 goto out_err;
2118 }
2119 snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num);
2120
2121 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2122 intf->my_dev_name);
2123 if (rv) {
2124 kfree(intf->my_dev_name);
2125 intf->my_dev_name = NULL;
2126 printk(KERN_ERR
2127 "ipmi_msghandler:"
2128 " Unable to create symlink to bmc: %d\n",
2129 rv);
2130 goto out_err;
2131 }
2132
2133 return 0;
2134
2135out_err:
2136 ipmi_bmc_unregister(intf);
2137 return rv;
2138}
2139
2140static int
2141send_guid_cmd(ipmi_smi_t intf, int chan)
2142{
2143 struct kernel_ipmi_msg msg;
2144 struct ipmi_system_interface_addr si;
2145
2146 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2147 si.channel = IPMI_BMC_CHANNEL;
2148 si.lun = 0;
2149
2150 msg.netfn = IPMI_NETFN_APP_REQUEST;
2151 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2152 msg.data = NULL;
2153 msg.data_len = 0;
2154 return i_ipmi_request(NULL,
2155 intf,
2156 (struct ipmi_addr *) &si,
2157 0,
2158 &msg,
2159 intf,
2160 NULL,
2161 NULL,
2162 0,
2163 intf->channels[0].address,
2164 intf->channels[0].lun,
2165 -1, 0);
2166}
2167
2168static void
2169guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2170{
2171 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2172 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2173 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2174 /* Not for me */
2175 return;
2176
2177 if (msg->msg.data[0] != 0) {
2178 /* Error from getting the GUID, the BMC doesn't have one. */
2179 intf->bmc->guid_set = 0;
2180 goto out;
2181 }
2182
2183 if (msg->msg.data_len < 17) {
2184 intf->bmc->guid_set = 0;
2185 printk(KERN_WARNING PFX
2186 "guid_handler: The GUID response from the BMC was too"
2187 " short, it was %d but should have been 17. Assuming"
2188 " GUID is not available.\n",
2189 msg->msg.data_len);
2190 goto out;
2191 }
2192
2193 memcpy(intf->bmc->guid, msg->msg.data, 16);
2194 intf->bmc->guid_set = 1;
2195 out:
2196 wake_up(&intf->waitq);
2197}
2198
2199static void
2200get_guid(ipmi_smi_t intf)
2201{
2202 int rv;
2203
2204 intf->bmc->guid_set = 0x2;
2205 intf->null_user_handler = guid_handler;
2206 rv = send_guid_cmd(intf, 0);
2207 if (rv)
2208 /* Send failed, no GUID available. */
2209 intf->bmc->guid_set = 0;
2210 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2211 intf->null_user_handler = NULL;
2212}
2213
1715static int 2214static int
1716send_channel_info_cmd(ipmi_smi_t intf, int chan) 2215send_channel_info_cmd(ipmi_smi_t intf, int chan)
1717{ 2216{
@@ -1804,8 +2303,8 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
1804 2303
1805int ipmi_register_smi(struct ipmi_smi_handlers *handlers, 2304int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1806 void *send_info, 2305 void *send_info,
1807 unsigned char version_major, 2306 struct ipmi_device_id *device_id,
1808 unsigned char version_minor, 2307 struct device *si_dev,
1809 unsigned char slave_addr, 2308 unsigned char slave_addr,
1810 ipmi_smi_t *new_intf) 2309 ipmi_smi_t *new_intf)
1811{ 2310{
@@ -1813,7 +2312,11 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1813 int rv; 2312 int rv;
1814 ipmi_smi_t intf; 2313 ipmi_smi_t intf;
1815 unsigned long flags; 2314 unsigned long flags;
2315 int version_major;
2316 int version_minor;
1816 2317
2318 version_major = ipmi_version_major(device_id);
2319 version_minor = ipmi_version_minor(device_id);
1817 2320
1818 /* Make sure the driver is actually initialized, this handles 2321 /* Make sure the driver is actually initialized, this handles
1819 problems with initialization order. */ 2322 problems with initialization order. */
@@ -1831,10 +2334,15 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1831 if (!intf) 2334 if (!intf)
1832 return -ENOMEM; 2335 return -ENOMEM;
1833 memset(intf, 0, sizeof(*intf)); 2336 memset(intf, 0, sizeof(*intf));
2337 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2338 if (!intf->bmc) {
2339 kfree(intf);
2340 return -ENOMEM;
2341 }
1834 intf->intf_num = -1; 2342 intf->intf_num = -1;
1835 kref_init(&intf->refcount); 2343 kref_init(&intf->refcount);
1836 intf->version_major = version_major; 2344 intf->bmc->id = *device_id;
1837 intf->version_minor = version_minor; 2345 intf->si_dev = si_dev;
1838 for (j = 0; j < IPMI_MAX_CHANNELS; j++) { 2346 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
1839 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR; 2347 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
1840 intf->channels[j].lun = 2; 2348 intf->channels[j].lun = 2;
@@ -1884,6 +2392,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1884 caller before sending any messages with it. */ 2392 caller before sending any messages with it. */
1885 *new_intf = intf; 2393 *new_intf = intf;
1886 2394
2395 get_guid(intf);
2396
1887 if ((version_major > 1) 2397 if ((version_major > 1)
1888 || ((version_major == 1) && (version_minor >= 5))) 2398 || ((version_major == 1) && (version_minor >= 5)))
1889 { 2399 {
@@ -1898,6 +2408,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1898 /* Wait for the channel info to be read. */ 2408 /* Wait for the channel info to be read. */
1899 wait_event(intf->waitq, 2409 wait_event(intf->waitq,
1900 intf->curr_channel >= IPMI_MAX_CHANNELS); 2410 intf->curr_channel >= IPMI_MAX_CHANNELS);
2411 intf->null_user_handler = NULL;
1901 } else { 2412 } else {
1902 /* Assume a single IPMB channel at zero. */ 2413 /* Assume a single IPMB channel at zero. */
1903 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 2414 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
@@ -1907,6 +2418,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1907 if (rv == 0) 2418 if (rv == 0)
1908 rv = add_proc_entries(intf, i); 2419 rv = add_proc_entries(intf, i);
1909 2420
2421 rv = ipmi_bmc_register(intf);
2422
1910 out: 2423 out:
1911 if (rv) { 2424 if (rv) {
1912 if (intf->proc_dir) 2425 if (intf->proc_dir)
@@ -1921,7 +2434,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1921 spin_lock_irqsave(&interfaces_lock, flags); 2434 spin_lock_irqsave(&interfaces_lock, flags);
1922 ipmi_interfaces[i] = intf; 2435 ipmi_interfaces[i] = intf;
1923 spin_unlock_irqrestore(&interfaces_lock, flags); 2436 spin_unlock_irqrestore(&interfaces_lock, flags);
1924 call_smi_watchers(i); 2437 call_smi_watchers(i, intf->si_dev);
1925 } 2438 }
1926 2439
1927 return rv; 2440 return rv;
@@ -1933,6 +2446,8 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
1933 struct ipmi_smi_watcher *w; 2446 struct ipmi_smi_watcher *w;
1934 unsigned long flags; 2447 unsigned long flags;
1935 2448
2449 ipmi_bmc_unregister(intf);
2450
1936 spin_lock_irqsave(&interfaces_lock, flags); 2451 spin_lock_irqsave(&interfaces_lock, flags);
1937 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 2452 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1938 if (ipmi_interfaces[i] == intf) { 2453 if (ipmi_interfaces[i] == intf) {
@@ -3196,10 +3711,17 @@ static struct notifier_block panic_block = {
3196static int ipmi_init_msghandler(void) 3711static int ipmi_init_msghandler(void)
3197{ 3712{
3198 int i; 3713 int i;
3714 int rv;
3199 3715
3200 if (initialized) 3716 if (initialized)
3201 return 0; 3717 return 0;
3202 3718
3719 rv = driver_register(&ipmidriver);
3720 if (rv) {
3721 printk(KERN_ERR PFX "Could not register IPMI driver\n");
3722 return rv;
3723 }
3724
3203 printk(KERN_INFO "ipmi message handler version " 3725 printk(KERN_INFO "ipmi message handler version "
3204 IPMI_DRIVER_VERSION "\n"); 3726 IPMI_DRIVER_VERSION "\n");
3205 3727
@@ -3256,6 +3778,8 @@ static __exit void cleanup_ipmi(void)
3256 remove_proc_entry(proc_ipmi_root->name, &proc_root); 3778 remove_proc_entry(proc_ipmi_root->name, &proc_root);
3257#endif /* CONFIG_PROC_FS */ 3779#endif /* CONFIG_PROC_FS */
3258 3780
3781 driver_unregister(&ipmidriver);
3782
3259 initialized = 0; 3783 initialized = 0;
3260 3784
3261 /* Check for buffer leaks. */ 3785 /* Check for buffer leaks. */
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index e8ed26b77d4c..786a2802ca34 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -464,7 +464,7 @@ static void ipmi_poweroff_function (void)
464 464
465/* Wait for an IPMI interface to be installed, the first one installed 465/* Wait for an IPMI interface to be installed, the first one installed
466 will be grabbed by this code and used to perform the powerdown. */ 466 will be grabbed by this code and used to perform the powerdown. */
467static void ipmi_po_new_smi(int if_num) 467static void ipmi_po_new_smi(int if_num, struct device *device)
468{ 468{
469 struct ipmi_system_interface_addr smi_addr; 469 struct ipmi_system_interface_addr smi_addr;
470 struct kernel_ipmi_msg send_msg; 470 struct kernel_ipmi_msg send_msg;
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index e59b638766ef..12f858dc9994 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -52,6 +52,7 @@
52#include <linux/pci.h> 52#include <linux/pci.h>
53#include <linux/ioport.h> 53#include <linux/ioport.h>
54#include <linux/notifier.h> 54#include <linux/notifier.h>
55#include <linux/mutex.h>
55#include <linux/kthread.h> 56#include <linux/kthread.h>
56#include <asm/irq.h> 57#include <asm/irq.h>
57#ifdef CONFIG_HIGH_RES_TIMERS 58#ifdef CONFIG_HIGH_RES_TIMERS
@@ -109,21 +110,15 @@ enum si_intf_state {
109enum si_type { 110enum si_type {
110 SI_KCS, SI_SMIC, SI_BT 111 SI_KCS, SI_SMIC, SI_BT
111}; 112};
113static char *si_to_str[] = { "KCS", "SMIC", "BT" };
112 114
113struct ipmi_device_id { 115#define DEVICE_NAME "ipmi_si"
114 unsigned char device_id; 116
115 unsigned char device_revision; 117static struct device_driver ipmi_driver =
116 unsigned char firmware_revision_1; 118{
117 unsigned char firmware_revision_2; 119 .name = DEVICE_NAME,
118 unsigned char ipmi_version; 120 .bus = &platform_bus_type
119 unsigned char additional_device_support; 121};
120 unsigned char manufacturer_id[3];
121 unsigned char product_id[2];
122 unsigned char aux_firmware_revision[4];
123} __attribute__((packed));
124
125#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
126#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
127 122
128struct smi_info 123struct smi_info
129{ 124{
@@ -147,6 +142,9 @@ struct smi_info
147 int (*irq_setup)(struct smi_info *info); 142 int (*irq_setup)(struct smi_info *info);
148 void (*irq_cleanup)(struct smi_info *info); 143 void (*irq_cleanup)(struct smi_info *info);
149 unsigned int io_size; 144 unsigned int io_size;
145 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
146 void (*addr_source_cleanup)(struct smi_info *info);
147 void *addr_source_data;
150 148
151 /* Per-OEM handler, called from handle_flags(). 149 /* Per-OEM handler, called from handle_flags().
152 Returns 1 when handle_flags() needs to be re-run 150 Returns 1 when handle_flags() needs to be re-run
@@ -203,8 +201,17 @@ struct smi_info
203 interrupts. */ 201 interrupts. */
204 int interrupt_disabled; 202 int interrupt_disabled;
205 203
204 /* From the get device id response... */
206 struct ipmi_device_id device_id; 205 struct ipmi_device_id device_id;
207 206
207 /* Driver model stuff. */
208 struct device *dev;
209 struct platform_device *pdev;
210
211 /* True if we allocated the device, false if it came from
212 * someplace else (like PCI). */
213 int dev_registered;
214
208 /* Slave address, could be reported from DMI. */ 215 /* Slave address, could be reported from DMI. */
209 unsigned char slave_addr; 216 unsigned char slave_addr;
210 217
@@ -224,8 +231,12 @@ struct smi_info
224 unsigned long incoming_messages; 231 unsigned long incoming_messages;
225 232
226 struct task_struct *thread; 233 struct task_struct *thread;
234
235 struct list_head link;
227}; 236};
228 237
238static int try_smi_init(struct smi_info *smi);
239
229static struct notifier_block *xaction_notifier_list; 240static struct notifier_block *xaction_notifier_list;
230static int register_xaction_notifier(struct notifier_block * nb) 241static int register_xaction_notifier(struct notifier_block * nb)
231{ 242{
@@ -271,13 +282,13 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
271 spin_lock(&(smi_info->msg_lock)); 282 spin_lock(&(smi_info->msg_lock));
272 283
273 /* Pick the high priority queue first. */ 284 /* Pick the high priority queue first. */
274 if (! list_empty(&(smi_info->hp_xmit_msgs))) { 285 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
275 entry = smi_info->hp_xmit_msgs.next; 286 entry = smi_info->hp_xmit_msgs.next;
276 } else if (! list_empty(&(smi_info->xmit_msgs))) { 287 } else if (!list_empty(&(smi_info->xmit_msgs))) {
277 entry = smi_info->xmit_msgs.next; 288 entry = smi_info->xmit_msgs.next;
278 } 289 }
279 290
280 if (! entry) { 291 if (!entry) {
281 smi_info->curr_msg = NULL; 292 smi_info->curr_msg = NULL;
282 rv = SI_SM_IDLE; 293 rv = SI_SM_IDLE;
283 } else { 294 } else {
@@ -344,7 +355,7 @@ static void start_clear_flags(struct smi_info *smi_info)
344 memory, we will re-enable the interrupt. */ 355 memory, we will re-enable the interrupt. */
345static inline void disable_si_irq(struct smi_info *smi_info) 356static inline void disable_si_irq(struct smi_info *smi_info)
346{ 357{
347 if ((smi_info->irq) && (! smi_info->interrupt_disabled)) { 358 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
348 disable_irq_nosync(smi_info->irq); 359 disable_irq_nosync(smi_info->irq);
349 smi_info->interrupt_disabled = 1; 360 smi_info->interrupt_disabled = 1;
350 } 361 }
@@ -375,7 +386,7 @@ static void handle_flags(struct smi_info *smi_info)
375 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { 386 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
376 /* Messages available. */ 387 /* Messages available. */
377 smi_info->curr_msg = ipmi_alloc_smi_msg(); 388 smi_info->curr_msg = ipmi_alloc_smi_msg();
378 if (! smi_info->curr_msg) { 389 if (!smi_info->curr_msg) {
379 disable_si_irq(smi_info); 390 disable_si_irq(smi_info);
380 smi_info->si_state = SI_NORMAL; 391 smi_info->si_state = SI_NORMAL;
381 return; 392 return;
@@ -394,7 +405,7 @@ static void handle_flags(struct smi_info *smi_info)
394 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { 405 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
395 /* Events available. */ 406 /* Events available. */
396 smi_info->curr_msg = ipmi_alloc_smi_msg(); 407 smi_info->curr_msg = ipmi_alloc_smi_msg();
397 if (! smi_info->curr_msg) { 408 if (!smi_info->curr_msg) {
398 disable_si_irq(smi_info); 409 disable_si_irq(smi_info);
399 smi_info->si_state = SI_NORMAL; 410 smi_info->si_state = SI_NORMAL;
400 return; 411 return;
@@ -430,7 +441,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
430#endif 441#endif
431 switch (smi_info->si_state) { 442 switch (smi_info->si_state) {
432 case SI_NORMAL: 443 case SI_NORMAL:
433 if (! smi_info->curr_msg) 444 if (!smi_info->curr_msg)
434 break; 445 break;
435 446
436 smi_info->curr_msg->rsp_size 447 smi_info->curr_msg->rsp_size
@@ -880,7 +891,7 @@ static void smi_timeout(unsigned long data)
880 891
881 smi_info->last_timeout_jiffies = jiffies_now; 892 smi_info->last_timeout_jiffies = jiffies_now;
882 893
883 if ((smi_info->irq) && (! smi_info->interrupt_disabled)) { 894 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
884 /* Running with interrupts, only do long timeouts. */ 895 /* Running with interrupts, only do long timeouts. */
885 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 896 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
886 spin_lock_irqsave(&smi_info->count_lock, flags); 897 spin_lock_irqsave(&smi_info->count_lock, flags);
@@ -974,15 +985,10 @@ static struct ipmi_smi_handlers handlers =
974 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */ 985 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
975 986
976#define SI_MAX_PARMS 4 987#define SI_MAX_PARMS 4
977#define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2) 988static LIST_HEAD(smi_infos);
978static struct smi_info *smi_infos[SI_MAX_DRIVERS] = 989static DECLARE_MUTEX(smi_infos_lock);
979{ NULL, NULL, NULL, NULL }; 990static int smi_num; /* Used to sequence the SMIs */
980 991
981#define DEVICE_NAME "ipmi_si"
982
983#define DEFAULT_KCS_IO_PORT 0xca2
984#define DEFAULT_SMIC_IO_PORT 0xca9
985#define DEFAULT_BT_IO_PORT 0xe4
986#define DEFAULT_REGSPACING 1 992#define DEFAULT_REGSPACING 1
987 993
988static int si_trydefaults = 1; 994static int si_trydefaults = 1;
@@ -1053,38 +1059,23 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1053 " by interface number."); 1059 " by interface number.");
1054 1060
1055 1061
1062#define IPMI_IO_ADDR_SPACE 0
1056#define IPMI_MEM_ADDR_SPACE 1 1063#define IPMI_MEM_ADDR_SPACE 1
1057#define IPMI_IO_ADDR_SPACE 2 1064static char *addr_space_to_str[] = { "I/O", "memory" };
1058 1065
1059#if defined(CONFIG_ACPI) || defined(CONFIG_DMI) || defined(CONFIG_PCI) 1066static void std_irq_cleanup(struct smi_info *info)
1060static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
1061{ 1067{
1062 int i; 1068 if (info->si_type == SI_BT)
1063 1069 /* Disable the interrupt in the BT interface. */
1064 for (i = 0; i < SI_MAX_PARMS; ++i) { 1070 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1065 /* Don't check our address. */ 1071 free_irq(info->irq, info);
1066 if (i == intf)
1067 continue;
1068 if (si_type[i] != NULL) {
1069 if ((addr_space == IPMI_MEM_ADDR_SPACE &&
1070 base_addr == addrs[i]) ||
1071 (addr_space == IPMI_IO_ADDR_SPACE &&
1072 base_addr == ports[i]))
1073 return 0;
1074 }
1075 else
1076 break;
1077 }
1078
1079 return 1;
1080} 1072}
1081#endif
1082 1073
1083static int std_irq_setup(struct smi_info *info) 1074static int std_irq_setup(struct smi_info *info)
1084{ 1075{
1085 int rv; 1076 int rv;
1086 1077
1087 if (! info->irq) 1078 if (!info->irq)
1088 return 0; 1079 return 0;
1089 1080
1090 if (info->si_type == SI_BT) { 1081 if (info->si_type == SI_BT) {
@@ -1093,7 +1084,7 @@ static int std_irq_setup(struct smi_info *info)
1093 SA_INTERRUPT, 1084 SA_INTERRUPT,
1094 DEVICE_NAME, 1085 DEVICE_NAME,
1095 info); 1086 info);
1096 if (! rv) 1087 if (!rv)
1097 /* Enable the interrupt in the BT interface. */ 1088 /* Enable the interrupt in the BT interface. */
1098 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 1089 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1099 IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 1090 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
@@ -1110,88 +1101,77 @@ static int std_irq_setup(struct smi_info *info)
1110 DEVICE_NAME, info->irq); 1101 DEVICE_NAME, info->irq);
1111 info->irq = 0; 1102 info->irq = 0;
1112 } else { 1103 } else {
1104 info->irq_cleanup = std_irq_cleanup;
1113 printk(" Using irq %d\n", info->irq); 1105 printk(" Using irq %d\n", info->irq);
1114 } 1106 }
1115 1107
1116 return rv; 1108 return rv;
1117} 1109}
1118 1110
1119static void std_irq_cleanup(struct smi_info *info)
1120{
1121 if (! info->irq)
1122 return;
1123
1124 if (info->si_type == SI_BT)
1125 /* Disable the interrupt in the BT interface. */
1126 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1127 free_irq(info->irq, info);
1128}
1129
1130static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) 1111static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1131{ 1112{
1132 unsigned int *addr = io->info; 1113 unsigned int addr = io->addr_data;
1133 1114
1134 return inb((*addr)+(offset*io->regspacing)); 1115 return inb(addr + (offset * io->regspacing));
1135} 1116}
1136 1117
1137static void port_outb(struct si_sm_io *io, unsigned int offset, 1118static void port_outb(struct si_sm_io *io, unsigned int offset,
1138 unsigned char b) 1119 unsigned char b)
1139{ 1120{
1140 unsigned int *addr = io->info; 1121 unsigned int addr = io->addr_data;
1141 1122
1142 outb(b, (*addr)+(offset * io->regspacing)); 1123 outb(b, addr + (offset * io->regspacing));
1143} 1124}
1144 1125
1145static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) 1126static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1146{ 1127{
1147 unsigned int *addr = io->info; 1128 unsigned int addr = io->addr_data;
1148 1129
1149 return (inw((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; 1130 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1150} 1131}
1151 1132
1152static void port_outw(struct si_sm_io *io, unsigned int offset, 1133static void port_outw(struct si_sm_io *io, unsigned int offset,
1153 unsigned char b) 1134 unsigned char b)
1154{ 1135{
1155 unsigned int *addr = io->info; 1136 unsigned int addr = io->addr_data;
1156 1137
1157 outw(b << io->regshift, (*addr)+(offset * io->regspacing)); 1138 outw(b << io->regshift, addr + (offset * io->regspacing));
1158} 1139}
1159 1140
1160static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) 1141static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1161{ 1142{
1162 unsigned int *addr = io->info; 1143 unsigned int addr = io->addr_data;
1163 1144
1164 return (inl((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; 1145 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1165} 1146}
1166 1147
1167static void port_outl(struct si_sm_io *io, unsigned int offset, 1148static void port_outl(struct si_sm_io *io, unsigned int offset,
1168 unsigned char b) 1149 unsigned char b)
1169{ 1150{
1170 unsigned int *addr = io->info; 1151 unsigned int addr = io->addr_data;
1171 1152
1172 outl(b << io->regshift, (*addr)+(offset * io->regspacing)); 1153 outl(b << io->regshift, addr+(offset * io->regspacing));
1173} 1154}
1174 1155
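The accessor pairs above collapse the per-interface register layout into one address computation: regspacing is the byte stride between successive BMC registers, regshift moves the data byte into place when the register is wider than eight bits, and addr_data now carries the base address directly instead of a pointer into a module-parameter array. A minimal standalone sketch of the same arithmetic, assuming the si_sm_io field meanings above (the function name is illustrative, not part of the driver):

    /* Sketch: mirror port_inl() above.  inl() comes from <asm/io.h>. */
    static unsigned char sample_wide_read(unsigned long base, unsigned int offset,
                                          int regspacing, int regshift)
    {
            unsigned int port = base + offset * regspacing; /* stride between registers */
            unsigned int word = inl(port);                  /* one 32-bit access */

            return (word >> regshift) & 0xff;               /* pick out the data byte */
    }

So an interface with regspacing=4 and regshift=0 reads register offset 0 at base+0 and register offset 1 at base+4, one byte per 32-bit access.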
1175static void port_cleanup(struct smi_info *info) 1156static void port_cleanup(struct smi_info *info)
1176{ 1157{
1177 unsigned int *addr = info->io.info; 1158 unsigned int addr = info->io.addr_data;
1178 int mapsize; 1159 int mapsize;
1179 1160
1180 if (addr && (*addr)) { 1161 if (addr) {
1181 mapsize = ((info->io_size * info->io.regspacing) 1162 mapsize = ((info->io_size * info->io.regspacing)
1182 - (info->io.regspacing - info->io.regsize)); 1163 - (info->io.regspacing - info->io.regsize));
1183 1164
1184 release_region (*addr, mapsize); 1165 release_region (addr, mapsize);
1185 } 1166 }
1186 kfree(info);
1187} 1167}
1188 1168
1189static int port_setup(struct smi_info *info) 1169static int port_setup(struct smi_info *info)
1190{ 1170{
1191 unsigned int *addr = info->io.info; 1171 unsigned int addr = info->io.addr_data;
1192 int mapsize; 1172 int mapsize;
1193 1173
1194 if (! addr || (! *addr)) 1174 if (!addr)
1195 return -ENODEV; 1175 return -ENODEV;
1196 1176
1197 info->io_cleanup = port_cleanup; 1177 info->io_cleanup = port_cleanup;
@@ -1225,51 +1205,11 @@ static int port_setup(struct smi_info *info)
1225 mapsize = ((info->io_size * info->io.regspacing) 1205 mapsize = ((info->io_size * info->io.regspacing)
1226 - (info->io.regspacing - info->io.regsize)); 1206 - (info->io.regspacing - info->io.regsize));
1227 1207
1228 if (request_region(*addr, mapsize, DEVICE_NAME) == NULL) 1208 if (request_region(addr, mapsize, DEVICE_NAME) == NULL)
1229 return -EIO; 1209 return -EIO;
1230 return 0; 1210 return 0;
1231} 1211}
1232 1212
1233static int try_init_port(int intf_num, struct smi_info **new_info)
1234{
1235 struct smi_info *info;
1236
1237 if (! ports[intf_num])
1238 return -ENODEV;
1239
1240 if (! is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
1241 ports[intf_num]))
1242 return -ENODEV;
1243
1244 info = kmalloc(sizeof(*info), GFP_KERNEL);
1245 if (! info) {
1246 printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
1247 return -ENOMEM;
1248 }
1249 memset(info, 0, sizeof(*info));
1250
1251 info->io_setup = port_setup;
1252 info->io.info = &(ports[intf_num]);
1253 info->io.addr = NULL;
1254 info->io.regspacing = regspacings[intf_num];
1255 if (! info->io.regspacing)
1256 info->io.regspacing = DEFAULT_REGSPACING;
1257 info->io.regsize = regsizes[intf_num];
1258 if (! info->io.regsize)
1259 info->io.regsize = DEFAULT_REGSPACING;
1260 info->io.regshift = regshifts[intf_num];
1261 info->irq = 0;
1262 info->irq_setup = NULL;
1263 *new_info = info;
1264
1265 if (si_type[intf_num] == NULL)
1266 si_type[intf_num] = "kcs";
1267
1268 printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
1269 si_type[intf_num], ports[intf_num]);
1270 return 0;
1271}
1272
1273static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) 1213static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1274{ 1214{
1275 return readb((io->addr)+(offset * io->regspacing)); 1215 return readb((io->addr)+(offset * io->regspacing));
@@ -1321,7 +1261,7 @@ static void mem_outq(struct si_sm_io *io, unsigned int offset,
1321 1261
1322static void mem_cleanup(struct smi_info *info) 1262static void mem_cleanup(struct smi_info *info)
1323{ 1263{
1324 unsigned long *addr = info->io.info; 1264 unsigned long addr = info->io.addr_data;
1325 int mapsize; 1265 int mapsize;
1326 1266
1327 if (info->io.addr) { 1267 if (info->io.addr) {
@@ -1330,17 +1270,16 @@ static void mem_cleanup(struct smi_info *info)
1330 mapsize = ((info->io_size * info->io.regspacing) 1270 mapsize = ((info->io_size * info->io.regspacing)
1331 - (info->io.regspacing - info->io.regsize)); 1271 - (info->io.regspacing - info->io.regsize));
1332 1272
1333 release_mem_region(*addr, mapsize); 1273 release_mem_region(addr, mapsize);
1334 } 1274 }
1335 kfree(info);
1336} 1275}
1337 1276
1338static int mem_setup(struct smi_info *info) 1277static int mem_setup(struct smi_info *info)
1339{ 1278{
1340 unsigned long *addr = info->io.info; 1279 unsigned long addr = info->io.addr_data;
1341 int mapsize; 1280 int mapsize;
1342 1281
1343 if (! addr || (! *addr)) 1282 if (!addr)
1344 return -ENODEV; 1283 return -ENODEV;
1345 1284
1346 info->io_cleanup = mem_cleanup; 1285 info->io_cleanup = mem_cleanup;
@@ -1380,57 +1319,83 @@ static int mem_setup(struct smi_info *info)
1380 mapsize = ((info->io_size * info->io.regspacing) 1319 mapsize = ((info->io_size * info->io.regspacing)
1381 - (info->io.regspacing - info->io.regsize)); 1320 - (info->io.regspacing - info->io.regsize));
1382 1321
1383 if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL) 1322 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1384 return -EIO; 1323 return -EIO;
1385 1324
1386 info->io.addr = ioremap(*addr, mapsize); 1325 info->io.addr = ioremap(addr, mapsize);
1387 if (info->io.addr == NULL) { 1326 if (info->io.addr == NULL) {
1388 release_mem_region(*addr, mapsize); 1327 release_mem_region(addr, mapsize);
1389 return -EIO; 1328 return -EIO;
1390 } 1329 }
1391 return 0; 1330 return 0;
1392} 1331}
1393 1332
1394static int try_init_mem(int intf_num, struct smi_info **new_info) 1333
1334static __devinit void hardcode_find_bmc(void)
1395{ 1335{
1336 int i;
1396 struct smi_info *info; 1337 struct smi_info *info;
1397 1338
1398 if (! addrs[intf_num]) 1339 for (i = 0; i < SI_MAX_PARMS; i++) {
1399 return -ENODEV; 1340 if (!ports[i] && !addrs[i])
1341 continue;
1400 1342
1401 if (! is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE, 1343 info = kzalloc(sizeof(*info), GFP_KERNEL);
1402 addrs[intf_num])) 1344 if (!info)
1403 return -ENODEV; 1345 return;
1404 1346
1405 info = kmalloc(sizeof(*info), GFP_KERNEL); 1347 info->addr_source = "hardcoded";
1406 if (! info) {
1407 printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
1408 return -ENOMEM;
1409 }
1410 memset(info, 0, sizeof(*info));
1411 1348
1412 info->io_setup = mem_setup; 1349 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1413 info->io.info = &addrs[intf_num]; 1350 info->si_type = SI_KCS;
1414 info->io.addr = NULL; 1351 } else if (strcmp(si_type[i], "smic") == 0) {
1415 info->io.regspacing = regspacings[intf_num]; 1352 info->si_type = SI_SMIC;
1416 if (! info->io.regspacing) 1353 } else if (strcmp(si_type[i], "bt") == 0) {
1417 info->io.regspacing = DEFAULT_REGSPACING; 1354 info->si_type = SI_BT;
1418 info->io.regsize = regsizes[intf_num]; 1355 } else {
1419 if (! info->io.regsize) 1356 printk(KERN_WARNING
1420 info->io.regsize = DEFAULT_REGSPACING; 1357 "ipmi_si: Interface type specified "
1421 info->io.regshift = regshifts[intf_num]; 1358 "for interface %d, was invalid: %s\n",
1422 info->irq = 0; 1359 i, si_type[i]);
1423 info->irq_setup = NULL; 1360 kfree(info);
1424 *new_info = info; 1361 continue;
1362 }
1425 1363
1426 if (si_type[intf_num] == NULL) 1364 if (ports[i]) {
1427 si_type[intf_num] = "kcs"; 1365 /* An I/O port */
1366 info->io_setup = port_setup;
1367 info->io.addr_data = ports[i];
1368 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1369 } else if (addrs[i]) {
1370 /* A memory port */
1371 info->io_setup = mem_setup;
1372 info->io.addr_data = addrs[i];
1373 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1374 } else {
1375 printk(KERN_WARNING
1376 "ipmi_si: Interface type specified "
1377 "for interface %d, "
1378 "but port and address were not set or "
1379 "set to zero.\n", i);
1380 kfree(info);
1381 continue;
1382 }
1428 1383
1429 printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n", 1384 info->io.addr = NULL;
1430 si_type[intf_num], addrs[intf_num]); 1385 info->io.regspacing = regspacings[i];
1431 return 0; 1386 if (!info->io.regspacing)
1432} 1387 info->io.regspacing = DEFAULT_REGSPACING;
1388 info->io.regsize = regsizes[i];
1389 if (!info->io.regsize)
1390 info->io.regsize = DEFAULT_REGSPACING;
1391 info->io.regshift = regshifts[i];
1392 info->irq = irqs[i];
1393 if (info->irq)
1394 info->irq_setup = std_irq_setup;
1433 1395
1396 try_smi_init(info);
1397 }
1398}
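hardcode_find_bmc() walks the module-parameter arrays (ports, addrs, si_type, regspacings, regsizes, regshifts, irqs) and builds one smi_info per populated slot, so user-specified interfaces are registered through the same try_smi_init() path as discovered ones. The array declarations sit earlier in the file, outside this hunk; purely as a hedged illustration, per-interface parameters of this kind are typically declared along these lines (names and permissions here are assumptions, not copied from the driver):

    #include <linux/moduleparam.h>

    /* Illustrative only: a parameter array of the kind
     * hardcode_find_bmc() consumes. */
    static unsigned int ports[SI_MAX_PARMS];
    static int num_ports;
    module_param_array(ports, uint, &num_ports, 0);
    MODULE_PARM_DESC(ports, "I/O port of each hard-coded interface, 0 = unused");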
1434 1399
1435#ifdef CONFIG_ACPI 1400#ifdef CONFIG_ACPI
1436 1401
@@ -1470,11 +1435,19 @@ static u32 ipmi_acpi_gpe(void *context)
1470 return ACPI_INTERRUPT_HANDLED; 1435 return ACPI_INTERRUPT_HANDLED;
1471} 1436}
1472 1437
1438static void acpi_gpe_irq_cleanup(struct smi_info *info)
1439{
1440 if (!info->irq)
1441 return;
1442
1443 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1444}
1445
1473static int acpi_gpe_irq_setup(struct smi_info *info) 1446static int acpi_gpe_irq_setup(struct smi_info *info)
1474{ 1447{
1475 acpi_status status; 1448 acpi_status status;
1476 1449
1477 if (! info->irq) 1450 if (!info->irq)
1478 return 0; 1451 return 0;
1479 1452
1480 /* FIXME - is level triggered right? */ 1453 /* FIXME - is level triggered right? */
@@ -1491,19 +1464,12 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1491 info->irq = 0; 1464 info->irq = 0;
1492 return -EINVAL; 1465 return -EINVAL;
1493 } else { 1466 } else {
1467 info->irq_cleanup = acpi_gpe_irq_cleanup;
1494 printk(" Using ACPI GPE %d\n", info->irq); 1468 printk(" Using ACPI GPE %d\n", info->irq);
1495 return 0; 1469 return 0;
1496 } 1470 }
1497} 1471}
1498 1472
1499static void acpi_gpe_irq_cleanup(struct smi_info *info)
1500{
1501 if (! info->irq)
1502 return;
1503
1504 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1505}
1506
1507/* 1473/*
1508 * Defined at 1474 * Defined at
1509 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf 1475 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
@@ -1546,28 +1512,12 @@ struct SPMITable {
1546 s8 spmi_id[1]; /* A '\0' terminated array starts here. */ 1512 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
1547}; 1513};
1548 1514
1549static int try_init_acpi(int intf_num, struct smi_info **new_info) 1515static __devinit int try_init_acpi(struct SPMITable *spmi)
1550{ 1516{
1551 struct smi_info *info; 1517 struct smi_info *info;
1552 acpi_status status;
1553 struct SPMITable *spmi;
1554 char *io_type; 1518 char *io_type;
1555 u8 addr_space; 1519 u8 addr_space;
1556 1520
1557 if (acpi_disabled)
1558 return -ENODEV;
1559
1560 if (acpi_failure)
1561 return -ENODEV;
1562
1563 status = acpi_get_firmware_table("SPMI", intf_num+1,
1564 ACPI_LOGICAL_ADDRESSING,
1565 (struct acpi_table_header **) &spmi);
1566 if (status != AE_OK) {
1567 acpi_failure = 1;
1568 return -ENODEV;
1569 }
1570
1571 if (spmi->IPMIlegacy != 1) { 1521 if (spmi->IPMIlegacy != 1) {
1572 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); 1522 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1573 return -ENODEV; 1523 return -ENODEV;
@@ -1577,47 +1527,42 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info)
1577 addr_space = IPMI_MEM_ADDR_SPACE; 1527 addr_space = IPMI_MEM_ADDR_SPACE;
1578 else 1528 else
1579 addr_space = IPMI_IO_ADDR_SPACE; 1529 addr_space = IPMI_IO_ADDR_SPACE;
1580 if (! is_new_interface(-1, addr_space, spmi->addr.address)) 1530
1581 return -ENODEV; 1531 info = kzalloc(sizeof(*info), GFP_KERNEL);
1532 if (!info) {
1533 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1534 return -ENOMEM;
1535 }
1536
1537 info->addr_source = "ACPI";
1582 1538
1583 /* Figure out the interface type. */ 1539 /* Figure out the interface type. */
1584 switch (spmi->InterfaceType) 1540 switch (spmi->InterfaceType)
1585 { 1541 {
1586 case 1: /* KCS */ 1542 case 1: /* KCS */
1587 si_type[intf_num] = "kcs"; 1543 info->si_type = SI_KCS;
1588 break; 1544 break;
1589
1590 case 2: /* SMIC */ 1545 case 2: /* SMIC */
1591 si_type[intf_num] = "smic"; 1546 info->si_type = SI_SMIC;
1592 break; 1547 break;
1593
1594 case 3: /* BT */ 1548 case 3: /* BT */
1595 si_type[intf_num] = "bt"; 1549 info->si_type = SI_BT;
1596 break; 1550 break;
1597
1598 default: 1551 default:
1599 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", 1552 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1600 spmi->InterfaceType); 1553 spmi->InterfaceType);
1554 kfree(info);
1601 return -EIO; 1555 return -EIO;
1602 } 1556 }
1603 1557
1604 info = kmalloc(sizeof(*info), GFP_KERNEL);
1605 if (! info) {
1606 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1607 return -ENOMEM;
1608 }
1609 memset(info, 0, sizeof(*info));
1610
1611 if (spmi->InterruptType & 1) { 1558 if (spmi->InterruptType & 1) {
1612 /* We've got a GPE interrupt. */ 1559 /* We've got a GPE interrupt. */
1613 info->irq = spmi->GPE; 1560 info->irq = spmi->GPE;
1614 info->irq_setup = acpi_gpe_irq_setup; 1561 info->irq_setup = acpi_gpe_irq_setup;
1615 info->irq_cleanup = acpi_gpe_irq_cleanup;
1616 } else if (spmi->InterruptType & 2) { 1562 } else if (spmi->InterruptType & 2) {
1617 /* We've got an APIC/SAPIC interrupt. */ 1563 /* We've got an APIC/SAPIC interrupt. */
1618 info->irq = spmi->GlobalSystemInterrupt; 1564 info->irq = spmi->GlobalSystemInterrupt;
1619 info->irq_setup = std_irq_setup; 1565 info->irq_setup = std_irq_setup;
1620 info->irq_cleanup = std_irq_cleanup;
1621 } else { 1566 } else {
1622 /* Use the default interrupt setting. */ 1567 /* Use the default interrupt setting. */
1623 info->irq = 0; 1568 info->irq = 0;
@@ -1626,43 +1571,60 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info)
1626 1571
1627 if (spmi->addr.register_bit_width) { 1572 if (spmi->addr.register_bit_width) {
1628 /* A (hopefully) properly formed register bit width. */ 1573 /* A (hopefully) properly formed register bit width. */
1629 regspacings[intf_num] = spmi->addr.register_bit_width / 8;
1630 info->io.regspacing = spmi->addr.register_bit_width / 8; 1574 info->io.regspacing = spmi->addr.register_bit_width / 8;
1631 } else { 1575 } else {
1632 regspacings[intf_num] = DEFAULT_REGSPACING;
1633 info->io.regspacing = DEFAULT_REGSPACING; 1576 info->io.regspacing = DEFAULT_REGSPACING;
1634 } 1577 }
1635 regsizes[intf_num] = regspacings[intf_num]; 1578 info->io.regsize = info->io.regspacing;
1636 info->io.regsize = regsizes[intf_num]; 1579 info->io.regshift = spmi->addr.register_bit_offset;
1637 regshifts[intf_num] = spmi->addr.register_bit_offset;
1638 info->io.regshift = regshifts[intf_num];
1639 1580
1640 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 1581 if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1641 io_type = "memory"; 1582 io_type = "memory";
1642 info->io_setup = mem_setup; 1583 info->io_setup = mem_setup;
 1643 addrs[intf_num] = spmi->addr.address; 1584 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1644 info->io.info = &(addrs[intf_num]);
1645 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 1585 } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1646 io_type = "I/O"; 1586 io_type = "I/O";
1647 info->io_setup = port_setup; 1587 info->io_setup = port_setup;
 1648 ports[intf_num] = spmi->addr.address; 1588 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1649 info->io.info = &(ports[intf_num]);
1650 } else { 1589 } else {
1651 kfree(info); 1590 kfree(info);
1652 printk("ipmi_si: Unknown ACPI I/O Address type\n"); 1591 printk("ipmi_si: Unknown ACPI I/O Address type\n");
1653 return -EIO; 1592 return -EIO;
1654 } 1593 }
1594 info->io.addr_data = spmi->addr.address;
1655 1595
1656 *new_info = info; 1596 try_smi_init(info);
1657 1597
1658 printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
1659 si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
1660 return 0; 1598 return 0;
1661} 1599}
1600
1601static __devinit void acpi_find_bmc(void)
1602{
1603 acpi_status status;
1604 struct SPMITable *spmi;
1605 int i;
1606
1607 if (acpi_disabled)
1608 return;
1609
1610 if (acpi_failure)
1611 return;
1612
1613 for (i = 0; ; i++) {
1614 status = acpi_get_firmware_table("SPMI", i+1,
1615 ACPI_LOGICAL_ADDRESSING,
1616 (struct acpi_table_header **)
1617 &spmi);
1618 if (status != AE_OK)
1619 return;
1620
1621 try_init_acpi(spmi);
1622 }
1623}
1662#endif 1624#endif
1663 1625
1664#ifdef CONFIG_DMI 1626#ifdef CONFIG_DMI
1665typedef struct dmi_ipmi_data 1627struct dmi_ipmi_data
1666{ 1628{
1667 u8 type; 1629 u8 type;
1668 u8 addr_space; 1630 u8 addr_space;
@@ -1670,49 +1632,46 @@ typedef struct dmi_ipmi_data
1670 u8 irq; 1632 u8 irq;
1671 u8 offset; 1633 u8 offset;
1672 u8 slave_addr; 1634 u8 slave_addr;
1673} dmi_ipmi_data_t; 1635};
1674
1675static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
1676static int dmi_data_entries;
1677 1636
1678static int __init decode_dmi(struct dmi_header *dm, int intf_num) 1637static int __devinit decode_dmi(struct dmi_header *dm,
1638 struct dmi_ipmi_data *dmi)
1679{ 1639{
1680 u8 *data = (u8 *)dm; 1640 u8 *data = (u8 *)dm;
1681 unsigned long base_addr; 1641 unsigned long base_addr;
1682 u8 reg_spacing; 1642 u8 reg_spacing;
1683 u8 len = dm->length; 1643 u8 len = dm->length;
1684 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
1685 1644
1686 ipmi_data->type = data[4]; 1645 dmi->type = data[4];
1687 1646
1688 memcpy(&base_addr, data+8, sizeof(unsigned long)); 1647 memcpy(&base_addr, data+8, sizeof(unsigned long));
1689 if (len >= 0x11) { 1648 if (len >= 0x11) {
1690 if (base_addr & 1) { 1649 if (base_addr & 1) {
1691 /* I/O */ 1650 /* I/O */
1692 base_addr &= 0xFFFE; 1651 base_addr &= 0xFFFE;
1693 ipmi_data->addr_space = IPMI_IO_ADDR_SPACE; 1652 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1694 } 1653 }
1695 else { 1654 else {
1696 /* Memory */ 1655 /* Memory */
1697 ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE; 1656 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1698 } 1657 }
1699 /* If bit 4 of byte 0x10 is set, then the lsb for the address 1658 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1700 is odd. */ 1659 is odd. */
1701 ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); 1660 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1702 1661
1703 ipmi_data->irq = data[0x11]; 1662 dmi->irq = data[0x11];
1704 1663
1705 /* The top two bits of byte 0x10 hold the register spacing. */ 1664 /* The top two bits of byte 0x10 hold the register spacing. */
1706 reg_spacing = (data[0x10] & 0xC0) >> 6; 1665 reg_spacing = (data[0x10] & 0xC0) >> 6;
1707 switch(reg_spacing){ 1666 switch(reg_spacing){
1708 case 0x00: /* Byte boundaries */ 1667 case 0x00: /* Byte boundaries */
1709 ipmi_data->offset = 1; 1668 dmi->offset = 1;
1710 break; 1669 break;
1711 case 0x01: /* 32-bit boundaries */ 1670 case 0x01: /* 32-bit boundaries */
1712 ipmi_data->offset = 4; 1671 dmi->offset = 4;
1713 break; 1672 break;
1714 case 0x02: /* 16-byte boundaries */ 1673 case 0x02: /* 16-byte boundaries */
1715 ipmi_data->offset = 16; 1674 dmi->offset = 16;
1716 break; 1675 break;
1717 default: 1676 default:
1718 /* Some other interface, just ignore it. */ 1677 /* Some other interface, just ignore it. */
@@ -1726,217 +1685,227 @@ static int __init decode_dmi(struct dmi_header *dm, int intf_num)
1726 * wrong (and all that I have seen are I/O) so we just 1685 * wrong (and all that I have seen are I/O) so we just
1727 * ignore that bit and assume I/O. Systems that use 1686 * ignore that bit and assume I/O. Systems that use
1728 * memory should use the newer spec, anyway. */ 1687 * memory should use the newer spec, anyway. */
1729 ipmi_data->base_addr = base_addr & 0xfffe; 1688 dmi->base_addr = base_addr & 0xfffe;
1730 ipmi_data->addr_space = IPMI_IO_ADDR_SPACE; 1689 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1731 ipmi_data->offset = 1; 1690 dmi->offset = 1;
1732 }
1733
1734 ipmi_data->slave_addr = data[6];
1735
1736 if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
1737 dmi_data_entries++;
1738 return 0;
1739 } 1691 }
1740 1692
1741 memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t)); 1693 dmi->slave_addr = data[6];
1742 1694
1743 return -1; 1695 return 0;
1744} 1696}
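decode_dmi() pulls the interface description out of an SMBIOS type-38 (IPMI Device Information) record: bit 0 of the base address selects I/O versus memory space, bit 4 of byte 0x10 supplies the real least-significant address bit, and bits 7:6 of byte 0x10 encode the register spacing. The same bit handling as a compact standalone sketch (the struct and function names are illustrative):

    /* Sketch: decode the address and spacing bits of a type-38 record,
     * mirroring decode_dmi() above. */
    struct dmi_addr_bits {
            unsigned long base;     /* register base address     */
            int is_io;              /* 1 = I/O space, 0 = memory */
            int spacing;            /* bytes between registers   */
    };

    static int decode_addr_bits(unsigned long raw_base, unsigned char byte10,
                                struct dmi_addr_bits *out)
    {
            out->is_io = raw_base & 1;                      /* bit 0 picks the space  */
            out->base = (raw_base & ~1UL) |                 /* drop the selector bit  */
                        ((byte10 & 0x10) >> 4);             /* bit 4 is the real LSB  */

            switch ((byte10 & 0xC0) >> 6) {                 /* bits 7:6: reg spacing  */
            case 0: out->spacing = 1; break;                /* byte boundaries        */
            case 1: out->spacing = 4; break;                /* 32-bit boundaries      */
            case 2: out->spacing = 16; break;               /* 16-byte boundaries     */
            default: return -1;                             /* some other interface   */
            }
            return 0;
    }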
1745 1697
1746static void __init dmi_find_bmc(void) 1698static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1747{ 1699{
1748 struct dmi_device *dev = NULL; 1700 struct smi_info *info;
1749 int intf_num = 0;
1750
1751 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1752 if (intf_num >= SI_MAX_DRIVERS)
1753 break;
1754 1701
1755 decode_dmi((struct dmi_header *) dev->device_data, intf_num++); 1702 info = kzalloc(sizeof(*info), GFP_KERNEL);
1703 if (!info) {
1704 printk(KERN_ERR
1705 "ipmi_si: Could not allocate SI data\n");
1706 return;
1756 } 1707 }
1757}
1758
1759static int try_init_smbios(int intf_num, struct smi_info **new_info)
1760{
1761 struct smi_info *info;
1762 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
1763 char *io_type;
1764 1708
1765 if (intf_num >= dmi_data_entries) 1709 info->addr_source = "SMBIOS";
1766 return -ENODEV;
1767 1710
1768 switch (ipmi_data->type) { 1711 switch (ipmi_data->type) {
1769 case 0x01: /* KCS */ 1712 case 0x01: /* KCS */
1770 si_type[intf_num] = "kcs"; 1713 info->si_type = SI_KCS;
1771 break; 1714 break;
1772 case 0x02: /* SMIC */ 1715 case 0x02: /* SMIC */
1773 si_type[intf_num] = "smic"; 1716 info->si_type = SI_SMIC;
1774 break; 1717 break;
1775 case 0x03: /* BT */ 1718 case 0x03: /* BT */
1776 si_type[intf_num] = "bt"; 1719 info->si_type = SI_BT;
1777 break; 1720 break;
1778 default: 1721 default:
1779 return -EIO; 1722 return;
1780 }
1781
1782 info = kmalloc(sizeof(*info), GFP_KERNEL);
1783 if (! info) {
1784 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
1785 return -ENOMEM;
1786 } 1723 }
1787 memset(info, 0, sizeof(*info));
1788 1724
1789 if (ipmi_data->addr_space == 1) { 1725 switch (ipmi_data->addr_space) {
1790 io_type = "memory"; 1726 case IPMI_MEM_ADDR_SPACE:
1791 info->io_setup = mem_setup; 1727 info->io_setup = mem_setup;
1792 addrs[intf_num] = ipmi_data->base_addr; 1728 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1793 info->io.info = &(addrs[intf_num]); 1729 break;
1794 } else if (ipmi_data->addr_space == 2) { 1730
1795 io_type = "I/O"; 1731 case IPMI_IO_ADDR_SPACE:
1796 info->io_setup = port_setup; 1732 info->io_setup = port_setup;
1797 ports[intf_num] = ipmi_data->base_addr; 1733 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1798 info->io.info = &(ports[intf_num]); 1734 break;
1799 } else { 1735
1736 default:
1800 kfree(info); 1737 kfree(info);
1801 printk("ipmi_si: Unknown SMBIOS I/O Address type.\n"); 1738 printk(KERN_WARNING
1802 return -EIO; 1739 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
1740 ipmi_data->addr_space);
1741 return;
1803 } 1742 }
1743 info->io.addr_data = ipmi_data->base_addr;
1804 1744
1805 regspacings[intf_num] = ipmi_data->offset; 1745 info->io.regspacing = ipmi_data->offset;
1806 info->io.regspacing = regspacings[intf_num]; 1746 if (!info->io.regspacing)
1807 if (! info->io.regspacing)
1808 info->io.regspacing = DEFAULT_REGSPACING; 1747 info->io.regspacing = DEFAULT_REGSPACING;
1809 info->io.regsize = DEFAULT_REGSPACING; 1748 info->io.regsize = DEFAULT_REGSPACING;
1810 info->io.regshift = regshifts[intf_num]; 1749 info->io.regshift = 0;
1811 1750
1812 info->slave_addr = ipmi_data->slave_addr; 1751 info->slave_addr = ipmi_data->slave_addr;
1813 1752
1814 irqs[intf_num] = ipmi_data->irq; 1753 info->irq = ipmi_data->irq;
1754 if (info->irq)
1755 info->irq_setup = std_irq_setup;
1815 1756
1816 *new_info = info; 1757 try_smi_init(info);
1758}
1817 1759
1818 printk("ipmi_si: Found SMBIOS-specified state machine at %s" 1760static void __devinit dmi_find_bmc(void)
1819 " address 0x%lx, slave address 0x%x\n", 1761{
1820 io_type, (unsigned long)ipmi_data->base_addr, 1762 struct dmi_device *dev = NULL;
1821 ipmi_data->slave_addr); 1763 struct dmi_ipmi_data data;
1822 return 0; 1764 int rv;
1765
1766 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1767 rv = decode_dmi((struct dmi_header *) dev->device_data, &data);
1768 if (!rv)
1769 try_init_dmi(&data);
1770 }
1823} 1771}
1824#endif /* CONFIG_DMI */ 1772#endif /* CONFIG_DMI */
1825 1773
1826#ifdef CONFIG_PCI 1774#ifdef CONFIG_PCI
1827 1775
1828#define PCI_ERMC_CLASSCODE 0x0C0700 1776#define PCI_ERMC_CLASSCODE 0x0C0700
1777#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
1778#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
1779#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
1780#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
1781#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
1782
1829#define PCI_HP_VENDOR_ID 0x103C 1783#define PCI_HP_VENDOR_ID 0x103C
1830#define PCI_MMC_DEVICE_ID 0x121A 1784#define PCI_MMC_DEVICE_ID 0x121A
1831#define PCI_MMC_ADDR_CW 0x10 1785#define PCI_MMC_ADDR_CW 0x10
1832 1786
1833/* Avoid more than one attempt to probe pci smic. */ 1787static void ipmi_pci_cleanup(struct smi_info *info)
1834static int pci_smic_checked = 0; 1788{
1789 struct pci_dev *pdev = info->addr_source_data;
1790
1791 pci_disable_device(pdev);
1792}
1835 1793
1836static int find_pci_smic(int intf_num, struct smi_info **new_info) 1794static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
1795 const struct pci_device_id *ent)
1837{ 1796{
1838 struct smi_info *info; 1797 int rv;
1839 int error; 1798 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
1840 struct pci_dev *pci_dev = NULL; 1799 struct smi_info *info;
1841 u16 base_addr; 1800 int first_reg_offset = 0;
1842 int fe_rmc = 0;
1843 1801
1844 if (pci_smic_checked) 1802 info = kzalloc(sizeof(*info), GFP_KERNEL);
1845 return -ENODEV; 1803 if (!info)
 1845 return -ENODEV; 1804 return -ENOMEM;
1846 1805
1847 pci_smic_checked = 1; 1806 info->addr_source = "PCI";
1848 1807
1849 pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, NULL); 1808 switch (class_type) {
1850 if (! pci_dev) { 1809 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
1851 pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL); 1810 info->si_type = SI_SMIC;
1852 if (pci_dev && (pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)) 1811 break;
1853 fe_rmc = 1;
1854 else
1855 return -ENODEV;
1856 }
1857 1812
1858 error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr); 1813 case PCI_ERMC_CLASSCODE_TYPE_KCS:
1859 if (error) 1814 info->si_type = SI_KCS;
1860 { 1815 break;
1861 pci_dev_put(pci_dev); 1816
1862 printk(KERN_ERR 1817 case PCI_ERMC_CLASSCODE_TYPE_BT:
1863 "ipmi_si: pci_read_config_word() failed (%d).\n", 1818 info->si_type = SI_BT;
1864 error); 1819 break;
1865 return -ENODEV; 1820
1821 default:
1822 kfree(info);
1823 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
1824 pci_name(pdev), class_type);
1825 return ENOMEM;
1866 } 1826 }
1867 1827
1868 /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */ 1828 rv = pci_enable_device(pdev);
1869 if (! (base_addr & 0x0001)) 1829 if (rv) {
1870 { 1830 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
1871 pci_dev_put(pci_dev); 1831 pci_name(pdev));
1872 printk(KERN_ERR 1832 kfree(info);
1873 "ipmi_si: memory mapped I/O not supported for PCI" 1833 return rv;
1874 " smic.\n");
1875 return -ENODEV;
1876 } 1834 }
1877 1835
1878 base_addr &= 0xFFFE; 1836 info->addr_source_cleanup = ipmi_pci_cleanup;
1879 if (! fe_rmc) 1837 info->addr_source_data = pdev;
1880 /* Data register starts at base address + 1 in eRMC */
1881 ++base_addr;
1882 1838
1883 if (! is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) { 1839 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
1884 pci_dev_put(pci_dev); 1840 first_reg_offset = 1;
1885 return -ENODEV;
1886 }
1887 1841
1888 info = kmalloc(sizeof(*info), GFP_KERNEL); 1842 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
1889 if (! info) { 1843 info->io_setup = port_setup;
1890 pci_dev_put(pci_dev); 1844 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1891 printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n"); 1845 } else {
1892 return -ENOMEM; 1846 info->io_setup = mem_setup;
1847 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1893 } 1848 }
1894 memset(info, 0, sizeof(*info)); 1849 info->io.addr_data = pci_resource_start(pdev, 0);
1895 1850
1896 info->io_setup = port_setup; 1851 info->io.regspacing = DEFAULT_REGSPACING;
1897 ports[intf_num] = base_addr;
1898 info->io.info = &(ports[intf_num]);
1899 info->io.regspacing = regspacings[intf_num];
1900 if (! info->io.regspacing)
1901 info->io.regspacing = DEFAULT_REGSPACING;
1902 info->io.regsize = DEFAULT_REGSPACING; 1852 info->io.regsize = DEFAULT_REGSPACING;
1903 info->io.regshift = regshifts[intf_num]; 1853 info->io.regshift = 0;
1904 1854
1905 *new_info = info; 1855 info->irq = pdev->irq;
1856 if (info->irq)
1857 info->irq_setup = std_irq_setup;
1906 1858
1907 irqs[intf_num] = pci_dev->irq; 1859 info->dev = &pdev->dev;
1908 si_type[intf_num] = "smic";
1909 1860
1910 printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n", 1861 return try_smi_init(info);
1911 (long unsigned int) base_addr); 1862}
1912 1863
1913 pci_dev_put(pci_dev); 1864static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
1865{
1866}
1867
1868#ifdef CONFIG_PM
1869static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1870{
1914 return 0; 1871 return 0;
1915} 1872}
1916#endif /* CONFIG_PCI */
1917 1873
1918static int try_init_plug_and_play(int intf_num, struct smi_info **new_info) 1874static int ipmi_pci_resume(struct pci_dev *pdev)
1919{ 1875{
1920#ifdef CONFIG_PCI 1876 return 0;
1921 if (find_pci_smic(intf_num, new_info) == 0) 1877}
1922 return 0;
1923#endif 1878#endif
1924 /* Include other methods here. */
1925 1879
1926 return -ENODEV; 1880static struct pci_device_id ipmi_pci_devices[] = {
1927} 1881 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
1882 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE) }
1883};
1884MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
1885
1886static struct pci_driver ipmi_pci_driver = {
1887 .name = DEVICE_NAME,
1888 .id_table = ipmi_pci_devices,
1889 .probe = ipmi_pci_probe,
1890 .remove = __devexit_p(ipmi_pci_remove),
1891#ifdef CONFIG_PM
1892 .suspend = ipmi_pci_suspend,
1893 .resume = ipmi_pci_resume,
1894#endif
1895};
1896#endif /* CONFIG_PCI */
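The PCI hook relies on the standard IPMI class code 0x0C07: the low byte of pdev->class is the programming interface and selects SMIC (0x00), KCS (0x01) or BT (0x02), which is what the PCI_ERMC_CLASSCODE_TYPE_* constants spell out. The mapping in isolation, as a small illustrative helper:

    /* Sketch: name the interface type encoded in an IPMI controller's
     * PCI class (base class 0x0c, subclass 0x07, prog-if = type). */
    static const char *ipmi_pci_type_name(unsigned int pci_class)
    {
            switch (pci_class & 0xff) {     /* programming-interface byte */
            case 0x00: return "SMIC";
            case 0x01: return "KCS";
            case 0x02: return "BT";
            default:   return "unknown";
            }
    }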
1928 1897
1929 1898
1930static int try_get_dev_id(struct smi_info *smi_info) 1899static int try_get_dev_id(struct smi_info *smi_info)
1931{ 1900{
1932 unsigned char msg[2]; 1901 unsigned char msg[2];
1933 unsigned char *resp; 1902 unsigned char *resp;
1934 unsigned long resp_len; 1903 unsigned long resp_len;
1935 enum si_sm_result smi_result; 1904 enum si_sm_result smi_result;
1936 int rv = 0; 1905 int rv = 0;
1937 1906
1938 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1907 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1939 if (! resp) 1908 if (!resp)
1940 return -ENOMEM; 1909 return -ENOMEM;
1941 1910
1942 /* Do a Get Device ID command, since it comes back with some 1911 /* Do a Get Device ID command, since it comes back with some
@@ -1972,7 +1941,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
1972 /* Otherwise, we got some data. */ 1941 /* Otherwise, we got some data. */
1973 resp_len = smi_info->handlers->get_result(smi_info->si_sm, 1942 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1974 resp, IPMI_MAX_MSG_LENGTH); 1943 resp, IPMI_MAX_MSG_LENGTH);
1975 if (resp_len < 6) { 1944 if (resp_len < 14) {
1976 /* That's odd, it should be longer. */ 1945 /* That's odd, it should be longer. */
1977 rv = -EINVAL; 1946 rv = -EINVAL;
1978 goto out; 1947 goto out;
@@ -1985,8 +1954,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
1985 } 1954 }
1986 1955
1987 /* Record info from the get device id, in case we need it. */ 1956 /* Record info from the get device id, in case we need it. */
1988 memcpy(&smi_info->device_id, &resp[3], 1957 ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id);
1989 min_t(unsigned long, resp_len-3, sizeof(smi_info->device_id)));
1990 1958
1991 out: 1959 out:
1992 kfree(resp); 1960 kfree(resp);
@@ -2018,7 +1986,7 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
2018 struct smi_info *smi = data; 1986 struct smi_info *smi = data;
2019 1987
2020 out += sprintf(out, "interrupts_enabled: %d\n", 1988 out += sprintf(out, "interrupts_enabled: %d\n",
2021 smi->irq && ! smi->interrupt_disabled); 1989 smi->irq && !smi->interrupt_disabled);
2022 out += sprintf(out, "short_timeouts: %ld\n", 1990 out += sprintf(out, "short_timeouts: %ld\n",
2023 smi->short_timeouts); 1991 smi->short_timeouts);
2024 out += sprintf(out, "long_timeouts: %ld\n", 1992 out += sprintf(out, "long_timeouts: %ld\n",
@@ -2089,15 +2057,14 @@ static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2089#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 2057#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2090#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 2058#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2091#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 2059#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2092#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} 2060#define DELL_IANA_MFR_ID 0x0002a2
2093static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) 2061static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2094{ 2062{
2095 struct ipmi_device_id *id = &smi_info->device_id; 2063 struct ipmi_device_id *id = &smi_info->device_id;
2096 const char mfr[3]=DELL_IANA_MFR_ID; 2064 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2097 if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))) {
2098 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && 2065 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2099 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && 2066 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2100 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { 2067 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2101 smi_info->oem_data_avail_handler = 2068 smi_info->oem_data_avail_handler =
2102 oem_data_avail_to_receive_msg_avail; 2069 oem_data_avail_to_receive_msg_avail;
2103 } 2070 }
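The Dell OEM checks now compare one integer manufacturer ID (0x0002a2) instead of the raw three-byte array {0xA2, 0x02, 0x00}: ipmi_demangle_device_id() assembles the IANA enterprise number from the little-endian bytes of the Get Device ID response, so the comparison no longer depends on memcmp() over raw bytes. A hedged sketch of that assembly (the helper name is illustrative):

    /* Sketch: the IANA manufacturer ID is three little-endian bytes in
     * the Get Device ID response; packed into an integer, Dell's
     * 0xA2 0x02 0x00 becomes 0x0002a2. */
    static unsigned int mfr_id_from_bytes(const unsigned char *b)
    {
            return b[0] | (b[1] << 8) | (b[2] << 16);
    }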
@@ -2169,8 +2136,7 @@ static void
2169setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) 2136setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2170{ 2137{
2171 struct ipmi_device_id *id = &smi_info->device_id; 2138 struct ipmi_device_id *id = &smi_info->device_id;
2172 const char mfr[3]=DELL_IANA_MFR_ID; 2139 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
2173 if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr)) &&
2174 smi_info->si_type == SI_BT) 2140 smi_info->si_type == SI_BT)
2175 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); 2141 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2176} 2142}
@@ -2200,62 +2166,110 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2200 del_timer_sync(&smi_info->si_timer); 2166 del_timer_sync(&smi_info->si_timer);
2201} 2167}
2202 2168
2203/* Returns 0 if initialized, or negative on an error. */ 2169static struct ipmi_default_vals
2204static int init_one_smi(int intf_num, struct smi_info **smi)
2205{ 2170{
2206 int rv; 2171 int type;
2207 struct smi_info *new_smi; 2172 int port;
2173} __devinit ipmi_defaults[] =
2174{
2175 { .type = SI_KCS, .port = 0xca2 },
2176 { .type = SI_SMIC, .port = 0xca9 },
2177 { .type = SI_BT, .port = 0xe4 },
2178 { .port = 0 }
2179};
2208 2180
2181static __devinit void default_find_bmc(void)
2182{
2183 struct smi_info *info;
2184 int i;
2209 2185
2210 rv = try_init_mem(intf_num, &new_smi); 2186 for (i = 0; ; i++) {
2211 if (rv) 2187 if (!ipmi_defaults[i].port)
2212 rv = try_init_port(intf_num, &new_smi); 2188 break;
2213#ifdef CONFIG_ACPI
2214 if (rv && si_trydefaults)
2215 rv = try_init_acpi(intf_num, &new_smi);
2216#endif
2217#ifdef CONFIG_DMI
2218 if (rv && si_trydefaults)
2219 rv = try_init_smbios(intf_num, &new_smi);
2220#endif
2221 if (rv && si_trydefaults)
2222 rv = try_init_plug_and_play(intf_num, &new_smi);
2223 2189
2224 if (rv) 2190 info = kzalloc(sizeof(*info), GFP_KERNEL);
2225 return rv; 2191 if (!info)
2192 return;
2226 2193
2227 /* So we know not to free it unless we have allocated one. */ 2194 info->addr_source = NULL;
2228 new_smi->intf = NULL;
2229 new_smi->si_sm = NULL;
2230 new_smi->handlers = NULL;
2231 2195
2232 if (! new_smi->irq_setup) { 2196 info->si_type = ipmi_defaults[i].type;
2233 new_smi->irq = irqs[intf_num]; 2197 info->io_setup = port_setup;
2234 new_smi->irq_setup = std_irq_setup; 2198 info->io.addr_data = ipmi_defaults[i].port;
2235 new_smi->irq_cleanup = std_irq_cleanup; 2199 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2236 }
2237 2200
2238 /* Default to KCS if no type is specified. */ 2201 info->io.addr = NULL;
2239 if (si_type[intf_num] == NULL) { 2202 info->io.regspacing = DEFAULT_REGSPACING;
2240 if (si_trydefaults) 2203 info->io.regsize = DEFAULT_REGSPACING;
2241 si_type[intf_num] = "kcs"; 2204 info->io.regshift = 0;
2242 else { 2205
2243 rv = -EINVAL; 2206 if (try_smi_init(info) == 0) {
2244 goto out_err; 2207 /* Found one... */
2208 printk(KERN_INFO "ipmi_si: Found default %s state"
2209 " machine at %s address 0x%lx\n",
2210 si_to_str[info->si_type],
2211 addr_space_to_str[info->io.addr_type],
2212 info->io.addr_data);
2213 return;
2245 } 2214 }
2246 } 2215 }
2216}
2217
2218static int is_new_interface(struct smi_info *info)
2219{
2220 struct smi_info *e;
2221
2222 list_for_each_entry(e, &smi_infos, link) {
2223 if (e->io.addr_type != info->io.addr_type)
2224 continue;
2225 if (e->io.addr_data == info->io.addr_data)
2226 return 0;
2227 }
2228
2229 return 1;
2230}
2231
2232static int try_smi_init(struct smi_info *new_smi)
2233{
2234 int rv;
2235
2236 if (new_smi->addr_source) {
2237 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2238 " machine at %s address 0x%lx, slave address 0x%x,"
2239 " irq %d\n",
2240 new_smi->addr_source,
2241 si_to_str[new_smi->si_type],
2242 addr_space_to_str[new_smi->io.addr_type],
2243 new_smi->io.addr_data,
2244 new_smi->slave_addr, new_smi->irq);
2245 }
2246
2247 down(&smi_infos_lock);
2248 if (!is_new_interface(new_smi)) {
2249 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2250 rv = -EBUSY;
2251 goto out_err;
2252 }
2247 2253
2248 /* Set up the state machine to use. */ 2254 /* So we know not to free it unless we have allocated one. */
2249 if (strcmp(si_type[intf_num], "kcs") == 0) { 2255 new_smi->intf = NULL;
2256 new_smi->si_sm = NULL;
2257 new_smi->handlers = NULL;
2258
2259 switch (new_smi->si_type) {
2260 case SI_KCS:
2250 new_smi->handlers = &kcs_smi_handlers; 2261 new_smi->handlers = &kcs_smi_handlers;
2251 new_smi->si_type = SI_KCS; 2262 break;
2252 } else if (strcmp(si_type[intf_num], "smic") == 0) { 2263
2264 case SI_SMIC:
2253 new_smi->handlers = &smic_smi_handlers; 2265 new_smi->handlers = &smic_smi_handlers;
2254 new_smi->si_type = SI_SMIC; 2266 break;
2255 } else if (strcmp(si_type[intf_num], "bt") == 0) { 2267
2268 case SI_BT:
2256 new_smi->handlers = &bt_smi_handlers; 2269 new_smi->handlers = &bt_smi_handlers;
2257 new_smi->si_type = SI_BT; 2270 break;
2258 } else { 2271
2272 default:
2259 /* No support for anything else yet. */ 2273 /* No support for anything else yet. */
2260 rv = -EIO; 2274 rv = -EIO;
2261 goto out_err; 2275 goto out_err;
@@ -2263,7 +2277,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2263 2277
2264 /* Allocate the state machine's data and initialize it. */ 2278 /* Allocate the state machine's data and initialize it. */
2265 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 2279 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2266 if (! new_smi->si_sm) { 2280 if (!new_smi->si_sm) {
2267 printk(" Could not allocate state machine memory\n"); 2281 printk(" Could not allocate state machine memory\n");
2268 rv = -ENOMEM; 2282 rv = -ENOMEM;
2269 goto out_err; 2283 goto out_err;
@@ -2284,21 +2298,29 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2284 2298
2285 /* Do low-level detection first. */ 2299 /* Do low-level detection first. */
2286 if (new_smi->handlers->detect(new_smi->si_sm)) { 2300 if (new_smi->handlers->detect(new_smi->si_sm)) {
2301 if (new_smi->addr_source)
2302 printk(KERN_INFO "ipmi_si: Interface detection"
2303 " failed\n");
2287 rv = -ENODEV; 2304 rv = -ENODEV;
2288 goto out_err; 2305 goto out_err;
2289 } 2306 }
2290 2307
2291 /* Attempt a get device id command. If it fails, we probably 2308 /* Attempt a get device id command. If it fails, we probably
2292 don't have a SMI here. */ 2309 don't have a BMC here. */
2293 rv = try_get_dev_id(new_smi); 2310 rv = try_get_dev_id(new_smi);
2294 if (rv) 2311 if (rv) {
2312 if (new_smi->addr_source)
2313 printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2314 " at this location\n");
2295 goto out_err; 2315 goto out_err;
2316 }
2296 2317
2297 setup_oem_data_handler(new_smi); 2318 setup_oem_data_handler(new_smi);
2298 setup_xaction_handlers(new_smi); 2319 setup_xaction_handlers(new_smi);
2299 2320
2300 /* Try to claim any interrupts. */ 2321 /* Try to claim any interrupts. */
2301 new_smi->irq_setup(new_smi); 2322 if (new_smi->irq_setup)
2323 new_smi->irq_setup(new_smi);
2302 2324
2303 INIT_LIST_HEAD(&(new_smi->xmit_msgs)); 2325 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2304 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); 2326 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
@@ -2308,7 +2330,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2308 2330
2309 new_smi->interrupt_disabled = 0; 2331 new_smi->interrupt_disabled = 0;
2310 atomic_set(&new_smi->stop_operation, 0); 2332 atomic_set(&new_smi->stop_operation, 0);
2311 new_smi->intf_num = intf_num; 2333 new_smi->intf_num = smi_num;
2334 smi_num++;
2312 2335
2313 /* Start clearing the flags before we enable interrupts or the 2336 /* Start clearing the flags before we enable interrupts or the
2314 timer to avoid racing with the timer. */ 2337 timer to avoid racing with the timer. */
@@ -2332,10 +2355,36 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2332 new_smi->thread = kthread_run(ipmi_thread, new_smi, 2355 new_smi->thread = kthread_run(ipmi_thread, new_smi,
2333 "kipmi%d", new_smi->intf_num); 2356 "kipmi%d", new_smi->intf_num);
2334 2357
2358 if (!new_smi->dev) {
2359 /* If we don't already have a device from something
2360 * else (like PCI), then register a new one. */
2361 new_smi->pdev = platform_device_alloc("ipmi_si",
2362 new_smi->intf_num);
 2363 if (!new_smi->pdev) {
2364 printk(KERN_ERR
2365 "ipmi_si_intf:"
2366 " Unable to allocate platform device\n");
2367 goto out_err_stop_timer;
2368 }
2369 new_smi->dev = &new_smi->pdev->dev;
2370 new_smi->dev->driver = &ipmi_driver;
2371
2372 rv = platform_device_register(new_smi->pdev);
2373 if (rv) {
2374 printk(KERN_ERR
2375 "ipmi_si_intf:"
2376 " Unable to register system interface device:"
2377 " %d\n",
2378 rv);
2379 goto out_err_stop_timer;
2380 }
2381 new_smi->dev_registered = 1;
2382 }
2383
2335 rv = ipmi_register_smi(&handlers, 2384 rv = ipmi_register_smi(&handlers,
2336 new_smi, 2385 new_smi,
2337 ipmi_version_major(&new_smi->device_id), 2386 &new_smi->device_id,
2338 ipmi_version_minor(&new_smi->device_id), 2387 new_smi->dev,
2339 new_smi->slave_addr, 2388 new_smi->slave_addr,
2340 &(new_smi->intf)); 2389 &(new_smi->intf));
2341 if (rv) { 2390 if (rv) {
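When an interface has no bus-supplied struct device (hard-coded, SMBIOS or ACPI discovery), the block above creates a platform device so ipmi_register_smi() still gets a real device to hang sysfs entries on. Note that platform_device_alloc() reports failure by returning NULL rather than an errno. A hedged sketch of the usual allocate/add pairing for a dynamically created platform device (the function name and error handling are illustrative, not the driver's exact sequence):

    #include <linux/platform_device.h>

    /* Sketch: give a legacy-discovered interface a struct device. */
    static struct device *example_si_device(int idx)
    {
            struct platform_device *pdev;

            pdev = platform_device_alloc("ipmi_si", idx);
            if (!pdev)                              /* NULL on failure, not an errno */
                    return NULL;

            if (platform_device_add(pdev)) {
                    platform_device_put(pdev);      /* drop the allocation on error */
                    return NULL;
            }
            return &pdev->dev;
    }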
@@ -2365,9 +2414,11 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2365 goto out_err_stop_timer; 2414 goto out_err_stop_timer;
2366 } 2415 }
2367 2416
2368 *smi = new_smi; 2417 list_add_tail(&new_smi->link, &smi_infos);
2418
2419 up(&smi_infos_lock);
2369 2420
2370 printk(" IPMI %s interface initialized\n", si_type[intf_num]); 2421 printk(" IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
2371 2422
2372 return 0; 2423 return 0;
2373 2424
@@ -2379,7 +2430,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2379 if (new_smi->intf) 2430 if (new_smi->intf)
2380 ipmi_unregister_smi(new_smi->intf); 2431 ipmi_unregister_smi(new_smi->intf);
2381 2432
2382 new_smi->irq_cleanup(new_smi); 2433 if (new_smi->irq_cleanup)
2434 new_smi->irq_cleanup(new_smi);
2383 2435
2384 /* Wait until we know that we are out of any interrupt 2436 /* Wait until we know that we are out of any interrupt
2385 handlers might have been running before we freed the 2437 handlers might have been running before we freed the
@@ -2391,23 +2443,41 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2391 new_smi->handlers->cleanup(new_smi->si_sm); 2443 new_smi->handlers->cleanup(new_smi->si_sm);
2392 kfree(new_smi->si_sm); 2444 kfree(new_smi->si_sm);
2393 } 2445 }
2446 if (new_smi->addr_source_cleanup)
2447 new_smi->addr_source_cleanup(new_smi);
2394 if (new_smi->io_cleanup) 2448 if (new_smi->io_cleanup)
2395 new_smi->io_cleanup(new_smi); 2449 new_smi->io_cleanup(new_smi);
2396 2450
2451 if (new_smi->dev_registered)
2452 platform_device_unregister(new_smi->pdev);
2453
2454 kfree(new_smi);
2455
2456 up(&smi_infos_lock);
2457
2397 return rv; 2458 return rv;
2398} 2459}
2399 2460
2400static __init int init_ipmi_si(void) 2461static __devinit int init_ipmi_si(void)
2401{ 2462{
2402 int rv = 0;
2403 int pos = 0;
2404 int i; 2463 int i;
2405 char *str; 2464 char *str;
2465 int rv;
2406 2466
2407 if (initialized) 2467 if (initialized)
2408 return 0; 2468 return 0;
2409 initialized = 1; 2469 initialized = 1;
2410 2470
2471 /* Register the device drivers. */
2472 rv = driver_register(&ipmi_driver);
2473 if (rv) {
2474 printk(KERN_ERR
2475 "init_ipmi_si: Unable to register driver: %d\n",
2476 rv);
2477 return rv;
2478 }
2479
2480
2411 /* Parse out the si_type string into its components. */ 2481 /* Parse out the si_type string into its components. */
2412 str = si_type_str; 2482 str = si_type_str;
2413 if (*str != '\0') { 2483 if (*str != '\0') {
@@ -2425,63 +2495,66 @@ static __init int init_ipmi_si(void)
2425 2495
2426 printk(KERN_INFO "IPMI System Interface driver.\n"); 2496 printk(KERN_INFO "IPMI System Interface driver.\n");
2427 2497
2498 hardcode_find_bmc();
2499
2428#ifdef CONFIG_DMI 2500#ifdef CONFIG_DMI
2429 dmi_find_bmc(); 2501 dmi_find_bmc();
2430#endif 2502#endif
2431 2503
2432 rv = init_one_smi(0, &(smi_infos[pos])); 2504#ifdef CONFIG_ACPI
2433 if (rv && ! ports[0] && si_trydefaults) { 2505 if (si_trydefaults)
2434 /* If we are trying defaults and the initial port is 2506 acpi_find_bmc();
2435 not set, then set it. */ 2507#endif
2436 si_type[0] = "kcs";
2437 ports[0] = DEFAULT_KCS_IO_PORT;
2438 rv = init_one_smi(0, &(smi_infos[pos]));
2439 if (rv) {
2440 /* No KCS - try SMIC */
2441 si_type[0] = "smic";
2442 ports[0] = DEFAULT_SMIC_IO_PORT;
2443 rv = init_one_smi(0, &(smi_infos[pos]));
2444 }
2445 if (rv) {
2446 /* No SMIC - try BT */
2447 si_type[0] = "bt";
2448 ports[0] = DEFAULT_BT_IO_PORT;
2449 rv = init_one_smi(0, &(smi_infos[pos]));
2450 }
2451 }
2452 if (rv == 0)
2453 pos++;
2454 2508
2455 for (i = 1; i < SI_MAX_PARMS; i++) { 2509#ifdef CONFIG_PCI
2456 rv = init_one_smi(i, &(smi_infos[pos])); 2510 pci_module_init(&ipmi_pci_driver);
2457 if (rv == 0) 2511#endif
2458 pos++; 2512
2513 if (si_trydefaults) {
2514 down(&smi_infos_lock);
2515 if (list_empty(&smi_infos)) {
2516 /* No BMC was found, try defaults. */
2517 up(&smi_infos_lock);
2518 default_find_bmc();
2519 } else {
2520 up(&smi_infos_lock);
2521 }
2459 } 2522 }
2460 2523
2461 if (smi_infos[0] == NULL) { 2524 down(&smi_infos_lock);
2525 if (list_empty(&smi_infos)) {
2526 up(&smi_infos_lock);
2527#ifdef CONFIG_PCI
2528 pci_unregister_driver(&ipmi_pci_driver);
2529#endif
2462 printk("ipmi_si: Unable to find any System Interface(s)\n"); 2530 printk("ipmi_si: Unable to find any System Interface(s)\n");
2463 return -ENODEV; 2531 return -ENODEV;
2532 } else {
2533 up(&smi_infos_lock);
2534 return 0;
2464 } 2535 }
2465
2466 return 0;
2467} 2536}
2468module_init(init_ipmi_si); 2537module_init(init_ipmi_si);
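init_ipmi_si() first splits the si_type module parameter, a comma-separated list such as "kcs,bt", into one entry per interface; the parsing loop itself falls outside the hunks shown here. As a hedged illustration of how such a list is commonly split in kernel code (the loop below is a sketch, not the driver's exact parser):

    #include <linux/string.h>

    /* Sketch: split "kcs,smic,bt" in place into si_type[] slots. */
    static void parse_si_types(char *si_type_str, char *si_type[], int max)
    {
            char *s = si_type_str;
            int i;

            for (i = 0; i < max && s && *s; i++)
                    si_type[i] = strsep(&s, ",");   /* terminates the token, advances s */
    }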
2469 2538
2470static void __exit cleanup_one_si(struct smi_info *to_clean) 2539static void __devexit cleanup_one_si(struct smi_info *to_clean)
2471{ 2540{
2472 int rv; 2541 int rv;
2473 unsigned long flags; 2542 unsigned long flags;
2474 2543
2475 if (! to_clean) 2544 if (!to_clean)
2476 return; 2545 return;
2477 2546
2547 list_del(&to_clean->link);
2548
2478 /* Tell the timer and interrupt handlers that we are shutting 2549 /* Tell the timer and interrupt handlers that we are shutting
2479 down. */ 2550 down. */
2480 spin_lock_irqsave(&(to_clean->si_lock), flags); 2551 spin_lock_irqsave(&(to_clean->si_lock), flags);
2481 spin_lock(&(to_clean->msg_lock)); 2552 spin_lock(&(to_clean->msg_lock));
2482 2553
2483 atomic_inc(&to_clean->stop_operation); 2554 atomic_inc(&to_clean->stop_operation);
2484 to_clean->irq_cleanup(to_clean); 2555
2556 if (to_clean->irq_cleanup)
2557 to_clean->irq_cleanup(to_clean);
2485 2558
2486 spin_unlock(&(to_clean->msg_lock)); 2559 spin_unlock(&(to_clean->msg_lock));
2487 spin_unlock_irqrestore(&(to_clean->si_lock), flags); 2560 spin_unlock_irqrestore(&(to_clean->si_lock), flags);
@@ -2511,20 +2584,34 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
2511 2584
2512 kfree(to_clean->si_sm); 2585 kfree(to_clean->si_sm);
2513 2586
2587 if (to_clean->addr_source_cleanup)
2588 to_clean->addr_source_cleanup(to_clean);
2514 if (to_clean->io_cleanup) 2589 if (to_clean->io_cleanup)
2515 to_clean->io_cleanup(to_clean); 2590 to_clean->io_cleanup(to_clean);
2591
2592 if (to_clean->dev_registered)
2593 platform_device_unregister(to_clean->pdev);
2594
2595 kfree(to_clean);
2516} 2596}
2517 2597
2518static __exit void cleanup_ipmi_si(void) 2598static __exit void cleanup_ipmi_si(void)
2519{ 2599{
2520 int i; 2600 struct smi_info *e, *tmp_e;
2521 2601
2522 if (! initialized) 2602 if (!initialized)
2523 return; 2603 return;
2524 2604
2525 for (i = 0; i < SI_MAX_DRIVERS; i++) { 2605#ifdef CONFIG_PCI
2526 cleanup_one_si(smi_infos[i]); 2606 pci_unregister_driver(&ipmi_pci_driver);
2527 } 2607#endif
2608
2609 down(&smi_infos_lock);
2610 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2611 cleanup_one_si(e);
2612 up(&smi_infos_lock);
2613
2614 driver_unregister(&ipmi_driver);
2528} 2615}
2529module_exit(cleanup_ipmi_si); 2616module_exit(cleanup_ipmi_si);
2530 2617
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
index bf3d4962d6a5..4b731b24dc16 100644
--- a/drivers/char/ipmi/ipmi_si_sm.h
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -50,11 +50,12 @@ struct si_sm_io
50 50
51 /* Generic info used by the actual handling routines, the 51 /* Generic info used by the actual handling routines, the
52 state machine shouldn't touch these. */ 52 state machine shouldn't touch these. */
53 void *info;
54 void __iomem *addr; 53 void __iomem *addr;
55 int regspacing; 54 int regspacing;
56 int regsize; 55 int regsize;
57 int regshift; 56 int regshift;
57 int addr_type;
58 long addr_data;
58}; 59};
59 60
60/* Results of SMI events. */ 61/* Results of SMI events. */
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 1f3159eb1ede..616539310d9a 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -996,7 +996,7 @@ static struct notifier_block wdog_panic_notifier = {
996}; 996};
997 997
998 998
999static void ipmi_new_smi(int if_num) 999static void ipmi_new_smi(int if_num, struct device *device)
1000{ 1000{
1001 ipmi_register_watchdog(if_num); 1001 ipmi_register_watchdog(if_num);
1002} 1002}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 26d0116b48d4..5245ba1649ed 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -88,21 +88,15 @@ static inline int uncached_access(struct file *file, unsigned long addr)
88} 88}
89 89
90#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE 90#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
91static inline int valid_phys_addr_range(unsigned long addr, size_t *count) 91static inline int valid_phys_addr_range(unsigned long addr, size_t count)
92{ 92{
93 unsigned long end_mem; 93 if (addr + count > __pa(high_memory))
94
95 end_mem = __pa(high_memory);
96 if (addr >= end_mem)
97 return 0; 94 return 0;
98 95
99 if (*count > end_mem - addr)
100 *count = end_mem - addr;
101
102 return 1; 96 return 1;
103} 97}
104 98
105static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t *size) 99static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t size)
106{ 100{
107 return 1; 101 return 1;
108} 102}
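The reworked helper takes count by value and rejects any request that runs past the top of physical memory, where the old version clamped *count and let a shortened transfer proceed. Restated as a hedged one-liner (not the driver's literal code):

    /* Sketch: new /dev/mem range check.  A range ending beyond
     * __pa(high_memory) now fails (callers return -EFAULT) instead of
     * being silently truncated. */
    static inline int phys_range_ok(unsigned long addr, size_t count,
                                    unsigned long phys_end)
    {
            return addr + count <= phys_end;
    }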
@@ -119,7 +113,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
119 ssize_t read, sz; 113 ssize_t read, sz;
120 char *ptr; 114 char *ptr;
121 115
122 if (!valid_phys_addr_range(p, &count)) 116 if (!valid_phys_addr_range(p, count))
123 return -EFAULT; 117 return -EFAULT;
124 read = 0; 118 read = 0;
125#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED 119#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
@@ -177,7 +171,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
177 unsigned long copied; 171 unsigned long copied;
178 void *ptr; 172 void *ptr;
179 173
180 if (!valid_phys_addr_range(p, &count)) 174 if (!valid_phys_addr_range(p, count))
181 return -EFAULT; 175 return -EFAULT;
182 176
183 written = 0; 177 written = 0;
@@ -249,7 +243,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
249{ 243{
250 size_t size = vma->vm_end - vma->vm_start; 244 size_t size = vma->vm_end - vma->vm_start;
251 245
252 if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, &size)) 246 if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, size))
253 return -EINVAL; 247 return -EINVAL;
254 248
255 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, 249 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 4c272189cd42..2546637a55c0 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -767,6 +767,7 @@ static int __init tlclk_init(void)
767 printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); 767 printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
768 return ret; 768 return ret;
769 } 769 }
770 tlclk_major = ret;
770 alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); 771 alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
771 if (!alarm_events) 772 if (!alarm_events)
772 goto out1; 773 goto out1;
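When register_chrdev() is called with major 0 it returns the dynamically allocated major number, and the added assignment records it so later teardown and device-node creation use the right value. A minimal sketch of that pattern (names are illustrative, not tlclk's):

    /* Sketch: remember a dynamically allocated character-device major. */
    static struct file_operations example_fops;
    static int example_major;

    static int __init example_init(void)
    {
            int ret = register_chrdev(0, "example", &example_fops);

            if (ret < 0)
                    return ret;             /* no major available */
            example_major = ret;            /* keep the allocated major */
            return 0;
    }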
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 52f3eb45d2b9..b582d0cdc24f 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -64,35 +64,35 @@ config EDAC_AMD76X
64 64
65config EDAC_E7XXX 65config EDAC_E7XXX
66 tristate "Intel e7xxx (e7205, e7500, e7501, e7505)" 66 tristate "Intel e7xxx (e7205, e7500, e7501, e7505)"
67 depends on EDAC_MM_EDAC && PCI 67 depends on EDAC_MM_EDAC && PCI && X86_32
68 help 68 help
69 Support for error detection and correction on the Intel 69 Support for error detection and correction on the Intel
70 E7205, E7500, E7501 and E7505 server chipsets. 70 E7205, E7500, E7501 and E7505 server chipsets.
71 71
72config EDAC_E752X 72config EDAC_E752X
73 tristate "Intel e752x (e7520, e7525, e7320)" 73 tristate "Intel e752x (e7520, e7525, e7320)"
74 depends on EDAC_MM_EDAC && PCI 74 depends on EDAC_MM_EDAC && PCI && X86
75 help 75 help
76 Support for error detection and correction on the Intel 76 Support for error detection and correction on the Intel
77 E7520, E7525, E7320 server chipsets. 77 E7520, E7525, E7320 server chipsets.
78 78
79config EDAC_I82875P 79config EDAC_I82875P
80 tristate "Intel 82875p (D82875P, E7210)" 80 tristate "Intel 82875p (D82875P, E7210)"
81 depends on EDAC_MM_EDAC && PCI 81 depends on EDAC_MM_EDAC && PCI && X86_32
82 help 82 help
83 Support for error detection and correction on the Intel 83 Support for error detection and correction on the Intel
 84 D82875P and E7210 server chipsets. 84 D82875P and E7210 server chipsets.
85 85
86config EDAC_I82860 86config EDAC_I82860
87 tristate "Intel 82860" 87 tristate "Intel 82860"
88 depends on EDAC_MM_EDAC && PCI 88 depends on EDAC_MM_EDAC && PCI && X86_32
89 help 89 help
90 Support for error detection and correction on the Intel 90 Support for error detection and correction on the Intel
91 82860 chipset. 91 82860 chipset.
92 92
93config EDAC_R82600 93config EDAC_R82600
94 tristate "Radisys 82600 embedded chipset" 94 tristate "Radisys 82600 embedded chipset"
95 depends on EDAC_MM_EDAC 95 depends on EDAC_MM_EDAC && PCI && X86_32
96 help 96 help
97 Support for error detection and correction on the Radisys 97 Support for error detection and correction on the Radisys
98 82600 embedded chipset. 98 82600 embedded chipset.
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index 2fcc8120b53c..53423ad6d4a3 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -12,25 +12,26 @@
12 * 12 *
13 */ 13 */
14 14
15
16#include <linux/config.h> 15#include <linux/config.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/init.h> 17#include <linux/init.h>
19
20#include <linux/pci.h> 18#include <linux/pci.h>
21#include <linux/pci_ids.h> 19#include <linux/pci_ids.h>
22
23#include <linux/slab.h> 20#include <linux/slab.h>
24
25#include "edac_mc.h" 21#include "edac_mc.h"
26 22
23#define amd76x_printk(level, fmt, arg...) \
24 edac_printk(level, "amd76x", fmt, ##arg)
25
26#define amd76x_mc_printk(mci, level, fmt, arg...) \
27 edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
27 28
28#define AMD76X_NR_CSROWS 8 29#define AMD76X_NR_CSROWS 8
29#define AMD76X_NR_CHANS 1 30#define AMD76X_NR_CHANS 1
30#define AMD76X_NR_DIMMS 4 31#define AMD76X_NR_DIMMS 4
31 32
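The new amd76x_printk()/amd76x_mc_printk() wrappers just prepend the driver name before handing off to the generic EDAC printk helpers, so every message from this driver is tagged consistently. A self-contained sketch of how such a two-level wrapper expands is below; the "edac_printk" here is a printf-based stand-in, not the real helper from edac_mc.h.

/* Userspace model of the wrapper macros added above.  Only the layering
 * (driver macro -> generic macro -> one tagged line) mirrors the patch;
 * the format produced by the stand-in is an assumption.
 */
#include <stdio.h>

#define model_edac_printk(level, prefix, fmt, arg...) \
        printf("%sEDAC %s: " fmt, level, prefix, ##arg)

#define model_amd76x_printk(level, fmt, arg...) \
        model_edac_printk(level, "amd76x", fmt, ##arg)

#define KERN_WARNING_STR "<4>"

int main(void)
{
        /* prints "<4>EDAC amd76x: UE on csrow 3" plus newline */
        model_amd76x_printk(KERN_WARNING_STR, "UE on csrow %d\n", 3);
        return 0;
}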
32
33/* AMD 76x register addresses - device 0 function 0 - PCI bridge */ 33/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
34
34#define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b) 35#define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b)
35 * 36 *
36 * 31:16 reserved 37 * 31:16 reserved
@@ -42,6 +43,7 @@
42 * 7:4 UE cs row 43 * 7:4 UE cs row
43 * 3:0 CE cs row 44 * 3:0 CE cs row
44 */ 45 */
46
45#define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b) 47#define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b)
46 * 48 *
47 * 31:26 clock disable 5 - 0 49 * 31:26 clock disable 5 - 0
@@ -56,6 +58,7 @@
56 * 15:8 reserved 58 * 15:8 reserved
57 * 7:0 x4 mode enable 7 - 0 59 * 7:0 x4 mode enable 7 - 0
58 */ 60 */
61
59#define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b) 62#define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b)
60 * 63 *
61 * 31:23 chip-select base 64 * 31:23 chip-select base
@@ -66,29 +69,28 @@
66 * 0 chip-select enable 69 * 0 chip-select enable
67 */ 70 */
68 71
69
70struct amd76x_error_info { 72struct amd76x_error_info {
71 u32 ecc_mode_status; 73 u32 ecc_mode_status;
72}; 74};
73 75
74
75enum amd76x_chips { 76enum amd76x_chips {
76 AMD761 = 0, 77 AMD761 = 0,
77 AMD762 78 AMD762
78}; 79};
79 80
80
81struct amd76x_dev_info { 81struct amd76x_dev_info {
82 const char *ctl_name; 82 const char *ctl_name;
83}; 83};
84 84
85
86static const struct amd76x_dev_info amd76x_devs[] = { 85static const struct amd76x_dev_info amd76x_devs[] = {
87 [AMD761] = {.ctl_name = "AMD761"}, 86 [AMD761] = {
88 [AMD762] = {.ctl_name = "AMD762"}, 87 .ctl_name = "AMD761"
88 },
89 [AMD762] = {
90 .ctl_name = "AMD762"
91 },
89}; 92};
90 93
91
92/** 94/**
93 * amd76x_get_error_info - fetch error information 95 * amd76x_get_error_info - fetch error information
94 * @mci: Memory controller 96 * @mci: Memory controller
@@ -97,23 +99,21 @@ static const struct amd76x_dev_info amd76x_devs[] = {
97 * Fetch and store the AMD76x ECC status. Clear pending status 99 * Fetch and store the AMD76x ECC status. Clear pending status
98 * on the chip so that further errors will be reported 100 * on the chip so that further errors will be reported
99 */ 101 */
100 102static void amd76x_get_error_info(struct mem_ctl_info *mci,
101static void amd76x_get_error_info (struct mem_ctl_info *mci, 103 struct amd76x_error_info *info)
102 struct amd76x_error_info *info)
103{ 104{
104 pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS, 105 pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS,
105 &info->ecc_mode_status); 106 &info->ecc_mode_status);
106 107
107 if (info->ecc_mode_status & BIT(8)) 108 if (info->ecc_mode_status & BIT(8))
108 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, 109 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
109 (u32) BIT(8), (u32) BIT(8)); 110 (u32) BIT(8), (u32) BIT(8));
110 111
111 if (info->ecc_mode_status & BIT(9)) 112 if (info->ecc_mode_status & BIT(9))
112 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, 113 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS,
113 (u32) BIT(9), (u32) BIT(9)); 114 (u32) BIT(9), (u32) BIT(9));
114} 115}
115 116
116
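amd76x_get_error_info() snapshots ECC_MODE_STATUS and then writes bits 8 and 9 back whenever they are set, which re-arms the controller for the next error. The sketch below models that read-then-acknowledge sequence against a fake register; the bit positions come from the code above, while the write-1-to-clear behaviour of the latch is an assumption about the hardware.

/* Model of the read-then-acknowledge pattern used above.  The fake
 * register and its write-1-to-clear semantics are assumptions made
 * for illustration; only bits 8 and 9 match the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)  (1U << (n))

static uint32_t fake_ecc_mode_status = BIT(9) | (3u << 4);      /* pretend an error is latched */

static uint32_t reg_read(void)
{
        return fake_ecc_mode_status;
}

static void reg_write_bits(uint32_t value, uint32_t mask)
{
        /* write-1-to-clear: writing a set bit back clears the latch */
        fake_ecc_mode_status &= ~(value & mask);
}

int main(void)
{
        uint32_t snap = reg_read();                     /* take the snapshot first */

        if (snap & BIT(8))
                reg_write_bits(BIT(8), BIT(8));         /* acknowledge latched bit 8 */
        if (snap & BIT(9))
                reg_write_bits(BIT(9), BIT(9));         /* acknowledge latched bit 9 */

        printf("snapshot %#x, register now %#x\n",
               (unsigned int) snap, (unsigned int) fake_ecc_mode_status);
        return 0;
}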
117/** 117/**
118 * amd76x_process_error_info - Error check 118 * amd76x_process_error_info - Error check
119 * @mci: Memory controller 119 * @mci: Memory controller
@@ -124,8 +124,7 @@ static void amd76x_get_error_info (struct mem_ctl_info *mci,
124 * A return of 1 indicates an error. Also if handle_errors is true 124 * A return of 1 indicates an error. Also if handle_errors is true
125 * then attempt to handle and clean up after the error 125 * then attempt to handle and clean up after the error
126 */ 126 */
127 127static int amd76x_process_error_info(struct mem_ctl_info *mci,
128static int amd76x_process_error_info (struct mem_ctl_info *mci,
129 struct amd76x_error_info *info, int handle_errors) 128 struct amd76x_error_info *info, int handle_errors)
130{ 129{
131 int error_found; 130 int error_found;
@@ -141,9 +140,8 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci,
141 140
142 if (handle_errors) { 141 if (handle_errors) {
143 row = (info->ecc_mode_status >> 4) & 0xf; 142 row = (info->ecc_mode_status >> 4) & 0xf;
144 edac_mc_handle_ue(mci, 143 edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
145 mci->csrows[row].first_page, 0, row, 144 row, mci->ctl_name);
146 mci->ctl_name);
147 } 145 }
148 } 146 }
149 147
@@ -155,11 +153,11 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci,
155 153
156 if (handle_errors) { 154 if (handle_errors) {
157 row = info->ecc_mode_status & 0xf; 155 row = info->ecc_mode_status & 0xf;
158 edac_mc_handle_ce(mci, 156 edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
159 mci->csrows[row].first_page, 0, 0, row, 0, 157 0, row, 0, mci->ctl_name);
160 mci->ctl_name);
161 } 158 }
162 } 159 }
160
163 return error_found; 161 return error_found;
164} 162}
165 163
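The UE and CE paths above pull the failing chip-select row straight out of the status word: bits 7:4 carry the UE row and bits 3:0 the CE row, and that index is then used to look up first_page for the edac_mc_handle_ue()/edac_mc_handle_ce() report. A standalone decode of those two fields, with a worked example, is sketched below; the sample status value is invented.

/* Decode of the row fields used by amd76x_process_error_info() above.
 * Pure bit manipulation; the sample status value is made up.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int amd76x_ue_row(uint32_t ecc_mode_status)
{
        return (ecc_mode_status >> 4) & 0xf;    /* bits 7:4 */
}

static unsigned int amd76x_ce_row(uint32_t ecc_mode_status)
{
        return ecc_mode_status & 0xf;           /* bits 3:0 */
}

int main(void)
{
        uint32_t status = 0x00000072;           /* hypothetical: UE row 7, CE row 2 */

        printf("UE row %u, CE row %u\n",
               amd76x_ue_row(status), amd76x_ce_row(status));
        return 0;
}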
@@ -170,16 +168,14 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci,
170 * Called by the poll handlers this function reads the status 168 * Called by the poll handlers this function reads the status
171 * from the controller and checks for errors. 169 * from the controller and checks for errors.
172 */ 170 */
173
174static void amd76x_check(struct mem_ctl_info *mci) 171static void amd76x_check(struct mem_ctl_info *mci)
175{ 172{
176 struct amd76x_error_info info; 173 struct amd76x_error_info info;
177 debugf3("MC: " __FILE__ ": %s()\n", __func__); 174 debugf3("%s()\n", __func__);
178 amd76x_get_error_info(mci, &info); 175 amd76x_get_error_info(mci, &info);
179 amd76x_process_error_info(mci, &info, 1); 176 amd76x_process_error_info(mci, &info, 1);
180} 177}
181 178
182
183/** 179/**
184 * amd76x_probe1 - Perform set up for detected device 180 * amd76x_probe1 - Perform set up for detected device
185 * @pdev; PCI device detected 181 * @pdev; PCI device detected
@@ -189,7 +185,6 @@ static void amd76x_check(struct mem_ctl_info *mci)
189 * controller status reporting. We configure and set up the 185 * controller status reporting. We configure and set up the
190 * memory controller reporting and claim the device. 186 * memory controller reporting and claim the device.
191 */ 187 */
192
193static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) 188static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
194{ 189{
195 int rc = -ENODEV; 190 int rc = -ENODEV;
@@ -203,12 +198,11 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
203 }; 198 };
204 u32 ems; 199 u32 ems;
205 u32 ems_mode; 200 u32 ems_mode;
201 struct amd76x_error_info discard;
206 202
207 debugf0("MC: " __FILE__ ": %s()\n", __func__); 203 debugf0("%s()\n", __func__);
208
209 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); 204 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
210 ems_mode = (ems >> 10) & 0x3; 205 ems_mode = (ems >> 10) & 0x3;
211
212 mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); 206 mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS);
213 207
214 if (mci == NULL) { 208 if (mci == NULL) {
@@ -216,16 +210,13 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
216 goto fail; 210 goto fail;
217 } 211 }
218 212
219 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 213 debugf0("%s(): mci = %p\n", __func__, mci);
220 214 mci->pdev = pdev;
221 mci->pdev = pci_dev_get(pdev);
222 mci->mtype_cap = MEM_FLAG_RDDR; 215 mci->mtype_cap = MEM_FLAG_RDDR;
223
224 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 216 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
225 mci->edac_cap = ems_mode ? 217 mci->edac_cap = ems_mode ?
226 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; 218 (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE;
227 219 mci->mod_name = EDAC_MOD_STR;
228 mci->mod_name = BS_MOD_STR;
229 mci->mod_ver = "$Revision: 1.4.2.5 $"; 220 mci->mod_ver = "$Revision: 1.4.2.5 $";
230 mci->ctl_name = amd76x_devs[dev_idx].ctl_name; 221 mci->ctl_name = amd76x_devs[dev_idx].ctl_name;
231 mci->edac_check = amd76x_check; 222 mci->edac_check = amd76x_check;
@@ -240,18 +231,15 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
240 231
241 /* find the DRAM Chip Select Base address and mask */ 232 /* find the DRAM Chip Select Base address and mask */
242 pci_read_config_dword(mci->pdev, 233 pci_read_config_dword(mci->pdev,
243 AMD76X_MEM_BASE_ADDR + (index * 4), 234 AMD76X_MEM_BASE_ADDR + (index * 4), &mba);
244 &mba);
245 235
246 if (!(mba & BIT(0))) 236 if (!(mba & BIT(0)))
247 continue; 237 continue;
248 238
249 mba_base = mba & 0xff800000UL; 239 mba_base = mba & 0xff800000UL;
250 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; 240 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
251
252 pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS, 241 pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS,
253 &dms); 242 &dms);
254
255 csrow->first_page = mba_base >> PAGE_SHIFT; 243 csrow->first_page = mba_base >> PAGE_SHIFT;
256 csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; 244 csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
257 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 245 csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
@@ -262,40 +250,33 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
262 csrow->edac_mode = ems_modes[ems_mode]; 250 csrow->edac_mode = ems_modes[ems_mode];
263 } 251 }
264 252
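Each chip-select row in the loop above is sized from its memory base address register: bit 0 is the enable, bits 31:23 give the base, and the mask field in bits 15:7 is widened into a full address mask, so (mask + 1) shifted by PAGE_SHIFT is the row's page count. The sketch below reproduces that arithmetic in isolation; PAGE_SHIFT is fixed at 12 here and the sample register value is invented.

/* Reproduction of the csrow geometry computed above from one
 * AMD76X_MEM_BASE_ADDR register.  PAGE_SHIFT is assumed to be 12
 * (4 KiB pages) and the sample value of `mba` is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint32_t mba = 0x10000f81;      /* hypothetical: enabled, 256 MiB base, 256 MiB row */
        uint32_t mba_base, mba_mask;
        unsigned long first_page, nr_pages, last_page;

        if (!(mba & 1)) {               /* bit 0: chip-select enable */
                printf("row disabled\n");
                return 0;
        }

        mba_base = mba & 0xff800000UL;
        mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;

        first_page = mba_base >> PAGE_SHIFT;
        nr_pages   = ((unsigned long) mba_mask + 1) >> PAGE_SHIFT;
        last_page  = first_page + nr_pages - 1;

        printf("first page %#lx, pages %#lx, last page %#lx\n",
               first_page, nr_pages, last_page);
        return 0;
}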
265 /* clear counters */ 253 amd76x_get_error_info(mci, &discard); /* clear counters */
266 pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, (u32) (0x3 << 8),
267 (u32) (0x3 << 8));
268 254
269 if (edac_mc_add_mc(mci)) { 255 if (edac_mc_add_mc(mci)) {
270 debugf3("MC: " __FILE__ 256 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
271 ": %s(): failed edac_mc_add_mc()\n", __func__);
272 goto fail; 257 goto fail;
273 } 258 }
274 259
275 /* get this far and it's successful */ 260 /* get this far and it's successful */
276 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 261 debugf3("%s(): success\n", __func__);
277 return 0; 262 return 0;
278 263
279fail: 264fail:
280 if (mci) { 265 if (mci != NULL)
281 if(mci->pdev)
282 pci_dev_put(mci->pdev);
283 edac_mc_free(mci); 266 edac_mc_free(mci);
284 }
285 return rc; 267 return rc;
286} 268}
287 269
288/* returns count (>= 0), or negative on error */ 270/* returns count (>= 0), or negative on error */
289static int __devinit amd76x_init_one(struct pci_dev *pdev, 271static int __devinit amd76x_init_one(struct pci_dev *pdev,
290 const struct pci_device_id *ent) 272 const struct pci_device_id *ent)
291{ 273{
292 debugf0("MC: " __FILE__ ": %s()\n", __func__); 274 debugf0("%s()\n", __func__);
293 275
294 /* don't need to call pci_device_enable() */ 276 /* don't need to call pci_device_enable() */
295 return amd76x_probe1(pdev, ent->driver_data); 277 return amd76x_probe1(pdev, ent->driver_data);
296} 278}
297 279
298
299/** 280/**
300 * amd76x_remove_one - driver shutdown 281 * amd76x_remove_one - driver shutdown
301 * @pdev: PCI device being handed back 282 * @pdev: PCI device being handed back
@@ -304,35 +285,36 @@ static int __devinit amd76x_init_one(struct pci_dev *pdev,
304 * structure for the device then delete the mci and free the 285 * structure for the device then delete the mci and free the
305 * resources. 286 * resources.
306 */ 287 */
307
308static void __devexit amd76x_remove_one(struct pci_dev *pdev) 288static void __devexit amd76x_remove_one(struct pci_dev *pdev)
309{ 289{
310 struct mem_ctl_info *mci; 290 struct mem_ctl_info *mci;
311 291
312 debugf0(__FILE__ ": %s()\n", __func__); 292 debugf0("%s()\n", __func__);
313 293
314 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL) 294 if ((mci = edac_mc_del_mc(pdev)) == NULL)
315 return; 295 return;
316 if (edac_mc_del_mc(mci)) 296
317 return;
318 pci_dev_put(mci->pdev);
319 edac_mc_free(mci); 297 edac_mc_free(mci);
320} 298}
321 299
322
323static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = { 300static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
324 {PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 301 {
325 AMD762}, 302 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
326 {PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 303 AMD762
327 AMD761}, 304 },
328 {0,} /* 0 terminated list. */ 305 {
306 PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
307 AMD761
308 },
309 {
310 0,
311 } /* 0 terminated list. */
329}; 312};
330 313
331MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl); 314MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl);
332 315
333
334static struct pci_driver amd76x_driver = { 316static struct pci_driver amd76x_driver = {
335 .name = BS_MOD_STR, 317 .name = EDAC_MOD_STR,
336 .probe = amd76x_init_one, 318 .probe = amd76x_init_one,
337 .remove = __devexit_p(amd76x_remove_one), 319 .remove = __devexit_p(amd76x_remove_one),
338 .id_table = amd76x_pci_tbl, 320 .id_table = amd76x_pci_tbl,
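Tear-down in this driver is also reworked: instead of looking the controller up with edac_mc_find_mci_by_pdev() and then passing the mci to edac_mc_del_mc(), amd76x_remove_one() now calls edac_mc_del_mc(pdev) directly and gets the detached mci back (or NULL), and the explicit pci_dev_get()/pci_dev_put() pair on mci->pdev is gone. The toy registry below models that lookup-and-remove-in-one-call shape; the data structures are invented, only the calling convention mirrors the new EDAC core API.

/* Toy model of the new edac_mc_del_mc(pdev) convention used above:
 * one call both finds the controller registered against a device and
 * unregisters it, returning the detached object for the caller to free.
 * The registry below is a stand-in, not the EDAC core.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct model_mci {
        const void *pdev;
        char ctl_name[16];
};

#define MAX_MCI 4
static struct model_mci *registry[MAX_MCI];

static int model_add_mc(struct model_mci *mci)
{
        for (int i = 0; i < MAX_MCI; i++)
                if (!registry[i]) {
                        registry[i] = mci;
                        return 0;
                }
        return -1;
}

static struct model_mci *model_del_mc(const void *pdev)
{
        for (int i = 0; i < MAX_MCI; i++)
                if (registry[i] && registry[i]->pdev == pdev) {
                        struct model_mci *mci = registry[i];
                        registry[i] = NULL;
                        return mci;     /* caller frees it, as in remove_one() */
                }
        return NULL;
}

int main(void)
{
        int fake_pdev;
        struct model_mci *mci = calloc(1, sizeof(*mci));

        mci->pdev = &fake_pdev;
        strcpy(mci->ctl_name, "AMD762");
        model_add_mc(mci);

        mci = model_del_mc(&fake_pdev);         /* new style: keyed by the device */
        if (mci) {
                printf("removed %s\n", mci->ctl_name);
                free(mci);                      /* edac_mc_free() in the driver */
        }
        return 0;
}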
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index c454ded2b060..66572c5323ad 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -17,18 +17,19 @@
17 * 17 *
18 */ 18 */
19 19
20
21#include <linux/config.h> 20#include <linux/config.h>
22#include <linux/module.h> 21#include <linux/module.h>
23#include <linux/init.h> 22#include <linux/init.h>
24
25#include <linux/pci.h> 23#include <linux/pci.h>
26#include <linux/pci_ids.h> 24#include <linux/pci_ids.h>
27
28#include <linux/slab.h> 25#include <linux/slab.h>
29
30#include "edac_mc.h" 26#include "edac_mc.h"
31 27
28#define e752x_printk(level, fmt, arg...) \
29 edac_printk(level, "e752x", fmt, ##arg)
30
31#define e752x_mc_printk(mci, level, fmt, arg...) \
32 edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
32 33
33#ifndef PCI_DEVICE_ID_INTEL_7520_0 34#ifndef PCI_DEVICE_ID_INTEL_7520_0
34#define PCI_DEVICE_ID_INTEL_7520_0 0x3590 35#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
@@ -56,7 +57,6 @@
56 57
57#define E752X_NR_CSROWS 8 /* number of csrows */ 58#define E752X_NR_CSROWS 8 /* number of csrows */
58 59
59
60/* E752X register addresses - device 0 function 0 */ 60/* E752X register addresses - device 0 function 0 */
61#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */ 61#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
62#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */ 62#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
@@ -156,7 +156,6 @@ enum e752x_chips {
156 E7320 = 2 156 E7320 = 2
157}; 157};
158 158
159
160struct e752x_pvt { 159struct e752x_pvt {
161 struct pci_dev *bridge_ck; 160 struct pci_dev *bridge_ck;
162 struct pci_dev *dev_d0f0; 161 struct pci_dev *dev_d0f0;
@@ -170,9 +169,9 @@ struct e752x_pvt {
170 const struct e752x_dev_info *dev_info; 169 const struct e752x_dev_info *dev_info;
171}; 170};
172 171
173
174struct e752x_dev_info { 172struct e752x_dev_info {
175 u16 err_dev; 173 u16 err_dev;
174 u16 ctl_dev;
176 const char *ctl_name; 175 const char *ctl_name;
177}; 176};
178 177
@@ -198,38 +197,47 @@ struct e752x_error_info {
198 197
199static const struct e752x_dev_info e752x_devs[] = { 198static const struct e752x_dev_info e752x_devs[] = {
200 [E7520] = { 199 [E7520] = {
201 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR, 200 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
202 .ctl_name = "E7520"}, 201 .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
202 .ctl_name = "E7520"
203 },
203 [E7525] = { 204 [E7525] = {
204 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR, 205 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
205 .ctl_name = "E7525"}, 206 .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
207 .ctl_name = "E7525"
208 },
206 [E7320] = { 209 [E7320] = {
207 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, 210 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
208 .ctl_name = "E7320"}, 211 .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
212 .ctl_name = "E7320"
213 },
209}; 214};
210 215
211
212static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, 216static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
213 unsigned long page) 217 unsigned long page)
214{ 218{
215 u32 remap; 219 u32 remap;
216 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 220 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
217 221
218 debugf3("MC: " __FILE__ ": %s()\n", __func__); 222 debugf3("%s()\n", __func__);
219 223
220 if (page < pvt->tolm) 224 if (page < pvt->tolm)
221 return page; 225 return page;
226
222 if ((page >= 0x100000) && (page < pvt->remapbase)) 227 if ((page >= 0x100000) && (page < pvt->remapbase))
223 return page; 228 return page;
229
224 remap = (page - pvt->tolm) + pvt->remapbase; 230 remap = (page - pvt->tolm) + pvt->remapbase;
231
225 if (remap < pvt->remaplimit) 232 if (remap < pvt->remaplimit)
226 return remap; 233 return remap;
227 printk(KERN_ERR "Invalid page %lx - out of range\n", page); 234
235 e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
228 return pvt->tolm - 1; 236 return pvt->tolm - 1;
229} 237}
230 238
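ctl_page_to_phys() above undoes the chipset's memory remapping when translating an error page back to a physical page: pages below the top of low memory (tolm) and pages between 4 GiB (page 0x100000) and the remap window map straight through, everything else is shifted by (remapbase - tolm), and anything at or past remaplimit is treated as invalid. A standalone version of that arithmetic, with invented window values, is sketched below.

/* Standalone version of the remap translation above.  The tolm,
 * remapbase and remaplimit values are invented for the example; on
 * real hardware they come from E752X_TOLM/REMAPBASE/REMAPLIMIT.
 */
#include <stdio.h>

struct remap_window {
        unsigned long tolm;             /* top of low memory, in pages */
        unsigned long remapbase;        /* start of remapped region, in pages */
        unsigned long remaplimit;       /* end of remapped region, in pages */
};

static unsigned long model_page_to_phys(const struct remap_window *w,
                                        unsigned long page)
{
        unsigned long remap;

        if (page < w->tolm)
                return page;                    /* below the hole: identity */
        if (page >= 0x100000 && page < w->remapbase)
                return page;                    /* above 4 GiB, below the window */

        remap = (page - w->tolm) + w->remapbase;
        if (remap < w->remaplimit)
                return remap;

        fprintf(stderr, "invalid page %#lx - out of range\n", page);
        return w->tolm - 1;
}

int main(void)
{
        /* hypothetical 3 GiB of low memory remapped above 4 GiB */
        struct remap_window w = {
                .tolm       = 0xc0000,
                .remapbase  = 0x100000,
                .remaplimit = 0x140000,
        };

        printf("%#lx -> %#lx\n", 0x80000UL, model_page_to_phys(&w, 0x80000));
        printf("%#lx -> %#lx\n", 0xd0000UL, model_page_to_phys(&w, 0xd0000));
        return 0;
}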
231static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, 239static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
232 u32 sec1_add, u16 sec1_syndrome) 240 u32 sec1_add, u16 sec1_syndrome)
233{ 241{
234 u32 page; 242 u32 page;
235 int row; 243 int row;
@@ -237,7 +245,7 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
237 int i; 245 int i;
238 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 246 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
239 247
240 debugf3("MC: " __FILE__ ": %s()\n", __func__); 248 debugf3("%s()\n", __func__);
241 249
242 /* convert the addr to 4k page */ 250 /* convert the addr to 4k page */
243 page = sec1_add >> (PAGE_SHIFT - 4); 251 page = sec1_add >> (PAGE_SHIFT - 4);
@@ -246,36 +254,37 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
246 if (pvt->mc_symmetric) { 254 if (pvt->mc_symmetric) {
247 /* chip select are bits 14 & 13 */ 255 /* chip select are bits 14 & 13 */
248 row = ((page >> 1) & 3); 256 row = ((page >> 1) & 3);
249 printk(KERN_WARNING 257 e752x_printk(KERN_WARNING,
250 "Test row %d Table %d %d %d %d %d %d %d %d\n", 258 "Test row %d Table %d %d %d %d %d %d %d %d\n", row,
251 row, pvt->map[0], pvt->map[1], pvt->map[2], 259 pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
252 pvt->map[3], pvt->map[4], pvt->map[5], 260 pvt->map[4], pvt->map[5], pvt->map[6], pvt->map[7]);
253 pvt->map[6], pvt->map[7]);
254 261
255 /* test for channel remapping */ 262 /* test for channel remapping */
256 for (i = 0; i < 8; i++) { 263 for (i = 0; i < 8; i++) {
257 if (pvt->map[i] == row) 264 if (pvt->map[i] == row)
258 break; 265 break;
259 } 266 }
260 printk(KERN_WARNING "Test computed row %d\n", i); 267
268 e752x_printk(KERN_WARNING, "Test computed row %d\n", i);
269
261 if (i < 8) 270 if (i < 8)
262 row = i; 271 row = i;
263 else 272 else
264 printk(KERN_WARNING 273 e752x_mc_printk(mci, KERN_WARNING,
265 "MC%d: row %d not found in remap table\n", 274 "row %d not found in remap table\n", row);
266 mci->mc_idx, row);
267 } else 275 } else
268 row = edac_mc_find_csrow_by_page(mci, page); 276 row = edac_mc_find_csrow_by_page(mci, page);
277
269 /* 0 = channel A, 1 = channel B */ 278 /* 0 = channel A, 1 = channel B */
270 channel = !(error_one & 1); 279 channel = !(error_one & 1);
271 280
272 if (!pvt->map_type) 281 if (!pvt->map_type)
273 row = 7 - row; 282 row = 7 - row;
283
274 edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel, 284 edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel,
275 "e752x CE"); 285 "e752x CE");
276} 286}
277 287
278
279static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, 288static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
280 u32 sec1_add, u16 sec1_syndrome, int *error_found, 289 u32 sec1_add, u16 sec1_syndrome, int *error_found,
281 int handle_error) 290 int handle_error)
@@ -286,36 +295,42 @@ static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
286 do_process_ce(mci, error_one, sec1_add, sec1_syndrome); 295 do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
287} 296}
288 297
289static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, u32 ded_add, 298static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
290 u32 scrb_add) 299 u32 ded_add, u32 scrb_add)
291{ 300{
292 u32 error_2b, block_page; 301 u32 error_2b, block_page;
293 int row; 302 int row;
294 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 303 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
295 304
296 debugf3("MC: " __FILE__ ": %s()\n", __func__); 305 debugf3("%s()\n", __func__);
297 306
298 if (error_one & 0x0202) { 307 if (error_one & 0x0202) {
299 error_2b = ded_add; 308 error_2b = ded_add;
309
300 /* convert to 4k address */ 310 /* convert to 4k address */
301 block_page = error_2b >> (PAGE_SHIFT - 4); 311 block_page = error_2b >> (PAGE_SHIFT - 4);
312
302 row = pvt->mc_symmetric ? 313 row = pvt->mc_symmetric ?
303 /* chip select are bits 14 & 13 */ 314 /* chip select are bits 14 & 13 */
304 ((block_page >> 1) & 3) : 315 ((block_page >> 1) & 3) :
305 edac_mc_find_csrow_by_page(mci, block_page); 316 edac_mc_find_csrow_by_page(mci, block_page);
317
306 edac_mc_handle_ue(mci, block_page, 0, row, 318 edac_mc_handle_ue(mci, block_page, 0, row,
307 "e752x UE from Read"); 319 "e752x UE from Read");
308 } 320 }
309 if (error_one & 0x0404) { 321 if (error_one & 0x0404) {
310 error_2b = scrb_add; 322 error_2b = scrb_add;
323
311 /* convert to 4k address */ 324 /* convert to 4k address */
312 block_page = error_2b >> (PAGE_SHIFT - 4); 325 block_page = error_2b >> (PAGE_SHIFT - 4);
326
313 row = pvt->mc_symmetric ? 327 row = pvt->mc_symmetric ?
314 /* chip select are bits 14 & 13 */ 328 /* chip select are bits 14 & 13 */
315 ((block_page >> 1) & 3) : 329 ((block_page >> 1) & 3) :
316 edac_mc_find_csrow_by_page(mci, block_page); 330 edac_mc_find_csrow_by_page(mci, block_page);
331
317 edac_mc_handle_ue(mci, block_page, 0, row, 332 edac_mc_handle_ue(mci, block_page, 0, row,
318 "e752x UE from Scruber"); 333 "e752x UE from Scruber");
319 } 334 }
320} 335}
321 336
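do_process_ue() above handles two separate uncorrectable-error sources: a demand read (flagged by the 0x0202 bits, address in ded_add) and the background scrubber (0x0404, address in scrb_add). In both cases the logged address is shifted right by PAGE_SHIFT - 4 to get a 4 KiB page number, which suggests the register reports the address in 16-byte units (an inference, not confirmed here), and the row is either taken from address bits 14:13 when the controller is in symmetric mode or found by a normal csrow lookup. A small sketch of the symmetric-mode computation follows; the sample address is made up.

/* Sketch of the symmetric-mode row selection used above: the logged
 * address (assumed to be in 16-byte units, per the PAGE_SHIFT - 4
 * shift in the driver) is reduced to a 4 KiB page number and the
 * chip-select row comes from address bits 14:13.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint32_t ded_add = 0x00123450;  /* hypothetical logged address */
        uint32_t block_page = ded_add >> (PAGE_SHIFT - 4);
        unsigned int row = (block_page >> 1) & 3;       /* address bits 14:13 */

        printf("page %#x, symmetric-mode row %u\n",
               (unsigned int) block_page, row);
        return 0;
}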
@@ -336,7 +351,7 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
336 if (!handle_error) 351 if (!handle_error)
337 return; 352 return;
338 353
339 debugf3("MC: " __FILE__ ": %s()\n", __func__); 354 debugf3("%s()\n", __func__);
340 edac_mc_handle_ue_no_info(mci, "e752x UE log memory write"); 355 edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
341} 356}
342 357
@@ -348,13 +363,13 @@ static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
348 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; 363 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
349 364
350 error_1b = retry_add; 365 error_1b = retry_add;
351 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ 366 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
352 row = pvt->mc_symmetric ? 367 row = pvt->mc_symmetric ?
353 ((page >> 1) & 3) : /* chip select are bits 14 & 13 */ 368 ((page >> 1) & 3) : /* chip select are bits 14 & 13 */
354 edac_mc_find_csrow_by_page(mci, page); 369 edac_mc_find_csrow_by_page(mci, page);
355 printk(KERN_WARNING 370 e752x_mc_printk(mci, KERN_WARNING,
356 "MC%d: CE page 0x%lx, row %d : Memory read retry\n", 371 "CE page 0x%lx, row %d : Memory read retry\n",
357 mci->mc_idx, (long unsigned int) page, row); 372 (long unsigned int) page, row);
358} 373}
359 374
360static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, 375static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -372,8 +387,7 @@ static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
372 *error_found = 1; 387 *error_found = 1;
373 388
374 if (handle_error) 389 if (handle_error)
375 printk(KERN_WARNING "MC%d: Memory threshold CE\n", 390 e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
376 mci->mc_idx);
377} 391}
378 392
379static char *global_message[11] = { 393static char *global_message[11] = {
@@ -391,8 +405,8 @@ static void do_global_error(int fatal, u32 errors)
391 405
392 for (i = 0; i < 11; i++) { 406 for (i = 0; i < 11; i++) {
393 if (errors & (1 << i)) 407 if (errors & (1 << i))
394 printk(KERN_WARNING "%sError %s\n", 408 e752x_printk(KERN_WARNING, "%sError %s\n",
395 fatal_message[fatal], global_message[i]); 409 fatal_message[fatal], global_message[i]);
396 } 410 }
397} 411}
398 412
@@ -418,8 +432,8 @@ static void do_hub_error(int fatal, u8 errors)
418 432
419 for (i = 0; i < 7; i++) { 433 for (i = 0; i < 7; i++) {
420 if (errors & (1 << i)) 434 if (errors & (1 << i))
421 printk(KERN_WARNING "%sError %s\n", 435 e752x_printk(KERN_WARNING, "%sError %s\n",
422 fatal_message[fatal], hub_message[i]); 436 fatal_message[fatal], hub_message[i]);
423 } 437 }
424} 438}
425 439
@@ -445,8 +459,8 @@ static void do_membuf_error(u8 errors)
445 459
446 for (i = 0; i < 4; i++) { 460 for (i = 0; i < 4; i++) {
447 if (errors & (1 << i)) 461 if (errors & (1 << i))
448 printk(KERN_WARNING "Non-Fatal Error %s\n", 462 e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
449 membuf_message[i]); 463 membuf_message[i]);
450 } 464 }
451} 465}
452 466
@@ -458,8 +472,7 @@ static inline void membuf_error(u8 errors, int *error_found, int handle_error)
458 do_membuf_error(errors); 472 do_membuf_error(errors);
459} 473}
460 474
461#if 0 475static char *sysbus_message[10] = {
462char *sysbus_message[10] = {
463 "Addr or Request Parity", 476 "Addr or Request Parity",
464 "Data Strobe Glitch", 477 "Data Strobe Glitch",
465 "Addr Strobe Glitch", 478 "Addr Strobe Glitch",
@@ -470,7 +483,6 @@ char *sysbus_message[10] = {
470 "Memory Parity", 483 "Memory Parity",
471 "IO Subsystem Parity" 484 "IO Subsystem Parity"
472}; 485};
473#endif /* 0 */
474 486
475static void do_sysbus_error(int fatal, u32 errors) 487static void do_sysbus_error(int fatal, u32 errors)
476{ 488{
@@ -478,8 +490,8 @@ static void do_sysbus_error(int fatal, u32 errors)
478 490
479 for (i = 0; i < 10; i++) { 491 for (i = 0; i < 10; i++) {
480 if (errors & (1 << i)) 492 if (errors & (1 << i))
481 printk(KERN_WARNING "%sError System Bus %s\n", 493 e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
482 fatal_message[fatal], global_message[i]); 494 fatal_message[fatal], sysbus_message[i]);
483 } 495 }
484} 496}
485 497
@@ -492,33 +504,42 @@ static inline void sysbus_error(int fatal, u32 errors, int *error_found,
492 do_sysbus_error(fatal, errors); 504 do_sysbus_error(fatal, errors);
493} 505}
494 506
495static void e752x_check_hub_interface (struct e752x_error_info *info, 507static void e752x_check_hub_interface(struct e752x_error_info *info,
496 int *error_found, int handle_error) 508 int *error_found, int handle_error)
497{ 509{
498 u8 stat8; 510 u8 stat8;
499 511
500 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8); 512 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
513
501 stat8 = info->hi_ferr; 514 stat8 = info->hi_ferr;
515
502 if(stat8 & 0x7f) { /* Error, so process */ 516 if(stat8 & 0x7f) { /* Error, so process */
503 stat8 &= 0x7f; 517 stat8 &= 0x7f;
518
504 if(stat8 & 0x2b) 519 if(stat8 & 0x2b)
505 hub_error(1, stat8 & 0x2b, error_found, handle_error); 520 hub_error(1, stat8 & 0x2b, error_found, handle_error);
521
506 if(stat8 & 0x54) 522 if(stat8 & 0x54)
507 hub_error(0, stat8 & 0x54, error_found, handle_error); 523 hub_error(0, stat8 & 0x54, error_found, handle_error);
508 } 524 }
525
509 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8); 526 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
527
510 stat8 = info->hi_nerr; 528 stat8 = info->hi_nerr;
529
511 if(stat8 & 0x7f) { /* Error, so process */ 530 if(stat8 & 0x7f) { /* Error, so process */
512 stat8 &= 0x7f; 531 stat8 &= 0x7f;
532
513 if (stat8 & 0x2b) 533 if (stat8 & 0x2b)
514 hub_error(1, stat8 & 0x2b, error_found, handle_error); 534 hub_error(1, stat8 & 0x2b, error_found, handle_error);
535
515 if(stat8 & 0x54) 536 if(stat8 & 0x54)
516 hub_error(0, stat8 & 0x54, error_found, handle_error); 537 hub_error(0, stat8 & 0x54, error_found, handle_error);
517 } 538 }
518} 539}
519 540
520static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found, 541static void e752x_check_sysbus(struct e752x_error_info *info,
521 int handle_error) 542 int *error_found, int handle_error)
522{ 543{
523 u32 stat32, error32; 544 u32 stat32, error32;
524 545
@@ -530,27 +551,34 @@ static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found,
530 551
531 error32 = (stat32 >> 16) & 0x3ff; 552 error32 = (stat32 >> 16) & 0x3ff;
532 stat32 = stat32 & 0x3ff; 553 stat32 = stat32 & 0x3ff;
554
533 if(stat32 & 0x083) 555 if(stat32 & 0x083)
534 sysbus_error(1, stat32 & 0x083, error_found, handle_error); 556 sysbus_error(1, stat32 & 0x083, error_found, handle_error);
557
535 if(stat32 & 0x37c) 558 if(stat32 & 0x37c)
536 sysbus_error(0, stat32 & 0x37c, error_found, handle_error); 559 sysbus_error(0, stat32 & 0x37c, error_found, handle_error);
560
537 if(error32 & 0x083) 561 if(error32 & 0x083)
538 sysbus_error(1, error32 & 0x083, error_found, handle_error); 562 sysbus_error(1, error32 & 0x083, error_found, handle_error);
563
539 if(error32 & 0x37c) 564 if(error32 & 0x37c)
540 sysbus_error(0, error32 & 0x37c, error_found, handle_error); 565 sysbus_error(0, error32 & 0x37c, error_found, handle_error);
541} 566}
542 567
543static void e752x_check_membuf (struct e752x_error_info *info, int *error_found, 568static void e752x_check_membuf (struct e752x_error_info *info,
544 int handle_error) 569 int *error_found, int handle_error)
545{ 570{
546 u8 stat8; 571 u8 stat8;
547 572
548 stat8 = info->buf_ferr; 573 stat8 = info->buf_ferr;
574
549 if (stat8 & 0x0f) { /* Error, so process */ 575 if (stat8 & 0x0f) { /* Error, so process */
550 stat8 &= 0x0f; 576 stat8 &= 0x0f;
551 membuf_error(stat8, error_found, handle_error); 577 membuf_error(stat8, error_found, handle_error);
552 } 578 }
579
553 stat8 = info->buf_nerr; 580 stat8 = info->buf_nerr;
581
554 if (stat8 & 0x0f) { /* Error, so process */ 582 if (stat8 & 0x0f) { /* Error, so process */
555 stat8 &= 0x0f; 583 stat8 &= 0x0f;
556 membuf_error(stat8, error_found, handle_error); 584 membuf_error(stat8, error_found, handle_error);
@@ -558,7 +586,8 @@ static void e752x_check_membuf (struct e752x_error_info *info, int *error_found,
558} 586}
559 587
560static void e752x_check_dram (struct mem_ctl_info *mci, 588static void e752x_check_dram (struct mem_ctl_info *mci,
561 struct e752x_error_info *info, int *error_found, int handle_error) 589 struct e752x_error_info *info, int *error_found,
590 int handle_error)
562{ 591{
563 u16 error_one, error_next; 592 u16 error_one, error_next;
564 593
@@ -608,7 +637,7 @@ static void e752x_check_dram (struct mem_ctl_info *mci,
608} 637}
609 638
610static void e752x_get_error_info (struct mem_ctl_info *mci, 639static void e752x_get_error_info (struct mem_ctl_info *mci,
611 struct e752x_error_info *info) 640 struct e752x_error_info *info)
612{ 641{
613 struct pci_dev *dev; 642 struct pci_dev *dev;
614 struct e752x_pvt *pvt; 643 struct e752x_pvt *pvt;
@@ -616,7 +645,6 @@ static void e752x_get_error_info (struct mem_ctl_info *mci,
616 memset(info, 0, sizeof(*info)); 645 memset(info, 0, sizeof(*info));
617 pvt = (struct e752x_pvt *) mci->pvt_info; 646 pvt = (struct e752x_pvt *) mci->pvt_info;
618 dev = pvt->dev_d0f1; 647 dev = pvt->dev_d0f1;
619
620 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); 648 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
621 649
622 if (info->ferr_global) { 650 if (info->ferr_global) {
@@ -727,7 +755,8 @@ static int e752x_process_error_info (struct mem_ctl_info *mci,
727static void e752x_check(struct mem_ctl_info *mci) 755static void e752x_check(struct mem_ctl_info *mci)
728{ 756{
729 struct e752x_error_info info; 757 struct e752x_error_info info;
730 debugf3("MC: " __FILE__ ": %s()\n", __func__); 758
759 debugf3("%s()\n", __func__);
731 e752x_get_error_info(mci, &info); 760 e752x_get_error_info(mci, &info);
732 e752x_process_error_info(mci, &info, 1); 761 e752x_process_error_info(mci, &info, 1);
733} 762}
@@ -736,23 +765,21 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
736{ 765{
737 int rc = -ENODEV; 766 int rc = -ENODEV;
738 int index; 767 int index;
739 u16 pci_data, stat; 768 u16 pci_data;
740 u32 stat32;
741 u16 stat16;
742 u8 stat8; 769 u8 stat8;
743 struct mem_ctl_info *mci = NULL; 770 struct mem_ctl_info *mci = NULL;
744 struct e752x_pvt *pvt = NULL; 771 struct e752x_pvt *pvt = NULL;
745 u16 ddrcsr; 772 u16 ddrcsr;
746 u32 drc; 773 u32 drc;
747 int drc_chan; /* Number of channels 0=1chan,1=2chan */ 774 int drc_chan; /* Number of channels 0=1chan,1=2chan */
748 int drc_drbg; /* DRB granularity 0=64mb,1=128mb */ 775 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
749 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 776 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
750 u32 dra; 777 u32 dra;
751 unsigned long last_cumul_size; 778 unsigned long last_cumul_size;
752 struct pci_dev *pres_dev;
753 struct pci_dev *dev = NULL; 779 struct pci_dev *dev = NULL;
780 struct e752x_error_info discard;
754 781
755 debugf0("MC: " __FILE__ ": %s(): mci\n", __func__); 782 debugf0("%s(): mci\n", __func__);
756 debugf0("Starting Probe1\n"); 783 debugf0("Starting Probe1\n");
757 784
758 /* enable device 0 function 1 */ 785 /* enable device 0 function 1 */
@@ -776,34 +803,35 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
776 goto fail; 803 goto fail;
777 } 804 }
778 805
779 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); 806 debugf3("%s(): init mci\n", __func__);
780
781 mci->mtype_cap = MEM_FLAG_RDDR; 807 mci->mtype_cap = MEM_FLAG_RDDR;
782 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | 808 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
783 EDAC_FLAG_S4ECD4ED; 809 EDAC_FLAG_S4ECD4ED;
784 /* FIXME - what if different memory types are in different csrows? */ 810 /* FIXME - what if different memory types are in different csrows? */
785 mci->mod_name = BS_MOD_STR; 811 mci->mod_name = EDAC_MOD_STR;
786 mci->mod_ver = "$Revision: 1.5.2.11 $"; 812 mci->mod_ver = "$Revision: 1.5.2.11 $";
787 mci->pdev = pdev; 813 mci->pdev = pdev;
788 814
789 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__); 815 debugf3("%s(): init pvt\n", __func__);
790 pvt = (struct e752x_pvt *) mci->pvt_info; 816 pvt = (struct e752x_pvt *) mci->pvt_info;
791 pvt->dev_info = &e752x_devs[dev_idx]; 817 pvt->dev_info = &e752x_devs[dev_idx];
792 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 818 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
793 pvt->dev_info->err_dev, 819 pvt->dev_info->err_dev,
794 pvt->bridge_ck); 820 pvt->bridge_ck);
821
795 if (pvt->bridge_ck == NULL) 822 if (pvt->bridge_ck == NULL)
796 pvt->bridge_ck = pci_scan_single_device(pdev->bus, 823 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
797 PCI_DEVFN(0, 1)); 824 PCI_DEVFN(0, 1));
825
798 if (pvt->bridge_ck == NULL) { 826 if (pvt->bridge_ck == NULL) {
799 printk(KERN_ERR "MC: error reporting device not found:" 827 e752x_printk(KERN_ERR, "error reporting device not found:"
800 "vendor %x device 0x%x (broken BIOS?)\n", 828 "vendor %x device 0x%x (broken BIOS?)\n",
801 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); 829 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
802 goto fail; 830 goto fail;
803 } 831 }
804 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
805 832
806 debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__); 833 pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);
834 debugf3("%s(): more mci init\n", __func__);
807 mci->ctl_name = pvt->dev_info->ctl_name; 835 mci->ctl_name = pvt->dev_info->ctl_name;
808 mci->edac_check = e752x_check; 836 mci->edac_check = e752x_check;
809 mci->ctl_page_to_phys = ctl_page_to_phys; 837 mci->ctl_page_to_phys = ctl_page_to_phys;
@@ -820,6 +848,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
820 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { 848 for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
821 u8 value; 849 u8 value;
822 u32 cumul_size; 850 u32 cumul_size;
851
823 /* mem_dev 0=x8, 1=x4 */ 852 /* mem_dev 0=x8, 1=x4 */
824 int mem_dev = (dra >> (index * 4 + 2)) & 0x3; 853 int mem_dev = (dra >> (index * 4 + 2)) & 0x3;
825 struct csrow_info *csrow = &mci->csrows[index]; 854 struct csrow_info *csrow = &mci->csrows[index];
@@ -828,17 +857,18 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
828 pci_read_config_byte(mci->pdev, E752X_DRB + index, &value); 857 pci_read_config_byte(mci->pdev, E752X_DRB + index, &value);
829 /* convert a 128 or 64 MiB DRB to a page size. */ 858 /* convert a 128 or 64 MiB DRB to a page size. */
830 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 859 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
831 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", 860 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
832 __func__, index, cumul_size); 861 cumul_size);
862
833 if (cumul_size == last_cumul_size) 863 if (cumul_size == last_cumul_size)
834 continue; /* not populated */ 864 continue; /* not populated */
835 865
836 csrow->first_page = last_cumul_size; 866 csrow->first_page = last_cumul_size;
837 csrow->last_page = cumul_size - 1; 867 csrow->last_page = cumul_size - 1;
838 csrow->nr_pages = cumul_size - last_cumul_size; 868 csrow->nr_pages = cumul_size - last_cumul_size;
839 last_cumul_size = cumul_size; 869 last_cumul_size = cumul_size;
840 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 870 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
841 csrow->mtype = MEM_RDDR; /* only one type supported */ 871 csrow->mtype = MEM_RDDR; /* only one type supported */
842 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; 872 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
843 873
844 /* 874 /*
@@ -862,29 +892,32 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
862 u8 value; 892 u8 value;
863 u8 last = 0; 893 u8 last = 0;
864 u8 row = 0; 894 u8 row = 0;
865 for (index = 0; index < 8; index += 2) {
866 895
896 for (index = 0; index < 8; index += 2) {
867 pci_read_config_byte(mci->pdev, E752X_DRB + index, 897 pci_read_config_byte(mci->pdev, E752X_DRB + index,
868 &value); 898 &value);
899
869 /* test if there is a dimm in this slot */ 900 /* test if there is a dimm in this slot */
870 if (value == last) { 901 if (value == last) {
871 /* no dimm in the slot, so flag it as empty */ 902 /* no dimm in the slot, so flag it as empty */
872 pvt->map[index] = 0xff; 903 pvt->map[index] = 0xff;
873 pvt->map[index + 1] = 0xff; 904 pvt->map[index + 1] = 0xff;
874 } else { /* there is a dimm in the slot */ 905 } else { /* there is a dimm in the slot */
875 pvt->map[index] = row; 906 pvt->map[index] = row;
876 row++; 907 row++;
877 last = value; 908 last = value;
878 /* test the next value to see if the dimm is 909 /* test the next value to see if the dimm is
879 double sided */ 910 double sided */
880 pci_read_config_byte(mci->pdev, 911 pci_read_config_byte(mci->pdev,
881 E752X_DRB + index + 1, 912 E752X_DRB + index + 1,
882 &value); 913 &value);
883 pvt->map[index + 1] = (value == last) ? 914 pvt->map[index + 1] = (value == last) ?
884 0xff : /* the dimm is single sided, 915 0xff : /* the dimm is single sided,
885 so flag as empty */ 916 * so flag as empty
886 row; /* this is a double sided dimm 917 */
887 to save the next row # */ 918 row; /* this is a double sided dimm
919 * to save the next row #
920 */
888 row++; 921 row++;
889 last = value; 922 last = value;
890 } 923 }
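The loop above builds pvt->map[] by walking the DRAM row boundary registers two at a time: the DRB values are cumulative, so a value equal to the previous one means an empty slot (flagged 0xff), a change means a populated side that gets the next row number, and the odd register of the pair decides whether the DIMM is double sided. A self-contained version of that scan over an invented DRB array is sketched below.

/* Standalone version of the DRB scan above.  The drb[] contents are
 * invented; on the real chipset they come from E752X_DRB + index.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* cumulative row boundaries: the first DIMM is double sided,
         * the second single sided, the last two slots are empty */
        uint8_t drb[8] = { 4, 8, 12, 12, 12, 12, 12, 12 };
        uint8_t map[8];
        uint8_t last = 0, row = 0;

        for (int index = 0; index < 8; index += 2) {
                if (drb[index] == last) {
                        /* no dimm in the slot, flag both sides empty */
                        map[index] = 0xff;
                        map[index + 1] = 0xff;
                } else {
                        map[index] = row;
                        row++;
                        last = drb[index];
                        /* odd register unchanged -> the dimm is single sided */
                        map[index + 1] = (drb[index + 1] == last) ? 0xff : row;
                        row++;
                        last = drb[index + 1];
                }
        }

        for (int i = 0; i < 8; i++)
                printf("map[%d] = 0x%02x\n", i, (unsigned int) map[i]);
        return 0;
}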
@@ -896,9 +929,8 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
896 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); 929 pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));
897 930
898 mci->edac_cap |= EDAC_FLAG_NONE; 931 mci->edac_cap |= EDAC_FLAG_NONE;
932 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
899 933
900 debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n",
901 __func__);
902 /* load the top of low memory, remap base, and remap limit vars */ 934 /* load the top of low memory, remap base, and remap limit vars */
903 pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data); 935 pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data);
904 pvt->tolm = ((u32) pci_data) << 4; 936 pvt->tolm = ((u32) pci_data) << 4;
@@ -906,43 +938,18 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
906 pvt->remapbase = ((u32) pci_data) << 14; 938 pvt->remapbase = ((u32) pci_data) << 14;
907 pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data); 939 pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data);
908 pvt->remaplimit = ((u32) pci_data) << 14; 940 pvt->remaplimit = ((u32) pci_data) << 14;
909 printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, 941 e752x_printk(KERN_INFO,
910 pvt->remapbase, pvt->remaplimit); 942 "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
943 pvt->remapbase, pvt->remaplimit);
911 944
912 if (edac_mc_add_mc(mci)) { 945 if (edac_mc_add_mc(mci)) {
913 debugf3("MC: " __FILE__ 946 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
914 ": %s(): failed edac_mc_add_mc()\n",
915 __func__);
916 goto fail; 947 goto fail;
917 } 948 }
918 949
919 /* Walk through the PCI table and clear errors */ 950 dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev,
920 switch (dev_idx) { 951 NULL);
921 case E7520:
922 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
923 PCI_DEVICE_ID_INTEL_7520_0, NULL);
924 break;
925 case E7525:
926 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
927 PCI_DEVICE_ID_INTEL_7525_0, NULL);
928 break;
929 case E7320:
930 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
931 PCI_DEVICE_ID_INTEL_7320_0, NULL);
932 break;
933 }
934
935
936 pvt->dev_d0f0 = dev; 952 pvt->dev_d0f0 = dev;
937 for (pres_dev = dev;
938 ((struct pci_dev *) pres_dev->global_list.next != dev);
939 pres_dev = (struct pci_dev *) pres_dev->global_list.next) {
940 pci_read_config_dword(pres_dev, PCI_COMMAND, &stat32);
941 stat = (u16) (stat32 >> 16);
942 /* clear any error bits */
943 if (stat32 & ((1 << 6) + (1 << 8)))
944 pci_write_config_word(pres_dev, PCI_STATUS, stat);
945 }
946 /* find the error reporting device and clear errors */ 953 /* find the error reporting device and clear errors */
947 dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); 954 dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
948 /* Turn off error disable & SMI in case the BIOS turned it on */ 955 /* Turn off error disable & SMI in case the BIOS turned it on */
@@ -954,67 +961,51 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
954 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); 961 pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
955 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); 962 pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
956 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); 963 pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
957 /* clear other MCH errors */ 964
958 pci_read_config_dword(dev, E752X_FERR_GLOBAL, &stat32); 965 e752x_get_error_info(mci, &discard); /* clear other MCH errors */
959 pci_write_config_dword(dev, E752X_FERR_GLOBAL, stat32);
960 pci_read_config_dword(dev, E752X_NERR_GLOBAL, &stat32);
961 pci_write_config_dword(dev, E752X_NERR_GLOBAL, stat32);
962 pci_read_config_byte(dev, E752X_HI_FERR, &stat8);
963 pci_write_config_byte(dev, E752X_HI_FERR, stat8);
964 pci_read_config_byte(dev, E752X_HI_NERR, &stat8);
965 pci_write_config_byte(dev, E752X_HI_NERR, stat8);
966 pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32);
967 pci_write_config_dword(dev, E752X_SYSBUS_FERR, stat32);
968 pci_read_config_byte(dev, E752X_BUF_FERR, &stat8);
969 pci_write_config_byte(dev, E752X_BUF_FERR, stat8);
970 pci_read_config_byte(dev, E752X_BUF_NERR, &stat8);
971 pci_write_config_byte(dev, E752X_BUF_NERR, stat8);
972 pci_read_config_word(dev, E752X_DRAM_FERR, &stat16);
973 pci_write_config_word(dev, E752X_DRAM_FERR, stat16);
974 pci_read_config_word(dev, E752X_DRAM_NERR, &stat16);
975 pci_write_config_word(dev, E752X_DRAM_NERR, stat16);
976 966
977 /* get this far and it's successful */ 967 /* get this far and it's successful */
978 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 968 debugf3("%s(): success\n", __func__);
979 return 0; 969 return 0;
980 970
981fail: 971fail:
982 if (mci) { 972 if (mci) {
983 if (pvt->dev_d0f0) 973 if (pvt->dev_d0f0)
984 pci_dev_put(pvt->dev_d0f0); 974 pci_dev_put(pvt->dev_d0f0);
975
985 if (pvt->dev_d0f1) 976 if (pvt->dev_d0f1)
986 pci_dev_put(pvt->dev_d0f1); 977 pci_dev_put(pvt->dev_d0f1);
978
987 if (pvt->bridge_ck) 979 if (pvt->bridge_ck)
988 pci_dev_put(pvt->bridge_ck); 980 pci_dev_put(pvt->bridge_ck);
981
989 edac_mc_free(mci); 982 edac_mc_free(mci);
990 } 983 }
984
991 return rc; 985 return rc;
992} 986}
993 987
994/* returns count (>= 0), or negative on error */ 988/* returns count (>= 0), or negative on error */
995static int __devinit e752x_init_one(struct pci_dev *pdev, 989static int __devinit e752x_init_one(struct pci_dev *pdev,
996 const struct pci_device_id *ent) 990 const struct pci_device_id *ent)
997{ 991{
998 debugf0("MC: " __FILE__ ": %s()\n", __func__); 992 debugf0("%s()\n", __func__);
999 993
1000 /* wake up and enable device */ 994 /* wake up and enable device */
1001 if(pci_enable_device(pdev) < 0) 995 if(pci_enable_device(pdev) < 0)
1002 return -EIO; 996 return -EIO;
997
1003 return e752x_probe1(pdev, ent->driver_data); 998 return e752x_probe1(pdev, ent->driver_data);
1004} 999}
1005 1000
1006
1007static void __devexit e752x_remove_one(struct pci_dev *pdev) 1001static void __devexit e752x_remove_one(struct pci_dev *pdev)
1008{ 1002{
1009 struct mem_ctl_info *mci; 1003 struct mem_ctl_info *mci;
1010 struct e752x_pvt *pvt; 1004 struct e752x_pvt *pvt;
1011 1005
1012 debugf0(__FILE__ ": %s()\n", __func__); 1006 debugf0("%s()\n", __func__);
1013
1014 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL)
1015 return;
1016 1007
1017 if (edac_mc_del_mc(mci)) 1008 if ((mci = edac_mc_del_mc(pdev)) == NULL)
1018 return; 1009 return;
1019 1010
1020 pvt = (struct e752x_pvt *) mci->pvt_info; 1011 pvt = (struct e752x_pvt *) mci->pvt_info;
@@ -1024,45 +1015,48 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
1024 edac_mc_free(mci); 1015 edac_mc_free(mci);
1025} 1016}
1026 1017
1027
1028static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { 1018static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
1029 {PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1019 {
1030 E7520}, 1020 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1031 {PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1021 E7520
1032 E7525}, 1022 },
1033 {PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1023 {
1034 E7320}, 1024 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1035 {0,} /* 0 terminated list. */ 1025 E7525
1026 },
1027 {
1028 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1029 E7320
1030 },
1031 {
1032 0,
1033 } /* 0 terminated list. */
1036}; 1034};
1037 1035
1038MODULE_DEVICE_TABLE(pci, e752x_pci_tbl); 1036MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1039 1037
1040
1041static struct pci_driver e752x_driver = { 1038static struct pci_driver e752x_driver = {
1042 .name = BS_MOD_STR, 1039 .name = EDAC_MOD_STR,
1043 .probe = e752x_init_one, 1040 .probe = e752x_init_one,
1044 .remove = __devexit_p(e752x_remove_one), 1041 .remove = __devexit_p(e752x_remove_one),
1045 .id_table = e752x_pci_tbl, 1042 .id_table = e752x_pci_tbl,
1046}; 1043};
1047 1044
1048
1049static int __init e752x_init(void) 1045static int __init e752x_init(void)
1050{ 1046{
1051 int pci_rc; 1047 int pci_rc;
1052 1048
1053 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1049 debugf3("%s()\n", __func__);
1054 pci_rc = pci_register_driver(&e752x_driver); 1050 pci_rc = pci_register_driver(&e752x_driver);
1055 return (pci_rc < 0) ? pci_rc : 0; 1051 return (pci_rc < 0) ? pci_rc : 0;
1056} 1052}
1057 1053
1058
1059static void __exit e752x_exit(void) 1054static void __exit e752x_exit(void)
1060{ 1055{
1061 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1056 debugf3("%s()\n", __func__);
1062 pci_unregister_driver(&e752x_driver); 1057 pci_unregister_driver(&e752x_driver);
1063} 1058}
1064 1059
1065
1066module_init(e752x_init); 1060module_init(e752x_init);
1067module_exit(e752x_exit); 1061module_exit(e752x_exit);
1068 1062
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index d5e320dfc66f..a9518d3e4be4 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -11,9 +11,9 @@
11 * http://www.anime.net/~goemon/linux-ecc/ 11 * http://www.anime.net/~goemon/linux-ecc/
12 * 12 *
13 * Contributors: 13 * Contributors:
14 * Eric Biederman (Linux Networx) 14 * Eric Biederman (Linux Networx)
15 * Tom Zimmerman (Linux Networx) 15 * Tom Zimmerman (Linux Networx)
16 * Jim Garlick (Lawrence Livermore National Labs) 16 * Jim Garlick (Lawrence Livermore National Labs)
17 * Dave Peterson (Lawrence Livermore National Labs) 17 * Dave Peterson (Lawrence Livermore National Labs)
18 * That One Guy (Some other place) 18 * That One Guy (Some other place)
19 * Wang Zhenyu (intel.com) 19 * Wang Zhenyu (intel.com)
@@ -22,7 +22,6 @@
22 * 22 *
23 */ 23 */
24 24
25
26#include <linux/config.h> 25#include <linux/config.h>
27#include <linux/module.h> 26#include <linux/module.h>
28#include <linux/init.h> 27#include <linux/init.h>
@@ -31,6 +30,11 @@
31#include <linux/slab.h> 30#include <linux/slab.h>
32#include "edac_mc.h" 31#include "edac_mc.h"
33 32
33#define e7xxx_printk(level, fmt, arg...) \
34 edac_printk(level, "e7xxx", fmt, ##arg)
35
36#define e7xxx_mc_printk(mci, level, fmt, arg...) \
37 edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg)
34 38
35#ifndef PCI_DEVICE_ID_INTEL_7205_0 39#ifndef PCI_DEVICE_ID_INTEL_7205_0
36#define PCI_DEVICE_ID_INTEL_7205_0 0x255d 40#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
@@ -64,11 +68,9 @@
64#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551 68#define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551
65#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ 69#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
66 70
67
68#define E7XXX_NR_CSROWS 8 /* number of csrows */ 71#define E7XXX_NR_CSROWS 8 /* number of csrows */
69#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */ 72#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
70 73
71
72/* E7XXX register addresses - device 0 function 0 */ 74/* E7XXX register addresses - device 0 function 0 */
73#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ 75#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
74#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */ 76#define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */
@@ -118,7 +120,6 @@ enum e7xxx_chips {
118 E7205, 120 E7205,
119}; 121};
120 122
121
122struct e7xxx_pvt { 123struct e7xxx_pvt {
123 struct pci_dev *bridge_ck; 124 struct pci_dev *bridge_ck;
124 u32 tolm; 125 u32 tolm;
@@ -127,13 +128,11 @@ struct e7xxx_pvt {
127 const struct e7xxx_dev_info *dev_info; 128 const struct e7xxx_dev_info *dev_info;
128}; 129};
129 130
130
131struct e7xxx_dev_info { 131struct e7xxx_dev_info {
132 u16 err_dev; 132 u16 err_dev;
133 const char *ctl_name; 133 const char *ctl_name;
134}; 134};
135 135
136
137struct e7xxx_error_info { 136struct e7xxx_error_info {
138 u8 dram_ferr; 137 u8 dram_ferr;
139 u8 dram_nerr; 138 u8 dram_nerr;
@@ -144,108 +143,110 @@ struct e7xxx_error_info {
144 143
145static const struct e7xxx_dev_info e7xxx_devs[] = { 144static const struct e7xxx_dev_info e7xxx_devs[] = {
146 [E7500] = { 145 [E7500] = {
147 .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, 146 .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR,
148 .ctl_name = "E7500"}, 147 .ctl_name = "E7500"
148 },
149 [E7501] = { 149 [E7501] = {
150 .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, 150 .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR,
151 .ctl_name = "E7501"}, 151 .ctl_name = "E7501"
152 },
152 [E7505] = { 153 [E7505] = {
153 .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, 154 .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR,
154 .ctl_name = "E7505"}, 155 .ctl_name = "E7505"
156 },
155 [E7205] = { 157 [E7205] = {
156 .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, 158 .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR,
157 .ctl_name = "E7205"}, 159 .ctl_name = "E7205"
160 },
158}; 161};
159 162
160
161/* FIXME - is this valid for both SECDED and S4ECD4ED? */ 163/* FIXME - is this valid for both SECDED and S4ECD4ED? */
162static inline int e7xxx_find_channel(u16 syndrome) 164static inline int e7xxx_find_channel(u16 syndrome)
163{ 165{
164 debugf3("MC: " __FILE__ ": %s()\n", __func__); 166 debugf3("%s()\n", __func__);
165 167
166 if ((syndrome & 0xff00) == 0) 168 if ((syndrome & 0xff00) == 0)
167 return 0; 169 return 0;
170
168 if ((syndrome & 0x00ff) == 0) 171 if ((syndrome & 0x00ff) == 0)
169 return 1; 172 return 1;
173
170 if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0) 174 if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
171 return 0; 175 return 0;
176
172 return 1; 177 return 1;
173} 178}
174 179
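e7xxx_find_channel() above maps an ECC syndrome to the memory channel it came from: a syndrome confined to the low byte points at channel 0, one confined to the high byte at channel 1, and for syndromes spanning both bytes a zero in either of the two upper nibbles still selects channel 0 (the FIXME notes this has not been confirmed for S4ECD4ED). The same decision tree is reproduced below with a few invented syndromes as a worked example.

/* Reproduction of the syndrome-to-channel mapping above, exercised
 * with made-up syndrome values.
 */
#include <stdint.h>
#include <stdio.h>

static int model_find_channel(uint16_t syndrome)
{
        if ((syndrome & 0xff00) == 0)
                return 0;       /* only low-byte check bits set */
        if ((syndrome & 0x00ff) == 0)
                return 1;       /* only high-byte check bits set */
        if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
                return 0;
        return 1;
}

int main(void)
{
        uint16_t samples[] = { 0x0040, 0x4000, 0x0f01, 0x1101 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("syndrome %#06x -> channel %d\n",
                       (unsigned int) samples[i],
                       model_find_channel(samples[i]));
        return 0;
}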
175 180static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
176static unsigned long 181 unsigned long page)
177ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page)
178{ 182{
179 u32 remap; 183 u32 remap;
180 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info; 184 struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info;
181 185
182 debugf3("MC: " __FILE__ ": %s()\n", __func__); 186 debugf3("%s()\n", __func__);
183 187
184 if ((page < pvt->tolm) || 188 if ((page < pvt->tolm) ||
185 ((page >= 0x100000) && (page < pvt->remapbase))) 189 ((page >= 0x100000) && (page < pvt->remapbase)))
186 return page; 190 return page;
191
187 remap = (page - pvt->tolm) + pvt->remapbase; 192 remap = (page - pvt->tolm) + pvt->remapbase;
193
188 if (remap < pvt->remaplimit) 194 if (remap < pvt->remaplimit)
189 return remap; 195 return remap;
190 printk(KERN_ERR "Invalid page %lx - out of range\n", page); 196
197 e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
191 return pvt->tolm - 1; 198 return pvt->tolm - 1;
192} 199}
193 200
194 201static void process_ce(struct mem_ctl_info *mci,
195static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) 202 struct e7xxx_error_info *info)
196{ 203{
197 u32 error_1b, page; 204 u32 error_1b, page;
198 u16 syndrome; 205 u16 syndrome;
199 int row; 206 int row;
200 int channel; 207 int channel;
201 208
202 debugf3("MC: " __FILE__ ": %s()\n", __func__); 209 debugf3("%s()\n", __func__);
203
204 /* read the error address */ 210 /* read the error address */
205 error_1b = info->dram_celog_add; 211 error_1b = info->dram_celog_add;
206 /* FIXME - should use PAGE_SHIFT */ 212 /* FIXME - should use PAGE_SHIFT */
207 page = error_1b >> 6; /* convert the address to 4k page */ 213 page = error_1b >> 6; /* convert the address to 4k page */
208 /* read the syndrome */ 214 /* read the syndrome */
209 syndrome = info->dram_celog_syndrome; 215 syndrome = info->dram_celog_syndrome;
210 /* FIXME - check for -1 */ 216 /* FIXME - check for -1 */
211 row = edac_mc_find_csrow_by_page(mci, page); 217 row = edac_mc_find_csrow_by_page(mci, page);
212 /* convert syndrome to channel */ 218 /* convert syndrome to channel */
213 channel = e7xxx_find_channel(syndrome); 219 channel = e7xxx_find_channel(syndrome);
214 edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, 220 edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
215 "e7xxx CE");
216} 221}
217 222
218
219static void process_ce_no_info(struct mem_ctl_info *mci) 223static void process_ce_no_info(struct mem_ctl_info *mci)
220{ 224{
221 debugf3("MC: " __FILE__ ": %s()\n", __func__); 225 debugf3("%s()\n", __func__);
222 edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); 226 edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
223} 227}
224 228
225 229static void process_ue(struct mem_ctl_info *mci,
226static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) 230 struct e7xxx_error_info *info)
227{ 231{
228 u32 error_2b, block_page; 232 u32 error_2b, block_page;
229 int row; 233 int row;
230 234
231 debugf3("MC: " __FILE__ ": %s()\n", __func__); 235 debugf3("%s()\n", __func__);
232
233 /* read the error address */ 236 /* read the error address */
234 error_2b = info->dram_uelog_add; 237 error_2b = info->dram_uelog_add;
235 /* FIXME - should use PAGE_SHIFT */ 238 /* FIXME - should use PAGE_SHIFT */
236 block_page = error_2b >> 6; /* convert to 4k address */ 239 block_page = error_2b >> 6; /* convert to 4k address */
237 row = edac_mc_find_csrow_by_page(mci, block_page); 240 row = edac_mc_find_csrow_by_page(mci, block_page);
238 edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); 241 edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
239} 242}
240 243
241
242static void process_ue_no_info(struct mem_ctl_info *mci) 244static void process_ue_no_info(struct mem_ctl_info *mci)
243{ 245{
244 debugf3("MC: " __FILE__ ": %s()\n", __func__); 246 debugf3("%s()\n", __func__);
245 edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); 247 edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
246} 248}
247 249
248
249static void e7xxx_get_error_info (struct mem_ctl_info *mci, 250static void e7xxx_get_error_info (struct mem_ctl_info *mci,
250 struct e7xxx_error_info *info) 251 struct e7xxx_error_info *info)
251{ 252{
@@ -253,31 +254,29 @@ static void e7xxx_get_error_info (struct mem_ctl_info *mci,
253 254
254 pvt = (struct e7xxx_pvt *) mci->pvt_info; 255 pvt = (struct e7xxx_pvt *) mci->pvt_info;
255 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, 256 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR,
256 &info->dram_ferr); 257 &info->dram_ferr);
257 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, 258 pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR,
258 &info->dram_nerr); 259 &info->dram_nerr);
259 260
260 if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) { 261 if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) {
261 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD, 262 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD,
262 &info->dram_celog_add); 263 &info->dram_celog_add);
263 pci_read_config_word(pvt->bridge_ck, 264 pci_read_config_word(pvt->bridge_ck,
264 E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome); 265 E7XXX_DRAM_CELOG_SYNDROME,
266 &info->dram_celog_syndrome);
265 } 267 }
266 268
267 if ((info->dram_ferr & 2) || (info->dram_nerr & 2)) 269 if ((info->dram_ferr & 2) || (info->dram_nerr & 2))
268 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD, 270 pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD,
269 &info->dram_uelog_add); 271 &info->dram_uelog_add);
270 272
271 if (info->dram_ferr & 3) 273 if (info->dram_ferr & 3)
272 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 274 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03);
273 0x03);
274 275
275 if (info->dram_nerr & 3) 276 if (info->dram_nerr & 3)
276 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 277 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
277 0x03);
278} 278}
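
The FERR/NERR handling above uses bit 0 of each register to gate the correctable-error log reads and bit 1 to gate the uncorrectable one, then writes both bits back to clear them. A trivial decoder mirroring only that usage; the register values are invented:

#include <stdio.h>
#include <stdint.h>

static void decode(const char *name, uint8_t reg)
{
	/* bit 0: a CE was logged, bit 1: a UE was logged (per the driver) */
	printf("%s=0x%02x:%s%s\n", name, reg,
	       (reg & 1) ? " CE-logged" : "",
	       (reg & 2) ? " UE-logged" : "");
}

int main(void)
{
	decode("FERR", 0x01);	/* hypothetical: first error was correctable */
	decode("NERR", 0x03);	/* hypothetical: both kinds seen since */
	return 0;
}
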
279 279
280
281static int e7xxx_process_error_info (struct mem_ctl_info *mci, 280static int e7xxx_process_error_info (struct mem_ctl_info *mci,
282 struct e7xxx_error_info *info, int handle_errors) 281 struct e7xxx_error_info *info, int handle_errors)
283{ 282{
@@ -325,17 +324,15 @@ static int e7xxx_process_error_info (struct mem_ctl_info *mci,
325 return error_found; 324 return error_found;
326} 325}
327 326
328
329static void e7xxx_check(struct mem_ctl_info *mci) 327static void e7xxx_check(struct mem_ctl_info *mci)
330{ 328{
331 struct e7xxx_error_info info; 329 struct e7xxx_error_info info;
332 330
333 debugf3("MC: " __FILE__ ": %s()\n", __func__); 331 debugf3("%s()\n", __func__);
334 e7xxx_get_error_info(mci, &info); 332 e7xxx_get_error_info(mci, &info);
335 e7xxx_process_error_info(mci, &info, 1); 333 e7xxx_process_error_info(mci, &info, 1);
336} 334}
337 335
338
339static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) 336static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
340{ 337{
341 int rc = -ENODEV; 338 int rc = -ENODEV;
@@ -349,19 +346,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
349 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 346 int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
350 u32 dra; 347 u32 dra;
351 unsigned long last_cumul_size; 348 unsigned long last_cumul_size;
349 struct e7xxx_error_info discard;
352 350
353 351 debugf0("%s(): mci\n", __func__);
354 debugf0("MC: " __FILE__ ": %s(): mci\n", __func__);
355 352
356 /* need to find out the number of channels */ 353 /* need to find out the number of channels */
357 pci_read_config_dword(pdev, E7XXX_DRC, &drc); 354 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
355
358 /* only e7501 can be single channel */ 356 /* only e7501 can be single channel */
359 if (dev_idx == E7501) { 357 if (dev_idx == E7501) {
360 drc_chan = ((drc >> 22) & 0x1); 358 drc_chan = ((drc >> 22) & 0x1);
361 drc_drbg = (drc >> 18) & 0x3; 359 drc_drbg = (drc >> 18) & 0x3;
362 } 360 }
363 drc_ddim = (drc >> 20) & 0x3;
364 361
362 drc_ddim = (drc >> 20) & 0x3;
365 mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); 363 mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1);
366 364
367 if (mci == NULL) { 365 if (mci == NULL) {
@@ -369,33 +367,31 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
369 goto fail; 367 goto fail;
370 } 368 }
371 369
372 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); 370 debugf3("%s(): init mci\n", __func__);
373
374 mci->mtype_cap = MEM_FLAG_RDDR; 371 mci->mtype_cap = MEM_FLAG_RDDR;
375 mci->edac_ctl_cap = 372 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
376 EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED; 373 EDAC_FLAG_S4ECD4ED;
377 /* FIXME - what if different memory types are in different csrows? */ 374 /* FIXME - what if different memory types are in different csrows? */
378 mci->mod_name = BS_MOD_STR; 375 mci->mod_name = EDAC_MOD_STR;
379 mci->mod_ver = "$Revision: 1.5.2.9 $"; 376 mci->mod_ver = "$Revision: 1.5.2.9 $";
380 mci->pdev = pdev; 377 mci->pdev = pdev;
381 378
382 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__); 379 debugf3("%s(): init pvt\n", __func__);
383 pvt = (struct e7xxx_pvt *) mci->pvt_info; 380 pvt = (struct e7xxx_pvt *) mci->pvt_info;
384 pvt->dev_info = &e7xxx_devs[dev_idx]; 381 pvt->dev_info = &e7xxx_devs[dev_idx];
385 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, 382 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
386 pvt->dev_info->err_dev, 383 pvt->dev_info->err_dev,
387 pvt->bridge_ck); 384 pvt->bridge_ck);
385
388 if (!pvt->bridge_ck) { 386 if (!pvt->bridge_ck) {
389 printk(KERN_ERR 387 e7xxx_printk(KERN_ERR, "error reporting device not found:"
390 "MC: error reporting device not found:" 388 "vendor %x device 0x%x (broken BIOS?)\n",
391 "vendor %x device 0x%x (broken BIOS?)\n", 389 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
392 PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev);
393 goto fail; 390 goto fail;
394 } 391 }
395 392
396 debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__); 393 debugf3("%s(): more mci init\n", __func__);
397 mci->ctl_name = pvt->dev_info->ctl_name; 394 mci->ctl_name = pvt->dev_info->ctl_name;
398
399 mci->edac_check = e7xxx_check; 395 mci->edac_check = e7xxx_check;
400 mci->ctl_page_to_phys = ctl_page_to_phys; 396 mci->ctl_page_to_phys = ctl_page_to_phys;
401 397
@@ -418,17 +414,18 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
418 pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value); 414 pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value);
419 /* convert a 64 or 32 MiB DRB to a page size. */ 415 /* convert a 64 or 32 MiB DRB to a page size. */
420 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); 416 cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
421 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", 417 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
422 __func__, index, cumul_size); 418 cumul_size);
419
423 if (cumul_size == last_cumul_size) 420 if (cumul_size == last_cumul_size)
424 continue; /* not populated */ 421 continue; /* not populated */
425 422
426 csrow->first_page = last_cumul_size; 423 csrow->first_page = last_cumul_size;
427 csrow->last_page = cumul_size - 1; 424 csrow->last_page = cumul_size - 1;
428 csrow->nr_pages = cumul_size - last_cumul_size; 425 csrow->nr_pages = cumul_size - last_cumul_size;
429 last_cumul_size = cumul_size; 426 last_cumul_size = cumul_size;
430 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 427 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
431 csrow->mtype = MEM_RDDR; /* only one type supported */ 428 csrow->mtype = MEM_RDDR; /* only one type supported */
432 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; 429 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
433 430
434 /* 431 /*
@@ -449,8 +446,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
449 446
450 mci->edac_cap |= EDAC_FLAG_NONE; 447 mci->edac_cap |= EDAC_FLAG_NONE;
451 448
452 debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n", 449 debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
453 __func__);
454 /* load the top of low memory, remap base, and remap limit vars */ 450 /* load the top of low memory, remap base, and remap limit vars */
455 pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data); 451 pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data);
456 pvt->tolm = ((u32) pci_data) << 4; 452 pvt->tolm = ((u32) pci_data) << 4;
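
The probe loop earlier in this function converts each DRB register value to a cumulative page count with value << (25 + drc_drbg - PAGE_SHIFT), i.e. 32 MiB or 64 MiB per DRB unit depending on drc_drbg. A worked example of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12) and an invented DRB reading:

#include <stdio.h>

int main(void)
{
	const unsigned page_shift = 12;	/* assumed 4 KiB pages */
	const unsigned drb_value = 4;	/* made-up register value */
	unsigned drc_drbg;

	for (drc_drbg = 0; drc_drbg <= 1; drc_drbg++) {
		unsigned long pages =
			(unsigned long)drb_value << (25 + drc_drbg - page_shift);

		printf("drc_drbg=%u: %u DRB units -> %lu pages (%lu MiB)\n",
		       drc_drbg, drb_value, pages,
		       pages >> (20 - page_shift));
	}
	return 0;
}
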
@@ -458,22 +454,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
458 pvt->remapbase = ((u32) pci_data) << 14; 454 pvt->remapbase = ((u32) pci_data) << 14;
459 pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data); 455 pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data);
460 pvt->remaplimit = ((u32) pci_data) << 14; 456 pvt->remaplimit = ((u32) pci_data) << 14;
461 printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, 457 e7xxx_printk(KERN_INFO,
462 pvt->remapbase, pvt->remaplimit); 458 "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm,
459 pvt->remapbase, pvt->remaplimit);
463 460
464 /* clear any pending errors, or initial state bits */ 461 /* clear any pending errors, or initial state bits */
465 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03); 462 e7xxx_get_error_info(mci, &discard);
466 pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03);
467 463
468 if (edac_mc_add_mc(mci) != 0) { 464 if (edac_mc_add_mc(mci) != 0) {
469 debugf3("MC: " __FILE__ 465 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
470 ": %s(): failed edac_mc_add_mc()\n",
471 __func__);
472 goto fail; 466 goto fail;
473 } 467 }
474 468
475 /* get this far and it's successful */ 469 /* get this far and it's successful */
476 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 470 debugf3("%s(): success\n", __func__);
477 return 0; 471 return 0;
478 472
479fail: 473fail:
@@ -487,62 +481,67 @@ fail:
487} 481}
488 482
489/* returns count (>= 0), or negative on error */ 483/* returns count (>= 0), or negative on error */
490static int __devinit 484static int __devinit e7xxx_init_one(struct pci_dev *pdev,
491e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 485 const struct pci_device_id *ent)
492{ 486{
493 debugf0("MC: " __FILE__ ": %s()\n", __func__); 487 debugf0("%s()\n", __func__);
494 488
495 /* wake up and enable device */ 489 /* wake up and enable device */
496 return pci_enable_device(pdev) ? 490 return pci_enable_device(pdev) ?
497 -EIO : e7xxx_probe1(pdev, ent->driver_data); 491 -EIO : e7xxx_probe1(pdev, ent->driver_data);
498} 492}
499 493
500
501static void __devexit e7xxx_remove_one(struct pci_dev *pdev) 494static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
502{ 495{
503 struct mem_ctl_info *mci; 496 struct mem_ctl_info *mci;
504 struct e7xxx_pvt *pvt; 497 struct e7xxx_pvt *pvt;
505 498
506 debugf0(__FILE__ ": %s()\n", __func__); 499 debugf0("%s()\n", __func__);
507 500
508 if (((mci = edac_mc_find_mci_by_pdev(pdev)) != 0) && 501 if ((mci = edac_mc_del_mc(pdev)) == NULL)
509 edac_mc_del_mc(mci)) { 502 return;
510 pvt = (struct e7xxx_pvt *) mci->pvt_info;
511 pci_dev_put(pvt->bridge_ck);
512 edac_mc_free(mci);
513 }
514}
515 503
504 pvt = (struct e7xxx_pvt *) mci->pvt_info;
505 pci_dev_put(pvt->bridge_ck);
506 edac_mc_free(mci);
507}
516 508
517static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = { 509static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
518 {PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 510 {
519 E7205}, 511 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
520 {PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 512 E7205
521 E7500}, 513 },
522 {PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 514 {
523 E7501}, 515 PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
524 {PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 516 E7500
525 E7505}, 517 },
526 {0,} /* 0 terminated list. */ 518 {
519 PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
520 E7501
521 },
522 {
523 PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
524 E7505
525 },
526 {
527 0,
528 } /* 0 terminated list. */
527}; 529};
528 530
529MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl); 531MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl);
530 532
531
532static struct pci_driver e7xxx_driver = { 533static struct pci_driver e7xxx_driver = {
533 .name = BS_MOD_STR, 534 .name = EDAC_MOD_STR,
534 .probe = e7xxx_init_one, 535 .probe = e7xxx_init_one,
535 .remove = __devexit_p(e7xxx_remove_one), 536 .remove = __devexit_p(e7xxx_remove_one),
536 .id_table = e7xxx_pci_tbl, 537 .id_table = e7xxx_pci_tbl,
537}; 538};
538 539
539
540static int __init e7xxx_init(void) 540static int __init e7xxx_init(void)
541{ 541{
542 return pci_register_driver(&e7xxx_driver); 542 return pci_register_driver(&e7xxx_driver);
543} 543}
544 544
545
546static void __exit e7xxx_exit(void) 545static void __exit e7xxx_exit(void)
547{ 546{
548 pci_unregister_driver(&e7xxx_driver); 547 pci_unregister_driver(&e7xxx_driver);
@@ -551,8 +550,7 @@ static void __exit e7xxx_exit(void)
551module_init(e7xxx_init); 550module_init(e7xxx_init);
552module_exit(e7xxx_exit); 551module_exit(e7xxx_exit);
553 552
554
555MODULE_LICENSE("GPL"); 553MODULE_LICENSE("GPL");
556MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" 554MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
557 "Based on.work by Dan Hollis et al"); 555 "Based on.work by Dan Hollis et al");
558MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers"); 556MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers");
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 9c205274c1cb..905f58ba8e16 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -12,7 +12,6 @@
12 * 12 *
13 */ 13 */
14 14
15
16#include <linux/config.h> 15#include <linux/config.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/proc_fs.h> 17#include <linux/proc_fs.h>
@@ -29,14 +28,13 @@
29#include <linux/list.h> 28#include <linux/list.h>
30#include <linux/sysdev.h> 29#include <linux/sysdev.h>
31#include <linux/ctype.h> 30#include <linux/ctype.h>
32 31#include <linux/kthread.h>
33#include <asm/uaccess.h> 32#include <asm/uaccess.h>
34#include <asm/page.h> 33#include <asm/page.h>
35#include <asm/edac.h> 34#include <asm/edac.h>
36
37#include "edac_mc.h" 35#include "edac_mc.h"
38 36
39#define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__ 37#define EDAC_MC_VERSION "Ver: 2.0.0 " __DATE__
40 38
41/* For now, disable the EDAC sysfs code. The sysfs interface that EDAC 39/* For now, disable the EDAC sysfs code. The sysfs interface that EDAC
42 * presents to user space needs more thought, and is likely to change 40 * presents to user space needs more thought, and is likely to change
@@ -47,7 +45,7 @@
47#ifdef CONFIG_EDAC_DEBUG 45#ifdef CONFIG_EDAC_DEBUG
48/* Values of 0 to 4 will generate output */ 46/* Values of 0 to 4 will generate output */
49int edac_debug_level = 1; 47int edac_debug_level = 1;
50EXPORT_SYMBOL(edac_debug_level); 48EXPORT_SYMBOL_GPL(edac_debug_level);
51#endif 49#endif
52 50
53/* EDAC Controls, settable by module parameter, and sysfs */ 51
@@ -64,13 +62,14 @@ static atomic_t pci_parity_count = ATOMIC_INIT(0);
64static DECLARE_MUTEX(mem_ctls_mutex); 62static DECLARE_MUTEX(mem_ctls_mutex);
65static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices); 63static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
66 64
65static struct task_struct *edac_thread;
66
67/* Structure of the whitelist and blacklist arrays */ 67/* Structure of the whitelist and blacklist arrays */
68struct edac_pci_device_list { 68struct edac_pci_device_list {
69 unsigned int vendor; /* Vendor ID */ 69 unsigned int vendor; /* Vendor ID */
 70 unsigned int device; /* Device ID */ 70 unsigned int device; /* Device ID */
71}; 71};
72 72
73
74#define MAX_LISTED_PCI_DEVICES 32 73#define MAX_LISTED_PCI_DEVICES 32
75 74
76/* List of PCI devices (vendor-id:device-id) that should be skipped */ 75/* List of PCI devices (vendor-id:device-id) that should be skipped */
@@ -123,7 +122,6 @@ static const char *edac_caps[] = {
123 [EDAC_S16ECD16ED] = "S16ECD16ED" 122 [EDAC_S16ECD16ED] = "S16ECD16ED"
124}; 123};
125 124
126
127/* sysfs object: /sys/devices/system/edac */ 125/* sysfs object: /sys/devices/system/edac */
128static struct sysdev_class edac_class = { 126static struct sysdev_class edac_class = {
129 set_kset_name("edac"), 127 set_kset_name("edac"),
@@ -136,9 +134,15 @@ static struct sysdev_class edac_class = {
136static struct kobject edac_memctrl_kobj; 134static struct kobject edac_memctrl_kobj;
137static struct kobject edac_pci_kobj; 135static struct kobject edac_pci_kobj;
138 136
137/* We use these to wait for the reference counts on edac_memctrl_kobj and
138 * edac_pci_kobj to reach 0.
139 */
140static struct completion edac_memctrl_kobj_complete;
141static struct completion edac_pci_kobj_complete;
142
139/* 143/*
140 * /sys/devices/system/edac/mc; 144 * /sys/devices/system/edac/mc;
141 * data structures and methods 145 * data structures and methods
142 */ 146 */
143#if 0 147#if 0
144static ssize_t memctrl_string_show(void *ptr, char *buffer) 148static ssize_t memctrl_string_show(void *ptr, char *buffer)
@@ -165,33 +169,34 @@ static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
165} 169}
166 170
167struct memctrl_dev_attribute { 171struct memctrl_dev_attribute {
168 struct attribute attr; 172 struct attribute attr;
169 void *value; 173 void *value;
170 ssize_t (*show)(void *,char *); 174 ssize_t (*show)(void *,char *);
171 ssize_t (*store)(void *, const char *, size_t); 175 ssize_t (*store)(void *, const char *, size_t);
172}; 176};
173 177
174/* Set of show/store abstract level functions for memory control object */ 178/* Set of show/store abstract level functions for memory control object */
175static ssize_t 179static ssize_t memctrl_dev_show(struct kobject *kobj,
176memctrl_dev_show(struct kobject *kobj, struct attribute *attr, char *buffer) 180 struct attribute *attr, char *buffer)
177{ 181{
178 struct memctrl_dev_attribute *memctrl_dev; 182 struct memctrl_dev_attribute *memctrl_dev;
179 memctrl_dev = (struct memctrl_dev_attribute*)attr; 183 memctrl_dev = (struct memctrl_dev_attribute*)attr;
180 184
181 if (memctrl_dev->show) 185 if (memctrl_dev->show)
182 return memctrl_dev->show(memctrl_dev->value, buffer); 186 return memctrl_dev->show(memctrl_dev->value, buffer);
187
183 return -EIO; 188 return -EIO;
184} 189}
185 190
186static ssize_t 191static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
187memctrl_dev_store(struct kobject *kobj, struct attribute *attr, 192 const char *buffer, size_t count)
188 const char *buffer, size_t count)
189{ 193{
190 struct memctrl_dev_attribute *memctrl_dev; 194 struct memctrl_dev_attribute *memctrl_dev;
191 memctrl_dev = (struct memctrl_dev_attribute*)attr; 195 memctrl_dev = (struct memctrl_dev_attribute*)attr;
192 196
193 if (memctrl_dev->store) 197 if (memctrl_dev->store)
194 return memctrl_dev->store(memctrl_dev->value, buffer, count); 198 return memctrl_dev->store(memctrl_dev->value, buffer, count);
199
195 return -EIO; 200 return -EIO;
196} 201}
197 202
@@ -227,7 +232,6 @@ MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
227MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); 232MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
228MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); 233MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
229 234
230
231/* Base Attributes of the memory ECC object */ 235/* Base Attributes of the memory ECC object */
232static struct memctrl_dev_attribute *memctrl_attr[] = { 236static struct memctrl_dev_attribute *memctrl_attr[] = {
233 &attr_panic_on_ue, 237 &attr_panic_on_ue,
@@ -240,13 +244,14 @@ static struct memctrl_dev_attribute *memctrl_attr[] = {
240/* Main MC kobject release() function */ 244/* Main MC kobject release() function */
241static void edac_memctrl_master_release(struct kobject *kobj) 245static void edac_memctrl_master_release(struct kobject *kobj)
242{ 246{
243 debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__); 247 debugf1("%s()\n", __func__);
248 complete(&edac_memctrl_kobj_complete);
244} 249}
245 250
246static struct kobj_type ktype_memctrl = { 251static struct kobj_type ktype_memctrl = {
247 .release = edac_memctrl_master_release, 252 .release = edac_memctrl_master_release,
248 .sysfs_ops = &memctrlfs_ops, 253 .sysfs_ops = &memctrlfs_ops,
249 .default_attrs = (struct attribute **) memctrl_attr, 254 .default_attrs = (struct attribute **) memctrl_attr,
250}; 255};
251 256
252#endif /* DISABLE_EDAC_SYSFS */ 257#endif /* DISABLE_EDAC_SYSFS */
@@ -268,32 +273,31 @@ static int edac_sysfs_memctrl_setup(void)
268{ 273{
269 int err=0; 274 int err=0;
270 275
271 debugf1("MC: " __FILE__ ": %s()\n", __func__); 276 debugf1("%s()\n", __func__);
272 277
273 /* create the /sys/devices/system/edac directory */ 278 /* create the /sys/devices/system/edac directory */
274 err = sysdev_class_register(&edac_class); 279 err = sysdev_class_register(&edac_class);
280
275 if (!err) { 281 if (!err) {
276 /* Init the MC's kobject */ 282 /* Init the MC's kobject */
277 memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj)); 283 memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
278 kobject_init(&edac_memctrl_kobj);
279
280 edac_memctrl_kobj.parent = &edac_class.kset.kobj; 284 edac_memctrl_kobj.parent = &edac_class.kset.kobj;
281 edac_memctrl_kobj.ktype = &ktype_memctrl; 285 edac_memctrl_kobj.ktype = &ktype_memctrl;
282 286
283 /* generate sysfs "..../edac/mc" */ 287 /* generate sysfs "..../edac/mc" */
284 err = kobject_set_name(&edac_memctrl_kobj,"mc"); 288 err = kobject_set_name(&edac_memctrl_kobj,"mc");
289
285 if (!err) { 290 if (!err) {
286 /* FIXME: maybe new sysdev_create_subdir() */ 291 /* FIXME: maybe new sysdev_create_subdir() */
287 err = kobject_register(&edac_memctrl_kobj); 292 err = kobject_register(&edac_memctrl_kobj);
288 if (err) { 293
294 if (err)
289 debugf1("Failed to register '.../edac/mc'\n"); 295 debugf1("Failed to register '.../edac/mc'\n");
290 } else { 296 else
291 debugf1("Registered '.../edac/mc' kobject\n"); 297 debugf1("Registered '.../edac/mc' kobject\n");
292 }
293 } 298 }
294 } else { 299 } else
295 debugf1(KERN_WARNING "__FILE__ %s() error=%d\n", __func__,err); 300 debugf1("%s() error=%d\n", __func__, err);
296 }
297 301
298 return err; 302 return err;
299} 303}
@@ -308,11 +312,12 @@ static void edac_sysfs_memctrl_teardown(void)
308#ifndef DISABLE_EDAC_SYSFS 312#ifndef DISABLE_EDAC_SYSFS
309 debugf0("MC: " __FILE__ ": %s()\n", __func__); 313 debugf0("MC: " __FILE__ ": %s()\n", __func__);
310 314
311 /* Unregister the MC's kobject */ 315 /* Unregister the MC's kobject and wait for reference count to reach
316 * 0.
317 */
318 init_completion(&edac_memctrl_kobj_complete);
312 kobject_unregister(&edac_memctrl_kobj); 319 kobject_unregister(&edac_memctrl_kobj);
313 320 wait_for_completion(&edac_memctrl_kobj_complete);
314 /* release the master edac mc kobject */
315 kobject_put(&edac_memctrl_kobj);
316 321
317 /* Unregister the 'edac' object */ 322 /* Unregister the 'edac' object */
318 sysdev_class_unregister(&edac_class); 323 sysdev_class_unregister(&edac_class);
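
The teardown above arms a completion before kobject_unregister() and then sleeps until the kobject's release() method calls complete(), so the backing structure cannot be reused while sysfs still holds a reference. A user-space analogue of that wait-for-release ordering, built on pthreads because the kernel primitives are not available here; every name below is invented for illustration and none of it is the kobject API:

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* A refcounted object whose "release" fires when the last ref is dropped. */
struct object {
	int refcount;
	struct completion released;
};

static void object_put(struct object *obj)
{
	if (--obj->refcount == 0)
		complete(&obj->released);	/* release() would run here */
}

int main(void)
{
	struct object obj = { .refcount = 1 };

	init_completion(&obj.released);
	object_put(&obj);			/* drop the final reference */
	wait_for_completion(&obj.released);	/* block until release ran */
	printf("released; now safe to free the backing memory\n");
	return 0;
}
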
@@ -331,7 +336,6 @@ struct list_control {
331 int *count; 336 int *count;
332}; 337};
333 338
334
335#if 0 339#if 0
336/* Output the list as: vendor_id:device:id<,vendor_id:device_id> */ 340/* Output the list as: vendor_id:device:id<,vendor_id:device_id> */
337static ssize_t edac_pci_list_string_show(void *ptr, char *buffer) 341static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
@@ -356,7 +360,6 @@ static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
356 } 360 }
357 361
358 len += snprintf(p + len,(PAGE_SIZE-len), "\n"); 362 len += snprintf(p + len,(PAGE_SIZE-len), "\n");
359
360 return (ssize_t) len; 363 return (ssize_t) len;
361} 364}
362 365
@@ -378,7 +381,7 @@ static int parse_one_device(const char **s,const char **e,
378 381
379 /* if null byte, we are done */ 382 /* if null byte, we are done */
380 if (!**s) { 383 if (!**s) {
381 (*s)++; /* keep *s moving */ 384 (*s)++; /* keep *s moving */
382 return 0; 385 return 0;
383 } 386 }
384 387
@@ -395,6 +398,7 @@ static int parse_one_device(const char **s,const char **e,
395 398
396 /* parse vendor_id */ 399 /* parse vendor_id */
397 runner = *s; 400 runner = *s;
401
398 while (runner < *e) { 402 while (runner < *e) {
399 /* scan for vendor:device delimiter */ 403 /* scan for vendor:device delimiter */
400 if (*runner == ':') { 404 if (*runner == ':') {
@@ -402,6 +406,7 @@ static int parse_one_device(const char **s,const char **e,
402 runner = p + 1; 406 runner = p + 1;
403 break; 407 break;
404 } 408 }
409
405 runner++; 410 runner++;
406 } 411 }
407 412
@@ -417,12 +422,11 @@ static int parse_one_device(const char **s,const char **e,
417 } 422 }
418 423
419 *s = runner; 424 *s = runner;
420
421 return 1; 425 return 1;
422} 426}
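
parse_one_device() above walks a "vendor:device,vendor:device" string by hand. A compressed user-space sketch of the same parse using strtoul instead; list bookkeeping and most error handling are dropped, and the input string is invented:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *input = "8086:2541,8086:255d";	/* made-up IDs */
	const char *p = input;

	while (*p) {
		char *end;
		unsigned long vendor, device;

		vendor = strtoul(p, &end, 16);
		if (*end != ':')
			break;			/* malformed entry */
		device = strtoul(end + 1, &end, 16);
		printf("skip vendor 0x%04lx device 0x%04lx\n", vendor, device);

		if (*end != ',')
			break;			/* end of list */
		p = end + 1;
	}
	return 0;
}
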
423 427
424static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer, 428static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
425 size_t count) 429 size_t count)
426{ 430{
427 struct list_control *listctl; 431 struct list_control *listctl;
428 struct edac_pci_device_list *list; 432 struct edac_pci_device_list *list;
@@ -432,14 +436,12 @@ static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
432 436
433 s = (char*)buffer; 437 s = (char*)buffer;
434 e = s + count; 438 e = s + count;
435
436 listctl = ptr; 439 listctl = ptr;
437 list = listctl->list; 440 list = listctl->list;
438 index = listctl->count; 441 index = listctl->count;
439
440 *index = 0; 442 *index = 0;
441 while (*index < MAX_LISTED_PCI_DEVICES) {
442 443
444 while (*index < MAX_LISTED_PCI_DEVICES) {
443 if (parse_one_device(&s,&e,&vendor_id,&device_id)) { 445 if (parse_one_device(&s,&e,&vendor_id,&device_id)) {
444 list[ *index ].vendor = vendor_id; 446 list[ *index ].vendor = vendor_id;
445 list[ *index ].device = device_id; 447 list[ *index ].device = device_id;
@@ -472,15 +474,15 @@ static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
472} 474}
473 475
474struct edac_pci_dev_attribute { 476struct edac_pci_dev_attribute {
475 struct attribute attr; 477 struct attribute attr;
476 void *value; 478 void *value;
477 ssize_t (*show)(void *,char *); 479 ssize_t (*show)(void *,char *);
478 ssize_t (*store)(void *, const char *,size_t); 480 ssize_t (*store)(void *, const char *,size_t);
479}; 481};
480 482
481/* Set of show/store abstract level functions for PCI Parity object */ 483/* Set of show/store abstract level functions for PCI Parity object */
482static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, 484static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
483 char *buffer) 485 char *buffer)
484{ 486{
485 struct edac_pci_dev_attribute *edac_pci_dev; 487 struct edac_pci_dev_attribute *edac_pci_dev;
486 edac_pci_dev= (struct edac_pci_dev_attribute*)attr; 488 edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
@@ -490,8 +492,8 @@ static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
490 return -EIO; 492 return -EIO;
491} 493}
492 494
493static ssize_t edac_pci_dev_store(struct kobject *kobj, struct attribute *attr, 495static ssize_t edac_pci_dev_store(struct kobject *kobj,
494 const char *buffer, size_t count) 496 struct attribute *attr, const char *buffer, size_t count)
495{ 497{
496 struct edac_pci_dev_attribute *edac_pci_dev; 498 struct edac_pci_dev_attribute *edac_pci_dev;
497 edac_pci_dev= (struct edac_pci_dev_attribute*)attr; 499 edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
@@ -506,7 +508,6 @@ static struct sysfs_ops edac_pci_sysfs_ops = {
506 .store = edac_pci_dev_store 508 .store = edac_pci_dev_store
507}; 509};
508 510
509
510#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \ 511#define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
511struct edac_pci_dev_attribute edac_pci_attr_##_name = { \ 512struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
512 .attr = {.name = __stringify(_name), .mode = _mode }, \ 513 .attr = {.name = __stringify(_name), .mode = _mode }, \
@@ -549,9 +550,11 @@ EDAC_PCI_STRING_ATTR(pci_parity_blacklist,
549#endif 550#endif
550 551
551/* PCI Parity control files */ 552/* PCI Parity control files */
552EDAC_PCI_ATTR(check_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store); 553EDAC_PCI_ATTR(check_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
553EDAC_PCI_ATTR(panic_on_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store); 554 edac_pci_int_store);
554EDAC_PCI_ATTR(pci_parity_count,S_IRUGO,edac_pci_int_show,NULL); 555EDAC_PCI_ATTR(panic_on_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
556 edac_pci_int_store);
557EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
555 558
556/* Base Attributes of the memory ECC object */ 559/* Base Attributes of the memory ECC object */
557static struct edac_pci_dev_attribute *edac_pci_attr[] = { 560static struct edac_pci_dev_attribute *edac_pci_attr[] = {
@@ -564,13 +567,14 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = {
564/* No memory to release */ 567/* No memory to release */
565static void edac_pci_release(struct kobject *kobj) 568static void edac_pci_release(struct kobject *kobj)
566{ 569{
567 debugf1("EDAC PCI: " __FILE__ ": %s()\n", __func__); 570 debugf1("%s()\n", __func__);
571 complete(&edac_pci_kobj_complete);
568} 572}
569 573
570static struct kobj_type ktype_edac_pci = { 574static struct kobj_type ktype_edac_pci = {
571 .release = edac_pci_release, 575 .release = edac_pci_release,
572 .sysfs_ops = &edac_pci_sysfs_ops, 576 .sysfs_ops = &edac_pci_sysfs_ops,
573 .default_attrs = (struct attribute **) edac_pci_attr, 577 .default_attrs = (struct attribute **) edac_pci_attr,
574}; 578};
575 579
576#endif /* DISABLE_EDAC_SYSFS */ 580#endif /* DISABLE_EDAC_SYSFS */
@@ -588,24 +592,24 @@ static int edac_sysfs_pci_setup(void)
588{ 592{
589 int err; 593 int err;
590 594
591 debugf1("MC: " __FILE__ ": %s()\n", __func__); 595 debugf1("%s()\n", __func__);
592 596
593 memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj)); 597 memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
594
595 kobject_init(&edac_pci_kobj);
596 edac_pci_kobj.parent = &edac_class.kset.kobj; 598 edac_pci_kobj.parent = &edac_class.kset.kobj;
597 edac_pci_kobj.ktype = &ktype_edac_pci; 599 edac_pci_kobj.ktype = &ktype_edac_pci;
598
599 err = kobject_set_name(&edac_pci_kobj, "pci"); 600 err = kobject_set_name(&edac_pci_kobj, "pci");
601
600 if (!err) { 602 if (!err) {
 601 /* Instantiate the csrow object */ 603 /* Instantiate the csrow object */
602 /* FIXME: maybe new sysdev_create_subdir() */ 604 /* FIXME: maybe new sysdev_create_subdir() */
603 err = kobject_register(&edac_pci_kobj); 605 err = kobject_register(&edac_pci_kobj);
606
604 if (err) 607 if (err)
605 debugf1("Failed to register '.../edac/pci'\n"); 608 debugf1("Failed to register '.../edac/pci'\n");
606 else 609 else
607 debugf1("Registered '.../edac/pci' kobject\n"); 610 debugf1("Registered '.../edac/pci' kobject\n");
608 } 611 }
612
609 return err; 613 return err;
610} 614}
611#endif /* DISABLE_EDAC_SYSFS */ 615#endif /* DISABLE_EDAC_SYSFS */
@@ -613,10 +617,10 @@ static int edac_sysfs_pci_setup(void)
613static void edac_sysfs_pci_teardown(void) 617static void edac_sysfs_pci_teardown(void)
614{ 618{
615#ifndef DISABLE_EDAC_SYSFS 619#ifndef DISABLE_EDAC_SYSFS
616 debugf0("MC: " __FILE__ ": %s()\n", __func__); 620 debugf0("%s()\n", __func__);
617 621 init_completion(&edac_pci_kobj_complete);
618 kobject_unregister(&edac_pci_kobj); 622 kobject_unregister(&edac_pci_kobj);
619 kobject_put(&edac_pci_kobj); 623 wait_for_completion(&edac_pci_kobj_complete);
620#endif 624#endif
621} 625}
622 626
@@ -633,6 +637,7 @@ static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data)
633 size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n", 637 size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n",
634 csrow->channels[0].label); 638 csrow->channels[0].label);
635 } 639 }
640
636 return size; 641 return size;
637} 642}
638 643
@@ -644,11 +649,12 @@ static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data)
644 size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", 649 size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
645 csrow->channels[1].label); 650 csrow->channels[1].label);
646 } 651 }
652
647 return size; 653 return size;
648} 654}
649 655
650static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow, 656static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
651 const char *data, size_t size) 657 const char *data, size_t size)
652{ 658{
653 ssize_t max_size = 0; 659 ssize_t max_size = 0;
654 660
@@ -657,11 +663,12 @@ static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
657 strncpy(csrow->channels[0].label, data, max_size); 663 strncpy(csrow->channels[0].label, data, max_size);
658 csrow->channels[0].label[max_size] = '\0'; 664 csrow->channels[0].label[max_size] = '\0';
659 } 665 }
666
660 return size; 667 return size;
661} 668}
662 669
663static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow, 670static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
664 const char *data, size_t size) 671 const char *data, size_t size)
665{ 672{
666 ssize_t max_size = 0; 673 ssize_t max_size = 0;
667 674
@@ -670,6 +677,7 @@ static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
670 strncpy(csrow->channels[1].label, data, max_size); 677 strncpy(csrow->channels[1].label, data, max_size);
671 csrow->channels[1].label[max_size] = '\0'; 678 csrow->channels[1].label[max_size] = '\0';
672 } 679 }
680
673 return max_size; 681 return max_size;
674} 682}
675 683
@@ -690,6 +698,7 @@ static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data)
690 if (csrow->nr_channels > 0) { 698 if (csrow->nr_channels > 0) {
691 size = sprintf(data,"%u\n", csrow->channels[0].ce_count); 699 size = sprintf(data,"%u\n", csrow->channels[0].ce_count);
692 } 700 }
701
693 return size; 702 return size;
694} 703}
695 704
@@ -700,6 +709,7 @@ static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data)
700 if (csrow->nr_channels > 1) { 709 if (csrow->nr_channels > 1) {
701 size = sprintf(data,"%u\n", csrow->channels[1].ce_count); 710 size = sprintf(data,"%u\n", csrow->channels[1].ce_count);
702 } 711 }
712
703 return size; 713 return size;
704} 714}
705 715
@@ -724,7 +734,7 @@ static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data)
724} 734}
725 735
726struct csrowdev_attribute { 736struct csrowdev_attribute {
727 struct attribute attr; 737 struct attribute attr;
728 ssize_t (*show)(struct csrow_info *,char *); 738 ssize_t (*show)(struct csrow_info *,char *);
729 ssize_t (*store)(struct csrow_info *, const char *,size_t); 739 ssize_t (*store)(struct csrow_info *, const char *,size_t);
730}; 740};
@@ -734,24 +744,26 @@ struct csrowdev_attribute {
734 744
735/* Set of show/store higher level functions for csrow objects */ 745/* Set of show/store higher level functions for csrow objects */
736static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr, 746static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr,
737 char *buffer) 747 char *buffer)
738{ 748{
739 struct csrow_info *csrow = to_csrow(kobj); 749 struct csrow_info *csrow = to_csrow(kobj);
740 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); 750 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
741 751
742 if (csrowdev_attr->show) 752 if (csrowdev_attr->show)
743 return csrowdev_attr->show(csrow, buffer); 753 return csrowdev_attr->show(csrow, buffer);
754
744 return -EIO; 755 return -EIO;
745} 756}
746 757
747static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, 758static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
748 const char *buffer, size_t count) 759 const char *buffer, size_t count)
749{ 760{
750 struct csrow_info *csrow = to_csrow(kobj); 761 struct csrow_info *csrow = to_csrow(kobj);
751 struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr); 762 struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
752 763
753 if (csrowdev_attr->store) 764 if (csrowdev_attr->store)
754 return csrowdev_attr->store(csrow, buffer, count); 765 return csrowdev_attr->store(csrow, buffer, count);
766
755 return -EIO; 767 return -EIO;
756} 768}
757 769
@@ -785,7 +797,6 @@ CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
785 csrow_ch1_dimm_label_show, 797 csrow_ch1_dimm_label_show,
786 csrow_ch1_dimm_label_store); 798 csrow_ch1_dimm_label_store);
787 799
788
789/* Attributes of the CSROW<id> object */ 800/* Attributes of the CSROW<id> object */
790static struct csrowdev_attribute *csrow_attr[] = { 801static struct csrowdev_attribute *csrow_attr[] = {
791 &attr_dev_type, 802 &attr_dev_type,
@@ -801,40 +812,43 @@ static struct csrowdev_attribute *csrow_attr[] = {
801 NULL, 812 NULL,
802}; 813};
803 814
804
805/* No memory to release */ 815/* No memory to release */
806static void edac_csrow_instance_release(struct kobject *kobj) 816static void edac_csrow_instance_release(struct kobject *kobj)
807{ 817{
808 debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__); 818 struct csrow_info *cs;
819
820 debugf1("%s()\n", __func__);
821 cs = container_of(kobj, struct csrow_info, kobj);
822 complete(&cs->kobj_complete);
809} 823}
810 824
811static struct kobj_type ktype_csrow = { 825static struct kobj_type ktype_csrow = {
812 .release = edac_csrow_instance_release, 826 .release = edac_csrow_instance_release,
813 .sysfs_ops = &csrowfs_ops, 827 .sysfs_ops = &csrowfs_ops,
814 .default_attrs = (struct attribute **) csrow_attr, 828 .default_attrs = (struct attribute **) csrow_attr,
815}; 829};
816 830
817/* Create a CSROW object under specified edac_mc_device */ 831static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
818static int edac_create_csrow_object(struct kobject *edac_mci_kobj, 832static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
819 struct csrow_info *csrow, int index ) 833 struct csrow_info *csrow, int index)
820{ 834{
821 int err = 0; 835 int err = 0;
822 836
823 debugf0("MC: " __FILE__ ": %s()\n", __func__); 837 debugf0("%s()\n", __func__);
824
825 memset(&csrow->kobj, 0, sizeof(csrow->kobj)); 838 memset(&csrow->kobj, 0, sizeof(csrow->kobj));
826 839
827 /* generate ..../edac/mc/mc<id>/csrow<index> */ 840 /* generate ..../edac/mc/mc<id>/csrow<index> */
828 841
829 kobject_init(&csrow->kobj);
830 csrow->kobj.parent = edac_mci_kobj; 842 csrow->kobj.parent = edac_mci_kobj;
831 csrow->kobj.ktype = &ktype_csrow; 843 csrow->kobj.ktype = &ktype_csrow;
832 844
833 /* name this instance of csrow<id> */ 845 /* name this instance of csrow<id> */
834 err = kobject_set_name(&csrow->kobj,"csrow%d",index); 846 err = kobject_set_name(&csrow->kobj,"csrow%d",index);
847
835 if (!err) { 848 if (!err) {
 836 /* Instantiate the csrow object */ 849 /* Instantiate the csrow object */
837 err = kobject_register(&csrow->kobj); 850 err = kobject_register(&csrow->kobj);
851
838 if (err) 852 if (err)
839 debugf0("Failed to register CSROW%d\n",index); 853 debugf0("Failed to register CSROW%d\n",index);
840 else 854 else
@@ -846,8 +860,8 @@ static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
846 860
847/* sysfs data structures and methods for the MCI kobjects */ 861/* sysfs data structures and methods for the MCI kobjects */
848 862
849static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, 863static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
850 const char *data, size_t count ) 864 const char *data, size_t count)
851{ 865{
852 int row, chan; 866 int row, chan;
853 867
@@ -855,16 +869,18 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
855 mci->ce_noinfo_count = 0; 869 mci->ce_noinfo_count = 0;
856 mci->ue_count = 0; 870 mci->ue_count = 0;
857 mci->ce_count = 0; 871 mci->ce_count = 0;
872
858 for (row = 0; row < mci->nr_csrows; row++) { 873 for (row = 0; row < mci->nr_csrows; row++) {
859 struct csrow_info *ri = &mci->csrows[row]; 874 struct csrow_info *ri = &mci->csrows[row];
860 875
861 ri->ue_count = 0; 876 ri->ue_count = 0;
862 ri->ce_count = 0; 877 ri->ce_count = 0;
878
863 for (chan = 0; chan < ri->nr_channels; chan++) 879 for (chan = 0; chan < ri->nr_channels; chan++)
864 ri->channels[chan].ce_count = 0; 880 ri->channels[chan].ce_count = 0;
865 } 881 }
866 mci->start_time = jiffies;
867 882
883 mci->start_time = jiffies;
868 return count; 884 return count;
869} 885}
870 886
@@ -922,18 +938,16 @@ static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data)
922 938
923 p += mci_output_edac_cap(p,mci->edac_ctl_cap); 939 p += mci_output_edac_cap(p,mci->edac_ctl_cap);
924 p += sprintf(p, "\n"); 940 p += sprintf(p, "\n");
925
926 return p - data; 941 return p - data;
927} 942}
928 943
929static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci, 944static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci,
930 char *data) 945 char *data)
931{ 946{
932 char *p = data; 947 char *p = data;
933 948
934 p += mci_output_edac_cap(p,mci->edac_cap); 949 p += mci_output_edac_cap(p,mci->edac_cap);
935 p += sprintf(p, "\n"); 950 p += sprintf(p, "\n");
936
937 return p - data; 951 return p - data;
938} 952}
939 953
@@ -950,13 +964,13 @@ static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap)
950 return p - buf; 964 return p - buf;
951} 965}
952 966
953static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, char *data) 967static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci,
968 char *data)
954{ 969{
955 char *p = data; 970 char *p = data;
956 971
957 p += mci_output_mtype_cap(p,mci->mtype_cap); 972 p += mci_output_mtype_cap(p,mci->mtype_cap);
958 p += sprintf(p, "\n"); 973 p += sprintf(p, "\n");
959
960 return p - data; 974 return p - data;
961} 975}
962 976
@@ -970,6 +984,7 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
970 984
971 if (!csrow->nr_pages) 985 if (!csrow->nr_pages)
972 continue; 986 continue;
987
973 total_pages += csrow->nr_pages; 988 total_pages += csrow->nr_pages;
974 } 989 }
975 990
@@ -977,7 +992,7 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
977} 992}
978 993
979struct mcidev_attribute { 994struct mcidev_attribute {
980 struct attribute attr; 995 struct attribute attr;
981 ssize_t (*show)(struct mem_ctl_info *,char *); 996 ssize_t (*show)(struct mem_ctl_info *,char *);
982 ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); 997 ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
983}; 998};
@@ -986,30 +1001,32 @@ struct mcidev_attribute {
986#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr) 1001#define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
987 1002
988static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr, 1003static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
989 char *buffer) 1004 char *buffer)
990{ 1005{
991 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 1006 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
992 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); 1007 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
993 1008
994 if (mcidev_attr->show) 1009 if (mcidev_attr->show)
995 return mcidev_attr->show(mem_ctl_info, buffer); 1010 return mcidev_attr->show(mem_ctl_info, buffer);
1011
996 return -EIO; 1012 return -EIO;
997} 1013}
998 1014
999static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, 1015static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
1000 const char *buffer, size_t count) 1016 const char *buffer, size_t count)
1001{ 1017{
1002 struct mem_ctl_info *mem_ctl_info = to_mci(kobj); 1018 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
1003 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); 1019 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
1004 1020
1005 if (mcidev_attr->store) 1021 if (mcidev_attr->store)
1006 return mcidev_attr->store(mem_ctl_info, buffer, count); 1022 return mcidev_attr->store(mem_ctl_info, buffer, count);
1023
1007 return -EIO; 1024 return -EIO;
1008} 1025}
1009 1026
1010static struct sysfs_ops mci_ops = { 1027static struct sysfs_ops mci_ops = {
1011 .show = mcidev_show, 1028 .show = mcidev_show,
1012 .store = mcidev_store 1029 .store = mcidev_store
1013}; 1030};
1014 1031
1015#define MCIDEV_ATTR(_name,_mode,_show,_store) \ 1032#define MCIDEV_ATTR(_name,_mode,_show,_store) \
@@ -1037,7 +1054,6 @@ MCIDEV_ATTR(edac_current_capability,S_IRUGO,
1037MCIDEV_ATTR(supported_mem_type,S_IRUGO, 1054MCIDEV_ATTR(supported_mem_type,S_IRUGO,
1038 mci_supported_mem_type_show,NULL); 1055 mci_supported_mem_type_show,NULL);
1039 1056
1040
1041static struct mcidev_attribute *mci_attr[] = { 1057static struct mcidev_attribute *mci_attr[] = {
1042 &mci_attr_reset_counters, 1058 &mci_attr_reset_counters,
1043 &mci_attr_module_name, 1059 &mci_attr_module_name,
@@ -1054,25 +1070,22 @@ static struct mcidev_attribute *mci_attr[] = {
1054 NULL 1070 NULL
1055}; 1071};
1056 1072
1057
1058/* 1073/*
1059 * Release of a MC controlling instance 1074 * Release of a MC controlling instance
1060 */ 1075 */
1061static void edac_mci_instance_release(struct kobject *kobj) 1076static void edac_mci_instance_release(struct kobject *kobj)
1062{ 1077{
1063 struct mem_ctl_info *mci; 1078 struct mem_ctl_info *mci;
1064 mci = container_of(kobj,struct mem_ctl_info,edac_mci_kobj);
1065 1079
1066 debugf0("MC: " __FILE__ ": %s() idx=%d calling kfree\n", 1080 mci = to_mci(kobj);
1067 __func__, mci->mc_idx); 1081 debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
1068 1082 complete(&mci->kobj_complete);
1069 kfree(mci);
1070} 1083}
1071 1084
1072static struct kobj_type ktype_mci = { 1085static struct kobj_type ktype_mci = {
1073 .release = edac_mci_instance_release, 1086 .release = edac_mci_instance_release,
1074 .sysfs_ops = &mci_ops, 1087 .sysfs_ops = &mci_ops,
1075 .default_attrs = (struct attribute **) mci_attr, 1088 .default_attrs = (struct attribute **) mci_attr,
1076}; 1089};
1077 1090
1078#endif /* DISABLE_EDAC_SYSFS */ 1091#endif /* DISABLE_EDAC_SYSFS */
@@ -1099,13 +1112,12 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1099 struct csrow_info *csrow; 1112 struct csrow_info *csrow;
1100 struct kobject *edac_mci_kobj=&mci->edac_mci_kobj; 1113 struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
1101 1114
1102 debugf0("MC: " __FILE__ ": %s() idx=%d\n", __func__, mci->mc_idx); 1115 debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
1103
1104 memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj)); 1116 memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
1105 kobject_init(edac_mci_kobj);
1106 1117
1107 /* set the name of the mc<id> object */ 1118 /* set the name of the mc<id> object */
1108 err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx); 1119 err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
1120
1109 if (err) 1121 if (err)
1110 return err; 1122 return err;
1111 1123
@@ -1115,50 +1127,48 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1115 1127
1116 /* register the mc<id> kobject */ 1128 /* register the mc<id> kobject */
1117 err = kobject_register(edac_mci_kobj); 1129 err = kobject_register(edac_mci_kobj);
1130
1118 if (err) 1131 if (err)
1119 return err; 1132 return err;
1120 1133
1121 /* create a symlink for the device */ 1134 /* create a symlink for the device */
1122 err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj, 1135 err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj,
1123 EDAC_DEVICE_SYMLINK); 1136 EDAC_DEVICE_SYMLINK);
1124 if (err) { 1137
1125 kobject_unregister(edac_mci_kobj); 1138 if (err)
1126 return err; 1139 goto fail0;
1127 }
1128 1140
1129 /* Make directories for each CSROW object 1141 /* Make directories for each CSROW object
1130 * under the mc<id> kobject 1142 * under the mc<id> kobject
1131 */ 1143 */
1132 for (i = 0; i < mci->nr_csrows; i++) { 1144 for (i = 0; i < mci->nr_csrows; i++) {
1133
1134 csrow = &mci->csrows[i]; 1145 csrow = &mci->csrows[i];
1135 1146
1136 /* Only expose populated CSROWs */ 1147 /* Only expose populated CSROWs */
1137 if (csrow->nr_pages > 0) { 1148 if (csrow->nr_pages > 0) {
1138 err = edac_create_csrow_object(edac_mci_kobj,csrow,i); 1149 err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
1150
1139 if (err) 1151 if (err)
1140 goto fail; 1152 goto fail1;
1141 } 1153 }
1142 } 1154 }
1143 1155
1144 /* Mark this MCI instance as having sysfs entries */
1145 mci->sysfs_active = MCI_SYSFS_ACTIVE;
1146
1147 return 0; 1156 return 0;
1148 1157
1149
1150 /* CSROW error: backout what has already been registered, */ 1158 /* CSROW error: backout what has already been registered, */
1151fail: 1159fail1:
1152 for ( i--; i >= 0; i--) { 1160 for ( i--; i >= 0; i--) {
1153 if (csrow->nr_pages > 0) { 1161 if (csrow->nr_pages > 0) {
1162 init_completion(&csrow->kobj_complete);
1154 kobject_unregister(&mci->csrows[i].kobj); 1163 kobject_unregister(&mci->csrows[i].kobj);
1155 kobject_put(&mci->csrows[i].kobj); 1164 wait_for_completion(&csrow->kobj_complete);
1156 } 1165 }
1157 } 1166 }
1158 1167
1168fail0:
1169 init_completion(&mci->kobj_complete);
1159 kobject_unregister(edac_mci_kobj); 1170 kobject_unregister(edac_mci_kobj);
1160 kobject_put(edac_mci_kobj); 1171 wait_for_completion(&mci->kobj_complete);
1161
1162 return err; 1172 return err;
1163} 1173}
1164#endif /* DISABLE_EDAC_SYSFS */ 1174#endif /* DISABLE_EDAC_SYSFS */
@@ -1171,20 +1181,21 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1171#ifndef DISABLE_EDAC_SYSFS 1181#ifndef DISABLE_EDAC_SYSFS
1172 int i; 1182 int i;
1173 1183
1174 debugf0("MC: " __FILE__ ": %s()\n", __func__); 1184 debugf0("%s()\n", __func__);
1175 1185
1176 /* remove all csrow kobjects */ 1186 /* remove all csrow kobjects */
1177 for (i = 0; i < mci->nr_csrows; i++) { 1187 for (i = 0; i < mci->nr_csrows; i++) {
1178 if (mci->csrows[i].nr_pages > 0) { 1188 if (mci->csrows[i].nr_pages > 0) {
1189 init_completion(&mci->csrows[i].kobj_complete);
1179 kobject_unregister(&mci->csrows[i].kobj); 1190 kobject_unregister(&mci->csrows[i].kobj);
1180 kobject_put(&mci->csrows[i].kobj); 1191 wait_for_completion(&mci->csrows[i].kobj_complete);
1181 } 1192 }
1182 } 1193 }
1183 1194
1184 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); 1195 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
1185 1196 init_completion(&mci->kobj_complete);
1186 kobject_unregister(&mci->edac_mci_kobj); 1197 kobject_unregister(&mci->edac_mci_kobj);
1187 kobject_put(&mci->edac_mci_kobj); 1198 wait_for_completion(&mci->kobj_complete);
1188#endif /* DISABLE_EDAC_SYSFS */ 1199#endif /* DISABLE_EDAC_SYSFS */
1189} 1200}
1190 1201
@@ -1192,8 +1203,6 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1192 1203
1193#ifdef CONFIG_EDAC_DEBUG 1204#ifdef CONFIG_EDAC_DEBUG
1194 1205
1195EXPORT_SYMBOL(edac_mc_dump_channel);
1196
1197void edac_mc_dump_channel(struct channel_info *chan) 1206void edac_mc_dump_channel(struct channel_info *chan)
1198{ 1207{
1199 debugf4("\tchannel = %p\n", chan); 1208 debugf4("\tchannel = %p\n", chan);
@@ -1202,9 +1211,7 @@ void edac_mc_dump_channel(struct channel_info *chan)
1202 debugf4("\tchannel->label = '%s'\n", chan->label); 1211 debugf4("\tchannel->label = '%s'\n", chan->label);
1203 debugf4("\tchannel->csrow = %p\n\n", chan->csrow); 1212 debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
1204} 1213}
1205 1214EXPORT_SYMBOL_GPL(edac_mc_dump_channel);
1206
1207EXPORT_SYMBOL(edac_mc_dump_csrow);
1208 1215
1209void edac_mc_dump_csrow(struct csrow_info *csrow) 1216void edac_mc_dump_csrow(struct csrow_info *csrow)
1210{ 1217{
@@ -1220,9 +1227,7 @@ void edac_mc_dump_csrow(struct csrow_info *csrow)
1220 debugf4("\tcsrow->channels = %p\n", csrow->channels); 1227 debugf4("\tcsrow->channels = %p\n", csrow->channels);
1221 debugf4("\tcsrow->mci = %p\n\n", csrow->mci); 1228 debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
1222} 1229}
1223 1230EXPORT_SYMBOL_GPL(edac_mc_dump_csrow);
1224
1225EXPORT_SYMBOL(edac_mc_dump_mci);
1226 1231
1227void edac_mc_dump_mci(struct mem_ctl_info *mci) 1232void edac_mc_dump_mci(struct mem_ctl_info *mci)
1228{ 1233{
@@ -1238,9 +1243,9 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci)
1238 mci->mod_name, mci->ctl_name); 1243 mci->mod_name, mci->ctl_name);
1239 debugf3("\tpvt_info = %p\n\n", mci->pvt_info); 1244 debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
1240} 1245}
1246EXPORT_SYMBOL_GPL(edac_mc_dump_mci);
1241 1247
1242 1248#endif /* CONFIG_EDAC_DEBUG */
1243#endif /* CONFIG_EDAC_DEBUG */
1244 1249
1245/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. 1250/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
1246 * Adjust 'ptr' so that its alignment is at least as stringent as what the 1251 * Adjust 'ptr' so that its alignment is at least as stringent as what the
@@ -1249,7 +1254,7 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci)
1249 * If 'size' is a constant, the compiler will optimize this whole function 1254 * If 'size' is a constant, the compiler will optimize this whole function
1250 * down to either a no-op or the addition of a constant to the value of 'ptr'. 1255 * down to either a no-op or the addition of a constant to the value of 'ptr'.
1251 */ 1256 */
1252static inline char * align_ptr (void *ptr, unsigned size) 1257static inline char * align_ptr(void *ptr, unsigned size)
1253{ 1258{
1254 unsigned align, r; 1259 unsigned align, r;
1255 1260
@@ -1276,9 +1281,6 @@ static inline char * align_ptr (void *ptr, unsigned size)
1276 return (char *) (((unsigned long) ptr) + align - r); 1281 return (char *) (((unsigned long) ptr) + align - r);
1277} 1282}
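
align_ptr() above chooses an alignment from the size of the object about to be placed and bumps the pointer up to it, so the structures packed into a single allocation stay naturally aligned. A stand-alone sketch of the same idea; it rounds with a power-of-two mask instead of the modulo arithmetic shown, but the size thresholds match:

#include <stdio.h>
#include <stdint.h>

static void *align_up(void *ptr, unsigned size)
{
	uintptr_t align, p = (uintptr_t)ptr;

	/* pick the natural alignment for an object of 'size' bytes */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return ptr;

	return (void *)((p + align - 1) & ~(align - 1));
}

int main(void)
{
	char buf[64];

	printf("buf+1 for an int : %p -> %p\n",
	       (void *)(buf + 1), align_up(buf + 1, sizeof(int)));
	printf("buf+3 for a long : %p -> %p\n",
	       (void *)(buf + 3), align_up(buf + 3, sizeof(long)));
	return 0;
}
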
1278 1283
1279
1280EXPORT_SYMBOL(edac_mc_alloc);
1281
1282/** 1284/**
1283 * edac_mc_alloc: Allocate a struct mem_ctl_info structure 1285 * edac_mc_alloc: Allocate a struct mem_ctl_info structure
1284 * @size_pvt: size of private storage needed 1286 * @size_pvt: size of private storage needed
@@ -1296,7 +1298,7 @@ EXPORT_SYMBOL(edac_mc_alloc);
1296 * struct mem_ctl_info pointer 1298 * struct mem_ctl_info pointer
1297 */ 1299 */
1298struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, 1300struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
1299 unsigned nr_chans) 1301 unsigned nr_chans)
1300{ 1302{
1301 struct mem_ctl_info *mci; 1303 struct mem_ctl_info *mci;
1302 struct csrow_info *csi, *csrow; 1304 struct csrow_info *csi, *csrow;
@@ -1327,8 +1329,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
1327 chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi)); 1329 chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
1328 pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL; 1330 pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
1329 1331
1330 memset(mci, 0, size); /* clear all fields */ 1332 memset(mci, 0, size); /* clear all fields */
1331
1332 mci->csrows = csi; 1333 mci->csrows = csi;
1333 mci->pvt_info = pvt; 1334 mci->pvt_info = pvt;
1334 mci->nr_csrows = nr_csrows; 1335 mci->nr_csrows = nr_csrows;
@@ -1350,50 +1351,24 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
1350 1351
1351 return mci; 1352 return mci;
1352} 1353}
1353 1354EXPORT_SYMBOL_GPL(edac_mc_alloc);
1354
1355EXPORT_SYMBOL(edac_mc_free);
1356 1355
1357/** 1356/**
1358 * edac_mc_free: Free a previously allocated 'mci' structure 1357 * edac_mc_free: Free a previously allocated 'mci' structure
1359 * @mci: pointer to a struct mem_ctl_info structure 1358 * @mci: pointer to a struct mem_ctl_info structure
1360 *
1361 * Free up a previously allocated mci structure
1362 * A MCI structure can be in 2 states after being allocated
1363 * by edac_mc_alloc().
1364 * 1) Allocated in a MC driver's probe, but not yet committed
1365 * 2) Allocated and committed, by a call to edac_mc_add_mc()
1366 * edac_mc_add_mc() is the function that adds the sysfs entries
1367 * thus, this free function must determine which state the 'mci'
1368 * structure is in, then either free it directly or
1369 * perform kobject cleanup by calling edac_remove_sysfs_mci_device().
1370 *
1371 * VOID Return
1372 */ 1359 */
1373void edac_mc_free(struct mem_ctl_info *mci) 1360void edac_mc_free(struct mem_ctl_info *mci)
1374{ 1361{
1375 /* only if sysfs entries for this mci instance exist 1362 kfree(mci);
1376 * do we remove them and defer the actual kfree via
1377 * the kobject 'release()' callback.
1378 *
1379 * Otherwise, do a straight kfree now.
1380 */
1381 if (mci->sysfs_active == MCI_SYSFS_ACTIVE)
1382 edac_remove_sysfs_mci_device(mci);
1383 else
1384 kfree(mci);
1385} 1363}
1364EXPORT_SYMBOL_GPL(edac_mc_free);
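
With the sysfs state tracking gone, edac_mc_free() is a plain kfree(), so allocation and release pair up symmetrically. An illustrative caller sketch (not part of the patch; 'struct my_pvt', MY_NR_CSROWS and MY_NR_CHANS are made-up names):

	struct mem_ctl_info *mci;

	mci = edac_mc_alloc(sizeof(struct my_pvt), MY_NR_CSROWS, MY_NR_CHANS);
	if (mci == NULL)
		return -ENOMEM;

	/* ... fill in mci->pdev, csrows and handlers, then edac_mc_add_mc(mci);
	 * if registration fails, release the single allocation:
	 */
	edac_mc_free(mci);
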
1386 1365
1387 1366static struct mem_ctl_info *find_mci_by_pdev(struct pci_dev *pdev)
1388
1389EXPORT_SYMBOL(edac_mc_find_mci_by_pdev);
1390
1391struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
1392{ 1367{
1393 struct mem_ctl_info *mci; 1368 struct mem_ctl_info *mci;
1394 struct list_head *item; 1369 struct list_head *item;
1395 1370
1396 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1371 debugf3("%s()\n", __func__);
1397 1372
1398 list_for_each(item, &mc_devices) { 1373 list_for_each(item, &mc_devices) {
1399 mci = list_entry(item, struct mem_ctl_info, link); 1374 mci = list_entry(item, struct mem_ctl_info, link);
@@ -1405,7 +1380,7 @@ struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev)
1405 return NULL; 1380 return NULL;
1406} 1381}
1407 1382
1408static int add_mc_to_global_list (struct mem_ctl_info *mci) 1383static int add_mc_to_global_list(struct mem_ctl_info *mci)
1409{ 1384{
1410 struct list_head *item, *insert_before; 1385 struct list_head *item, *insert_before;
1411 struct mem_ctl_info *p; 1386 struct mem_ctl_info *p;
@@ -1415,11 +1390,12 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci)
1415 mci->mc_idx = 0; 1390 mci->mc_idx = 0;
1416 insert_before = &mc_devices; 1391 insert_before = &mc_devices;
1417 } else { 1392 } else {
1418 if (edac_mc_find_mci_by_pdev(mci->pdev)) { 1393 if (find_mci_by_pdev(mci->pdev)) {
1419 printk(KERN_WARNING 1394 edac_printk(KERN_WARNING, EDAC_MC,
1420 "EDAC MC: %s (%s) %s %s already assigned %d\n", 1395 "%s (%s) %s %s already assigned %d\n",
1421 mci->pdev->dev.bus_id, pci_name(mci->pdev), 1396 mci->pdev->dev.bus_id,
1422 mci->mod_name, mci->ctl_name, mci->mc_idx); 1397 pci_name(mci->pdev), mci->mod_name,
1398 mci->ctl_name, mci->mc_idx);
1423 return 1; 1399 return 1;
1424 } 1400 }
1425 1401
@@ -1447,12 +1423,26 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci)
1447 return 0; 1423 return 0;
1448} 1424}
1449 1425
1426static void complete_mc_list_del(struct rcu_head *head)
1427{
1428 struct mem_ctl_info *mci;
1450 1429
1430 mci = container_of(head, struct mem_ctl_info, rcu);
1431 INIT_LIST_HEAD(&mci->link);
1432 complete(&mci->complete);
1433}
1451 1434
1452EXPORT_SYMBOL(edac_mc_add_mc); 1435static void del_mc_from_global_list(struct mem_ctl_info *mci)
1436{
1437 list_del_rcu(&mci->link);
1438 init_completion(&mci->complete);
1439 call_rcu(&mci->rcu, complete_mc_list_del);
1440 wait_for_completion(&mci->complete);
1441}
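
These two helpers, moved up so edac_mc_add_mc() can unwind through them, implement the usual RCU removal idiom: unlink the entry, then wait (via call_rcu() plus a completion) until a grace period has passed, so no lockless reader can still hold a pointer into it. A compatible reader would walk the list roughly as in this sketch (illustrative only, not part of the patch):

	struct mem_ctl_info *mci;

	rcu_read_lock();
	list_for_each_entry_rcu(mci, &mc_devices, link)
		debugf4("MC%d is on the global list\n", mci->mc_idx);
	rcu_read_unlock();
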
1453 1442
1454/** 1443/**
1455 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list 1444 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
1445 * create sysfs entries associated with mci structure
1456 * @mci: pointer to the mci structure to be added to the list 1446 * @mci: pointer to the mci structure to be added to the list
1457 * 1447 *
1458 * Return: 1448 * Return:
@@ -1463,111 +1453,90 @@ EXPORT_SYMBOL(edac_mc_add_mc);
1463/* FIXME - should a warning be printed if no error detection? correction? */ 1453/* FIXME - should a warning be printed if no error detection? correction? */
1464int edac_mc_add_mc(struct mem_ctl_info *mci) 1454int edac_mc_add_mc(struct mem_ctl_info *mci)
1465{ 1455{
1466 int rc = 1; 1456 debugf0("%s()\n", __func__);
1467
1468 debugf0("MC: " __FILE__ ": %s()\n", __func__);
1469#ifdef CONFIG_EDAC_DEBUG 1457#ifdef CONFIG_EDAC_DEBUG
1470 if (edac_debug_level >= 3) 1458 if (edac_debug_level >= 3)
1471 edac_mc_dump_mci(mci); 1459 edac_mc_dump_mci(mci);
1460
1472 if (edac_debug_level >= 4) { 1461 if (edac_debug_level >= 4) {
1473 int i; 1462 int i;
1474 1463
1475 for (i = 0; i < mci->nr_csrows; i++) { 1464 for (i = 0; i < mci->nr_csrows; i++) {
1476 int j; 1465 int j;
1466
1477 edac_mc_dump_csrow(&mci->csrows[i]); 1467 edac_mc_dump_csrow(&mci->csrows[i]);
1478 for (j = 0; j < mci->csrows[i].nr_channels; j++) 1468 for (j = 0; j < mci->csrows[i].nr_channels; j++)
1479 edac_mc_dump_channel(&mci->csrows[i]. 1469 edac_mc_dump_channel(
1480 channels[j]); 1470 &mci->csrows[i].channels[j]);
1481 } 1471 }
1482 } 1472 }
1483#endif 1473#endif
1484 down(&mem_ctls_mutex); 1474 down(&mem_ctls_mutex);
1485 1475
1486 if (add_mc_to_global_list(mci)) 1476 if (add_mc_to_global_list(mci))
1487 goto finish; 1477 goto fail0;
1488 1478
1489 /* set load time so that error rate can be tracked */ 1479 /* set load time so that error rate can be tracked */
1490 mci->start_time = jiffies; 1480 mci->start_time = jiffies;
1491 1481
1492 if (edac_create_sysfs_mci_device(mci)) { 1482 if (edac_create_sysfs_mci_device(mci)) {
1493 printk(KERN_WARNING 1483 edac_mc_printk(mci, KERN_WARNING,
1494 "EDAC MC%d: failed to create sysfs device\n", 1484 "failed to create sysfs device\n");
1495 mci->mc_idx); 1485 goto fail1;
1496 /* FIXME - should there be an error code and unwind? */
1497 goto finish;
1498 } 1486 }
1499 1487
1500 /* Report action taken */ 1488 /* Report action taken */
1501 printk(KERN_INFO 1489 edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: PCI %s\n",
1502 "EDAC MC%d: Giving out device to %s %s: PCI %s\n", 1490 mci->mod_name, mci->ctl_name, pci_name(mci->pdev));
1503 mci->mc_idx, mci->mod_name, mci->ctl_name,
1504 pci_name(mci->pdev));
1505 1491
1506
1507 rc = 0;
1508
1509finish:
1510 up(&mem_ctls_mutex); 1492 up(&mem_ctls_mutex);
1511 return rc; 1493 return 0;
1512}
1513
1514
1515
1516static void complete_mc_list_del (struct rcu_head *head)
1517{
1518 struct mem_ctl_info *mci;
1519 1494
1520 mci = container_of(head, struct mem_ctl_info, rcu); 1495fail1:
1521 INIT_LIST_HEAD(&mci->link); 1496 del_mc_from_global_list(mci);
1522 complete(&mci->complete);
1523}
1524 1497
1525static void del_mc_from_global_list (struct mem_ctl_info *mci) 1498fail0:
1526{ 1499 up(&mem_ctls_mutex);
1527 list_del_rcu(&mci->link); 1500 return 1;
1528 init_completion(&mci->complete);
1529 call_rcu(&mci->rcu, complete_mc_list_del);
1530 wait_for_completion(&mci->complete);
1531} 1501}
1532 1502EXPORT_SYMBOL_GPL(edac_mc_add_mc);
1533EXPORT_SYMBOL(edac_mc_del_mc);
1534 1503
1535/** 1504/**
1536 * edac_mc_del_mc: Remove the specified mci structure from global list 1505 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
1537 * @mci: Pointer to struct mem_ctl_info structure 1506 * remove mci structure from global list
1507 * @pdev: Pointer to 'struct pci_dev' representing mci structure to remove.
1538 * 1508 *
1539 * Returns: 1509 * Return pointer to removed mci structure, or NULL if device not found.
1540 * 0 Success
1541 * 1 Failure
1542 */ 1510 */
1543int edac_mc_del_mc(struct mem_ctl_info *mci) 1511struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev)
1544{ 1512{
1545 int rc = 1; 1513 struct mem_ctl_info *mci;
1546 1514
1547 debugf0("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 1515 debugf0("MC: %s()\n", __func__);
1548 down(&mem_ctls_mutex); 1516 down(&mem_ctls_mutex);
1517
1518 if ((mci = find_mci_by_pdev(pdev)) == NULL) {
1519 up(&mem_ctls_mutex);
1520 return NULL;
1521 }
1522
1523 edac_remove_sysfs_mci_device(mci);
1549 del_mc_from_global_list(mci); 1524 del_mc_from_global_list(mci);
1550 printk(KERN_INFO
1551 "EDAC MC%d: Removed device %d for %s %s: PCI %s\n",
1552 mci->mc_idx, mci->mc_idx, mci->mod_name, mci->ctl_name,
1553 pci_name(mci->pdev));
1554 rc = 0;
1555 up(&mem_ctls_mutex); 1525 up(&mem_ctls_mutex);
1556 1526 edac_printk(KERN_INFO, EDAC_MC,
1557 return rc; 1527 "Removed device %d for %s %s: PCI %s\n", mci->mc_idx,
1528 mci->mod_name, mci->ctl_name, pci_name(mci->pdev));
1529 return mci;
1558} 1530}
1531EXPORT_SYMBOL_GPL(edac_mc_del_mc);
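
Since edac_mc_del_mc() is now keyed by the pci_dev and hands back the detached mci, a driver's remove path shrinks to a lookup-and-free pair. A hedged sketch of such a remove callback (the function name is hypothetical):

static void __devexit my_edac_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci = edac_mc_del_mc(pdev);

	if (mci != NULL)
		edac_mc_free(mci);	/* safe: sysfs entries already removed */
}
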
1559 1532
1560 1533void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size)
1561EXPORT_SYMBOL(edac_mc_scrub_block);
1562
1563void edac_mc_scrub_block(unsigned long page, unsigned long offset,
1564 u32 size)
1565{ 1534{
1566 struct page *pg; 1535 struct page *pg;
1567 void *virt_addr; 1536 void *virt_addr;
1568 unsigned long flags = 0; 1537 unsigned long flags = 0;
1569 1538
1570 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1539 debugf3("%s()\n", __func__);
1571 1540
1572 /* ECC error page was not in our memory. Ignore it. */ 1541 /* ECC error page was not in our memory. Ignore it. */
1573 if(!pfn_valid(page)) 1542 if(!pfn_valid(page))
@@ -1590,19 +1559,15 @@ void edac_mc_scrub_block(unsigned long page, unsigned long offset,
1590 if (PageHighMem(pg)) 1559 if (PageHighMem(pg))
1591 local_irq_restore(flags); 1560 local_irq_restore(flags);
1592} 1561}
1593 1562EXPORT_SYMBOL_GPL(edac_mc_scrub_block);
1594 1563
1595/* FIXME - should return -1 */ 1564/* FIXME - should return -1 */
1596EXPORT_SYMBOL(edac_mc_find_csrow_by_page); 1565int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
1597
1598int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
1599 unsigned long page)
1600{ 1566{
1601 struct csrow_info *csrows = mci->csrows; 1567 struct csrow_info *csrows = mci->csrows;
1602 int row, i; 1568 int row, i;
1603 1569
1604 debugf1("MC%d: " __FILE__ ": %s(): 0x%lx\n", mci->mc_idx, __func__, 1570 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
1605 page);
1606 row = -1; 1571 row = -1;
1607 1572
1608 for (i = 0; i < mci->nr_csrows; i++) { 1573 for (i = 0; i < mci->nr_csrows; i++) {
@@ -1611,11 +1576,10 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
1611 if (csrow->nr_pages == 0) 1576 if (csrow->nr_pages == 0)
1612 continue; 1577 continue;
1613 1578
1614 debugf3("MC%d: " __FILE__ 1579 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
1615 ": %s(): first(0x%lx) page(0x%lx)" 1580 "mask(0x%lx)\n", mci->mc_idx, __func__,
1616 " last(0x%lx) mask(0x%lx)\n", mci->mc_idx, 1581 csrow->first_page, page, csrow->last_page,
1617 __func__, csrow->first_page, page, 1582 csrow->page_mask);
1618 csrow->last_page, csrow->page_mask);
1619 1583
1620 if ((page >= csrow->first_page) && 1584 if ((page >= csrow->first_page) &&
1621 (page <= csrow->last_page) && 1585 (page <= csrow->last_page) &&
@@ -1627,56 +1591,52 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
1627 } 1591 }
1628 1592
1629 if (row == -1) 1593 if (row == -1)
1630 printk(KERN_ERR 1594 edac_mc_printk(mci, KERN_ERR,
1631 "EDAC MC%d: could not look up page error address %lx\n", 1595 "could not look up page error address %lx\n",
1632 mci->mc_idx, (unsigned long) page); 1596 (unsigned long) page);
1633 1597
1634 return row; 1598 return row;
1635} 1599}
1636 1600EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
1637
1638EXPORT_SYMBOL(edac_mc_handle_ce);
1639 1601
1640/* FIXME - settable log (warning/emerg) levels */ 1602/* FIXME - settable log (warning/emerg) levels */
1641/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */ 1603/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
1642void edac_mc_handle_ce(struct mem_ctl_info *mci, 1604void edac_mc_handle_ce(struct mem_ctl_info *mci,
1643 unsigned long page_frame_number, 1605 unsigned long page_frame_number, unsigned long offset_in_page,
1644 unsigned long offset_in_page, 1606 unsigned long syndrome, int row, int channel, const char *msg)
1645 unsigned long syndrome, int row, int channel,
1646 const char *msg)
1647{ 1607{
1648 unsigned long remapped_page; 1608 unsigned long remapped_page;
1649 1609
1650 debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 1610 debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
1651 1611
1652 /* FIXME - maybe make panic on INTERNAL ERROR an option */ 1612 /* FIXME - maybe make panic on INTERNAL ERROR an option */
1653 if (row >= mci->nr_csrows || row < 0) { 1613 if (row >= mci->nr_csrows || row < 0) {
1654 /* something is wrong */ 1614 /* something is wrong */
1655 printk(KERN_ERR 1615 edac_mc_printk(mci, KERN_ERR,
1656 "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n", 1616 "INTERNAL ERROR: row out of range "
1657 mci->mc_idx, row, mci->nr_csrows); 1617 "(%d >= %d)\n", row, mci->nr_csrows);
1658 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); 1618 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1659 return; 1619 return;
1660 } 1620 }
1621
1661 if (channel >= mci->csrows[row].nr_channels || channel < 0) { 1622 if (channel >= mci->csrows[row].nr_channels || channel < 0) {
1662 /* something is wrong */ 1623 /* something is wrong */
1663 printk(KERN_ERR 1624 edac_mc_printk(mci, KERN_ERR,
1664 "EDAC MC%d: INTERNAL ERROR: channel out of range " 1625 "INTERNAL ERROR: channel out of range "
1665 "(%d >= %d)\n", 1626 "(%d >= %d)\n", channel,
1666 mci->mc_idx, channel, mci->csrows[row].nr_channels); 1627 mci->csrows[row].nr_channels);
1667 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); 1628 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1668 return; 1629 return;
1669 } 1630 }
1670 1631
1671 if (log_ce) 1632 if (log_ce)
1672 /* FIXME - put in DIMM location */ 1633 /* FIXME - put in DIMM location */
1673 printk(KERN_WARNING 1634 edac_mc_printk(mci, KERN_WARNING,
1674 "EDAC MC%d: CE page 0x%lx, offset 0x%lx," 1635 "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
1675 " grain %d, syndrome 0x%lx, row %d, channel %d," 1636 "0x%lx, row %d, channel %d, label \"%s\": %s\n",
1676 " label \"%s\": %s\n", mci->mc_idx, 1637 page_frame_number, offset_in_page,
1677 page_frame_number, offset_in_page, 1638 mci->csrows[row].grain, syndrome, row, channel,
1678 mci->csrows[row].grain, syndrome, row, channel, 1639 mci->csrows[row].channels[channel].label, msg);
1679 mci->csrows[row].channels[channel].label, msg);
1680 1640
1681 mci->ce_count++; 1641 mci->ce_count++;
1682 mci->csrows[row].ce_count++; 1642 mci->csrows[row].ce_count++;
@@ -1697,31 +1657,25 @@ void edac_mc_handle_ce(struct mem_ctl_info *mci,
1697 page_frame_number; 1657 page_frame_number;
1698 1658
1699 edac_mc_scrub_block(remapped_page, offset_in_page, 1659 edac_mc_scrub_block(remapped_page, offset_in_page,
1700 mci->csrows[row].grain); 1660 mci->csrows[row].grain);
1701 } 1661 }
1702} 1662}
1663EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
1703 1664
1704 1665void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
1705EXPORT_SYMBOL(edac_mc_handle_ce_no_info);
1706
1707void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
1708 const char *msg)
1709{ 1666{
1710 if (log_ce) 1667 if (log_ce)
1711 printk(KERN_WARNING 1668 edac_mc_printk(mci, KERN_WARNING,
1712 "EDAC MC%d: CE - no information available: %s\n", 1669 "CE - no information available: %s\n", msg);
1713 mci->mc_idx, msg); 1670
1714 mci->ce_noinfo_count++; 1671 mci->ce_noinfo_count++;
1715 mci->ce_count++; 1672 mci->ce_count++;
1716} 1673}
1717 1674EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
1718
1719EXPORT_SYMBOL(edac_mc_handle_ue);
1720 1675
1721void edac_mc_handle_ue(struct mem_ctl_info *mci, 1676void edac_mc_handle_ue(struct mem_ctl_info *mci,
1722 unsigned long page_frame_number, 1677 unsigned long page_frame_number, unsigned long offset_in_page,
1723 unsigned long offset_in_page, int row, 1678 int row, const char *msg)
1724 const char *msg)
1725{ 1679{
1726 int len = EDAC_MC_LABEL_LEN * 4; 1680 int len = EDAC_MC_LABEL_LEN * 4;
1727 char labels[len + 1]; 1681 char labels[len + 1];
@@ -1729,65 +1683,61 @@ void edac_mc_handle_ue(struct mem_ctl_info *mci,
1729 int chan; 1683 int chan;
1730 int chars; 1684 int chars;
1731 1685
1732 debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 1686 debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
1733 1687
1734 /* FIXME - maybe make panic on INTERNAL ERROR an option */ 1688 /* FIXME - maybe make panic on INTERNAL ERROR an option */
1735 if (row >= mci->nr_csrows || row < 0) { 1689 if (row >= mci->nr_csrows || row < 0) {
1736 /* something is wrong */ 1690 /* something is wrong */
1737 printk(KERN_ERR 1691 edac_mc_printk(mci, KERN_ERR,
1738 "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n", 1692 "INTERNAL ERROR: row out of range "
1739 mci->mc_idx, row, mci->nr_csrows); 1693 "(%d >= %d)\n", row, mci->nr_csrows);
1740 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); 1694 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
1741 return; 1695 return;
1742 } 1696 }
1743 1697
1744 chars = snprintf(pos, len + 1, "%s", 1698 chars = snprintf(pos, len + 1, "%s",
1745 mci->csrows[row].channels[0].label); 1699 mci->csrows[row].channels[0].label);
1746 len -= chars; 1700 len -= chars;
1747 pos += chars; 1701 pos += chars;
1702
1748 for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0); 1703 for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
1749 chan++) { 1704 chan++) {
1750 chars = snprintf(pos, len + 1, ":%s", 1705 chars = snprintf(pos, len + 1, ":%s",
1751 mci->csrows[row].channels[chan].label); 1706 mci->csrows[row].channels[chan].label);
1752 len -= chars; 1707 len -= chars;
1753 pos += chars; 1708 pos += chars;
1754 } 1709 }
1755 1710
1756 if (log_ue) 1711 if (log_ue)
1757 printk(KERN_EMERG 1712 edac_mc_printk(mci, KERN_EMERG,
1758 "EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d," 1713 "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
1759 " labels \"%s\": %s\n", mci->mc_idx, 1714 "labels \"%s\": %s\n", page_frame_number,
1760 page_frame_number, offset_in_page, 1715 offset_in_page, mci->csrows[row].grain, row, labels,
1761 mci->csrows[row].grain, row, labels, msg); 1716 msg);
1762 1717
1763 if (panic_on_ue) 1718 if (panic_on_ue)
1764 panic 1719 panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
1765 ("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d," 1720 "row %d, labels \"%s\": %s\n", mci->mc_idx,
1766 " labels \"%s\": %s\n", mci->mc_idx, 1721 page_frame_number, offset_in_page,
1767 page_frame_number, offset_in_page, 1722 mci->csrows[row].grain, row, labels, msg);
1768 mci->csrows[row].grain, row, labels, msg);
1769 1723
1770 mci->ue_count++; 1724 mci->ue_count++;
1771 mci->csrows[row].ue_count++; 1725 mci->csrows[row].ue_count++;
1772} 1726}
1727EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
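
For orientation, a chipset driver's edac_check routine typically decodes its hardware registers and then funnels the result into these handlers. The sketch below is illustrative only; the 'info' fields are hypothetical:

	int row = edac_mc_find_csrow_by_page(mci, info.page);

	if (info.uncorrectable)
		edac_mc_handle_ue(mci, info.page, 0, row, "example UE");
	else
		edac_mc_handle_ce(mci, info.page, 0, info.syndrome,
				row, 0, "example CE");
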
1773 1728
1774 1729void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
1775EXPORT_SYMBOL(edac_mc_handle_ue_no_info);
1776
1777void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
1778 const char *msg)
1779{ 1730{
1780 if (panic_on_ue) 1731 if (panic_on_ue)
1781 panic("EDAC MC%d: Uncorrected Error", mci->mc_idx); 1732 panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
1782 1733
1783 if (log_ue) 1734 if (log_ue)
1784 printk(KERN_WARNING 1735 edac_mc_printk(mci, KERN_WARNING,
1785 "EDAC MC%d: UE - no information available: %s\n", 1736 "UE - no information available: %s\n", msg);
1786 mci->mc_idx, msg);
1787 mci->ue_noinfo_count++; 1737 mci->ue_noinfo_count++;
1788 mci->ue_count++; 1738 mci->ue_count++;
1789} 1739}
1790 1740EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
1791 1741
1792#ifdef CONFIG_PCI 1742#ifdef CONFIG_PCI
1793 1743
@@ -1799,18 +1749,22 @@ static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
1799 where = secondary ? PCI_SEC_STATUS : PCI_STATUS; 1749 where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
1800 pci_read_config_word(dev, where, &status); 1750 pci_read_config_word(dev, where, &status);
1801 1751
1802 /* If we get back 0xFFFF then we must suspect that the card has been pulled but 1752 /* If we get back 0xFFFF then we must suspect that the card has been
1803 the Linux PCI layer has not yet finished cleaning up. We don't want to report 1753 * pulled but the Linux PCI layer has not yet finished cleaning up.
1804 on such devices */ 1754 * We don't want to report on such devices
1755 */
1805 1756
1806 if (status == 0xFFFF) { 1757 if (status == 0xFFFF) {
1807 u32 sanity; 1758 u32 sanity;
1759
1808 pci_read_config_dword(dev, 0, &sanity); 1760 pci_read_config_dword(dev, 0, &sanity);
1761
1809 if (sanity == 0xFFFFFFFF) 1762 if (sanity == 0xFFFFFFFF)
1810 return 0; 1763 return 0;
1811 } 1764 }
1765
1812 status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | 1766 status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
1813 PCI_STATUS_PARITY; 1767 PCI_STATUS_PARITY;
1814 1768
1815 if (status) 1769 if (status)
1816 /* reset only the bits we are interested in */ 1770 /* reset only the bits we are interested in */
@@ -1822,7 +1776,7 @@ static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
1822typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev); 1776typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
1823 1777
1824/* Clear any PCI parity errors logged by this device. */ 1778/* Clear any PCI parity errors logged by this device. */
1825static void edac_pci_dev_parity_clear( struct pci_dev *dev ) 1779static void edac_pci_dev_parity_clear(struct pci_dev *dev)
1826{ 1780{
1827 u8 header_type; 1781 u8 header_type;
1828 1782
@@ -1853,25 +1807,22 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
1853 /* check the status reg for errors */ 1807 /* check the status reg for errors */
1854 if (status) { 1808 if (status) {
1855 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) 1809 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1856 printk(KERN_CRIT 1810 edac_printk(KERN_CRIT, EDAC_PCI,
1857 "EDAC PCI- "
1858 "Signaled System Error on %s\n", 1811 "Signaled System Error on %s\n",
1859 pci_name (dev)); 1812 pci_name(dev));
1860 1813
1861 if (status & (PCI_STATUS_PARITY)) { 1814 if (status & (PCI_STATUS_PARITY)) {
1862 printk(KERN_CRIT 1815 edac_printk(KERN_CRIT, EDAC_PCI,
1863 "EDAC PCI- "
1864 "Master Data Parity Error on %s\n", 1816 "Master Data Parity Error on %s\n",
1865 pci_name (dev)); 1817 pci_name(dev));
1866 1818
1867 atomic_inc(&pci_parity_count); 1819 atomic_inc(&pci_parity_count);
1868 } 1820 }
1869 1821
1870 if (status & (PCI_STATUS_DETECTED_PARITY)) { 1822 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1871 printk(KERN_CRIT 1823 edac_printk(KERN_CRIT, EDAC_PCI,
1872 "EDAC PCI- "
1873 "Detected Parity Error on %s\n", 1824 "Detected Parity Error on %s\n",
1874 pci_name (dev)); 1825 pci_name(dev));
1875 1826
1876 atomic_inc(&pci_parity_count); 1827 atomic_inc(&pci_parity_count);
1877 } 1828 }
@@ -1892,25 +1843,22 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
1892 /* check the secondary status reg for errors */ 1843 /* check the secondary status reg for errors */
1893 if (status) { 1844 if (status) {
1894 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) 1845 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1895 printk(KERN_CRIT 1846 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1896 "EDAC PCI-Bridge- "
1897 "Signaled System Error on %s\n", 1847 "Signaled System Error on %s\n",
1898 pci_name (dev)); 1848 pci_name(dev));
1899 1849
1900 if (status & (PCI_STATUS_PARITY)) { 1850 if (status & (PCI_STATUS_PARITY)) {
1901 printk(KERN_CRIT 1851 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1902 "EDAC PCI-Bridge- " 1852 "Master Data Parity Error on "
1903 "Master Data Parity Error on %s\n", 1853 "%s\n", pci_name(dev));
1904 pci_name (dev));
1905 1854
1906 atomic_inc(&pci_parity_count); 1855 atomic_inc(&pci_parity_count);
1907 } 1856 }
1908 1857
1909 if (status & (PCI_STATUS_DETECTED_PARITY)) { 1858 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1910 printk(KERN_CRIT 1859 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1911 "EDAC PCI-Bridge- "
1912 "Detected Parity Error on %s\n", 1860 "Detected Parity Error on %s\n",
1913 pci_name (dev)); 1861 pci_name(dev));
1914 1862
1915 atomic_inc(&pci_parity_count); 1863 atomic_inc(&pci_parity_count);
1916 } 1864 }
@@ -1929,58 +1877,55 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
1929 * Returns: 0 not found 1877 * Returns: 0 not found
1930 * 1 found on list 1878 * 1 found on list
1931 */ 1879 */
1932static int check_dev_on_list(struct edac_pci_device_list *list, int free_index, 1880static int check_dev_on_list(struct edac_pci_device_list *list,
1933 struct pci_dev *dev) 1881 int free_index, struct pci_dev *dev)
1934{ 1882{
1935 int i; 1883 int i;
1936 int rc = 0; /* Assume not found */ 1884 int rc = 0; /* Assume not found */
1937 unsigned short vendor=dev->vendor; 1885 unsigned short vendor=dev->vendor;
1938 unsigned short device=dev->device; 1886 unsigned short device=dev->device;
1939 1887
1940 /* Scan the list, looking for a vendor/device match 1888 /* Scan the list, looking for a vendor/device match */
1941 */ 1889 for (i = 0; i < free_index; i++, list++ ) {
1942 for (i = 0; i < free_index; i++, list++ ) { 1890 if ((list->vendor == vendor ) && (list->device == device )) {
1943 if ( (list->vendor == vendor ) && 1891 rc = 1;
1944 (list->device == device )) { 1892 break;
1945 rc = 1; 1893 }
1946 break; 1894 }
1947 }
1948 }
1949 1895
1950 return rc; 1896 return rc;
1951} 1897}
1952 1898
1953/* 1899/*
1954 * pci_dev parity list iterator 1900 * pci_dev parity list iterator
1955 * Scan the PCI device list for one iteration, looking for SERRORs 1901 * Scan the PCI device list for one iteration, looking for SERRORs
1956 * Master Parity ERRORS or Parity ERRORs on primary or secondary devices 1902 * Master Parity ERRORS or Parity ERRORs on primary or secondary devices
1957 */ 1903 */
1958static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn) 1904static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
1959{ 1905{
1960 struct pci_dev *dev=NULL; 1906 struct pci_dev *dev = NULL;
1961 1907
1962 /* request for kernel access to the next PCI device, if any, 1908 /* request for kernel access to the next PCI device, if any,
1963 * and while we are looking at it have its reference count 1909 * and while we are looking at it have its reference count
1964 * bumped until we are done with it 1910 * bumped until we are done with it
1965 */ 1911 */
1966 while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 1912 while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1967 1913 /* if whitelist exists then it has priority, so only scan
1968 /* if whitelist exists then it has priority, so only scan those 1914 * those devices on the whitelist
1969 * devices on the whitelist 1915 */
1970 */ 1916 if (pci_whitelist_count > 0 ) {
1971 if (pci_whitelist_count > 0 ) { 1917 if (check_dev_on_list(pci_whitelist,
1972 if (check_dev_on_list(pci_whitelist,
1973 pci_whitelist_count, dev)) 1918 pci_whitelist_count, dev))
1974 fn(dev); 1919 fn(dev);
1975 } else { 1920 } else {
1976 /* 1921 /*
1977 * if no whitelist, then check if this devices is 1922 * if no whitelist, then check if this devices is
1978 * blacklisted 1923 * blacklisted
1979 */ 1924 */
1980 if (!check_dev_on_list(pci_blacklist, 1925 if (!check_dev_on_list(pci_blacklist,
1981 pci_blacklist_count, dev)) 1926 pci_blacklist_count, dev))
1982 fn(dev); 1927 fn(dev);
1983 } 1928 }
1984 } 1929 }
1985} 1930}
1986 1931
@@ -1989,7 +1934,7 @@ static void do_pci_parity_check(void)
1989 unsigned long flags; 1934 unsigned long flags;
1990 int before_count; 1935 int before_count;
1991 1936
1992 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1937 debugf3("%s()\n", __func__);
1993 1938
1994 if (!check_pci_parity) 1939 if (!check_pci_parity)
1995 return; 1940 return;
@@ -2011,7 +1956,6 @@ static void do_pci_parity_check(void)
2011 } 1956 }
2012} 1957}
2013 1958
2014
2015static inline void clear_pci_parity_errors(void) 1959static inline void clear_pci_parity_errors(void)
2016{ 1960{
2017 /* Clear any PCI bus parity errors that devices initially have logged 1961 /* Clear any PCI bus parity errors that devices initially have logged
@@ -2020,37 +1964,30 @@ static inline void clear_pci_parity_errors(void)
2020 edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear); 1964 edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
2021} 1965}
2022 1966
2023
2024#else /* CONFIG_PCI */ 1967#else /* CONFIG_PCI */
2025 1968
2026
2027static inline void do_pci_parity_check(void) 1969static inline void do_pci_parity_check(void)
2028{ 1970{
2029 /* no-op */ 1971 /* no-op */
2030} 1972}
2031 1973
2032
2033static inline void clear_pci_parity_errors(void) 1974static inline void clear_pci_parity_errors(void)
2034{ 1975{
2035 /* no-op */ 1976 /* no-op */
2036} 1977}
2037 1978
2038
2039#endif /* CONFIG_PCI */ 1979#endif /* CONFIG_PCI */
2040 1980
2041/* 1981/*
2042 * Iterate over all MC instances and check for ECC, et al, errors 1982 * Iterate over all MC instances and check for ECC, et al, errors
2043 */ 1983 */
2044static inline void check_mc_devices (void) 1984static inline void check_mc_devices(void)
2045{ 1985{
2046 unsigned long flags;
2047 struct list_head *item; 1986 struct list_head *item;
2048 struct mem_ctl_info *mci; 1987 struct mem_ctl_info *mci;
2049 1988
2050 debugf3("MC: " __FILE__ ": %s()\n", __func__); 1989 debugf3("%s()\n", __func__);
2051 1990 down(&mem_ctls_mutex);
2052 /* during poll, have interrupts off */
2053 local_irq_save(flags);
2054 1991
2055 list_for_each(item, &mc_devices) { 1992 list_for_each(item, &mc_devices) {
2056 mci = list_entry(item, struct mem_ctl_info, link); 1993 mci = list_entry(item, struct mem_ctl_info, link);
@@ -2059,10 +1996,9 @@ static inline void check_mc_devices (void)
2059 mci->edac_check(mci); 1996 mci->edac_check(mci);
2060 } 1997 }
2061 1998
2062 local_irq_restore(flags); 1999 up(&mem_ctls_mutex);
2063} 2000}
2064 2001
2065
2066/* 2002/*
2067 * Check MC status every poll_msec. 2003 * Check MC status every poll_msec.
2068 * Check PCI status every poll_msec as well. 2004 * Check PCI status every poll_msec as well.
@@ -2073,70 +2009,21 @@ static inline void check_mc_devices (void)
2073 */ 2009 */
2074static void do_edac_check(void) 2010static void do_edac_check(void)
2075{ 2011{
2076 2012 debugf3("%s()\n", __func__);
2077 debugf3("MC: " __FILE__ ": %s()\n", __func__);
2078
2079 check_mc_devices(); 2013 check_mc_devices();
2080
2081 do_pci_parity_check(); 2014 do_pci_parity_check();
2082} 2015}
2083 2016
2084
2085/*
2086 * EDAC thread state information
2087 */
2088struct bs_thread_info
2089{
2090 struct task_struct *task;
2091 struct completion *event;
2092 char *name;
2093 void (*run)(void);
2094};
2095
2096static struct bs_thread_info bs_thread;
2097
2098/*
2099 * edac_kernel_thread
2100 * This the kernel thread that processes edac operations
2101 * in a normal thread environment
2102 */
2103static int edac_kernel_thread(void *arg) 2017static int edac_kernel_thread(void *arg)
2104{ 2018{
2105 struct bs_thread_info *thread = (struct bs_thread_info *) arg; 2019 while (!kthread_should_stop()) {
2106 2020 do_edac_check();
2107 /* detach thread */
2108 daemonize(thread->name);
2109
2110 current->exit_signal = SIGCHLD;
2111 allow_signal(SIGKILL);
2112 thread->task = current;
2113
2114 /* indicate to starting task we have started */
2115 complete(thread->event);
2116
2117 /* loop forever, until we are told to stop */
2118 while(thread->run != NULL) {
2119 void (*run)(void);
2120
2121 /* call the function to check the memory controllers */
2122 run = thread->run;
2123 if (run)
2124 run();
2125
2126 if (signal_pending(current))
2127 flush_signals(current);
2128
2129 /* ensure we are interruptable */
2130 set_current_state(TASK_INTERRUPTIBLE);
2131 2021
2132 /* goto sleep for the interval */ 2022 /* goto sleep for the interval */
2133 schedule_timeout((HZ * poll_msec) / 1000); 2023 schedule_timeout_interruptible((HZ * poll_msec) / 1000);
2134 try_to_freeze(); 2024 try_to_freeze();
2135 } 2025 }
2136 2026
2137 /* notify waiter that we are exiting */
2138 complete(thread->event);
2139
2140 return 0; 2027 return 0;
2141} 2028}
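
The loop above relies on the standard kthread contract: kthread_stop() (used in edac_mc_exit() below) wakes the task and makes kthread_should_stop() return true, so no signals, daemonize() or hand-rolled completions are needed. A minimal stand-alone sketch of that pairing, with hypothetical names:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *my_task;

static int my_poll_thread(void *arg)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* poll once a second */

	return 0;
}

static int my_start(void)
{
	my_task = kthread_run(my_poll_thread, NULL, "kmypoll");
	return IS_ERR(my_task) ? PTR_ERR(my_task) : 0;
}

static void my_stop(void)
{
	kthread_stop(my_task);	/* returns only after my_poll_thread() exits */
}
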
2142 2029
@@ -2146,10 +2033,7 @@ static int edac_kernel_thread(void *arg)
2146 */ 2033 */
2147static int __init edac_mc_init(void) 2034static int __init edac_mc_init(void)
2148{ 2035{
2149 int ret; 2036 edac_printk(KERN_INFO, EDAC_MC, EDAC_MC_VERSION "\n");
2150 struct completion event;
2151
2152 printk(KERN_INFO "MC: " __FILE__ " version " EDAC_MC_VERSION "\n");
2153 2037
2154 /* 2038 /*
2155 * Harvest and clear any boot/initialization PCI parity errors 2039 * Harvest and clear any boot/initialization PCI parity errors
@@ -2160,80 +2044,54 @@ static int __init edac_mc_init(void)
2160 */ 2044 */
2161 clear_pci_parity_errors(); 2045 clear_pci_parity_errors();
2162 2046
2163 /* perform check for first time to harvest boot leftovers */
2164 do_edac_check();
2165
2166 /* Create the MC sysfs entries */ 2047 /* Create the MC sysfs entries */
2167 if (edac_sysfs_memctrl_setup()) { 2048 if (edac_sysfs_memctrl_setup()) {
2168 printk(KERN_ERR "EDAC MC: Error initializing sysfs code\n"); 2049 edac_printk(KERN_ERR, EDAC_MC,
2050 "Error initializing sysfs code\n");
2169 return -ENODEV; 2051 return -ENODEV;
2170 } 2052 }
2171 2053
2172 /* Create the PCI parity sysfs entries */ 2054 /* Create the PCI parity sysfs entries */
2173 if (edac_sysfs_pci_setup()) { 2055 if (edac_sysfs_pci_setup()) {
2174 edac_sysfs_memctrl_teardown(); 2056 edac_sysfs_memctrl_teardown();
2175 printk(KERN_ERR "EDAC PCI: Error initializing sysfs code\n"); 2057 edac_printk(KERN_ERR, EDAC_MC,
2058 "EDAC PCI: Error initializing sysfs code\n");
2176 return -ENODEV; 2059 return -ENODEV;
2177 } 2060 }
2178 2061
2179 /* Create our kernel thread */
2180 init_completion(&event);
2181 bs_thread.event = &event;
2182 bs_thread.name = "kedac";
2183 bs_thread.run = do_edac_check;
2184
2185 /* create our kernel thread */ 2062 /* create our kernel thread */
2186 ret = kernel_thread(edac_kernel_thread, &bs_thread, CLONE_KERNEL); 2063 edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac");
2187 if (ret < 0) { 2064
2065 if (IS_ERR(edac_thread)) {
2188 /* remove the sysfs entries */ 2066 /* remove the sysfs entries */
2189 edac_sysfs_memctrl_teardown(); 2067 edac_sysfs_memctrl_teardown();
2190 edac_sysfs_pci_teardown(); 2068 edac_sysfs_pci_teardown();
2191 return -ENOMEM; 2069 return PTR_ERR(edac_thread);
2192 } 2070 }
2193 2071
2194 /* wait for our kernel thread ack that it is up and running */
2195 wait_for_completion(&event);
2196
2197 return 0; 2072 return 0;
2198} 2073}
2199 2074
2200
2201/* 2075/*
2202 * edac_mc_exit() 2076 * edac_mc_exit()
2203 * module exit/termination function 2077 * module exit/termination function
2204 */ 2078 */
2205static void __exit edac_mc_exit(void) 2079static void __exit edac_mc_exit(void)
2206{ 2080{
2207 struct completion event; 2081 debugf0("%s()\n", __func__);
2208 2082 kthread_stop(edac_thread);
2209 debugf0("MC: " __FILE__ ": %s()\n", __func__);
2210
2211 init_completion(&event);
2212 bs_thread.event = &event;
2213
2214 /* As soon as ->run is set to NULL, the task could disappear,
2215 * so we need to hold tasklist_lock until we have sent the signal
2216 */
2217 read_lock(&tasklist_lock);
2218 bs_thread.run = NULL;
2219 send_sig(SIGKILL, bs_thread.task, 1);
2220 read_unlock(&tasklist_lock);
2221 wait_for_completion(&event);
2222 2083
2223 /* tear down the sysfs device */ 2084 /* tear down the sysfs device */
2224 edac_sysfs_memctrl_teardown(); 2085 edac_sysfs_memctrl_teardown();
2225 edac_sysfs_pci_teardown(); 2086 edac_sysfs_pci_teardown();
2226} 2087}
2227 2088
2228
2229
2230
2231module_init(edac_mc_init); 2089module_init(edac_mc_init);
2232module_exit(edac_mc_exit); 2090module_exit(edac_mc_exit);
2233 2091
2234MODULE_LICENSE("GPL"); 2092MODULE_LICENSE("GPL");
2235MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" 2093MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
2236 "Based on.work by Dan Hollis et al"); 2094 "Based on work by Dan Hollis et al");
2237MODULE_DESCRIPTION("Core library routines for MC reporting"); 2095MODULE_DESCRIPTION("Core library routines for MC reporting");
2238 2096
2239module_param(panic_on_ue, int, 0644); 2097module_param(panic_on_ue, int, 0644);
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h
index 75ecf484a43a..8d9e83909b9c 100644
--- a/drivers/edac/edac_mc.h
+++ b/drivers/edac/edac_mc.h
@@ -15,11 +15,9 @@
15 * 15 *
16 */ 16 */
17 17
18
19#ifndef _EDAC_MC_H_ 18#ifndef _EDAC_MC_H_
20#define _EDAC_MC_H_ 19#define _EDAC_MC_H_
21 20
22
23#include <linux/config.h> 21#include <linux/config.h>
24#include <linux/kernel.h> 22#include <linux/kernel.h>
25#include <linux/types.h> 23#include <linux/types.h>
@@ -33,7 +31,6 @@
33#include <linux/completion.h> 31#include <linux/completion.h>
34#include <linux/kobject.h> 32#include <linux/kobject.h>
35 33
36
37#define EDAC_MC_LABEL_LEN 31 34#define EDAC_MC_LABEL_LEN 31
38#define MC_PROC_NAME_MAX_LEN 7 35#define MC_PROC_NAME_MAX_LEN 7
39 36
@@ -43,31 +40,53 @@
43#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) ) 40#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) )
44#endif 41#endif
45 42
43#define edac_printk(level, prefix, fmt, arg...) \
44 printk(level "EDAC " prefix ": " fmt, ##arg)
45
46#define edac_mc_printk(mci, level, fmt, arg...) \
47 printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg)
48
49#define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \
50 printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg)
51
52/* prefixes for edac_printk() and edac_mc_printk() */
53#define EDAC_MC "MC"
54#define EDAC_PCI "PCI"
55#define EDAC_DEBUG "DEBUG"
56
46#ifdef CONFIG_EDAC_DEBUG 57#ifdef CONFIG_EDAC_DEBUG
47extern int edac_debug_level; 58extern int edac_debug_level;
48#define edac_debug_printk(level, fmt, args...) \ 59
49do { if (level <= edac_debug_level) printk(KERN_DEBUG fmt, ##args); } while(0) 60#define edac_debug_printk(level, fmt, arg...) \
61 do { \
62 if (level <= edac_debug_level) \
63 edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \
64 } while(0)
65
50#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ ) 66#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ )
51#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ ) 67#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ )
52#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ ) 68#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ )
53#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ ) 69#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ )
54#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ ) 70#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ )
55#else /* !CONFIG_EDAC_DEBUG */ 71
72#else /* !CONFIG_EDAC_DEBUG */
73
56#define debugf0( ... ) 74#define debugf0( ... )
57#define debugf1( ... ) 75#define debugf1( ... )
58#define debugf2( ... ) 76#define debugf2( ... )
59#define debugf3( ... ) 77#define debugf3( ... )
60#define debugf4( ... ) 78#define debugf4( ... )
61#endif /* !CONFIG_EDAC_DEBUG */
62 79
80#endif /* !CONFIG_EDAC_DEBUG */
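
To show what the new logging layers produce (illustrative only, not part of the patch; 'mci' and 'row' are assumed valid), typical call sites and the resulting prefixes are:

	edac_printk(KERN_INFO, EDAC_MC, "core loaded\n");
		/* -> "EDAC MC: core loaded" */
	edac_mc_printk(mci, KERN_WARNING, "CE on csrow %d\n", row);
		/* -> "EDAC MC0: CE on csrow 2" for mc_idx == 0 */
	edac_mc_chipset_printk(mci, KERN_INFO, "i82860", "giving out device\n");
		/* -> "EDAC i82860 MC0: giving out device" */
	debugf1("%s(): probing\n", __func__);
		/* emitted only when CONFIG_EDAC_DEBUG is set and
		 * edac_debug_level >= 1, prefixed "EDAC DEBUG: "
		 */
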
63 81
64#define bs_xstr(s) bs_str(s) 82#define edac_xstr(s) edac_str(s)
65#define bs_str(s) #s 83#define edac_str(s) #s
66#define BS_MOD_STR bs_xstr(KBUILD_BASENAME) 84#define EDAC_MOD_STR edac_xstr(KBUILD_BASENAME)
67 85
68#define BIT(x) (1 << (x)) 86#define BIT(x) (1 << (x))
69 87
70#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, PCI_DEVICE_ID_ ## vend ## _ ## dev 88#define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \
89 PCI_DEVICE_ID_ ## vend ## _ ## dev
71 90
72/* memory devices */ 91/* memory devices */
73enum dev_type { 92enum dev_type {
@@ -117,7 +136,6 @@ enum mem_type {
117#define MEM_FLAG_RDDR BIT(MEM_RDDR) 136#define MEM_FLAG_RDDR BIT(MEM_RDDR)
118#define MEM_FLAG_RMBS BIT(MEM_RMBS) 137#define MEM_FLAG_RMBS BIT(MEM_RMBS)
119 138
120
121/* chipset Error Detection and Correction capabilities and mode */ 139/* chipset Error Detection and Correction capabilities and mode */
122enum edac_type { 140enum edac_type {
123 EDAC_UNKNOWN = 0, /* Unknown if ECC is available */ 141 EDAC_UNKNOWN = 0, /* Unknown if ECC is available */
@@ -142,7 +160,6 @@ enum edac_type {
142#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) 160#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
143#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) 161#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
144 162
145
146/* scrubbing capabilities */ 163/* scrubbing capabilities */
147enum scrub_type { 164enum scrub_type {
148 SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */ 165 SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */
@@ -166,11 +183,6 @@ enum scrub_type {
166#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC_CORR) 183#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC_CORR)
167#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) 184#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
168 185
169enum mci_sysfs_status {
170 MCI_SYSFS_INACTIVE = 0, /* sysfs entries NOT registered */
171 MCI_SYSFS_ACTIVE /* sysfs entries ARE registered */
172};
173
174/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ 186/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
175 187
176/* 188/*
@@ -255,20 +267,19 @@ enum mci_sysfs_status {
255 * PS - I enjoyed writing all that about as much as you enjoyed reading it. 267 * PS - I enjoyed writing all that about as much as you enjoyed reading it.
256 */ 268 */
257 269
258
259struct channel_info { 270struct channel_info {
260 int chan_idx; /* channel index */ 271 int chan_idx; /* channel index */
261 u32 ce_count; /* Correctable Errors for this CHANNEL */ 272 u32 ce_count; /* Correctable Errors for this CHANNEL */
262 char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ 273 char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
263 struct csrow_info *csrow; /* the parent */ 274 struct csrow_info *csrow; /* the parent */
264}; 275};
265 276
266
267struct csrow_info { 277struct csrow_info {
268 unsigned long first_page; /* first page number in dimm */ 278 unsigned long first_page; /* first page number in dimm */
269 unsigned long last_page; /* last page number in dimm */ 279 unsigned long last_page; /* last page number in dimm */
270 unsigned long page_mask; /* used for interleaving - 280 unsigned long page_mask; /* used for interleaving -
271 0UL for non intlv */ 281 * 0UL for non intlv
282 */
272 u32 nr_pages; /* number of pages in csrow */ 283 u32 nr_pages; /* number of pages in csrow */
273 u32 grain; /* granularity of reported error in bytes */ 284 u32 grain; /* granularity of reported error in bytes */
274 int csrow_idx; /* the chip-select row */ 285 int csrow_idx; /* the chip-select row */
@@ -280,29 +291,28 @@ struct csrow_info {
280 struct mem_ctl_info *mci; /* the parent */ 291 struct mem_ctl_info *mci; /* the parent */
281 292
282 struct kobject kobj; /* sysfs kobject for this csrow */ 293 struct kobject kobj; /* sysfs kobject for this csrow */
294 struct completion kobj_complete;
283 295
284 /* FIXME the number of CHANNELs might need to become dynamic */ 296 /* FIXME the number of CHANNELs might need to become dynamic */
285 u32 nr_channels; 297 u32 nr_channels;
286 struct channel_info *channels; 298 struct channel_info *channels;
287}; 299};
288 300
289
290struct mem_ctl_info { 301struct mem_ctl_info {
291 struct list_head link; /* for global list of mem_ctl_info structs */ 302 struct list_head link; /* for global list of mem_ctl_info structs */
292 unsigned long mtype_cap; /* memory types supported by mc */ 303 unsigned long mtype_cap; /* memory types supported by mc */
293 unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ 304 unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
294 unsigned long edac_cap; /* configuration capabilities - this is 305 unsigned long edac_cap; /* configuration capabilities - this is
295 closely related to edac_ctl_cap. The 306 * closely related to edac_ctl_cap. The
296 difference is that the controller 307 * difference is that the controller may be
297 may be capable of s4ecd4ed which would 308 * capable of s4ecd4ed which would be listed
298 be listed in edac_ctl_cap, but if 309 * in edac_ctl_cap, but if channels aren't
299 channels aren't capable of s4ecd4ed then the 310 * capable of s4ecd4ed then the edac_cap would
300 edac_cap would not have that capability. */ 311 * not have that capability.
312 */
301 unsigned long scrub_cap; /* chipset scrub capabilities */ 313 unsigned long scrub_cap; /* chipset scrub capabilities */
302 enum scrub_type scrub_mode; /* current scrub mode */ 314 enum scrub_type scrub_mode; /* current scrub mode */
303 315
304 enum mci_sysfs_status sysfs_active; /* status of sysfs */
305
306 /* pointer to edac checking routine */ 316 /* pointer to edac checking routine */
307 void (*edac_check) (struct mem_ctl_info * mci); 317 void (*edac_check) (struct mem_ctl_info * mci);
308 /* 318 /*
@@ -311,7 +321,7 @@ struct mem_ctl_info {
311 */ 321 */
312 /* FIXME - why not send the phys page to begin with? */ 322 /* FIXME - why not send the phys page to begin with? */
313 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, 323 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
314 unsigned long page); 324 unsigned long page);
315 int mc_idx; 325 int mc_idx;
316 int nr_csrows; 326 int nr_csrows;
317 struct csrow_info *csrows; 327 struct csrow_info *csrows;
@@ -340,72 +350,69 @@ struct mem_ctl_info {
340 350
341 /* edac sysfs device control */ 351 /* edac sysfs device control */
342 struct kobject edac_mci_kobj; 352 struct kobject edac_mci_kobj;
353 struct completion kobj_complete;
343}; 354};
344 355
345
346
347/* write all or some bits in a byte-register*/ 356/* write all or some bits in a byte-register*/
348static inline void pci_write_bits8(struct pci_dev *pdev, int offset, 357static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value,
349 u8 value, u8 mask) 358 u8 mask)
350{ 359{
351 if (mask != 0xff) { 360 if (mask != 0xff) {
352 u8 buf; 361 u8 buf;
362
353 pci_read_config_byte(pdev, offset, &buf); 363 pci_read_config_byte(pdev, offset, &buf);
354 value &= mask; 364 value &= mask;
355 buf &= ~mask; 365 buf &= ~mask;
356 value |= buf; 366 value |= buf;
357 } 367 }
368
358 pci_write_config_byte(pdev, offset, value); 369 pci_write_config_byte(pdev, offset, value);
359} 370}
360 371
361
362/* write all or some bits in a word-register*/ 372/* write all or some bits in a word-register*/
363static inline void pci_write_bits16(struct pci_dev *pdev, int offset, 373static inline void pci_write_bits16(struct pci_dev *pdev, int offset,
364 u16 value, u16 mask) 374 u16 value, u16 mask)
365{ 375{
366 if (mask != 0xffff) { 376 if (mask != 0xffff) {
367 u16 buf; 377 u16 buf;
378
368 pci_read_config_word(pdev, offset, &buf); 379 pci_read_config_word(pdev, offset, &buf);
369 value &= mask; 380 value &= mask;
370 buf &= ~mask; 381 buf &= ~mask;
371 value |= buf; 382 value |= buf;
372 } 383 }
384
373 pci_write_config_word(pdev, offset, value); 385 pci_write_config_word(pdev, offset, value);
374} 386}
375 387
376
377/* write all or some bits in a dword-register*/ 388/* write all or some bits in a dword-register*/
378static inline void pci_write_bits32(struct pci_dev *pdev, int offset, 389static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
379 u32 value, u32 mask) 390 u32 value, u32 mask)
380{ 391{
381 if (mask != 0xffff) { 392 if (mask != 0xffff) {
382 u32 buf; 393 u32 buf;
394
383 pci_read_config_dword(pdev, offset, &buf); 395 pci_read_config_dword(pdev, offset, &buf);
384 value &= mask; 396 value &= mask;
385 buf &= ~mask; 397 buf &= ~mask;
386 value |= buf; 398 value |= buf;
387 } 399 }
400
388 pci_write_config_dword(pdev, offset, value); 401 pci_write_config_dword(pdev, offset, value);
389} 402}
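
All three helpers do a read-modify-write only when the mask is partial; note that the dword variant still compares the mask against 0xffff rather than 0xffffffff, a pre-existing quirk this patch leaves alone. Usage sketch (illustrative; the register offsets are hypothetical, though the i82860 driver's old counter-clearing call later in this patch has exactly this shape):

	/* clear two write-one-to-clear status bits, leave the rest untouched */
	pci_write_bits16(pdev, MY_ERRSTS_OFFSET, 0x0003, 0x0003);

	/* update only the low nibble of an 8-bit control register */
	pci_write_bits8(pdev, MY_CTL_OFFSET, 0x05, 0x0f);
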
390 403
391
392#ifdef CONFIG_EDAC_DEBUG 404#ifdef CONFIG_EDAC_DEBUG
393void edac_mc_dump_channel(struct channel_info *chan); 405void edac_mc_dump_channel(struct channel_info *chan);
394void edac_mc_dump_mci(struct mem_ctl_info *mci); 406void edac_mc_dump_mci(struct mem_ctl_info *mci);
395void edac_mc_dump_csrow(struct csrow_info *csrow); 407void edac_mc_dump_csrow(struct csrow_info *csrow);
396#endif /* CONFIG_EDAC_DEBUG */ 408#endif /* CONFIG_EDAC_DEBUG */
397 409
398extern int edac_mc_add_mc(struct mem_ctl_info *mci); 410extern int edac_mc_add_mc(struct mem_ctl_info *mci);
399extern int edac_mc_del_mc(struct mem_ctl_info *mci); 411extern struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev);
400
401extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, 412extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
402 unsigned long page); 413 unsigned long page);
403 414extern void edac_mc_scrub_block(unsigned long page, unsigned long offset,
404extern struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev 415 u32 size);
405 *pdev);
406
407extern void edac_mc_scrub_block(unsigned long page,
408 unsigned long offset, u32 size);
409 416
410/* 417/*
411 * The no info errors are used when error overflows are reported. 418 * The no info errors are used when error overflows are reported.
@@ -418,31 +425,25 @@ extern void edac_mc_scrub_block(unsigned long page,
418 * statement clutter and extra function arguments. 425 * statement clutter and extra function arguments.
419 */ 426 */
420extern void edac_mc_handle_ce(struct mem_ctl_info *mci, 427extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
421 unsigned long page_frame_number, 428 unsigned long page_frame_number, unsigned long offset_in_page,
422 unsigned long offset_in_page, 429 unsigned long syndrome, int row, int channel,
423 unsigned long syndrome, 430 const char *msg);
424 int row, int channel, const char *msg);
425
426extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, 431extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
427 const char *msg); 432 const char *msg);
428
429extern void edac_mc_handle_ue(struct mem_ctl_info *mci, 433extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
430 unsigned long page_frame_number, 434 unsigned long page_frame_number, unsigned long offset_in_page,
431 unsigned long offset_in_page, 435 int row, const char *msg);
432 int row, const char *msg);
433
434extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, 436extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
435 const char *msg); 437 const char *msg);
436 438
437/* 439/*
438 * This kmalloc's and initializes all the structures. 440 * This kmalloc's and initializes all the structures.
439 * Can't be used if all structures don't have the same lifetime. 441 * Can't be used if all structures don't have the same lifetime.
440 */ 442 */
441extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, 443extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
442 unsigned nr_csrows, unsigned nr_chans); 444 unsigned nr_chans);
443 445
444/* Free an mc previously allocated by edac_mc_alloc() */ 446/* Free an mc previously allocated by edac_mc_alloc() */
445extern void edac_mc_free(struct mem_ctl_info *mci); 447extern void edac_mc_free(struct mem_ctl_info *mci);
446 448
447
448#endif /* _EDAC_MC_H_ */ 449#endif /* _EDAC_MC_H_ */
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 52596e75f9c2..fd342163cf97 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -9,7 +9,6 @@
9 * by Thayne Harbaugh of Linux Networx. (http://lnxi.com) 9 * by Thayne Harbaugh of Linux Networx. (http://lnxi.com)
10 */ 10 */
11 11
12
13#include <linux/config.h> 12#include <linux/config.h>
14#include <linux/module.h> 13#include <linux/module.h>
15#include <linux/init.h> 14#include <linux/init.h>
@@ -18,6 +17,11 @@
18#include <linux/slab.h> 17#include <linux/slab.h>
19#include "edac_mc.h" 18#include "edac_mc.h"
20 19
20#define i82860_printk(level, fmt, arg...) \
21 edac_printk(level, "i82860", fmt, ##arg)
22
23#define i82860_mc_printk(mci, level, fmt, arg...) \
24 edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg)
21 25
22#ifndef PCI_DEVICE_ID_INTEL_82860_0 26#ifndef PCI_DEVICE_ID_INTEL_82860_0
23#define PCI_DEVICE_ID_INTEL_82860_0 0x2531 27#define PCI_DEVICE_ID_INTEL_82860_0 0x2531
@@ -48,15 +52,15 @@ struct i82860_error_info {
48 52
49static const struct i82860_dev_info i82860_devs[] = { 53static const struct i82860_dev_info i82860_devs[] = {
50 [I82860] = { 54 [I82860] = {
51 .ctl_name = "i82860"}, 55 .ctl_name = "i82860"
56 },
52}; 57};
53 58
54static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code 59static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code
55 has already registered driver */ 60 * has already registered driver
61 */
56 62
57static int i82860_registered = 1; 63static void i82860_get_error_info(struct mem_ctl_info *mci,
58
59static void i82860_get_error_info (struct mem_ctl_info *mci,
60 struct i82860_error_info *info) 64 struct i82860_error_info *info)
61{ 65{
62 /* 66 /*
@@ -78,14 +82,15 @@ static void i82860_get_error_info (struct mem_ctl_info *mci,
78 */ 82 */
79 if (!(info->errsts2 & 0x0003)) 83 if (!(info->errsts2 & 0x0003))
80 return; 84 return;
85
81 if ((info->errsts ^ info->errsts2) & 0x0003) { 86 if ((info->errsts ^ info->errsts2) & 0x0003) {
82 pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap); 87 pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap);
83 pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, 88 pci_read_config_word(mci->pdev, I82860_DERRCTL_STS,
84 &info->derrsyn); 89 &info->derrsyn);
85 } 90 }
86} 91}
87 92
88static int i82860_process_error_info (struct mem_ctl_info *mci, 93static int i82860_process_error_info(struct mem_ctl_info *mci,
89 struct i82860_error_info *info, int handle_errors) 94 struct i82860_error_info *info, int handle_errors)
90{ 95{
91 int row; 96 int row;
@@ -107,8 +112,8 @@ static int i82860_process_error_info (struct mem_ctl_info *mci,
107 if (info->errsts & 0x0002) 112 if (info->errsts & 0x0002)
108 edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE"); 113 edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
109 else 114 else
110 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 115 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
111 0, "i82860 UE"); 116 "i82860 UE");
112 117
113 return 1; 118 return 1;
114} 119}
@@ -117,7 +122,7 @@ static void i82860_check(struct mem_ctl_info *mci)
117{ 122{
118 struct i82860_error_info info; 123 struct i82860_error_info info;
119 124
120 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 125 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
121 i82860_get_error_info(mci, &info); 126 i82860_get_error_info(mci, &info);
122 i82860_process_error_info(mci, &info, 1); 127 i82860_process_error_info(mci, &info, 1);
123} 128}
@@ -128,6 +133,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
128 int index; 133 int index;
129 struct mem_ctl_info *mci = NULL; 134 struct mem_ctl_info *mci = NULL;
130 unsigned long last_cumul_size; 135 unsigned long last_cumul_size;
136 struct i82860_error_info discard;
131 137
132 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 138 u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
133 139
@@ -140,21 +146,20 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
140 going to make 1 channel for group. 146 going to make 1 channel for group.
141 */ 147 */
142 mci = edac_mc_alloc(0, 16, 1); 148 mci = edac_mc_alloc(0, 16, 1);
149
143 if (!mci) 150 if (!mci)
144 return -ENOMEM; 151 return -ENOMEM;
145 152
146 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); 153 debugf3("%s(): init mci\n", __func__);
147
148 mci->pdev = pdev; 154 mci->pdev = pdev;
149 mci->mtype_cap = MEM_FLAG_DDR; 155 mci->mtype_cap = MEM_FLAG_DDR;
150 156
151
152 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 157 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
153 	/* I'm not sure about this but I think that all RDRAM is SECDED */ 158 	/* I'm not sure about this but I think that all RDRAM is SECDED */
154 mci->edac_cap = EDAC_FLAG_SECDED; 159 mci->edac_cap = EDAC_FLAG_SECDED;
155 /* adjust FLAGS */ 160 /* adjust FLAGS */
156 161
157 mci->mod_name = BS_MOD_STR; 162 mci->mod_name = EDAC_MOD_STR;
158 mci->mod_ver = "$Revision: 1.1.2.6 $"; 163 mci->mod_ver = "$Revision: 1.1.2.6 $";
159 mci->ctl_name = i82860_devs[dev_idx].ctl_name; 164 mci->ctl_name = i82860_devs[dev_idx].ctl_name;
160 mci->edac_check = i82860_check; 165 mci->edac_check = i82860_check;
@@ -175,12 +180,13 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
175 struct csrow_info *csrow = &mci->csrows[index]; 180 struct csrow_info *csrow = &mci->csrows[index];
176 181
177 pci_read_config_word(mci->pdev, I82860_GBA + index * 2, 182 pci_read_config_word(mci->pdev, I82860_GBA + index * 2,
178 &value); 183 &value);
179 184
180 cumul_size = (value & I82860_GBA_MASK) << 185 cumul_size = (value & I82860_GBA_MASK) <<
181 (I82860_GBA_SHIFT - PAGE_SHIFT); 186 (I82860_GBA_SHIFT - PAGE_SHIFT);
182 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", 187 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
183 __func__, index, cumul_size); 188 cumul_size);
189
184 if (cumul_size == last_cumul_size) 190 if (cumul_size == last_cumul_size)
185 continue; /* not populated */ 191 continue; /* not populated */
186 192
@@ -188,42 +194,43 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
188 csrow->last_page = cumul_size - 1; 194 csrow->last_page = cumul_size - 1;
189 csrow->nr_pages = cumul_size - last_cumul_size; 195 csrow->nr_pages = cumul_size - last_cumul_size;
190 last_cumul_size = cumul_size; 196 last_cumul_size = cumul_size;
191 	csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB resolution */ 197 	csrow->grain = 1 << 12;	/* I82860_EAP has 4KiB resolution */
192 csrow->mtype = MEM_RMBS; 198 csrow->mtype = MEM_RMBS;
193 csrow->dtype = DEV_UNKNOWN; 199 csrow->dtype = DEV_UNKNOWN;
194 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; 200 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
195 } 201 }
196 202
197 /* clear counters */ 203 i82860_get_error_info(mci, &discard); /* clear counters */
198 pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003);
199 204
200 if (edac_mc_add_mc(mci)) { 205 if (edac_mc_add_mc(mci)) {
201 debugf3("MC: " __FILE__ 206 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
202 ": %s(): failed edac_mc_add_mc()\n",
203 __func__);
204 edac_mc_free(mci); 207 edac_mc_free(mci);
205 } else { 208 } else {
206 /* get this far and it's successful */ 209 /* get this far and it's successful */
207 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 210 debugf3("%s(): success\n", __func__);
208 rc = 0; 211 rc = 0;
209 } 212 }
213
210 return rc; 214 return rc;
211} 215}
212 216
213/* returns count (>= 0), or negative on error */ 217/* returns count (>= 0), or negative on error */
214static int __devinit i82860_init_one(struct pci_dev *pdev, 218static int __devinit i82860_init_one(struct pci_dev *pdev,
215 const struct pci_device_id *ent) 219 const struct pci_device_id *ent)
216{ 220{
217 int rc; 221 int rc;
218 222
219 debugf0("MC: " __FILE__ ": %s()\n", __func__); 223 debugf0("%s()\n", __func__);
224 i82860_printk(KERN_INFO, "i82860 init one\n");
220 225
221 printk(KERN_INFO "i82860 init one\n"); 226 if (pci_enable_device(pdev) < 0)
222 if(pci_enable_device(pdev) < 0)
223 return -EIO; 227 return -EIO;
228
224 rc = i82860_probe1(pdev, ent->driver_data); 229 rc = i82860_probe1(pdev, ent->driver_data);
225 if(rc == 0) 230
231 if (rc == 0)
226 mci_pdev = pci_dev_get(pdev); 232 mci_pdev = pci_dev_get(pdev);
233
227 return rc; 234 return rc;
228} 235}
229 236
@@ -231,23 +238,28 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
231{ 238{
232 struct mem_ctl_info *mci; 239 struct mem_ctl_info *mci;
233 240
234 debugf0(__FILE__ ": %s()\n", __func__); 241 debugf0("%s()\n", __func__);
235 242
236 mci = edac_mc_find_mci_by_pdev(pdev); 243 if ((mci = edac_mc_del_mc(pdev)) == NULL)
237 if ((mci != NULL) && (edac_mc_del_mc(mci) == 0)) 244 return;
238 edac_mc_free(mci); 245
246 edac_mc_free(mci);
239} 247}
240 248
241static const struct pci_device_id i82860_pci_tbl[] __devinitdata = { 249static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
242 {PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 250 {
243 I82860}, 251 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
244 {0,} /* 0 terminated list. */ 252 I82860
253 },
254 {
255 0,
256 } /* 0 terminated list. */
245}; 257};
246 258
247MODULE_DEVICE_TABLE(pci, i82860_pci_tbl); 259MODULE_DEVICE_TABLE(pci, i82860_pci_tbl);
248 260
249static struct pci_driver i82860_driver = { 261static struct pci_driver i82860_driver = {
250 .name = BS_MOD_STR, 262 .name = EDAC_MOD_STR,
251 .probe = i82860_init_one, 263 .probe = i82860_init_one,
252 .remove = __devexit_p(i82860_remove_one), 264 .remove = __devexit_p(i82860_remove_one),
253 .id_table = i82860_pci_tbl, 265 .id_table = i82860_pci_tbl,
@@ -257,43 +269,56 @@ static int __init i82860_init(void)
257{ 269{
258 int pci_rc; 270 int pci_rc;
259 271
260 debugf3("MC: " __FILE__ ": %s()\n", __func__); 272 debugf3("%s()\n", __func__);
273
261 if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) 274 if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
262 return pci_rc; 275 goto fail0;
263 276
264 if (!mci_pdev) { 277 if (!mci_pdev) {
265 i82860_registered = 0;
266 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 278 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
267 PCI_DEVICE_ID_INTEL_82860_0, NULL); 279 PCI_DEVICE_ID_INTEL_82860_0, NULL);
280
268 if (mci_pdev == NULL) { 281 if (mci_pdev == NULL) {
269 debugf0("860 pci_get_device fail\n"); 282 debugf0("860 pci_get_device fail\n");
270 return -ENODEV; 283 pci_rc = -ENODEV;
284 goto fail1;
271 } 285 }
286
272 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); 287 pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl);
288
273 if (pci_rc < 0) { 289 if (pci_rc < 0) {
274 debugf0("860 init fail\n"); 290 debugf0("860 init fail\n");
275 pci_dev_put(mci_pdev); 291 pci_rc = -ENODEV;
276 return -ENODEV; 292 goto fail1;
277 } 293 }
278 } 294 }
295
279 return 0; 296 return 0;
297
298fail1:
299 pci_unregister_driver(&i82860_driver);
300
301fail0:
302 if (mci_pdev != NULL)
303 pci_dev_put(mci_pdev);
304
305 return pci_rc;
280} 306}
281 307
282static void __exit i82860_exit(void) 308static void __exit i82860_exit(void)
283{ 309{
284 debugf3("MC: " __FILE__ ": %s()\n", __func__); 310 debugf3("%s()\n", __func__);
285 311
286 pci_unregister_driver(&i82860_driver); 312 pci_unregister_driver(&i82860_driver);
287 if (!i82860_registered) { 313
288 i82860_remove_one(mci_pdev); 314 if (mci_pdev != NULL)
289 pci_dev_put(mci_pdev); 315 pci_dev_put(mci_pdev);
290 }
291} 316}
292 317
293module_init(i82860_init); 318module_init(i82860_init);
294module_exit(i82860_exit); 319module_exit(i82860_exit);
295 320
296MODULE_LICENSE("GPL"); 321MODULE_LICENSE("GPL");
297MODULE_AUTHOR 322MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
298 ("Red Hat Inc. (http://www.redhat.com.com) Ben Woodard <woodard@redhat.com>"); 323 "Ben Woodard <woodard@redhat.com>");
299MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); 324MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
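
The i82860 hunks above replace the explicit ERRSTS write in the probe path with a call to i82860_get_error_info() on a throwaway structure, so the act of reading the error registers clears the latched status, and the errsts/errsts2 double read lets the handler spot an error that lands mid-read. Below is a minimal userspace C sketch of that idea; all names here (fake_read_status(), struct error_info) are invented for illustration and are not the driver's real symbols.

    /* Illustrative userspace sketch, not kernel code: clearing latched
     * status by reading it into a throwaway structure, and re-reading the
     * status register to detect a race. */
    #include <stdio.h>

    struct error_info { unsigned short errsts, errsts2; unsigned int eap; };

    static unsigned short status = 0x0002;          /* pretend a UE is latched */

    static unsigned short fake_read_status(void)    /* reading clears the latch */
    {
            unsigned short v = status;
            status = 0;
            return v;
    }

    static void get_error_info(struct error_info *info)
    {
            info->errsts = fake_read_status();
            info->eap = 0xdeadb000;                 /* would be the error address */
            info->errsts2 = fake_read_status();     /* re-read to spot mid-read errors */
    }

    int main(void)
    {
            struct error_info discard;              /* mirrors the "discard" variable above */

            get_error_info(&discard);               /* "clear counters" */
            printf("status after clearing read: %#x\n", (unsigned)fake_read_status());
            return 0;
    }
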
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 1991f94af753..0aec92698f17 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -13,18 +13,19 @@
13 * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com 13 * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
14 */ 14 */
15 15
16
17#include <linux/config.h> 16#include <linux/config.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/init.h> 18#include <linux/init.h>
20
21#include <linux/pci.h> 19#include <linux/pci.h>
22#include <linux/pci_ids.h> 20#include <linux/pci_ids.h>
23
24#include <linux/slab.h> 21#include <linux/slab.h>
25
26#include "edac_mc.h" 22#include "edac_mc.h"
27 23
24#define i82875p_printk(level, fmt, arg...) \
25 edac_printk(level, "i82875p", fmt, ##arg)
26
27#define i82875p_mc_printk(mci, level, fmt, arg...) \
28 edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg)
28 29
29#ifndef PCI_DEVICE_ID_INTEL_82875_0 30#ifndef PCI_DEVICE_ID_INTEL_82875_0
30#define PCI_DEVICE_ID_INTEL_82875_0 0x2578 31#define PCI_DEVICE_ID_INTEL_82875_0 0x2578
@@ -34,11 +35,9 @@
34#define PCI_DEVICE_ID_INTEL_82875_6 0x257e 35#define PCI_DEVICE_ID_INTEL_82875_6 0x257e
35#endif /* PCI_DEVICE_ID_INTEL_82875_6 */ 36#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
36 37
37
38/* four csrows in dual channel, eight in single channel */ 38/* four csrows in dual channel, eight in single channel */
39#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans)) 39#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
40 40
41
42/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ 41/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
43#define I82875P_EAP 0x58 /* Error Address Pointer (32b) 42#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
44 * 43 *
@@ -87,7 +86,6 @@
87 * 0 reserved 86 * 0 reserved
88 */ 87 */
89 88
90
91/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */ 89/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
92#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b) 90#define I82875P_PCICMD6 0x04 /* PCI Command Register (16b)
93 * 91 *
@@ -151,23 +149,19 @@
151 * 1:0 DRAM type 01=DDR 149 * 1:0 DRAM type 01=DDR
152 */ 150 */
153 151
154
155enum i82875p_chips { 152enum i82875p_chips {
156 I82875P = 0, 153 I82875P = 0,
157}; 154};
158 155
159
160struct i82875p_pvt { 156struct i82875p_pvt {
161 struct pci_dev *ovrfl_pdev; 157 struct pci_dev *ovrfl_pdev;
162 void __iomem *ovrfl_window; 158 void __iomem *ovrfl_window;
163}; 159};
164 160
165
166struct i82875p_dev_info { 161struct i82875p_dev_info {
167 const char *ctl_name; 162 const char *ctl_name;
168}; 163};
169 164
170
171struct i82875p_error_info { 165struct i82875p_error_info {
172 u16 errsts; 166 u16 errsts;
173 u32 eap; 167 u32 eap;
@@ -176,17 +170,19 @@ struct i82875p_error_info {
176 u16 errsts2; 170 u16 errsts2;
177}; 171};
178 172
179
180static const struct i82875p_dev_info i82875p_devs[] = { 173static const struct i82875p_dev_info i82875p_devs[] = {
181 [I82875P] = { 174 [I82875P] = {
182 .ctl_name = "i82875p"}, 175 .ctl_name = "i82875p"
176 },
183}; 177};
184 178
185static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code 179static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code has
186 has already registered driver */ 180 * already registered driver
181 */
182
187static int i82875p_registered = 1; 183static int i82875p_registered = 1;
188 184
189static void i82875p_get_error_info (struct mem_ctl_info *mci, 185static void i82875p_get_error_info(struct mem_ctl_info *mci,
190 struct i82875p_error_info *info) 186 struct i82875p_error_info *info)
191{ 187{
192 /* 188 /*
@@ -210,15 +206,16 @@ static void i82875p_get_error_info (struct mem_ctl_info *mci,
210 */ 206 */
211 if (!(info->errsts2 & 0x0081)) 207 if (!(info->errsts2 & 0x0081))
212 return; 208 return;
209
213 if ((info->errsts ^ info->errsts2) & 0x0081) { 210 if ((info->errsts ^ info->errsts2) & 0x0081) {
214 pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap); 211 pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap);
215 pci_read_config_byte(mci->pdev, I82875P_DES, &info->des); 212 pci_read_config_byte(mci->pdev, I82875P_DES, &info->des);
216 pci_read_config_byte(mci->pdev, I82875P_DERRSYN, 213 pci_read_config_byte(mci->pdev, I82875P_DERRSYN,
217 &info->derrsyn); 214 &info->derrsyn);
218 } 215 }
219} 216}
220 217
221static int i82875p_process_error_info (struct mem_ctl_info *mci, 218static int i82875p_process_error_info(struct mem_ctl_info *mci,
222 struct i82875p_error_info *info, int handle_errors) 219 struct i82875p_error_info *info, int handle_errors)
223{ 220{
224 int row, multi_chan; 221 int row, multi_chan;
@@ -243,23 +240,21 @@ static int i82875p_process_error_info (struct mem_ctl_info *mci,
243 edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE"); 240 edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
244 else 241 else
245 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 242 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
246 multi_chan ? (info->des & 0x1) : 0, 243 multi_chan ? (info->des & 0x1) : 0,
247 "i82875p CE"); 244 "i82875p CE");
248 245
249 return 1; 246 return 1;
250} 247}
251 248
252
253static void i82875p_check(struct mem_ctl_info *mci) 249static void i82875p_check(struct mem_ctl_info *mci)
254{ 250{
255 struct i82875p_error_info info; 251 struct i82875p_error_info info;
256 252
257 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 253 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
258 i82875p_get_error_info(mci, &info); 254 i82875p_get_error_info(mci, &info);
259 i82875p_process_error_info(mci, &info, 1); 255 i82875p_process_error_info(mci, &info, 1);
260} 256}
261 257
262
263#ifdef CONFIG_PROC_FS 258#ifdef CONFIG_PROC_FS
264extern int pci_proc_attach_device(struct pci_dev *); 259extern int pci_proc_attach_device(struct pci_dev *);
265#endif 260#endif
@@ -273,15 +268,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
273 unsigned long last_cumul_size; 268 unsigned long last_cumul_size;
274 struct pci_dev *ovrfl_pdev; 269 struct pci_dev *ovrfl_pdev;
275 void __iomem *ovrfl_window = NULL; 270 void __iomem *ovrfl_window = NULL;
276
277 u32 drc; 271 u32 drc;
278 u32 drc_chan; /* Number of channels 0=1chan,1=2chan */ 272 u32 drc_chan; /* Number of channels 0=1chan,1=2chan */
279 u32 nr_chans; 273 u32 nr_chans;
280 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 274 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
275 struct i82875p_error_info discard;
281 276
282 debugf0("MC: " __FILE__ ": %s()\n", __func__); 277 debugf0("%s()\n", __func__);
283 278 ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
284 ovrfl_pdev = pci_find_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
285 279
286 if (!ovrfl_pdev) { 280 if (!ovrfl_pdev) {
287 /* 281 /*
@@ -292,71 +286,69 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
292 */ 286 */
293 pci_write_bits8(pdev, 0xf4, 0x2, 0x2); 287 pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
294 ovrfl_pdev = 288 ovrfl_pdev =
295 pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); 289 pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));
290
296 if (!ovrfl_pdev) 291 if (!ovrfl_pdev)
297 goto fail; 292 return -ENODEV;
298 } 293 }
294
299#ifdef CONFIG_PROC_FS 295#ifdef CONFIG_PROC_FS
300 if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) { 296 if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) {
301 printk(KERN_ERR "MC: " __FILE__ 297 i82875p_printk(KERN_ERR,
302 ": %s(): Failed to attach overflow device\n", 298 "%s(): Failed to attach overflow device\n", __func__);
303 __func__); 299 return -ENODEV;
304 goto fail;
305 } 300 }
306#endif /* CONFIG_PROC_FS */ 301#endif
302 /* CONFIG_PROC_FS */
307 if (pci_enable_device(ovrfl_pdev)) { 303 if (pci_enable_device(ovrfl_pdev)) {
308 printk(KERN_ERR "MC: " __FILE__ 304 i82875p_printk(KERN_ERR,
309 ": %s(): Failed to enable overflow device\n", 305 "%s(): Failed to enable overflow device\n", __func__);
310 __func__); 306 return -ENODEV;
311 goto fail;
312 } 307 }
313 308
314 if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) { 309 if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) {
315#ifdef CORRECT_BIOS 310#ifdef CORRECT_BIOS
316 goto fail; 311 goto fail0;
317#endif 312#endif
318 } 313 }
314
319 /* cache is irrelevant for PCI bus reads/writes */ 315 /* cache is irrelevant for PCI bus reads/writes */
320 ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0), 316 ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0),
321 pci_resource_len(ovrfl_pdev, 0)); 317 pci_resource_len(ovrfl_pdev, 0));
322 318
323 if (!ovrfl_window) { 319 if (!ovrfl_window) {
324 printk(KERN_ERR "MC: " __FILE__ 320 i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
325 ": %s(): Failed to ioremap bar6\n", __func__); 321 __func__);
326 goto fail; 322 goto fail1;
327 } 323 }
328 324
329 /* need to find out the number of channels */ 325 /* need to find out the number of channels */
330 drc = readl(ovrfl_window + I82875P_DRC); 326 drc = readl(ovrfl_window + I82875P_DRC);
331 drc_chan = ((drc >> 21) & 0x1); 327 drc_chan = ((drc >> 21) & 0x1);
332 nr_chans = drc_chan + 1; 328 nr_chans = drc_chan + 1;
333 drc_ddim = (drc >> 18) & 0x1;
334 329
330 drc_ddim = (drc >> 18) & 0x1;
335 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), 331 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
336 nr_chans); 332 nr_chans);
337 333
338 if (!mci) { 334 if (!mci) {
339 rc = -ENOMEM; 335 rc = -ENOMEM;
340 goto fail; 336 goto fail2;
341 } 337 }
342 338
343 debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); 339 debugf3("%s(): init mci\n", __func__);
344
345 mci->pdev = pdev; 340 mci->pdev = pdev;
346 mci->mtype_cap = MEM_FLAG_DDR; 341 mci->mtype_cap = MEM_FLAG_DDR;
347
348 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; 342 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
349 mci->edac_cap = EDAC_FLAG_UNKNOWN; 343 mci->edac_cap = EDAC_FLAG_UNKNOWN;
350 /* adjust FLAGS */ 344 /* adjust FLAGS */
351 345
352 mci->mod_name = BS_MOD_STR; 346 mci->mod_name = EDAC_MOD_STR;
353 mci->mod_ver = "$Revision: 1.5.2.11 $"; 347 mci->mod_ver = "$Revision: 1.5.2.11 $";
354 mci->ctl_name = i82875p_devs[dev_idx].ctl_name; 348 mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
355 mci->edac_check = i82875p_check; 349 mci->edac_check = i82875p_check;
356 mci->ctl_page_to_phys = NULL; 350 mci->ctl_page_to_phys = NULL;
357 351 debugf3("%s(): init pvt\n", __func__);
358 debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__);
359
360 pvt = (struct i82875p_pvt *) mci->pvt_info; 352 pvt = (struct i82875p_pvt *) mci->pvt_info;
361 pvt->ovrfl_pdev = ovrfl_pdev; 353 pvt->ovrfl_pdev = ovrfl_pdev;
362 pvt->ovrfl_window = ovrfl_window; 354 pvt->ovrfl_window = ovrfl_window;
@@ -374,8 +366,9 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
374 366
375 value = readb(ovrfl_window + I82875P_DRB + index); 367 value = readb(ovrfl_window + I82875P_DRB + index);
376 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); 368 cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
377 debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", 369 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
378 __func__, index, cumul_size); 370 cumul_size);
371
379 if (cumul_size == last_cumul_size) 372 if (cumul_size == last_cumul_size)
380 continue; /* not populated */ 373 continue; /* not populated */
381 374
@@ -383,71 +376,72 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
383 csrow->last_page = cumul_size - 1; 376 csrow->last_page = cumul_size - 1;
384 csrow->nr_pages = cumul_size - last_cumul_size; 377 csrow->nr_pages = cumul_size - last_cumul_size;
385 last_cumul_size = cumul_size; 378 last_cumul_size = cumul_size;
386 	csrow->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */ 379 	csrow->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
387 csrow->mtype = MEM_DDR; 380 csrow->mtype = MEM_DDR;
388 csrow->dtype = DEV_UNKNOWN; 381 csrow->dtype = DEV_UNKNOWN;
389 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; 382 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
390 } 383 }
391 384
392 /* clear counters */ 385 i82875p_get_error_info(mci, &discard); /* clear counters */
393 pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081);
394 386
395 if (edac_mc_add_mc(mci)) { 387 if (edac_mc_add_mc(mci)) {
396 debugf3("MC: " __FILE__ 388 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
397 ": %s(): failed edac_mc_add_mc()\n", __func__); 389 goto fail3;
398 goto fail;
399 } 390 }
400 391
401 /* get this far and it's successful */ 392 /* get this far and it's successful */
402 debugf3("MC: " __FILE__ ": %s(): success\n", __func__); 393 debugf3("%s(): success\n", __func__);
403 return 0; 394 return 0;
404 395
405 fail: 396fail3:
406 if (mci) 397 edac_mc_free(mci);
407 edac_mc_free(mci);
408 398
409 if (ovrfl_window) 399fail2:
410 iounmap(ovrfl_window); 400 iounmap(ovrfl_window);
411 401
412 if (ovrfl_pdev) { 402fail1:
413 pci_release_regions(ovrfl_pdev); 403 pci_release_regions(ovrfl_pdev);
414 pci_disable_device(ovrfl_pdev);
415 }
416 404
405#ifdef CORRECT_BIOS
406fail0:
407#endif
408 pci_disable_device(ovrfl_pdev);
417 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ 409 /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
418 return rc; 410 return rc;
419} 411}
420 412
421
422/* returns count (>= 0), or negative on error */ 413/* returns count (>= 0), or negative on error */
423static int __devinit i82875p_init_one(struct pci_dev *pdev, 414static int __devinit i82875p_init_one(struct pci_dev *pdev,
424 const struct pci_device_id *ent) 415 const struct pci_device_id *ent)
425{ 416{
426 int rc; 417 int rc;
427 418
428 debugf0("MC: " __FILE__ ": %s()\n", __func__); 419 debugf0("%s()\n", __func__);
420 i82875p_printk(KERN_INFO, "i82875p init one\n");
429 421
430 printk(KERN_INFO "i82875p init one\n"); 422 if (pci_enable_device(pdev) < 0)
431 if(pci_enable_device(pdev) < 0)
432 return -EIO; 423 return -EIO;
424
433 rc = i82875p_probe1(pdev, ent->driver_data); 425 rc = i82875p_probe1(pdev, ent->driver_data);
426
434 if (mci_pdev == NULL) 427 if (mci_pdev == NULL)
435 mci_pdev = pci_dev_get(pdev); 428 mci_pdev = pci_dev_get(pdev);
429
436 return rc; 430 return rc;
437} 431}
438 432
439
440static void __devexit i82875p_remove_one(struct pci_dev *pdev) 433static void __devexit i82875p_remove_one(struct pci_dev *pdev)
441{ 434{
442 struct mem_ctl_info *mci; 435 struct mem_ctl_info *mci;
443 struct i82875p_pvt *pvt = NULL; 436 struct i82875p_pvt *pvt = NULL;
444 437
445 debugf0(__FILE__ ": %s()\n", __func__); 438 debugf0("%s()\n", __func__);
446 439
447 if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL) 440 if ((mci = edac_mc_del_mc(pdev)) == NULL)
448 return; 441 return;
449 442
450 pvt = (struct i82875p_pvt *) mci->pvt_info; 443 pvt = (struct i82875p_pvt *) mci->pvt_info;
444
451 if (pvt->ovrfl_window) 445 if (pvt->ovrfl_window)
452 iounmap(pvt->ovrfl_window); 446 iounmap(pvt->ovrfl_window);
453 447
@@ -459,74 +453,84 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
459 pci_dev_put(pvt->ovrfl_pdev); 453 pci_dev_put(pvt->ovrfl_pdev);
460 } 454 }
461 455
462 if (edac_mc_del_mc(mci))
463 return;
464
465 edac_mc_free(mci); 456 edac_mc_free(mci);
466} 457}
467 458
468
469static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { 459static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
470 {PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, 460 {
471 I82875P}, 461 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
472 {0,} /* 0 terminated list. */ 462 I82875P
463 },
464 {
465 0,
466 } /* 0 terminated list. */
473}; 467};
474 468
475MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); 469MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);
476 470
477
478static struct pci_driver i82875p_driver = { 471static struct pci_driver i82875p_driver = {
479 .name = BS_MOD_STR, 472 .name = EDAC_MOD_STR,
480 .probe = i82875p_init_one, 473 .probe = i82875p_init_one,
481 .remove = __devexit_p(i82875p_remove_one), 474 .remove = __devexit_p(i82875p_remove_one),
482 .id_table = i82875p_pci_tbl, 475 .id_table = i82875p_pci_tbl,
483}; 476};
484 477
485
486static int __init i82875p_init(void) 478static int __init i82875p_init(void)
487{ 479{
488 int pci_rc; 480 int pci_rc;
489 481
490 debugf3("MC: " __FILE__ ": %s()\n", __func__); 482 debugf3("%s()\n", __func__);
491 pci_rc = pci_register_driver(&i82875p_driver); 483 pci_rc = pci_register_driver(&i82875p_driver);
484
492 if (pci_rc < 0) 485 if (pci_rc < 0)
493 return pci_rc; 486 goto fail0;
487
494 if (mci_pdev == NULL) { 488 if (mci_pdev == NULL) {
495 i82875p_registered = 0; 489 mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
496 mci_pdev = 490 PCI_DEVICE_ID_INTEL_82875_0, NULL);
497 pci_get_device(PCI_VENDOR_ID_INTEL, 491
498 PCI_DEVICE_ID_INTEL_82875_0, NULL);
499 if (!mci_pdev) { 492 if (!mci_pdev) {
500 debugf0("875p pci_get_device fail\n"); 493 debugf0("875p pci_get_device fail\n");
501 return -ENODEV; 494 pci_rc = -ENODEV;
495 goto fail1;
502 } 496 }
497
503 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); 498 pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);
499
504 if (pci_rc < 0) { 500 if (pci_rc < 0) {
505 debugf0("875p init fail\n"); 501 debugf0("875p init fail\n");
506 pci_dev_put(mci_pdev); 502 pci_rc = -ENODEV;
507 return -ENODEV; 503 goto fail1;
508 } 504 }
509 } 505 }
506
510 return 0; 507 return 0;
511}
512 508
509fail1:
510 pci_unregister_driver(&i82875p_driver);
511
512fail0:
513 if (mci_pdev != NULL)
514 pci_dev_put(mci_pdev);
515
516 return pci_rc;
517}
513 518
514static void __exit i82875p_exit(void) 519static void __exit i82875p_exit(void)
515{ 520{
516 debugf3("MC: " __FILE__ ": %s()\n", __func__); 521 debugf3("%s()\n", __func__);
517 522
518 pci_unregister_driver(&i82875p_driver); 523 pci_unregister_driver(&i82875p_driver);
524
519 if (!i82875p_registered) { 525 if (!i82875p_registered) {
520 i82875p_remove_one(mci_pdev); 526 i82875p_remove_one(mci_pdev);
521 pci_dev_put(mci_pdev); 527 pci_dev_put(mci_pdev);
522 } 528 }
523} 529}
524 530
525
526module_init(i82875p_init); 531module_init(i82875p_init);
527module_exit(i82875p_exit); 532module_exit(i82875p_exit);
528 533
529
530MODULE_LICENSE("GPL"); 534MODULE_LICENSE("GPL");
531MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); 535MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
532MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); 536MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
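
i82875p_probe1() above moves from a single catch-all fail label to staged fail0..fail3 labels, so each failure point releases exactly the resources acquired before it. The following is a minimal userspace sketch of that unwind idiom, assuming two malloc()s and a FILE * as stand-ins for the driver's PCI regions, ioremap window and mci allocation.

    /* Staged goto-unwind sketch: each acquisition gets its own failure
     * label, and falling through the labels releases resources in reverse
     * order of acquisition. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            int rc = -1;
            char *a = NULL, *b = NULL;
            FILE *f = NULL;

            a = malloc(64);
            if (!a)
                    goto fail0;

            b = malloc(64);
            if (!b)
                    goto fail1;

            f = fopen("/dev/null", "w");
            if (!f)
                    goto fail2;

            /* success: use the resources, then release in reverse order */
            fprintf(f, "ok\n");
            fclose(f);
            free(b);
            free(a);
            return 0;

    fail2:
            free(b);
    fail1:
            free(a);
    fail0:
            return rc;
    }
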
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index e90892831b90..2c29fafe67c7 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -18,14 +18,17 @@
18#include <linux/config.h> 18#include <linux/config.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/init.h> 20#include <linux/init.h>
21
22#include <linux/pci.h> 21#include <linux/pci.h>
23#include <linux/pci_ids.h> 22#include <linux/pci_ids.h>
24
25#include <linux/slab.h> 23#include <linux/slab.h>
26
27#include "edac_mc.h" 24#include "edac_mc.h"
28 25
26#define r82600_printk(level, fmt, arg...) \
27 edac_printk(level, "r82600", fmt, ##arg)
28
29#define r82600_mc_printk(mci, level, fmt, arg...) \
30 edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg)
31
29/* Radisys say "The 82600 integrates a main memory SDRAM controller that 32/* Radisys say "The 82600 integrates a main memory SDRAM controller that
30 * supports up to four banks of memory. The four banks can support a mix of 33 * supports up to four banks of memory. The four banks can support a mix of
31 * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs, 34 * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs,
@@ -126,10 +129,8 @@ struct r82600_error_info {
126 u32 eapr; 129 u32 eapr;
127}; 130};
128 131
129
130static unsigned int disable_hardware_scrub = 0; 132static unsigned int disable_hardware_scrub = 0;
131 133
132
133static void r82600_get_error_info (struct mem_ctl_info *mci, 134static void r82600_get_error_info (struct mem_ctl_info *mci,
134 struct r82600_error_info *info) 135 struct r82600_error_info *info)
135{ 136{
@@ -138,17 +139,16 @@ static void r82600_get_error_info (struct mem_ctl_info *mci,
138 if (info->eapr & BIT(0)) 139 if (info->eapr & BIT(0))
139 /* Clear error to allow next error to be reported [p.62] */ 140 /* Clear error to allow next error to be reported [p.62] */
140 pci_write_bits32(mci->pdev, R82600_EAP, 141 pci_write_bits32(mci->pdev, R82600_EAP,
141 ((u32) BIT(0) & (u32) BIT(1)), 142 ((u32) BIT(0) & (u32) BIT(1)),
142 ((u32) BIT(0) & (u32) BIT(1))); 143 ((u32) BIT(0) & (u32) BIT(1)));
143 144
144 if (info->eapr & BIT(1)) 145 if (info->eapr & BIT(1))
145 /* Clear error to allow next error to be reported [p.62] */ 146 /* Clear error to allow next error to be reported [p.62] */
146 pci_write_bits32(mci->pdev, R82600_EAP, 147 pci_write_bits32(mci->pdev, R82600_EAP,
147 ((u32) BIT(0) & (u32) BIT(1)), 148 ((u32) BIT(0) & (u32) BIT(1)),
148 ((u32) BIT(0) & (u32) BIT(1))); 149 ((u32) BIT(0) & (u32) BIT(1)));
149} 150}
150 151
151
152static int r82600_process_error_info (struct mem_ctl_info *mci, 152static int r82600_process_error_info (struct mem_ctl_info *mci,
153 struct r82600_error_info *info, int handle_errors) 153 struct r82600_error_info *info, int handle_errors)
154{ 154{
@@ -167,26 +167,25 @@ static int r82600_process_error_info (struct mem_ctl_info *mci,
167 * granularity (upper 19 bits only) */ 167 * granularity (upper 19 bits only) */
168 page = eapaddr >> PAGE_SHIFT; 168 page = eapaddr >> PAGE_SHIFT;
169 169
170 if (info->eapr & BIT(0)) { /* CE? */ 170 if (info->eapr & BIT(0)) { /* CE? */
171 error_found = 1; 171 error_found = 1;
172 172
173 if (handle_errors) 173 if (handle_errors)
174 edac_mc_handle_ce( 174 edac_mc_handle_ce(mci, page, 0, /* not avail */
175 mci, page, 0, /* not avail */ 175 syndrome,
176 syndrome, 176 edac_mc_find_csrow_by_page(mci, page),
177 edac_mc_find_csrow_by_page(mci, page), 177 0, /* channel */
178 0, /* channel */ 178 mci->ctl_name);
179 mci->ctl_name);
180 } 179 }
181 180
182 if (info->eapr & BIT(1)) { /* UE? */ 181 if (info->eapr & BIT(1)) { /* UE? */
183 error_found = 1; 182 error_found = 1;
184 183
185 if (handle_errors) 184 if (handle_errors)
186 /* 82600 doesn't give enough info */ 185 /* 82600 doesn't give enough info */
187 edac_mc_handle_ue(mci, page, 0, 186 edac_mc_handle_ue(mci, page, 0,
188 edac_mc_find_csrow_by_page(mci, page), 187 edac_mc_find_csrow_by_page(mci, page),
189 mci->ctl_name); 188 mci->ctl_name);
190 } 189 }
191 190
192 return error_found; 191 return error_found;
@@ -196,7 +195,7 @@ static void r82600_check(struct mem_ctl_info *mci)
196{ 195{
197 struct r82600_error_info info; 196 struct r82600_error_info info;
198 197
199 debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); 198 debugf1("MC%d: %s()\n", mci->mc_idx, __func__);
200 r82600_get_error_info(mci, &info); 199 r82600_get_error_info(mci, &info);
201 r82600_process_error_info(mci, &info, 1); 200 r82600_process_error_info(mci, &info, 1);
202} 201}
@@ -213,25 +212,18 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
213 u32 scrub_disabled; 212 u32 scrub_disabled;
214 u32 sdram_refresh_rate; 213 u32 sdram_refresh_rate;
215 u32 row_high_limit_last = 0; 214 u32 row_high_limit_last = 0;
216 u32 eap_init_bits; 215 struct r82600_error_info discard;
217
218 debugf0("MC: " __FILE__ ": %s()\n", __func__);
219
220 216
217 debugf0("%s()\n", __func__);
221 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); 218 pci_read_config_byte(pdev, R82600_DRAMC, &dramcr);
222 pci_read_config_dword(pdev, R82600_EAP, &eapr); 219 pci_read_config_dword(pdev, R82600_EAP, &eapr);
223
224 ecc_on = dramcr & BIT(5); 220 ecc_on = dramcr & BIT(5);
225 reg_sdram = dramcr & BIT(4); 221 reg_sdram = dramcr & BIT(4);
226 scrub_disabled = eapr & BIT(31); 222 scrub_disabled = eapr & BIT(31);
227 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); 223 sdram_refresh_rate = dramcr & (BIT(0) | BIT(1));
228 224 debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
229 debugf2("MC: " __FILE__ ": %s(): sdram refresh rate = %#0x\n", 225 sdram_refresh_rate);
230 __func__, sdram_refresh_rate); 226 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
231
232 debugf2("MC: " __FILE__ ": %s(): DRAMC register = %#0x\n", __func__,
233 dramcr);
234
235 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); 227 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS);
236 228
237 if (mci == NULL) { 229 if (mci == NULL) {
@@ -239,29 +231,28 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
239 goto fail; 231 goto fail;
240 } 232 }
241 233
242 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); 234 debugf0("%s(): mci = %p\n", __func__, mci);
243
244 mci->pdev = pdev; 235 mci->pdev = pdev;
245 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; 236 mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
246
247 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 237 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
248 /* FIXME try to work out if the chip leads have been * 238 /* FIXME try to work out if the chip leads have been used for COM2
249 * used for COM2 instead on this board? [MA6?] MAYBE: */ 239 * instead on this board? [MA6?] MAYBE:
240 */
250 241
251 /* On the R82600, the pins for memory bits 72:65 - i.e. the * 242 /* On the R82600, the pins for memory bits 72:65 - i.e. the *
252 * EC bits are shared with the pins for COM2 (!), so if COM2 * 243 * EC bits are shared with the pins for COM2 (!), so if COM2 *
253 * is enabled, we assume COM2 is wired up, and thus no EDAC * 244 * is enabled, we assume COM2 is wired up, and thus no EDAC *
254 * is possible. */ 245 * is possible. */
255 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; 246 mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
247
256 if (ecc_on) { 248 if (ecc_on) {
257 if (scrub_disabled) 249 if (scrub_disabled)
258 debugf3("MC: " __FILE__ ": %s(): mci = %p - " 250 debugf3("%s(): mci = %p - Scrubbing disabled! EAP: "
259 "Scrubbing disabled! EAP: %#0x\n", __func__, 251 "%#0x\n", __func__, mci, eapr);
260 mci, eapr);
261 } else 252 } else
262 mci->edac_cap = EDAC_FLAG_NONE; 253 mci->edac_cap = EDAC_FLAG_NONE;
263 254
264 mci->mod_name = BS_MOD_STR; 255 mci->mod_name = EDAC_MOD_STR;
265 mci->mod_ver = "$Revision: 1.1.2.6 $"; 256 mci->mod_ver = "$Revision: 1.1.2.6 $";
266 mci->ctl_name = "R82600"; 257 mci->ctl_name = "R82600";
267 mci->edac_check = r82600_check; 258 mci->edac_check = r82600_check;
@@ -276,23 +267,21 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
276 /* find the DRAM Chip Select Base address and mask */ 267 /* find the DRAM Chip Select Base address and mask */
277 pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar); 268 pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar);
278 269
279 debugf1("MC%d: " __FILE__ ": %s() Row=%d DRBA = %#0x\n", 270 debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx,
280 mci->mc_idx, __func__, index, drbar); 271 __func__, index, drbar);
281 272
282 row_high_limit = ((u32) drbar << 24); 273 row_high_limit = ((u32) drbar << 24);
283/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ 274/* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */
284 275
285 		debugf1("MC%d: " __FILE__ ": %s() Row=%d, " 276 		debugf1("MC%d: %s() Row=%d, Boundary Address=%#0x, Last = "
286 			"Boundary Address=%#0x, Last = %#0x \n", 277 			"%#0x \n", mci->mc_idx, __func__, index,
287 mci->mc_idx, __func__, index, row_high_limit, 278 row_high_limit, row_high_limit_last);
288 row_high_limit_last);
289 279
290 /* Empty row [p.57] */ 280 /* Empty row [p.57] */
291 if (row_high_limit == row_high_limit_last) 281 if (row_high_limit == row_high_limit_last)
292 continue; 282 continue;
293 283
294 row_base = row_high_limit_last; 284 row_base = row_high_limit_last;
295
296 csrow->first_page = row_base >> PAGE_SHIFT; 285 csrow->first_page = row_base >> PAGE_SHIFT;
297 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; 286 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
298 csrow->nr_pages = csrow->last_page - csrow->first_page + 1; 287 csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
@@ -308,31 +297,22 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
308 row_high_limit_last = row_high_limit; 297 row_high_limit_last = row_high_limit;
309 } 298 }
310 299
311 /* clear counters */ 300 r82600_get_error_info(mci, &discard); /* clear counters */
312 /* FIXME should we? */
313 301
314 if (edac_mc_add_mc(mci)) { 302 if (edac_mc_add_mc(mci)) {
315 debugf3("MC: " __FILE__ 303 debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
316 ": %s(): failed edac_mc_add_mc()\n", __func__);
317 goto fail; 304 goto fail;
318 } 305 }
319 306
320 /* get this far and it's successful */ 307 /* get this far and it's successful */
321 308
322 /* Clear error flags to allow next error to be reported [p.62] */
323 /* Test systems seem to always have the UE flag raised on boot */
324
325 eap_init_bits = BIT(0) & BIT(1);
326 if (disable_hardware_scrub) { 309 if (disable_hardware_scrub) {
327 eap_init_bits |= BIT(31); 310 debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n",
328 debugf3("MC: " __FILE__ ": %s(): Disabling Hardware Scrub " 311 __func__);
329 "(scrub on error)\n", __func__); 312 pci_write_bits32(mci->pdev, R82600_EAP, BIT(31), BIT(31));
330 } 313 }
331 314
332 pci_write_bits32(mci->pdev, R82600_EAP, eap_init_bits, 315 debugf3("%s(): success\n", __func__);
333 eap_init_bits);
334
335 debugf3("MC: " __FILE__ ": %s(): success\n", __func__);
336 return 0; 316 return 0;
337 317
338fail: 318fail:
@@ -344,62 +324,60 @@ fail:
344 324
345/* returns count (>= 0), or negative on error */ 325/* returns count (>= 0), or negative on error */
346static int __devinit r82600_init_one(struct pci_dev *pdev, 326static int __devinit r82600_init_one(struct pci_dev *pdev,
347 const struct pci_device_id *ent) 327 const struct pci_device_id *ent)
348{ 328{
349 debugf0("MC: " __FILE__ ": %s()\n", __func__); 329 debugf0("%s()\n", __func__);
350 330
351 /* don't need to call pci_device_enable() */ 331 /* don't need to call pci_device_enable() */
352 return r82600_probe1(pdev, ent->driver_data); 332 return r82600_probe1(pdev, ent->driver_data);
353} 333}
354 334
355
356static void __devexit r82600_remove_one(struct pci_dev *pdev) 335static void __devexit r82600_remove_one(struct pci_dev *pdev)
357{ 336{
358 struct mem_ctl_info *mci; 337 struct mem_ctl_info *mci;
359 338
360 debugf0(__FILE__ ": %s()\n", __func__); 339 debugf0("%s()\n", __func__);
361 340
362 if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) && 341 if ((mci = edac_mc_del_mc(pdev)) == NULL)
363 !edac_mc_del_mc(mci)) 342 return;
364 edac_mc_free(mci);
365}
366 343
344 edac_mc_free(mci);
345}
367 346
368static const struct pci_device_id r82600_pci_tbl[] __devinitdata = { 347static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
369 {PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)}, 348 {
370 {0,} /* 0 terminated list. */ 349 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
350 },
351 {
352 0,
353 } /* 0 terminated list. */
371}; 354};
372 355
373MODULE_DEVICE_TABLE(pci, r82600_pci_tbl); 356MODULE_DEVICE_TABLE(pci, r82600_pci_tbl);
374 357
375
376static struct pci_driver r82600_driver = { 358static struct pci_driver r82600_driver = {
377 .name = BS_MOD_STR, 359 .name = EDAC_MOD_STR,
378 .probe = r82600_init_one, 360 .probe = r82600_init_one,
379 .remove = __devexit_p(r82600_remove_one), 361 .remove = __devexit_p(r82600_remove_one),
380 .id_table = r82600_pci_tbl, 362 .id_table = r82600_pci_tbl,
381}; 363};
382 364
383
384static int __init r82600_init(void) 365static int __init r82600_init(void)
385{ 366{
386 return pci_register_driver(&r82600_driver); 367 return pci_register_driver(&r82600_driver);
387} 368}
388 369
389
390static void __exit r82600_exit(void) 370static void __exit r82600_exit(void)
391{ 371{
392 pci_unregister_driver(&r82600_driver); 372 pci_unregister_driver(&r82600_driver);
393} 373}
394 374
395
396module_init(r82600_init); 375module_init(r82600_init);
397module_exit(r82600_exit); 376module_exit(r82600_exit);
398 377
399
400MODULE_LICENSE("GPL"); 378MODULE_LICENSE("GPL");
401MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. " 379MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. "
402 "on behalf of EADS Astrium"); 380 "on behalf of EADS Astrium");
403MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers"); 381MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
404 382
405module_param(disable_hardware_scrub, bool, 0644); 383module_param(disable_hardware_scrub, bool, 0644);
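
All three EDAC drivers in this patch switch their remove paths to the new edac_mc_del_mc(pdev) calling convention, which detaches the controller and returns it (or NULL), so the caller frees only what was actually registered. The sketch below models that detach-and-return contract in plain userspace C; the single-slot registry and all names are invented for the example.

    /* Detach-and-return sketch: "delete" hands back the object it removed,
     * so the remove path is a simple NULL check followed by free(). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct mc_info { char name[16]; };

    static struct mc_info *registered;      /* stand-in for the EDAC MC list */

    static struct mc_info *del_mc(void)     /* detach and hand back, or NULL */
    {
            struct mc_info *mci = registered;
            registered = NULL;
            return mci;
    }

    static void remove_one(void)
    {
            struct mc_info *mci;

            if ((mci = del_mc()) == NULL)   /* nothing registered: nothing to free */
                    return;

            printf("freeing %s\n", mci->name);
            free(mci);
    }

    int main(void)
    {
            registered = calloc(1, sizeof(*registered));
            if (!registered)
                    return 1;
            strcpy(registered->name, "demo-mc");

            remove_one();   /* frees the entry */
            remove_one();   /* safe no-op the second time */
            return 0;
    }
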
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index 343379f23a53..9b7e4d52ffd4 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -568,20 +568,20 @@ systab_read(struct subsystem *entry, char *buf)
568 if (!entry || !buf) 568 if (!entry || !buf)
569 return -EINVAL; 569 return -EINVAL;
570 570
571 if (efi.mps) 571 if (efi.mps != EFI_INVALID_TABLE_ADDR)
572 str += sprintf(str, "MPS=0x%lx\n", __pa(efi.mps)); 572 str += sprintf(str, "MPS=0x%lx\n", efi.mps);
573 if (efi.acpi20) 573 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
574 str += sprintf(str, "ACPI20=0x%lx\n", __pa(efi.acpi20)); 574 str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
575 if (efi.acpi) 575 if (efi.acpi != EFI_INVALID_TABLE_ADDR)
576 str += sprintf(str, "ACPI=0x%lx\n", __pa(efi.acpi)); 576 str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
577 if (efi.smbios) 577 if (efi.smbios != EFI_INVALID_TABLE_ADDR)
578 str += sprintf(str, "SMBIOS=0x%lx\n", __pa(efi.smbios)); 578 str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
579 if (efi.hcdp) 579 if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
580 str += sprintf(str, "HCDP=0x%lx\n", __pa(efi.hcdp)); 580 str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
581 if (efi.boot_info) 581 if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
582 str += sprintf(str, "BOOTINFO=0x%lx\n", __pa(efi.boot_info)); 582 str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info);
583 if (efi.uga) 583 if (efi.uga != EFI_INVALID_TABLE_ADDR)
584 str += sprintf(str, "UGA=0x%lx\n", __pa(efi.uga)); 584 str += sprintf(str, "UGA=0x%lx\n", efi.uga);
585 585
586 return str - buf; 586 return str - buf;
587} 587}
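
The efivars hunk depends on the reworked EFI table bookkeeping: missing tables are now flagged with a reserved sentinel address rather than 0, and the stored value is already a physical address, so it is printed directly instead of through __pa(). A small standalone sketch of that sentinel convention follows; the constant name, table list and addresses are made up for illustration.

    /* Sentinel-value sketch: validity is tested against a reserved
     * all-ones address rather than against 0. */
    #include <stdio.h>

    #define INVALID_TABLE_ADDR (~0UL)       /* mirrors the EFI_INVALID_TABLE_ADDR idea */

    struct table { const char *name; unsigned long addr; };

    int main(void)
    {
            struct table tables[] = {
                    { "ACPI20", 0x7ff69000UL },
                    { "SMBIOS", INVALID_TABLE_ADDR },       /* not provided by firmware */
                    { "HCDP",   0x7ff68000UL },
            };
            char buf[256] = "";
            char *str = buf;

            for (unsigned i = 0; i < sizeof(tables) / sizeof(tables[0]); i++)
                    if (tables[i].addr != INVALID_TABLE_ADDR)
                            str += sprintf(str, "%s=0x%lx\n", tables[i].name,
                                           tables[i].addr);

            fputs(buf, stdout);
            return 0;
    }
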
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index ae1fb45dbb40..c37baf9448bc 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -89,19 +89,20 @@ efi_setup_pcdp_console(char *cmdline)
89 struct pcdp_uart *uart; 89 struct pcdp_uart *uart;
90 struct pcdp_device *dev, *end; 90 struct pcdp_device *dev, *end;
91 int i, serial = 0; 91 int i, serial = 0;
92 int rc = -ENODEV;
92 93
93 pcdp = efi.hcdp; 94 if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
94 if (!pcdp)
95 return -ENODEV; 95 return -ENODEV;
96 96
97 printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, __pa(pcdp)); 97 pcdp = ioremap(efi.hcdp, 4096);
98 printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
98 99
99 if (strstr(cmdline, "console=hcdp")) { 100 if (strstr(cmdline, "console=hcdp")) {
100 if (pcdp->rev < 3) 101 if (pcdp->rev < 3)
101 serial = 1; 102 serial = 1;
102 } else if (strstr(cmdline, "console=")) { 103 } else if (strstr(cmdline, "console=")) {
103 printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n"); 104 printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n");
104 return -ENODEV; 105 goto out;
105 } 106 }
106 107
107 if (pcdp->rev < 3 && efi_uart_console_only()) 108 if (pcdp->rev < 3 && efi_uart_console_only())
@@ -110,7 +111,8 @@ efi_setup_pcdp_console(char *cmdline)
110 for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) { 111 for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) {
111 if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) { 112 if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) {
112 if (uart->type == PCDP_CONSOLE_UART) { 113 if (uart->type == PCDP_CONSOLE_UART) {
113 return setup_serial_console(uart); 114 rc = setup_serial_console(uart);
115 goto out;
114 } 116 }
115 } 117 }
116 } 118 }
@@ -121,10 +123,13 @@ efi_setup_pcdp_console(char *cmdline)
121 dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) { 123 dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) {
122 if (dev->flags & PCDP_PRIMARY_CONSOLE) { 124 if (dev->flags & PCDP_PRIMARY_CONSOLE) {
123 if (dev->type == PCDP_CONSOLE_VGA) { 125 if (dev->type == PCDP_CONSOLE_VGA) {
124 return setup_vga_console(dev); 126 rc = setup_vga_console(dev);
127 goto out;
125 } 128 }
126 } 129 }
127 } 130 }
128 131
129 return -ENODEV; 132out:
133 iounmap(pcdp);
134 return rc;
130} 135}
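
Because the PCDP table is now reached through ioremap() rather than a kernel-virtual pointer, every early return in efi_setup_pcdp_console() above is converted into "rc = ...; goto out;" so the mapping is always undone. The userspace sketch below shows the same single-exit cleanup shape, with fopen()/fclose() standing in for ioremap()/iounmap(); the function and file names are invented.

    /* Single-exit cleanup sketch: once the resource is mapped, every
     * return funnels through one label that releases it. */
    #include <stdio.h>

    static int parse_table(const char *path, int want_serial)
    {
            int rc = -1;            /* -ENODEV in the original */
            FILE *map;

            map = fopen(path, "r");
            if (!map)
                    return -1;      /* nothing mapped yet: plain return is fine */

            if (!want_serial) {
                    rc = 0;         /* "explicit console=" path: nothing to do */
                    goto out;
            }

            if (fgetc(map) == EOF) {
                    rc = -2;        /* empty table */
                    goto out;
            }

            rc = 0;
    out:
            fclose(map);            /* always undone, like iounmap() above */
            return rc;
    }

    int main(void)
    {
            printf("rc = %d\n", parse_table("/etc/hostname", 1));
            return 0;
    }
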
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c
index 734b121a0554..491e6032bdec 100644
--- a/drivers/ieee1394/highlevel.c
+++ b/drivers/ieee1394/highlevel.c
@@ -306,8 +306,7 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl,
306 u64 align_mask = ~(alignment - 1); 306 u64 align_mask = ~(alignment - 1);
307 307
308 if ((alignment & 3) || (alignment > 0x800000000000ULL) || 308 if ((alignment & 3) || (alignment > 0x800000000000ULL) ||
309 ((hweight32(alignment >> 32) + 309 (hweight64(alignment) != 1)) {
310 hweight32(alignment & 0xffffffff) != 1))) {
311 HPSB_ERR("%s called with invalid alignment: 0x%048llx", 310 HPSB_ERR("%s called with invalid alignment: 0x%048llx",
312 __FUNCTION__, (unsigned long long)alignment); 311 __FUNCTION__, (unsigned long long)alignment);
313 return retval; 312 return retval;
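
The ieee1394 change collapses the split 32-bit population counts into a single hweight64() test: an alignment is a power of two exactly when it has one bit set. The standalone check below illustrates that, using GCC/Clang's __builtin_popcountll() as a stand-in for hweight64().

    /* Power-of-two check via population count. */
    #include <stdio.h>

    static int is_power_of_two(unsigned long long v)
    {
            return __builtin_popcountll(v) == 1;
    }

    int main(void)
    {
            unsigned long long samples[] = { 4, 0x800000000000ULL, 6, 0 };

            for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("%#llx -> %s\n", samples[i],
                           is_power_of_two(samples[i]) ? "power of two" : "not");
            return 0;
    }
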
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index a81f987978c8..46d1fec2cfd8 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -23,7 +23,7 @@
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/spi/spi.h> 24#include <linux/spi/spi.h>
25#include <linux/spi/ads7846.h> 25#include <linux/spi/ads7846.h>
26#include <linux/interrupt.h> 26#include <asm/irq.h>
27 27
28#ifdef CONFIG_ARM 28#ifdef CONFIG_ARM
29#include <asm/mach-types.h> 29#include <asm/mach-types.h>
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile
index 03d8ccd51955..988142c30a6d 100644
--- a/drivers/isdn/Makefile
+++ b/drivers/isdn/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_ISDN_DRV_SC) += sc/
13obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/ 13obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/
14obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/ 14obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/
15obj-$(CONFIG_HYSDN) += hysdn/ 15obj-$(CONFIG_HYSDN) += hysdn/
16obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
new file mode 100644
index 000000000000..53c4fb62ed85
--- /dev/null
+++ b/drivers/isdn/gigaset/Kconfig
@@ -0,0 +1,42 @@
1menu "Siemens Gigaset"
2 depends on ISDN_I4L
3
4config ISDN_DRV_GIGASET
5 tristate "Siemens Gigaset support (isdn)"
6 depends on ISDN_I4L && m
7# depends on ISDN_I4L && MODULES
8 help
9 Say m here if you have a Gigaset or Sinus isdn device.
10
11if ISDN_DRV_GIGASET!=n
12
13config GIGASET_BASE
14 tristate "Gigaset base station support"
15 depends on ISDN_DRV_GIGASET && USB
16 help
17 Say m here if you need to communicate with the base
18 directly via USB.
19
20config GIGASET_M105
21 tristate "Gigaset M105 support"
22 depends on ISDN_DRV_GIGASET && USB
23 help
24 Say m here if you need the driver for the Gigaset M105 device.
25
26config GIGASET_DEBUG
27 bool "Gigaset debugging"
28 help
29 This enables debugging code in the Gigaset drivers.
30 If in doubt, say yes.
31
32config GIGASET_UNDOCREQ
33 bool "Support for undocumented USB requests"
34 help
35 This enables support for USB requests we only know from
36 reverse engineering (currently M105 only). If you need
37 features like configuration mode of M105, say yes. If you
38 care about your device, say no.
39
40endif
41
42endmenu
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile
new file mode 100644
index 000000000000..9b9acf1a21ad
--- /dev/null
+++ b/drivers/isdn/gigaset/Makefile
@@ -0,0 +1,6 @@
1gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o
2usb_gigaset-y := usb-gigaset.o asyncdata.o
3bas_gigaset-y := bas-gigaset.o isocdata.o
4
5obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o
6obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
new file mode 100644
index 000000000000..171f8b703d61
--- /dev/null
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -0,0 +1,597 @@
1/*
2 * Common data handling layer for ser_gigaset and usb_gigaset
3 *
4 * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
5 * Hansjoerg Lipp <hjlipp@web.de>,
6 * Stefan Eilers <Eilers.Stefan@epost.de>.
7 *
8 * =====================================================================
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 * =====================================================================
14 * ToDo: ...
15 * =====================================================================
16 * Version: $Id: asyncdata.c,v 1.2.2.7 2005/11/13 23:05:18 hjlipp Exp $
17 * =====================================================================
18 */
19
20#include "gigaset.h"
21#include <linux/crc-ccitt.h>
22
23//#define GIG_M10x_STUFF_VOICE_DATA
24
25/* check if byte must be stuffed/escaped
26 * I'm not sure which data should be encoded.
27 * Therefore I will go the hard way and decode every value
28 * less than 0x20, the flag sequence and the control escape char.
29 */
30static inline int muststuff(unsigned char c)
31{
32 if (c < PPP_TRANS) return 1;
33 if (c == PPP_FLAG) return 1;
34 if (c == PPP_ESCAPE) return 1;
35 /* other possible candidates: */
36 /* 0x91: XON with parity set */
37 /* 0x93: XOFF with parity set */
38 return 0;
39}
40
41/* == data input =========================================================== */
42
43/* process a block of received bytes in command mode (modem response)
44 * Return value:
45 * number of processed bytes
46 */
47static inline int cmd_loop(unsigned char c, unsigned char *src, int numbytes,
48 struct inbuf_t *inbuf)
49{
50 struct cardstate *cs = inbuf->cs;
51 unsigned cbytes = cs->cbytes;
52 int inputstate = inbuf->inputstate;
53 int startbytes = numbytes;
54
55 for (;;) {
56 cs->respdata[cbytes] = c;
57 if (c == 10 || c == 13) {
58 dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)",
59 __func__, cbytes);
60 cs->cbytes = cbytes;
61 gigaset_handle_modem_response(cs); /* can change cs->dle */
62 cbytes = 0;
63
64 if (cs->dle &&
65 !(inputstate & INS_DLE_command)) {
66 inputstate &= ~INS_command;
67 break;
68 }
69 } else {
70 /* advance in line buffer, checking for overflow */
71 if (cbytes < MAX_RESP_SIZE - 1)
72 cbytes++;
73 else
74 warn("response too large");
75 }
76
77 if (!numbytes)
78 break;
79 c = *src++;
80 --numbytes;
81 if (c == DLE_FLAG &&
82 (cs->dle || inputstate & INS_DLE_command)) {
83 inputstate |= INS_DLE_char;
84 break;
85 }
86 }
87
88 cs->cbytes = cbytes;
89 inbuf->inputstate = inputstate;
90
91 return startbytes - numbytes;
92}
93
94/* process a block of received bytes in lock mode (tty i/f)
95 * Return value:
96 * number of processed bytes
97 */
98static inline int lock_loop(unsigned char *src, int numbytes,
99 struct inbuf_t *inbuf)
100{
101 struct cardstate *cs = inbuf->cs;
102
103 gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src, 0);
104 gigaset_if_receive(cs, src, numbytes);
105
106 return numbytes;
107}
108
109/* process a block of received bytes in HDLC data mode
110 * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
111 * When a frame is complete, check the FCS and pass valid frames to the LL.
112 * If DLE is encountered, return immediately to let the caller handle it.
113 * Return value:
114 * number of processed bytes
115 * numbytes (all bytes processed) on error --FIXME
116 */
117static inline int hdlc_loop(unsigned char c, unsigned char *src, int numbytes,
118 struct inbuf_t *inbuf)
119{
120 struct cardstate *cs = inbuf->cs;
121 struct bc_state *bcs = inbuf->bcs;
122 int inputstate;
123 __u16 fcs;
124 struct sk_buff *skb;
125 unsigned char error;
126 struct sk_buff *compskb;
127 int startbytes = numbytes;
128 int l;
129
130 IFNULLRETVAL(bcs, numbytes);
131 inputstate = bcs->inputstate;
132 fcs = bcs->fcs;
133 skb = bcs->skb;
134 IFNULLRETVAL(skb, numbytes);
135
136 if (unlikely(inputstate & INS_byte_stuff)) {
137 inputstate &= ~INS_byte_stuff;
138 goto byte_stuff;
139 }
140 for (;;) {
141 if (unlikely(c == PPP_ESCAPE)) {
142 if (unlikely(!numbytes)) {
143 inputstate |= INS_byte_stuff;
144 break;
145 }
146 c = *src++;
147 --numbytes;
148 if (unlikely(c == DLE_FLAG &&
149 (cs->dle ||
150 inbuf->inputstate & INS_DLE_command))) {
151 inbuf->inputstate |= INS_DLE_char;
152 inputstate |= INS_byte_stuff;
153 break;
154 }
155byte_stuff:
156 c ^= PPP_TRANS;
157#ifdef CONFIG_GIGASET_DEBUG
158 if (unlikely(!muststuff(c)))
159 dbg(DEBUG_HDLC,
160 "byte stuffed: 0x%02x", c);
161#endif
162 } else if (unlikely(c == PPP_FLAG)) {
163 if (unlikely(inputstate & INS_skip_frame)) {
164 if (!(inputstate & INS_have_data)) { /* 7E 7E */
165 //dbg(DEBUG_HDLC, "(7e)7e------------------------");
166#ifdef CONFIG_GIGASET_DEBUG
167 ++bcs->emptycount;
168#endif
169 } else
170 dbg(DEBUG_HDLC,
171 "7e----------------------------");
172
173 /* end of frame */
174 error = 1;
175 gigaset_rcv_error(NULL, cs, bcs);
176 } else if (!(inputstate & INS_have_data)) { /* 7E 7E */
177 //dbg(DEBUG_HDLC, "(7e)7e------------------------");
178#ifdef CONFIG_GIGASET_DEBUG
179 ++bcs->emptycount;
180#endif
181 break;
182 } else {
183 dbg(DEBUG_HDLC,
184 "7e----------------------------");
185
186 /* end of frame */
187 error = 0;
188
189 if (unlikely(fcs != PPP_GOODFCS)) {
190 err("Packet checksum at %lu failed, "
191 "packet is corrupted (%u bytes)!",
192 bcs->rcvbytes, skb->len);
193 compskb = NULL;
194 gigaset_rcv_error(compskb, cs, bcs);
195 error = 1;
196 } else {
197 if (likely((l = skb->len) > 2)) {
198 skb->tail -= 2;
199 skb->len -= 2;
200 } else {
201 dev_kfree_skb(skb);
202 skb = NULL;
203 inputstate |= INS_skip_frame;
204 if (l == 1) {
205 err("invalid packet size (1)!");
206 error = 1;
207 gigaset_rcv_error(NULL, cs, bcs);
208 }
209 }
210 if (likely(!(error ||
211 (inputstate &
212 INS_skip_frame)))) {
213 gigaset_rcv_skb(skb, cs, bcs);
214 }
215 }
216 }
217
218 if (unlikely(error))
219 if (skb)
220 dev_kfree_skb(skb);
221
222 fcs = PPP_INITFCS;
223 inputstate &= ~(INS_have_data | INS_skip_frame);
224 if (unlikely(bcs->ignore)) {
225 inputstate |= INS_skip_frame;
226 skb = NULL;
227 } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)) {
228 skb_reserve(skb, HW_HDR_LEN);
229 } else {
230 warn("could not allocate new skb");
231 inputstate |= INS_skip_frame;
232 }
233
234 break;
235#ifdef CONFIG_GIGASET_DEBUG
236 } else if (unlikely(muststuff(c))) {
237 /* Should not happen. Possible after ZDLE=1<CR><LF>. */
238 dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
239#endif
240 }
241
242 /* add character */
243
244#ifdef CONFIG_GIGASET_DEBUG
245 if (unlikely(!(inputstate & INS_have_data))) {
246 dbg(DEBUG_HDLC,
247 "7e (%d x) ================", bcs->emptycount);
248 bcs->emptycount = 0;
249 }
250#endif
251
252 inputstate |= INS_have_data;
253
254 if (likely(!(inputstate & INS_skip_frame))) {
255 if (unlikely(skb->len == SBUFSIZE)) {
256 warn("received packet too long");
257 dev_kfree_skb_any(skb);
258 skb = NULL;
259 inputstate |= INS_skip_frame;
260 break;
261 }
262 *gigaset_skb_put_quick(skb, 1) = c;
263 /* *__skb_put (skb, 1) = c; */
264 fcs = crc_ccitt_byte(fcs, c);
265 }
266
267 if (unlikely(!numbytes))
268 break;
269 c = *src++;
270 --numbytes;
271 if (unlikely(c == DLE_FLAG &&
272 (cs->dle ||
273 inbuf->inputstate & INS_DLE_command))) {
274 inbuf->inputstate |= INS_DLE_char;
275 break;
276 }
277 }
278 bcs->inputstate = inputstate;
279 bcs->fcs = fcs;
280 bcs->skb = skb;
281 return startbytes - numbytes;
282}
283
284/* process a block of received bytes in transparent data mode
285 * Invert bytes, undoing byte stuffing and watching for DLE escapes.
286 * If DLE is encountered, return immediately to let the caller handle it.
287 * Return value:
288 * number of processed bytes
289 * numbytes (all bytes processed) on error --FIXME
290 */
291static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
292 struct inbuf_t *inbuf)
293{
294 struct cardstate *cs = inbuf->cs;
295 struct bc_state *bcs = inbuf->bcs;
296 int inputstate;
297 struct sk_buff *skb;
298 int startbytes = numbytes;
299
300 IFNULLRETVAL(bcs, numbytes);
301 inputstate = bcs->inputstate;
302 skb = bcs->skb;
303 IFNULLRETVAL(skb, numbytes);
304
305 for (;;) {
306 /* add character */
307 inputstate |= INS_have_data;
308
309 if (likely(!(inputstate & INS_skip_frame))) {
310 if (unlikely(skb->len == SBUFSIZE)) {
311 //FIXME just pass skb up and allocate a new one
312 warn("received packet too long");
313 dev_kfree_skb_any(skb);
314 skb = NULL;
315 inputstate |= INS_skip_frame;
316 break;
317 }
318 *gigaset_skb_put_quick(skb, 1) = gigaset_invtab[c];
319 }
320
321 if (unlikely(!numbytes))
322 break;
323 c = *src++;
324 --numbytes;
325 if (unlikely(c == DLE_FLAG &&
326 (cs->dle ||
327 inbuf->inputstate & INS_DLE_command))) {
328 inbuf->inputstate |= INS_DLE_char;
329 break;
330 }
331 }
332
333 /* pass data up */
334 if (likely(inputstate & INS_have_data)) {
335 if (likely(!(inputstate & INS_skip_frame))) {
336 gigaset_rcv_skb(skb, cs, bcs);
337 }
338 inputstate &= ~(INS_have_data | INS_skip_frame);
339 if (unlikely(bcs->ignore)) {
340 inputstate |= INS_skip_frame;
341 skb = NULL;
342 } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN))
343 != NULL)) {
344 skb_reserve(skb, HW_HDR_LEN);
345 } else {
346 warn("could not allocate new skb");
347 inputstate |= INS_skip_frame;
348 }
349 }
350
351 bcs->inputstate = inputstate;
352 bcs->skb = skb;
353 return startbytes - numbytes;
354}
355
356/* process a block of data received from the device
357 */
358void gigaset_m10x_input(struct inbuf_t *inbuf)
359{
360 struct cardstate *cs;
361 unsigned tail, head, numbytes;
362 unsigned char *src, c;
363 int procbytes;
364
365 head = atomic_read(&inbuf->head);
366 tail = atomic_read(&inbuf->tail);
367 dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
368
369 if (head != tail) {
370 cs = inbuf->cs;
371 src = inbuf->data + head;
372 numbytes = (head > tail ? RBUFSIZE : tail) - head;
373 dbg(DEBUG_INTR, "processing %u bytes", numbytes);
374
375 while (numbytes) {
376 if (atomic_read(&cs->mstate) == MS_LOCKED) {
377 procbytes = lock_loop(src, numbytes, inbuf);
378 src += procbytes;
379 numbytes -= procbytes;
380 } else {
381 c = *src++;
382 --numbytes;
383 if (c == DLE_FLAG && (cs->dle ||
384 inbuf->inputstate & INS_DLE_command)) {
385 if (!(inbuf->inputstate & INS_DLE_char)) {
386 inbuf->inputstate |= INS_DLE_char;
387 goto nextbyte;
388 }
389 /* <DLE> <DLE> => <DLE> in data stream */
390 inbuf->inputstate &= ~INS_DLE_char;
391 }
392
393 if (!(inbuf->inputstate & INS_DLE_char)) {
394
395				/* FIXME simply set a function pointer in cs depending on the mode [here+hdlc_loop]? */
396				/* FIXME would save the following "if" and allow other protocols */
397 if (inbuf->inputstate & INS_command)
398 procbytes = cmd_loop(c, src, numbytes, inbuf);
399 else if (inbuf->bcs->proto2 == ISDN_PROTO_L2_HDLC)
400 procbytes = hdlc_loop(c, src, numbytes, inbuf);
401 else
402 procbytes = iraw_loop(c, src, numbytes, inbuf);
403
404 src += procbytes;
405 numbytes -= procbytes;
406 } else { /* DLE-char */
407 inbuf->inputstate &= ~INS_DLE_char;
408 switch (c) {
409				case 'X': /*start of command*/
410#ifdef CONFIG_GIGASET_DEBUG
411 if (inbuf->inputstate & INS_command)
412 err("received <DLE> 'X' in command mode");
413#endif
414 inbuf->inputstate |=
415 INS_command | INS_DLE_command;
416 break;
417 case '.': /*end of command*/
418#ifdef CONFIG_GIGASET_DEBUG
419 if (!(inbuf->inputstate & INS_command))
420 err("received <DLE> '.' in hdlc mode");
421#endif
422 inbuf->inputstate &= cs->dle ?
423 ~(INS_DLE_command|INS_command)
424 : ~INS_DLE_command;
425 break;
426				//case DLE_FLAG: /*DLE_FLAG in data stream*/ /* already handled above! */
427 default:
428 err("received 0x10 0x%02x!", (int) c);
429 /* FIXME: reset driver?? */
430 }
431 }
432 }
433nextbyte:
434 if (!numbytes) {
435 /* end of buffer, check for wrap */
436 if (head > tail) {
437 head = 0;
438 src = inbuf->data;
439 numbytes = tail;
440 } else {
441 head = tail;
442 break;
443 }
444 }
445 }
446
447 dbg(DEBUG_INTR, "setting head to %u", head);
448 atomic_set(&inbuf->head, head);
449 }
450}
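The head/tail handling above is the consumer side of a fixed-size ring buffer: process the contiguous chunk up to the end of the buffer (or up to tail), then wrap around and continue. A minimal user-space sketch of the same arithmetic follows; consume() is a stand-in for the cmd_loop()/hdlc_loop()/iraw_loop() processors and RBUFSIZE is a placeholder value (the real one comes from gigaset.h).

#include <stdio.h>

#define RBUFSIZE 256	/* placeholder; the real size is defined in gigaset.h */

/* stand-in for the *_loop() processors: consume up to n bytes,
 * return how many were actually processed */
static unsigned consume(const unsigned char *src, unsigned n)
{
	(void) src;
	return n;		/* the sketch always processes everything */
}

/* drain data[head..tail) with wrap-around, as gigaset_m10x_input() does */
static unsigned drain(unsigned char *data, unsigned head, unsigned tail)
{
	unsigned total = 0;

	while (head != tail) {
		/* contiguous chunk: up to end of buffer, or up to tail */
		unsigned numbytes = (head > tail ? RBUFSIZE : tail) - head;
		unsigned done = consume(data + head, numbytes);

		total += done;
		head += done;
		if (head == RBUFSIZE)
			head = 0;	/* wrap to start of buffer */
		if (done < numbytes)
			break;		/* a real processor may stop early (DLE) */
	}
	return total;
}

int main(void)
{
	unsigned char data[RBUFSIZE] = { 0 };

	/* producer wrote 20 bytes that wrapped: 10 at the end, 10 at the start */
	printf("%u bytes drained\n", drain(data, RBUFSIZE - 10, 10));
	return 0;
}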
451
452
453/* == data output ========================================================== */
454
455/* Encoding of a PPP packet into an octet stuffed HDLC frame
456 * with FCS, opening and closing flags.
457 * parameters:
458 * skb skb containing original packet (freed upon return)
459 * head number of headroom bytes to allocate in result skb
460 * tail number of tailroom bytes to allocate in result skb
461 * Return value:
462 * pointer to newly allocated skb containing the result frame
463 */
464static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail)
465{
466 struct sk_buff *hdlc_skb;
467 __u16 fcs;
468 unsigned char c;
469 unsigned char *cp;
470 int len;
471 unsigned int stuf_cnt;
472
473 stuf_cnt = 0;
474 fcs = PPP_INITFCS;
475 cp = skb->data;
476 len = skb->len;
477 while (len--) {
478 if (muststuff(*cp))
479 stuf_cnt++;
480 fcs = crc_ccitt_byte(fcs, *cp++);
481 }
482 fcs ^= 0xffff; /* complement */
483
484 /* size of new buffer: original size + number of stuffing bytes
485 * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
486 */
487 hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + tail + head);
488 if (!hdlc_skb) {
489 err("unable to allocate memory for HDLC encoding!");
490 dev_kfree_skb(skb);
491 return NULL;
492 }
493 skb_reserve(hdlc_skb, head);
494
495 /* Copy acknowledge request into new skb */
496 memcpy(hdlc_skb->head, skb->head, 2);
497
498 /* Add flag sequence in front of everything.. */
499 *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
500
501 /* Perform byte stuffing while copying data. */
502 while (skb->len--) {
503 if (muststuff(*skb->data)) {
504 *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
505 *(skb_put(hdlc_skb, 1)) = (*skb->data++) ^ PPP_TRANS;
506 } else
507 *(skb_put(hdlc_skb, 1)) = *skb->data++;
508 }
509
510 /* Finally add FCS (byte stuffed) and flag sequence */
511 c = (fcs & 0x00ff); /* least significant byte first */
512 if (muststuff(c)) {
513 *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
514 c ^= PPP_TRANS;
515 }
516 *(skb_put(hdlc_skb, 1)) = c;
517
518 c = ((fcs >> 8) & 0x00ff);
519 if (muststuff(c)) {
520 *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE;
521 c ^= PPP_TRANS;
522 }
523 *(skb_put(hdlc_skb, 1)) = c;
524
525 *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
526
527 dev_kfree_skb(skb);
528 return hdlc_skb;
529}
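For reference, the framing produced by the encoder above can be reproduced in user space. The sketch below uses the standard RFC 1662 values for the PPP_* constants and a bitwise FCS-16 routine as a stand-in for the kernel's crc_ccitt_byte(); the must_stuff() predicate here escapes only the flag and escape octets, which is an assumption and may be narrower than the driver's muststuff().

#include <stdio.h>

#define PPP_FLAG    0x7e
#define PPP_ESCAPE  0x7d
#define PPP_TRANS   0x20
#define PPP_INITFCS 0xffff

/* bitwise FCS-16 (reflected polynomial 0x8408), user-space stand-in
 * for crc_ccitt_byte() */
static unsigned short fcs16_byte(unsigned short fcs, unsigned char c)
{
	int i;

	fcs ^= c;
	for (i = 0; i < 8; i++)
		fcs = (fcs & 1) ? (fcs >> 1) ^ 0x8408 : fcs >> 1;
	return fcs;
}

/* assumption: escape only the two reserved octets */
static int must_stuff(unsigned char c)
{
	return c == PPP_FLAG || c == PPP_ESCAPE;
}

/* append one octet, byte stuffed if necessary */
static size_t put_stuffed(unsigned char *out, size_t pos, unsigned char c)
{
	if (must_stuff(c)) {
		out[pos++] = PPP_ESCAPE;
		c ^= PPP_TRANS;
	}
	out[pos++] = c;
	return pos;
}

/* build <FLAG> stuffed(payload) stuffed(FCS, LSB first) <FLAG> */
static size_t hdlc_frame(const unsigned char *in, size_t len, unsigned char *out)
{
	unsigned short fcs = PPP_INITFCS;
	size_t i, pos = 0;

	out[pos++] = PPP_FLAG;
	for (i = 0; i < len; i++) {
		fcs = fcs16_byte(fcs, in[i]);	/* FCS over unstuffed data */
		pos = put_stuffed(out, pos, in[i]);
	}
	fcs ^= 0xffff;				/* one's complement */
	pos = put_stuffed(out, pos, fcs & 0xff);	/* LSB first */
	pos = put_stuffed(out, pos, fcs >> 8);
	out[pos++] = PPP_FLAG;
	return pos;
}

int main(void)
{
	unsigned char payload[] = { 0x7e, 0x41, 0x7d }, frame[16];
	size_t i, n = hdlc_frame(payload, sizeof(payload), frame);

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}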
530
531/* Encoding of a raw packet into an octet stuffed bit inverted frame
532 * parameters:
533 * skb skb containing original packet (freed upon return)
534 * head number of headroom bytes to allocate in result skb
535 * tail number of tailroom bytes to allocate in result skb
536 * Return value:
537 * pointer to newly allocated skb containing the result frame
538 */
539static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
540{
541 struct sk_buff *iraw_skb;
542 unsigned char c;
543 unsigned char *cp;
544 int len;
545
546 /* worst case: every byte must be stuffed */
547 iraw_skb = dev_alloc_skb(2*skb->len + tail + head);
548 if (!iraw_skb) {
549		err("unable to allocate memory for raw encoding!");
550 dev_kfree_skb(skb);
551 return NULL;
552 }
553 skb_reserve(iraw_skb, head);
554
555 cp = skb->data;
556 len = skb->len;
557 while (len--) {
558 c = gigaset_invtab[*cp++];
559 if (c == DLE_FLAG)
560 *(skb_put(iraw_skb, 1)) = c;
561 *(skb_put(iraw_skb, 1)) = c;
562 }
563 dev_kfree_skb(skb);
564 return iraw_skb;
565}
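The worst-case sizing comment above (2 * skb->len) follows from the escaping rule: any output octet that equals DLE is sent twice. The user-space sketch below illustrates only that doubling; gigaset_invtab[] is the driver's per-byte transformation table (defined elsewhere and not shown in this file), so it is replaced here by an identity stand-in, and the DLE_FLAG value 0x10 (ASCII DLE) is an assumption.

#include <stdio.h>

#define DLE_FLAG 0x10	/* assumption: ASCII DLE */

/* stand-in for gigaset_invtab[]: identity here; the real table is elsewhere */
static unsigned char invtab(unsigned char c)
{
	return c;
}

/* transform each byte and double any resulting DLE octet, as iraw_encode() does */
static size_t iraw_frame(const unsigned char *in, size_t len, unsigned char *out)
{
	size_t i, pos = 0;

	for (i = 0; i < len; i++) {
		unsigned char c = invtab(in[i]);

		if (c == DLE_FLAG)
			out[pos++] = c;	/* escape by doubling */
		out[pos++] = c;
	}
	return pos;
}

int main(void)
{
	unsigned char in[] = { 0x10, 0x41 }, out[8];
	size_t i, n = iraw_frame(in, sizeof(in), out);

	for (i = 0; i < n; i++)
		printf("%02x ", out[i]);
	printf("\n");	/* prints: 10 10 41 */
	return 0;
}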
566
567/* gigaset_send_skb
568 * called by common.c to queue an skb for sending
569 * and start transmission if necessary
570 * parameters:
571 * B Channel control structure
572 * skb
573 * Return value:
574 * number of bytes accepted for sending
575 * (skb->len if ok, 0 if out of buffer space)
576 *	or error code (< 0, e.g. -EINVAL)
577 */
578int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
579{
580 unsigned len;
581
582 IFNULLRETVAL(bcs, -EFAULT);
583 IFNULLRETVAL(skb, -EFAULT);
584 len = skb->len;
585
586 if (bcs->proto2 == ISDN_PROTO_L2_HDLC)
587 skb = HDLC_Encode(skb, HW_HDR_LEN, 0);
588 else
589 skb = iraw_encode(skb, HW_HDR_LEN, 0);
590 if (!skb)
591 return -ENOMEM;
592
593 skb_queue_tail(&bcs->squeue, skb);
594 tasklet_schedule(&bcs->cs->write_tasklet);
595
596 return len; /* ok so far */
597}
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
new file mode 100644
index 000000000000..31f0f07832bc
--- /dev/null
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -0,0 +1,2365 @@
1/*
2 * USB driver for Gigaset 307x base via direct USB connection.
3 *
4 * Copyright (c) 2001 by Hansjoerg Lipp <hjlipp@web.de>,
5 * Tilman Schmidt <tilman@imap.cc>,
6 * Stefan Eilers <Eilers.Stefan@epost.de>.
7 *
8 * Based on usb-gigaset.c.
9 *
10 * =====================================================================
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2 of
14 * the License, or (at your option) any later version.
15 * =====================================================================
16 * ToDo: ...
17 * =====================================================================
18 * Version: $Id: bas-gigaset.c,v 1.52.4.19 2006/02/04 18:28:16 hjlipp Exp $
19 * =====================================================================
20 */
21
22#include "gigaset.h"
23
24#include <linux/errno.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/timer.h>
28#include <linux/usb.h>
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31
32/* Version Information */
33#define DRIVER_AUTHOR "Tilman Schmidt <tilman@imap.cc>, Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers <Eilers.Stefan@epost.de>"
34#define DRIVER_DESC "USB Driver for Gigaset 307x"
35
36
37/* Module parameters */
38
39static int startmode = SM_ISDN;
40static int cidmode = 1;
41
42module_param(startmode, int, S_IRUGO);
43module_param(cidmode, int, S_IRUGO);
44MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
45MODULE_PARM_DESC(cidmode, "Call-ID mode");
46
47#define GIGASET_MINORS 1
48#define GIGASET_MINOR 16
49#define GIGASET_MODULENAME "bas_gigaset"
50#define GIGASET_DEVFSNAME "gig/bas/"
51#define GIGASET_DEVNAME "ttyGB"
52
53#define IF_WRITEBUF 256 //FIXME
54
55/* Values for the Gigaset 307x */
56#define USB_GIGA_VENDOR_ID 0x0681
57#define USB_GIGA_PRODUCT_ID 0x0001
58#define USB_4175_PRODUCT_ID 0x0002
59#define USB_SX303_PRODUCT_ID 0x0021
60#define USB_SX353_PRODUCT_ID 0x0022
61
62/* table of devices that work with this driver */
63static struct usb_device_id gigaset_table [] = {
64 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_GIGA_PRODUCT_ID) },
65 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_4175_PRODUCT_ID) },
66 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
67 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX353_PRODUCT_ID) },
68 { } /* Terminating entry */
69};
70
71MODULE_DEVICE_TABLE(usb, gigaset_table);
72
73/* Get a minor range for your devices from the usb maintainer */
74#define USB_SKEL_MINOR_BASE 200
75
76/*======================= local function prototypes =============================*/
77
78/* This function is called if a new device is connected to the USB port. It
79 * checks whether this new device belongs to this driver.
80 */
81static int gigaset_probe(struct usb_interface *interface,
82 const struct usb_device_id *id);
83
84/* Function will be called if the device is unplugged */
85static void gigaset_disconnect(struct usb_interface *interface);
86
87
88/*==============================================================================*/
89
90struct bas_cardstate {
91 struct usb_device *udev; /* USB device pointer */
92 struct usb_interface *interface; /* interface for this device */
93 unsigned char minor; /* starting minor number */
94
95 struct urb *urb_ctrl; /* control pipe default URB */
96 struct usb_ctrlrequest dr_ctrl;
97 struct timer_list timer_ctrl; /* control request timeout */
98
99 struct timer_list timer_atrdy; /* AT command ready timeout */
100 struct urb *urb_cmd_out; /* for sending AT commands */
101 struct usb_ctrlrequest dr_cmd_out;
102 int retry_cmd_out;
103
104 struct urb *urb_cmd_in; /* for receiving AT replies */
105 struct usb_ctrlrequest dr_cmd_in;
106 struct timer_list timer_cmd_in; /* receive request timeout */
107 unsigned char *rcvbuf; /* AT reply receive buffer */
108
109 struct urb *urb_int_in; /* URB for interrupt pipe */
110 unsigned char int_in_buf[3];
111
112 spinlock_t lock; /* locks all following */
113 atomic_t basstate; /* bitmap (BS_*) */
114 int pending; /* uncompleted base request */
115 int rcvbuf_size; /* size of AT receive buffer */
116 /* 0: no receive in progress */
117 int retry_cmd_in; /* receive req retry count */
118};
119
120/* status of direct USB connection to 307x base (bits in basstate) */
121#define BS_ATOPEN 0x001
122#define BS_B1OPEN 0x002
123#define BS_B2OPEN 0x004
124#define BS_ATREADY 0x008
125#define BS_INIT 0x010
126#define BS_ATTIMER 0x020
127
128
129static struct gigaset_driver *driver = NULL;
130static struct cardstate *cardstate = NULL;
131
132/* usb specific object needed to register this driver with the usb subsystem */
133static struct usb_driver gigaset_usb_driver = {
134 .name = GIGASET_MODULENAME,
135 .probe = gigaset_probe,
136 .disconnect = gigaset_disconnect,
137 .id_table = gigaset_table,
138};
139
140/* get message text for USB status code
141 */
142static char *get_usb_statmsg(int status)
143{
144 static char unkmsg[28];
145
146 switch (status) {
147 case 0:
148 return "success";
149 case -ENOENT:
150 return "canceled";
151 case -ECONNRESET:
152 return "canceled (async)";
153 case -EINPROGRESS:
154 return "pending";
155 case -EPROTO:
156 return "bit stuffing or unknown USB error";
157 case -EILSEQ:
158 return "Illegal byte sequence (CRC mismatch)";
159 case -EPIPE:
160 return "babble detect or endpoint stalled";
161 case -ENOSR:
162 return "buffer error";
163 case -ETIMEDOUT:
164 return "timed out";
165 case -ENODEV:
166 return "device not present";
167 case -EREMOTEIO:
168 return "short packet detected";
169 case -EXDEV:
170 return "partial isochronous transfer";
171 case -EINVAL:
172 return "invalid argument";
173 case -ENXIO:
174 return "URB already queued";
175 case -EAGAIN:
176 return "isochronous start frame too early or too much scheduled";
177 case -EFBIG:
178 return "too many isochronous frames requested";
179 case -EMSGSIZE:
180 return "endpoint message size zero";
181 case -ESHUTDOWN:
182 return "endpoint shutdown";
183 case -EBUSY:
184 return "another request pending";
185 default:
186 snprintf(unkmsg, sizeof(unkmsg), "unknown error %d", status);
187 return unkmsg;
188 }
189}
190
191/* usb_pipetype_str
192 * retrieve string representation of USB pipe type
193 */
194static inline char *usb_pipetype_str(int pipe)
195{
196 if (usb_pipeisoc(pipe))
197 return "Isoc";
198 if (usb_pipeint(pipe))
199 return "Int";
200 if (usb_pipecontrol(pipe))
201 return "Ctrl";
202 if (usb_pipebulk(pipe))
203 return "Bulk";
204 return "?";
205}
206
207/* dump_urb
208 * write content of URB to syslog for debugging
209 */
210static inline void dump_urb(enum debuglevel level, const char *tag,
211 struct urb *urb)
212{
213#ifdef CONFIG_GIGASET_DEBUG
214 int i;
215 IFNULLRET(tag);
216 dbg(level, "%s urb(0x%08lx)->{", tag, (unsigned long) urb);
217 if (urb) {
218 dbg(level,
219 " dev=0x%08lx, pipe=%s:EP%d/DV%d:%s, "
220 "status=%d, hcpriv=0x%08lx, transfer_flags=0x%x,",
221 (unsigned long) urb->dev,
222 usb_pipetype_str(urb->pipe),
223 usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe),
224 usb_pipein(urb->pipe) ? "in" : "out",
225 urb->status, (unsigned long) urb->hcpriv,
226 urb->transfer_flags);
227 dbg(level,
228 " transfer_buffer=0x%08lx[%d], actual_length=%d, "
229 "bandwidth=%d, setup_packet=0x%08lx,",
230 (unsigned long) urb->transfer_buffer,
231 urb->transfer_buffer_length, urb->actual_length,
232 urb->bandwidth, (unsigned long) urb->setup_packet);
233 dbg(level,
234 " start_frame=%d, number_of_packets=%d, interval=%d, "
235 "error_count=%d,",
236 urb->start_frame, urb->number_of_packets, urb->interval,
237 urb->error_count);
238 dbg(level,
239 " context=0x%08lx, complete=0x%08lx, iso_frame_desc[]={",
240 (unsigned long) urb->context,
241 (unsigned long) urb->complete);
242 for (i = 0; i < urb->number_of_packets; i++) {
243 struct usb_iso_packet_descriptor *pifd = &urb->iso_frame_desc[i];
244 dbg(level,
245 " {offset=%u, length=%u, actual_length=%u, "
246 "status=%u}",
247 pifd->offset, pifd->length, pifd->actual_length,
248 pifd->status);
249 }
250 }
251 dbg(level, "}}");
252#endif
253}
254
255/* read/set modem control bits etc. (m10x only) */
256static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
257 unsigned new_state)
258{
259 return -EINVAL;
260}
261
262static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
263{
264 return -EINVAL;
265}
266
267static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
268{
269 return -EINVAL;
270}
271
272/* error_hangup
273 * hang up any existing connection because of an unrecoverable error
274 * This function may be called from any context and takes care of scheduling
275 * the necessary actions for execution outside of interrupt context.
276 * argument:
277 * B channel control structure
278 */
279static inline void error_hangup(struct bc_state *bcs)
280{
281 struct cardstate *cs = bcs->cs;
282
283 dbg(DEBUG_ANY,
284 "%s: scheduling HUP for channel %d", __func__, bcs->channel);
285
286 if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
287 //FIXME what should we do?
288 return;
289 }
290
291 gigaset_schedule_event(cs);
292}
293
294/* error_reset
295 * reset Gigaset device because of an unrecoverable error
296 * This function may be called from any context and takes care of scheduling
297 * the necessary actions for execution outside of interrupt context.
298 * argument:
299 * controller state structure
300 */
301static inline void error_reset(struct cardstate *cs)
302{
303 //FIXME try to recover without bothering the user
304 err("unrecoverable error - please disconnect the Gigaset base to reset");
305}
306
307/* check_pending
308 * check for completion of pending control request
309 * parameter:
310 *	ucs	hardware specific controller state structure
311 *		(its pending request is checked against the basstate bitmap)
312 */
313static void check_pending(struct bas_cardstate *ucs)
314{
315 unsigned long flags;
316
317 IFNULLRET(ucs);
318 IFNULLRET(cardstate);
319
320 spin_lock_irqsave(&ucs->lock, flags);
321 switch (ucs->pending) {
322 case 0:
323 break;
324 case HD_OPEN_ATCHANNEL:
325 if (atomic_read(&ucs->basstate) & BS_ATOPEN)
326 ucs->pending = 0;
327 break;
328 case HD_OPEN_B1CHANNEL:
329 if (atomic_read(&ucs->basstate) & BS_B1OPEN)
330 ucs->pending = 0;
331 break;
332 case HD_OPEN_B2CHANNEL:
333 if (atomic_read(&ucs->basstate) & BS_B2OPEN)
334 ucs->pending = 0;
335 break;
336 case HD_CLOSE_ATCHANNEL:
337 if (!(atomic_read(&ucs->basstate) & BS_ATOPEN))
338 ucs->pending = 0;
339 //wake_up_interruptible(cs->initwait);
340 //FIXME need own wait queue?
341 break;
342 case HD_CLOSE_B1CHANNEL:
343 if (!(atomic_read(&ucs->basstate) & BS_B1OPEN))
344 ucs->pending = 0;
345 break;
346 case HD_CLOSE_B2CHANNEL:
347 if (!(atomic_read(&ucs->basstate) & BS_B2OPEN))
348 ucs->pending = 0;
349 break;
350 case HD_DEVICE_INIT_ACK: /* no reply expected */
351 ucs->pending = 0;
352 break;
353 /* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE
354 * are handled separately and should never end up here
355 */
356 default:
357 warn("unknown pending request 0x%02x cleared", ucs->pending);
358 ucs->pending = 0;
359 }
360
361 if (!ucs->pending)
362 del_timer(&ucs->timer_ctrl);
363
364 spin_unlock_irqrestore(&ucs->lock, flags);
365}
366
367/* cmd_in_timeout
368 * timeout routine for command input request
369 * argument:
370 * controller state structure
371 */
372static void cmd_in_timeout(unsigned long data)
373{
374 struct cardstate *cs = (struct cardstate *) data;
375 struct bas_cardstate *ucs;
376 unsigned long flags;
377
378 IFNULLRET(cs);
379 ucs = cs->hw.bas;
380 IFNULLRET(ucs);
381
382 spin_lock_irqsave(&cs->lock, flags);
383 if (!atomic_read(&cs->connected)) {
384 dbg(DEBUG_USBREQ, "%s: disconnected", __func__);
385 spin_unlock_irqrestore(&cs->lock, flags);
386 return;
387 }
388 if (!ucs->rcvbuf_size) {
389 dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__);
390 spin_unlock_irqrestore(&cs->lock, flags);
391 return;
392 }
393 spin_unlock_irqrestore(&cs->lock, flags);
394
395 err("timeout reading AT response");
396 error_reset(cs); //FIXME retry?
397}
398
399
400static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs);
401
402/* atread_submit
403 * submit an HD_READ_ATMESSAGE command URB
404 * parameters:
405 * cs controller state structure
406 * timeout timeout in 1/10 sec., 0: none
407 * return value:
408 * 0 on success
409 * -EINVAL if a NULL pointer is encountered somewhere
410 * -EBUSY if another request is pending
411 * any URB submission error code
412 */
413static int atread_submit(struct cardstate *cs, int timeout)
414{
415 struct bas_cardstate *ucs;
416 int ret;
417
418 IFNULLRETVAL(cs, -EINVAL);
419 ucs = cs->hw.bas;
420 IFNULLRETVAL(ucs, -EINVAL);
421 IFNULLRETVAL(ucs->urb_cmd_in, -EINVAL);
422
423 dbg(DEBUG_USBREQ, "-------> HD_READ_ATMESSAGE (%d)", ucs->rcvbuf_size);
424
425 if (ucs->urb_cmd_in->status == -EINPROGRESS) {
426 err("could not submit HD_READ_ATMESSAGE: URB busy");
427 return -EBUSY;
428 }
429
430 ucs->dr_cmd_in.bRequestType = IN_VENDOR_REQ;
431 ucs->dr_cmd_in.bRequest = HD_READ_ATMESSAGE;
432 ucs->dr_cmd_in.wValue = 0;
433 ucs->dr_cmd_in.wIndex = 0;
434 ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
435 usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
436 usb_rcvctrlpipe(ucs->udev, 0),
437 (unsigned char*) & ucs->dr_cmd_in,
438 ucs->rcvbuf, ucs->rcvbuf_size,
439 read_ctrl_callback, cs->inbuf);
440
441 if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) {
442 err("could not submit HD_READ_ATMESSAGE: %s",
443 get_usb_statmsg(ret));
444 return ret;
445 }
446
447 if (timeout > 0) {
448 dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
449 ucs->timer_cmd_in.expires = jiffies + timeout * HZ / 10;
450 ucs->timer_cmd_in.data = (unsigned long) cs;
451 ucs->timer_cmd_in.function = cmd_in_timeout;
452 add_timer(&ucs->timer_cmd_in);
453 }
454 return 0;
455}
456
457static void stopurbs(struct bas_bc_state *);
458static int start_cbsend(struct cardstate *);
459
460/* set/clear bits in base connection state
461 */
462static inline void update_basstate(struct bas_cardstate *ucs,
463 int set, int clear)
464{
465 unsigned long flags;
466 int state;
467
468 spin_lock_irqsave(&ucs->lock, flags);
469 state = atomic_read(&ucs->basstate);
470 state &= ~clear;
471 state |= set;
472 atomic_set(&ucs->basstate, state);
473 spin_unlock_irqrestore(&ucs->lock, flags);
474}
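update_basstate() holds the ucs lock around the read-modify-write because the atomic_read()/atomic_set() pair by itself would not make the clear-and-set a single transition. A user-space sketch of the same pattern, with a pthread mutex standing in for the spinlock and the BS_* values taken from the definitions above:

#include <pthread.h>
#include <stdio.h>

#define BS_ATOPEN  0x001
#define BS_ATREADY 0x008
#define BS_ATTIMER 0x020

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int basstate;

/* set/clear bits in one locked read-modify-write, as update_basstate() does */
static void update_state(int set, int clear)
{
	pthread_mutex_lock(&lock);
	basstate = (basstate & ~clear) | set;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	update_state(BS_ATOPEN | BS_ATTIMER, 0);
	update_state(BS_ATREADY, BS_ATTIMER);	/* AT ready, timer cleared */
	printf("basstate = 0x%03x\n", basstate);	/* prints 0x009 */
	return 0;
}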
475
476
477/* read_int_callback
478 * USB completion handler for interrupt pipe input
479 * called by the USB subsystem in interrupt context
480 * parameter:
481 * urb USB request block
482 * urb->context = controller state structure
483 */
484static void read_int_callback(struct urb *urb, struct pt_regs *regs)
485{
486 struct cardstate *cs;
487 struct bas_cardstate *ucs;
488 struct bc_state *bcs;
489 unsigned long flags;
490 int status;
491 unsigned l;
492 int channel;
493
494 IFNULLRET(urb);
495 cs = (struct cardstate *) urb->context;
496 IFNULLRET(cs);
497 ucs = cs->hw.bas;
498 IFNULLRET(ucs);
499
500 if (unlikely(!atomic_read(&cs->connected))) {
501 warn("%s: disconnected", __func__);
502 return;
503 }
504
505 switch (urb->status) {
506 case 0: /* success */
507 break;
508 case -ENOENT: /* canceled */
509 case -ECONNRESET: /* canceled (async) */
510 case -EINPROGRESS: /* pending */
511 /* ignore silently */
512 dbg(DEBUG_USBREQ,
513 "%s: %s", __func__, get_usb_statmsg(urb->status));
514 return;
515 default: /* severe trouble */
516 warn("interrupt read: %s", get_usb_statmsg(urb->status));
517 //FIXME corrective action? resubmission always ok?
518 goto resubmit;
519 }
520
521 l = (unsigned) ucs->int_in_buf[1] +
522 (((unsigned) ucs->int_in_buf[2]) << 8);
523
524 dbg(DEBUG_USBREQ,
525 "<-------%d: 0x%02x (%u [0x%02x 0x%02x])", urb->actual_length,
526 (int)ucs->int_in_buf[0], l,
527 (int)ucs->int_in_buf[1], (int)ucs->int_in_buf[2]);
528
529 channel = 0;
530
531 switch (ucs->int_in_buf[0]) {
532 case HD_DEVICE_INIT_OK:
533 update_basstate(ucs, BS_INIT, 0);
534 break;
535
536 case HD_READY_SEND_ATDATA:
537 del_timer(&ucs->timer_atrdy);
538 update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
539 start_cbsend(cs);
540 break;
541
542 case HD_OPEN_B2CHANNEL_ACK:
543 ++channel;
544 case HD_OPEN_B1CHANNEL_ACK:
545 bcs = cs->bcs + channel;
546 update_basstate(ucs, BS_B1OPEN << channel, 0);
547 gigaset_bchannel_up(bcs);
548 break;
549
550 case HD_OPEN_ATCHANNEL_ACK:
551 update_basstate(ucs, BS_ATOPEN, 0);
552 start_cbsend(cs);
553 break;
554
555 case HD_CLOSE_B2CHANNEL_ACK:
556 ++channel;
557 case HD_CLOSE_B1CHANNEL_ACK:
558 bcs = cs->bcs + channel;
559 update_basstate(ucs, 0, BS_B1OPEN << channel);
560 stopurbs(bcs->hw.bas);
561 gigaset_bchannel_down(bcs);
562 break;
563
564 case HD_CLOSE_ATCHANNEL_ACK:
565 update_basstate(ucs, 0, BS_ATOPEN);
566 break;
567
568 case HD_B2_FLOW_CONTROL:
569 ++channel;
570 case HD_B1_FLOW_CONTROL:
571 bcs = cs->bcs + channel;
572 atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES,
573 &bcs->hw.bas->corrbytes);
574 dbg(DEBUG_ISO,
575 "Flow control (channel %d, sub %d): 0x%02x => %d",
576 channel, bcs->hw.bas->numsub, l,
577 atomic_read(&bcs->hw.bas->corrbytes));
578 break;
579
580 case HD_RECEIVEATDATA_ACK: /* AT response ready to be received */
581 if (!l) {
582 warn("HD_RECEIVEATDATA_ACK with length 0 ignored");
583 break;
584 }
585 spin_lock_irqsave(&cs->lock, flags);
586 if (ucs->rcvbuf_size) {
587 spin_unlock_irqrestore(&cs->lock, flags);
588 err("receive AT data overrun, %d bytes lost", l);
589 error_reset(cs); //FIXME reschedule
590 break;
591 }
592 if ((ucs->rcvbuf = kmalloc(l, GFP_ATOMIC)) == NULL) {
593 spin_unlock_irqrestore(&cs->lock, flags);
594 err("%s: out of memory, %d bytes lost", __func__, l);
595 error_reset(cs); //FIXME reschedule
596 break;
597 }
598 ucs->rcvbuf_size = l;
599 ucs->retry_cmd_in = 0;
600 if ((status = atread_submit(cs, BAS_TIMEOUT)) < 0) {
601 kfree(ucs->rcvbuf);
602 ucs->rcvbuf = NULL;
603 ucs->rcvbuf_size = 0;
604 error_reset(cs); //FIXME reschedule
605 }
606 spin_unlock_irqrestore(&cs->lock, flags);
607 break;
608
609 case HD_RESET_INTERRUPT_PIPE_ACK:
610 dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK");
611 break;
612
613 case HD_SUSPEND_END:
614 dbg(DEBUG_USBREQ, "HD_SUSPEND_END");
615 break;
616
617 default:
618 warn("unknown Gigaset signal 0x%02x (%u) ignored",
619 (int) ucs->int_in_buf[0], l);
620 }
621
622 check_pending(ucs);
623
624resubmit:
625 status = usb_submit_urb(urb, SLAB_ATOMIC);
626 if (unlikely(status)) {
627 err("could not resubmit interrupt URB: %s",
628 get_usb_statmsg(status));
629 error_reset(cs);
630 }
631}
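As the callback above shows, each interrupt pipe message is three bytes: an event code in byte 0 and a 16-bit little-endian parameter in bytes 1 and 2. A trivial stand-alone decoder with a hypothetical example message:

#include <stdio.h>

/* decode a 3-byte interrupt message as read_int_callback() does:
 * byte 0 = event code, bytes 1..2 = 16-bit little-endian parameter */
static void decode_int_msg(const unsigned char buf[3],
			   unsigned *event, unsigned *param)
{
	*event = buf[0];
	*param = (unsigned) buf[1] | ((unsigned) buf[2] << 8);
}

int main(void)
{
	unsigned char msg[3] = { 0x02, 0x34, 0x12 };	/* hypothetical message */
	unsigned event, param;

	decode_int_msg(msg, &event, &param);
	printf("event 0x%02x, parameter %u (0x%04x)\n", event, param, param);
	return 0;
}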
632
633/* read_ctrl_callback
634 * USB completion handler for control pipe input
635 * called by the USB subsystem in interrupt context
636 * parameter:
637 * urb USB request block
638 * urb->context = inbuf structure for controller state
639 */
640static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs)
641{
642 struct cardstate *cs;
643 struct bas_cardstate *ucs;
644 unsigned numbytes;
645 unsigned long flags;
646 struct inbuf_t *inbuf;
647 int have_data = 0;
648
649 IFNULLRET(urb);
650 inbuf = (struct inbuf_t *) urb->context;
651 IFNULLRET(inbuf);
652 cs = inbuf->cs;
653 IFNULLRET(cs);
654 ucs = cs->hw.bas;
655 IFNULLRET(ucs);
656
657 spin_lock_irqsave(&cs->lock, flags);
658 if (!atomic_read(&cs->connected)) {
659 warn("%s: disconnected", __func__);
660 spin_unlock_irqrestore(&cs->lock, flags);
661 return;
662 }
663
664 if (!ucs->rcvbuf_size) {
665 warn("%s: no receive in progress", __func__);
666 spin_unlock_irqrestore(&cs->lock, flags);
667 return;
668 }
669
670 del_timer(&ucs->timer_cmd_in);
671
672 switch (urb->status) {
673 case 0: /* normal completion */
674 numbytes = urb->actual_length;
675 if (unlikely(numbytes == 0)) {
676 warn("control read: empty block received");
677 goto retry;
678 }
679 if (unlikely(numbytes != ucs->rcvbuf_size)) {
680 warn("control read: received %d chars, expected %d",
681 numbytes, ucs->rcvbuf_size);
682 if (numbytes > ucs->rcvbuf_size)
683 numbytes = ucs->rcvbuf_size;
684 }
685
686 /* copy received bytes to inbuf */
687 have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes);
688
689 if (unlikely(numbytes < ucs->rcvbuf_size)) {
690 /* incomplete - resubmit for remaining bytes */
691 ucs->rcvbuf_size -= numbytes;
692 ucs->retry_cmd_in = 0;
693 goto retry;
694 }
695 break;
696
697 case -ENOENT: /* canceled */
698 case -ECONNRESET: /* canceled (async) */
699 case -EINPROGRESS: /* pending */
700 /* no action necessary */
701 dbg(DEBUG_USBREQ,
702 "%s: %s", __func__, get_usb_statmsg(urb->status));
703 break;
704
705 default: /* severe trouble */
706 warn("control read: %s", get_usb_statmsg(urb->status));
707 retry:
708 if (ucs->retry_cmd_in++ < BAS_RETRY) {
709 notice("control read: retry %d", ucs->retry_cmd_in);
710 if (atread_submit(cs, BAS_TIMEOUT) >= 0) {
711 /* resubmitted - bypass regular exit block */
712 spin_unlock_irqrestore(&cs->lock, flags);
713 return;
714 }
715 } else {
716 err("control read: giving up after %d tries",
717 ucs->retry_cmd_in);
718 }
719 error_reset(cs);
720 }
721
722 kfree(ucs->rcvbuf);
723 ucs->rcvbuf = NULL;
724 ucs->rcvbuf_size = 0;
725 spin_unlock_irqrestore(&cs->lock, flags);
726 if (have_data) {
727 dbg(DEBUG_INTR, "%s-->BH", __func__);
728 gigaset_schedule_event(cs);
729 }
730}
731
732/* read_iso_callback
733 * USB completion handler for B channel isochronous input
734 * called by the USB subsystem in interrupt context
735 * parameter:
736 * urb USB request block of completed request
737 * urb->context = bc_state structure
738 */
739static void read_iso_callback(struct urb *urb, struct pt_regs *regs)
740{
741 struct bc_state *bcs;
742 struct bas_bc_state *ubc;
743 unsigned long flags;
744 int i, rc;
745
746 IFNULLRET(urb);
747 IFNULLRET(urb->context);
748 IFNULLRET(cardstate);
749
750 /* status codes not worth bothering the tasklet with */
751 if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET ||
752 urb->status == -EINPROGRESS)) {
753 dbg(DEBUG_ISO,
754 "%s: %s", __func__, get_usb_statmsg(urb->status));
755 return;
756 }
757
758 bcs = (struct bc_state *) urb->context;
759 ubc = bcs->hw.bas;
760 IFNULLRET(ubc);
761
762 spin_lock_irqsave(&ubc->isoinlock, flags);
763 if (likely(ubc->isoindone == NULL)) {
764 /* pass URB to tasklet */
765 ubc->isoindone = urb;
766 tasklet_schedule(&ubc->rcvd_tasklet);
767 } else {
768 /* tasklet still busy, drop data and resubmit URB */
769 ubc->loststatus = urb->status;
770 for (i = 0; i < BAS_NUMFRAMES; i++) {
771 ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
772 if (unlikely(urb->iso_frame_desc[i].status != 0 &&
773 urb->iso_frame_desc[i].status != -EINPROGRESS)) {
774 ubc->loststatus = urb->iso_frame_desc[i].status;
775 }
776 urb->iso_frame_desc[i].status = 0;
777 urb->iso_frame_desc[i].actual_length = 0;
778 }
779 if (likely(atomic_read(&ubc->running))) {
780 urb->dev = bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */
781 urb->transfer_flags = URB_ISO_ASAP;
782 urb->number_of_packets = BAS_NUMFRAMES;
783 dbg(DEBUG_ISO, "%s: isoc read overrun/resubmit", __func__);
784 rc = usb_submit_urb(urb, SLAB_ATOMIC);
785 if (unlikely(rc != 0)) {
786 err("could not resubmit isochronous read URB: %s",
787 get_usb_statmsg(rc));
788 dump_urb(DEBUG_ISO, "isoc read", urb);
789 error_hangup(bcs);
790 }
791 }
792 }
793 spin_unlock_irqrestore(&ubc->isoinlock, flags);
794}
795
796/* write_iso_callback
797 * USB completion handler for B channel isochronous output
798 * called by the USB subsystem in interrupt context
799 * parameter:
800 * urb USB request block of completed request
801 * urb->context = isow_urbctx_t structure
802 */
803static void write_iso_callback(struct urb *urb, struct pt_regs *regs)
804{
805 struct isow_urbctx_t *ucx;
806 struct bas_bc_state *ubc;
807 unsigned long flags;
808
809 IFNULLRET(urb);
810 IFNULLRET(urb->context);
811 IFNULLRET(cardstate);
812
813 /* status codes not worth bothering the tasklet with */
814 if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET ||
815 urb->status == -EINPROGRESS)) {
816 dbg(DEBUG_ISO,
817 "%s: %s", __func__, get_usb_statmsg(urb->status));
818 return;
819 }
820
821 /* pass URB context to tasklet */
822 ucx = (struct isow_urbctx_t *) urb->context;
823 IFNULLRET(ucx->bcs);
824 ubc = ucx->bcs->hw.bas;
825 IFNULLRET(ubc);
826
827 spin_lock_irqsave(&ubc->isooutlock, flags);
828 ubc->isooutovfl = ubc->isooutdone;
829 ubc->isooutdone = ucx;
830 spin_unlock_irqrestore(&ubc->isooutlock, flags);
831 tasklet_schedule(&ubc->sent_tasklet);
832}
833
834/* starturbs
835 * prepare and submit USB request blocks for isochronous input and output
836 * argument:
837 * B channel control structure
838 * return value:
839 * 0 on success
840 * < 0 on error (no URBs submitted)
841 */
842static int starturbs(struct bc_state *bcs)
843{
844 struct urb *urb;
845 struct bas_bc_state *ubc;
846 int j, k;
847 int rc;
848
849 IFNULLRETVAL(bcs, -EFAULT);
850 ubc = bcs->hw.bas;
851 IFNULLRETVAL(ubc, -EFAULT);
852
853 /* initialize L2 reception */
854 if (bcs->proto2 == ISDN_PROTO_L2_HDLC)
855 bcs->inputstate |= INS_flag_hunt;
856
857 /* submit all isochronous input URBs */
858 atomic_set(&ubc->running, 1);
859 for (k = 0; k < BAS_INURBS; k++) {
860 urb = ubc->isoinurbs[k];
861 if (!urb) {
862 err("isoinurbs[%d]==NULL", k);
863 rc = -EFAULT;
864 goto error;
865 }
866
867 urb->dev = bcs->cs->hw.bas->udev;
868 urb->pipe = usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel);
869 urb->transfer_flags = URB_ISO_ASAP;
870 urb->transfer_buffer = ubc->isoinbuf + k * BAS_INBUFSIZE;
871 urb->transfer_buffer_length = BAS_INBUFSIZE;
872 urb->number_of_packets = BAS_NUMFRAMES;
873 urb->interval = BAS_FRAMETIME;
874 urb->complete = read_iso_callback;
875 urb->context = bcs;
876 for (j = 0; j < BAS_NUMFRAMES; j++) {
877 urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME;
878 urb->iso_frame_desc[j].length = BAS_MAXFRAME;
879 urb->iso_frame_desc[j].status = 0;
880 urb->iso_frame_desc[j].actual_length = 0;
881 }
882
883 dump_urb(DEBUG_ISO, "Initial isoc read", urb);
884 if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) {
885 err("could not submit isochronous read URB %d: %s",
886 k, get_usb_statmsg(rc));
887 goto error;
888 }
889 }
890
891 /* initialize L2 transmission */
892 gigaset_isowbuf_init(ubc->isooutbuf, PPP_FLAG);
893
894 /* set up isochronous output URBs for flag idling */
895 for (k = 0; k < BAS_OUTURBS; ++k) {
896 urb = ubc->isoouturbs[k].urb;
897 if (!urb) {
898 err("isoouturbs[%d].urb==NULL", k);
899 rc = -EFAULT;
900 goto error;
901 }
902 urb->dev = bcs->cs->hw.bas->udev;
903 urb->pipe = usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel);
904 urb->transfer_flags = URB_ISO_ASAP;
905 urb->transfer_buffer = ubc->isooutbuf->data;
906 urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
907 urb->number_of_packets = BAS_NUMFRAMES;
908 urb->interval = BAS_FRAMETIME;
909 urb->complete = write_iso_callback;
910 urb->context = &ubc->isoouturbs[k];
911 for (j = 0; j < BAS_NUMFRAMES; ++j) {
912 urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE;
913 urb->iso_frame_desc[j].length = BAS_NORMFRAME;
914 urb->iso_frame_desc[j].status = 0;
915 urb->iso_frame_desc[j].actual_length = 0;
916 }
917 ubc->isoouturbs[k].limit = -1;
918 }
919
920 /* submit two URBs, keep third one */
921 for (k = 0; k < 2; ++k) {
922 dump_urb(DEBUG_ISO, "Initial isoc write", urb);
923 rc = usb_submit_urb(ubc->isoouturbs[k].urb, SLAB_ATOMIC);
924 if (rc != 0) {
925 err("could not submit isochronous write URB %d: %s",
926 k, get_usb_statmsg(rc));
927 goto error;
928 }
929 }
930 dump_urb(DEBUG_ISO, "Initial isoc write (free)", urb);
931 ubc->isooutfree = &ubc->isoouturbs[2];
932 ubc->isooutdone = ubc->isooutovfl = NULL;
933 return 0;
934 error:
935 stopurbs(ubc);
936 return rc;
937}
938
939/* stopurbs
940 * cancel the USB request blocks for isochronous input and output
941 * errors are silently ignored
942 * argument:
943 * B channel control structure
944 */
945static void stopurbs(struct bas_bc_state *ubc)
946{
947 int k, rc;
948
949 IFNULLRET(ubc);
950
951 atomic_set(&ubc->running, 0);
952
953 for (k = 0; k < BAS_INURBS; ++k) {
954 rc = usb_unlink_urb(ubc->isoinurbs[k]);
955 dbg(DEBUG_ISO, "%s: isoc input URB %d unlinked, result = %d",
956 __func__, k, rc);
957 }
958
959 for (k = 0; k < BAS_OUTURBS; ++k) {
960 rc = usb_unlink_urb(ubc->isoouturbs[k].urb);
961 dbg(DEBUG_ISO, "%s: isoc output URB %d unlinked, result = %d",
962 __func__, k, rc);
963 }
964}
965
966/* Isochronous Write - Bottom Half */
967/* =============================== */
968
969/* submit_iso_write_urb
970 * fill and submit the next isochronous write URB
971 * parameters:
972 * bcs B channel state structure
973 * return value:
974 * number of frames submitted in URB
975 * 0 if URB not submitted because no data available (isooutbuf busy)
976 * error code < 0 on error
977 */
978static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
979{
980 struct urb *urb;
981 struct bas_bc_state *ubc;
982 struct usb_iso_packet_descriptor *ifd;
983 int corrbytes, nframe, rc;
984
985 IFNULLRETVAL(ucx, -EFAULT);
986 urb = ucx->urb;
987 IFNULLRETVAL(urb, -EFAULT);
988 IFNULLRETVAL(ucx->bcs, -EFAULT);
989 ubc = ucx->bcs->hw.bas;
990 IFNULLRETVAL(ubc, -EFAULT);
991
992 urb->dev = ucx->bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */
993 urb->transfer_flags = URB_ISO_ASAP;
994 urb->transfer_buffer = ubc->isooutbuf->data;
995 urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data);
996
997 for (nframe = 0; nframe < BAS_NUMFRAMES; nframe++) {
998 ifd = &urb->iso_frame_desc[nframe];
999
1000 /* compute frame length according to flow control */
1001 ifd->length = BAS_NORMFRAME;
1002 if ((corrbytes = atomic_read(&ubc->corrbytes)) != 0) {
1003 dbg(DEBUG_ISO, "%s: corrbytes=%d", __func__, corrbytes);
1004 if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
1005 corrbytes = BAS_HIGHFRAME - BAS_NORMFRAME;
1006 else if (corrbytes < BAS_LOWFRAME - BAS_NORMFRAME)
1007 corrbytes = BAS_LOWFRAME - BAS_NORMFRAME;
1008 ifd->length += corrbytes;
1009 atomic_add(-corrbytes, &ubc->corrbytes);
1010 }
1011 //dbg(DEBUG_ISO, "%s: frame %d length=%d", __func__, nframe, ifd->length);
1012
1013 /* retrieve block of data to send */
1014 ifd->offset = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length);
1015 if (ifd->offset < 0) {
1016 if (ifd->offset == -EBUSY) {
1017 dbg(DEBUG_ISO, "%s: buffer busy at frame %d",
1018 __func__, nframe);
1019 /* tasklet will be restarted from gigaset_send_skb() */
1020 } else {
1021 err("%s: buffer error %d at frame %d",
1022 __func__, ifd->offset, nframe);
1023 return ifd->offset;
1024 }
1025 break;
1026 }
1027 ucx->limit = atomic_read(&ubc->isooutbuf->nextread);
1028 ifd->status = 0;
1029 ifd->actual_length = 0;
1030 }
1031 if ((urb->number_of_packets = nframe) > 0) {
1032 if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) {
1033 err("could not submit isochronous write URB: %s",
1034 get_usb_statmsg(rc));
1035 dump_urb(DEBUG_ISO, "isoc write", urb);
1036 return rc;
1037 }
1038 ++ubc->numsub;
1039 }
1040 return nframe;
1041}
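The per-frame length calculation above spreads a pending byte correction (corrbytes) from flow control over the outgoing frames, clamped between the low and high frame sizes. The sketch below mirrors that clamping in user space; the BAS_* values are placeholders, since the real constants are defined in gigaset.h and not shown in this file.

#include <stdio.h>

/* placeholder values - the real constants come from gigaset.h */
#define BAS_NORMFRAME 8
#define BAS_HIGHFRAME 10
#define BAS_LOWFRAME  5

/* compute one frame length and update the remaining correction,
 * mirroring the corrbytes handling in submit_iso_write_urb() */
static int frame_length(int *corrbytes)
{
	int length = BAS_NORMFRAME;
	int corr = *corrbytes;

	if (corr != 0) {
		if (corr > BAS_HIGHFRAME - BAS_NORMFRAME)
			corr = BAS_HIGHFRAME - BAS_NORMFRAME;
		else if (corr < BAS_LOWFRAME - BAS_NORMFRAME)
			corr = BAS_LOWFRAME - BAS_NORMFRAME;
		length += corr;
		*corrbytes -= corr;
	}
	return length;
}

int main(void)
{
	int corrbytes = 7;	/* 7 extra bytes requested by flow control */

	while (corrbytes)
		printf("frame of %d bytes, %d left\n",
		       frame_length(&corrbytes), corrbytes);
	return 0;
}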
1042
1043/* write_iso_tasklet
1044 * tasklet scheduled when an isochronous output URB from the Gigaset device
1045 * has completed
1046 * parameter:
1047 * data B channel state structure
1048 */
1049static void write_iso_tasklet(unsigned long data)
1050{
1051 struct bc_state *bcs;
1052 struct bas_bc_state *ubc;
1053 struct cardstate *cs;
1054 struct isow_urbctx_t *done, *next, *ovfl;
1055 struct urb *urb;
1056 struct usb_iso_packet_descriptor *ifd;
1057 int offset;
1058 unsigned long flags;
1059 int i;
1060 struct sk_buff *skb;
1061 int len;
1062
1063 bcs = (struct bc_state *) data;
1064 IFNULLRET(bcs);
1065 ubc = bcs->hw.bas;
1066 IFNULLRET(ubc);
1067 cs = bcs->cs;
1068 IFNULLRET(cs);
1069
1070 /* loop while completed URBs arrive in time */
1071 for (;;) {
1072 if (unlikely(!atomic_read(&cs->connected))) {
1073 warn("%s: disconnected", __func__);
1074 return;
1075 }
1076
1077 if (unlikely(!(atomic_read(&ubc->running)))) {
1078 dbg(DEBUG_ISO, "%s: not running", __func__);
1079 return;
1080 }
1081
1082 /* retrieve completed URBs */
1083 spin_lock_irqsave(&ubc->isooutlock, flags);
1084 done = ubc->isooutdone;
1085 ubc->isooutdone = NULL;
1086 ovfl = ubc->isooutovfl;
1087 ubc->isooutovfl = NULL;
1088 spin_unlock_irqrestore(&ubc->isooutlock, flags);
1089 if (ovfl) {
1090 err("isochronous write buffer underrun - buy a faster machine :-)");
1091 error_hangup(bcs);
1092 break;
1093 }
1094 if (!done)
1095 break;
1096
1097 /* submit free URB if available */
1098 spin_lock_irqsave(&ubc->isooutlock, flags);
1099 next = ubc->isooutfree;
1100 ubc->isooutfree = NULL;
1101 spin_unlock_irqrestore(&ubc->isooutlock, flags);
1102 if (next) {
1103 if (submit_iso_write_urb(next) <= 0) {
1104 /* could not submit URB, put it back */
1105 spin_lock_irqsave(&ubc->isooutlock, flags);
1106 if (ubc->isooutfree == NULL) {
1107 ubc->isooutfree = next;
1108 next = NULL;
1109 }
1110 spin_unlock_irqrestore(&ubc->isooutlock, flags);
1111 if (next) {
1112 /* couldn't put it back */
1113 err("losing isochronous write URB");
1114 error_hangup(bcs);
1115 }
1116 }
1117 }
1118
1119 /* process completed URB */
1120 urb = done->urb;
1121 switch (urb->status) {
1122 case 0: /* normal completion */
1123 break;
1124 case -EXDEV: /* inspect individual frames */
1125 /* assumptions (for lack of documentation):
1126 * - actual_length bytes of the frame in error are successfully sent
1127 * - all following frames are not sent at all
1128 */
1129 dbg(DEBUG_ISO, "%s: URB partially completed", __func__);
1130 offset = done->limit; /* just in case */
1131 for (i = 0; i < BAS_NUMFRAMES; i++) {
1132 ifd = &urb->iso_frame_desc[i];
1133 if (ifd->status ||
1134 ifd->actual_length != ifd->length) {
1135 warn("isochronous write: frame %d: %s, "
1136 "only %d of %d bytes sent",
1137 i, get_usb_statmsg(ifd->status),
1138 ifd->actual_length, ifd->length);
1139 offset = (ifd->offset +
1140 ifd->actual_length)
1141 % BAS_OUTBUFSIZE;
1142 break;
1143 }
1144 }
1145#ifdef CONFIG_GIGASET_DEBUG
1146 /* check assumption on remaining frames */
1147 for (; i < BAS_NUMFRAMES; i++) {
1148 ifd = &urb->iso_frame_desc[i];
1149 if (ifd->status != -EINPROGRESS
1150 || ifd->actual_length != 0) {
1151 warn("isochronous write: frame %d: %s, "
1152 "%d of %d bytes sent",
1153 i, get_usb_statmsg(ifd->status),
1154 ifd->actual_length, ifd->length);
1155 offset = (ifd->offset +
1156 ifd->actual_length)
1157 % BAS_OUTBUFSIZE;
1158 break;
1159 }
1160 }
1161#endif
1162 break;
1163 case -EPIPE: //FIXME is this the code for "underrun"?
1164 err("isochronous write stalled");
1165 error_hangup(bcs);
1166 break;
1167 default: /* severe trouble */
1168 warn("isochronous write: %s",
1169 get_usb_statmsg(urb->status));
1170 }
1171
1172 /* mark the write buffer area covered by this URB as free */
1173 if (done->limit >= 0)
1174 atomic_set(&ubc->isooutbuf->read, done->limit);
1175
1176 /* mark URB as free */
1177 spin_lock_irqsave(&ubc->isooutlock, flags);
1178 next = ubc->isooutfree;
1179 ubc->isooutfree = done;
1180 spin_unlock_irqrestore(&ubc->isooutlock, flags);
1181 if (next) {
1182 /* only one URB still active - resubmit one */
1183 if (submit_iso_write_urb(next) <= 0) {
1184 /* couldn't submit */
1185 error_hangup(bcs);
1186 }
1187 }
1188 }
1189
1190 /* process queued SKBs */
1191 while ((skb = skb_dequeue(&bcs->squeue))) {
1192 /* copy to output buffer, doing L2 encapsulation */
1193 len = skb->len;
1194 if (gigaset_isoc_buildframe(bcs, skb->data, len) == -EAGAIN) {
1195 /* insufficient buffer space, push back onto queue */
1196 skb_queue_head(&bcs->squeue, skb);
1197 dbg(DEBUG_ISO, "%s: skb requeued, qlen=%d",
1198 __func__, skb_queue_len(&bcs->squeue));
1199 break;
1200 }
1201 skb_pull(skb, len);
1202 gigaset_skb_sent(bcs, skb);
1203 dev_kfree_skb_any(skb);
1204 }
1205}
1206
1207/* Isochronous Read - Bottom Half */
1208/* ============================== */
1209
1210/* read_iso_tasklet
1211 * tasklet scheduled when an isochronous input URB from the Gigaset device
1212 * has completed
1213 * parameter:
1214 * data B channel state structure
1215 */
1216static void read_iso_tasklet(unsigned long data)
1217{
1218 struct bc_state *bcs;
1219 struct bas_bc_state *ubc;
1220 struct cardstate *cs;
1221 struct urb *urb;
1222 char *rcvbuf;
1223 unsigned long flags;
1224 int totleft, numbytes, offset, frame, rc;
1225
1226 bcs = (struct bc_state *) data;
1227 IFNULLRET(bcs);
1228 ubc = bcs->hw.bas;
1229 IFNULLRET(ubc);
1230 cs = bcs->cs;
1231 IFNULLRET(cs);
1232
1233 /* loop while more completed URBs arrive in the meantime */
1234 for (;;) {
1235 if (!atomic_read(&cs->connected)) {
1236 warn("%s: disconnected", __func__);
1237 return;
1238 }
1239
1240 /* retrieve URB */
1241 spin_lock_irqsave(&ubc->isoinlock, flags);
1242 if (!(urb = ubc->isoindone)) {
1243 spin_unlock_irqrestore(&ubc->isoinlock, flags);
1244 return;
1245 }
1246 ubc->isoindone = NULL;
1247 if (unlikely(ubc->loststatus != -EINPROGRESS)) {
1248 warn("isochronous read overrun, dropped URB with status: %s, %d bytes lost",
1249 get_usb_statmsg(ubc->loststatus), ubc->isoinlost);
1250 ubc->loststatus = -EINPROGRESS;
1251 }
1252 spin_unlock_irqrestore(&ubc->isoinlock, flags);
1253
1254 if (unlikely(!(atomic_read(&ubc->running)))) {
1255 dbg(DEBUG_ISO, "%s: channel not running, dropped URB with status: %s",
1256 __func__, get_usb_statmsg(urb->status));
1257 return;
1258 }
1259
1260 switch (urb->status) {
1261 case 0: /* normal completion */
1262 break;
1263 case -EXDEV: /* inspect individual frames (we do that anyway) */
1264 dbg(DEBUG_ISO, "%s: URB partially completed", __func__);
1265 break;
1266 case -ENOENT:
1267 case -ECONNRESET:
1268 dbg(DEBUG_ISO, "%s: URB canceled", __func__);
1269 continue; /* -> skip */
1270 case -EINPROGRESS: /* huh? */
1271 dbg(DEBUG_ISO, "%s: URB still pending", __func__);
1272 continue; /* -> skip */
1273 case -EPIPE:
1274 err("isochronous read stalled");
1275 error_hangup(bcs);
1276 continue; /* -> skip */
1277 default: /* severe trouble */
1278 warn("isochronous read: %s",
1279 get_usb_statmsg(urb->status));
1280 goto error;
1281 }
1282
1283 rcvbuf = urb->transfer_buffer;
1284 totleft = urb->actual_length;
1285 for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) {
1286 if (unlikely(urb->iso_frame_desc[frame].status)) {
1287 warn("isochronous read: frame %d: %s",
1288 frame, get_usb_statmsg(urb->iso_frame_desc[frame].status));
1289 break;
1290 }
1291 numbytes = urb->iso_frame_desc[frame].actual_length;
1292 if (unlikely(numbytes > BAS_MAXFRAME)) {
1293 warn("isochronous read: frame %d: numbytes (%d) > BAS_MAXFRAME",
1294 frame, numbytes);
1295 break;
1296 }
1297 if (unlikely(numbytes > totleft)) {
1298 warn("isochronous read: frame %d: numbytes (%d) > totleft (%d)",
1299 frame, numbytes, totleft);
1300 break;
1301 }
1302 offset = urb->iso_frame_desc[frame].offset;
1303 if (unlikely(offset + numbytes > BAS_INBUFSIZE)) {
1304 warn("isochronous read: frame %d: offset (%d) + numbytes (%d) > BAS_INBUFSIZE",
1305 frame, offset, numbytes);
1306 break;
1307 }
1308 gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs);
1309 totleft -= numbytes;
1310 }
1311 if (unlikely(totleft > 0))
1312 warn("isochronous read: %d data bytes missing",
1313 totleft);
1314
1315 error:
1316 /* URB processed, resubmit */
1317 for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
1318 urb->iso_frame_desc[frame].status = 0;
1319 urb->iso_frame_desc[frame].actual_length = 0;
1320 }
1321 urb->dev = bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */
1322 urb->transfer_flags = URB_ISO_ASAP;
1323 urb->number_of_packets = BAS_NUMFRAMES;
1324 if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) {
1325 err("could not resubmit isochronous read URB: %s",
1326 get_usb_statmsg(rc));
1327 dump_urb(DEBUG_ISO, "resubmit iso read", urb);
1328 error_hangup(bcs);
1329 }
1330 }
1331}
1332
1333/* Channel Operations */
1334/* ================== */
1335
1336/* req_timeout
1337 * timeout routine for control output request
1338 * argument:
1339 * B channel control structure
1340 */
1341static void req_timeout(unsigned long data)
1342{
1343 struct bc_state *bcs = (struct bc_state *) data;
1344 struct bas_cardstate *ucs;
1345 int pending;
1346 unsigned long flags;
1347
1348 IFNULLRET(bcs);
1349 IFNULLRET(bcs->cs);
1350 ucs = bcs->cs->hw.bas;
1351 IFNULLRET(ucs);
1352
1353 check_pending(ucs);
1354
1355 spin_lock_irqsave(&ucs->lock, flags);
1356 pending = ucs->pending;
1357 ucs->pending = 0;
1358 spin_unlock_irqrestore(&ucs->lock, flags);
1359
1360 switch (pending) {
1361 case 0: /* no pending request */
1362 dbg(DEBUG_USBREQ, "%s: no request pending", __func__);
1363 break;
1364
1365 case HD_OPEN_ATCHANNEL:
1366 err("timeout opening AT channel");
1367 error_reset(bcs->cs);
1368 break;
1369
1370 case HD_OPEN_B2CHANNEL:
1371 case HD_OPEN_B1CHANNEL:
1372 err("timeout opening channel %d", bcs->channel + 1);
1373 error_hangup(bcs);
1374 break;
1375
1376 case HD_CLOSE_ATCHANNEL:
1377 err("timeout closing AT channel");
1378 //wake_up_interruptible(cs->initwait);
1379 //FIXME need own wait queue?
1380 break;
1381
1382 case HD_CLOSE_B2CHANNEL:
1383 case HD_CLOSE_B1CHANNEL:
1384 err("timeout closing channel %d", bcs->channel + 1);
1385 break;
1386
1387 default:
1388 warn("request 0x%02x timed out, clearing", pending);
1389 }
1390}
1391
1392/* write_ctrl_callback
1393 * USB completion handler for control pipe output
1394 * called by the USB subsystem in interrupt context
1395 * parameter:
1396 * urb USB request block of completed request
1397 * urb->context = hardware specific controller state structure
1398 */
1399static void write_ctrl_callback(struct urb *urb, struct pt_regs *regs)
1400{
1401 struct bas_cardstate *ucs;
1402 unsigned long flags;
1403
1404 IFNULLRET(urb);
1405 IFNULLRET(urb->context);
1406 IFNULLRET(cardstate);
1407
1408 ucs = (struct bas_cardstate *) urb->context;
1409 spin_lock_irqsave(&ucs->lock, flags);
1410 if (urb->status && ucs->pending) {
1411 err("control request 0x%02x failed: %s",
1412 ucs->pending, get_usb_statmsg(urb->status));
1413 del_timer(&ucs->timer_ctrl);
1414 ucs->pending = 0;
1415 }
1416 /* individual handling of specific request types */
1417 switch (ucs->pending) {
1418 case HD_DEVICE_INIT_ACK: /* no reply expected */
1419 ucs->pending = 0;
1420 break;
1421 }
1422 spin_unlock_irqrestore(&ucs->lock, flags);
1423}
1424
1425/* req_submit
1426 * submit a control output request without message buffer to the Gigaset base
1427 * and optionally start a timeout
1428 * parameters:
1429 * bcs B channel control structure
1430 * req control request code (HD_*)
1431 * val control request parameter value (set to 0 if unused)
1432 * timeout timeout in seconds (0: no timeout)
1433 * return value:
1434 * 0 on success
1435 * -EINVAL if a NULL pointer is encountered somewhere
1436 * -EBUSY if another request is pending
1437 * any URB submission error code
1438 */
1439static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
1440{
1441 struct bas_cardstate *ucs;
1442 int ret;
1443 unsigned long flags;
1444
1445 IFNULLRETVAL(bcs, -EINVAL);
1446 IFNULLRETVAL(bcs->cs, -EINVAL);
1447 ucs = bcs->cs->hw.bas;
1448 IFNULLRETVAL(ucs, -EINVAL);
1449 IFNULLRETVAL(ucs->urb_ctrl, -EINVAL);
1450
1451 dbg(DEBUG_USBREQ, "-------> 0x%02x (%d)", req, val);
1452
1453 spin_lock_irqsave(&ucs->lock, flags);
1454 if (ucs->pending) {
1455 spin_unlock_irqrestore(&ucs->lock, flags);
1456 err("submission of request 0x%02x failed: request 0x%02x still pending",
1457 req, ucs->pending);
1458 return -EBUSY;
1459 }
1460 if (ucs->urb_ctrl->status == -EINPROGRESS) {
1461 spin_unlock_irqrestore(&ucs->lock, flags);
1462 err("could not submit request 0x%02x: URB busy", req);
1463 return -EBUSY;
1464 }
1465
1466 ucs->dr_ctrl.bRequestType = OUT_VENDOR_REQ;
1467 ucs->dr_ctrl.bRequest = req;
1468 ucs->dr_ctrl.wValue = cpu_to_le16(val);
1469 ucs->dr_ctrl.wIndex = 0;
1470 ucs->dr_ctrl.wLength = 0;
1471 usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
1472 usb_sndctrlpipe(ucs->udev, 0),
1473 (unsigned char*) &ucs->dr_ctrl, NULL, 0,
1474 write_ctrl_callback, ucs);
1475 if ((ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC)) != 0) {
1476 err("could not submit request 0x%02x: %s",
1477 req, get_usb_statmsg(ret));
1478 spin_unlock_irqrestore(&ucs->lock, flags);
1479 return ret;
1480 }
1481 ucs->pending = req;
1482
1483 if (timeout > 0) {
1484 dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout);
1485 ucs->timer_ctrl.expires = jiffies + timeout * HZ / 10;
1486 ucs->timer_ctrl.data = (unsigned long) bcs;
1487 ucs->timer_ctrl.function = req_timeout;
1488 add_timer(&ucs->timer_ctrl);
1489 }
1490
1491 spin_unlock_irqrestore(&ucs->lock, flags);
1492 return 0;
1493}
1494
1495/* gigaset_init_bchannel
1496 * called by common.c to connect a B channel
1497 * initialize isochronous I/O and tell the Gigaset base to open the channel
1498 * argument:
1499 * B channel control structure
1500 * return value:
1501 * 0 on success, error code < 0 on error
1502 */
1503static int gigaset_init_bchannel(struct bc_state *bcs)
1504{
1505 int req, ret;
1506
1507 IFNULLRETVAL(bcs, -EINVAL);
1508
1509 if ((ret = starturbs(bcs)) < 0) {
1510 err("could not start isochronous I/O for channel %d",
1511 bcs->channel + 1);
1512 error_hangup(bcs);
1513 return ret;
1514 }
1515
1516 req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
1517 if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) {
1518 err("could not open channel %d: %s",
1519 bcs->channel + 1, get_usb_statmsg(ret));
1520 stopurbs(bcs->hw.bas);
1521 error_hangup(bcs);
1522 }
1523 return ret;
1524}
1525
1526/* gigaset_close_bchannel
1527 * called by common.c to disconnect a B channel
1528 * tell the Gigaset base to close the channel
1529 * stopping isochronous I/O and LL notification will be done when the
1530 * acknowledgement for the close arrives
1531 * argument:
1532 * B channel control structure
1533 * return value:
1534 * 0 on success, error code < 0 on error
1535 */
1536static int gigaset_close_bchannel(struct bc_state *bcs)
1537{
1538 int req, ret;
1539
1540 IFNULLRETVAL(bcs, -EINVAL);
1541
1542 if (!(atomic_read(&bcs->cs->hw.bas->basstate) &
1543 (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) {
1544 /* channel not running: just signal common.c */
1545 gigaset_bchannel_down(bcs);
1546 return 0;
1547 }
1548
1549 req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
1550 if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0)
1551 err("could not submit HD_CLOSE_BxCHANNEL request: %s",
1552 get_usb_statmsg(ret));
1553 return ret;
1554}
1555
1556/* Device Operations */
1557/* ================= */
1558
1559/* complete_cb
1560 * unqueue first command buffer from queue, waking any sleepers
1561 * must be called with cs->cmdlock held
1562 * parameter:
1563 * cs controller state structure
1564 */
1565static void complete_cb(struct cardstate *cs)
1566{
1567 struct cmdbuf_t *cb;
1568
1569 IFNULLRET(cs);
1570 cb = cs->cmdbuf;
1571 IFNULLRET(cb);
1572
1573 /* unqueue completed buffer */
1574 cs->cmdbytes -= cs->curlen;
1575 dbg(DEBUG_TRANSCMD | DEBUG_LOCKCMD,
1576 "write_command: sent %u bytes, %u left",
1577 cs->curlen, cs->cmdbytes);
1578 if ((cs->cmdbuf = cb->next) != NULL) {
1579 cs->cmdbuf->prev = NULL;
1580 cs->curlen = cs->cmdbuf->len;
1581 } else {
1582 cs->lastcmdbuf = NULL;
1583 cs->curlen = 0;
1584 }
1585
1586 if (cb->wake_tasklet)
1587 tasklet_schedule(cb->wake_tasklet);
1588
1589 kfree(cb);
1590}
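complete_cb() pops the head of the doubly linked command-buffer queue and fixes up the head/tail pointers and the cached length of the new head. A minimal user-space sketch of that dequeue, using a stripped-down cmdbuf structure and a hypothetical queue wrapper that carries only the fields the unlink touches (error handling omitted):

#include <stdio.h>
#include <stdlib.h>

struct cmdbuf {
	int len;
	struct cmdbuf *next, *prev;
};

struct queue {
	struct cmdbuf *head, *tail;	/* cs->cmdbuf / cs->lastcmdbuf */
	int curlen;			/* length of the current head */
};

/* pop the completed head buffer, as complete_cb() does */
static void complete_head(struct queue *q)
{
	struct cmdbuf *cb = q->head;

	if (!cb)
		return;
	q->head = cb->next;
	if (q->head) {
		q->head->prev = NULL;
		q->curlen = q->head->len;
	} else {
		q->tail = NULL;
		q->curlen = 0;
	}
	free(cb);
}

int main(void)
{
	struct queue q = { NULL, NULL, 0 };
	struct cmdbuf *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

	a->len = 5; b->len = 7;
	a->next = b; b->prev = a;
	q.head = a; q.tail = b; q.curlen = a->len;

	complete_head(&q);
	printf("curlen now %d\n", q.curlen);	/* 7 */
	complete_head(&q);
	printf("queue empty: %s\n", q.head ? "no" : "yes");
	return 0;
}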
1591
1592static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len);
1593
1594/* write_command_callback
1595 * USB completion handler for AT command transmission
1596 * called by the USB subsystem in interrupt context
1597 * parameter:
1598 * urb USB request block of completed request
1599 * urb->context = controller state structure
1600 */
1601static void write_command_callback(struct urb *urb, struct pt_regs *regs)
1602{
1603 struct cardstate *cs;
1604 unsigned long flags;
1605 struct bas_cardstate *ucs;
1606
1607 IFNULLRET(urb);
1608 cs = (struct cardstate *) urb->context;
1609 IFNULLRET(cs);
1610 ucs = cs->hw.bas;
1611 IFNULLRET(ucs);
1612
1613 /* check status */
1614 switch (urb->status) {
1615 case 0: /* normal completion */
1616 break;
1617 case -ENOENT: /* canceled */
1618 case -ECONNRESET: /* canceled (async) */
1619 case -EINPROGRESS: /* pending */
1620 /* ignore silently */
1621 dbg(DEBUG_USBREQ,
1622 "%s: %s", __func__, get_usb_statmsg(urb->status));
1623 return;
1624 default: /* any failure */
1625 if (++ucs->retry_cmd_out > BAS_RETRY) {
1626 warn("command write: %s, giving up after %d retries",
1627 get_usb_statmsg(urb->status), ucs->retry_cmd_out);
1628 break;
1629 }
1630 if (cs->cmdbuf == NULL) {
1631 warn("command write: %s, cannot retry - cmdbuf gone",
1632 get_usb_statmsg(urb->status));
1633 break;
1634 }
1635 notice("command write: %s, retry %d",
1636 get_usb_statmsg(urb->status), ucs->retry_cmd_out);
1637 if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0)
1638 /* resubmitted - bypass regular exit block */
1639 return;
1640 /* command send failed, assume base still waiting */
1641 update_basstate(ucs, BS_ATREADY, 0);
1642 }
1643
1644 spin_lock_irqsave(&cs->cmdlock, flags);
1645 if (cs->cmdbuf != NULL)
1646 complete_cb(cs);
1647 spin_unlock_irqrestore(&cs->cmdlock, flags);
1648}
1649
1650/* atrdy_timeout
1651 * timeout routine for AT command transmission
1652 * argument:
1653 * controller state structure
1654 */
1655static void atrdy_timeout(unsigned long data)
1656{
1657 struct cardstate *cs = (struct cardstate *) data;
1658 struct bas_cardstate *ucs;
1659
1660 IFNULLRET(cs);
1661 ucs = cs->hw.bas;
1662 IFNULLRET(ucs);
1663
1664 warn("timeout waiting for HD_READY_SEND_ATDATA");
1665
1666 /* fake the missing signal - what else can I do? */
1667 update_basstate(ucs, BS_ATREADY, BS_ATTIMER);
1668 start_cbsend(cs);
1669}
1670
1671/* atwrite_submit
1672 * submit an HD_WRITE_ATMESSAGE command URB
1673 * parameters:
1674 * cs controller state structure
1675 * buf buffer containing command to send
1676 * len length of command to send
1677 * return value:
1678 * 0 on success
1679 * -EFAULT if a NULL pointer is encountered somewhere
1680 * -EBUSY if another request is pending
1681 * any URB submission error code
1682 */
1683static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
1684{
1685 struct bas_cardstate *ucs;
1686 int ret;
1687
1688 IFNULLRETVAL(cs, -EFAULT);
1689 ucs = cs->hw.bas;
1690 IFNULLRETVAL(ucs, -EFAULT);
1691 IFNULLRETVAL(ucs->urb_cmd_out, -EFAULT);
1692
1693 dbg(DEBUG_USBREQ, "-------> HD_WRITE_ATMESSAGE (%d)", len);
1694
1695 if (ucs->urb_cmd_out->status == -EINPROGRESS) {
1696 err("could not submit HD_WRITE_ATMESSAGE: URB busy");
1697 return -EBUSY;
1698 }
1699
1700 ucs->dr_cmd_out.bRequestType = OUT_VENDOR_REQ;
1701 ucs->dr_cmd_out.bRequest = HD_WRITE_ATMESSAGE;
1702 ucs->dr_cmd_out.wValue = 0;
1703 ucs->dr_cmd_out.wIndex = 0;
1704 ucs->dr_cmd_out.wLength = cpu_to_le16(len);
1705 usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
1706 usb_sndctrlpipe(ucs->udev, 0),
1707 (unsigned char*) &ucs->dr_cmd_out, buf, len,
1708 write_command_callback, cs);
1709
1710 if ((ret = usb_submit_urb(ucs->urb_cmd_out, SLAB_ATOMIC)) != 0) {
1711 err("could not submit HD_WRITE_ATMESSAGE: %s",
1712 get_usb_statmsg(ret));
1713 return ret;
1714 }
1715
1716 /* submitted successfully */
1717 update_basstate(ucs, 0, BS_ATREADY);
1718
1719 /* start timeout if necessary */
1720 if (!(atomic_read(&ucs->basstate) & BS_ATTIMER)) {
1721 dbg(DEBUG_OUTPUT,
1722 "setting ATREADY timeout of %d/10 secs", ATRDY_TIMEOUT);
1723 ucs->timer_atrdy.expires = jiffies + ATRDY_TIMEOUT * HZ / 10;
1724 ucs->timer_atrdy.data = (unsigned long) cs;
1725 ucs->timer_atrdy.function = atrdy_timeout;
1726 add_timer(&ucs->timer_atrdy);
1727 update_basstate(ucs, BS_ATTIMER, 0);
1728 }
1729 return 0;
1730}
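/* Editor's note (hedged summary, not part of the original source): after a
 * successful submission BS_ATREADY is cleared, so no further AT message is
 * sent until the base reports HD_READY_SEND_ATDATA (presumably handled by the
 * interrupt pipe callback, which sets BS_ATREADY again). The guard timer armed
 * above fires at
 *	jiffies + ATRDY_TIMEOUT * HZ / 10
 * i.e. after ATRDY_TIMEOUT tenths of a second, and atrdy_timeout() then fakes
 * the missing ready signal so that queued commands are not stalled forever.
 */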
1731
1732/* start_cbsend
1733 * start transmission of AT command queue if necessary
1734 * parameter:
1735 * cs controller state structure
1736 * return value:
1737 * 0 on success
1738 * error code < 0 on error
1739 */
1740static int start_cbsend(struct cardstate *cs)
1741{
1742 struct cmdbuf_t *cb;
1743 struct bas_cardstate *ucs;
1744 unsigned long flags;
1745 int rc;
1746 int retval = 0;
1747
1748 IFNULLRETVAL(cs, -EFAULT);
1749 ucs = cs->hw.bas;
1750 IFNULLRETVAL(ucs, -EFAULT);
1751
1752 /* check if AT channel is open */
1753 if (!(atomic_read(&ucs->basstate) & BS_ATOPEN)) {
1754 dbg(DEBUG_TRANSCMD | DEBUG_LOCKCMD, "AT channel not open");
1755 rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT);
1756 if (rc < 0) {
1757 err("could not open AT channel");
1758 /* flush command queue */
1759 spin_lock_irqsave(&cs->cmdlock, flags);
1760 while (cs->cmdbuf != NULL)
1761 complete_cb(cs);
1762 spin_unlock_irqrestore(&cs->cmdlock, flags);
1763 }
1764 return rc;
1765 }
1766
1767 /* try to send first command in queue */
1768 spin_lock_irqsave(&cs->cmdlock, flags);
1769
1770 while ((cb = cs->cmdbuf) != NULL &&
1771 atomic_read(&ucs->basstate) & BS_ATREADY) {
1772 ucs->retry_cmd_out = 0;
1773 rc = atwrite_submit(cs, cb->buf, cb->len);
1774 if (unlikely(rc)) {
1775 retval = rc;
1776 complete_cb(cs);
1777 }
1778 }
1779
1780 spin_unlock_irqrestore(&cs->cmdlock, flags);
1781 return retval;
1782}
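/* Editor's note (hedged summary, not part of the original source): the loop
 * above submits at most one command buffer per ready cycle, because a
 * successful atwrite_submit() clears BS_ATREADY and thereby ends the while
 * condition; on submission failure BS_ATREADY stays set, the failed buffer is
 * completed (dropped) and the next one is tried. Successfully sent buffers are
 * unqueued later by write_command_callback() when the URB completes.
 */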
1783
1784/* gigaset_write_cmd
1785 * This function is called by the device independent part of the driver
1786 * to transmit an AT command string to the Gigaset device.
1787 * It encapsulates the device specific method for transmission over the
1788 * direct USB connection to the base.
1789 * The command string is added to the queue of commands to send, and
1790 * USB transmission is started if necessary.
1791 * parameters:
1792 * cs controller state structure
1793 * buf command string to send
1794 * len number of bytes to send (max. IF_WRITEBUF)
1795 * wake_tasklet tasklet to run when transmission is completed (NULL if none)
1796 * return value:
1797 * number of bytes queued on success
1798 * error code < 0 on error
1799 */
1800static int gigaset_write_cmd(struct cardstate *cs,
1801 const unsigned char *buf, int len,
1802 struct tasklet_struct *wake_tasklet)
1803{
1804 struct cmdbuf_t *cb;
1805 unsigned long flags;
1806 int status;
1807
1808 gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
1809 DEBUG_TRANSCMD : DEBUG_LOCKCMD,
1810 "CMD Transmit", len, buf, 0);
1811
1812 if (!atomic_read(&cs->connected)) {
1813 err("%s: not connected", __func__);
1814 return -ENODEV;
1815 }
1816
1817 if (len <= 0)
1818 return 0; /* nothing to do */
1819
1820 if (len > IF_WRITEBUF)
1821 len = IF_WRITEBUF;
1822 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
1823 err("%s: out of memory", __func__);
1824 return -ENOMEM;
1825 }
1826
1827 memcpy(cb->buf, buf, len);
1828 cb->len = len;
1829 cb->offset = 0;
1830 cb->next = NULL;
1831 cb->wake_tasklet = wake_tasklet;
1832
1833 spin_lock_irqsave(&cs->cmdlock, flags);
1834 cb->prev = cs->lastcmdbuf;
1835 if (cs->lastcmdbuf)
1836 cs->lastcmdbuf->next = cb;
1837 else {
1838 cs->cmdbuf = cb;
1839 cs->curlen = len;
1840 }
1841 cs->cmdbytes += len;
1842 cs->lastcmdbuf = cb;
1843 spin_unlock_irqrestore(&cs->cmdlock, flags);
1844
1845 status = start_cbsend(cs);
1846
1847 return status < 0 ? status : len;
1848}
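/* Editor's sketch (hedged, not part of the original source and not wired into
 * the build): a minimal, hypothetical caller within this file that queues an
 * AT command and is notified by a tasklet once the buffer has been sent. All
 * names prefixed example_ are invented for illustration.
 */
static struct tasklet_struct example_wake_tasklet;

static void example_cmd_done(unsigned long data)
{
	struct cardstate *cs = (struct cardstate *) data;

	/* unlocked read of cs->cmdbytes, debug output only */
	dbg(DEBUG_TRANSCMD, "AT command sent, %u command bytes still queued",
	    cs->cmdbytes);
}

static int example_send_at(struct cardstate *cs)
{
	static const unsigned char cmd[] = "AT\r";

	tasklet_init(&example_wake_tasklet, example_cmd_done,
		     (unsigned long) cs);
	/* returns the number of bytes queued, or a negative error code */
	return gigaset_write_cmd(cs, cmd, sizeof cmd - 1, &example_wake_tasklet);
}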
1849
1850/* gigaset_write_room
1851 * tty_driver.write_room interface routine
1852 * return number of characters the driver will accept to be written via gigaset_write_cmd
1853 * parameter:
1854 * controller state structure
1855 * return value:
1856 * number of characters
1857 */
1858static int gigaset_write_room(struct cardstate *cs)
1859{
1860 return IF_WRITEBUF;
1861}
1862
1863/* gigaset_chars_in_buffer
1864 * tty_driver.chars_in_buffer interface routine
1865 * return number of characters waiting to be sent
1866 * parameter:
1867 * controller state structure
1868 * return value:
1869 * number of characters
1870 */
1871static int gigaset_chars_in_buffer(struct cardstate *cs)
1872{
1873 unsigned long flags;
1874 unsigned bytes;
1875
1876 spin_lock_irqsave(&cs->cmdlock, flags);
1877 bytes = cs->cmdbytes;
1878 spin_unlock_irqrestore(&cs->cmdlock, flags);
1879
1880 return bytes;
1881}
1882
1883/* gigaset_brkchars
1884 * implementation of ioctl(GIGASET_BRKCHARS)
1885 * parameter:
1886 * controller state structure
1887 * return value:
1888 * -EINVAL (unimplemented function)
1889 */
1890static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
1891{
1892 return -EINVAL;
1893}
1894
1895
1896/* Device Initialization/Shutdown */
1897/* ============================== */
1898
1899/* Free hardware dependent part of the B channel structure
1900 * parameter:
1901 * bcs B channel structure
1902 * return value:
1903 * !=0 on success
1904 */
1905static int gigaset_freebcshw(struct bc_state *bcs)
1906{
1907 if (!bcs->hw.bas)
1908 return 0;
1909
1910 if (bcs->hw.bas->isooutbuf)
1911 kfree(bcs->hw.bas->isooutbuf);
1912 kfree(bcs->hw.bas);
1913 bcs->hw.bas = NULL;
1914 return 1;
1915}
1916
1917/* Initialize hardware dependent part of the B channel structure
1918 * parameter:
1919 * bcs B channel structure
1920 * return value:
1921 * !=0 on success
1922 */
1923static int gigaset_initbcshw(struct bc_state *bcs)
1924{
1925 int i;
1926 struct bas_bc_state *ubc;
1927
1928 bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL);
1929 if (!ubc) {
1930 err("could not allocate bas_bc_state");
1931 return 0;
1932 }
1933
1934 atomic_set(&ubc->running, 0);
1935 atomic_set(&ubc->corrbytes, 0);
1936 spin_lock_init(&ubc->isooutlock);
1937 for (i = 0; i < BAS_OUTURBS; ++i) {
1938 ubc->isoouturbs[i].urb = NULL;
1939 ubc->isoouturbs[i].bcs = bcs;
1940 }
1941 ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
1942 ubc->numsub = 0;
1943 if (!(ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL))) {
1944 err("could not allocate isochronous output buffer");
1945 kfree(ubc);
1946 bcs->hw.bas = NULL;
1947 return 0;
1948 }
1949 tasklet_init(&ubc->sent_tasklet,
1950 &write_iso_tasklet, (unsigned long) bcs);
1951
1952 spin_lock_init(&ubc->isoinlock);
1953 for (i = 0; i < BAS_INURBS; ++i)
1954 ubc->isoinurbs[i] = NULL;
1955 ubc->isoindone = NULL;
1956 ubc->loststatus = -EINPROGRESS;
1957 ubc->isoinlost = 0;
1958 ubc->seqlen = 0;
1959 ubc->inbyte = 0;
1960 ubc->inbits = 0;
1961 ubc->goodbytes = 0;
1962 ubc->alignerrs = 0;
1963 ubc->fcserrs = 0;
1964 ubc->frameerrs = 0;
1965 ubc->giants = 0;
1966 ubc->runts = 0;
1967 ubc->aborts = 0;
1968 ubc->shared0s = 0;
1969 ubc->stolen0s = 0;
1970 tasklet_init(&ubc->rcvd_tasklet,
1971 &read_iso_tasklet, (unsigned long) bcs);
1972 return 1;
1973}
1974
1975static void gigaset_reinitbcshw(struct bc_state *bcs)
1976{
1977 struct bas_bc_state *ubc = bcs->hw.bas;
1978
1979 atomic_set(&bcs->hw.bas->running, 0);
1980 atomic_set(&bcs->hw.bas->corrbytes, 0);
1981 bcs->hw.bas->numsub = 0;
1982 spin_lock_init(&ubc->isooutlock);
1983 spin_lock_init(&ubc->isoinlock);
1984 ubc->loststatus = -EINPROGRESS;
1985}
1986
1987static void gigaset_freecshw(struct cardstate *cs)
1988{
1989 struct bas_cardstate *ucs = cs->hw.bas;
1990
1991 del_timer(&ucs->timer_ctrl);
1992 del_timer(&ucs->timer_atrdy);
1993 del_timer(&ucs->timer_cmd_in);
1994
1995 kfree(cs->hw.bas);
1996}
1997
1998static int gigaset_initcshw(struct cardstate *cs)
1999{
2000 struct bas_cardstate *ucs;
2001
2002 cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL);
2003 if (!ucs)
2004 return 0;
2005
2006 ucs->urb_cmd_in = NULL;
2007 ucs->urb_cmd_out = NULL;
2008 ucs->rcvbuf = NULL;
2009 ucs->rcvbuf_size = 0;
2010
2011 spin_lock_init(&ucs->lock);
2012 ucs->pending = 0;
2013
2014 atomic_set(&ucs->basstate, 0);
2015 init_timer(&ucs->timer_ctrl);
2016 init_timer(&ucs->timer_atrdy);
2017 init_timer(&ucs->timer_cmd_in);
2018
2019 return 1;
2020}
2021
2022/* freeurbs
2023 * unlink and deallocate all URBs unconditionally
2024 * caller must make sure that no commands are still in progress
2025 * parameter:
2026 * cs controller state structure
2027 */
2028static void freeurbs(struct cardstate *cs)
2029{
2030 struct bas_cardstate *ucs;
2031 struct bas_bc_state *ubc;
2032 int i, j;
2033
2034 IFNULLRET(cs);
2035 ucs = cs->hw.bas;
2036 IFNULLRET(ucs);
2037
2038 for (j = 0; j < 2; ++j) {
2039 ubc = cs->bcs[j].hw.bas;
2040 IFNULLCONT(ubc);
2041 for (i = 0; i < BAS_OUTURBS; ++i)
2042 if (ubc->isoouturbs[i].urb) {
2043 usb_kill_urb(ubc->isoouturbs[i].urb);
2044 dbg(DEBUG_INIT,
2045 "%s: isoc output URB %d/%d unlinked",
2046 __func__, j, i);
2047 usb_free_urb(ubc->isoouturbs[i].urb);
2048 ubc->isoouturbs[i].urb = NULL;
2049 }
2050 for (i = 0; i < BAS_INURBS; ++i)
2051 if (ubc->isoinurbs[i]) {
2052 usb_kill_urb(ubc->isoinurbs[i]);
2053 dbg(DEBUG_INIT,
2054 "%s: isoc input URB %d/%d unlinked",
2055 __func__, j, i);
2056 usb_free_urb(ubc->isoinurbs[i]);
2057 ubc->isoinurbs[i] = NULL;
2058 }
2059 }
2060 if (ucs->urb_int_in) {
2061 usb_kill_urb(ucs->urb_int_in);
2062 dbg(DEBUG_INIT, "%s: interrupt input URB unlinked", __func__);
2063 usb_free_urb(ucs->urb_int_in);
2064 ucs->urb_int_in = NULL;
2065 }
2066 if (ucs->urb_cmd_out) {
2067 usb_kill_urb(ucs->urb_cmd_out);
2068 dbg(DEBUG_INIT, "%s: command output URB unlinked", __func__);
2069 usb_free_urb(ucs->urb_cmd_out);
2070 ucs->urb_cmd_out = NULL;
2071 }
2072 if (ucs->urb_cmd_in) {
2073 usb_kill_urb(ucs->urb_cmd_in);
2074 dbg(DEBUG_INIT, "%s: command input URB unlinked", __func__);
2075 usb_free_urb(ucs->urb_cmd_in);
2076 ucs->urb_cmd_in = NULL;
2077 }
2078 if (ucs->urb_ctrl) {
2079 usb_kill_urb(ucs->urb_ctrl);
2080 dbg(DEBUG_INIT, "%s: control output URB unlinked", __func__);
2081 usb_free_urb(ucs->urb_ctrl);
2082 ucs->urb_ctrl = NULL;
2083 }
2084}
2085
2086/* gigaset_probe
2087 * This function is called when a new USB device is connected.
2088 * It checks whether the new device is handled by this driver.
2089 */
2090static int gigaset_probe(struct usb_interface *interface,
2091 const struct usb_device_id *id)
2092{
2093 struct usb_host_interface *hostif;
2094 struct usb_device *udev = interface_to_usbdev(interface);
2095 struct cardstate *cs = NULL;
2096 struct bas_cardstate *ucs = NULL;
2097 struct bas_bc_state *ubc;
2098 struct usb_endpoint_descriptor *endpoint;
2099 int i, j;
2100 int ret;
2101
2102 IFNULLRETVAL(udev, -ENODEV);
2103
2104 dbg(DEBUG_ANY,
2105 "%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
2106 __func__, le16_to_cpu(udev->descriptor.idVendor),
2107 le16_to_cpu(udev->descriptor.idProduct));
2108
2109 /* See if the device offered us matches what we can accept */
2110 if ((le16_to_cpu(udev->descriptor.idVendor) != USB_GIGA_VENDOR_ID) ||
2111 (le16_to_cpu(udev->descriptor.idProduct) != USB_GIGA_PRODUCT_ID &&
2112 le16_to_cpu(udev->descriptor.idProduct) != USB_4175_PRODUCT_ID &&
2113 le16_to_cpu(udev->descriptor.idProduct) != USB_SX303_PRODUCT_ID &&
2114 le16_to_cpu(udev->descriptor.idProduct) != USB_SX353_PRODUCT_ID)) {
2115 dbg(DEBUG_ANY, "%s: unmatched ID - exiting", __func__);
2116 return -ENODEV;
2117 }
2118
2119 /* set required alternate setting */
2120 hostif = interface->cur_altsetting;
2121 if (hostif->desc.bAlternateSetting != 3) {
2122 dbg(DEBUG_ANY,
2123 "%s: wrong alternate setting %d - trying to switch",
2124 __func__, hostif->desc.bAlternateSetting);
2125 if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3) < 0) {
2126 warn("usb_set_interface failed, device %d interface %d altsetting %d",
2127 udev->devnum, hostif->desc.bInterfaceNumber,
2128 hostif->desc.bAlternateSetting);
2129 return -ENODEV;
2130 }
2131 hostif = interface->cur_altsetting;
2132 }
2133
2134	/* Reject application-specific interfaces

2135 */
2136 if (hostif->desc.bInterfaceClass != 255) {
2137 warn("%s: bInterfaceClass == %d",
2138 __func__, hostif->desc.bInterfaceClass);
2139 return -ENODEV;
2140 }
2141
2142 info("%s: Device matched (Vendor: 0x%x, Product: 0x%x)",
2143 __func__, le16_to_cpu(udev->descriptor.idVendor),
2144 le16_to_cpu(udev->descriptor.idProduct));
2145
2146 cs = gigaset_getunassignedcs(driver);
2147 if (!cs) {
2148 err("%s: no free cardstate", __func__);
2149 return -ENODEV;
2150 }
2151 ucs = cs->hw.bas;
2152 ucs->udev = udev;
2153 ucs->interface = interface;
2154
2155 /* allocate URBs:
2156 * - one for the interrupt pipe
2157 * - three for the different uses of the default control pipe
2158 * - three for each isochronous pipe
2159 */
2160 ucs->urb_int_in = usb_alloc_urb(0, SLAB_KERNEL);
2161 if (!ucs->urb_int_in) {
2162 err("No free urbs available");
2163 goto error;
2164 }
2165 ucs->urb_cmd_in = usb_alloc_urb(0, SLAB_KERNEL);
2166 if (!ucs->urb_cmd_in) {
2167 err("No free urbs available");
2168 goto error;
2169 }
2170 ucs->urb_cmd_out = usb_alloc_urb(0, SLAB_KERNEL);
2171 if (!ucs->urb_cmd_out) {
2172 err("No free urbs available");
2173 goto error;
2174 }
2175 ucs->urb_ctrl = usb_alloc_urb(0, SLAB_KERNEL);
2176 if (!ucs->urb_ctrl) {
2177 err("No free urbs available");
2178 goto error;
2179 }
2180
2181 for (j = 0; j < 2; ++j) {
2182 ubc = cs->bcs[j].hw.bas;
2183 for (i = 0; i < BAS_OUTURBS; ++i) {
2184 ubc->isoouturbs[i].urb =
2185 usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL);
2186 if (!ubc->isoouturbs[i].urb) {
2187 err("No free urbs available");
2188 goto error;
2189 }
2190 }
2191 for (i = 0; i < BAS_INURBS; ++i) {
2192 ubc->isoinurbs[i] =
2193 usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL);
2194 if (!ubc->isoinurbs[i]) {
2195 err("No free urbs available");
2196 goto error;
2197 }
2198 }
2199 }
2200
2201 ucs->rcvbuf = NULL;
2202 ucs->rcvbuf_size = 0;
2203
2204 /* Fill the interrupt urb and send it to the core */
2205 endpoint = &hostif->endpoint[0].desc;
2206 usb_fill_int_urb(ucs->urb_int_in, udev,
2207 usb_rcvintpipe(udev,
2208 (endpoint->bEndpointAddress) & 0x0f),
2209 ucs->int_in_buf, 3, read_int_callback, cs,
2210 endpoint->bInterval);
2211 ret = usb_submit_urb(ucs->urb_int_in, SLAB_KERNEL);
2212 if (ret) {
2213 err("could not submit interrupt URB: %s", get_usb_statmsg(ret));
2214 goto error;
2215 }
2216
2217 /* tell the device that the driver is ready */
2218 if ((ret = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0)) != 0)
2219 goto error;
2220
2221 /* tell common part that the device is ready */
2222 if (startmode == SM_LOCKED)
2223 atomic_set(&cs->mstate, MS_LOCKED);
2224 if (!gigaset_start(cs))
2225 goto error;
2226
2227 /* save address of controller structure */
2228 usb_set_intfdata(interface, cs);
2229
2230 /* set up device sysfs */
2231 gigaset_init_dev_sysfs(interface);
2232 return 0;
2233
2234error:
2235 freeurbs(cs);
2236 gigaset_unassign(cs);
2237 return -ENODEV;
2238}
2239
2240/* gigaset_disconnect
2241 * This function is called when the Gigaset base is unplugged.
2242 */
2243static void gigaset_disconnect(struct usb_interface *interface)
2244{
2245 struct cardstate *cs;
2246 struct bas_cardstate *ucs;
2247
2248 /* clear device sysfs */
2249 gigaset_free_dev_sysfs(interface);
2250
2251 cs = usb_get_intfdata(interface);
2252 usb_set_intfdata(interface, NULL);
2253
2254 IFNULLRET(cs);
2255 ucs = cs->hw.bas;
2256 IFNULLRET(ucs);
2257
2258 info("disconnecting GigaSet base");
2259 gigaset_stop(cs);
2260 freeurbs(cs);
2261 kfree(ucs->rcvbuf);
2262 ucs->rcvbuf = NULL;
2263 ucs->rcvbuf_size = 0;
2264 atomic_set(&ucs->basstate, 0);
2265 gigaset_unassign(cs);
2266}
2267
2268static struct gigaset_ops gigops = {
2269 gigaset_write_cmd,
2270 gigaset_write_room,
2271 gigaset_chars_in_buffer,
2272 gigaset_brkchars,
2273 gigaset_init_bchannel,
2274 gigaset_close_bchannel,
2275 gigaset_initbcshw,
2276 gigaset_freebcshw,
2277 gigaset_reinitbcshw,
2278 gigaset_initcshw,
2279 gigaset_freecshw,
2280 gigaset_set_modem_ctrl,
2281 gigaset_baud_rate,
2282 gigaset_set_line_ctrl,
2283 gigaset_isoc_send_skb,
2284 gigaset_isoc_input,
2285};
2286
2287/* bas_gigaset_init
2288 * This function is called after the kernel module is loaded.
2289 */
2290static int __init bas_gigaset_init(void)
2291{
2292 int result;
2293
2294	/* allocate memory for our driver state and initialize it */
2295 if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
2296 GIGASET_MODULENAME, GIGASET_DEVNAME,
2297 GIGASET_DEVFSNAME, &gigops,
2298 THIS_MODULE)) == NULL)
2299 goto error;
2300
2301	/* allocate memory for our device state and initialize it */
2302 cardstate = gigaset_initcs(driver, 2, 0, 0, cidmode, GIGASET_MODULENAME);
2303 if (!cardstate)
2304 goto error;
2305
2306 /* register this driver with the USB subsystem */
2307 result = usb_register(&gigaset_usb_driver);
2308 if (result < 0) {
2309 err("usb_register failed (error %d)", -result);
2310 goto error;
2311 }
2312
2313 info(DRIVER_AUTHOR);
2314 info(DRIVER_DESC);
2315 return 0;
2316
2317error: if (cardstate)
2318 gigaset_freecs(cardstate);
2319 cardstate = NULL;
2320 if (driver)
2321 gigaset_freedriver(driver);
2322 driver = NULL;
2323 return -1;
2324}
2325
2326/* bas_gigaset_exit
2327 * This function is called before the kernel module is unloaded.
2328 */
2329static void __exit bas_gigaset_exit(void)
2330{
2331 gigaset_blockdriver(driver); /* => probe will fail
2332 * => no gigaset_start any more
2333 */
2334
2335 gigaset_shutdown(cardstate);
2336 /* from now on, no isdn callback should be possible */
2337
2338 if (atomic_read(&cardstate->hw.bas->basstate) & BS_ATOPEN) {
2339 dbg(DEBUG_ANY, "closing AT channel");
2340 if (req_submit(cardstate->bcs,
2341 HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT) >= 0) {
2342 /* successfully submitted - wait for completion */
2343 //wait_event_interruptible(cs->initwait, !cs->hw.bas->pending);
2344 //FIXME need own wait queue? wakeup?
2345 }
2346 }
2347
2348 /* deregister this driver with the USB subsystem */
2349 usb_deregister(&gigaset_usb_driver);
2350 /* this will call the disconnect-callback */
2351 /* from now on, no disconnect/probe callback should be running */
2352
2353 gigaset_freecs(cardstate);
2354 cardstate = NULL;
2355 gigaset_freedriver(driver);
2356 driver = NULL;
2357}
2358
2359
2360module_init(bas_gigaset_init);
2361module_exit(bas_gigaset_exit);
2362
2363MODULE_AUTHOR(DRIVER_AUTHOR);
2364MODULE_DESCRIPTION(DRIVER_DESC);
2365MODULE_LICENSE("GPL");
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
new file mode 100644
index 000000000000..64371995c1a9
--- /dev/null
+++ b/drivers/isdn/gigaset/common.c
@@ -0,0 +1,1203 @@
1/*
2 * Stuff used by all variants of the driver
3 *
4 * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>,
5 * Hansjoerg Lipp <hjlipp@web.de>,
6 * Tilman Schmidt <tilman@imap.cc>.
7 *
8 * =====================================================================
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 * =====================================================================
14 * ToDo: ...
15 * =====================================================================
16 * Version: $Id: common.c,v 1.104.4.22 2006/02/04 18:28:16 hjlipp Exp $
17 * =====================================================================
18 */
19
20#include "gigaset.h"
21#include <linux/ctype.h>
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24
25/* Version Information */
26#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers <Eilers.Stefan@epost.de>"
27#define DRIVER_DESC "Driver for Gigaset 307x"
28
29/* Module parameters */
30int gigaset_debuglevel = DEBUG_DEFAULT;
31EXPORT_SYMBOL_GPL(gigaset_debuglevel);
32module_param_named(debug, gigaset_debuglevel, int, S_IRUGO|S_IWUSR);
33MODULE_PARM_DESC(debug, "debug level");
34
35/*======================================================================
36 Prototypes of internal functions
37 */
38
39//static void gigaset_process_response(int resp_code, int parameter,
40// struct at_state_t *at_state,
41// unsigned char ** pstring);
42static struct cardstate *alloc_cs(struct gigaset_driver *drv);
43static void free_cs(struct cardstate *cs);
44static void make_valid(struct cardstate *cs, unsigned mask);
45static void make_invalid(struct cardstate *cs, unsigned mask);
46
47#define VALID_MINOR 0x01
48#define VALID_ID 0x02
49#define ASSIGNED 0x04
50
51/* bitwise byte inversion table */
52__u8 gigaset_invtab[256] = {
53 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
54 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
55 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
56 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
57 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
58 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
59 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
60 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
61 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
62 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
63 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
64 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
65 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
66 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
67 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
68 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
69 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
70 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
71 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
72 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
73 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
74 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
75 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
76 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
77 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
78 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
79 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
80 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
81 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
82 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
83 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
84 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff
85};
86EXPORT_SYMBOL_GPL(gigaset_invtab);
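/* Editor's sketch (hedged, not part of the original source): gigaset_invtab
 * maps each byte to its bit-reversed value, e.g. gigaset_invtab[0x35] == 0xac
 * (00110101b reversed is 10101100b). A hypothetical helper reversing the bit
 * order of every byte in a buffer in place:
 */
static inline void example_reverse_bits(unsigned char *buf, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; ++i)
		buf[i] = gigaset_invtab[buf[i]];
}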
87
88void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
89 size_t len, const unsigned char *buf, int from_user)
90{
91 unsigned char outbuf[80];
92 unsigned char inbuf[80 - 1];
93 size_t numin;
94 const unsigned char *in;
95 size_t space = sizeof outbuf - 1;
96 unsigned char *out = outbuf;
97
98 if (!from_user) {
99 in = buf;
100 numin = len;
101 } else {
102 numin = len < sizeof inbuf ? len : sizeof inbuf;
103 in = inbuf;
104 if (copy_from_user(inbuf, (const unsigned char __user *) buf, numin)) {
105 strncpy(inbuf, "<FAULT>", sizeof inbuf);
106 numin = sizeof "<FAULT>" - 1;
107 }
108 }
109
110 for (; numin && space; --numin, ++in) {
111 --space;
112 if (*in >= 32)
113 *out++ = *in;
114 else {
115 *out++ = '^';
116 if (space) {
117 *out++ = '@' + *in;
118 --space;
119 }
120 }
121 }
122 *out = 0;
123
124 dbg(level, "%s (%u bytes): %s", msg, (unsigned) len, outbuf);
125}
126EXPORT_SYMBOL_GPL(gigaset_dbg_buffer);
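/* Editor's note (hedged example, not part of the original source):
 * non-printable bytes are rendered as '^' followed by '@' + value, so a
 * carriage return (0x0d) appears as "^M". For instance
 *
 *	gigaset_dbg_buffer(DEBUG_TRANSCMD, "CMD Transmit", 3, "AT\r", 0);
 *
 * would log: CMD Transmit (3 bytes): AT^M
 */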
127
128static int setflags(struct cardstate *cs, unsigned flags, unsigned delay)
129{
130 int r;
131
132 r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags);
133 cs->control_state = flags;
134 if (r < 0)
135 return r;
136
137 if (delay) {
138 set_current_state(TASK_INTERRUPTIBLE);
139 schedule_timeout(delay * HZ / 1000);
140 }
141
142 return 0;
143}
144
145int gigaset_enterconfigmode(struct cardstate *cs)
146{
147 int i, r;
148
149 if (!atomic_read(&cs->connected)) {
150 err("not connected!");
151 return -1;
152 }
153
154 cs->control_state = TIOCM_RTS; //FIXME
155
156 r = setflags(cs, TIOCM_DTR, 200);
157 if (r < 0)
158 goto error;
159 r = setflags(cs, 0, 200);
160 if (r < 0)
161 goto error;
162 for (i = 0; i < 5; ++i) {
163 r = setflags(cs, TIOCM_RTS, 100);
164 if (r < 0)
165 goto error;
166 r = setflags(cs, 0, 100);
167 if (r < 0)
168 goto error;
169 }
170 r = setflags(cs, TIOCM_RTS|TIOCM_DTR, 800);
171 if (r < 0)
172 goto error;
173
174 return 0;
175
176error:
177 err("error %d on setuartbits!\n", -r);
178 cs->control_state = TIOCM_RTS|TIOCM_DTR; // FIXME is this a good value?
179 cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR);
180
181 return -1; //r
182}
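/* Editor's note (hedged summary, not part of the original source): the
 * sequence above toggles the modem control lines to put the base into
 * configuration mode: DTR alone for 200 ms, both lines down for 200 ms, five
 * RTS pulses of 100 ms on / 100 ms off, and finally RTS and DTR together for
 * 800 ms. setflags() performs each step and records the new line state in
 * cs->control_state.
 */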
183
184static int test_timeout(struct at_state_t *at_state)
185{
186 if (!at_state->timer_expires)
187 return 0;
188
189 if (--at_state->timer_expires) {
190 dbg(DEBUG_MCMD, "decreased timer of %p to %lu",
191 at_state, at_state->timer_expires);
192 return 0;
193 }
194
195 if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
196 atomic_read(&at_state->timer_index), NULL)) {
197 //FIXME what should we do?
198 }
199
200 return 1;
201}
202
203static void timer_tick(unsigned long data)
204{
205 struct cardstate *cs = (struct cardstate *) data;
206 unsigned long flags;
207 unsigned channel;
208 struct at_state_t *at_state;
209 int timeout = 0;
210
211 spin_lock_irqsave(&cs->lock, flags);
212
213 for (channel = 0; channel < cs->channels; ++channel)
214 if (test_timeout(&cs->bcs[channel].at_state))
215 timeout = 1;
216
217 if (test_timeout(&cs->at_state))
218 timeout = 1;
219
220 list_for_each_entry(at_state, &cs->temp_at_states, list)
221 if (test_timeout(at_state))
222 timeout = 1;
223
224 if (atomic_read(&cs->running)) {
225 mod_timer(&cs->timer, jiffies + GIG_TICK);
226 if (timeout) {
227 dbg(DEBUG_CMD, "scheduling timeout");
228 tasklet_schedule(&cs->event_tasklet);
229 }
230 }
231
232 spin_unlock_irqrestore(&cs->lock, flags);
233}
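/* Editor's note (hedged summary, not part of the original source):
 * at_state->timer_expires is a countdown in ticks of the cardstate timer:
 * timer_tick() runs every GIG_TICK jiffies, decrements each active countdown,
 * and when one reaches zero queues an EV_TIMEOUT event (carrying the current
 * timer_index) and schedules the event tasklet. A timeout of N therefore
 * expires roughly N * GIG_TICK jiffies after it was armed.
 */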
234
235int gigaset_get_channel(struct bc_state *bcs)
236{
237 unsigned long flags;
238
239 spin_lock_irqsave(&bcs->cs->lock, flags);
240 if (bcs->use_count) {
241 dbg(DEBUG_ANY, "could not allocate channel %d", bcs->channel);
242 spin_unlock_irqrestore(&bcs->cs->lock, flags);
243 return 0;
244 }
245 ++bcs->use_count;
246 bcs->busy = 1;
247 dbg(DEBUG_ANY, "allocated channel %d", bcs->channel);
248 spin_unlock_irqrestore(&bcs->cs->lock, flags);
249 return 1;
250}
251
252void gigaset_free_channel(struct bc_state *bcs)
253{
254 unsigned long flags;
255
256 spin_lock_irqsave(&bcs->cs->lock, flags);
257 if (!bcs->busy) {
258 dbg(DEBUG_ANY, "could not free channel %d", bcs->channel);
259 spin_unlock_irqrestore(&bcs->cs->lock, flags);
260 return;
261 }
262 --bcs->use_count;
263 bcs->busy = 0;
264 dbg(DEBUG_ANY, "freed channel %d", bcs->channel);
265 spin_unlock_irqrestore(&bcs->cs->lock, flags);
266}
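/* Editor's usage sketch (hedged, not part of the original source and not
 * wired into the build): a typical reserve/release pattern around a
 * connection. The example_ names are invented for illustration.
 */
static int example_setup_connection(struct bc_state *bcs)
{
	if (!gigaset_get_channel(bcs))
		return -EBUSY;		/* channel already in use */

	/* ... bring up the connection on bcs ... */
	return 0;
}

static void example_teardown_connection(struct bc_state *bcs)
{
	/* ... shut down the connection on bcs ... */
	gigaset_free_channel(bcs);
}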
267
268int gigaset_get_channels(struct cardstate *cs)
269{
270 unsigned long flags;
271 int i;
272
273 spin_lock_irqsave(&cs->lock, flags);
274 for (i = 0; i < cs->channels; ++i)
275 if (cs->bcs[i].use_count) {
276 spin_unlock_irqrestore(&cs->lock, flags);
277			dbg(DEBUG_ANY, "could not allocate all channels");
278 return 0;
279 }
280 for (i = 0; i < cs->channels; ++i)
281 ++cs->bcs[i].use_count;
282 spin_unlock_irqrestore(&cs->lock, flags);
283
284 dbg(DEBUG_ANY, "allocated all channels");
285
286 return 1;
287}
288
289void gigaset_free_channels(struct cardstate *cs)
290{
291 unsigned long flags;
292 int i;
293
294 dbg(DEBUG_ANY, "unblocking all channels");
295 spin_lock_irqsave(&cs->lock, flags);
296 for (i = 0; i < cs->channels; ++i)
297 --cs->bcs[i].use_count;
298 spin_unlock_irqrestore(&cs->lock, flags);
299}
300
301void gigaset_block_channels(struct cardstate *cs)
302{
303 unsigned long flags;
304 int i;
305
306 dbg(DEBUG_ANY, "blocking all channels");
307 spin_lock_irqsave(&cs->lock, flags);
308 for (i = 0; i < cs->channels; ++i)
309 ++cs->bcs[i].use_count;
310 spin_unlock_irqrestore(&cs->lock, flags);
311}
312
313static void clear_events(struct cardstate *cs)
314{
315 struct event_t *ev;
316 unsigned head, tail;
317
318 /* no locking needed (no reader/writer allowed) */
319
320 head = atomic_read(&cs->ev_head);
321 tail = atomic_read(&cs->ev_tail);
322
323 while (tail != head) {
324 ev = cs->events + head;
325 kfree(ev->ptr);
326
327 head = (head + 1) % MAX_EVENTS;
328 }
329
330 atomic_set(&cs->ev_head, tail);
331}
332
333struct event_t *gigaset_add_event(struct cardstate *cs,
334 struct at_state_t *at_state, int type,
335 void *ptr, int parameter, void *arg)
336{
337 unsigned long flags;
338 unsigned next, tail;
339 struct event_t *event = NULL;
340
341 spin_lock_irqsave(&cs->ev_lock, flags);
342
343 tail = atomic_read(&cs->ev_tail);
344 next = (tail + 1) % MAX_EVENTS;
345 if (unlikely(next == atomic_read(&cs->ev_head)))
346 err("event queue full");
347 else {
348 event = cs->events + tail;
349 event->type = type;
350 event->at_state = at_state;
351 event->cid = -1;
352 event->ptr = ptr;
353 event->arg = arg;
354 event->parameter = parameter;
355 atomic_set(&cs->ev_tail, next);
356 }
357
358 spin_unlock_irqrestore(&cs->ev_lock, flags);
359
360 return event;
361}
362EXPORT_SYMBOL_GPL(gigaset_add_event);
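/* Editor's usage sketch (hedged, not part of the original source): events are
 * queued with gigaset_add_event() and handed to the event layer by scheduling
 * the event tasklet, e.g. as gigaset_start() does further below:
 *
 *	if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL))
 *		return 0;	// queue full
 *	gigaset_schedule_event(cs);
 *
 * gigaset_add_event() returns NULL when the ring buffer is full (the next
 * slot would collide with ev_head), so callers should check the result.
 */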
363
364static void free_strings(struct at_state_t *at_state)
365{
366 int i;
367
368 for (i = 0; i < STR_NUM; ++i) {
369 kfree(at_state->str_var[i]);
370 at_state->str_var[i] = NULL;
371 }
372}
373
374static void clear_at_state(struct at_state_t *at_state)
375{
376 free_strings(at_state);
377}
378
379static void dealloc_at_states(struct cardstate *cs)
380{
381 struct at_state_t *cur, *next;
382
383 list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) {
384 list_del(&cur->list);
385 free_strings(cur);
386 kfree(cur);
387 }
388}
389
390static void gigaset_freebcs(struct bc_state *bcs)
391{
392 int i;
393
394 dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
395 if (!bcs->cs->ops->freebcshw(bcs)) {
396 dbg(DEBUG_INIT, "failed");
397 }
398
399 dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
400 clear_at_state(&bcs->at_state);
401 dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
402
403 if (bcs->skb)
404 dev_kfree_skb(bcs->skb);
405 for (i = 0; i < AT_NUM; ++i) {
406 kfree(bcs->commands[i]);
407 bcs->commands[i] = NULL;
408 }
409}
410
411void gigaset_freecs(struct cardstate *cs)
412{
413 int i;
414 unsigned long flags;
415
416 if (!cs)
417 return;
418
419 down(&cs->sem);
420
421 if (!cs->bcs)
422 goto f_cs;
423 if (!cs->inbuf)
424 goto f_bcs;
425
426 spin_lock_irqsave(&cs->lock, flags);
427 atomic_set(&cs->running, 0);
428 spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are not rescheduled below */
429
430 tasklet_kill(&cs->event_tasklet);
431 del_timer_sync(&cs->timer);
432
433 switch (cs->cs_init) {
434 default:
435 gigaset_if_free(cs);
436
437 dbg(DEBUG_INIT, "clearing hw");
438 cs->ops->freecshw(cs);
439
440 //FIXME cmdbuf
441
442 /* fall through */
443 case 2: /* error in initcshw */
444 /* Deregister from LL */
445 make_invalid(cs, VALID_ID);
446 dbg(DEBUG_INIT, "clearing iif");
447 gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
448
449 /* fall through */
450	case 1: /* error when registering with the LL */
451 dbg(DEBUG_INIT, "clearing at_state");
452 clear_at_state(&cs->at_state);
453 dealloc_at_states(cs);
454
455 /* fall through */
456 case 0: /* error in one call to initbcs */
457 for (i = 0; i < cs->channels; ++i) {
458 dbg(DEBUG_INIT, "clearing bcs[%d]", i);
459 gigaset_freebcs(cs->bcs + i);
460 }
461
462 clear_events(cs);
463 dbg(DEBUG_INIT, "freeing inbuf");
464 kfree(cs->inbuf);
465 }
466f_bcs: dbg(DEBUG_INIT, "freeing bcs[]");
467 kfree(cs->bcs);
468f_cs: dbg(DEBUG_INIT, "freeing cs");
469 up(&cs->sem);
470 free_cs(cs);
471}
472EXPORT_SYMBOL_GPL(gigaset_freecs);
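/* Editor's note (hedged summary, not part of the original source):
 * cs->cs_init records how far gigaset_initcs() got, so the switch above only
 * unwinds what was actually set up; each case falls through to the ones below:
 *	0 - a B channel setup failed: free B channel structures, events, inbuf
 *	1 - registration with the LL failed: additionally clear the AT states
 *	2 - initcshw() failed: additionally deregister from the LL
 *	3 - fully initialized: additionally free the interface and hardware state
 */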
473
474void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
475 struct cardstate *cs, int cid)
476{
477 int i;
478
479 INIT_LIST_HEAD(&at_state->list);
480 at_state->waiting = 0;
481 at_state->getstring = 0;
482 at_state->pending_commands = 0;
483 at_state->timer_expires = 0;
484 at_state->timer_active = 0;
485 atomic_set(&at_state->timer_index, 0);
486 atomic_set(&at_state->seq_index, 0);
487 at_state->ConState = 0;
488 for (i = 0; i < STR_NUM; ++i)
489 at_state->str_var[i] = NULL;
490 at_state->int_var[VAR_ZDLE] = 0;
491 at_state->int_var[VAR_ZCTP] = -1;
492 at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
493 at_state->cs = cs;
494 at_state->bcs = bcs;
495 at_state->cid = cid;
496 if (!cid)
497 at_state->replystruct = cs->tabnocid;
498 else
499 at_state->replystruct = cs->tabcid;
500}
501
502
503static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs,
504 struct cardstate *cs, int inputstate)
505/* inbuf->read must be allocated before! */
506{
507 atomic_set(&inbuf->head, 0);
508 atomic_set(&inbuf->tail, 0);
509 inbuf->cs = cs;
510 inbuf->bcs = bcs; /*base driver: NULL*/
511 inbuf->rcvbuf = NULL; //FIXME
512 inbuf->inputstate = inputstate;
513}
514
515/* Initialize the b-channel structure */
516static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
517 struct cardstate *cs, int channel)
518{
519 int i;
520
521 bcs->tx_skb = NULL; //FIXME -> hw part
522
523 skb_queue_head_init(&bcs->squeue);
524
525 bcs->corrupted = 0;
526 bcs->trans_down = 0;
527 bcs->trans_up = 0;
528
529 dbg(DEBUG_INIT, "setting up bcs[%d]->at_state", channel);
530 gigaset_at_init(&bcs->at_state, bcs, cs, -1);
531
532 bcs->rcvbytes = 0;
533
534#ifdef CONFIG_GIGASET_DEBUG
535 bcs->emptycount = 0;
536#endif
537
538 dbg(DEBUG_INIT, "allocating bcs[%d]->skb", channel);
539 bcs->fcs = PPP_INITFCS;
540 bcs->inputstate = 0;
541 if (cs->ignoreframes) {
542 bcs->inputstate |= INS_skip_frame;
543 bcs->skb = NULL;
544 } else if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
545 skb_reserve(bcs->skb, HW_HDR_LEN);
546 else {
547 warn("could not allocate skb");
548 bcs->inputstate |= INS_skip_frame;
549 }
550
551 bcs->channel = channel;
552 bcs->cs = cs;
553
554 bcs->chstate = 0;
555 bcs->use_count = 1;
556 bcs->busy = 0;
557 bcs->ignore = cs->ignoreframes;
558
559 for (i = 0; i < AT_NUM; ++i)
560 bcs->commands[i] = NULL;
561
562 dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel);
563 if (cs->ops->initbcshw(bcs))
564 return bcs;
565
566//error:
567 dbg(DEBUG_INIT, " failed");
568
569 dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel);
570 if (bcs->skb)
571 dev_kfree_skb(bcs->skb);
572
573 return NULL;
574}
575
576/* gigaset_initcs
577 * Allocate and initialize cardstate structure for Gigaset driver
578 * Calls hardware dependent gigaset_initcshw() function
579 * Calls B channel initialization function gigaset_initbcs() for each B channel
580 * parameters:
581 * drv hardware driver the device belongs to
582 * channels number of B channels supported by device
583 * onechannel !=0: B channel data and AT commands share one communication channel
584 * ==0: B channels have separate communication channels
585 * ignoreframes number of frames to ignore after setting up B channel
586 * cidmode !=0: start in CallID mode
587 * modulename name of driver module (used for I4L registration)
588 * return value:
589 * pointer to cardstate structure
590 */
591struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
592 int onechannel, int ignoreframes,
593 int cidmode, const char *modulename)
594{
595 struct cardstate *cs = NULL;
596 int i;
597
598 dbg(DEBUG_INIT, "allocating cs");
599 cs = alloc_cs(drv);
600 if (!cs)
601 goto error;
602 dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1);
603 cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL);
604 if (!cs->bcs)
605 goto error;
606 dbg(DEBUG_INIT, "allocating inbuf");
607 cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL);
608 if (!cs->inbuf)
609 goto error;
610
611 cs->cs_init = 0;
612 cs->channels = channels;
613 cs->onechannel = onechannel;
614 cs->ignoreframes = ignoreframes;
615 INIT_LIST_HEAD(&cs->temp_at_states);
616 atomic_set(&cs->running, 0);
617 init_timer(&cs->timer); /* clear next & prev */
618 spin_lock_init(&cs->ev_lock);
619 atomic_set(&cs->ev_tail, 0);
620 atomic_set(&cs->ev_head, 0);
621 init_MUTEX_LOCKED(&cs->sem);
622 tasklet_init(&cs->event_tasklet, &gigaset_handle_event, (unsigned long) cs);
623 atomic_set(&cs->commands_pending, 0);
624 cs->cur_at_seq = 0;
625 cs->gotfwver = -1;
626 cs->open_count = 0;
627 cs->tty = NULL;
628 atomic_set(&cs->cidmode, cidmode != 0);
629
630 //if(onechannel) { //FIXME
631 cs->tabnocid = gigaset_tab_nocid_m10x;
632 cs->tabcid = gigaset_tab_cid_m10x;
633 //} else {
634 // cs->tabnocid = gigaset_tab_nocid;
635 // cs->tabcid = gigaset_tab_cid;
636 //}
637
638 init_waitqueue_head(&cs->waitqueue);
639 cs->waiting = 0;
640
641 atomic_set(&cs->mode, M_UNKNOWN);
642 atomic_set(&cs->mstate, MS_UNINITIALIZED);
643
644 for (i = 0; i < channels; ++i) {
645 dbg(DEBUG_INIT, "setting up bcs[%d].read", i);
646 if (!gigaset_initbcs(cs->bcs + i, cs, i))
647 goto error;
648 }
649
650 ++cs->cs_init;
651
652 dbg(DEBUG_INIT, "setting up at_state");
653 spin_lock_init(&cs->lock);
654 gigaset_at_init(&cs->at_state, NULL, cs, 0);
655 cs->dle = 0;
656 cs->cbytes = 0;
657
658 dbg(DEBUG_INIT, "setting up inbuf");
659 if (onechannel) { //FIXME distinction necessary?
660 gigaset_inbuf_init(cs->inbuf, cs->bcs, cs, INS_command);
661 } else
662 gigaset_inbuf_init(cs->inbuf, NULL, cs, INS_command);
663
664 atomic_set(&cs->connected, 0);
665
666 dbg(DEBUG_INIT, "setting up cmdbuf");
667 cs->cmdbuf = cs->lastcmdbuf = NULL;
668 spin_lock_init(&cs->cmdlock);
669 cs->curlen = 0;
670 cs->cmdbytes = 0;
671
672	/*
673	 * Tell the ISDN4Linux subsystem (the LL) that a driver for a
674	 * USB device is available. Once this is done, "isdnctrl" can
675	 * bind a device to this driver even if no physical USB device
676	 * is currently connected. However, the device only becomes
677	 * accessible when a physical USB device is actually attached
678	 * (via "gigaset_probe").
679	 */
680 dbg(DEBUG_INIT, "setting up iif");
681 if (!gigaset_register_to_LL(cs, modulename)) {
682 err("register_isdn=>error");
683 goto error;
684 }
685
686 make_valid(cs, VALID_ID);
687 ++cs->cs_init;
688 dbg(DEBUG_INIT, "setting up hw");
689 if (!cs->ops->initcshw(cs))
690 goto error;
691
692 ++cs->cs_init;
693
694 gigaset_if_init(cs);
695
696 atomic_set(&cs->running, 1);
697 cs->timer.data = (unsigned long) cs;
698 cs->timer.function = timer_tick;
699 cs->timer.expires = jiffies + GIG_TICK;
700 /* FIXME: can jiffies increase too much until the timer is added?
701 * Same problem(?) with mod_timer() in timer_tick(). */
702 add_timer(&cs->timer);
703
704 dbg(DEBUG_INIT, "cs initialized!");
705 up(&cs->sem);
706 return cs;
707
708error: if (cs)
709 up(&cs->sem);
710 dbg(DEBUG_INIT, "failed");
711 gigaset_freecs(cs);
712 return NULL;
713}
714EXPORT_SYMBOL_GPL(gigaset_initcs);
715
716/* Reinitialize the B channel structure, e.g. called on hangup or disconnect */
717void gigaset_bcs_reinit(struct bc_state *bcs)
718{
719 struct sk_buff *skb;
720 struct cardstate *cs = bcs->cs;
721 unsigned long flags;
722
723 while ((skb = skb_dequeue(&bcs->squeue)) != NULL)
724 dev_kfree_skb(skb);
725
726 spin_lock_irqsave(&cs->lock, flags); //FIXME
727 clear_at_state(&bcs->at_state);
728 bcs->at_state.ConState = 0;
729 bcs->at_state.timer_active = 0;
730 bcs->at_state.timer_expires = 0;
731 bcs->at_state.cid = -1; /* No CID defined */
732 spin_unlock_irqrestore(&cs->lock, flags);
733
734 bcs->inputstate = 0;
735
736#ifdef CONFIG_GIGASET_DEBUG
737 bcs->emptycount = 0;
738#endif
739
740 bcs->fcs = PPP_INITFCS;
741 bcs->chstate = 0;
742
743 bcs->ignore = cs->ignoreframes;
744 if (bcs->ignore)
745 bcs->inputstate |= INS_skip_frame;
746
747
748 cs->ops->reinitbcshw(bcs);
749}
750
751static void cleanup_cs(struct cardstate *cs)
752{
753 struct cmdbuf_t *cb, *tcb;
754 int i;
755 unsigned long flags;
756
757 spin_lock_irqsave(&cs->lock, flags);
758
759 atomic_set(&cs->mode, M_UNKNOWN);
760 atomic_set(&cs->mstate, MS_UNINITIALIZED);
761
762 clear_at_state(&cs->at_state);
763 dealloc_at_states(cs);
764 free_strings(&cs->at_state);
765 gigaset_at_init(&cs->at_state, NULL, cs, 0);
766
767 kfree(cs->inbuf->rcvbuf);
768 cs->inbuf->rcvbuf = NULL;
769 cs->inbuf->inputstate = INS_command;
770 atomic_set(&cs->inbuf->head, 0);
771 atomic_set(&cs->inbuf->tail, 0);
772
773 cb = cs->cmdbuf;
774 while (cb) {
775 tcb = cb;
776 cb = cb->next;
777 kfree(tcb);
778 }
779 cs->cmdbuf = cs->lastcmdbuf = NULL;
780 cs->curlen = 0;
781 cs->cmdbytes = 0;
782 cs->gotfwver = -1;
783 cs->dle = 0;
784 cs->cur_at_seq = 0;
785 atomic_set(&cs->commands_pending, 0);
786 cs->cbytes = 0;
787
788 spin_unlock_irqrestore(&cs->lock, flags);
789
790 for (i = 0; i < cs->channels; ++i) {
791 gigaset_freebcs(cs->bcs + i);
792 if (!gigaset_initbcs(cs->bcs + i, cs, i))
793 break; //FIXME error handling
794 }
795
796 if (cs->waiting) {
797 cs->cmd_result = -ENODEV;
798 cs->waiting = 0;
799 wake_up_interruptible(&cs->waitqueue);
800 }
801}
802
803
804int gigaset_start(struct cardstate *cs)
805{
806 if (down_interruptible(&cs->sem))
807 return 0;
808 //info("USB device for Gigaset 307x now attached to Dev %d", ucs->minor);
809
810 atomic_set(&cs->connected, 1);
811
812 if (atomic_read(&cs->mstate) != MS_LOCKED) {
813 cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS);
814 cs->ops->baud_rate(cs, B115200);
815 cs->ops->set_line_ctrl(cs, CS8);
816 cs->control_state = TIOCM_DTR|TIOCM_RTS;
817 } else {
818 //FIXME use some saved values?
819 }
820
821 cs->waiting = 1;
822
823 if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
824 cs->waiting = 0;
825 //FIXME what should we do?
826 goto error;
827 }
828
829 dbg(DEBUG_CMD, "scheduling START");
830 gigaset_schedule_event(cs);
831
832 wait_event(cs->waitqueue, !cs->waiting);
833
834 up(&cs->sem);
835 return 1;
836
837error:
838 up(&cs->sem);
839 return 0;
840}
841EXPORT_SYMBOL_GPL(gigaset_start);
842
843void gigaset_shutdown(struct cardstate *cs)
844{
845 down(&cs->sem);
846
847 cs->waiting = 1;
848
849 if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) {
850 //FIXME what should we do?
851 goto exit;
852 }
853
854 dbg(DEBUG_CMD, "scheduling SHUTDOWN");
855 gigaset_schedule_event(cs);
856
857 if (wait_event_interruptible(cs->waitqueue, !cs->waiting)) {
858 warn("aborted");
859 //FIXME
860 }
861
862 if (atomic_read(&cs->mstate) != MS_LOCKED) {
863 //FIXME?
864 //gigaset_baud_rate(cs, B115200);
865 //gigaset_set_line_ctrl(cs, CS8);
866 //gigaset_set_modem_ctrl(cs, TIOCM_DTR|TIOCM_RTS, 0);
867 //cs->control_state = 0;
868 } else {
869 //FIXME use some saved values?
870 }
871
872 cleanup_cs(cs);
873
874exit:
875 up(&cs->sem);
876}
877EXPORT_SYMBOL_GPL(gigaset_shutdown);
878
879void gigaset_stop(struct cardstate *cs)
880{
881 down(&cs->sem);
882
883 atomic_set(&cs->connected, 0);
884
885 cs->waiting = 1;
886
887 if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) {
888 //FIXME what should we do?
889 goto exit;
890 }
891
892 dbg(DEBUG_CMD, "scheduling STOP");
893 gigaset_schedule_event(cs);
894
895 if (wait_event_interruptible(cs->waitqueue, !cs->waiting)) {
896 warn("aborted");
897 //FIXME
898 }
899
900 /* Tell the LL that the device is not available .. */
901 gigaset_i4l_cmd(cs, ISDN_STAT_STOP); // FIXME move to event layer?
902
903 cleanup_cs(cs);
904
905exit:
906 up(&cs->sem);
907}
908EXPORT_SYMBOL_GPL(gigaset_stop);
909
910static LIST_HEAD(drivers);
911static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED;
912
913struct cardstate *gigaset_get_cs_by_id(int id)
914{
915 unsigned long flags;
916	struct cardstate *ret = NULL;
917	struct cardstate *cs;
918 struct gigaset_driver *drv;
919 unsigned i;
920
921 spin_lock_irqsave(&driver_lock, flags);
922 list_for_each_entry(drv, &drivers, list) {
923 spin_lock(&drv->lock);
924 for (i = 0; i < drv->minors; ++i) {
925 if (drv->flags[i] & VALID_ID) {
926 cs = drv->cs + i;
927 if (cs->myid == id)
928 ret = cs;
929 }
930 if (ret)
931 break;
932 }
933 spin_unlock(&drv->lock);
934 if (ret)
935 break;
936 }
937 spin_unlock_irqrestore(&driver_lock, flags);
938 return ret;
939}
940
941void gigaset_debugdrivers(void)
942{
943 unsigned long flags;
944	struct cardstate *cs;
945 struct gigaset_driver *drv;
946 unsigned i;
947
948 spin_lock_irqsave(&driver_lock, flags);
949 list_for_each_entry(drv, &drivers, list) {
950 dbg(DEBUG_DRIVER, "driver %p", drv);
951 spin_lock(&drv->lock);
952 for (i = 0; i < drv->minors; ++i) {
953 dbg(DEBUG_DRIVER, " index %u", i);
954 dbg(DEBUG_DRIVER, " flags 0x%02x", drv->flags[i]);
955 cs = drv->cs + i;
956 dbg(DEBUG_DRIVER, " cardstate %p", cs);
957 dbg(DEBUG_DRIVER, " minor_index %u", cs->minor_index);
958 dbg(DEBUG_DRIVER, " driver %p", cs->driver);
959 dbg(DEBUG_DRIVER, " i4l id %d", cs->myid);
960 }
961 spin_unlock(&drv->lock);
962 }
963 spin_unlock_irqrestore(&driver_lock, flags);
964}
965EXPORT_SYMBOL_GPL(gigaset_debugdrivers);
966
967struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty)
968{
969 if (tty->index < 0 || tty->index >= tty->driver->num)
970 return NULL;
971 return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start);
972}
973
974struct cardstate *gigaset_get_cs_by_minor(unsigned minor)
975{
976 unsigned long flags;
977	struct cardstate *ret = NULL;
978 struct gigaset_driver *drv;
979 unsigned index;
980
981 spin_lock_irqsave(&driver_lock, flags);
982 list_for_each_entry(drv, &drivers, list) {
983 if (minor < drv->minor || minor >= drv->minor + drv->minors)
984 continue;
985 index = minor - drv->minor;
986 spin_lock(&drv->lock);
987 if (drv->flags[index] & VALID_MINOR)
988 ret = drv->cs + index;
989 spin_unlock(&drv->lock);
990 if (ret)
991 break;
992 }
993 spin_unlock_irqrestore(&driver_lock, flags);
994 return ret;
995}
996
997void gigaset_freedriver(struct gigaset_driver *drv)
998{
999 unsigned long flags;
1000
1001 spin_lock_irqsave(&driver_lock, flags);
1002 list_del(&drv->list);
1003 spin_unlock_irqrestore(&driver_lock, flags);
1004
1005 gigaset_if_freedriver(drv);
1006 module_put(drv->owner);
1007
1008 kfree(drv->cs);
1009 kfree(drv->flags);
1010 kfree(drv);
1011}
1012EXPORT_SYMBOL_GPL(gigaset_freedriver);
1013
1014/* gigaset_initdriver
1015 * Allocate and initialize gigaset_driver structure. Initialize interface.
1016 * parameters:
1017 * minor First minor number
1018 * minors Number of minors this driver can handle
1019 * procname Name of the driver (e.g. for /proc/tty/drivers, path in /proc/driver)
1020 * devname Name of the device files (prefix without minor number)
1021 * devfsname Devfs name of the device files without %d
1022 * return value:
1023 * Pointer to the gigaset_driver structure on success, NULL on failure.
1024 */
1025struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
1026 const char *procname,
1027 const char *devname,
1028 const char *devfsname,
1029 const struct gigaset_ops *ops,
1030 struct module *owner)
1031{
1032 struct gigaset_driver *drv;
1033 unsigned long flags;
1034 unsigned i;
1035
1036 drv = kmalloc(sizeof *drv, GFP_KERNEL);
1037 if (!drv)
1038 return NULL;
1039 if (!try_module_get(owner))
1040 return NULL;
1041
1042 drv->cs = NULL;
1043 drv->have_tty = 0;
1044 drv->minor = minor;
1045 drv->minors = minors;
1046 spin_lock_init(&drv->lock);
1047 drv->blocked = 0;
1048 drv->ops = ops;
1049 drv->owner = owner;
1050 INIT_LIST_HEAD(&drv->list);
1051
1052 drv->cs = kmalloc(minors * sizeof *drv->cs, GFP_KERNEL);
1053 if (!drv->cs)
1054 goto out1;
1055 drv->flags = kmalloc(minors * sizeof *drv->flags, GFP_KERNEL);
1056 if (!drv->flags)
1057 goto out2;
1058
1059 for (i = 0; i < minors; ++i) {
1060 drv->flags[i] = 0;
1061 drv->cs[i].driver = drv;
1062 drv->cs[i].ops = drv->ops;
1063 drv->cs[i].minor_index = i;
1064 }
1065
1066 gigaset_if_initdriver(drv, procname, devname, devfsname);
1067
1068 spin_lock_irqsave(&driver_lock, flags);
1069 list_add(&drv->list, &drivers);
1070 spin_unlock_irqrestore(&driver_lock, flags);
1071
1072 return drv;
1073
1074out2:
1075 kfree(drv->cs);
1076out1:
1077 kfree(drv);
1078 module_put(owner);
1079 return NULL;
1080}
1081EXPORT_SYMBOL_GPL(gigaset_initdriver);
1082
1083static struct cardstate *alloc_cs(struct gigaset_driver *drv)
1084{
1085 unsigned long flags;
1086 unsigned i;
1087	struct cardstate *ret = NULL;
1088
1089 spin_lock_irqsave(&drv->lock, flags);
1090 for (i = 0; i < drv->minors; ++i) {
1091 if (!(drv->flags[i] & VALID_MINOR)) {
1092 drv->flags[i] = VALID_MINOR;
1093 ret = drv->cs + i;
1094 }
1095 if (ret)
1096 break;
1097 }
1098 spin_unlock_irqrestore(&drv->lock, flags);
1099 return ret;
1100}
1101
1102static void free_cs(struct cardstate *cs)
1103{
1104 unsigned long flags;
1105 struct gigaset_driver *drv = cs->driver;
1106 spin_lock_irqsave(&drv->lock, flags);
1107 drv->flags[cs->minor_index] = 0;
1108 spin_unlock_irqrestore(&drv->lock, flags);
1109}
1110
1111static void make_valid(struct cardstate *cs, unsigned mask)
1112{
1113 unsigned long flags;
1114 struct gigaset_driver *drv = cs->driver;
1115 spin_lock_irqsave(&drv->lock, flags);
1116 drv->flags[cs->minor_index] |= mask;
1117 spin_unlock_irqrestore(&drv->lock, flags);
1118}
1119
1120static void make_invalid(struct cardstate *cs, unsigned mask)
1121{
1122 unsigned long flags;
1123 struct gigaset_driver *drv = cs->driver;
1124 spin_lock_irqsave(&drv->lock, flags);
1125 drv->flags[cs->minor_index] &= ~mask;
1126 spin_unlock_irqrestore(&drv->lock, flags);
1127}
1128
1129/* For drivers without fixed assignment device<->cardstate (usb) */
1130struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv)
1131{
1132 unsigned long flags;
1133 struct cardstate *cs = NULL;
1134 unsigned i;
1135
1136 spin_lock_irqsave(&drv->lock, flags);
1137 if (drv->blocked)
1138 goto exit;
1139 for (i = 0; i < drv->minors; ++i) {
1140 if ((drv->flags[i] & VALID_MINOR) &&
1141 !(drv->flags[i] & ASSIGNED)) {
1142 drv->flags[i] |= ASSIGNED;
1143 cs = drv->cs + i;
1144 break;
1145 }
1146 }
1147exit:
1148 spin_unlock_irqrestore(&drv->lock, flags);
1149 return cs;
1150}
1151EXPORT_SYMBOL_GPL(gigaset_getunassignedcs);
1152
1153void gigaset_unassign(struct cardstate *cs)
1154{
1155 unsigned long flags;
1156 unsigned *minor_flags;
1157 struct gigaset_driver *drv;
1158
1159 if (!cs)
1160 return;
1161 drv = cs->driver;
1162 spin_lock_irqsave(&drv->lock, flags);
1163 minor_flags = drv->flags + cs->minor_index;
1164 if (*minor_flags & VALID_MINOR)
1165 *minor_flags &= ~ASSIGNED;
1166 spin_unlock_irqrestore(&drv->lock, flags);
1167}
1168EXPORT_SYMBOL_GPL(gigaset_unassign);
1169
1170void gigaset_blockdriver(struct gigaset_driver *drv)
1171{
1172 unsigned long flags;
1173 spin_lock_irqsave(&drv->lock, flags);
1174 drv->blocked = 1;
1175 spin_unlock_irqrestore(&drv->lock, flags);
1176}
1177EXPORT_SYMBOL_GPL(gigaset_blockdriver);
1178
1179static int __init gigaset_init_module(void)
1180{
1181 /* in accordance with the principle of least astonishment,
1182 * setting the 'debug' parameter to 1 activates a sensible
1183 * set of default debug levels
1184 */
1185 if (gigaset_debuglevel == 1)
1186 gigaset_debuglevel = DEBUG_DEFAULT;
1187
1188 info(DRIVER_AUTHOR);
1189 info(DRIVER_DESC);
1190 return 0;
1191}
1192
1193static void __exit gigaset_exit_module(void)
1194{
1195}
1196
1197module_init(gigaset_init_module);
1198module_exit(gigaset_exit_module);
1199
1200MODULE_AUTHOR(DRIVER_AUTHOR);
1201MODULE_DESCRIPTION(DRIVER_DESC);
1202
1203MODULE_LICENSE("GPL");
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
new file mode 100644
index 000000000000..fdcb80bb21c7
--- /dev/null
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -0,0 +1,1983 @@
1/*
2 * Stuff used by all variants of the driver
3 *
4 * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>,
5 * Hansjoerg Lipp <hjlipp@web.de>,
6 * Tilman Schmidt <tilman@imap.cc>.
7 *
8 * =====================================================================
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 * =====================================================================
14 * ToDo: ...
15 * =====================================================================
16 * Version: $Id: ev-layer.c,v 1.4.2.18 2006/02/04 18:28:16 hjlipp Exp $
17 * =====================================================================
18 */
19
20#include "gigaset.h"
21
22/* ========================================================== */
23/* bit masks for pending commands */
24#define PC_INIT 0x004
25#define PC_DLE0 0x008
26#define PC_DLE1 0x010
27#define PC_CID 0x080
28#define PC_NOCID 0x100
29#define PC_HUP 0x002
30#define PC_DIAL 0x001
31#define PC_ACCEPT 0x040
32#define PC_SHUTDOWN 0x020
33#define PC_CIDMODE 0x200
34#define PC_UMMODE 0x400
35
36/* types of modem responses */
37#define RT_NOTHING 0
38#define RT_ZSAU 1
39#define RT_RING 2
40#define RT_NUMBER 3
41#define RT_STRING 4
42#define RT_HEX 5
43#define RT_ZCAU 6
44
45/* Possible ASCII responses */
46#define RSP_OK 0
47//#define RSP_BUSY 1
48//#define RSP_CONNECT 2
49#define RSP_ZGCI 3
50#define RSP_RING 4
51#define RSP_ZAOC 5
52#define RSP_ZCSTR 6
53#define RSP_ZCFGT 7
54#define RSP_ZCFG 8
55#define RSP_ZCCR 9
56#define RSP_EMPTY 10
57#define RSP_ZLOG 11
58#define RSP_ZCAU 12
59#define RSP_ZMWI 13
60#define RSP_ZABINFO 14
61#define RSP_ZSMLSTCHG 15
62#define RSP_VAR 100
63#define RSP_ZSAU (RSP_VAR + VAR_ZSAU)
64#define RSP_ZDLE (RSP_VAR + VAR_ZDLE)
65#define RSP_ZVLS (RSP_VAR + VAR_ZVLS)
66#define RSP_ZCTP (RSP_VAR + VAR_ZCTP)
67#define RSP_STR (RSP_VAR + VAR_NUM)
68#define RSP_NMBR (RSP_STR + STR_NMBR)
69#define RSP_ZCPN (RSP_STR + STR_ZCPN)
70#define RSP_ZCON (RSP_STR + STR_ZCON)
71#define RSP_ZBC (RSP_STR + STR_ZBC)
72#define RSP_ZHLC (RSP_STR + STR_ZHLC)
73#define RSP_ERROR -1 /* ERROR */
74#define RSP_WRONG_CID -2 /* unknown cid in cmd */
75//#define RSP_EMPTY -3
76#define RSP_UNKNOWN -4 /* unknown response */
77#define RSP_FAIL -5 /* internal error */
78#define RSP_INVAL -6 /* invalid response */
79
80#define RSP_NONE -19
81#define RSP_STRING -20
82#define RSP_NULL -21
83//#define RSP_RETRYFAIL -22
84//#define RSP_RETRY -23
85//#define RSP_SKIP -24
86#define RSP_INIT -27
87#define RSP_ANY -26
88#define RSP_LAST -28
89#define RSP_NODEV -9
90
91/* actions for process_response */
92#define ACT_NOTHING 0
93#define ACT_SETDLE1 1
94#define ACT_SETDLE0 2
95#define ACT_FAILINIT 3
96#define ACT_HUPMODEM 4
97#define ACT_CONFIGMODE 5
98#define ACT_INIT 6
99#define ACT_DLE0 7
100#define ACT_DLE1 8
101#define ACT_FAILDLE0 9
102#define ACT_FAILDLE1 10
103#define ACT_RING 11
104#define ACT_CID 12
105#define ACT_FAILCID 13
106#define ACT_SDOWN 14
107#define ACT_FAILSDOWN 15
108#define ACT_DEBUG 16
109#define ACT_WARN 17
110#define ACT_DIALING 18
111#define ACT_ABORTDIAL 19
112#define ACT_DISCONNECT 20
113#define ACT_CONNECT 21
114#define ACT_REMOTEREJECT 22
115#define ACT_CONNTIMEOUT 23
116#define ACT_REMOTEHUP 24
117#define ACT_ABORTHUP 25
118#define ACT_ICALL 26
119#define ACT_ACCEPTED 27
120#define ACT_ABORTACCEPT 28
121#define ACT_TIMEOUT 29
122#define ACT_GETSTRING 30
123#define ACT_SETVER 31
124#define ACT_FAILVER 32
125#define ACT_GOTVER 33
126#define ACT_TEST 34
127#define ACT_ERROR 35
128#define ACT_ABORTCID 36
129#define ACT_ZCAU 37
130#define ACT_NOTIFY_BC_DOWN 38
131#define ACT_NOTIFY_BC_UP 39
132#define ACT_DIAL 40
133#define ACT_ACCEPT 41
134#define ACT_PROTO_L2 42
135#define ACT_HUP 43
136#define ACT_IF_LOCK 44
137#define ACT_START 45
138#define ACT_STOP 46
139#define ACT_FAKEDLE0 47
140#define ACT_FAKEHUP 48
141#define ACT_FAKESDOWN 49
142#define ACT_SHUTDOWN 50
143#define ACT_PROC_CIDMODE 51
144#define ACT_UMODESET 52
145#define ACT_FAILUMODE 53
146#define ACT_CMODESET 54
147#define ACT_FAILCMODE 55
148#define ACT_IF_VER 56
149#define ACT_CMD 100
150
151/* at command sequences */
152#define SEQ_NONE 0
153#define SEQ_INIT 100
154#define SEQ_DLE0 200
155#define SEQ_DLE1 250
156#define SEQ_CID 300
157#define SEQ_NOCID 350
158#define SEQ_HUP 400
159#define SEQ_DIAL 600
160#define SEQ_ACCEPT 720
161#define SEQ_SHUTDOWN 500
162#define SEQ_CIDMODE 10
163#define SEQ_UMMODE 11
164
165
 166// 100: init, 200: dle0, 250: dle1, 300: get cid (dial), 350: "hup" (no cid), 400: hup, 500: reset, 600: dial, 700: ring
167struct reply_t gigaset_tab_nocid_m10x[]= /* with dle mode */
168{
169 /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
170
171 /* initialize device, set cid mode if possible */
172 //{RSP_INIT, -1, -1,100, 900, 0, {ACT_TEST}},
173 //{RSP_ERROR, 900,900, -1, 0, 0, {ACT_FAILINIT}},
174 //{RSP_OK, 900,900, -1, 100, INIT_TIMEOUT,
175 // {ACT_TIMEOUT}},
176
177 {RSP_INIT, -1, -1,SEQ_INIT, 100, INIT_TIMEOUT,
178 {ACT_TIMEOUT}}, /* wait until device is ready */
179
180 {EV_TIMEOUT, 100,100, -1, 101, 3, {0}, "Z\r"}, /* device in transparent mode? try to initialize it. */
181 {RSP_OK, 101,103, -1, 120, 5, {ACT_GETSTRING}, "+GMR\r"}, /* get version */
182
183 {EV_TIMEOUT, 101,101, -1, 102, 5, {0}, "Z\r"}, /* timeout => try once again. */
184 {RSP_ERROR, 101,101, -1, 102, 5, {0}, "Z\r"}, /* error => try once again. */
185
186 {EV_TIMEOUT, 102,102, -1, 108, 5, {ACT_SETDLE1}, "^SDLE=0\r"}, /* timeout => try again in DLE mode. */
187 {RSP_OK, 108,108, -1, 104,-1},
188 {RSP_ZDLE, 104,104, 0, 103, 5, {0}, "Z\r"},
189 {EV_TIMEOUT, 104,104, -1, 0, 0, {ACT_FAILINIT}},
190 {RSP_ERROR, 108,108, -1, 0, 0, {ACT_FAILINIT}},
191
192 {EV_TIMEOUT, 108,108, -1, 105, 2, {ACT_SETDLE0,
193 ACT_HUPMODEM,
194 ACT_TIMEOUT}}, /* still timeout => connection in unimodem mode? */
195 {EV_TIMEOUT, 105,105, -1, 103, 5, {0}, "Z\r"},
196
197 {RSP_ERROR, 102,102, -1, 107, 5, {0}, "^GETPRE\r"}, /* ERROR on ATZ => maybe in config mode? */
198 {RSP_OK, 107,107, -1, 0, 0, {ACT_CONFIGMODE}},
199 {RSP_ERROR, 107,107, -1, 0, 0, {ACT_FAILINIT}},
200 {EV_TIMEOUT, 107,107, -1, 0, 0, {ACT_FAILINIT}},
201
202 {RSP_ERROR, 103,103, -1, 0, 0, {ACT_FAILINIT}},
203 {EV_TIMEOUT, 103,103, -1, 0, 0, {ACT_FAILINIT}},
204
205 {RSP_STRING, 120,120, -1, 121,-1, {ACT_SETVER}},
206
207 {EV_TIMEOUT, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}},
208 {RSP_ERROR, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}},
209 {RSP_OK, 121,121, -1, 0, 0, {ACT_GOTVER, ACT_INIT}},
210#if 0
211 {EV_TIMEOUT, 120,121, -1, 130, 5, {ACT_FAILVER}, "^SGCI=1\r"},
212 {RSP_ERROR, 120,121, -1, 130, 5, {ACT_FAILVER}, "^SGCI=1\r"},
213 {RSP_OK, 121,121, -1, 130, 5, {ACT_GOTVER}, "^SGCI=1\r"},
214
215 {RSP_OK, 130,130, -1, 0, 0, {ACT_INIT}},
216 {RSP_ERROR, 130,130, -1, 0, 0, {ACT_FAILINIT}},
217 {EV_TIMEOUT, 130,130, -1, 0, 0, {ACT_FAILINIT}},
218#endif
219
220 /* leave dle mode */
221 {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
222 {RSP_OK, 201,201, -1, 202,-1},
223 //{RSP_ZDLE, 202,202, 0, 202, 0, {ACT_ERROR}},//DELETE
224 {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}},
225 {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}},
226 {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}},
227 {EV_TIMEOUT, 200,249, -1, 0, 0, {ACT_FAILDLE0}},
228
229 /* enter dle mode */
230 {RSP_INIT, 0, 0,SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
231 {RSP_OK, 251,251, -1, 252,-1},
232 {RSP_ZDLE, 252,252, 1, 0, 0, {ACT_DLE1}},
233 {RSP_ERROR, 250,299, -1, 0, 0, {ACT_FAILDLE1}},
234 {EV_TIMEOUT, 250,299, -1, 0, 0, {ACT_FAILDLE1}},
235
236 /* incoming call */
237 {RSP_RING, -1, -1, -1, -1,-1, {ACT_RING}},
238
239 /* get cid */
240 //{RSP_INIT, 0, 0,300, 901, 0, {ACT_TEST}},
241 //{RSP_ERROR, 901,901, -1, 0, 0, {ACT_FAILCID}},
242 //{RSP_OK, 901,901, -1, 301, 5, {0}, "^SGCI?\r"},
243
244 {RSP_INIT, 0, 0,SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
245 {RSP_OK, 301,301, -1, 302,-1},
246 {RSP_ZGCI, 302,302, -1, 0, 0, {ACT_CID}},
247 {RSP_ERROR, 301,349, -1, 0, 0, {ACT_FAILCID}},
248 {EV_TIMEOUT, 301,349, -1, 0, 0, {ACT_FAILCID}},
249
250 /* enter cid mode */
251 {RSP_INIT, 0, 0,SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
252 {RSP_OK, 150,150, -1, 0, 0, {ACT_CMODESET}},
253 {RSP_ERROR, 150,150, -1, 0, 0, {ACT_FAILCMODE}},
254 {EV_TIMEOUT, 150,150, -1, 0, 0, {ACT_FAILCMODE}},
255
256 /* leave cid mode */
257 //{RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "^SGCI=0\r"},
258 {RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "Z\r"},
259 {RSP_OK, 160,160, -1, 0, 0, {ACT_UMODESET}},
260 {RSP_ERROR, 160,160, -1, 0, 0, {ACT_FAILUMODE}},
261 {EV_TIMEOUT, 160,160, -1, 0, 0, {ACT_FAILUMODE}},
262
263 /* abort getting cid */
264 {RSP_INIT, 0, 0,SEQ_NOCID, 0, 0, {ACT_ABORTCID}},
265
266 /* reset */
267#if 0
268 {RSP_INIT, 0, 0,SEQ_SHUTDOWN, 503, 5, {0}, "^SGCI=0\r"},
269 {RSP_OK, 503,503, -1, 504, 5, {0}, "Z\r"},
270#endif
271 {RSP_INIT, 0, 0,SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
272 {RSP_OK, 504,504, -1, 0, 0, {ACT_SDOWN}},
273 {RSP_ERROR, 501,599, -1, 0, 0, {ACT_FAILSDOWN}},
274 {EV_TIMEOUT, 501,599, -1, 0, 0, {ACT_FAILSDOWN}},
275 {RSP_NODEV, 501,599, -1, 0, 0, {ACT_FAKESDOWN}},
276
277 {EV_PROC_CIDMODE,-1, -1, -1, -1,-1, {ACT_PROC_CIDMODE}}, //FIXME
278 {EV_IF_LOCK, -1, -1, -1, -1,-1, {ACT_IF_LOCK}}, //FIXME
279 {EV_IF_VER, -1, -1, -1, -1,-1, {ACT_IF_VER}}, //FIXME
280 {EV_START, -1, -1, -1, -1,-1, {ACT_START}}, //FIXME
281 {EV_STOP, -1, -1, -1, -1,-1, {ACT_STOP}}, //FIXME
282 {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME
283
284 /* misc. */
285 {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
286 {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
287 {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
288 {RSP_ZLOG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
289 {RSP_ZMWI, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
290 {RSP_ZABINFO, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
291 {RSP_ZSMLSTCHG,-1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
292
293 {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}},
294 {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}},
295 {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}},
296 {RSP_LAST}
297};
298
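The column layout documented at the top of the table (resp_code, min/max ConState, parameter, new ConState, timeout, action, command) is evaluated by process_event() further down: the first row whose response code, connection-state window and parameter all match the incoming event is applied, and the timeout column is given in seconds. The stand-alone C sketch below reproduces only that matching rule; the struct is simplified and the two rows are modeled loosely on the table above rather than taken from the driver.

/* Simplified userspace sketch of the reply_t matching rule used by
 * process_event(); types and example rows are illustrative only. */
#include <stdio.h>

#define RSP_ANY  -26
#define RSP_LAST -28
#define RSP_OK     0

struct row {
        int resp_code;      /* response to match, or RSP_ANY */
        int min_ConState;   /* lowest matching connection state, -1 = any */
        int max_ConState;   /* highest matching connection state, -1 = any */
        int parameter;      /* required parameter value, -1 = any */
        int new_ConState;   /* state to enter, -1 = keep current */
        int timeout;        /* seconds; 0 = none, -1 = keep running */
};

static const struct row table[] = {
        { RSP_OK,  101, 103, -1, 120, 5 },   /* modeled on the "+GMR" row */
        { RSP_ANY,  -1,  -1, -1,  -1, -1 },  /* catch-all */
        { RSP_LAST }
};

static const struct row *match(int type, int state, int param)
{
        const struct row *rep;

        for (rep = table; rep->resp_code != RSP_LAST; rep++)
                if ((rep->resp_code == RSP_ANY || rep->resp_code == type)
                    && state >= rep->min_ConState
                    && (rep->max_ConState < 0 || state <= rep->max_ConState)
                    && (rep->parameter < 0 || rep->parameter == param))
                        return rep;
        return NULL;
}

int main(void)
{
        const struct row *rep = match(RSP_OK, 102, 0);

        if (rep)
                printf("new state %d, timeout %d s\n",
                       rep->new_ConState, rep->timeout);
        return 0;
}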
299// 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, 400: hup, 750: accepted icall
300struct reply_t gigaset_tab_cid_m10x[] = /* for M10x */
301{
302 /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
303
304 /* dial */
305 {EV_DIAL, -1, -1, -1, -1,-1, {ACT_DIAL}}, //FIXME
306 {RSP_INIT, 0, 0,SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC}},
307 {RSP_OK, 601,601, -1, 602, 5, {ACT_CMD+AT_HLC}},
308 {RSP_NULL, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}},
309 {RSP_OK, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}},
310 {RSP_OK, 603,603, -1, 604, 5, {ACT_CMD+AT_TYPE}},
311 {RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}},
312 {RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
313 {RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}},
 314 {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" (terminal device mode) */
315 {RSP_OK, 607,607, -1, 608,-1},
316 //{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 608, 0, {ACT_ERROR}},//DELETE
317 {RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}},
318 {RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}},
319
320 {RSP_ZVLS, 608,608, 17, -1,-1, {ACT_DEBUG}},
321 {RSP_ZCTP, 609,609, -1, -1,-1, {ACT_DEBUG}},
322 {RSP_ZCPN, 609,609, -1, -1,-1, {ACT_DEBUG}},
323 {RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
324 {EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}},
325
326 /* dialing */
327 {RSP_ZCTP, 650,650, -1, -1,-1, {ACT_DEBUG}},
328 {RSP_ZCPN, 650,650, -1, -1,-1, {ACT_DEBUG}},
329 {RSP_ZSAU, 650,650,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, /* some devices don't send this */
330
331 /* connection established */
332 {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1
333 {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1
334
335 {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, //FIXME new constate + timeout
336
337 /* remote hangup */
338 {RSP_ZSAU, 650,650,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}},
339 {RSP_ZSAU, 750,750,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
340 {RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}},
341
342 /* hangup */
343 {EV_HUP, -1, -1, -1, -1,-1, {ACT_HUP}}, //FIXME
344 {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1?
345 {RSP_OK, 401,401, -1, 402, 5},
346 {RSP_ZVLS, 402,402, 0, 403, 5},
347 {RSP_ZSAU, 403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */
348 //{RSP_ZSAU, 403,403,ZSAU_NULL, 401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
349 {RSP_ZSAU, 403,403,ZSAU_NULL, 0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
350 {RSP_NODEV, 401,403, -1, 0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver?
351 {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}},
352 {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}},
353
354 {EV_BC_CLOSED, 0, 0, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME new constate + timeout
355
356 /* ring */
357 {RSP_ZBC, 700,700, -1, -1,-1, {0}},
358 {RSP_ZHLC, 700,700, -1, -1,-1, {0}},
359 {RSP_NMBR, 700,700, -1, -1,-1, {0}},
360 {RSP_ZCPN, 700,700, -1, -1,-1, {0}},
361 {RSP_ZCTP, 700,700, -1, -1,-1, {0}},
362 {EV_TIMEOUT, 700,700, -1, 720,720, {ACT_ICALL}},
363 {EV_BC_CLOSED,720,720, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}},
364
365 /*accept icall*/
366 {EV_ACCEPT, -1, -1, -1, -1,-1, {ACT_ACCEPT}}, //FIXME
367 {RSP_INIT, 720,720,SEQ_ACCEPT, 721, 5, {ACT_CMD+AT_PROTO}},
368 {RSP_OK, 721,721, -1, 722, 5, {ACT_CMD+AT_ISO}},
 369 {RSP_OK, 722,722, -1, 723, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" (terminal device mode) */
370 {RSP_OK, 723,723, -1, 724, 5, {0}},
371 {RSP_ZVLS, 724,724, 17, 750,50, {ACT_ACCEPTED}},
372 {RSP_ERROR, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}},
373 {EV_TIMEOUT, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}},
374 {RSP_ZSAU, 700,729,ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT}},
375 {RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}},
376 {RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}},
377
378 {EV_TIMEOUT, 750,750, -1, 0, 0, {ACT_CONNTIMEOUT}},
379
380 /* misc. */
381 {EV_PROTO_L2, -1, -1, -1, -1,-1, {ACT_PROTO_L2}}, //FIXME
382
383 {RSP_ZCON, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
384 {RSP_ZCCR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
385 {RSP_ZAOC, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
386 {RSP_ZCSTR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
387
388 {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}},
389 {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}},
390 {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}},
391 {RSP_LAST}
392};
393
394
395#if 0
 396static struct reply_t tab_nocid[]= /* no dle mode */ //FIXME carry over changes from the tables above
397{
398 /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
399
400 {RSP_ANY, -1, -1, -1, -1,-1, ACT_WARN, NULL},
401 {RSP_LAST,0,0,0,0,0,0}
402};
403
 404static struct reply_t tab_cid[] = /* no dle mode */ //FIXME carry over changes from the tables above
405{
406 /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */
407
408 {RSP_ANY, -1, -1, -1, -1,-1, ACT_WARN, NULL},
409 {RSP_LAST,0,0,0,0,0,0}
410};
411#endif
412
413static struct resp_type_t resp_type[]=
414{
415 /*{"", RSP_EMPTY, RT_NOTHING},*/
416 {"OK", RSP_OK, RT_NOTHING},
417 {"ERROR", RSP_ERROR, RT_NOTHING},
418 {"ZSAU", RSP_ZSAU, RT_ZSAU},
419 {"ZCAU", RSP_ZCAU, RT_ZCAU},
420 {"RING", RSP_RING, RT_RING},
421 {"ZGCI", RSP_ZGCI, RT_NUMBER},
422 {"ZVLS", RSP_ZVLS, RT_NUMBER},
423 {"ZCTP", RSP_ZCTP, RT_NUMBER},
424 {"ZDLE", RSP_ZDLE, RT_NUMBER},
425 {"ZCFGT", RSP_ZCFGT, RT_NUMBER},
426 {"ZCCR", RSP_ZCCR, RT_NUMBER},
427 {"ZMWI", RSP_ZMWI, RT_NUMBER},
428 {"ZHLC", RSP_ZHLC, RT_STRING},
429 {"ZBC", RSP_ZBC, RT_STRING},
430 {"NMBR", RSP_NMBR, RT_STRING},
431 {"ZCPN", RSP_ZCPN, RT_STRING},
432 {"ZCON", RSP_ZCON, RT_STRING},
433 {"ZAOC", RSP_ZAOC, RT_STRING},
434 {"ZCSTR", RSP_ZCSTR, RT_STRING},
435 {"ZCFG", RSP_ZCFG, RT_HEX},
436 {"ZLOG", RSP_ZLOG, RT_NOTHING},
437 {"ZABINFO", RSP_ZABINFO, RT_NOTHING},
438 {"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING},
439 {NULL,0,0}
440};
441
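gigaset_handle_modem_response() below looks up the first token of every response line in this table with a plain strcmp(); the resp_code becomes the event type and the RT_* value selects how the remaining parameters are parsed. A trimmed userspace sketch of that lookup; the RSP_ZSAU value uses a placeholder because VAR_ZSAU is defined elsewhere in gigaset.h.

#include <stdio.h>
#include <string.h>

/* a few entries of the resp_type[] table, reproduced for illustration */
#define RSP_OK      0
#define RSP_RING    4
#define RT_NOTHING  0
#define RT_ZSAU     1
#define RT_RING     2
#define RSP_VAR   100
#define RSP_ZSAU (RSP_VAR + 0)   /* placeholder for RSP_VAR + VAR_ZSAU */

struct resp_type_t {
        const char *response;
        int resp_code;
        int type;
};

static const struct resp_type_t resp_type[] = {
        { "OK",   RSP_OK,   RT_NOTHING },
        { "RING", RSP_RING, RT_RING    },
        { "ZSAU", RSP_ZSAU, RT_ZSAU    },
        { NULL, 0, 0 }
};

int main(void)
{
        const char *token = "ZSAU";   /* first token of a parsed response */
        const struct resp_type_t *rt;

        for (rt = resp_type; rt->response; rt++)
                if (!strcmp(token, rt->response))
                        break;

        if (rt->response)
                printf("resp_code %d, param type %d\n",
                       rt->resp_code, rt->type);
        else
                printf("unknown response\n");   /* maps to RSP_UNKNOWN */
        return 0;
}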
442/*
443 * Get integer from char-pointer
444 */
445static int isdn_getnum(char *p)
446{
447 int v = -1;
448
449 IFNULLRETVAL(p, -1);
450
451 dbg(DEBUG_TRANSCMD, "string: %s", p);
452
453 while (*p >= '0' && *p <= '9')
454 v = ((v < 0) ? 0 : (v * 10)) + (int) ((*p++) - '0');
455 if (*p)
 456 v = -1; /* invalid character */
457 return v;
458}
459
460/*
 461 * Get hexadecimal integer from char-pointer
462 */
463static int isdn_gethex(char *p)
464{
465 int v = 0;
466 int c;
467
468 IFNULLRETVAL(p, -1);
469
470 dbg(DEBUG_TRANSCMD, "string: %s", p);
471
472 if (!*p)
473 return -1;
474
475 do {
476 if (v > (INT_MAX - 15) / 16)
477 return -1;
478 c = *p;
479 if (c >= '0' && c <= '9')
480 c -= '0';
481 else if (c >= 'a' && c <= 'f')
482 c -= 'a' - 10;
483 else if (c >= 'A' && c <= 'F')
484 c -= 'A' - 10;
485 else
486 return -1;
487 v = v * 16 + c;
488 } while (*++p);
489
490 return v;
491}
492
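Both helpers accept only a string consisting entirely of decimal (respectively hexadecimal) digits and return -1 otherwise; isdn_gethex() additionally rejects values that would overflow an int. The small userspace harness below uses simplified copies of the two parsing loops (the dbg() and IFNULLRETVAL() calls are dropped) to illustrate the expected results.

#include <limits.h>
#include <stdio.h>

/* simplified copies of isdn_getnum()/isdn_gethex() without the
 * driver's debug output and NULL-check macros */
static int getnum(const char *p)
{
        int v = -1;

        while (*p >= '0' && *p <= '9')
                v = ((v < 0) ? 0 : v * 10) + (*p++ - '0');
        return *p ? -1 : v;
}

static int gethex(const char *p)
{
        int v = 0, c;

        if (!*p)
                return -1;
        do {
                if (v > (INT_MAX - 15) / 16)
                        return -1;
                c = *p;
                if (c >= '0' && c <= '9')
                        c -= '0';
                else if (c >= 'a' && c <= 'f')
                        c -= 'a' - 10;
                else if (c >= 'A' && c <= 'F')
                        c -= 'A' - 10;
                else
                        return -1;
                v = v * 16 + c;
        } while (*++p);
        return v;
}

int main(void)
{
        printf("%d %d %d\n", getnum("17"), getnum(""), getnum("17a"));
        /* prints: 17 -1 -1 */
        printf("%d %d\n", gethex("1b"), gethex("xyz"));
        /* prints: 27 -1 */
        return 0;
}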
493static inline void new_index(atomic_t *index, int max)
494{
495 if (atomic_read(index) == max) //FIXME race?
496 atomic_set(index, 0);
497 else
498 atomic_inc(index);
499}
500
501/* retrieve CID from parsed response
502 * returns 0 if no CID, -1 if invalid CID, or CID value 1..65535
503 */
504static int cid_of_response(char *s)
505{
506 int cid;
507
508 if (s[-1] != ';')
509 return 0; /* no CID separator */
510 cid = isdn_getnum(s);
511 if (cid < 0)
512 return 0; /* CID not numeric */
513 if (cid < 1 || cid > 65535)
514 return -1; /* CID out of range */
515 return cid;
516 //FIXME is ;<digit>+ at end of non-CID response really impossible?
517}
518
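cid_of_response() receives a pointer to the last parsed parameter and peeks one character back to check that it was introduced by ';'. The userspace sketch below replays the three documented outcomes on example strings; getnum() is a stand-in for isdn_getnum().

#include <stdio.h>

static int getnum(const char *p)       /* stand-in for isdn_getnum() */
{
        int v = -1;

        while (*p >= '0' && *p <= '9')
                v = ((v < 0) ? 0 : v * 10) + (*p++ - '0');
        return *p ? -1 : v;
}

/* same logic as cid_of_response(): 0 = no CID, -1 = invalid, else CID */
static int cid_of(const char *s)
{
        int cid;

        if (s[-1] != ';')
                return 0;
        cid = getnum(s);
        if (cid < 0)
                return 0;
        if (cid < 1 || cid > 65535)
                return -1;
        return cid;
}

int main(void)
{
        char ring[] = "RING;2";            /* CID 2 follows the ';'    */
        char zsau[] = "ZSAU=ACTIVE";       /* last token is not a CID  */
        char big[]  = "ZSAU=ACTIVE;70000"; /* numeric but out of range */

        printf("%d\n", cid_of(ring + 5));  /*  2 */
        printf("%d\n", cid_of(zsau + 5));  /*  0 */
        printf("%d\n", cid_of(big + 12));  /* -1 */
        return 0;
}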
519/* This function will be called via task queue from the callback handler.
 520 * We received a modem response and have to handle it.
521 */
522void gigaset_handle_modem_response(struct cardstate *cs)
523{
524 unsigned char *argv[MAX_REC_PARAMS + 1];
525 int params;
526 int i, j;
527 struct resp_type_t *rt;
528 int curarg;
529 unsigned long flags;
530 unsigned next, tail, head;
531 struct event_t *event;
532 int resp_code;
533 int param_type;
534 int abort;
535 size_t len;
536 int cid;
537 int rawstring;
538
539 IFNULLRET(cs);
540
541 len = cs->cbytes;
542 if (!len) {
543 /* ignore additional LFs/CRs (M10x config mode or cx100) */
544 dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[len]);
545 return;
546 }
547 cs->respdata[len] = 0;
548 dbg(DEBUG_TRANSCMD, "raw string: '%s'", cs->respdata);
549 argv[0] = cs->respdata;
550 params = 1;
551 if (cs->at_state.getstring) {
552 /* getstring only allowed without cid at the moment */
553 cs->at_state.getstring = 0;
554 rawstring = 1;
555 cid = 0;
556 } else {
557 /* parse line */
558 for (i = 0; i < len; i++)
559 switch (cs->respdata[i]) {
560 case ';':
561 case ',':
562 case '=':
563 if (params > MAX_REC_PARAMS) {
564 warn("too many parameters in response");
565 /* need last parameter (might be CID) */
566 params--;
567 }
568 argv[params++] = cs->respdata + i + 1;
569 }
570
571 rawstring = 0;
572 cid = params > 1 ? cid_of_response(argv[params-1]) : 0;
573 if (cid < 0) {
574 gigaset_add_event(cs, &cs->at_state, RSP_INVAL,
575 NULL, 0, NULL);
576 return;
577 }
578
579 for (j = 1; j < params; ++j)
580 argv[j][-1] = 0;
581
582 dbg(DEBUG_TRANSCMD, "CMD received: %s", argv[0]);
583 if (cid) {
584 --params;
585 dbg(DEBUG_TRANSCMD, "CID: %s", argv[params]);
586 }
587 dbg(DEBUG_TRANSCMD, "available params: %d", params - 1);
588 for (j = 1; j < params; j++)
589 dbg(DEBUG_TRANSCMD, "param %d: %s", j, argv[j]);
590 }
591
592 spin_lock_irqsave(&cs->ev_lock, flags);
593 head = atomic_read(&cs->ev_head);
594 tail = atomic_read(&cs->ev_tail);
595
596 abort = 1;
597 curarg = 0;
598 while (curarg < params) {
599 next = (tail + 1) % MAX_EVENTS;
600 if (unlikely(next == head)) {
601 err("event queue full");
602 break;
603 }
604
605 event = cs->events + tail;
606 event->at_state = NULL;
607 event->cid = cid;
608 event->ptr = NULL;
609 event->arg = NULL;
610 tail = next;
611
612 if (rawstring) {
613 resp_code = RSP_STRING;
614 param_type = RT_STRING;
615 } else {
616 for (rt = resp_type; rt->response; ++rt)
617 if (!strcmp(argv[curarg], rt->response))
618 break;
619
620 if (!rt->response) {
621 event->type = RSP_UNKNOWN;
622 warn("unknown modem response: %s",
623 argv[curarg]);
624 break;
625 }
626
627 resp_code = rt->resp_code;
628 param_type = rt->type;
629 ++curarg;
630 }
631
632 event->type = resp_code;
633
634 switch (param_type) {
635 case RT_NOTHING:
636 break;
637 case RT_RING:
638 if (!cid) {
639 err("received RING without CID!");
640 event->type = RSP_INVAL;
641 abort = 1;
642 } else {
643 event->cid = 0;
644 event->parameter = cid;
645 abort = 0;
646 }
647 break;
648 case RT_ZSAU:
649 if (curarg >= params) {
650 event->parameter = ZSAU_NONE;
651 break;
652 }
653 if (!strcmp(argv[curarg], "OUTGOING_CALL_PROCEEDING"))
654 event->parameter = ZSAU_OUTGOING_CALL_PROCEEDING;
655 else if (!strcmp(argv[curarg], "CALL_DELIVERED"))
656 event->parameter = ZSAU_CALL_DELIVERED;
657 else if (!strcmp(argv[curarg], "ACTIVE"))
658 event->parameter = ZSAU_ACTIVE;
659 else if (!strcmp(argv[curarg], "DISCONNECT_IND"))
660 event->parameter = ZSAU_DISCONNECT_IND;
661 else if (!strcmp(argv[curarg], "NULL"))
662 event->parameter = ZSAU_NULL;
663 else if (!strcmp(argv[curarg], "DISCONNECT_REQ"))
664 event->parameter = ZSAU_DISCONNECT_REQ;
665 else {
666 event->parameter = ZSAU_UNKNOWN;
667 warn("%s: unknown parameter %s after ZSAU",
668 __func__, argv[curarg]);
669 }
670 ++curarg;
671 break;
672 case RT_STRING:
673 if (curarg < params) {
674 len = strlen(argv[curarg]) + 1;
675 event->ptr = kmalloc(len, GFP_ATOMIC);
676 if (event->ptr)
677 memcpy(event->ptr, argv[curarg], len);
678 else
679 err("no memory for string!");
680 ++curarg;
681 }
682#ifdef CONFIG_GIGASET_DEBUG
683 if (!event->ptr)
684 dbg(DEBUG_CMD, "string==NULL");
685 else
686 dbg(DEBUG_CMD,
687 "string==%s", (char *) event->ptr);
688#endif
689 break;
690 case RT_ZCAU:
691 event->parameter = -1;
692 if (curarg + 1 < params) {
693 i = isdn_gethex(argv[curarg]);
694 j = isdn_gethex(argv[curarg + 1]);
695 if (i >= 0 && i < 256 && j >= 0 && j < 256)
696 event->parameter = (unsigned) i << 8
697 | j;
698 curarg += 2;
699 } else
700 curarg = params - 1;
701 break;
702 case RT_NUMBER:
703 case RT_HEX:
704 if (curarg < params) {
705 if (param_type == RT_HEX)
706 event->parameter =
707 isdn_gethex(argv[curarg]);
708 else
709 event->parameter =
710 isdn_getnum(argv[curarg]);
711 ++curarg;
712 } else
713 event->parameter = -1;
714#ifdef CONFIG_GIGASET_DEBUG
715 dbg(DEBUG_CMD, "parameter==%d", event->parameter);
716#endif
717 break;
718 }
719
720 if (resp_code == RSP_ZDLE)
721 cs->dle = event->parameter;
722
723 if (abort)
724 break;
725 }
726
727 atomic_set(&cs->ev_tail, tail);
728 spin_unlock_irqrestore(&cs->ev_lock, flags);
729
730 if (curarg != params)
731 dbg(DEBUG_ANY, "invalid number of processed parameters: %d/%d",
732 curarg, params);
733}
734EXPORT_SYMBOL_GPL(gigaset_handle_modem_response);
735
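For reference, the tokenisation performed above can be reproduced outside the kernel: the line is split at ';', ',' and '=', each separator is later overwritten with a NUL byte so that every argv[] entry becomes a C string, and a trailing ';<number>' is then taken as the CID. A compressed single-purpose sketch (no event queue, no getstring handling):

#include <stdio.h>
#include <string.h>

#define MAX_REC_PARAMS 10

int main(void)
{
        char resp[] = "ZSAU=ACTIVE;2";   /* example raw response */
        char *argv[MAX_REC_PARAMS + 1];
        int params = 1, i, len = strlen(resp);

        argv[0] = resp;
        for (i = 0; i < len; i++)
                switch (resp[i]) {
                case ';':
                case ',':
                case '=':
                        if (params > MAX_REC_PARAMS)
                                params--;   /* keep room for the last token */
                        argv[params++] = resp + i + 1;
                }

        /* terminate each token by overwriting the separator before it */
        for (i = 1; i < params; i++)
                argv[i][-1] = 0;

        for (i = 0; i < params; i++)
                printf("argv[%d] = \"%s\"\n", i, argv[i]);
        /* argv[0] = "ZSAU", argv[1] = "ACTIVE", argv[2] = "2" (the CID) */
        return 0;
}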
736/* disconnect
737 * process closing of connection associated with given AT state structure
738 */
739static void disconnect(struct at_state_t **at_state_p)
740{
741 unsigned long flags;
742 struct bc_state *bcs;
743 struct cardstate *cs;
744
745 IFNULLRET(at_state_p);
746 IFNULLRET(*at_state_p);
747 bcs = (*at_state_p)->bcs;
748 cs = (*at_state_p)->cs;
749 IFNULLRET(cs);
750
751 new_index(&(*at_state_p)->seq_index, MAX_SEQ_INDEX);
752
753 /* revert to selected idle mode */
754 if (!atomic_read(&cs->cidmode)) {
755 cs->at_state.pending_commands |= PC_UMMODE;
756 atomic_set(&cs->commands_pending, 1); //FIXME
757 dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
758 }
759
760 if (bcs) {
761 /* B channel assigned: invoke hardware specific handler */
762 cs->ops->close_bchannel(bcs);
763 } else {
764 /* no B channel assigned: just deallocate */
765 spin_lock_irqsave(&cs->lock, flags);
766 list_del(&(*at_state_p)->list);
767 kfree(*at_state_p);
768 *at_state_p = NULL;
769 spin_unlock_irqrestore(&cs->lock, flags);
770 }
771}
772
773/* get_free_channel
774 * get a free AT state structure: either one of those associated with the
775 * B channels of the Gigaset device, or if none of those is available,
 776 * a newly allocated one with bcs=NULL.
777 * The structure should be freed by calling disconnect() after use.
778 */
779static inline struct at_state_t *get_free_channel(struct cardstate *cs,
780 int cid)
781/* cids: >0: siemens-cid
782 0: without cid
783 -1: no cid assigned yet
784*/
785{
786 unsigned long flags;
787 int i;
788 struct at_state_t *ret;
789
790 for (i = 0; i < cs->channels; ++i)
791 if (gigaset_get_channel(cs->bcs + i)) {
792 ret = &cs->bcs[i].at_state;
793 ret->cid = cid;
794 return ret;
795 }
796
797 spin_lock_irqsave(&cs->lock, flags);
798 ret = kmalloc(sizeof(struct at_state_t), GFP_ATOMIC);
799 if (ret) {
800 gigaset_at_init(ret, NULL, cs, cid);
801 list_add(&ret->list, &cs->temp_at_states);
802 }
803 spin_unlock_irqrestore(&cs->lock, flags);
804 return ret;
805}
806
807static void init_failed(struct cardstate *cs, int mode)
808{
809 int i;
810 struct at_state_t *at_state;
811
812 cs->at_state.pending_commands &= ~PC_INIT;
813 atomic_set(&cs->mode, mode);
814 atomic_set(&cs->mstate, MS_UNINITIALIZED);
815 gigaset_free_channels(cs);
816 for (i = 0; i < cs->channels; ++i) {
817 at_state = &cs->bcs[i].at_state;
818 if (at_state->pending_commands & PC_CID) {
819 at_state->pending_commands &= ~PC_CID;
820 at_state->pending_commands |= PC_NOCID;
821 atomic_set(&cs->commands_pending, 1);
822 }
823 }
824}
825
826static void schedule_init(struct cardstate *cs, int state)
827{
828 if (cs->at_state.pending_commands & PC_INIT) {
829 dbg(DEBUG_CMD, "not scheduling PC_INIT again");
830 return;
831 }
832 atomic_set(&cs->mstate, state);
833 atomic_set(&cs->mode, M_UNKNOWN);
834 gigaset_block_channels(cs);
835 cs->at_state.pending_commands |= PC_INIT;
836 atomic_set(&cs->commands_pending, 1);
837 dbg(DEBUG_CMD, "Scheduling PC_INIT");
838}
839
840/* Add "AT" to a command, add the cid, dle encode it, send the result to the hardware. */
841static void send_command(struct cardstate *cs, const char *cmd, int cid,
842 int dle, gfp_t kmallocflags)
843{
844 size_t cmdlen, buflen;
845 char *cmdpos, *cmdbuf, *cmdtail;
846
847 cmdlen = strlen(cmd);
848 buflen = 11 + cmdlen;
849
850 if (likely(buflen > cmdlen)) {
851 cmdbuf = kmalloc(buflen, kmallocflags);
852 if (likely(cmdbuf != NULL)) {
853 cmdpos = cmdbuf + 9;
854 cmdtail = cmdpos + cmdlen;
855 memcpy(cmdpos, cmd, cmdlen);
856
857 if (cid > 0 && cid <= 65535) {
858 do {
859 *--cmdpos = '0' + cid % 10;
860 cid /= 10;
861 ++cmdlen;
862 } while (cid);
863 }
864
865 cmdlen += 2;
866 *--cmdpos = 'T';
867 *--cmdpos = 'A';
868
869 if (dle) {
870 cmdlen += 4;
871 *--cmdpos = '(';
872 *--cmdpos = 0x10;
873 *cmdtail++ = 0x10;
874 *cmdtail++ = ')';
875 }
876
877 cs->ops->write_cmd(cs, cmdpos, cmdlen, NULL);
878 kfree(cmdbuf);
879 } else
880 err("no memory for command buffer");
881 } else
882 err("overflow in buflen");
883}
884
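The buffer arithmetic above reserves 9 bytes of headroom (DLE byte plus '(', "AT", and up to five CID digits) and 2 bytes of tail room, which is where the 11 in buflen = 11 + cmdlen comes from; the string is then built backwards from the command. The userspace sketch below mirrors that construction and hexdumps the result for a hypothetical dial string; write_cmd() and the driver's error reporting are replaced by a printf()-based dump and early returns.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* simplified re-implementation of the buffer construction in
 * send_command(); returns a malloc'ed buffer and sets *lenp */
static char *build_cmd(const char *cmd, int cid, int dle, size_t *lenp)
{
        size_t cmdlen = strlen(cmd);
        char *buf = malloc(cmdlen + 11);
        char *pos, *tail;

        if (!buf)
                return NULL;
        pos = buf + 9;                 /* headroom for DLE '(' "AT" + CID */
        memcpy(pos, cmd, cmdlen);
        tail = pos + cmdlen;

        if (cid > 0 && cid <= 65535)
                do {
                        *--pos = '0' + cid % 10;
                        cid /= 10;
                        cmdlen++;
                } while (cid);

        *--pos = 'T';
        *--pos = 'A';
        cmdlen += 2;

        if (dle) {                     /* wrap in DLE '(' ... DLE ')' */
                *--pos = '(';
                *--pos = 0x10;
                *tail++ = 0x10;
                *tail++ = ')';
                cmdlen += 4;
        }

        memmove(buf, pos, cmdlen);
        *lenp = cmdlen;
        return buf;
}

int main(void)
{
        size_t len, i;
        char *buf = build_cmd("D00123\r", 2, 1, &len);  /* hypothetical dial */

        if (!buf)
                return 1;
        for (i = 0; i < len; i++)
                printf("%02x ", (unsigned char) buf[i]);
        printf("\n");   /* 10 28 41 54 32 44 30 30 31 32 33 0d 10 29 */
        free(buf);
        return 0;
}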
885static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid)
886{
887 struct at_state_t *at_state;
888 int i;
889 unsigned long flags;
890
891 if (cid == 0)
892 return &cs->at_state;
893
894 for (i = 0; i < cs->channels; ++i)
895 if (cid == cs->bcs[i].at_state.cid)
896 return &cs->bcs[i].at_state;
897
898 spin_lock_irqsave(&cs->lock, flags);
899
900 list_for_each_entry(at_state, &cs->temp_at_states, list)
901 if (cid == at_state->cid) {
902 spin_unlock_irqrestore(&cs->lock, flags);
903 return at_state;
904 }
905
906 spin_unlock_irqrestore(&cs->lock, flags);
907
908 return NULL;
909}
910
911static void bchannel_down(struct bc_state *bcs)
912{
913 IFNULLRET(bcs);
914 IFNULLRET(bcs->cs);
915
916 if (bcs->chstate & CHS_B_UP) {
917 bcs->chstate &= ~CHS_B_UP;
918 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP);
919 }
920
921 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
922 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
923 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
924 }
925
926 gigaset_free_channel(bcs);
927
928 gigaset_bcs_reinit(bcs);
929}
930
931static void bchannel_up(struct bc_state *bcs)
932{
933 IFNULLRET(bcs);
934
935 if (!(bcs->chstate & CHS_D_UP)) {
936 notice("%s: D channel not up", __func__);
937 bcs->chstate |= CHS_D_UP;
938 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
939 }
940
941 if (bcs->chstate & CHS_B_UP) {
942 notice("%s: B channel already up", __func__);
943 return;
944 }
945
946 bcs->chstate |= CHS_B_UP;
947 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN);
948}
949
950static void start_dial(struct at_state_t *at_state, void *data, int seq_index)
951{
952 struct bc_state *bcs = at_state->bcs;
953 struct cardstate *cs = at_state->cs;
954 int retval;
955
956 bcs->chstate |= CHS_NOTIFY_LL;
957 //atomic_set(&bcs->status, BCS_INIT);
958
959 if (atomic_read(&at_state->seq_index) != seq_index)
960 goto error;
961
962 retval = gigaset_isdn_setup_dial(at_state, data);
963 if (retval != 0)
964 goto error;
965
966
967 at_state->pending_commands |= PC_CID;
968 dbg(DEBUG_CMD, "Scheduling PC_CID");
969//#ifdef GIG_MAYINITONDIAL
970// if (atomic_read(&cs->MState) == MS_UNKNOWN) {
971// cs->at_state.pending_commands |= PC_INIT;
972// dbg(DEBUG_CMD, "Scheduling PC_INIT");
973// }
974//#endif
975 atomic_set(&cs->commands_pending, 1); //FIXME
976 return;
977
978error:
979 at_state->pending_commands |= PC_NOCID;
980 dbg(DEBUG_CMD, "Scheduling PC_NOCID");
981 atomic_set(&cs->commands_pending, 1); //FIXME
982 return;
983}
984
985static void start_accept(struct at_state_t *at_state)
986{
987 struct cardstate *cs = at_state->cs;
988 int retval;
989
990 retval = gigaset_isdn_setup_accept(at_state);
991
992 if (retval == 0) {
993 at_state->pending_commands |= PC_ACCEPT;
994 dbg(DEBUG_CMD, "Scheduling PC_ACCEPT");
995 atomic_set(&cs->commands_pending, 1); //FIXME
996 } else {
997 //FIXME
998 at_state->pending_commands |= PC_HUP;
999 dbg(DEBUG_CMD, "Scheduling PC_HUP");
1000 atomic_set(&cs->commands_pending, 1); //FIXME
1001 }
1002}
1003
1004static void do_start(struct cardstate *cs)
1005{
1006 gigaset_free_channels(cs);
1007
1008 if (atomic_read(&cs->mstate) != MS_LOCKED)
1009 schedule_init(cs, MS_INIT);
1010
1011 gigaset_i4l_cmd(cs, ISDN_STAT_RUN);
1012 // FIXME: not in locked mode
1013 // FIXME 2: only after init sequence
1014
1015 cs->waiting = 0;
1016 wake_up(&cs->waitqueue);
1017}
1018
1019static void finish_shutdown(struct cardstate *cs)
1020{
1021 if (atomic_read(&cs->mstate) != MS_LOCKED) {
1022 atomic_set(&cs->mstate, MS_UNINITIALIZED);
1023 atomic_set(&cs->mode, M_UNKNOWN);
1024 }
1025
1026 /* The rest is done by cleanup_cs () in user mode. */
1027
1028 cs->cmd_result = -ENODEV;
1029 cs->waiting = 0;
1030 wake_up_interruptible(&cs->waitqueue);
1031}
1032
1033static void do_shutdown(struct cardstate *cs)
1034{
1035 gigaset_block_channels(cs);
1036
1037 if (atomic_read(&cs->mstate) == MS_READY) {
1038 atomic_set(&cs->mstate, MS_SHUTDOWN);
1039 cs->at_state.pending_commands |= PC_SHUTDOWN;
1040 atomic_set(&cs->commands_pending, 1); //FIXME
1041 dbg(DEBUG_CMD, "Scheduling PC_SHUTDOWN"); //FIXME
1042 //gigaset_schedule_event(cs); //FIXME
1043 } else
1044 finish_shutdown(cs);
1045}
1046
1047static void do_stop(struct cardstate *cs)
1048{
1049 do_shutdown(cs);
1050}
1051
1052/* Entering cid mode or getting a cid failed:
1053 * try to initialize the device and try again.
1054 *
1055 * channel >= 0: getting cid for the channel failed
1056 * channel < 0: entering cid mode failed
1057 *
1058 * returns 0 on failure
1059 */
1060static int reinit_and_retry(struct cardstate *cs, int channel)
1061{
1062 int i;
1063
1064 if (--cs->retry_count <= 0)
1065 return 0;
1066
1067 for (i = 0; i < cs->channels; ++i)
1068 if (cs->bcs[i].at_state.cid > 0)
1069 return 0;
1070
1071 if (channel < 0)
1072 warn("Could not enter cid mode. Reinit device and try again.");
1073 else {
1074 warn("Could not get a call id. Reinit device and try again.");
1075 cs->bcs[channel].at_state.pending_commands |= PC_CID;
1076 }
1077 schedule_init(cs, MS_INIT);
1078 return 1;
1079}
1080
1081static int at_state_invalid(struct cardstate *cs,
1082 struct at_state_t *test_ptr)
1083{
1084 unsigned long flags;
1085 unsigned channel;
1086 struct at_state_t *at_state;
1087 int retval = 0;
1088
1089 spin_lock_irqsave(&cs->lock, flags);
1090
1091 if (test_ptr == &cs->at_state)
1092 goto exit;
1093
1094 list_for_each_entry(at_state, &cs->temp_at_states, list)
1095 if (at_state == test_ptr)
1096 goto exit;
1097
1098 for (channel = 0; channel < cs->channels; ++channel)
1099 if (&cs->bcs[channel].at_state == test_ptr)
1100 goto exit;
1101
1102 retval = 1;
1103exit:
1104 spin_unlock_irqrestore(&cs->lock, flags);
1105 return retval;
1106}
1107
1108static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
1109 struct at_state_t **p_at_state)
1110{
1111 int retval;
1112 struct at_state_t *at_state = *p_at_state;
1113
1114 retval = gigaset_isdn_icall(at_state);
1115 switch (retval) {
1116 case ICALL_ACCEPT:
1117 break;
1118 default:
1119 err("internal error: disposition=%d", retval);
1120 /* --v-- fall through --v-- */
1121 case ICALL_IGNORE:
1122 case ICALL_REJECT:
1123 /* hang up actively
1124 * Device doc says that would reject the call.
1125 * In fact it doesn't.
1126 */
1127 at_state->pending_commands |= PC_HUP;
1128 atomic_set(&cs->commands_pending, 1);
1129 break;
1130 }
1131}
1132
1133static int do_lock(struct cardstate *cs)
1134{
1135 int mode;
1136 int i;
1137
1138 switch (atomic_read(&cs->mstate)) {
1139 case MS_UNINITIALIZED:
1140 case MS_READY:
1141 if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) ||
1142 cs->at_state.pending_commands)
1143 return -EBUSY;
1144
1145 for (i = 0; i < cs->channels; ++i)
1146 if (cs->bcs[i].at_state.pending_commands)
1147 return -EBUSY;
1148
1149 if (!gigaset_get_channels(cs))
1150 return -EBUSY;
1151
1152 break;
1153 case MS_LOCKED:
1154 //retval = -EACCES;
1155 break;
1156 default:
1157 return -EBUSY;
1158 }
1159
1160 mode = atomic_read(&cs->mode);
1161 atomic_set(&cs->mstate, MS_LOCKED);
1162 atomic_set(&cs->mode, M_UNKNOWN);
1163 //FIXME reset card state / at states / bcs states
1164
1165 return mode;
1166}
1167
1168static int do_unlock(struct cardstate *cs)
1169{
1170 if (atomic_read(&cs->mstate) != MS_LOCKED)
1171 return -EINVAL;
1172
1173 atomic_set(&cs->mstate, MS_UNINITIALIZED);
1174 atomic_set(&cs->mode, M_UNKNOWN);
1175 gigaset_free_channels(cs);
1176 //FIXME reset card state / at states / bcs states
1177 if (atomic_read(&cs->connected))
1178 schedule_init(cs, MS_INIT);
1179
1180 return 0;
1181}
1182
1183static void do_action(int action, struct cardstate *cs,
1184 struct bc_state *bcs,
1185 struct at_state_t **p_at_state, char **pp_command,
1186 int *p_genresp, int *p_resp_code,
1187 struct event_t *ev)
1188{
1189 struct at_state_t *at_state = *p_at_state;
1190 struct at_state_t *at_state2;
1191 unsigned long flags;
1192
1193 int channel;
1194
1195 unsigned char *s, *e;
1196 int i;
1197 unsigned long val;
1198
1199 switch (action) {
1200 case ACT_NOTHING:
1201 break;
1202 case ACT_TIMEOUT:
1203 at_state->waiting = 1;
1204 break;
1205 case ACT_INIT:
1206 //FIXME setup everything
1207 cs->at_state.pending_commands &= ~PC_INIT;
1208 cs->cur_at_seq = SEQ_NONE;
1209 atomic_set(&cs->mode, M_UNIMODEM);
1210 if (!atomic_read(&cs->cidmode)) {
1211 gigaset_free_channels(cs);
1212 atomic_set(&cs->mstate, MS_READY);
1213 break;
1214 }
1215 cs->at_state.pending_commands |= PC_CIDMODE;
1216 atomic_set(&cs->commands_pending, 1); //FIXME
1217 dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
1218 break;
1219 case ACT_FAILINIT:
1220 warn("Could not initialize the device.");
1221 cs->dle = 0;
1222 init_failed(cs, M_UNKNOWN);
1223 cs->cur_at_seq = SEQ_NONE;
1224 break;
1225 case ACT_CONFIGMODE:
1226 init_failed(cs, M_CONFIG);
1227 cs->cur_at_seq = SEQ_NONE;
1228 break;
1229 case ACT_SETDLE1:
1230 cs->dle = 1;
1231 /* cs->inbuf[0].inputstate |= INS_command | INS_DLE_command; */
1232 cs->inbuf[0].inputstate &=
1233 ~(INS_command | INS_DLE_command);
1234 break;
1235 case ACT_SETDLE0:
1236 cs->dle = 0;
1237 cs->inbuf[0].inputstate =
1238 (cs->inbuf[0].inputstate & ~INS_DLE_command)
1239 | INS_command;
1240 break;
1241 case ACT_CMODESET:
1242 if (atomic_read(&cs->mstate) == MS_INIT ||
1243 atomic_read(&cs->mstate) == MS_RECOVER) {
1244 gigaset_free_channels(cs);
1245 atomic_set(&cs->mstate, MS_READY);
1246 }
1247 atomic_set(&cs->mode, M_CID);
1248 cs->cur_at_seq = SEQ_NONE;
1249 break;
1250 case ACT_UMODESET:
1251 atomic_set(&cs->mode, M_UNIMODEM);
1252 cs->cur_at_seq = SEQ_NONE;
1253 break;
1254 case ACT_FAILCMODE:
1255 cs->cur_at_seq = SEQ_NONE;
1256 if (atomic_read(&cs->mstate) == MS_INIT ||
1257 atomic_read(&cs->mstate) == MS_RECOVER) {
1258 init_failed(cs, M_UNKNOWN);
1259 break;
1260 }
1261 if (!reinit_and_retry(cs, -1))
1262 schedule_init(cs, MS_RECOVER);
1263 break;
1264 case ACT_FAILUMODE:
1265 cs->cur_at_seq = SEQ_NONE;
1266 schedule_init(cs, MS_RECOVER);
1267 break;
1268 case ACT_HUPMODEM:
1269 /* send "+++" (hangup in unimodem mode) */
1270 cs->ops->write_cmd(cs, "+++", 3, NULL);
1271 break;
1272 case ACT_RING:
1273 /* get fresh AT state structure for new CID */
1274 at_state2 = get_free_channel(cs, ev->parameter);
1275 if (!at_state2) {
1276 warn("RING ignored: "
1277 "could not allocate channel structure");
1278 break;
1279 }
1280
1281 /* initialize AT state structure
1282 * note that bcs may be NULL if no B channel is free
1283 */
1284 at_state2->ConState = 700;
1285 kfree(at_state2->str_var[STR_NMBR]);
1286 at_state2->str_var[STR_NMBR] = NULL;
1287 kfree(at_state2->str_var[STR_ZCPN]);
1288 at_state2->str_var[STR_ZCPN] = NULL;
1289 kfree(at_state2->str_var[STR_ZBC]);
1290 at_state2->str_var[STR_ZBC] = NULL;
1291 kfree(at_state2->str_var[STR_ZHLC]);
1292 at_state2->str_var[STR_ZHLC] = NULL;
1293 at_state2->int_var[VAR_ZCTP] = -1;
1294
1295 spin_lock_irqsave(&cs->lock, flags);
1296 at_state2->timer_expires = RING_TIMEOUT;
1297 at_state2->timer_active = 1;
1298 spin_unlock_irqrestore(&cs->lock, flags);
1299 break;
1300 case ACT_ICALL:
1301 handle_icall(cs, bcs, p_at_state);
1302 at_state = *p_at_state;
1303 break;
1304 case ACT_FAILSDOWN:
1305 warn("Could not shut down the device.");
1306 /* fall through */
1307 case ACT_FAKESDOWN:
1308 case ACT_SDOWN:
1309 cs->cur_at_seq = SEQ_NONE;
1310 finish_shutdown(cs);
1311 break;
1312 case ACT_CONNECT:
1313 if (cs->onechannel) {
1314 at_state->pending_commands |= PC_DLE1;
1315 atomic_set(&cs->commands_pending, 1);
1316 break;
1317 }
1318 bcs->chstate |= CHS_D_UP;
1319 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
1320 cs->ops->init_bchannel(bcs);
1321 break;
1322 case ACT_DLE1:
1323 cs->cur_at_seq = SEQ_NONE;
1324 bcs = cs->bcs + cs->curchannel;
1325
1326 bcs->chstate |= CHS_D_UP;
1327 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
1328 cs->ops->init_bchannel(bcs);
1329 break;
1330 case ACT_FAKEHUP:
1331 at_state->int_var[VAR_ZSAU] = ZSAU_NULL;
1332 /* fall through */
1333 case ACT_DISCONNECT:
1334 cs->cur_at_seq = SEQ_NONE;
1335 at_state->cid = -1;
1336 if (bcs && cs->onechannel && cs->dle) {
1337 /* Check for other open channels not needed:
1338 * DLE only used for M10x with one B channel.
1339 */
1340 at_state->pending_commands |= PC_DLE0;
1341 atomic_set(&cs->commands_pending, 1);
1342 } else {
1343 disconnect(p_at_state);
1344 at_state = *p_at_state;
1345 }
1346 break;
1347 case ACT_FAKEDLE0:
1348 at_state->int_var[VAR_ZDLE] = 0;
1349 cs->dle = 0;
1350 /* fall through */
1351 case ACT_DLE0:
1352 cs->cur_at_seq = SEQ_NONE;
1353 at_state2 = &cs->bcs[cs->curchannel].at_state;
1354 disconnect(&at_state2);
1355 break;
1356 case ACT_ABORTHUP:
1357 cs->cur_at_seq = SEQ_NONE;
1358 warn("Could not hang up.");
1359 at_state->cid = -1;
1360 if (bcs && cs->onechannel)
1361 at_state->pending_commands |= PC_DLE0;
1362 else {
1363 disconnect(p_at_state);
1364 at_state = *p_at_state;
1365 }
1366 schedule_init(cs, MS_RECOVER);
1367 break;
1368 case ACT_FAILDLE0:
1369 cs->cur_at_seq = SEQ_NONE;
1370 warn("Could not leave DLE mode.");
1371 at_state2 = &cs->bcs[cs->curchannel].at_state;
1372 disconnect(&at_state2);
1373 schedule_init(cs, MS_RECOVER);
1374 break;
1375 case ACT_FAILDLE1:
1376 cs->cur_at_seq = SEQ_NONE;
1377 warn("Could not enter DLE mode. Try to hang up.");
1378 channel = cs->curchannel;
1379 cs->bcs[channel].at_state.pending_commands |= PC_HUP;
1380 atomic_set(&cs->commands_pending, 1);
1381 break;
1382
1383 case ACT_CID: /* got cid; start dialing */
1384 cs->cur_at_seq = SEQ_NONE;
1385 channel = cs->curchannel;
1386 if (ev->parameter > 0 && ev->parameter <= 65535) {
1387 cs->bcs[channel].at_state.cid = ev->parameter;
1388 cs->bcs[channel].at_state.pending_commands |=
1389 PC_DIAL;
1390 atomic_set(&cs->commands_pending, 1);
1391 break;
1392 }
1393 /* fall through */
1394 case ACT_FAILCID:
1395 cs->cur_at_seq = SEQ_NONE;
1396 channel = cs->curchannel;
1397 if (!reinit_and_retry(cs, channel)) {
1398 warn("Could not get a call id. Dialing not possible");
1399 at_state2 = &cs->bcs[channel].at_state;
1400 disconnect(&at_state2);
1401 }
1402 break;
1403 case ACT_ABORTCID:
1404 cs->cur_at_seq = SEQ_NONE;
1405 at_state2 = &cs->bcs[cs->curchannel].at_state;
1406 disconnect(&at_state2);
1407 break;
1408
1409 case ACT_DIALING:
1410 case ACT_ACCEPTED:
1411 cs->cur_at_seq = SEQ_NONE;
1412 break;
1413
1414 case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */
1415 disconnect(p_at_state);
1416 at_state = *p_at_state;
1417 break;
1418
1419 case ACT_ABORTDIAL: /* error/timeout during dial preparation */
1420 cs->cur_at_seq = SEQ_NONE;
1421 at_state->pending_commands |= PC_HUP;
1422 atomic_set(&cs->commands_pending, 1);
1423 break;
1424
1425 case ACT_REMOTEREJECT: /* DISCONNECT_IND after dialling */
1426 case ACT_CONNTIMEOUT: /* timeout waiting for ZSAU=ACTIVE */
1427 case ACT_REMOTEHUP: /* DISCONNECT_IND with established connection */
1428 at_state->pending_commands |= PC_HUP;
1429 atomic_set(&cs->commands_pending, 1);
1430 break;
1431 case ACT_GETSTRING: /* warning: RING, ZDLE, ... are not handled properly any more */
1432 at_state->getstring = 1;
1433 break;
1434 case ACT_SETVER:
1435 if (!ev->ptr) {
1436 *p_genresp = 1;
1437 *p_resp_code = RSP_ERROR;
1438 break;
1439 }
1440 s = ev->ptr;
1441
1442 if (!strcmp(s, "OK")) {
1443 *p_genresp = 1;
1444 *p_resp_code = RSP_ERROR;
1445 break;
1446 }
1447
1448 for (i = 0; i < 4; ++i) {
1449 val = simple_strtoul(s, (char **) &e, 10);
1450 if (val > INT_MAX || e == s)
1451 break;
1452 if (i == 3) {
1453 if (*e)
1454 break;
1455 } else if (*e != '.')
1456 break;
1457 else
1458 s = e + 1;
1459 cs->fwver[i] = val;
1460 }
1461 if (i != 4) {
1462 *p_genresp = 1;
1463 *p_resp_code = RSP_ERROR;
1464 break;
1465 }
1466 /*at_state->getstring = 1;*/
1467 cs->gotfwver = 0;
1468 break;
1469 case ACT_GOTVER:
1470 if (cs->gotfwver == 0) {
1471 cs->gotfwver = 1;
1472 dbg(DEBUG_ANY,
1473 "firmware version %02d.%03d.%02d.%02d",
1474 cs->fwver[0], cs->fwver[1],
1475 cs->fwver[2], cs->fwver[3]);
1476 break;
1477 }
1478 /* fall through */
1479 case ACT_FAILVER:
1480 cs->gotfwver = -1;
1481 err("could not read firmware version.");
1482 break;
1483#ifdef CONFIG_GIGASET_DEBUG
1484 case ACT_ERROR:
1485 *p_genresp = 1;
1486 *p_resp_code = RSP_ERROR;
1487 break;
1488 case ACT_TEST:
1489 {
1490 static int count = 3; //2; //1;
1491 *p_genresp = 1;
1492 *p_resp_code = count ? RSP_ERROR : RSP_OK;
1493 if (count > 0)
1494 --count;
1495 }
1496 break;
1497#endif
1498 case ACT_DEBUG:
1499 dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
1500 __func__, ev->type, at_state->ConState);
1501 break;
1502 case ACT_WARN:
1503 warn("%s: resp_code %d in ConState %d!",
1504 __func__, ev->type, at_state->ConState);
1505 break;
1506 case ACT_ZCAU:
1507 warn("cause code %04x in connection state %d.",
1508 ev->parameter, at_state->ConState);
1509 break;
1510
1511 /* events from the LL */
1512 case ACT_DIAL:
1513 start_dial(at_state, ev->ptr, ev->parameter);
1514 break;
1515 case ACT_ACCEPT:
1516 start_accept(at_state);
1517 break;
1518 case ACT_PROTO_L2:
1519 dbg(DEBUG_CMD,
1520 "set protocol to %u", (unsigned) ev->parameter);
1521 at_state->bcs->proto2 = ev->parameter;
1522 break;
1523 case ACT_HUP:
1524 at_state->pending_commands |= PC_HUP;
1525 atomic_set(&cs->commands_pending, 1); //FIXME
1526 dbg(DEBUG_CMD, "Scheduling PC_HUP");
1527 break;
1528
1529 /* hotplug events */
1530 case ACT_STOP:
1531 do_stop(cs);
1532 break;
1533 case ACT_START:
1534 do_start(cs);
1535 break;
1536
1537 /* events from the interface */ // FIXME without ACT_xxxx?
1538 case ACT_IF_LOCK:
1539 cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
1540 cs->waiting = 0;
1541 wake_up(&cs->waitqueue);
1542 break;
1543 case ACT_IF_VER:
1544 if (ev->parameter != 0)
1545 cs->cmd_result = -EINVAL;
1546 else if (cs->gotfwver != 1) {
1547 cs->cmd_result = -ENOENT;
1548 } else {
1549 memcpy(ev->arg, cs->fwver, sizeof cs->fwver);
1550 cs->cmd_result = 0;
1551 }
1552 cs->waiting = 0;
1553 wake_up(&cs->waitqueue);
1554 break;
1555
1556 /* events from the proc file system */ // FIXME without ACT_xxxx?
1557 case ACT_PROC_CIDMODE:
1558 if (ev->parameter != atomic_read(&cs->cidmode)) {
1559 atomic_set(&cs->cidmode, ev->parameter);
1560 if (ev->parameter) {
1561 cs->at_state.pending_commands |= PC_CIDMODE;
1562 dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
1563 } else {
1564 cs->at_state.pending_commands |= PC_UMMODE;
1565 dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
1566 }
1567 atomic_set(&cs->commands_pending, 1);
1568 }
1569 cs->waiting = 0;
1570 wake_up(&cs->waitqueue);
1571 break;
1572
1573 /* events from the hardware drivers */
1574 case ACT_NOTIFY_BC_DOWN:
1575 bchannel_down(bcs);
1576 break;
1577 case ACT_NOTIFY_BC_UP:
1578 bchannel_up(bcs);
1579 break;
1580 case ACT_SHUTDOWN:
1581 do_shutdown(cs);
1582 break;
1583
1584
1585 default:
1586 if (action >= ACT_CMD && action < ACT_CMD + AT_NUM) {
1587 *pp_command = at_state->bcs->commands[action - ACT_CMD];
1588 if (!*pp_command) {
1589 *p_genresp = 1;
1590 *p_resp_code = RSP_NULL;
1591 }
1592 } else
1593 err("%s: action==%d!", __func__, action);
1594 }
1595}
1596
1597/* State machine to do the calling and hangup procedure */
1598static void process_event(struct cardstate *cs, struct event_t *ev)
1599{
1600 struct bc_state *bcs;
1601 char *p_command = NULL;
1602 struct reply_t *rep;
1603 int rcode;
1604 int genresp = 0;
1605 int resp_code = RSP_ERROR;
1606 int sendcid;
1607 struct at_state_t *at_state;
1608 int index;
1609 int curact;
1610 unsigned long flags;
1611
1612 IFNULLRET(cs);
1613 IFNULLRET(ev);
1614
1615 if (ev->cid >= 0) {
1616 at_state = at_state_from_cid(cs, ev->cid);
1617 if (!at_state) {
1618 gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID,
1619 NULL, 0, NULL);
1620 return;
1621 }
1622 } else {
1623 at_state = ev->at_state;
1624 if (at_state_invalid(cs, at_state)) {
1625 dbg(DEBUG_ANY,
1626 "event for invalid at_state %p", at_state);
1627 return;
1628 }
1629 }
1630
1631 dbg(DEBUG_CMD,
1632 "connection state %d, event %d", at_state->ConState, ev->type);
1633
1634 bcs = at_state->bcs;
1635 sendcid = at_state->cid;
1636
1637 /* Setting the pointer to the dial array */
1638 rep = at_state->replystruct;
1639 IFNULLRET(rep);
1640
1641 if (ev->type == EV_TIMEOUT) {
1642 if (ev->parameter != atomic_read(&at_state->timer_index)
1643 || !at_state->timer_active) {
1644 ev->type = RSP_NONE; /* old timeout */
1645 dbg(DEBUG_ANY, "old timeout");
1646 } else if (!at_state->waiting)
 1647 dbg(DEBUG_ANY, "timeout occurred");
1648 else
1649 dbg(DEBUG_ANY, "stopped waiting");
1650 }
1651
1652 /* if the response belongs to a variable in at_state->int_var[VAR_XXXX] or at_state->str_var[STR_XXXX], set it */
1653 if (ev->type >= RSP_VAR && ev->type < RSP_VAR + VAR_NUM) {
1654 index = ev->type - RSP_VAR;
1655 at_state->int_var[index] = ev->parameter;
1656 } else if (ev->type >= RSP_STR && ev->type < RSP_STR + STR_NUM) {
1657 index = ev->type - RSP_STR;
1658 kfree(at_state->str_var[index]);
1659 at_state->str_var[index] = ev->ptr;
1660 ev->ptr = NULL; /* prevent process_events() from deallocating ptr */
1661 }
1662
1663 if (ev->type == EV_TIMEOUT || ev->type == RSP_STRING)
1664 at_state->getstring = 0;
1665
1666 /* Search row in dial array which matches modem response and current constate */
1667 for (;; rep++) {
1668 rcode = rep->resp_code;
1669 /* dbg (DEBUG_ANY, "rcode %d", rcode); */
1670 if (rcode == RSP_LAST) {
1671 /* found nothing...*/
1672 warn("%s: rcode=RSP_LAST: resp_code %d in ConState %d!",
1673 __func__, ev->type, at_state->ConState);
1674 return;
1675 }
1676 if ((rcode == RSP_ANY || rcode == ev->type)
1677 && ((int) at_state->ConState >= rep->min_ConState)
1678 && (rep->max_ConState < 0
1679 || (int) at_state->ConState <= rep->max_ConState)
1680 && (rep->parameter < 0 || rep->parameter == ev->parameter))
1681 break;
1682 }
1683
1684 p_command = rep->command;
1685
1686 at_state->waiting = 0;
1687 for (curact = 0; curact < MAXACT; ++curact) {
 1688 /* The row tells us what we should do.
1689 */
1690 do_action(rep->action[curact], cs, bcs, &at_state, &p_command, &genresp, &resp_code, ev);
1691 if (!at_state)
1692 break; /* may be freed after disconnect */
1693 }
1694
1695 if (at_state) {
 1696 /* Advance to the connection state given by the matched table row */
1697 if (rep->new_ConState >= 0)
1698 at_state->ConState = rep->new_ConState;
1699
1700 if (genresp) {
1701 spin_lock_irqsave(&cs->lock, flags);
1702 at_state->timer_expires = 0; //FIXME
1703 at_state->timer_active = 0; //FIXME
1704 spin_unlock_irqrestore(&cs->lock, flags);
1705 gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL);
1706 } else {
1707 /* Send command to modem if not NULL... */
1708 if (p_command/*rep->command*/) {
1709 if (atomic_read(&cs->connected))
1710 send_command(cs, p_command,
1711 sendcid, cs->dle,
1712 GFP_ATOMIC);
1713 else
1714 gigaset_add_event(cs, at_state,
1715 RSP_NODEV,
1716 NULL, 0, NULL);
1717 }
1718
1719 spin_lock_irqsave(&cs->lock, flags);
1720 if (!rep->timeout) {
1721 at_state->timer_expires = 0;
1722 at_state->timer_active = 0;
1723 } else if (rep->timeout > 0) { /* new timeout */
1724 at_state->timer_expires = rep->timeout * 10;
1725 at_state->timer_active = 1;
1726 new_index(&at_state->timer_index,
1727 MAX_TIMER_INDEX);
1728 }
1729 spin_unlock_irqrestore(&cs->lock, flags);
1730 }
1731 }
1732}
1733
1734static void schedule_sequence(struct cardstate *cs,
1735 struct at_state_t *at_state, int sequence)
1736{
1737 cs->cur_at_seq = sequence;
1738 gigaset_add_event(cs, at_state, RSP_INIT, NULL, sequence, NULL);
1739}
1740
1741static void process_command_flags(struct cardstate *cs)
1742{
1743 struct at_state_t *at_state = NULL;
1744 struct bc_state *bcs;
1745 int i;
1746 int sequence;
1747
1748 IFNULLRET(cs);
1749
1750 atomic_set(&cs->commands_pending, 0);
1751
1752 if (cs->cur_at_seq) {
1753 dbg(DEBUG_CMD, "not searching scheduled commands: busy");
1754 return;
1755 }
1756
1757 dbg(DEBUG_CMD, "searching scheduled commands");
1758
1759 sequence = SEQ_NONE;
1760
1761 /* clear pending_commands and hangup channels on shutdown */
1762 if (cs->at_state.pending_commands & PC_SHUTDOWN) {
1763 cs->at_state.pending_commands &= ~PC_CIDMODE;
1764 for (i = 0; i < cs->channels; ++i) {
1765 bcs = cs->bcs + i;
1766 at_state = &bcs->at_state;
1767 at_state->pending_commands &=
1768 ~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
1769 if (at_state->cid > 0)
1770 at_state->pending_commands |= PC_HUP;
1771 if (at_state->pending_commands & PC_CID) {
1772 at_state->pending_commands |= PC_NOCID;
1773 at_state->pending_commands &= ~PC_CID;
1774 }
1775 }
1776 }
1777
1778 /* clear pending_commands and hangup channels on reset */
1779 if (cs->at_state.pending_commands & PC_INIT) {
1780 cs->at_state.pending_commands &= ~PC_CIDMODE;
1781 for (i = 0; i < cs->channels; ++i) {
1782 bcs = cs->bcs + i;
1783 at_state = &bcs->at_state;
1784 at_state->pending_commands &=
1785 ~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
1786 if (at_state->cid > 0)
1787 at_state->pending_commands |= PC_HUP;
1788 if (atomic_read(&cs->mstate) == MS_RECOVER) {
1789 if (at_state->pending_commands & PC_CID) {
1790 at_state->pending_commands |= PC_NOCID;
1791 at_state->pending_commands &= ~PC_CID;
1792 }
1793 }
1794 }
1795 }
1796
 1797 /* only switch back to unimodem mode if no commands are pending and no channels are up */
1798 if (cs->at_state.pending_commands == PC_UMMODE
1799 && !atomic_read(&cs->cidmode)
1800 && list_empty(&cs->temp_at_states)
1801 && atomic_read(&cs->mode) == M_CID) {
1802 sequence = SEQ_UMMODE;
1803 at_state = &cs->at_state;
1804 for (i = 0; i < cs->channels; ++i) {
1805 bcs = cs->bcs + i;
1806 if (bcs->at_state.pending_commands ||
1807 bcs->at_state.cid > 0) {
1808 sequence = SEQ_NONE;
1809 break;
1810 }
1811 }
1812 }
1813 cs->at_state.pending_commands &= ~PC_UMMODE;
1814 if (sequence != SEQ_NONE) {
1815 schedule_sequence(cs, at_state, sequence);
1816 return;
1817 }
1818
1819 for (i = 0; i < cs->channels; ++i) {
1820 bcs = cs->bcs + i;
1821 if (bcs->at_state.pending_commands & PC_HUP) {
1822 bcs->at_state.pending_commands &= ~PC_HUP;
1823 if (bcs->at_state.pending_commands & PC_CID) {
1824 /* not yet dialing: PC_NOCID is sufficient */
1825 bcs->at_state.pending_commands |= PC_NOCID;
1826 bcs->at_state.pending_commands &= ~PC_CID;
1827 } else {
1828 schedule_sequence(cs, &bcs->at_state, SEQ_HUP);
1829 return;
1830 }
1831 }
1832 if (bcs->at_state.pending_commands & PC_NOCID) {
1833 bcs->at_state.pending_commands &= ~PC_NOCID;
1834 cs->curchannel = bcs->channel;
1835 schedule_sequence(cs, &cs->at_state, SEQ_NOCID);
1836 return;
1837 } else if (bcs->at_state.pending_commands & PC_DLE0) {
1838 bcs->at_state.pending_commands &= ~PC_DLE0;
1839 cs->curchannel = bcs->channel;
1840 schedule_sequence(cs, &cs->at_state, SEQ_DLE0);
1841 return;
1842 }
1843 }
1844
1845 list_for_each_entry(at_state, &cs->temp_at_states, list)
1846 if (at_state->pending_commands & PC_HUP) {
1847 at_state->pending_commands &= ~PC_HUP;
1848 schedule_sequence(cs, at_state, SEQ_HUP);
1849 return;
1850 }
1851
1852 if (cs->at_state.pending_commands & PC_INIT) {
1853 cs->at_state.pending_commands &= ~PC_INIT;
1854 cs->dle = 0; //FIXME
1855 cs->inbuf->inputstate = INS_command;
1856 //FIXME reset card state (or -> LOCK0)?
1857 schedule_sequence(cs, &cs->at_state, SEQ_INIT);
1858 return;
1859 }
1860 if (cs->at_state.pending_commands & PC_SHUTDOWN) {
1861 cs->at_state.pending_commands &= ~PC_SHUTDOWN;
1862 schedule_sequence(cs, &cs->at_state, SEQ_SHUTDOWN);
1863 return;
1864 }
1865 if (cs->at_state.pending_commands & PC_CIDMODE) {
1866 cs->at_state.pending_commands &= ~PC_CIDMODE;
1867 if (atomic_read(&cs->mode) == M_UNIMODEM) {
1868#if 0
1869 cs->retry_count = 2;
1870#else
1871 cs->retry_count = 1;
1872#endif
1873 schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE);
1874 return;
1875 }
1876 }
1877
1878 for (i = 0; i < cs->channels; ++i) {
1879 bcs = cs->bcs + i;
1880 if (bcs->at_state.pending_commands & PC_DLE1) {
1881 bcs->at_state.pending_commands &= ~PC_DLE1;
1882 cs->curchannel = bcs->channel;
1883 schedule_sequence(cs, &cs->at_state, SEQ_DLE1);
1884 return;
1885 }
1886 if (bcs->at_state.pending_commands & PC_ACCEPT) {
1887 bcs->at_state.pending_commands &= ~PC_ACCEPT;
1888 schedule_sequence(cs, &bcs->at_state, SEQ_ACCEPT);
1889 return;
1890 }
1891 if (bcs->at_state.pending_commands & PC_DIAL) {
1892 bcs->at_state.pending_commands &= ~PC_DIAL;
1893 schedule_sequence(cs, &bcs->at_state, SEQ_DIAL);
1894 return;
1895 }
1896 if (bcs->at_state.pending_commands & PC_CID) {
1897 switch (atomic_read(&cs->mode)) {
1898 case M_UNIMODEM:
1899 cs->at_state.pending_commands |= PC_CIDMODE;
1900 dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
1901 atomic_set(&cs->commands_pending, 1);
1902 return;
1903#ifdef GIG_MAYINITONDIAL
1904 case M_UNKNOWN:
1905 schedule_init(cs, MS_INIT);
1906 return;
1907#endif
1908 }
1909 bcs->at_state.pending_commands &= ~PC_CID;
1910 cs->curchannel = bcs->channel;
1911#ifdef GIG_RETRYCID
1912 cs->retry_count = 2;
1913#else
1914 cs->retry_count = 1;
1915#endif
1916 schedule_sequence(cs, &cs->at_state, SEQ_CID);
1917 return;
1918 }
1919 }
1920}
1921
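The function above is essentially a fixed-priority dispatcher: per-channel hangups are serviced before anything else, shutdown and CID-mode switches come next, and dialing-related work runs last, each step turning a PC_* flag into a scheduled SEQ_* sequence. The sketch below compresses that idea to a single channel and four of the flags; it illustrates the ordering only, not the driver's full logic.

#include <stdio.h>

/* subset of the PC_* / SEQ_* values, for illustration only */
#define PC_HUP       0x002
#define PC_DIAL      0x001
#define PC_SHUTDOWN  0x020
#define PC_CIDMODE   0x200

#define SEQ_NONE       0
#define SEQ_HUP      400
#define SEQ_DIAL     600
#define SEQ_SHUTDOWN 500
#define SEQ_CIDMODE   10

/* pick the next AT sequence, highest priority first; a drastically
 * simplified stand-in for process_command_flags() */
static int next_sequence(unsigned *pending)
{
        if (*pending & PC_HUP) {
                *pending &= ~PC_HUP;
                return SEQ_HUP;
        }
        if (*pending & PC_SHUTDOWN) {
                *pending &= ~PC_SHUTDOWN;
                return SEQ_SHUTDOWN;
        }
        if (*pending & PC_CIDMODE) {
                *pending &= ~PC_CIDMODE;
                return SEQ_CIDMODE;
        }
        if (*pending & PC_DIAL) {
                *pending &= ~PC_DIAL;
                return SEQ_DIAL;
        }
        return SEQ_NONE;
}

int main(void)
{
        unsigned pending = PC_DIAL | PC_HUP;
        int seq;

        while ((seq = next_sequence(&pending)) != SEQ_NONE)
                printf("scheduling sequence %d\n", seq);
        /* prints 400 (hangup) before 600 (dial) */
        return 0;
}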
1922static void process_events(struct cardstate *cs)
1923{
1924 struct event_t *ev;
1925 unsigned head, tail;
1926 int i;
1927 int check_flags = 0;
1928 int was_busy;
1929
1930 /* no locking needed (only one reader) */
1931 head = atomic_read(&cs->ev_head);
1932
1933 for (i = 0; i < 2 * MAX_EVENTS; ++i) {
1934 tail = atomic_read(&cs->ev_tail);
1935 if (tail == head) {
1936 if (!check_flags && !atomic_read(&cs->commands_pending))
1937 break;
1938 check_flags = 0;
1939 process_command_flags(cs);
1940 tail = atomic_read(&cs->ev_tail);
1941 if (tail == head) {
1942 if (!atomic_read(&cs->commands_pending))
1943 break;
1944 continue;
1945 }
1946 }
1947
1948 ev = cs->events + head;
1949 was_busy = cs->cur_at_seq != SEQ_NONE;
1950 process_event(cs, ev);
1951 kfree(ev->ptr);
1952 ev->ptr = NULL;
1953 if (was_busy && cs->cur_at_seq == SEQ_NONE)
1954 check_flags = 1;
1955
1956 head = (head + 1) % MAX_EVENTS;
1957 atomic_set(&cs->ev_head, head);
1958 }
1959
1960 if (i == 2 * MAX_EVENTS) {
1961 err("infinite loop in process_events; aborting.");
1962 }
1963}
1964
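Events reach process_events() through the fixed-size ring buffer indexed by ev_head and ev_tail; producers such as gigaset_handle_modem_response() treat the queue as full when advancing the tail would hit the head. A minimal single-threaded sketch of that ring discipline, using plain unsigned counters where the driver uses atomic_t and a spinlock:

#include <stdio.h>

#define MAX_EVENTS 64

struct queue {
        int events[MAX_EVENTS];
        unsigned head, tail;    /* the driver keeps these in atomic_t */
};

/* producer side, as in the response handler: fail when the queue is full */
static int put_event(struct queue *q, int ev)
{
        unsigned next = (q->tail + 1) % MAX_EVENTS;

        if (next == q->head)
                return -1;              /* "event queue full" */
        q->events[q->tail] = ev;
        q->tail = next;
        return 0;
}

/* consumer side, as in process_events(): drain until head catches up */
static void drain(struct queue *q)
{
        while (q->head != q->tail) {
                printf("event %d\n", q->events[q->head]);
                q->head = (q->head + 1) % MAX_EVENTS;
        }
}

int main(void)
{
        struct queue q = { .head = 0, .tail = 0 };

        put_event(&q, 4);               /* e.g. RSP_RING */
        put_event(&q, 0);               /* e.g. RSP_OK   */
        drain(&q);                      /* prints 4, then 0 */
        return 0;
}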
1965/* tasklet scheduled on any event received from the Gigaset device
1966 * parameter:
1967 * data ISDN controller state structure
1968 */
1969void gigaset_handle_event(unsigned long data)
1970{
1971 struct cardstate *cs = (struct cardstate *) data;
1972
1973 IFNULLRET(cs);
1974 IFNULLRET(cs->inbuf);
1975
1976 /* handle incoming data on control/common channel */
1977 if (atomic_read(&cs->inbuf->head) != atomic_read(&cs->inbuf->tail)) {
1978 dbg(DEBUG_INTR, "processing new data");
1979 cs->ops->handle_input(cs->inbuf);
1980 }
1981
1982 process_events(cs);
1983}
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
new file mode 100644
index 000000000000..729edcdb6dac
--- /dev/null
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -0,0 +1,938 @@
1/* Siemens Gigaset 307x driver
2 * Common header file for all connection variants
3 *
4 * Written by Stefan Eilers <Eilers.Stefan@epost.de>
5 * and Hansjoerg Lipp <hjlipp@web.de>
6 *
7 * Version: $Id: gigaset.h,v 1.97.4.26 2006/02/04 18:28:16 hjlipp Exp $
8 * ===========================================================================
9 */
10
11#ifndef GIGASET_H
12#define GIGASET_H
13
14#include <linux/config.h>
15#include <linux/kernel.h>
16#include <linux/compiler.h>
17#include <linux/types.h>
18#include <asm/atomic.h>
19#include <linux/spinlock.h>
20#include <linux/isdnif.h>
21#include <linux/usb.h>
22#include <linux/skbuff.h>
23#include <linux/netdevice.h>
24#include <linux/ppp_defs.h>
25#include <linux/timer.h>
26#include <linux/interrupt.h>
27#include <linux/tty.h>
28#include <linux/tty_driver.h>
29#include <linux/list.h>
30
31#define GIG_VERSION {0,5,0,0}
32#define GIG_COMPAT {0,4,0,0}
33
34#define MAX_REC_PARAMS 10 /* Max. number of params in response string */
35#define MAX_RESP_SIZE 512 /* Max. size of a response string */
36#define HW_HDR_LEN 2 /* Header size used to store ack info */
37
38#define MAX_EVENTS 64 /* size of event queue */
39
40#define RBUFSIZE 8192
41#define SBUFSIZE 4096 /* sk_buff payload size */
42
43#define MAX_BUF_SIZE (SBUFSIZE - 2) /* Max. size of a data packet from LL */
44#define TRANSBUFSIZE 768 /* bytes per skb for transparent receive */
45
46/* compile time options */
47#define GIG_MAJOR 0
48
49#define GIG_MAYINITONDIAL
50#define GIG_RETRYCID
51#define GIG_X75
52
53#define MAX_TIMER_INDEX 1000
54#define MAX_SEQ_INDEX 1000
55
56#define GIG_TICK (HZ / 10)
57
58/* timeout values (unit: 1 sec) */
59#define INIT_TIMEOUT 1
60
61/* timeout values (unit: 0.1 sec) */
62#define RING_TIMEOUT 3 /* for additional parameters to RING */
63#define BAS_TIMEOUT 20 /* for response to Base USB ops */
64#define ATRDY_TIMEOUT 3 /* for HD_READY_SEND_ATDATA */
65
66#define BAS_RETRY 3 /* max. retries for base USB ops */
67
68#define MAXACT 3
69
70#define IFNULL(a) if (unlikely(!(a)))
71#define IFNULLRET(a) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); return; }
72#define IFNULLRETVAL(a,b) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); return (b); }
73#define IFNULLCONT(a) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); continue; }
74#define IFNULLGOTO(a,b) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); goto b; }
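/* Usage sketch (illustrative): IFNULLRET(cs) logs an error and returns from a
 * void function when cs is NULL; IFNULLRETVAL(skb, -EINVAL) does the same in a
 * value-returning function, returning -EINVAL instead. */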
75
76extern int gigaset_debuglevel; /* "needs" cast to (enum debuglevel) */
77
78/* any combination of these can be given with the 'debug=' parameter to insmod, e.g.
79 * 'insmod usb_gigaset.o debug=0x2c' will set DEBUG_OPEN, DEBUG_CMD and DEBUG_INTR. */
80enum debuglevel { /* up to 24 bits (atomic_t) */
81 DEBUG_REG = 0x0002, /* serial port I/O register operations */
82 DEBUG_OPEN = 0x0004, /* open/close serial port */
83 DEBUG_INTR = 0x0008, /* interrupt processing */
84 DEBUG_INTR_DUMP = 0x0010, /* Activating hexdump debug output on interrupt
85 requests, not available as run-time option */
86 DEBUG_CMD = 0x00020, /* sent/received LL commands */
87 DEBUG_STREAM = 0x00040, /* application data stream I/O events */
88 DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */
89 DEBUG_LLDATA = 0x00100, /* sent/received LL data */
90 DEBUG_INTR_0 = 0x00200, /* serial port output interrupt processing */
91 DEBUG_DRIVER = 0x00400, /* driver structure */
92 DEBUG_HDLC = 0x00800, /* M10x HDLC processing */
93 DEBUG_WRITE = 0x01000, /* M105 data write */
94	DEBUG_TRANSCMD    = 0x02000, /* AT commands and responses */
95	DEBUG_MCMD        = 0x04000, /* commands that are sent very often */
96 DEBUG_INIT = 0x08000, /* (de)allocation+initialization of data structures */
97 DEBUG_LOCK = 0x10000, /* semaphore operations */
98 DEBUG_OUTPUT = 0x20000, /* output to device */
99 DEBUG_ISO = 0x40000, /* isochronous transfers */
100 DEBUG_IF = 0x80000, /* character device operations */
101 DEBUG_USBREQ = 0x100000, /* USB communication (except payload data) */
102 DEBUG_LOCKCMD = 0x200000, /* AT commands and responses when MS_LOCKED */
103
104 DEBUG_ANY = 0x3fffff, /* print message if any of the others is activated */
105};
106
107#ifdef CONFIG_GIGASET_DEBUG
108#define DEBUG_DEFAULT (DEBUG_INIT | DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_USBREQ)
109//#define DEBUG_DEFAULT (DEBUG_LOCK | DEBUG_INIT | DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_IF | DEBUG_DRIVER | DEBUG_OUTPUT | DEBUG_INTR)
110#else
111#define DEBUG_DEFAULT 0
112#endif
113
114/* redefine syslog macros to prepend module name instead of entire source path */
115/* The space before the comma in ", ##" is needed by gcc 2.95 */
116#undef info
117#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
118
119#undef notice
120#define notice(format, arg...) printk(KERN_NOTICE "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
121
122#undef warn
123#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
124
125#undef err
126#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg)
127
128#undef dbg
129#ifdef CONFIG_GIGASET_DEBUG
130#define dbg(level, format, arg...) do { if (unlikely(((enum debuglevel)gigaset_debuglevel) & (level))) \
131 printk(KERN_DEBUG "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg); } while (0)
132#else
133#define dbg(level, format, arg...) do {} while (0)
134#endif
135
136void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
137 size_t len, const unsigned char *buf, int from_user);
138
139/* connection state */
140#define ZSAU_NONE 0
141#define ZSAU_DISCONNECT_IND 4
142#define ZSAU_OUTGOING_CALL_PROCEEDING 1
143#define ZSAU_PROCEEDING 1
144#define ZSAU_CALL_DELIVERED 2
145#define ZSAU_ACTIVE 3
146#define ZSAU_NULL 5
147#define ZSAU_DISCONNECT_REQ 6
148#define ZSAU_UNKNOWN -1
149
150/* USB control transfer requests */
151#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
152#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
153
154/* int-in-events 3070 */
155#define HD_B1_FLOW_CONTROL 0x80
156#define HD_B2_FLOW_CONTROL 0x81
157#define HD_RECEIVEATDATA_ACK (0x35) // 3070 // att: HD_RECEIVE>>AT<<DATA_ACK
158#define HD_READY_SEND_ATDATA (0x36) // 3070
159#define HD_OPEN_ATCHANNEL_ACK (0x37) // 3070
160#define HD_CLOSE_ATCHANNEL_ACK (0x38) // 3070
161#define HD_DEVICE_INIT_OK (0x11) // ISurf USB + 3070
162#define HD_OPEN_B1CHANNEL_ACK (0x51) // ISurf USB + 3070
163#define HD_OPEN_B2CHANNEL_ACK (0x52) // ISurf USB + 3070
164#define HD_CLOSE_B1CHANNEL_ACK (0x53) // ISurf USB + 3070
165#define HD_CLOSE_B2CHANNEL_ACK (0x54) // ISurf USB + 3070
166// Power management
167#define HD_SUSPEND_END (0x61) // ISurf USB
168// Configuration
169#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) // ISurf USB + 3070
170
171/* control requests 3070 */
172#define HD_OPEN_B1CHANNEL (0x23) // ISurf USB + 3070
173#define HD_CLOSE_B1CHANNEL (0x24) // ISurf USB + 3070
174#define HD_OPEN_B2CHANNEL (0x25) // ISurf USB + 3070
175#define HD_CLOSE_B2CHANNEL (0x26) // ISurf USB + 3070
176#define HD_RESET_INTERRUPT_PIPE (0x27) // ISurf USB + 3070
177#define HD_DEVICE_INIT_ACK (0x34) // ISurf USB + 3070
178#define HD_WRITE_ATMESSAGE (0x12) // 3070
179#define HD_READ_ATMESSAGE (0x13) // 3070
180#define HD_OPEN_ATCHANNEL (0x28) // 3070
181#define HD_CLOSE_ATCHANNEL (0x29) // 3070
182
183/* USB frames for isochronous transfer */
184#define BAS_FRAMETIME 1 /* number of milliseconds between frames */
185#define BAS_NUMFRAMES 8 /* number of frames per URB */
186#define BAS_MAXFRAME 16 /* allocated bytes per frame */
187#define BAS_NORMFRAME 8 /* send size without flow control */
188#define BAS_HIGHFRAME 10 /* " " with positive flow control */
189#define BAS_LOWFRAME 5 /* " " with negative flow control */
190#define BAS_CORRFRAMES 4 /* flow control multiplicator */
191
192#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) /* size of isochronous input buffer per URB */
193#define BAS_OUTBUFSIZE 4096 /* size of common isochronous output buffer */
194#define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isochronous output buffer */
195
196#define BAS_INURBS 3
197#define BAS_OUTURBS 3
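/* For reference, with the values above: each isochronous IN URB covers
 * BAS_NUMFRAMES * BAS_MAXFRAME = 8 * 16 = 128 bytes (= BAS_INBUFSIZE), so the
 * BAS_INURBS input URBs together need 3 * 128 = 384 bytes of buffer space. */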
198
199/* variable commands in struct bc_state */
200#define AT_ISO 0
201#define AT_DIAL 1
202#define AT_MSN 2
203#define AT_BC 3
204#define AT_PROTO 4
205#define AT_TYPE 5
206#define AT_HLC 6
207#define AT_NUM 7
208
209/* variables in struct at_state_t */
210#define VAR_ZSAU 0
211#define VAR_ZDLE 1
212#define VAR_ZVLS 2
213#define VAR_ZCTP 3
214#define VAR_NUM 4
215
216#define STR_NMBR 0
217#define STR_ZCPN 1
218#define STR_ZCON 2
219#define STR_ZBC 3
220#define STR_ZHLC 4
221#define STR_NUM 5
222
223#define EV_TIMEOUT -105
224#define EV_IF_VER -106
225#define EV_PROC_CIDMODE -107
226#define EV_SHUTDOWN -108
227#define EV_START -110
228#define EV_STOP -111
229#define EV_IF_LOCK -112
230#define EV_PROTO_L2 -113
231#define EV_ACCEPT -114
232#define EV_DIAL -115
233#define EV_HUP -116
234#define EV_BC_OPEN -117
235#define EV_BC_CLOSED -118
236
237/* input state */
238#define INS_command 0x0001
239#define INS_DLE_char 0x0002
240#define INS_byte_stuff 0x0004
241#define INS_have_data 0x0008
242#define INS_skip_frame 0x0010
243#define INS_DLE_command 0x0020
244#define INS_flag_hunt 0x0040
245
246/* channel state */
247#define CHS_D_UP 0x01
248#define CHS_B_UP 0x02
249#define CHS_NOTIFY_LL 0x04
250
251#define ICALL_REJECT 0
252#define ICALL_ACCEPT 1
253#define ICALL_IGNORE 2
254
255/* device state */
256#define MS_UNINITIALIZED 0
257#define MS_INIT 1
258#define MS_LOCKED 2
259#define MS_SHUTDOWN 3
260#define MS_RECOVER 4
261#define MS_READY 5
262
263/* mode */
264#define M_UNKNOWN 0
265#define M_CONFIG 1
266#define M_UNIMODEM 2
267#define M_CID 3
268
269/* start mode */
270#define SM_LOCKED 0
271#define SM_ISDN 1 /* default */
272
273struct gigaset_ops;
274struct gigaset_driver;
275
276struct usb_cardstate;
277struct ser_cardstate;
278struct bas_cardstate;
279
280struct bc_state;
281struct usb_bc_state;
282struct ser_bc_state;
283struct bas_bc_state;
284
285struct reply_t {
286 int resp_code; /* RSP_XXXX */
287 int min_ConState; /* <0 => ignore */
288 int max_ConState; /* <0 => ignore */
289 int parameter; /* e.g. ZSAU_XXXX <0: ignore*/
290 int new_ConState; /* <0 => ignore */
291 int timeout; /* >0 => *HZ; <=0 => TOUT_XXXX*/
292 int action[MAXACT]; /* ACT_XXXX */
293 char *command; /* NULL==none */
294};
295
296extern struct reply_t gigaset_tab_cid_m10x[];
297extern struct reply_t gigaset_tab_nocid_m10x[];
298
299struct inbuf_t {
300 unsigned char *rcvbuf; /* usb-gigaset receive buffer */
301 struct bc_state *bcs;
302 struct cardstate *cs;
303 int inputstate;
304
305 atomic_t head, tail;
306 unsigned char data[RBUFSIZE];
307};
308
309/* isochronous write buffer structure
310 * circular buffer with pad area for extraction of complete USB frames
311 * - data[read..nextread-1] is valid data already submitted to the USB subsystem
312 * - data[nextread..write-1] is valid data yet to be sent
313 * - data[write] is the next byte to write to
314 *     - in byte-oriented L2 protocols, it is completely free
315 *     - in bit-oriented L2 protocols, it may contain a partial byte of valid data
316 * - data[write+1..read-1] is free
317 * - wbits is the number of valid data bits in data[write], starting at the LSB
318 * - writesem is the semaphore for writing to the buffer:
319 * if writesem <= 0, data[write..read-1] is currently being written to
320 * - idle contains the byte value to repeat when the end of valid data is
321 * reached; if nextread==write (buffer contains no data to send), either the
322 * BAS_OUTBUFPAD bytes immediately before data[write] (if write>=BAS_OUTBUFPAD)
323 * or those of the pad area (if write<BAS_OUTBUFPAD) are also filled with that
324 * value
325 * - optionally, the following statistics on the buffer's usage can be collected:
326 * maxfill: maximum number of bytes occupied
327 * idlefills: number of times a frame of idle bytes is prepared
328 * emptygets: number of times the buffer was empty when a data frame was requested
329 * backtoback: number of times two data packets were entered into the buffer
330 * without intervening idle flags
331 * nakedback: set if no idle flags have been inserted since the last data packet
332 */
333struct isowbuf_t {
334 atomic_t read;
335 atomic_t nextread;
336 atomic_t write;
337 atomic_t writesem;
338 int wbits;
339 unsigned char data[BAS_OUTBUFSIZE + BAS_OUTBUFPAD];
340 unsigned char idle;
341};
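/* Illustrative index arithmetic (assuming, as described above, that the read,
 * nextread and write indices wrap around at BAS_OUTBUFSIZE): the data not yet
 * submitted spans (write - nextread) mod BAS_OUTBUFSIZE bytes, and the free
 * space is (read - write - 1) mod BAS_OUTBUFSIZE bytes, since data[write]
 * itself is reserved for the byte currently being assembled. */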
342
343/* isochronous write URB context structure
344 * data to be stored along with the URB and retrieved when it is returned
345 * as completed by the USB subsystem
346 * - urb: pointer to the URB itself
347 * - bcs: pointer to the B Channel control structure
348 * - limit: end of write buffer area covered by this URB
349 */
350struct isow_urbctx_t {
351 struct urb *urb;
352 struct bc_state *bcs;
353 int limit;
354};
355
356/* AT state structure
357 * data associated with the state of an ISDN connection, whether or not
358 * it is currently assigned a B channel
359 */
360struct at_state_t {
361 struct list_head list;
362 int waiting;
363 int getstring;
364 atomic_t timer_index;
365 unsigned long timer_expires;
366 int timer_active;
367 unsigned int ConState; /* State of connection */
368 struct reply_t *replystruct;
369 int cid;
370 int int_var[VAR_NUM]; /* see VAR_XXXX */
371 char *str_var[STR_NUM]; /* see STR_XXXX */
372 unsigned pending_commands; /* see PC_XXXX */
373 atomic_t seq_index;
374
375 struct cardstate *cs;
376 struct bc_state *bcs;
377};
378
379struct resp_type_t {
380 unsigned char *response;
381 int resp_code; /* RSP_XXXX */
382 int type; /* RT_XXXX */
383};
384
385struct prot_skb {
386 atomic_t empty;
387 struct semaphore *sem;
388 struct sk_buff *skb;
389};
390
391struct event_t {
392 int type;
393 void *ptr, *arg;
394 int parameter;
395 int cid;
396 struct at_state_t *at_state;
397};
398
399/* This buffer holds all information about the used B-Channel */
400struct bc_state {
401 struct sk_buff *tx_skb; /* Current transfer buffer to modem */
402 struct sk_buff_head squeue; /* B-Channel send Queue */
403
404 /* Variables for debugging .. */
405 int corrupted; /* Counter for corrupted packages */
406 int trans_down; /* Counter of packages (downstream) */
407 int trans_up; /* Counter of packages (upstream) */
408
409 struct at_state_t at_state;
410 unsigned long rcvbytes;
411
412 __u16 fcs;
413 struct sk_buff *skb;
414 int inputstate; /* see INS_XXXX */
415
416 int channel;
417
418 struct cardstate *cs;
419
420 unsigned chstate; /* bitmap (CHS_*) */
421 int ignore;
422 unsigned proto2; /* Layer 2 protocol (ISDN_PROTO_L2_*) */
423 char *commands[AT_NUM]; /* see AT_XXXX */
424
425#ifdef CONFIG_GIGASET_DEBUG
426 int emptycount;
427#endif
428 int busy;
429 int use_count;
430
431 /* hardware drivers */
432 union {
433 struct ser_bc_state *ser; /* private data of serial hardware driver */
434 struct usb_bc_state *usb; /* private data of usb hardware driver */
435 struct bas_bc_state *bas;
436 } hw;
437};
438
439struct cardstate {
440 struct gigaset_driver *driver;
441 unsigned minor_index;
442
443 const struct gigaset_ops *ops;
444
445 /* Stuff to handle communication */
446 //wait_queue_head_t initwait;
447 wait_queue_head_t waitqueue;
448 int waiting;
449 atomic_t mode; /* see M_XXXX */
450 atomic_t mstate; /* Modem state: see MS_XXXX */
451 /* only changed by the event layer */
452 int cmd_result;
453
454 int channels;
455 struct bc_state *bcs; /* Array of struct bc_state */
456
457 int onechannel; /* data and commands transmitted in one stream (M10x) */
458
459 spinlock_t lock;
460 struct at_state_t at_state; /* at_state_t for cid == 0 */
461 struct list_head temp_at_states; /* list of temporary "struct at_state_t"s without B channel */
462
463 struct inbuf_t *inbuf;
464
465 struct cmdbuf_t *cmdbuf, *lastcmdbuf;
466 spinlock_t cmdlock;
467 unsigned curlen, cmdbytes;
468
469 unsigned open_count;
470 struct tty_struct *tty;
471 struct tasklet_struct if_wake_tasklet;
472 unsigned control_state;
473
474 unsigned fwver[4];
475 int gotfwver;
476
477 atomic_t running; /* !=0 if events are handled */
478 atomic_t connected; /* !=0 if hardware is connected */
479
480 atomic_t cidmode;
481
482 int myid; /* id for communication with LL */
483 isdn_if iif;
484
485 struct reply_t *tabnocid;
486 struct reply_t *tabcid;
487 int cs_init;
488 int ignoreframes; /* frames to ignore after setting up the B channel */
489 struct semaphore sem; /* locks this structure: */
490 /* connected is not changed, */
491 /* hardware_up is not changed, */
492 /* MState is not changed to or from MS_LOCKED */
493
494 struct timer_list timer;
495 int retry_count;
496 int dle; /* !=0 if modem commands/responses are dle encoded */
497 int cur_at_seq; /* sequence of AT commands being processed */
498	int curchannel;			/* channel those commands are meant for */
499	atomic_t commands_pending;	/* flag(s) in xxx.at_state.pending_commands have been set */
500 struct tasklet_struct event_tasklet; /* tasklet for serializing AT commands. Scheduled
501						 *   -> for modem responses (and incoming data for M10x)
502						 *   -> on timeout
503						 *   -> after setting bits in xxx.at_state.pending_commands
504 * (e.g. command from LL) */
505 struct tasklet_struct write_tasklet; /* tasklet for serial output
506 * (not used in base driver) */
507
508 /* event queue */
509 struct event_t events[MAX_EVENTS];
510 atomic_t ev_tail, ev_head;
511 spinlock_t ev_lock;
512
513 /* current modem response */
514 unsigned char respdata[MAX_RESP_SIZE];
515 unsigned cbytes;
516
517 /* hardware drivers */
518 union {
519 struct usb_cardstate *usb; /* private data of USB hardware driver */
520 struct ser_cardstate *ser; /* private data of serial hardware driver */
521 struct bas_cardstate *bas; /* private data of base hardware driver */
522 } hw;
523};
524
525struct gigaset_driver {
526 struct list_head list;
527 spinlock_t lock; /* locks minor tables and blocked */
528 //struct semaphore sem; /* locks this structure */
529 struct tty_driver *tty;
530 unsigned have_tty;
531 unsigned minor;
532 unsigned minors;
533 struct cardstate *cs;
534 unsigned *flags;
535 int blocked;
536
537 const struct gigaset_ops *ops;
538 struct module *owner;
539};
540
541struct cmdbuf_t {
542 struct cmdbuf_t *next, *prev;
543 int len, offset;
544 struct tasklet_struct *wake_tasklet;
545 unsigned char buf[0];
546};
547
548struct bas_bc_state {
549 /* isochronous output state */
550 atomic_t running;
551 atomic_t corrbytes;
552 spinlock_t isooutlock;
553 struct isow_urbctx_t isoouturbs[BAS_OUTURBS];
554 struct isow_urbctx_t *isooutdone, *isooutfree, *isooutovfl;
555 struct isowbuf_t *isooutbuf;
556 unsigned numsub; /* submitted URB counter (for diagnostic messages only) */
557 struct tasklet_struct sent_tasklet;
558
559 /* isochronous input state */
560 spinlock_t isoinlock;
561 struct urb *isoinurbs[BAS_INURBS];
562 unsigned char isoinbuf[BAS_INBUFSIZE * BAS_INURBS];
563 struct urb *isoindone; /* completed isoc read URB */
564 int loststatus; /* status of dropped URB */
565 unsigned isoinlost; /* number of bytes lost */
566 /* state of bit unstuffing algorithm (in addition to BC_state.inputstate) */
567 unsigned seqlen; /* number of '1' bits not yet unstuffed */
568 unsigned inbyte, inbits; /* collected bits for next byte */
569 /* statistics */
570 unsigned goodbytes; /* bytes correctly received */
571 unsigned alignerrs; /* frames with incomplete byte at end */
572 unsigned fcserrs; /* FCS errors */
573 unsigned frameerrs; /* framing errors */
574 unsigned giants; /* long frames */
575 unsigned runts; /* short frames */
576 unsigned aborts; /* HDLC aborts */
577 unsigned shared0s; /* '0' bits shared between flags */
578 unsigned stolen0s; /* '0' stuff bits also serving as leading flag bits */
579 struct tasklet_struct rcvd_tasklet;
580};
581
582struct gigaset_ops {
583 /* Called from ev-layer.c/interface.c for sending AT commands to the device */
584 int (*write_cmd)(struct cardstate *cs,
585 const unsigned char *buf, int len,
586 struct tasklet_struct *wake_tasklet);
587
588 /* Called from interface.c for additional device control */
589 int (*write_room)(struct cardstate *cs);
590 int (*chars_in_buffer)(struct cardstate *cs);
591 int (*brkchars)(struct cardstate *cs, const unsigned char buf[6]);
592
593 /* Called from ev-layer.c after setting up connection
594 * Should call gigaset_bchannel_up(), when finished. */
595 int (*init_bchannel)(struct bc_state *bcs);
596
597 /* Called from ev-layer.c after hanging up
598 * Should call gigaset_bchannel_down(), when finished. */
599 int (*close_bchannel)(struct bc_state *bcs);
600
601 /* Called by gigaset_initcs() for setting up bcs->hw.xxx */
602 int (*initbcshw)(struct bc_state *bcs);
603
604 /* Called by gigaset_freecs() for freeing bcs->hw.xxx */
605 int (*freebcshw)(struct bc_state *bcs);
606
607 /* Called by gigaset_stop() or gigaset_bchannel_down() for resetting bcs->hw.xxx */
608 void (*reinitbcshw)(struct bc_state *bcs);
609
610 /* Called by gigaset_initcs() for setting up cs->hw.xxx */
611 int (*initcshw)(struct cardstate *cs);
612
613 /* Called by gigaset_freecs() for freeing cs->hw.xxx */
614 void (*freecshw)(struct cardstate *cs);
615
616 ///* Called by gigaset_stop() for killing URBs, shutting down the device, ...
617 // hardwareup: ==0: don't try to shut down the device, hardware is really not accessible
618 // !=0: hardware still up */
619 //void (*stophw)(struct cardstate *cs, int hardwareup);
620
621 /* Called from common.c/interface.c for additional serial port control */
622 int (*set_modem_ctrl)(struct cardstate *cs, unsigned old_state, unsigned new_state);
623 int (*baud_rate)(struct cardstate *cs, unsigned cflag);
624 int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag);
625
626 /* Called from i4l.c to put an skb into the send-queue. */
627 int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
628
629 /* Called from ev-layer.c to process a block of data
630 * received through the common/control channel. */
631 void (*handle_input)(struct inbuf_t *inbuf);
632
633};
634
635/* = Common structures and definitions ======================================= */
636
637/* Parser states for DLE-Event:
638 * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
639 * <DLE_FLAG>: 0x10
640 * <EVENT>: ((a-z)* | (A-Z)* | (0-10)*)+
641 */
642#define DLE_FLAG 0x10
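/* Example (illustrative): the byte sequence 0x10 'X' '2' 0x10 '.' matches the
 * grammar above and encodes the DLE event "2". */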
643
644/* ===========================================================================
645 * Functions implemented in asyncdata.c
646 */
647
648/* Called from i4l.c to put an skb into the send-queue.
649 * After sending gigaset_skb_sent() should be called. */
650int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb);
651
652/* Called from ev-layer.c to process a block of data
653 * received through the common/control channel. */
654void gigaset_m10x_input(struct inbuf_t *inbuf);
655
656/* ===========================================================================
657 * Functions implemented in isocdata.c
658 */
659
660/* Called from i4l.c to put an skb into the send-queue.
661 * After sending gigaset_skb_sent() should be called. */
662int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb);
663
664/* Called from ev-layer.c to process a block of data
665 * received through the common/control channel. */
666void gigaset_isoc_input(struct inbuf_t *inbuf);
667
668/* Called from bas-gigaset.c to process a block of data
669 * received through the isochronous channel */
670void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs);
671
672/* Called from bas-gigaset.c to put a block of data
673 * into the isochronous output buffer */
674int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len);
675
676/* Called from bas-gigaset.c to initialize the isochronous output buffer */
677void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle);
678
679/* Called from bas-gigaset.c to retrieve a block of bytes for sending */
680int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
681
682/* ===========================================================================
683 * Functions implemented in i4l.c/gigaset.h
684 */
685
686/* Called by gigaset_initcs() for setting up with the isdn4linux subsystem */
687int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid);
688
689/* Called from xxx-gigaset.c to indicate completion of sending an skb */
690void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
691
692/* Called from common.c/ev-layer.c to indicate events relevant to the LL */
693int gigaset_isdn_icall(struct at_state_t *at_state);
694int gigaset_isdn_setup_accept(struct at_state_t *at_state);
695int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data);
696
697void gigaset_i4l_cmd(struct cardstate *cs, int cmd);
698void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd);
699
700
701static inline void gigaset_isdn_rcv_err(struct bc_state *bcs)
702{
703 isdn_ctrl response;
704
705 /* error -> LL */
706 dbg(DEBUG_CMD, "sending L1ERR");
707 response.driver = bcs->cs->myid;
708 response.command = ISDN_STAT_L1ERR;
709 response.arg = bcs->channel;
710 response.parm.errcode = ISDN_STAT_L1ERR_RECV;
711 bcs->cs->iif.statcallb(&response);
712}
713
714/* ===========================================================================
715 * Functions implemented in ev-layer.c
716 */
717
718/* tasklet called from common.c to process queued events */
719void gigaset_handle_event(unsigned long data);
720
721/* called from isocdata.c / asyncdata.c
722 * when a complete modem response line has been received */
723void gigaset_handle_modem_response(struct cardstate *cs);
724
725/* ===========================================================================
726 * Functions implemented in proc.c
727 */
728
729/* initialize sysfs for device */
730void gigaset_init_dev_sysfs(struct usb_interface *interface);
731void gigaset_free_dev_sysfs(struct usb_interface *interface);
732
733/* ===========================================================================
734 * Functions implemented in common.c/gigaset.h
735 */
736
737void gigaset_bcs_reinit(struct bc_state *bcs);
738void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
739 struct cardstate *cs, int cid);
740int gigaset_get_channel(struct bc_state *bcs);
741void gigaset_free_channel(struct bc_state *bcs);
742int gigaset_get_channels(struct cardstate *cs);
743void gigaset_free_channels(struct cardstate *cs);
744void gigaset_block_channels(struct cardstate *cs);
745
746/* Allocate and initialize driver structure. */
747struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
748 const char *procname,
749 const char *devname,
750 const char *devfsname,
751 const struct gigaset_ops *ops,
752 struct module *owner);
753
754/* Deallocate driver structure. */
755void gigaset_freedriver(struct gigaset_driver *drv);
756void gigaset_debugdrivers(void);
757struct cardstate *gigaset_get_cs_by_minor(unsigned minor);
758struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty);
759struct cardstate *gigaset_get_cs_by_id(int id);
760
761/* For drivers without fixed assignment device<->cardstate (usb) */
762struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv);
763void gigaset_unassign(struct cardstate *cs);
764void gigaset_blockdriver(struct gigaset_driver *drv);
765
766/* Allocate and initialize card state. Calls hardware dependent gigaset_init[b]cs(). */
767struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
768 int onechannel, int ignoreframes,
769 int cidmode, const char *modulename);
770
771/* Free card state. Calls hardware dependent gigaset_free[b]cs(). */
772void gigaset_freecs(struct cardstate *cs);
773
774/* Tell common.c that hardware and driver are ready. */
775int gigaset_start(struct cardstate *cs);
776
777/* Tell common.c that the device is not present any more. */
778void gigaset_stop(struct cardstate *cs);
779
780/* Tell common.c that the driver is being unloaded. */
781void gigaset_shutdown(struct cardstate *cs);
782
783/* Tell common.c that an skb has been sent. */
784void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
785
786/* Append event to the queue.
787 * Returns NULL on failure or a pointer to the event on success.
788 * ptr must be kmalloc()ed (and not be freed by the caller).
789 */
790struct event_t *gigaset_add_event(struct cardstate *cs,
791 struct at_state_t *at_state, int type,
792 void *ptr, int parameter, void *arg);
793
794/* Called on CONFIG1 command from frontend. */
795int gigaset_enterconfigmode(struct cardstate *cs); //0: success <0: errorcode
796
797/* cs->lock must not be locked */
798static inline void gigaset_schedule_event(struct cardstate *cs)
799{
800 unsigned long flags;
801 spin_lock_irqsave(&cs->lock, flags);
802 if (atomic_read(&cs->running))
803 tasklet_schedule(&cs->event_tasklet);
804 spin_unlock_irqrestore(&cs->lock, flags);
805}
806
807/* Tell common.c that B channel has been closed. */
808/* cs->lock must not be locked */
809static inline void gigaset_bchannel_down(struct bc_state *bcs)
810{
811 gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_CLOSED, NULL, 0, NULL);
812
813 dbg(DEBUG_CMD, "scheduling BC_CLOSED");
814 gigaset_schedule_event(bcs->cs);
815}
816
817/* Tell common.c that B channel has been opened. */
818/* cs->lock must not be locked */
819static inline void gigaset_bchannel_up(struct bc_state *bcs)
820{
821 gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_OPEN, NULL, 0, NULL);
822
823 dbg(DEBUG_CMD, "scheduling BC_OPEN");
824 gigaset_schedule_event(bcs->cs);
825}
826
827/* handling routines for sk_buff */
828/* ============================= */
829
830/* private version of __skb_put()
831 * append 'len' bytes to the content of 'skb', already knowing that the
832 * existing buffer can accommodate them
833 * returns a pointer to the location where the new bytes should be copied to
834 * This function does not take any locks, so it must only be called with the
835 * appropriate locks held.
836 */
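/* Note: the only difference from __skb_put() is the omitted SKB_LINEAR_ASSERT();
 * callers must make sure the skb is linear and has sufficient tailroom. */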
837static inline unsigned char *gigaset_skb_put_quick(struct sk_buff *skb,
838 unsigned int len)
839{
840 unsigned char *tmp = skb->tail;
841 /*SKB_LINEAR_ASSERT(skb);*/ /* not needed here */
842 skb->tail += len;
843 skb->len += len;
844 return tmp;
845}
846
847/* pass received skb to LL
848 * Warning: skb must not be accessed anymore!
849 */
850static inline void gigaset_rcv_skb(struct sk_buff *skb,
851 struct cardstate *cs,
852 struct bc_state *bcs)
853{
854 cs->iif.rcvcallb_skb(cs->myid, bcs->channel, skb);
855 bcs->trans_down++;
856}
857
858/* handle reception of corrupted skb
859 * Warning: skb must not be accessed anymore!
860 */
861static inline void gigaset_rcv_error(struct sk_buff *procskb,
862 struct cardstate *cs,
863 struct bc_state *bcs)
864{
865 if (procskb)
866 dev_kfree_skb(procskb);
867
868 if (bcs->ignore)
869 --bcs->ignore;
870 else {
871 ++bcs->corrupted;
872 gigaset_isdn_rcv_err(bcs);
873 }
874}
875
876
877/* bitwise byte inversion table */
878extern __u8 gigaset_invtab[]; /* in common.c */
879
880
881/* append received bytes to inbuf */
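/* The ring is considered empty when head == tail, so at most RBUFSIZE - 1
 * bytes can be buffered; the code below never lets 'tail' catch up with
 * 'head'. */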
882static inline int gigaset_fill_inbuf(struct inbuf_t *inbuf,
883 const unsigned char *src,
884 unsigned numbytes)
885{
886 unsigned n, head, tail, bytesleft;
887
888 dbg(DEBUG_INTR, "received %u bytes", numbytes);
889
890 if (!numbytes)
891 return 0;
892
893 bytesleft = numbytes;
894 tail = atomic_read(&inbuf->tail);
895 head = atomic_read(&inbuf->head);
896 dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
897
898 while (bytesleft) {
899 if (head > tail)
900 n = head - 1 - tail;
901 else if (head == 0)
902 n = (RBUFSIZE-1) - tail;
903 else
904 n = RBUFSIZE - tail;
905 if (!n) {
906 err("buffer overflow (%u bytes lost)", bytesleft);
907 break;
908 }
909 if (n > bytesleft)
910 n = bytesleft;
911 memcpy(inbuf->data + tail, src, n);
912 bytesleft -= n;
913 tail = (tail + n) % RBUFSIZE;
914 src += n;
915 }
916 dbg(DEBUG_INTR, "setting tail to %u", tail);
917 atomic_set(&inbuf->tail, tail);
918 return numbytes != bytesleft;
919}
920
921/* ===========================================================================
922 * Functions implemented in interface.c
923 */
924
925/* initialize interface */
926void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
927 const char *devname, const char *devfsname);
928/* release interface */
929void gigaset_if_freedriver(struct gigaset_driver *drv);
930/* add minor */
931void gigaset_if_init(struct cardstate *cs);
932/* remove minor */
933void gigaset_if_free(struct cardstate *cs);
934/* device received data */
935void gigaset_if_receive(struct cardstate *cs,
936 unsigned char *buffer, size_t len);
937
938#endif
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
new file mode 100644
index 000000000000..731a675f21b0
--- /dev/null
+++ b/drivers/isdn/gigaset/i4l.c
@@ -0,0 +1,567 @@
1/*
2 * Stuff used by all variants of the driver
3 *
4 * Copyright (c) 2001 by Stefan Eilers (Eilers.Stefan@epost.de),
5 * Hansjoerg Lipp (hjlipp@web.de),
6 * Tilman Schmidt (tilman@imap.cc).
7 *
8 * =====================================================================
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 * =====================================================================
14 * ToDo: ...
15 * =====================================================================
16 * Version: $Id: i4l.c,v 1.3.2.9 2006/02/04 18:28:16 hjlipp Exp $
17 * =====================================================================
18 */
19
20#include "gigaset.h"
21
22/* == Handling of I4L IO ============================================================================*/
23
24/* writebuf_from_LL
25 * called by LL to transmit data on an open channel
26 * inserts the buffer data into the send queue and starts the transmission
27 * Note that this operation must not sleep!
28 * When the buffer is processed completely, gigaset_skb_sent() should be called.
29 * parameters:
30 * driverID driver ID as assigned by LL
31 * channel channel number
32 * ack if != 0 LL wants to be notified on completion via statcallb(ISDN_STAT_BSENT)
33 * skb skb containing data to send
34 * return value:
35 * number of accepted bytes
36 * 0 if temporarily unable to accept data (out of buffer space)
37 *	<0 on error (e.g. -EINVAL)
38 */
39static int writebuf_from_LL(int driverID, int channel, int ack, struct sk_buff *skb)
40{
41 struct cardstate *cs;
42 struct bc_state *bcs;
43 unsigned len;
44 unsigned skblen;
45
46 if (!(cs = gigaset_get_cs_by_id(driverID))) {
47 err("%s: invalid driver ID (%d)", __func__, driverID);
48 return -ENODEV;
49 }
50 if (channel < 0 || channel >= cs->channels) {
51 err("%s: invalid channel ID (%d)", __func__, channel);
52 return -ENODEV;
53 }
54 bcs = &cs->bcs[channel];
55 len = skb->len;
56
57 dbg(DEBUG_LLDATA,
58 "Receiving data from LL (id: %d, channel: %d, ack: %d, size: %d)",
59 driverID, channel, ack, len);
60
61 if (!len) {
62 if (ack)
63 warn("not ACKing empty packet from LL");
64 return 0;
65 }
66 if (len > MAX_BUF_SIZE) {
67		err("%s: packet too large (%u bytes)", __func__, len);
68 return -EINVAL;
69 }
70
71 if (!atomic_read(&cs->connected))
72 return -ENODEV;
73
74 skblen = ack ? len : 0;
75 skb->head[0] = skblen & 0xff;
76 skb->head[1] = skblen >> 8;
77 dbg(DEBUG_MCMD, "skb: len=%u, skblen=%u: %02x %02x", len, skblen,
78 (unsigned) skb->head[0], (unsigned) skb->head[1]);
79
80 /* pass to device-specific module */
81 return cs->ops->send_skb(bcs, skb);
82}
83
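/* Note: the two HW_HDR_LEN bytes stored at skb->head by writebuf_from_LL()
 * hold 'skblen' with the low byte first; gigaset_skb_sent() reads them back
 * and, if the value is non-zero, acknowledges the transfer to the LL via
 * ISDN_STAT_BSENT. */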
84void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
85{
86 unsigned len;
87 isdn_ctrl response;
88
89 ++bcs->trans_up;
90
91 if (skb->len)
92 warn("%s: skb->len==%d", __func__, skb->len);
93
94 len = (unsigned char) skb->head[0] |
95 (unsigned) (unsigned char) skb->head[1] << 8;
96 if (len) {
97 dbg(DEBUG_MCMD,
98 "Acknowledge sending to LL (id: %d, channel: %d size: %u)",
99 bcs->cs->myid, bcs->channel, len);
100
101 response.driver = bcs->cs->myid;
102 response.command = ISDN_STAT_BSENT;
103 response.arg = bcs->channel;
104 response.parm.length = len;
105 bcs->cs->iif.statcallb(&response);
106 }
107}
108EXPORT_SYMBOL_GPL(gigaset_skb_sent);
109
110/* This function will be called by LL to send commands
111 * NOTE: LL ignores the returned value, for commands other than ISDN_CMD_IOCTL,
112 * so don't put too much effort into it.
113 */
114static int command_from_LL(isdn_ctrl *cntrl)
115{
116 struct cardstate *cs = gigaset_get_cs_by_id(cntrl->driver);
117 //isdn_ctrl response;
118 //unsigned long flags;
119 struct bc_state *bcs;
120 int retval = 0;
121 struct setup_parm *sp;
122
123 //dbg(DEBUG_ANY, "Gigaset_HW: Receiving command");
124 gigaset_debugdrivers();
125
126	/* Terminate this call if no device is present. But if the command is "ISDN_CMD_LOCK" or
127	 * "ISDN_CMD_UNLOCK", execute it anyway, because these commands are device independent.
128 */
129 //FIXME "remove test for &connected"
130 if ((!cs || !atomic_read(&cs->connected))) {
131 warn("LL tried to access unknown device with nr. %d",
132 cntrl->driver);
133 return -ENODEV;
134 }
135
136 switch (cntrl->command) {
137 case ISDN_CMD_IOCTL:
138
139 dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver:%d,arg: %ld)",
140 cntrl->driver, cntrl->arg);
141
142 warn("ISDN_CMD_IOCTL is not supported.");
143 return -EINVAL;
144
145 case ISDN_CMD_DIAL:
146 dbg(DEBUG_ANY, "ISDN_CMD_DIAL (driver: %d, channel: %ld, "
147 "phone: %s,ownmsn: %s, si1: %d, si2: %d)",
148 cntrl->driver, cntrl->arg,
149 cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn,
150 cntrl->parm.setup.si1, cntrl->parm.setup.si2);
151
152 if (cntrl->arg >= cs->channels) {
153 err("invalid channel (%d)", (int) cntrl->arg);
154 return -EINVAL;
155 }
156
157 bcs = cs->bcs + cntrl->arg;
158
159 if (!gigaset_get_channel(bcs)) {
160 err("channel not free");
161 return -EBUSY;
162 }
163
164 sp = kmalloc(sizeof *sp, GFP_ATOMIC);
165 if (!sp) {
166 gigaset_free_channel(bcs);
167 err("ISDN_CMD_DIAL: out of memory");
168 return -ENOMEM;
169 }
170 *sp = cntrl->parm.setup;
171
172 if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, sp,
173 atomic_read(&bcs->at_state.seq_index),
174 NULL)) {
175 //FIXME what should we do?
176 kfree(sp);
177 gigaset_free_channel(bcs);
178 return -ENOMEM;
179 }
180
181 dbg(DEBUG_CMD, "scheduling DIAL");
182 gigaset_schedule_event(cs);
183 break;
184 case ISDN_CMD_ACCEPTD: //FIXME
185 dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD");
186
187 if (cntrl->arg >= cs->channels) {
188 err("invalid channel (%d)", (int) cntrl->arg);
189 return -EINVAL;
190 }
191
192 if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state,
193 EV_ACCEPT, NULL, 0, NULL)) {
194 //FIXME what should we do?
195 return -ENOMEM;
196 }
197
198 dbg(DEBUG_CMD, "scheduling ACCEPT");
199 gigaset_schedule_event(cs);
200
201 break;
202 case ISDN_CMD_ACCEPTB:
203 dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTB");
204 break;
205 case ISDN_CMD_HANGUP:
206 dbg(DEBUG_ANY,
207 "ISDN_CMD_HANGUP (channel: %d)", (int) cntrl->arg);
208
209 if (cntrl->arg >= cs->channels) {
210 err("ISDN_CMD_HANGUP: invalid channel (%u)",
211 (unsigned) cntrl->arg);
212 return -EINVAL;
213 }
214
215 if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state,
216 EV_HUP, NULL, 0, NULL)) {
217 //FIXME what should we do?
218 return -ENOMEM;
219 }
220
221 dbg(DEBUG_CMD, "scheduling HUP");
222 gigaset_schedule_event(cs);
223
224 break;
225	case ISDN_CMD_CLREAZ: /* Do not signal incoming calls */ //FIXME
226 dbg(DEBUG_ANY, "ISDN_CMD_CLREAZ");
227 break;
228 case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */ //FIXME
229 dbg(DEBUG_ANY,
230 "ISDN_CMD_SETEAZ (id:%d, channel: %ld, number: %s)",
231 cntrl->driver, cntrl->arg, cntrl->parm.num);
232 break;
233 case ISDN_CMD_SETL2: /* Set L2 to given protocol */
234 dbg(DEBUG_ANY, "ISDN_CMD_SETL2 (Channel: %ld, Proto: %lx)",
235 cntrl->arg & 0xff, (cntrl->arg >> 8));
236
237 if ((cntrl->arg & 0xff) >= cs->channels) {
238 err("invalid channel (%u)",
239 (unsigned) cntrl->arg & 0xff);
240 return -EINVAL;
241 }
242
243 if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg & 0xff].at_state,
244 EV_PROTO_L2, NULL, cntrl->arg >> 8,
245 NULL)) {
246 //FIXME what should we do?
247 return -ENOMEM;
248 }
249
250 dbg(DEBUG_CMD, "scheduling PROTO_L2");
251 gigaset_schedule_event(cs);
252 break;
253 case ISDN_CMD_SETL3: /* Set L3 to given protocol */
254 dbg(DEBUG_ANY, "ISDN_CMD_SETL3 (Channel: %ld, Proto: %lx)",
255 cntrl->arg & 0xff, (cntrl->arg >> 8));
256
257 if ((cntrl->arg & 0xff) >= cs->channels) {
258 err("invalid channel (%u)",
259 (unsigned) cntrl->arg & 0xff);
260 return -EINVAL;
261 }
262
263 if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) {
264 err("invalid protocol %lu", cntrl->arg >> 8);
265 return -EINVAL;
266 }
267
268 break;
269 case ISDN_CMD_PROCEED:
270 dbg(DEBUG_ANY, "ISDN_CMD_PROCEED"); //FIXME
271 break;
272 case ISDN_CMD_ALERT:
273 dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME
274 if (cntrl->arg >= cs->channels) {
275 err("invalid channel (%d)", (int) cntrl->arg);
276 return -EINVAL;
277 }
278 //bcs = cs->bcs + cntrl->arg;
279 //bcs->proto2 = -1;
280 // FIXME
281 break;
282 case ISDN_CMD_REDIR:
283 dbg(DEBUG_ANY, "ISDN_CMD_REDIR"); //FIXME
284 break;
285 case ISDN_CMD_PROT_IO:
286 dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO");
287 break;
288 case ISDN_CMD_FAXCMD:
289 dbg(DEBUG_ANY, "ISDN_CMD_FAXCMD");
290 break;
291 case ISDN_CMD_GETL2:
292 dbg(DEBUG_ANY, "ISDN_CMD_GETL2");
293 break;
294 case ISDN_CMD_GETL3:
295 dbg(DEBUG_ANY, "ISDN_CMD_GETL3");
296 break;
297 case ISDN_CMD_GETEAZ:
298 dbg(DEBUG_ANY, "ISDN_CMD_GETEAZ");
299 break;
300 case ISDN_CMD_SETSIL:
301 dbg(DEBUG_ANY, "ISDN_CMD_SETSIL");
302 break;
303 case ISDN_CMD_GETSIL:
304 dbg(DEBUG_ANY, "ISDN_CMD_GETSIL");
305 break;
306 default:
307 err("unknown command %d from LL",
308 cntrl->command);
309 return -EINVAL;
310 }
311
312 return retval;
313}
314
315void gigaset_i4l_cmd(struct cardstate *cs, int cmd)
316{
317 isdn_ctrl command;
318
319 command.driver = cs->myid;
320 command.command = cmd;
321 command.arg = 0;
322 cs->iif.statcallb(&command);
323}
324
325void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd)
326{
327 isdn_ctrl command;
328
329 command.driver = bcs->cs->myid;
330 command.command = cmd;
331 command.arg = bcs->channel;
332 bcs->cs->iif.statcallb(&command);
333}
334
335int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data)
336{
337 struct bc_state *bcs = at_state->bcs;
338 unsigned proto;
339 const char *bc;
340 size_t length[AT_NUM];
341 size_t l;
342 int i;
343 struct setup_parm *sp = data;
344
345 switch (bcs->proto2) {
346 case ISDN_PROTO_L2_HDLC:
347 proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */
348 break;
349 case ISDN_PROTO_L2_TRANS:
350 proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */
351 break;
352 default:
353 err("invalid protocol: %u", bcs->proto2);
354 return -EINVAL;
355 }
356
357 switch (sp->si1) {
358 case 1: /* audio */
359 bc = "9090A3"; /* 3.1 kHz audio, A-law */
360 break;
361 case 7: /* data */
362 default: /* hope the app knows what it is doing */
363 bc = "8890"; /* unrestricted digital information */
364 }
365 //FIXME add missing si1 values from 1TR6, inspect si2, set HLC/LLC
366
367 length[AT_DIAL ] = 1 + strlen(sp->phone) + 1 + 1;
368 l = strlen(sp->eazmsn);
369 length[AT_MSN ] = l ? 6 + l + 1 + 1 : 0;
370 length[AT_BC ] = 5 + strlen(bc) + 1 + 1;
371 length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
372 length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */
373 length[AT_TYPE ] = 6 + 1 + 1 + 1; /* call type: 1 character */
374 length[AT_HLC ] = 0;
375
376 for (i = 0; i < AT_NUM; ++i) {
377 kfree(bcs->commands[i]);
378 bcs->commands[i] = NULL;
379 if (length[i] &&
380 !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
381 err("out of memory");
382 return -ENOMEM;
383 }
384 }
385
386 /* type = 1: extern, 0: intern, 2: recall, 3: door, 4: centrex */
387 if (sp->phone[0] == '*' && sp->phone[1] == '*') {
388 /* internal call: translate ** prefix to CTP value */
389 snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
390 "D%s\r", sp->phone+2);
391 strncpy(bcs->commands[AT_TYPE], "^SCTP=0\r", length[AT_TYPE]);
392 } else {
393 snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
394 "D%s\r", sp->phone);
395 strncpy(bcs->commands[AT_TYPE], "^SCTP=1\r", length[AT_TYPE]);
396 }
397
398 if (bcs->commands[AT_MSN])
399 snprintf(bcs->commands[AT_MSN], length[AT_MSN], "^SMSN=%s\r", sp->eazmsn);
400 snprintf(bcs->commands[AT_BC ], length[AT_BC ], "^SBC=%s\r", bc);
401 snprintf(bcs->commands[AT_PROTO], length[AT_PROTO], "^SBPR=%u\r", proto);
402 snprintf(bcs->commands[AT_ISO ], length[AT_ISO ], "^SISO=%u\r", (unsigned)bcs->channel + 1);
403
404 return 0;
405}
406
407int gigaset_isdn_setup_accept(struct at_state_t *at_state)
408{
409 unsigned proto;
410 size_t length[AT_NUM];
411 int i;
412 struct bc_state *bcs = at_state->bcs;
413
414 switch (bcs->proto2) {
415 case ISDN_PROTO_L2_HDLC:
416 proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */
417 break;
418 case ISDN_PROTO_L2_TRANS:
419 proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */
420 break;
421 default:
422 err("invalid protocol: %u", bcs->proto2);
423 return -EINVAL;
424 }
425
426 length[AT_DIAL ] = 0;
427 length[AT_MSN ] = 0;
428 length[AT_BC ] = 0;
429 length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
430 length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */
431 length[AT_TYPE ] = 0;
432 length[AT_HLC ] = 0;
433
434 for (i = 0; i < AT_NUM; ++i) {
435 kfree(bcs->commands[i]);
436 bcs->commands[i] = NULL;
437 if (length[i] &&
438 !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
439 err("out of memory");
440 return -ENOMEM;
441 }
442 }
443
444 snprintf(bcs->commands[AT_PROTO], length[AT_PROTO], "^SBPR=%u\r", proto);
445 snprintf(bcs->commands[AT_ISO ], length[AT_ISO ], "^SISO=%u\r", (unsigned) bcs->channel + 1);
446
447 return 0;
448}
449
450int gigaset_isdn_icall(struct at_state_t *at_state)
451{
452 struct cardstate *cs = at_state->cs;
453 struct bc_state *bcs = at_state->bcs;
454 isdn_ctrl response;
455 int retval;
456
457 /* fill ICALL structure */
458 response.parm.setup.si1 = 0; /* default: unknown */
459 response.parm.setup.si2 = 0;
460 response.parm.setup.screen = 0; //FIXME how to set these?
461 response.parm.setup.plan = 0;
462 if (!at_state->str_var[STR_ZBC]) {
463 /* no BC (internal call): assume speech, A-law */
464 response.parm.setup.si1 = 1;
465 } else if (!strcmp(at_state->str_var[STR_ZBC], "8890")) {
466 /* unrestricted digital information */
467 response.parm.setup.si1 = 7;
468 } else if (!strcmp(at_state->str_var[STR_ZBC], "8090A3")) {
469 /* speech, A-law */
470 response.parm.setup.si1 = 1;
471 } else if (!strcmp(at_state->str_var[STR_ZBC], "9090A3")) {
472		/* 3.1 kHz audio, A-law */
473 response.parm.setup.si1 = 1;
474 response.parm.setup.si2 = 2;
475 } else {
476 warn("RING ignored - unsupported BC %s",
477 at_state->str_var[STR_ZBC]);
478 return ICALL_IGNORE;
479 }
480 if (at_state->str_var[STR_NMBR]) {
481 strncpy(response.parm.setup.phone, at_state->str_var[STR_NMBR],
482 sizeof response.parm.setup.phone - 1);
483 response.parm.setup.phone[sizeof response.parm.setup.phone - 1] = 0;
484 } else
485 response.parm.setup.phone[0] = 0;
486 if (at_state->str_var[STR_ZCPN]) {
487 strncpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN],
488 sizeof response.parm.setup.eazmsn - 1);
489 response.parm.setup.eazmsn[sizeof response.parm.setup.eazmsn - 1] = 0;
490 } else
491 response.parm.setup.eazmsn[0] = 0;
492
493 if (!bcs) {
494 notice("no channel for incoming call");
495 dbg(DEBUG_CMD, "Sending ICALLW");
496 response.command = ISDN_STAT_ICALLW;
497 response.arg = 0; //FIXME
498 } else {
499 dbg(DEBUG_CMD, "Sending ICALL");
500 response.command = ISDN_STAT_ICALL;
501 response.arg = bcs->channel; //FIXME
502 }
503 response.driver = cs->myid;
504 retval = cs->iif.statcallb(&response);
505 dbg(DEBUG_CMD, "Response: %d", retval);
506 switch (retval) {
507 case 0: /* no takers */
508 return ICALL_IGNORE;
509 case 1: /* alerting */
510 bcs->chstate |= CHS_NOTIFY_LL;
511 return ICALL_ACCEPT;
512 case 2: /* reject */
513 return ICALL_REJECT;
514 case 3: /* incomplete */
515 warn("LL requested unsupported feature: Incomplete Number");
516 return ICALL_IGNORE;
517 case 4: /* proceeding */
518 /* Gigaset will send ALERTING anyway.
519 * There doesn't seem to be a way to avoid this.
520 */
521 return ICALL_ACCEPT;
522 case 5: /* deflect */
523 warn("LL requested unsupported feature: Call Deflection");
524 return ICALL_IGNORE;
525 default:
526 err("LL error %d on ICALL", retval);
527 return ICALL_IGNORE;
528 }
529}
530
531/* Set Callback function pointer */
532int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid)
533{
534 isdn_if *iif = &cs->iif;
535
536 dbg(DEBUG_ANY, "Register driver capabilities to LL");
537
538 //iif->id[sizeof(iif->id) - 1]=0;
539 //strncpy(iif->id, isdnid, sizeof(iif->id) - 1);
540 if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
541 >= sizeof iif->id)
542 return -ENOMEM; //FIXME EINVAL/...??
543
544 iif->owner = THIS_MODULE;
545	iif->channels = cs->channels;		/* number of channels supported */
546 iif->maxbufsize = MAX_BUF_SIZE;
547 iif->features = ISDN_FEATURE_L2_TRANS | /* Our device is very advanced, therefore */
548 ISDN_FEATURE_L2_HDLC |
549#ifdef GIG_X75
550 ISDN_FEATURE_L2_X75I |
551#endif
552 ISDN_FEATURE_L3_TRANS |
553 ISDN_FEATURE_P_EURO;
554 iif->hl_hdrlen = HW_HDR_LEN; /* Area for storing ack */
555 iif->command = command_from_LL;
556 iif->writebuf_skb = writebuf_from_LL;
557 iif->writecmd = NULL; /* Don't support isdnctrl */
558 iif->readstat = NULL; /* Don't support isdnctrl */
559 iif->rcvcallb_skb = NULL; /* Will be set by LL */
560 iif->statcallb = NULL; /* Will be set by LL */
561
562 if (!register_isdn(iif))
563 return 0;
564
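	/* register_isdn() is expected to have stored the driver id assigned by
	 * the LL in iif->channels (replacing the channel count passed in);
	 * that id is saved as our device id below. */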
565 cs->myid = iif->channels; /* Set my device id */
566 return 1;
567}
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
new file mode 100644
index 000000000000..3a81d9c65141
--- /dev/null
+++ b/drivers/isdn/gigaset/interface.c
@@ -0,0 +1,718 @@
1/*
2 * interface to user space for the gigaset driver
3 *
4 * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
5 *
6 * =====================================================================
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 * =====================================================================
12 * Version: $Id: interface.c,v 1.14.4.15 2006/02/04 18:28:16 hjlipp Exp $
13 * =====================================================================
14 */
15
16#include "gigaset.h"
17#include <linux/gigaset_dev.h>
18#include <linux/tty.h>
19#include <linux/tty_flip.h>
20
21/*** our ioctls ***/
22
23static int if_lock(struct cardstate *cs, int *arg)
24{
25 int cmd = *arg;
26
27 dbg(DEBUG_IF, "%u: if_lock (%d)", cs->minor_index, cmd);
28
29 if (cmd > 1)
30 return -EINVAL;
31
32 if (cmd < 0) {
33 *arg = atomic_read(&cs->mstate) == MS_LOCKED; //FIXME remove?
34 return 0;
35 }
36
37 if (!cmd && atomic_read(&cs->mstate) == MS_LOCKED
38 && atomic_read(&cs->connected)) {
39 cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS);
40 cs->ops->baud_rate(cs, B115200);
41 cs->ops->set_line_ctrl(cs, CS8);
42 cs->control_state = TIOCM_DTR|TIOCM_RTS;
43 }
44
45 cs->waiting = 1;
46 if (!gigaset_add_event(cs, &cs->at_state, EV_IF_LOCK,
47 NULL, cmd, NULL)) {
48 cs->waiting = 0;
49 return -ENOMEM;
50 }
51
52 dbg(DEBUG_CMD, "scheduling IF_LOCK");
53 gigaset_schedule_event(cs);
54
55 wait_event(cs->waitqueue, !cs->waiting);
56
57 if (cs->cmd_result >= 0) {
58 *arg = cs->cmd_result;
59 return 0;
60 }
61
62 return cs->cmd_result;
63}
64
65static int if_version(struct cardstate *cs, unsigned arg[4])
66{
67 static const unsigned version[4] = GIG_VERSION;
68 static const unsigned compat[4] = GIG_COMPAT;
69 unsigned cmd = arg[0];
70
71 dbg(DEBUG_IF, "%u: if_version (%d)", cs->minor_index, cmd);
72
73 switch (cmd) {
74 case GIGVER_DRIVER:
75 memcpy(arg, version, sizeof version);
76 return 0;
77 case GIGVER_COMPAT:
78 memcpy(arg, compat, sizeof compat);
79 return 0;
80 case GIGVER_FWBASE:
81 cs->waiting = 1;
82 if (!gigaset_add_event(cs, &cs->at_state, EV_IF_VER,
83 NULL, 0, arg)) {
84 cs->waiting = 0;
85 return -ENOMEM;
86 }
87
88 dbg(DEBUG_CMD, "scheduling IF_VER");
89 gigaset_schedule_event(cs);
90
91 wait_event(cs->waitqueue, !cs->waiting);
92
93 if (cs->cmd_result >= 0)
94 return 0;
95
96 return cs->cmd_result;
97 default:
98 return -EINVAL;
99 }
100}
101
102static int if_config(struct cardstate *cs, int *arg)
103{
104 dbg(DEBUG_IF, "%u: if_config (%d)", cs->minor_index, *arg);
105
106 if (*arg != 1)
107 return -EINVAL;
108
109 if (atomic_read(&cs->mstate) != MS_LOCKED)
110 return -EBUSY;
111
112 *arg = 0;
113 return gigaset_enterconfigmode(cs);
114}
115
116/*** the terminal driver ***/
117/* stolen from usbserial and some other tty drivers */
118
119static int if_open(struct tty_struct *tty, struct file *filp);
120static void if_close(struct tty_struct *tty, struct file *filp);
121static int if_ioctl(struct tty_struct *tty, struct file *file,
122 unsigned int cmd, unsigned long arg);
123static int if_write_room(struct tty_struct *tty);
124static int if_chars_in_buffer(struct tty_struct *tty);
125static void if_throttle(struct tty_struct *tty);
126static void if_unthrottle(struct tty_struct *tty);
127static void if_set_termios(struct tty_struct *tty, struct termios *old);
128static int if_tiocmget(struct tty_struct *tty, struct file *file);
129static int if_tiocmset(struct tty_struct *tty, struct file *file,
130 unsigned int set, unsigned int clear);
131static int if_write(struct tty_struct *tty,
132 const unsigned char *buf, int count);
133
134static struct tty_operations if_ops = {
135 .open = if_open,
136 .close = if_close,
137 .ioctl = if_ioctl,
138 .write = if_write,
139 .write_room = if_write_room,
140 .chars_in_buffer = if_chars_in_buffer,
141 .set_termios = if_set_termios,
142 .throttle = if_throttle,
143 .unthrottle = if_unthrottle,
144#if 0
145 .break_ctl = serial_break,
146#endif
147 .tiocmget = if_tiocmget,
148 .tiocmset = if_tiocmset,
149};
150
151static int if_open(struct tty_struct *tty, struct file *filp)
152{
153 struct cardstate *cs;
154 unsigned long flags;
155
156 dbg(DEBUG_IF, "%d+%d: %s()", tty->driver->minor_start, tty->index,
157 __FUNCTION__);
158
159 tty->driver_data = NULL;
160
161 cs = gigaset_get_cs_by_tty(tty);
162 if (!cs)
163 return -ENODEV;
164
165 if (down_interruptible(&cs->sem))
166 return -ERESTARTSYS; // FIXME -EINTR?
167 tty->driver_data = cs;
168
169 ++cs->open_count;
170
171 if (cs->open_count == 1) {
172 spin_lock_irqsave(&cs->lock, flags);
173 cs->tty = tty;
174 spin_unlock_irqrestore(&cs->lock, flags);
175 tty->low_latency = 1; //FIXME test
176 //FIXME
177 }
178
179 up(&cs->sem);
180 return 0;
181}
182
183static void if_close(struct tty_struct *tty, struct file *filp)
184{
185 struct cardstate *cs;
186 unsigned long flags;
187
188 cs = (struct cardstate *) tty->driver_data;
189 if (!cs) {
190 err("cs==NULL in %s", __FUNCTION__);
191 return;
192 }
193
194 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
195
196 down(&cs->sem);
197
198 if (!cs->open_count)
199 warn("%s: device not opened", __FUNCTION__);
200 else {
201 if (!--cs->open_count) {
202 spin_lock_irqsave(&cs->lock, flags);
203 cs->tty = NULL;
204 spin_unlock_irqrestore(&cs->lock, flags);
205 //FIXME
206 }
207 }
208
209 up(&cs->sem);
210}
211
212static int if_ioctl(struct tty_struct *tty, struct file *file,
213 unsigned int cmd, unsigned long arg)
214{
215 struct cardstate *cs;
216 int retval = -ENODEV;
217 int int_arg;
218 unsigned char buf[6];
219 unsigned version[4];
220
221 cs = (struct cardstate *) tty->driver_data;
222 if (!cs) {
223 err("cs==NULL in %s", __FUNCTION__);
224 return -ENODEV;
225 }
226
227 dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __FUNCTION__, cmd);
228
229 if (down_interruptible(&cs->sem))
230 return -ERESTARTSYS; // FIXME -EINTR?
231
232 if (!cs->open_count)
233 warn("%s: device not opened", __FUNCTION__);
234 else {
235 retval = 0;
236 switch (cmd) {
237 case GIGASET_REDIR:
238 retval = get_user(int_arg, (int __user *) arg);
239 if (retval >= 0)
240 retval = if_lock(cs, &int_arg);
241 if (retval >= 0)
242 retval = put_user(int_arg, (int __user *) arg);
243 break;
244 case GIGASET_CONFIG:
245 retval = get_user(int_arg, (int __user *) arg);
246 if (retval >= 0)
247 retval = if_config(cs, &int_arg);
248 if (retval >= 0)
249 retval = put_user(int_arg, (int __user *) arg);
250 break;
251 case GIGASET_BRKCHARS:
252 //FIXME test if MS_LOCKED
253 gigaset_dbg_buffer(DEBUG_IF, "GIGASET_BRKCHARS",
254 6, (const unsigned char *) arg, 1);
255 if (!atomic_read(&cs->connected)) {
256 dbg(DEBUG_ANY, "can't communicate with unplugged device");
257 retval = -ENODEV;
258 break;
259 }
260 retval = copy_from_user(&buf,
261 (const unsigned char __user *) arg, 6)
262 ? -EFAULT : 0;
263 if (retval >= 0)
264 retval = cs->ops->brkchars(cs, buf);
265 break;
266 case GIGASET_VERSION:
267 retval = copy_from_user(version, (unsigned __user *) arg,
268 sizeof version) ? -EFAULT : 0;
269 if (retval >= 0)
270 retval = if_version(cs, version);
271 if (retval >= 0)
272 retval = copy_to_user((unsigned __user *) arg, version,
273 sizeof version)
274 ? -EFAULT : 0;
275 break;
276 default:
277 dbg(DEBUG_ANY, "%s: arg not supported - 0x%04x",
278 __FUNCTION__, cmd);
279 retval = -ENOIOCTLCMD;
280 }
281 }
282
283 up(&cs->sem);
284
285 return retval;
286}
287
288static int if_tiocmget(struct tty_struct *tty, struct file *file)
289{
290 struct cardstate *cs;
291 int retval;
292
293 cs = (struct cardstate *) tty->driver_data;
294 if (!cs) {
295 err("cs==NULL in %s", __FUNCTION__);
296 return -ENODEV;
297 }
298
299 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
300
301 if (down_interruptible(&cs->sem))
302 return -ERESTARTSYS; // FIXME -EINTR?
303
304 // FIXME read from device?
305 retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR);
306
307 up(&cs->sem);
308
309 return retval;
310}
311
312static int if_tiocmset(struct tty_struct *tty, struct file *file,
313 unsigned int set, unsigned int clear)
314{
315 struct cardstate *cs;
316 int retval;
317 unsigned mc;
318
319 cs = (struct cardstate *) tty->driver_data;
320 if (!cs) {
321 err("cs==NULL in %s", __FUNCTION__);
322 return -ENODEV;
323 }
324
325 dbg(DEBUG_IF,
326 "%u: %s(0x%x, 0x%x)", cs->minor_index, __FUNCTION__, set, clear);
327
328 if (down_interruptible(&cs->sem))
329 return -ERESTARTSYS; // FIXME -EINTR?
330
331 if (!atomic_read(&cs->connected)) {
332 dbg(DEBUG_ANY, "can't communicate with unplugged device");
333 retval = -ENODEV;
334 } else {
335 mc = (cs->control_state | set) & ~clear & (TIOCM_RTS|TIOCM_DTR);
336 retval = cs->ops->set_modem_ctrl(cs, cs->control_state, mc);
337 cs->control_state = mc;
338 }
339
340 up(&cs->sem);
341
342 return retval;
343}
344
345static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
346{
347 struct cardstate *cs;
348 int retval = -ENODEV;
349
350 cs = (struct cardstate *) tty->driver_data;
351 if (!cs) {
352 err("cs==NULL in %s", __FUNCTION__);
353 return -ENODEV;
354 }
355
356 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
357
358 if (down_interruptible(&cs->sem))
359 return -ERESTARTSYS; // FIXME -EINTR?
360
361 if (!cs->open_count)
362 warn("%s: device not opened", __FUNCTION__);
363 else if (atomic_read(&cs->mstate) != MS_LOCKED) {
364 warn("can't write to unlocked device");
365 retval = -EBUSY;
366 } else if (!atomic_read(&cs->connected)) {
367 dbg(DEBUG_ANY, "can't write to unplugged device");
368 retval = -EBUSY; //FIXME
369 } else {
370 retval = cs->ops->write_cmd(cs, buf, count,
371 &cs->if_wake_tasklet);
372 }
373
374 up(&cs->sem);
375
376 return retval;
377}
378
379static int if_write_room(struct tty_struct *tty)
380{
381 struct cardstate *cs;
382 int retval = -ENODEV;
383
384 cs = (struct cardstate *) tty->driver_data;
385 if (!cs) {
386 err("cs==NULL in %s", __FUNCTION__);
387 return -ENODEV;
388 }
389
390 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
391
392 if (down_interruptible(&cs->sem))
393 return -ERESTARTSYS; // FIXME -EINTR?
394
395 if (!cs->open_count)
396 warn("%s: device not opened", __FUNCTION__);
397 else if (atomic_read(&cs->mstate) != MS_LOCKED) {
398 warn("can't write to unlocked device");
399 retval = -EBUSY; //FIXME
400 } else if (!atomic_read(&cs->connected)) {
401 dbg(DEBUG_ANY, "can't write to unplugged device");
402 retval = -EBUSY; //FIXME
403 } else
404 retval = cs->ops->write_room(cs);
405
406 up(&cs->sem);
407
408 return retval;
409}
410
411static int if_chars_in_buffer(struct tty_struct *tty)
412{
413 struct cardstate *cs;
414 int retval = -ENODEV;
415
416 cs = (struct cardstate *) tty->driver_data;
417 if (!cs) {
418 err("cs==NULL in %s", __FUNCTION__);
419 return -ENODEV;
420 }
421
422 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
423
424 if (down_interruptible(&cs->sem))
425 return -ERESTARTSYS; // FIXME -EINTR?
426
427 if (!cs->open_count)
428 warn("%s: device not opened", __FUNCTION__);
429 else if (atomic_read(&cs->mstate) != MS_LOCKED) {
430 warn("can't write to unlocked device");
431 retval = -EBUSY;
432 } else if (!atomic_read(&cs->connected)) {
433 dbg(DEBUG_ANY, "can't write to unplugged device");
434 retval = -EBUSY; //FIXME
435 } else
436 retval = cs->ops->chars_in_buffer(cs);
437
438 up(&cs->sem);
439
440 return retval;
441}
442
443static void if_throttle(struct tty_struct *tty)
444{
445 struct cardstate *cs;
446
447 cs = (struct cardstate *) tty->driver_data;
448 if (!cs) {
449 err("cs==NULL in %s", __FUNCTION__);
450 return;
451 }
452
453 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
454
455 down(&cs->sem);
456
457 if (!cs->open_count)
458 warn("%s: device not opened", __FUNCTION__);
459 else {
460 //FIXME
461 }
462
463 up(&cs->sem);
464}
465
466static void if_unthrottle(struct tty_struct *tty)
467{
468 struct cardstate *cs;
469
470 cs = (struct cardstate *) tty->driver_data;
471 if (!cs) {
472 err("cs==NULL in %s", __FUNCTION__);
473 return;
474 }
475
476 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
477
478 down(&cs->sem);
479
480 if (!cs->open_count)
481 warn("%s: device not opened", __FUNCTION__);
482 else {
483 //FIXME
484 }
485
486 up(&cs->sem);
487}
488
489static void if_set_termios(struct tty_struct *tty, struct termios *old)
490{
491 struct cardstate *cs;
492 unsigned int iflag;
493 unsigned int cflag;
494 unsigned int old_cflag;
495 unsigned int control_state, new_state;
496
497 cs = (struct cardstate *) tty->driver_data;
498 if (!cs) {
499 err("cs==NULL in %s", __FUNCTION__);
500 return;
501 }
502
503 dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__);
504
505 down(&cs->sem);
506
507 if (!cs->open_count) {
508 warn("%s: device not opened", __FUNCTION__);
509 goto out;
510 }
511
512 if (!atomic_read(&cs->connected)) {
513 dbg(DEBUG_ANY, "can't communicate with unplugged device");
514 goto out;
515 }
516
517 // stolen from mct_u232.c
518 iflag = tty->termios->c_iflag;
519 cflag = tty->termios->c_cflag;
520 old_cflag = old ? old->c_cflag : cflag; //FIXME?
521 dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x", cs->minor_index,
522 iflag, cflag, old_cflag);
523
524 /* get a local copy of the current port settings */
525 control_state = cs->control_state;
526
527 /*
528 * Update baud rate.
529 * Do not attempt to cache old rates and skip settings,
530 * disconnects screw such tricks up completely.
531 * Premature optimization is the root of all evil.
532 */
533
534 /* reassert DTR and (maybe) RTS on transition from B0 */
535 if ((old_cflag & CBAUD) == B0) {
536 new_state = control_state | TIOCM_DTR;
537 /* don't set RTS if using hardware flow control */
538 if (!(old_cflag & CRTSCTS))
539 new_state |= TIOCM_RTS;
540 dbg(DEBUG_IF, "%u: from B0 - set DTR%s", cs->minor_index,
541 (new_state & TIOCM_RTS) ? " only" : "/RTS");
542 cs->ops->set_modem_ctrl(cs, control_state, new_state);
543 control_state = new_state;
544 }
545
546 cs->ops->baud_rate(cs, cflag & CBAUD);
547
548 if ((cflag & CBAUD) == B0) {
549 /* Drop RTS and DTR */
550 dbg(DEBUG_IF, "%u: to B0 - drop DTR/RTS", cs->minor_index);
551 new_state = control_state & ~(TIOCM_DTR | TIOCM_RTS);
552 cs->ops->set_modem_ctrl(cs, control_state, new_state);
553 control_state = new_state;
554 }
555
556 /*
557 * Update line control register (LCR)
558 */
559
560 cs->ops->set_line_ctrl(cs, cflag);
561
562#if 0
563 //FIXME this hangs M101 [ts 2005-03-09]
564 //FIXME do we need this?
565 /*
566 * Set flow control: well, I do not really know how to handle DTR/RTS.
567 * Just do what we have seen with SniffUSB on Win98.
568 */
569 /* Drop DTR/RTS if no flow control otherwise assert */
570 dbg(DEBUG_IF, "%u: control_state %x", cs->minor_index, control_state);
571 new_state = control_state;
572 if ((iflag & IXOFF) || (iflag & IXON) || (cflag & CRTSCTS))
573 new_state |= TIOCM_DTR | TIOCM_RTS;
574 else
575 new_state &= ~(TIOCM_DTR | TIOCM_RTS);
576 if (new_state != control_state) {
577 dbg(DEBUG_IF, "%u: new_state %x", cs->minor_index, new_state);
578 gigaset_set_modem_ctrl(cs, control_state, new_state); // FIXME: mct_u232.c sets the old state here. is this a bug?
579 control_state = new_state;
580 }
581#endif
582
583 /* save off the modified port settings */
584 cs->control_state = control_state;
585
586out:
587 up(&cs->sem);
588}
589
590
591/* wakeup tasklet for the write operation */
592static void if_wake(unsigned long data)
593{
594 struct cardstate *cs = (struct cardstate *) data;
595 struct tty_struct *tty;
596
597 tty = cs->tty;
598 if (!tty)
599 return;
600
601 if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
602 tty->ldisc.write_wakeup) {
603 dbg(DEBUG_IF, "write wakeup call");
604 tty->ldisc.write_wakeup(tty);
605 }
606
607 wake_up_interruptible(&tty->write_wait);
608}
609
610/*** interface to common ***/
611
612void gigaset_if_init(struct cardstate *cs)
613{
614 struct gigaset_driver *drv;
615
616 drv = cs->driver;
617 if (!drv->have_tty)
618 return;
619
620 tasklet_init(&cs->if_wake_tasklet, &if_wake, (unsigned long) cs);
621 tty_register_device(drv->tty, cs->minor_index, NULL);
622}
623
624void gigaset_if_free(struct cardstate *cs)
625{
626 struct gigaset_driver *drv;
627
628 drv = cs->driver;
629 if (!drv->have_tty)
630 return;
631
632 tasklet_disable(&cs->if_wake_tasklet);
633 tasklet_kill(&cs->if_wake_tasklet);
634 tty_unregister_device(drv->tty, cs->minor_index);
635}
636
637void gigaset_if_receive(struct cardstate *cs,
638 unsigned char *buffer, size_t len)
639{
640 unsigned long flags;
641 struct tty_struct *tty;
642
643 spin_lock_irqsave(&cs->lock, flags);
644 if ((tty = cs->tty) == NULL)
645 dbg(DEBUG_ANY, "receive on closed device");
646 else {
647 tty_buffer_request_room(tty, len);
648 tty_insert_flip_string(tty, buffer, len);
649 tty_flip_buffer_push(tty);
650 }
651 spin_unlock_irqrestore(&cs->lock, flags);
652}
653EXPORT_SYMBOL_GPL(gigaset_if_receive);
654
655/* gigaset_if_initdriver
656 * Initialize tty interface.
657 * parameters:
658 * drv Driver
659 * procname Name of the driver (e.g. for /proc/tty/drivers)
660 * devname Name of the device files (prefix without minor number)
661 * devfsname Devfs name of the device files without %d
662 */
663void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
664 const char *devname, const char *devfsname)
665{
666 unsigned minors = drv->minors;
667 int ret;
668 struct tty_driver *tty;
669
670 drv->have_tty = 0;
671
672 if ((drv->tty = alloc_tty_driver(minors)) == NULL)
673 goto enomem;
674 tty = drv->tty;
675
676 tty->magic = TTY_DRIVER_MAGIC;
677 tty->major = GIG_MAJOR;
678 tty->type = TTY_DRIVER_TYPE_SERIAL;
679 tty->subtype = SERIAL_TYPE_NORMAL;
680 tty->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS;
681
682 tty->driver_name = procname;
683 tty->name = devname;
684 tty->minor_start = drv->minor;
685 tty->num = drv->minors;
686
687 tty->owner = THIS_MODULE;
688 tty->devfs_name = devfsname;
689
690 tty->init_termios = tty_std_termios; //FIXME
691 tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; //FIXME
692 tty_set_operations(tty, &if_ops);
693
694 ret = tty_register_driver(tty);
695 if (ret < 0) {
696 warn("failed to register tty driver (error %d)", ret);
697 goto error;
698 }
699 dbg(DEBUG_IF, "tty driver initialized");
700 drv->have_tty = 1;
701 return;
702
703enomem:
704 warn("could not allocate tty structures");
705error:
706 if (drv->tty)
707 put_tty_driver(drv->tty);
708}
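For orientation, a usage sketch of the helper documented above (illustration only, not taken from this patch; it assumes a variant-local struct gigaset_driver *driver obtained elsewhere and reuses the name constants that usb-gigaset.c defines further below):

	gigaset_if_initdriver(driver, GIGASET_MODULENAME, GIGASET_DEVNAME,
			      GIGASET_DEVFSNAME);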
709
710void gigaset_if_freedriver(struct gigaset_driver *drv)
711{
712 if (!drv->have_tty)
713 return;
714
715 drv->have_tty = 0;
716 tty_unregister_driver(drv->tty);
717 put_tty_driver(drv->tty);
718}
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
new file mode 100644
index 000000000000..5744eb91b315
--- /dev/null
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -0,0 +1,1009 @@
1/*
2 * Common data handling layer for bas_gigaset
3 *
4 * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>,
5 * Hansjoerg Lipp <hjlipp@web.de>.
6 *
7 * =====================================================================
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 * =====================================================================
13 * ToDo: ...
14 * =====================================================================
15 * Version: $Id: isocdata.c,v 1.2.2.5 2005/11/13 23:05:19 hjlipp Exp $
16 * =====================================================================
17 */
18
19#include "gigaset.h"
20#include <linux/crc-ccitt.h>
21
22/* access methods for isowbuf_t */
23/* ============================ */
24
25/* initialize buffer structure
26 */
27void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle)
28{
29 atomic_set(&iwb->read, 0);
30 atomic_set(&iwb->nextread, 0);
31 atomic_set(&iwb->write, 0);
32 atomic_set(&iwb->writesem, 1);
33 iwb->wbits = 0;
34 iwb->idle = idle;
35 memset(iwb->data + BAS_OUTBUFSIZE, idle, BAS_OUTBUFPAD);
36}
37
38/* compute number of bytes which can be appended to buffer
39 * so that there is still room to append a maximum frame of flags
40 */
41static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
42{
43 int read, write, freebytes;
44
45 read = atomic_read(&iwb->read);
46 write = atomic_read(&iwb->write);
47 if ((freebytes = read - write) > 0) {
48 /* no wraparound: need padding space within regular area */
49 return freebytes - BAS_OUTBUFPAD;
50 } else if (read < BAS_OUTBUFPAD) {
51 /* wraparound: can use space up to end of regular area */
52 return BAS_OUTBUFSIZE - write;
53 } else {
54 /* following the wraparound yields more space */
55 return freebytes + BAS_OUTBUFSIZE - BAS_OUTBUFPAD;
56 }
57}
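The three branches are easiest to see with concrete numbers. A worked illustration, using invented values rather than the real gigaset.h constants:

	/* assume, purely for illustration, BAS_OUTBUFSIZE=1000, BAS_OUTBUFPAD=100:
	 *   read=700, write=300: no wraparound,   700 - 300 - 100       = 300 free
	 *   read= 50, write=300: read < PAD, only up to the buffer end,
	 *                        1000 - 300                             = 700 free
	 *   read=500, write=800: wraparound,      (500-800) + 1000 - 100 = 600 free
	 */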
58
59/* compare two offsets within the buffer
60 * The buffer is seen as circular, with the read position as start
61 * returns -1/0/1 if position a </=/> position b without crossing 'read'
62 */
63static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b)
64{
65 int read;
66 if (a == b)
67 return 0;
68 read = atomic_read(&iwb->read);
69 if (a < b) {
70 if (a < read && read <= b)
71 return +1;
72 else
73 return -1;
74 } else {
75 if (b < read && read <= a)
76 return -1;
77 else
78 return +1;
79 }
80}
81
82/* start writing
83 * acquire the write semaphore
84 * return true if acquired, false if busy
85 */
86static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
87{
88 if (!atomic_dec_and_test(&iwb->writesem)) {
89 atomic_inc(&iwb->writesem);
90 dbg(DEBUG_ISO,
91 "%s: couldn't acquire iso write semaphore", __func__);
92 return 0;
93 }
94#ifdef CONFIG_GIGASET_DEBUG
95 dbg(DEBUG_ISO,
96 "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
97 __func__, iwb->data[atomic_read(&iwb->write)], iwb->wbits);
98#endif
99 return 1;
100}
101
102/* finish writing
103 * release the write semaphore and update the maximum buffer fill level
104 * returns the current write position
105 */
106static inline int isowbuf_donewrite(struct isowbuf_t *iwb)
107{
108 int write = atomic_read(&iwb->write);
109 atomic_inc(&iwb->writesem);
110 return write;
111}
112
113/* append bits to buffer without any checks
114 * - data contains bits to append, starting at LSB
115 * - nbits is number of bits to append (0..24)
116 * must be called with the write semaphore held
117 * If more than nbits bits are set in data, the extraneous bits are set in the
118 * buffer too, but the write position is only advanced by nbits.
119 */
120static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits)
121{
122 int write = atomic_read(&iwb->write);
123 data <<= iwb->wbits;
124 data |= iwb->data[write];
125 nbits += iwb->wbits;
126 while (nbits >= 8) {
127 iwb->data[write++] = data & 0xff;
128 write %= BAS_OUTBUFSIZE;
129 data >>= 8;
130 nbits -= 8;
131 }
132 iwb->wbits = nbits;
133 iwb->data[write] = data & 0xff;
134 atomic_set(&iwb->write, write);
135}
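A worked trace of the bit bookkeeping above, with invented values: suppose data[write] == 0x05, wbits == 3, and isowbuf_putbits(iwb, 0x5a, 7) is called.

	/*   data   = 0x5a << 3        = 0x2d0
	 *   data  |= 0x05             = 0x2d5
	 *   nbits  = 7 + 3            = 10
	 *   loop:    data[write++] = 0xd5, data >>= 8 -> 0x02, nbits -> 2
	 *   result:  wbits = 2, data[write] = 0x02
	 * The old partial byte is completed to 0xd5 and two bits of the new
	 * data remain pending in the following byte.
	 */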
136
137/* put final flag on HDLC bitstream
138 * also sets the idle fill byte to the correspondingly shifted flag pattern
139 * must be called with the write semaphore held
140 */
141static inline void isowbuf_putflag(struct isowbuf_t *iwb)
142{
143 int write;
144
145 /* add two flags, thus reliably covering one byte */
146 isowbuf_putbits(iwb, 0x7e7e, 8);
147 /* recover the idle flag byte */
148 write = atomic_read(&iwb->write);
149 iwb->idle = iwb->data[write];
150 dbg(DEBUG_ISO, "idle fill byte %02x", iwb->idle);
151 /* mask extraneous bits in buffer */
152 iwb->data[write] &= (1 << iwb->wbits) - 1;
153}
154
155/* retrieve a block of bytes for sending
156 * The requested number of bytes is provided as a contiguous block.
157 * If necessary, the frame is filled to the requested number of bytes
158 * with the idle value.
159 * returns offset to frame, < 0 on busy or error
160 */
161int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
162{
163 int read, write, limit, src, dst;
164 unsigned char pbyte;
165
166 read = atomic_read(&iwb->nextread);
167 write = atomic_read(&iwb->write);
168 if (likely(read == write)) {
169 //dbg(DEBUG_STREAM, "%s: send buffer empty", __func__);
170 /* return idle frame */
171 return read < BAS_OUTBUFPAD ?
172 BAS_OUTBUFSIZE : read - BAS_OUTBUFPAD;
173 }
174
175 limit = read + size;
176 dbg(DEBUG_STREAM,
177 "%s: read=%d write=%d limit=%d", __func__, read, write, limit);
178#ifdef CONFIG_GIGASET_DEBUG
179 if (unlikely(size < 0 || size > BAS_OUTBUFPAD)) {
180 err("invalid size %d", size);
181 return -EINVAL;
182 }
183 src = atomic_read(&iwb->read);
184 if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
185 (read < src && limit >= src))) {
186 err("isoc write buffer frame reservation violated");
187 return -EFAULT;
188 }
189#endif
190
191 if (read < write) {
192 /* no wraparound in valid data */
193 if (limit >= write) {
194 /* append idle frame */
195 if (!isowbuf_startwrite(iwb))
196 return -EBUSY;
197 /* write position could have changed */
198 if (limit >= (write = atomic_read(&iwb->write))) {
199 pbyte = iwb->data[write]; /* save partial byte */
200 limit = write + BAS_OUTBUFPAD;
201 dbg(DEBUG_STREAM,
202 "%s: filling %d->%d with %02x",
203 __func__, write, limit, iwb->idle);
204 if (write + BAS_OUTBUFPAD < BAS_OUTBUFSIZE)
205 memset(iwb->data + write, iwb->idle,
206 BAS_OUTBUFPAD);
207 else {
208 /* wraparound, fill entire pad area */
209 memset(iwb->data + write, iwb->idle,
210 BAS_OUTBUFSIZE + BAS_OUTBUFPAD
211 - write);
212 limit = 0;
213 }
214 dbg(DEBUG_STREAM, "%s: restoring %02x at %d",
215 __func__, pbyte, limit);
216 iwb->data[limit] = pbyte; /* restore partial byte */
217 atomic_set(&iwb->write, limit);
218 }
219 isowbuf_donewrite(iwb);
220 }
221 } else {
222 /* valid data wraparound */
223 if (limit >= BAS_OUTBUFSIZE) {
224 /* copy wrapped part into pad area */
225 src = 0;
226 dst = BAS_OUTBUFSIZE;
227 while (dst < limit && src < write)
228 iwb->data[dst++] = iwb->data[src++];
229 if (dst <= limit) {
230 /* fill pad area with idle byte */
231 memset(iwb->data + dst, iwb->idle,
232 BAS_OUTBUFSIZE + BAS_OUTBUFPAD - dst);
233 }
234 limit = src;
235 }
236 }
237 atomic_set(&iwb->nextread, limit);
238 return read;
239}
240
241/* dump_bytes
242 * write hex bytes to syslog for debugging
243 */
244static inline void dump_bytes(enum debuglevel level, const char *tag,
245 unsigned char *bytes, int count)
246{
247#ifdef CONFIG_GIGASET_DEBUG
248 unsigned char c;
249 static char dbgline[3 * 32 + 1];
250 static const char hexdigit[] = "0123456789abcdef";
251 int i = 0;
252 IFNULLRET(tag);
253 IFNULLRET(bytes);
254 while (count-- > 0) {
255 if (i > sizeof(dbgline) - 4) {
256 dbgline[i] = '\0';
257 dbg(level, "%s:%s", tag, dbgline);
258 i = 0;
259 }
260 c = *bytes++;
261 dbgline[i] = (i && !(i % 12)) ? '-' : ' ';
262 i++;
263 dbgline[i++] = hexdigit[(c >> 4) & 0x0f];
264 dbgline[i++] = hexdigit[c & 0x0f];
265 }
266 dbgline[i] = '\0';
267 dbg(level, "%s:%s", tag, dbgline);
268#endif
269}
270
271/*============================================================================*/
272
273/* bytewise HDLC bitstuffing via table lookup
274 * lookup table: 5 subtables for 0..4 preceding consecutive '1' bits
275 * index: 256*(number of preceding '1' bits) + (next byte to stuff)
276 * value: bit 9.. 0 = result bits
277 * bit 12..10 = number of trailing '1' bits in result
278 * bit 14..13 = number of bits added by stuffing
279 */
280static u16 stufftab[5 * 256] = {
281// previous 1s = 0:
282 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
283 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
284 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
285 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x205f,
286 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
287 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x209f,
288 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
289 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20df,
290 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x048f,
291 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x251f,
292 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x04af,
293 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x255f,
294 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x08cf,
295 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x299f,
296 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
297 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
298
299// previous 1s = 1:
300 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
301 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
302 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
303 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x206f,
304 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x208f,
305 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20af,
306 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20cf,
307 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20ef,
308 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x250f,
309 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x252f,
310 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x254f,
311 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x256f,
312 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x298f,
313 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29af,
314 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
315 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
316
317// previous 1s = 2:
318 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
319 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
320 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
321 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x2067, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x2077,
322 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x2087, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x2097,
323 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x20a7, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20b7,
324 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x20c7, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20d7,
325 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x20e7, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20f7,
326 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x2507, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x2517,
327 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x2527, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x2537,
328 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x2547, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x2557,
329 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x2567, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x2577,
330 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x2987, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x2997,
331 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x29a7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29b7,
332 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
333 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
334
335// previous 1s = 3:
336 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
337 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
338 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
339 0x0030, 0x0031, 0x0032, 0x2063, 0x0034, 0x0035, 0x0036, 0x206b, 0x0038, 0x0039, 0x003a, 0x2073, 0x003c, 0x003d, 0x203e, 0x207b,
340 0x0040, 0x0041, 0x0042, 0x2083, 0x0044, 0x0045, 0x0046, 0x208b, 0x0048, 0x0049, 0x004a, 0x2093, 0x004c, 0x004d, 0x004e, 0x209b,
341 0x0050, 0x0051, 0x0052, 0x20a3, 0x0054, 0x0055, 0x0056, 0x20ab, 0x0058, 0x0059, 0x005a, 0x20b3, 0x005c, 0x005d, 0x005e, 0x20bb,
342 0x0060, 0x0061, 0x0062, 0x20c3, 0x0064, 0x0065, 0x0066, 0x20cb, 0x0068, 0x0069, 0x006a, 0x20d3, 0x006c, 0x006d, 0x006e, 0x20db,
343 0x0070, 0x0071, 0x0072, 0x20e3, 0x0074, 0x0075, 0x0076, 0x20eb, 0x0078, 0x0079, 0x007a, 0x20f3, 0x207c, 0x207d, 0x20be, 0x40fb,
344 0x0480, 0x0481, 0x0482, 0x2503, 0x0484, 0x0485, 0x0486, 0x250b, 0x0488, 0x0489, 0x048a, 0x2513, 0x048c, 0x048d, 0x048e, 0x251b,
345 0x0490, 0x0491, 0x0492, 0x2523, 0x0494, 0x0495, 0x0496, 0x252b, 0x0498, 0x0499, 0x049a, 0x2533, 0x049c, 0x049d, 0x049e, 0x253b,
346 0x04a0, 0x04a1, 0x04a2, 0x2543, 0x04a4, 0x04a5, 0x04a6, 0x254b, 0x04a8, 0x04a9, 0x04aa, 0x2553, 0x04ac, 0x04ad, 0x04ae, 0x255b,
347 0x04b0, 0x04b1, 0x04b2, 0x2563, 0x04b4, 0x04b5, 0x04b6, 0x256b, 0x04b8, 0x04b9, 0x04ba, 0x2573, 0x04bc, 0x04bd, 0x253e, 0x257b,
348 0x08c0, 0x08c1, 0x08c2, 0x2983, 0x08c4, 0x08c5, 0x08c6, 0x298b, 0x08c8, 0x08c9, 0x08ca, 0x2993, 0x08cc, 0x08cd, 0x08ce, 0x299b,
349 0x08d0, 0x08d1, 0x08d2, 0x29a3, 0x08d4, 0x08d5, 0x08d6, 0x29ab, 0x08d8, 0x08d9, 0x08da, 0x29b3, 0x08dc, 0x08dd, 0x08de, 0x29bb,
350 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
351 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
352
353// previous 1s = 4:
354 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
355 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
356 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
357 0x0030, 0x2061, 0x0032, 0x2065, 0x0034, 0x2069, 0x0036, 0x206d, 0x0038, 0x2071, 0x003a, 0x2075, 0x003c, 0x2079, 0x203e, 0x407d,
358 0x0040, 0x2081, 0x0042, 0x2085, 0x0044, 0x2089, 0x0046, 0x208d, 0x0048, 0x2091, 0x004a, 0x2095, 0x004c, 0x2099, 0x004e, 0x209d,
359 0x0050, 0x20a1, 0x0052, 0x20a5, 0x0054, 0x20a9, 0x0056, 0x20ad, 0x0058, 0x20b1, 0x005a, 0x20b5, 0x005c, 0x20b9, 0x005e, 0x20bd,
360 0x0060, 0x20c1, 0x0062, 0x20c5, 0x0064, 0x20c9, 0x0066, 0x20cd, 0x0068, 0x20d1, 0x006a, 0x20d5, 0x006c, 0x20d9, 0x006e, 0x20dd,
361 0x0070, 0x20e1, 0x0072, 0x20e5, 0x0074, 0x20e9, 0x0076, 0x20ed, 0x0078, 0x20f1, 0x007a, 0x20f5, 0x207c, 0x40f9, 0x20be, 0x417d,
362 0x0480, 0x2501, 0x0482, 0x2505, 0x0484, 0x2509, 0x0486, 0x250d, 0x0488, 0x2511, 0x048a, 0x2515, 0x048c, 0x2519, 0x048e, 0x251d,
363 0x0490, 0x2521, 0x0492, 0x2525, 0x0494, 0x2529, 0x0496, 0x252d, 0x0498, 0x2531, 0x049a, 0x2535, 0x049c, 0x2539, 0x049e, 0x253d,
364 0x04a0, 0x2541, 0x04a2, 0x2545, 0x04a4, 0x2549, 0x04a6, 0x254d, 0x04a8, 0x2551, 0x04aa, 0x2555, 0x04ac, 0x2559, 0x04ae, 0x255d,
365 0x04b0, 0x2561, 0x04b2, 0x2565, 0x04b4, 0x2569, 0x04b6, 0x256d, 0x04b8, 0x2571, 0x04ba, 0x2575, 0x04bc, 0x2579, 0x253e, 0x467d,
366 0x08c0, 0x2981, 0x08c2, 0x2985, 0x08c4, 0x2989, 0x08c6, 0x298d, 0x08c8, 0x2991, 0x08ca, 0x2995, 0x08cc, 0x2999, 0x08ce, 0x299d,
367 0x08d0, 0x29a1, 0x08d2, 0x29a5, 0x08d4, 0x29a9, 0x08d6, 0x29ad, 0x08d8, 0x29b1, 0x08da, 0x29b5, 0x08dc, 0x29b9, 0x08de, 0x29bd,
368 0x0ce0, 0x2dc1, 0x0ce2, 0x2dc5, 0x0ce4, 0x2dc9, 0x0ce6, 0x2dcd, 0x0ce8, 0x2dd1, 0x0cea, 0x2dd5, 0x0cec, 0x2dd9, 0x0cee, 0x2ddd,
369 0x10f0, 0x31e1, 0x10f2, 0x31e5, 0x10f4, 0x31e9, 0x10f6, 0x31ed, 0x20f8, 0x41f1, 0x20fa, 0x41f5, 0x257c, 0x46f9, 0x29be, 0x4b7d
370};
371
372/* hdlc_bitstuff_byte
373 * perform HDLC bitstuffing for one input byte (8 bits, LSB first)
374 * parameters:
375 * cin input byte
376 * ones number of trailing '1' bits in result before this step
377 * iwb pointer to output buffer structure (write semaphore must be held)
378 * return value:
379 * number of trailing '1' bits in result after this step
380 */
381
382static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin,
383 int ones)
384{
385 u16 stuff;
386 int shiftinc, newones;
387
388 /* get stuffing information for input byte
389 * value: bit 9.. 0 = result bits
390 * bit 12..10 = number of trailing '1' bits in result
391 * bit 14..13 = number of bits added by stuffing
392 */
393 stuff = stufftab[256 * ones + cin];
394 shiftinc = (stuff >> 13) & 3;
395 newones = (stuff >> 10) & 7;
396 stuff &= 0x3ff;
397
398 /* append stuffed byte to output stream */
399 isowbuf_putbits(iwb, stuff, 8 + shiftinc);
400 return newones;
401}
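A worked lookup, reading the value from the table above: stuffing the data byte 0x7e with no preceding '1' bits uses stufftab[0 * 256 + 0x7e] == 0x20be.

	/*   shiftinc = (0x20be >> 13) & 3 = 1     -> one stuff bit, 9 bits written
	 *   newones  = (0x20be >> 10) & 7 = 0     -> result ends in a '0' bit
	 *   stuff    =  0x20be & 0x3ff    = 0x0be -> 0 1 1 1 1 1 0 1 0 (LSB first)
	 * The run of six '1' bits in 0x7e gets a '0' inserted after the first
	 * five, so the stuffed byte can no longer be mistaken for a flag.
	 */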
402
403/* hdlc_buildframe
404 * Perform HDLC framing with bitstuffing on a byte buffer
405 * The input buffer is regarded as a sequence of bits, starting with the least
406 * significant bit of the first byte and ending with the most significant bit
407 * of the last byte. A 16 bit FCS is appended as defined by RFC 1662.
408 * Whenever five consecutive '1' bits appear in the resulting bit sequence, a
409 * '0' bit is inserted after them.
410 * The resulting bit string and a closing flag pattern (PPP_FLAG, '01111110')
411 * are appended to the output buffer starting at the given bit position, which
412 * is assumed to already contain a leading flag.
413 * The output buffer must have sufficient length; count + count/5 + 6 bytes
414 * starting at the current write position are safe and are verified to be present.
415 * parameters:
416 * in input buffer
417 * count number of bytes in input buffer
418 * iwb pointer to output buffer structure (write semaphore is acquired and released internally)
419 * return value:
420 * position of end of packet in output buffer on success,
421 * -EAGAIN if write semaphore busy or buffer full
422 */
423
424static inline int hdlc_buildframe(struct isowbuf_t *iwb,
425 unsigned char *in, int count)
426{
427 int ones;
428 u16 fcs;
429 int end;
430 unsigned char c;
431
432 if (isowbuf_freebytes(iwb) < count + count / 5 + 6 ||
433 !isowbuf_startwrite(iwb)) {
434 dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN",
435 __func__, isowbuf_freebytes(iwb));
436 return -EAGAIN;
437 }
438
439 dump_bytes(DEBUG_STREAM, "snd data", in, count);
440
441 /* bitstuff and checksum input data */
442 fcs = PPP_INITFCS;
443 ones = 0;
444 while (count-- > 0) {
445 c = *in++;
446 ones = hdlc_bitstuff_byte(iwb, c, ones);
447 fcs = crc_ccitt_byte(fcs, c);
448 }
449
450 /* bitstuff and append FCS (complemented, least significant byte first) */
451 fcs ^= 0xffff;
452 ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
453 ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
454
455 /* put closing flag and repeat byte for flag idle */
456 isowbuf_putflag(iwb);
457 end = isowbuf_donewrite(iwb);
458 dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1);
459 return end;
460}
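A rough accounting of the count + count/5 + 6 reservation checked at the top of hdlc_buildframe (illustrative arithmetic only): bitstuffing inserts at most one bit for every five payload bits, so 8*count payload bits grow by at most 8*count/5 bits, i.e. count/5 bytes; the remaining 6 bytes cover the FCS, its own stuff bits, the closing flag and the partial-byte carry.

	/* e.g. count = 100 payload bytes:
	 *   payload                 800 bits
	 *   payload stuffing     <= 160 bits   (= count/5 = 20 bytes)
	 *   FCS + its stuffing   <=  19 bits
	 *   closing flag             8 bits
	 *   partial-byte carry   <    8 bits
	 *   total                <  995 bits < (100 + 20 + 6) * 8 = 1008 bits
	 */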
461
462/* trans_buildframe
463 * Append a block of 'transparent' data to the output buffer,
464 * inverting the bytes.
465 * The output buffer must have sufficient length; count bytes
466 * starting at the current write position are safe and are verified to be present.
467 * parameters:
468 * in input buffer
469 * count number of bytes in input buffer
470 * iwb pointer to output buffer structure (write semaphore is acquired and released internally)
471 * return value:
472 * position of end of packet in output buffer on success,
473 * -EAGAIN if write semaphore busy or buffer full
474 */
475
476static inline int trans_buildframe(struct isowbuf_t *iwb,
477 unsigned char *in, int count)
478{
479 int write;
480 unsigned char c;
481
482 if (unlikely(count <= 0))
483 return atomic_read(&iwb->write); /* better ideas? */
484
485 if (isowbuf_freebytes(iwb) < count ||
486 !isowbuf_startwrite(iwb)) {
487 dbg(DEBUG_ISO, "can't put %d bytes", count);
488 return -EAGAIN;
489 }
490
491 dbg(DEBUG_STREAM, "put %d bytes", count);
492 write = atomic_read(&iwb->write);
493 do {
494 c = gigaset_invtab[*in++];
495 iwb->data[write++] = c;
496 write %= BAS_OUTBUFSIZE;
497 } while (--count > 0);
498 atomic_set(&iwb->write, write);
499 iwb->idle = c;
500
501 return isowbuf_donewrite(iwb);
502}
503
504int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len)
505{
506 int result;
507
508 switch (bcs->proto2) {
509 case ISDN_PROTO_L2_HDLC:
510 result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len);
511 dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d", __func__, len, result);
512 break;
513 default: /* assume transparent */
514 result = trans_buildframe(bcs->hw.bas->isooutbuf, in, len);
515 dbg(DEBUG_ISO, "%s: %d bytes trans -> %d", __func__, len, result);
516 }
517 return result;
518}
519
520/* hdlc_putbyte
521 * append byte c to current skb of B channel structure *bcs, updating fcs
522 */
523static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs)
524{
525 bcs->fcs = crc_ccitt_byte(bcs->fcs, c);
526 if (unlikely(bcs->skb == NULL)) {
527 /* skipping */
528 return;
529 }
530 if (unlikely(bcs->skb->len == SBUFSIZE)) {
531 warn("received oversized packet discarded");
532 bcs->hw.bas->giants++;
533 dev_kfree_skb_any(bcs->skb);
534 bcs->skb = NULL;
535 return;
536 }
537 *gigaset_skb_put_quick(bcs->skb, 1) = c;
538}
539
540/* hdlc_flush
541 * drop partial HDLC data packet
542 */
543static inline void hdlc_flush(struct bc_state *bcs)
544{
545 /* clear skb or allocate new if not skipping */
546 if (likely(bcs->skb != NULL))
547 skb_trim(bcs->skb, 0);
548 else if (!bcs->ignore) {
549 if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
550 skb_reserve(bcs->skb, HW_HDR_LEN);
551 else
552 err("could not allocate skb");
553 }
554
555 /* reset packet state */
556 bcs->fcs = PPP_INITFCS;
557}
558
559/* hdlc_done
560 * process completed HDLC data packet
561 */
562static inline void hdlc_done(struct bc_state *bcs)
563{
564 struct sk_buff *procskb;
565
566 if (unlikely(bcs->ignore)) {
567 bcs->ignore--;
568 hdlc_flush(bcs);
569 return;
570 }
571
572 if ((procskb = bcs->skb) == NULL) {
573 /* previous error */
574 dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
575 gigaset_rcv_error(NULL, bcs->cs, bcs);
576 } else if (procskb->len < 2) {
577 notice("received short frame (%d octets)", procskb->len);
578 bcs->hw.bas->runts++;
579 gigaset_rcv_error(procskb, bcs->cs, bcs);
580 } else if (bcs->fcs != PPP_GOODFCS) {
581 notice("frame check error (0x%04x)", bcs->fcs);
582 bcs->hw.bas->fcserrs++;
583 gigaset_rcv_error(procskb, bcs->cs, bcs);
584 } else {
585 procskb->len -= 2; /* subtract FCS */
586 procskb->tail -= 2;
587 dbg(DEBUG_ISO,
588 "%s: good frame (%d octets)", __func__, procskb->len);
589 dump_bytes(DEBUG_STREAM,
590 "rcv data", procskb->data, procskb->len);
591 bcs->hw.bas->goodbytes += procskb->len;
592 gigaset_rcv_skb(procskb, bcs->cs, bcs);
593 }
594
595 if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
596 skb_reserve(bcs->skb, HW_HDR_LEN);
597 else
598 err("could not allocate skb");
599 bcs->fcs = PPP_INITFCS;
600}
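The comparison against PPP_GOODFCS relies on the RFC 1662 residue property: running the CRC over the payload plus the complemented FCS appended by the sender always leaves the fixed value 0xf0b8. A minimal sketch of that property, using the same headers this file already includes; the helper name is invented and not part of the driver:

static u16 example_fcs_residue(const unsigned char *frame, int len)
{
	u16 fcs = PPP_INITFCS;		/* 0xffff */
	int i;

	/* len includes the two FCS octets at the end of the frame */
	for (i = 0; i < len; i++)
		fcs = crc_ccitt_byte(fcs, frame[i]);
	return fcs;			/* == PPP_GOODFCS (0xf0b8) if intact */
}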
601
602/* hdlc_frag
603 * drop HDLC data packet with non-integral last byte
604 */
605static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits)
606{
607 if (unlikely(bcs->ignore)) {
608 bcs->ignore--;
609 hdlc_flush(bcs);
610 return;
611 }
612
613 notice("received partial byte (%d bits)", inbits);
614 bcs->hw.bas->alignerrs++;
615 gigaset_rcv_error(bcs->skb, bcs->cs, bcs);
616
617 if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
618 skb_reserve(bcs->skb, HW_HDR_LEN);
619 else
620 err("could not allocate skb");
621 bcs->fcs = PPP_INITFCS;
622}
623
624/* bit counts lookup table for HDLC bit unstuffing
625 * index: input byte
626 * value: bit 0..3 = number of consecutive '1' bits starting from LSB
627 * bit 4..6 = number of consecutive '1' bits starting from MSB
628 * (replacing 8 by 7 to make it fit; the algorithm won't care)
629 * bit 7 set if there are 5 or more "interior" consecutive '1' bits
630 */
631static unsigned char bitcounts[256] = {
632 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
633 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
634 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
635 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x80, 0x06,
636 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
637 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05,
638 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04,
639 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x80, 0x81, 0x80, 0x07,
640 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
641 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x15,
642 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14,
643 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x90, 0x16,
644 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x24,
645 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x25,
646 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x33, 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x34,
647 0x40, 0x41, 0x40, 0x42, 0x40, 0x41, 0x40, 0x43, 0x50, 0x51, 0x50, 0x52, 0x60, 0x61, 0x70, 0x78
648};
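Two sample entries, decoded against the encoding described above the table:

	/* bitcounts[0x7e] == 0x80: 01111110 has no '1' at either end (both
	 *                          nibbles 0) but an interior run of six '1's,
	 *                          so bit 7 is set.
	 * bitcounts[0xff] == 0x78: eight '1's from the LSB (low nibble 8) and,
	 *                          with 8 encoded as 7 to fit three bits, 7
	 *                          from the MSB; the run touches both ends, so
	 *                          it is not "interior" and bit 7 stays clear.
	 */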
649
650/* hdlc_unpack
651 * perform HDLC frame processing (bit unstuffing, flag detection, FCS calculation)
652 * on a sequence of received data bytes (8 bits each, LSB first)
653 * pass on successfully received, complete frames as SKBs via gigaset_rcv_skb
654 * notify of errors via gigaset_rcv_error
655 * tally frames, errors etc. in BC structure counters
656 * parameters:
657 * src received data
658 * count number of received bytes
659 * bcs receiving B channel structure
660 */
661static inline void hdlc_unpack(unsigned char *src, unsigned count,
662 struct bc_state *bcs)
663{
664 struct bas_bc_state *ubc;
665 int inputstate;
666 unsigned seqlen, inbyte, inbits;
667
668 IFNULLRET(bcs);
669 ubc = bcs->hw.bas;
670 IFNULLRET(ubc);
671
672 /* load previous state:
673 * inputstate = set of flag bits:
674 * - INS_flag_hunt: no complete opening flag received since connection setup or last abort
675 * - INS_have_data: at least one complete data byte received since last flag
676 * seqlen = number of consecutive '1' bits in last 7 input stream bits (0..7)
677 * inbyte = accumulated partial data byte (if !INS_flag_hunt)
678 * inbits = number of valid bits in inbyte, starting at LSB (0..6)
679 */
680 inputstate = bcs->inputstate;
681 seqlen = ubc->seqlen;
682 inbyte = ubc->inbyte;
683 inbits = ubc->inbits;
684
685 /* bit unstuffing a byte at a time
686 * Take your time to understand this; it's straightforward but tedious.
687 * The "bitcounts" lookup table is used to speed up the counting of
688 * leading and trailing '1' bits.
689 */
690 while (count--) {
691 unsigned char c = *src++;
692 unsigned char tabentry = bitcounts[c];
693 unsigned lead1 = tabentry & 0x0f;
694 unsigned trail1 = (tabentry >> 4) & 0x0f;
695
696 seqlen += lead1;
697
698 if (unlikely(inputstate & INS_flag_hunt)) {
699 if (c == PPP_FLAG) {
700 /* flag-in-one */
701 inputstate &= ~(INS_flag_hunt | INS_have_data);
702 inbyte = 0;
703 inbits = 0;
704 } else if (seqlen == 6 && trail1 != 7) {
705 /* flag completed & not followed by abort */
706 inputstate &= ~(INS_flag_hunt | INS_have_data);
707 inbyte = c >> (lead1 + 1);
708 inbits = 7 - lead1;
709 if (trail1 >= 8) {
710 /* interior stuffing: omitting the MSB handles most cases */
711 inbits--;
712 /* correct the incorrectly handled cases individually */
713 switch (c) {
714 case 0xbe:
715 inbyte = 0x3f;
716 break;
717 }
718 }
719 }
720 /* else: continue flag-hunting */
721 } else if (likely(seqlen < 5 && trail1 < 7)) {
722 /* streamlined case: 8 data bits, no stuffing */
723 inbyte |= c << inbits;
724 hdlc_putbyte(inbyte & 0xff, bcs);
725 inputstate |= INS_have_data;
726 inbyte >>= 8;
727 /* inbits unchanged */
728 } else if (likely(seqlen == 6 && inbits == 7 - lead1 &&
729 trail1 + 1 == inbits &&
730 !(inputstate & INS_have_data))) {
731 /* streamlined case: flag idle - state unchanged */
732 } else if (unlikely(seqlen > 6)) {
733 /* abort sequence */
734 ubc->aborts++;
735 hdlc_flush(bcs);
736 inputstate |= INS_flag_hunt;
737 } else if (seqlen == 6) {
738 /* closing flag, including (6 - lead1) '1's and one '0' from inbits */
739 if (inbits > 7 - lead1) {
740 hdlc_frag(bcs, inbits + lead1 - 7);
741 inputstate &= ~INS_have_data;
742 } else {
743 if (inbits < 7 - lead1)
744 ubc->stolen0s ++;
745 if (inputstate & INS_have_data) {
746 hdlc_done(bcs);
747 inputstate &= ~INS_have_data;
748 }
749 }
750
751 if (c == PPP_FLAG) {
752 /* complete flag, LSB overlaps preceding flag */
753 ubc->shared0s ++;
754 inbits = 0;
755 inbyte = 0;
756 } else if (trail1 != 7) {
757 /* remaining bits */
758 inbyte = c >> (lead1 + 1);
759 inbits = 7 - lead1;
760 if (trail1 >= 8) {
761 /* interior stuffing: omitting the MSB handles most cases */
762 inbits--;
763 /* correct the incorrectly handled cases individually */
764 switch (c) {
765 case 0xbe:
766 inbyte = 0x3f;
767 break;
768 }
769 }
770 } else {
771 /* abort sequence follows, skb already empty anyway */
772 ubc->aborts++;
773 inputstate |= INS_flag_hunt;
774 }
775 } else { /* (seqlen < 6) && (seqlen == 5 || trail1 >= 7) */
776
777 if (c == PPP_FLAG) {
778 /* complete flag */
779 if (seqlen == 5)
780 ubc->stolen0s++;
781 if (inbits) {
782 hdlc_frag(bcs, inbits);
783 inbits = 0;
784 inbyte = 0;
785 } else if (inputstate & INS_have_data)
786 hdlc_done(bcs);
787 inputstate &= ~INS_have_data;
788 } else if (trail1 == 7) {
789 /* abort sequence */
790 ubc->aborts++;
791 hdlc_flush(bcs);
792 inputstate |= INS_flag_hunt;
793 } else {
794 /* stuffed data */
795 if (trail1 < 7) { /* => seqlen == 5 */
796 /* stuff bit at position lead1, no interior stuffing */
797 unsigned char mask = (1 << lead1) - 1;
798 c = (c & mask) | ((c & ~mask) >> 1);
799 inbyte |= c << inbits;
800 inbits += 7;
801 } else if (seqlen < 5) { /* trail1 >= 8 */
802 /* interior stuffing: omitting the MSB handles most cases */
803 /* correct the incorrectly handled cases individually */
804 switch (c) {
805 case 0xbe:
806 c = 0x7e;
807 break;
808 }
809 inbyte |= c << inbits;
810 inbits += 7;
811 } else { /* seqlen == 5 && trail1 >= 8 */
812
813 /* stuff bit at lead1 *and* interior stuffing */
814 switch (c) { /* unstuff individually */
815 case 0x7d:
816 c = 0x3f;
817 break;
818 case 0xbe:
819 c = 0x3f;
820 break;
821 case 0x3e:
822 c = 0x1f;
823 break;
824 case 0x7c:
825 c = 0x3e;
826 break;
827 }
828 inbyte |= c << inbits;
829 inbits += 6;
830 }
831 if (inbits >= 8) {
832 inbits -= 8;
833 hdlc_putbyte(inbyte & 0xff, bcs);
834 inputstate |= INS_have_data;
835 inbyte >>= 8;
836 }
837 }
838 }
839 seqlen = trail1 & 7;
840 }
841
842 /* save new state */
843 bcs->inputstate = inputstate;
844 ubc->seqlen = seqlen;
845 ubc->inbyte = inbyte;
846 ubc->inbits = inbits;
847}
848
849/* trans_receive
850 * pass on received USB frame transparently as SKB via gigaset_rcv_skb
851 * invert bytes
852 * tally frames, errors etc. in BC structure counters
853 * parameters:
854 * src received data
855 * count number of received bytes
856 * bcs receiving B channel structure
857 */
858static inline void trans_receive(unsigned char *src, unsigned count,
859 struct bc_state *bcs)
860{
861 struct sk_buff *skb;
862 int dobytes;
863 unsigned char *dst;
864
865 if (unlikely(bcs->ignore)) {
866 bcs->ignore--;
867 hdlc_flush(bcs);
868 return;
869 }
870 if (unlikely((skb = bcs->skb) == NULL)) {
871 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN);
872 if (!skb) {
873 err("could not allocate skb");
874 return;
875 }
876 skb_reserve(skb, HW_HDR_LEN);
877 }
878 bcs->hw.bas->goodbytes += skb->len;
879 dobytes = TRANSBUFSIZE - skb->len;
880 while (count > 0) {
881 dst = skb_put(skb, count < dobytes ? count : dobytes);
882 while (count > 0 && dobytes > 0) {
883 *dst++ = gigaset_invtab[*src++];
884 count--;
885 dobytes--;
886 }
887 if (dobytes == 0) {
888 gigaset_rcv_skb(skb, bcs->cs, bcs);
889 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN);
890 if (!skb) {
891 err("could not allocate skb");
892 return;
893 }
894 skb_reserve(bcs->skb, HW_HDR_LEN);
895 dobytes = TRANSBUFSIZE;
896 }
897 }
898}
899
900void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs)
901{
902 switch (bcs->proto2) {
903 case ISDN_PROTO_L2_HDLC:
904 hdlc_unpack(src, count, bcs);
905 break;
906 default: /* assume transparent */
907 trans_receive(src, count, bcs);
908 }
909}
910
911/* == data input =========================================================== */
912
913static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf)
914{
915 struct cardstate *cs = inbuf->cs;
916 unsigned cbytes = cs->cbytes;
917
918 while (numbytes--) {
919 /* copy next character, check for end of line */
920 switch (cs->respdata[cbytes] = *src++) {
921 case '\r':
922 case '\n':
923 /* end of line */
924 dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)",
925 __func__, cbytes);
926 cs->cbytes = cbytes;
927 gigaset_handle_modem_response(cs);
928 cbytes = 0;
929 break;
930 default:
931 /* advance in line buffer, checking for overflow */
932 if (cbytes < MAX_RESP_SIZE - 1)
933 cbytes++;
934 else
935 warn("response too large");
936 }
937 }
938
939 /* save state */
940 cs->cbytes = cbytes;
941}
942
943
944/* process a block of data received through the control channel
945 */
946void gigaset_isoc_input(struct inbuf_t *inbuf)
947{
948 struct cardstate *cs = inbuf->cs;
949 unsigned tail, head, numbytes;
950 unsigned char *src;
951
952 head = atomic_read(&inbuf->head);
953 while (head != (tail = atomic_read(&inbuf->tail))) {
954 dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
955 if (head > tail)
956 tail = RBUFSIZE;
957 src = inbuf->data + head;
958 numbytes = tail - head;
959 dbg(DEBUG_INTR, "processing %u bytes", numbytes);
960
961 if (atomic_read(&cs->mstate) == MS_LOCKED) {
962 gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
963 numbytes, src, 0);
964 gigaset_if_receive(inbuf->cs, src, numbytes);
965 } else {
966 gigaset_dbg_buffer(DEBUG_CMD, "received response",
967 numbytes, src, 0);
968 cmd_loop(src, numbytes, inbuf);
969 }
970
971 head += numbytes;
972 if (head == RBUFSIZE)
973 head = 0;
974 dbg(DEBUG_INTR, "setting head to %u", head);
975 atomic_set(&inbuf->head, head);
976 }
977}
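A worked pass through the loop above with invented numbers: if head == 900 and tail == 100 in a 1000-byte ring buffer (the real size is RBUFSIZE from gigaset.h), the wrapped data is handled in two iterations:

	/* 1st iteration: head(900) > tail(100), so tail is clamped to RBUFSIZE;
	 *                bytes 900..999 are processed, head becomes 0
	 * 2nd iteration: head(0) < tail(100); bytes 0..99 are processed,
	 *                head becomes 100 == tail and the loop ends
	 */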
978
979
980/* == data output ========================================================== */
981
982/* gigaset_send_skb
983 * called by common.c to queue an skb for sending
984 * and start transmission if necessary
985 * parameters:
986 * B Channel control structure
987 * skb
988 * return value:
989 * number of bytes accepted for sending
990 * (skb->len if ok, 0 if out of buffer space)
991 * or error code (< 0, eg. -EINVAL)
992 */
993int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb)
994{
995 int len;
996
997 IFNULLRETVAL(bcs, -EFAULT);
998 IFNULLRETVAL(skb, -EFAULT);
999 len = skb->len;
1000
1001 skb_queue_tail(&bcs->squeue, skb);
1002 dbg(DEBUG_ISO,
1003 "%s: skb queued, qlen=%d", __func__, skb_queue_len(&bcs->squeue));
1004
1005 /* tasklet submits URB if necessary */
1006 tasklet_schedule(&bcs->hw.bas->sent_tasklet);
1007
1008 return len; /* ok so far */
1009}
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
new file mode 100644
index 000000000000..c6915fa2be6c
--- /dev/null
+++ b/drivers/isdn/gigaset/proc.c
@@ -0,0 +1,81 @@
1/*
2 * Stuff used by all variants of the driver
3 *
4 * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>,
5 * Hansjoerg Lipp <hjlipp@web.de>,
6 * Tilman Schmidt <tilman@imap.cc>.
7 *
8 * =====================================================================
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of
12 * the License, or (at your option) any later version.
13 * =====================================================================
14 * ToDo: ...
15 * =====================================================================
16 * Version: $Id: proc.c,v 1.5.2.13 2006/02/04 18:28:16 hjlipp Exp $
17 * =====================================================================
18 */
19
20#include "gigaset.h"
21#include <linux/ctype.h>
22
23static ssize_t show_cidmode(struct device *dev, struct device_attribute *attr, char *buf)
24{
25 struct usb_interface *intf = to_usb_interface(dev);
26 struct cardstate *cs = usb_get_intfdata(intf);
27 return sprintf(buf, "%d\n", atomic_read(&cs->cidmode)); // FIXME use scnprintf for 13607 bit architectures (if PAGE_SIZE==4096)
28}
29
30static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
31{
32 struct usb_interface *intf = to_usb_interface(dev);
33 struct cardstate *cs = usb_get_intfdata(intf);
34 long int value;
35 char *end;
36
37 value = simple_strtol(buf, &end, 0);
38 while (*end)
39 if (!isspace(*end++))
40 return -EINVAL;
41 if (value < 0 || value > 1)
42 return -EINVAL;
43
44 if (down_interruptible(&cs->sem))
45 return -ERESTARTSYS; // FIXME -EINTR?
46
47 cs->waiting = 1;
48 if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
49 NULL, value, NULL)) {
50 cs->waiting = 0;
51 up(&cs->sem);
52 return -ENOMEM;
53 }
54
55 dbg(DEBUG_CMD, "scheduling PROC_CIDMODE");
56 gigaset_schedule_event(cs);
57
58 wait_event(cs->waitqueue, !cs->waiting);
59
60 up(&cs->sem);
61
62 return count;
63}
64
65static DEVICE_ATTR(cidmode, S_IRUGO|S_IWUSR, show_cidmode, set_cidmode);
66
67/* free sysfs for device */
68void gigaset_free_dev_sysfs(struct usb_interface *interface)
69{
70 dbg(DEBUG_INIT, "removing sysfs entries");
71 device_remove_file(&interface->dev, &dev_attr_cidmode);
72}
73EXPORT_SYMBOL_GPL(gigaset_free_dev_sysfs);
74
75/* initialize sysfs for device */
76void gigaset_init_dev_sysfs(struct usb_interface *interface)
77{
78 dbg(DEBUG_INIT, "setting up sysfs");
79 device_create_file(&interface->dev, &dev_attr_cidmode);
80}
81EXPORT_SYMBOL_GPL(gigaset_init_dev_sysfs);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
new file mode 100644
index 000000000000..323fc7349dec
--- /dev/null
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -0,0 +1,1008 @@
1/*
2 * USB driver for Gigaset 307x directly or using M105 Data.
3 *
4 * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>
5 * and Hansjoerg Lipp <hjlipp@web.de>.
6 *
7 * This driver was derived from the USB skeleton driver by
8 * Greg Kroah-Hartman <greg@kroah.com>
9 *
10 * =====================================================================
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2 of
14 * the License, or (at your option) any later version.
15 * =====================================================================
16 * ToDo: ...
17 * =====================================================================
18 * Version: $Id: usb-gigaset.c,v 1.85.4.18 2006/02/04 18:28:16 hjlipp Exp $
19 * =====================================================================
20 */
21
22#include "gigaset.h"
23
24#include <linux/errno.h>
25#include <linux/init.h>
26#include <linux/slab.h>
27#include <linux/usb.h>
28#include <linux/module.h>
29#include <linux/moduleparam.h>
30
31/* Version Information */
32#define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers <Eilers.Stefan@epost.de>"
33#define DRIVER_DESC "USB Driver for Gigaset 307x using M105"
34
35/* Module parameters */
36
37static int startmode = SM_ISDN;
38static int cidmode = 1;
39
40module_param(startmode, int, S_IRUGO);
41module_param(cidmode, int, S_IRUGO);
42MODULE_PARM_DESC(startmode, "start in isdn4linux mode");
43MODULE_PARM_DESC(cidmode, "Call-ID mode");
44
45#define GIGASET_MINORS 1
46#define GIGASET_MINOR 8
47#define GIGASET_MODULENAME "usb_gigaset"
48#define GIGASET_DEVFSNAME "gig/usb/"
49#define GIGASET_DEVNAME "ttyGU"
50
51#define IF_WRITEBUF 2000 //FIXME // WAKEUP_CHARS: 256
52
53/* Values for the Gigaset M105 Data */
54#define USB_M105_VENDOR_ID 0x0681
55#define USB_M105_PRODUCT_ID 0x0009
56
57/* table of devices that work with this driver */
58static struct usb_device_id gigaset_table [] = {
59 { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
60 { } /* Terminating entry */
61};
62
63MODULE_DEVICE_TABLE(usb, gigaset_table);
64
65/* Get a minor range for your devices from the usb maintainer */
66#define USB_SKEL_MINOR_BASE 200
67
68
69/*
70 * Control requests (empty fields: 00)
71 *
72 * RT|RQ|VALUE|INDEX|LEN |DATA
73 * In:
74 * C1 08 01
75 * Get flags (1 byte). Bits: 0=dtr,1=rts,3-7:?
76 * C1 0F ll ll
77 * Get device information/status (llll: 0x200 and 0x40 seen).
78 * Real size: I only saw MIN(llll,0x64).
79 * Contents: seems to be always the same...
80 * offset 0x00: Length of this structure (0x64) (len: 1,2,3 bytes)
81 * offset 0x3c: String (16 bit chars): "MCCI USB Serial V2.0"
82 * rest: ?
83 * Out:
84 * 41 11
85 * Initialize/reset device ?
86 * 41 00 xx 00
87 * ? (xx=00 or 01; 01 on start, 00 on close)
88 * 41 07 vv mm
89 * Set/clear flags vv=value, mm=mask (see RQ 08)
90 * 41 12 xx
91 * Used before the following configuration requests are issued
92 * (with xx=0x0f). I've seen other values<0xf, though.
93 * 41 01 xx xx
94 * Set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1.
95 * 41 03 ps bb
96 * Set byte size and parity. p: 0x20=even,0x10=odd,0x00=no parity
97 * [ 0x30: m, 0x40: s ]
98 * [s: 0: 1 stop bit; 1: 1.5; 2: 2]
99 * bb: bits/byte (seen 7 and 8)
100 * 41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00
101 * ??
102 * Initialization: 01, 40, 00, 00
103 * Open device: 00 40, 00, 00
104 * yy and zz seem to be equal, either 0x00 or 0x0a
105 * (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80)
106 * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
107 * Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
108 * xx is usually 0x00 but was 0x7e before starting data transfer
109 * in unimodem mode. So, this might be an array of characters that need
110 * special treatment ("commit all buffered data"?), 11=^Q, 13=^S.
111 *
112 * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
113 * flags per packet.
114 */
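
As a reading aid for the request layout above, the sketch below shows how a 115200 baud, 8N1 line setting would map onto requests 0x01 and 0x03 using the divisor formula from the comment. The helper name is illustrative only and is not part of this patch; the driver's own encoding lives in gigaset_baud_rate() and gigaset_set_line_ctrl() further down.

#include <linux/types.h>

/* Illustration only -- encodes the documented M105 control values. */
static inline u16 m105_baud_divisor(unsigned int rate)
{
	return 0x383fff / rate + 1;	/* == ceil(0x384000 / rate) */
}

/* 115200 baud -> divisor 0x0020, sent as request 0x01.
 * 8N1         -> no parity (0x00) | 8 data bits (8 << 8) | 1 stop bit (0x00)
 *             == 0x0800, sent as request 0x03. */
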
115
116static int gigaset_probe(struct usb_interface *interface,
117 const struct usb_device_id *id);
118static void gigaset_disconnect(struct usb_interface *interface);
119
120static struct gigaset_driver *driver = NULL;
121static struct cardstate *cardstate = NULL;
122
123/* usb specific object needed to register this driver with the usb subsystem */
124static struct usb_driver gigaset_usb_driver = {
125 .name = GIGASET_MODULENAME,
126 .probe = gigaset_probe,
127 .disconnect = gigaset_disconnect,
128 .id_table = gigaset_table,
129};
130
131struct usb_cardstate {
132 struct usb_device *udev; /* save off the usb device pointer */
133 struct usb_interface *interface; /* the interface for this device */
134 atomic_t busy; /* bulk output in progress */
135
136 /* Output buffer for commands (M105: and data)*/
137 unsigned char *bulk_out_buffer; /* the buffer to send data */
138 int bulk_out_size; /* the size of the send buffer */
139 __u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
140 struct urb *bulk_out_urb; /* the urb used to transmit data */
141
142 /* Input buffer for command responses (M105: and data)*/
143 int rcvbuf_size; /* the size of the receive buffer */
144 struct urb *read_urb; /* the urb used to receive data */
145 __u8 int_in_endpointAddr; /* the address of the bulk in endpoint */
146
147 char bchars[6]; /* req. 0x19 */
148};
149
150struct usb_bc_state {};
151
152static inline unsigned tiocm_to_gigaset(unsigned state)
153{
154 return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0);
155}
156
157#ifdef CONFIG_GIGASET_UNDOCREQ
158/* WARNING: EXPERIMENTAL! */
159static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
160 unsigned new_state)
161{
162 unsigned mask, val;
163 int r;
164
165 mask = tiocm_to_gigaset(old_state ^ new_state);
166 val = tiocm_to_gigaset(new_state);
167
168 dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
169 r = usb_control_msg(cs->hw.usb->udev,
170 usb_sndctrlpipe(cs->hw.usb->udev, 0), 7, 0x41,
171 (val & 0xff) | ((mask & 0xff) << 8), 0,
172 NULL, 0, 2000 /*timeout??*/); // don't use this in an interrupt/BH
173 if (r < 0)
174 return r;
175 //..
176 return 0;
177}
178
179static int set_value(struct cardstate *cs, u8 req, u16 val)
180{
181 int r, r2;
182
183 dbg(DEBUG_USBREQ, "request %02x (%04x)", (unsigned)req, (unsigned)val);
184 r = usb_control_msg(cs->hw.usb->udev,
185 usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x12, 0x41,
186 0xf /*?*/, 0,
187 NULL, 0, 2000 /*?*/); /* no idea, what this does */
188 if (r < 0) {
189 err("error %d on request 0x12", -r);
190 return r;
191 }
192
193 r = usb_control_msg(cs->hw.usb->udev,
194 usb_sndctrlpipe(cs->hw.usb->udev, 0), req, 0x41,
195 val, 0,
196 NULL, 0, 2000 /*?*/);
197 if (r < 0)
198 err("error %d on request 0x%02x", -r, (unsigned)req);
199
200 r2 = usb_control_msg(cs->hw.usb->udev,
201 usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x19, 0x41,
202 0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/);
203 if (r2 < 0)
204 err("error %d on request 0x19", -r2);
205
206 return r < 0 ? r : (r2 < 0 ? r2 : 0);
207}
208
209/* WARNING: HIGHLY EXPERIMENTAL! */
210// don't use this in an interrupt/BH
211static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
212{
213 u16 val;
214 u32 rate;
215
216 cflag &= CBAUD;
217
218 switch (cflag) {
219 //FIXME more values?
220 case B300: rate = 300; break;
221 case B600: rate = 600; break;
222 case B1200: rate = 1200; break;
223 case B2400: rate = 2400; break;
224 case B4800: rate = 4800; break;
225 case B9600: rate = 9600; break;
226 case B19200: rate = 19200; break;
227 case B38400: rate = 38400; break;
228 case B57600: rate = 57600; break;
229 case B115200: rate = 115200; break;
230 default:
231 rate = 9600;
232 err("unsupported baudrate request 0x%x,"
233 " using default of B9600", cflag);
234 }
235
236 val = 0x383fff / rate + 1;
237
238 return set_value(cs, 1, val);
239}
240
241/* WARNING: HIGHLY EXPERIMENTAL! */
242// don't use this in an interrupt/BH
243static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
244{
245 u16 val = 0;
246
247 /* set the parity */
248 if (cflag & PARENB)
249 val |= (cflag & PARODD) ? 0x10 : 0x20;
250
251 /* set the number of data bits */
252 switch (cflag & CSIZE) {
253 case CS5:
254 val |= 5 << 8; break;
255 case CS6:
256 val |= 6 << 8; break;
257 case CS7:
258 val |= 7 << 8; break;
259 case CS8:
260 val |= 8 << 8; break;
261 default:
262 err("CSIZE was not CS5-CS8, using default of 8");
263 val |= 8 << 8;
264 break;
265 }
266
267 /* set the number of stop bits */
268 if (cflag & CSTOPB) {
269 if ((cflag & CSIZE) == CS5)
270 val |= 1; /* 1.5 stop bits */ //FIXME is this okay?
271 else
272 val |= 2; /* 2 stop bits */
273 }
274
275 return set_value(cs, 3, val);
276}
277
278#else
279static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
280 unsigned new_state)
281{
282 return -EINVAL;
283}
284
285static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
286{
287 return -EINVAL;
288}
289
290static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
291{
292 return -EINVAL;
293}
294#endif
295
296
297 /*================================================================================================================*/
298static int gigaset_init_bchannel(struct bc_state *bcs)
299{
300 /* nothing to do for M10x */
301 gigaset_bchannel_up(bcs);
302 return 0;
303}
304
305static int gigaset_close_bchannel(struct bc_state *bcs)
306{
307 /* nothing to do for M10x */
308 gigaset_bchannel_down(bcs);
309 return 0;
310}
311
312//void send_ack_to_LL(void *data);
313static int write_modem(struct cardstate *cs);
314static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb);
315
316
317/* Handling of the send queue. If an skb is already open, put its data into
318 * the transfer buffer by calling "write_modem". Otherwise take a new skb out of the queue.
319 * This function is called by the ISR via "transmit_chars" (USB: B-channel bulk callback handler
320 * via the immediate task queue) or by writebuf_from_LL if the LL wants to transmit data.
321 */
322static void gigaset_modem_fill(unsigned long data)
323{
324 struct cardstate *cs = (struct cardstate *) data;
325 struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
326 struct cmdbuf_t *cb;
327 unsigned long flags;
328 int again;
329
330 dbg(DEBUG_OUTPUT, "modem_fill");
331
332 if (atomic_read(&cs->hw.usb->busy)) {
333 dbg(DEBUG_OUTPUT, "modem_fill: busy");
334 return;
335 }
336
337 do {
338 again = 0;
339 if (!bcs->tx_skb) { /* no skb is being sent */
340 spin_lock_irqsave(&cs->cmdlock, flags);
341 cb = cs->cmdbuf;
342 spin_unlock_irqrestore(&cs->cmdlock, flags);
343 if (cb) { /* commands to send? */
344 dbg(DEBUG_OUTPUT, "modem_fill: cb");
345 if (send_cb(cs, cb) < 0) {
346 dbg(DEBUG_OUTPUT,
347 "modem_fill: send_cb failed");
348 again = 1; /* no callback will be called! */
349 }
350 } else { /* skbs to send? */
351 bcs->tx_skb = skb_dequeue(&bcs->squeue);
352 if (bcs->tx_skb)
353 dbg(DEBUG_INTR,
354 "Dequeued skb (Adr: %lx)!",
355 (unsigned long) bcs->tx_skb);
356 }
357 }
358
359 if (bcs->tx_skb) {
360 dbg(DEBUG_OUTPUT, "modem_fill: tx_skb");
361 if (write_modem(cs) < 0) {
362 dbg(DEBUG_OUTPUT,
363 "modem_fill: write_modem failed");
364 // FIXME should we tell the LL?
365 again = 1; /* no callback will be called! */
366 }
367 }
368 } while (again);
369}
370
371/**
372 * gigaset_read_int_callback
373 *
374 * Called when data has been received from the device. This is similar to
375 * the interrupt service routine in the serial device.
376 */
377static void gigaset_read_int_callback(struct urb *urb, struct pt_regs *regs)
378{
379 int resubmit = 0;
380 int r;
381 struct cardstate *cs;
382 unsigned numbytes;
383 unsigned char *src;
384 //unsigned long flags;
385 struct inbuf_t *inbuf;
386
387 IFNULLRET(urb);
388 inbuf = (struct inbuf_t *) urb->context;
389 IFNULLRET(inbuf);
390 //spin_lock_irqsave(&inbuf->lock, flags);
391 cs = inbuf->cs;
392 IFNULLGOTO(cs, exit);
393 IFNULLGOTO(cardstate, exit);
394
395 if (!atomic_read(&cs->connected)) {
396 err("%s: disconnected", __func__);
397 goto exit;
398 }
399
400 if (!urb->status) {
401 numbytes = urb->actual_length;
402
403 if (numbytes) {
404 src = inbuf->rcvbuf;
405 if (unlikely(*src))
406 warn("%s: There was no leading 0, but 0x%02x!",
407 __func__, (unsigned) *src);
408 ++src; /* skip leading 0x00 */
409 --numbytes;
410 if (gigaset_fill_inbuf(inbuf, src, numbytes)) {
411 dbg(DEBUG_INTR, "%s-->BH", __func__);
412 gigaset_schedule_event(inbuf->cs);
413 }
414 } else
415 dbg(DEBUG_INTR, "Received zero block length");
416 resubmit = 1;
417 } else {
418 /* The urb might have been killed. */
419 dbg(DEBUG_ANY, "%s - nonzero read bulk status received: %d",
420 __func__, urb->status);
421 if (urb->status != -ENOENT) /* not killed */
422 resubmit = 1;
423 }
424exit:
425 //spin_unlock_irqrestore(&inbuf->lock, flags);
426 if (resubmit) {
427 r = usb_submit_urb(urb, SLAB_ATOMIC);
428 if (r)
429 err("error %d when resubmitting urb.", -r);
430 }
431}
432
433
434/* This callback routine is called when data was transmitted to a B-Channel.
435 * Therefore it has to check if there is still data to transmit. This
436 * happens by calling modem_fill via task queue.
437 *
438 */
439static void gigaset_write_bulk_callback(struct urb *urb, struct pt_regs *regs)
440{
441 struct cardstate *cs = (struct cardstate *) urb->context;
442
443 IFNULLRET(cs);
444#ifdef CONFIG_GIGASET_DEBUG
445 if (!atomic_read(&cs->connected)) {
446 err("%s:not connected", __func__);
447 return;
448 }
449#endif
450 if (urb->status)
451 err("bulk transfer failed (status %d)", -urb->status); /* That's all we can do. Communication problems
452 are handled by timeouts or network protocols */
453
454 atomic_set(&cs->hw.usb->busy, 0);
455 tasklet_schedule(&cs->write_tasklet);
456}
457
458static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
459{
460 struct cmdbuf_t *tcb;
461 unsigned long flags;
462 int count;
463 int status = -ENOENT; // FIXME
464 struct usb_cardstate *ucs = cs->hw.usb;
465
466 do {
467 if (!cb->len) {
468 tcb = cb;
469
470 spin_lock_irqsave(&cs->cmdlock, flags);
471 cs->cmdbytes -= cs->curlen;
472 dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left",
473 cs->curlen, cs->cmdbytes);
474 cs->cmdbuf = cb = cb->next;
475 if (cb) {
476 cb->prev = NULL;
477 cs->curlen = cb->len;
478 } else {
479 cs->lastcmdbuf = NULL;
480 cs->curlen = 0;
481 }
482 spin_unlock_irqrestore(&cs->cmdlock, flags);
483
484 if (tcb->wake_tasklet)
485 tasklet_schedule(tcb->wake_tasklet);
486 kfree(tcb);
487 }
488 if (cb) {
489 count = min(cb->len, ucs->bulk_out_size);
490 usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
491 usb_sndbulkpipe(ucs->udev,
492 ucs->bulk_out_endpointAddr & 0x0f),
493 cb->buf + cb->offset, count,
494 gigaset_write_bulk_callback, cs);
495
496 cb->offset += count;
497 cb->len -= count;
498 atomic_set(&ucs->busy, 1);
499 dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count);
500
501 status = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC);
502 if (status) {
503 atomic_set(&ucs->busy, 0);
504 err("could not submit urb (error %d).",
505 -status);
506 cb->len = 0; /* skip urb => remove cb+wakeup in next loop cycle */
507 }
508 }
509 } while (cb && status); /* on error, try the next command //FIXME: is that OK? */
510
511 return status;
512}
513
514/* Write string into transbuf and send it to modem.
515 */
516static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
517 int len, struct tasklet_struct *wake_tasklet)
518{
519 struct cmdbuf_t *cb;
520 unsigned long flags;
521
522 gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
523 DEBUG_TRANSCMD : DEBUG_LOCKCMD,
524 "CMD Transmit", len, buf, 0);
525
526 if (!atomic_read(&cs->connected)) {
527 err("%s: not connected", __func__);
528 return -ENODEV;
529 }
530
531 if (len <= 0)
532 return 0;
533
534 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) {
535 err("%s: out of memory", __func__);
536 return -ENOMEM;
537 }
538
539 memcpy(cb->buf, buf, len);
540 cb->len = len;
541 cb->offset = 0;
542 cb->next = NULL;
543 cb->wake_tasklet = wake_tasklet;
544
545 spin_lock_irqsave(&cs->cmdlock, flags);
546 cb->prev = cs->lastcmdbuf;
547 if (cs->lastcmdbuf)
548 cs->lastcmdbuf->next = cb;
549 else {
550 cs->cmdbuf = cb;
551 cs->curlen = len;
552 }
553 cs->cmdbytes += len;
554 cs->lastcmdbuf = cb;
555 spin_unlock_irqrestore(&cs->cmdlock, flags);
556
557 tasklet_schedule(&cs->write_tasklet);
558 return len;
559}
560
561static int gigaset_write_room(struct cardstate *cs)
562{
563 unsigned long flags;
564 unsigned bytes;
565
566 spin_lock_irqsave(&cs->cmdlock, flags);
567 bytes = cs->cmdbytes;
568 spin_unlock_irqrestore(&cs->cmdlock, flags);
569
570 return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0;
571}
572
573static int gigaset_chars_in_buffer(struct cardstate *cs)
574{
575 return cs->cmdbytes;
576}
577
578static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
579{
580#ifdef CONFIG_GIGASET_UNDOCREQ
581 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf, 0);
582 memcpy(cs->hw.usb->bchars, buf, 6);
583 return usb_control_msg(cs->hw.usb->udev,
584 usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x19, 0x41,
585 0, 0, cs->hw.usb->bchars, 6, 2000);
586#else
587 return -EINVAL;
588#endif
589}
590
591static int gigaset_freebcshw(struct bc_state *bcs)
592{
593 if (!bcs->hw.usb)
594 return 0;
595 //FIXME
596 kfree(bcs->hw.usb);
597 return 1;
598}
599
600/* Initialize the b-channel structure */
601static int gigaset_initbcshw(struct bc_state *bcs)
602{
603 bcs->hw.usb = kmalloc(sizeof(struct usb_bc_state), GFP_KERNEL);
604 if (!bcs->hw.usb)
605 return 0;
606
607 //bcs->hw.usb->trans_flg = READY_TO_TRNSMIT; /* B-Channel ready to transmit */
608 return 1;
609}
610
611static void gigaset_reinitbcshw(struct bc_state *bcs)
612{
613}
614
615static void gigaset_freecshw(struct cardstate *cs)
616{
617 //FIXME
618 tasklet_kill(&cs->write_tasklet);
619 kfree(cs->hw.usb);
620}
621
622static int gigaset_initcshw(struct cardstate *cs)
623{
624 struct usb_cardstate *ucs;
625
626 cs->hw.usb = ucs =
627 kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
628 if (!ucs)
629 return 0;
630
631 ucs->bchars[0] = 0;
632 ucs->bchars[1] = 0;
633 ucs->bchars[2] = 0;
634 ucs->bchars[3] = 0;
635 ucs->bchars[4] = 0x11;
636 ucs->bchars[5] = 0x13;
637 ucs->bulk_out_buffer = NULL;
638 ucs->bulk_out_urb = NULL;
639 //ucs->urb_cmd_out = NULL;
640 ucs->read_urb = NULL;
641 tasklet_init(&cs->write_tasklet,
642 &gigaset_modem_fill, (unsigned long) cs);
643
644 return 1;
645}
646
647/* Writes the data of the currently open skb into the modem.
648 * We have to protect against multiple calls until the
649 * callback handler is called, because we are only allowed to
650 * send data to an endpoint once at a time. Therefore
651 * we use "trans_flg" to synchronize ...
652 */
653static int write_modem(struct cardstate *cs)
654{
655 int ret;
656 int count;
657 struct bc_state *bcs = &cs->bcs[0]; /* only one channel */
658 struct usb_cardstate *ucs = cs->hw.usb;
659 //unsigned long flags;
660
661 IFNULLRETVAL(bcs->tx_skb, -EINVAL);
662
663 dbg(DEBUG_WRITE, "len: %d...", bcs->tx_skb->len);
664
665 ret = -ENODEV;
666 IFNULLGOTO(ucs->bulk_out_buffer, error);
667 IFNULLGOTO(ucs->bulk_out_urb, error);
668 ret = 0;
669
670 if (!bcs->tx_skb->len) {
671 dev_kfree_skb_any(bcs->tx_skb);
672 bcs->tx_skb = NULL;
673 return -EINVAL;
674 }
675
676 /* Copy data to bulk out buffer and // FIXME copying not necessary
677 * transmit data
678 */
679 count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
680 memcpy(ucs->bulk_out_buffer, bcs->tx_skb->data, count);
681 skb_pull(bcs->tx_skb, count);
682
683 usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
684 usb_sndbulkpipe(ucs->udev,
685 ucs->bulk_out_endpointAddr & 0x0f),
686 ucs->bulk_out_buffer, count,
687 gigaset_write_bulk_callback, cs);
688 atomic_set(&ucs->busy, 1);
689 dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
690
691 ret = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC);
692 if (ret) {
693 err("could not submit urb (error %d).", -ret);
694 atomic_set(&ucs->busy, 0);
695 }
696 if (!bcs->tx_skb->len) {
697 /* skb sent completely */
698 gigaset_skb_sent(bcs, bcs->tx_skb); //FIXME also, when ret<0?
699
700 dbg(DEBUG_INTR,
701 "kfree skb (Adr: %lx)!", (unsigned long) bcs->tx_skb);
702 dev_kfree_skb_any(bcs->tx_skb);
703 bcs->tx_skb = NULL;
704 }
705
706 return ret;
707error:
708 dev_kfree_skb_any(bcs->tx_skb);
709 bcs->tx_skb = NULL;
710 return ret;
711
712}
713
714static int gigaset_probe(struct usb_interface *interface,
715 const struct usb_device_id *id)
716{
717 int retval;
718 struct usb_device *udev = interface_to_usbdev(interface);
719 unsigned int ifnum;
720 struct usb_host_interface *hostif;
721 struct cardstate *cs = NULL;
722 struct usb_cardstate *ucs = NULL;
723 //struct usb_interface_descriptor *iface_desc;
724 struct usb_endpoint_descriptor *endpoint;
725 //isdn_ctrl command;
726 int buffer_size;
727 int alt;
728 //unsigned long flags;
729
730 info("%s: Check if device matches ... (Vendor: 0x%x, Product: 0x%x)",
731 __func__, le16_to_cpu(udev->descriptor.idVendor),
732 le16_to_cpu(udev->descriptor.idProduct));
733
734 retval = -ENODEV; //FIXME
735
736 /* See if the device offered us matches what we can accept */
737 if (le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID ||
738 le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)
739 return -ENODEV;
740
741 /* this starts to become ascii art... */
742 hostif = interface->cur_altsetting;
743 alt = hostif->desc.bAlternateSetting;
744 ifnum = hostif->desc.bInterfaceNumber; // FIXME ?
745
746 if (alt != 0 || ifnum != 0) {
747 warn("ifnum %d, alt %d", ifnum, alt);
748 return -ENODEV;
749 }
750
751 /* Reject application specific interfaces
752 *
753 */
754 if (hostif->desc.bInterfaceClass != 255) {
755 info("%s: Device matched, but iface_desc[%d]->bInterfaceClass==%d !",
756 __func__, ifnum, hostif->desc.bInterfaceClass);
757 return -ENODEV;
758 }
759
760 info("%s: Device matched ... !", __func__);
761
762 cs = gigaset_getunassignedcs(driver);
763 if (!cs) {
764 warn("No free cardstate!");
765 return -ENODEV;
766 }
767 ucs = cs->hw.usb;
768
769#if 0
770 if (usb_set_configuration(udev, udev->config[0].desc.bConfigurationValue) < 0) {
771 warn("set_configuration failed");
772 goto error;
773 }
774
775
776 if (usb_set_interface(udev, ifnum/*==0*/, alt/*==0*/) < 0) {
777 warn("usb_set_interface failed, device %d interface %d altsetting %d",
778 udev->devnum, ifnum, alt);
779 goto error;
780 }
781#endif
782
783 /* set up the endpoint information */
784 /* check out the endpoints */
785 /* We will get 2 endpoints: one for sending commands to the device (bulk out) and one to
786 * poll messages from the device (int in).
787 * This gives us a situation similar to our serial port handler.
788 * If a connection is established, we will have to create data in/out pipes
789 * dynamically...
790 */
791
792 endpoint = &hostif->endpoint[0].desc;
793
794 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
795 ucs->bulk_out_size = buffer_size;
796 ucs->bulk_out_endpointAddr = endpoint->bEndpointAddress;
797 ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
798 if (!ucs->bulk_out_buffer) {
799 err("Couldn't allocate bulk_out_buffer");
800 retval = -ENOMEM;
801 goto error;
802 }
803
804 ucs->bulk_out_urb = usb_alloc_urb(0, SLAB_KERNEL);
805 if (!ucs->bulk_out_urb) {
806 err("Couldn't allocate bulk_out_urb");
807 retval = -ENOMEM;
808 goto error;
809 }
810
811 endpoint = &hostif->endpoint[1].desc;
812
813 atomic_set(&ucs->busy, 0);
814 ucs->udev = udev;
815 ucs->interface = interface;
816
817 ucs->read_urb = usb_alloc_urb(0, SLAB_KERNEL);
818 if (!ucs->read_urb) {
819 err("No free urbs available");
820 retval = -ENOMEM;
821 goto error;
822 }
823 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
824 ucs->rcvbuf_size = buffer_size;
825 ucs->int_in_endpointAddr = endpoint->bEndpointAddress;
826 cs->inbuf[0].rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
827 if (!cs->inbuf[0].rcvbuf) {
828 err("Couldn't allocate rcvbuf");
829 retval = -ENOMEM;
830 goto error;
831 }
832 /* Fill the interrupt urb and send it to the core */
833 usb_fill_int_urb(ucs->read_urb, udev,
834 usb_rcvintpipe(udev,
835 endpoint->bEndpointAddress & 0x0f),
836 cs->inbuf[0].rcvbuf, buffer_size,
837 gigaset_read_int_callback,
838 cs->inbuf + 0, endpoint->bInterval);
839
840 retval = usb_submit_urb(ucs->read_urb, SLAB_KERNEL);
841 if (retval) {
842 err("Could not submit URB!");
843 goto error;
844 }
845
846 /* tell common part that the device is ready */
847 if (startmode == SM_LOCKED)
848 atomic_set(&cs->mstate, MS_LOCKED);
849 if (!gigaset_start(cs)) {
850 tasklet_kill(&cs->write_tasklet);
851 retval = -ENODEV; //FIXME
852 goto error;
853 }
854
855 /* save address of controller structure */
856 usb_set_intfdata(interface, cs);
857
858 /* set up device sysfs */
859 gigaset_init_dev_sysfs(interface);
860 return 0;
861
862error:
863 if (ucs->read_urb)
864 usb_kill_urb(ucs->read_urb);
865 kfree(ucs->bulk_out_buffer);
866 if (ucs->bulk_out_urb != NULL)
867 usb_free_urb(ucs->bulk_out_urb);
868 kfree(cs->inbuf[0].rcvbuf);
869 if (ucs->read_urb != NULL)
870 usb_free_urb(ucs->read_urb);
871 ucs->read_urb = ucs->bulk_out_urb = NULL;
872 cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;
873 gigaset_unassign(cs);
874 return retval;
875}
876
877/**
878 * skel_disconnect
879 */
880static void gigaset_disconnect(struct usb_interface *interface)
881{
882 struct cardstate *cs;
883 struct usb_cardstate *ucs;
884
885 cs = usb_get_intfdata(interface);
886
887 /* clear device sysfs */
888 gigaset_free_dev_sysfs(interface);
889
890 usb_set_intfdata(interface, NULL);
891 ucs = cs->hw.usb;
892 usb_kill_urb(ucs->read_urb);
893 //info("GigaSet USB device #%d will be disconnected", minor);
894
895 gigaset_stop(cs);
896
897 tasklet_kill(&cs->write_tasklet);
898
899 usb_kill_urb(ucs->bulk_out_urb); /* FIXME: only if necessary */
900 //usb_kill_urb(ucs->urb_cmd_out); /* FIXME: only if necessary */
901
902 kfree(ucs->bulk_out_buffer);
903 if (ucs->bulk_out_urb != NULL)
904 usb_free_urb(ucs->bulk_out_urb);
905 //if(ucs->urb_cmd_out != NULL)
906 // usb_free_urb(ucs->urb_cmd_out);
907 kfree(cs->inbuf[0].rcvbuf);
908 if (ucs->read_urb != NULL)
909 usb_free_urb(ucs->read_urb);
910 ucs->read_urb = ucs->bulk_out_urb/*=ucs->urb_cmd_out*/=NULL;
911 cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL;
912
913 gigaset_unassign(cs);
914}
915
916static struct gigaset_ops ops = {
917 gigaset_write_cmd,
918 gigaset_write_room,
919 gigaset_chars_in_buffer,
920 gigaset_brkchars,
921 gigaset_init_bchannel,
922 gigaset_close_bchannel,
923 gigaset_initbcshw,
924 gigaset_freebcshw,
925 gigaset_reinitbcshw,
926 gigaset_initcshw,
927 gigaset_freecshw,
928 gigaset_set_modem_ctrl,
929 gigaset_baud_rate,
930 gigaset_set_line_ctrl,
931 gigaset_m10x_send_skb,
932 gigaset_m10x_input,
933};
934
935/**
936 * usb_gigaset_init
937 * This function is called while kernel-module is loaded
938 */
939static int __init usb_gigaset_init(void)
940{
941 int result;
942
943 /* allocate memory for our driver state and initialize it */
944 if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
945 GIGASET_MODULENAME, GIGASET_DEVNAME,
946 GIGASET_DEVFSNAME, &ops,
947 THIS_MODULE)) == NULL)
948 goto error;
949
950 /* allocate memory for our device state and initialize it */
951 cardstate = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
952 if (!cardstate)
953 goto error;
954
955 /* register this driver with the USB subsystem */
956 result = usb_register(&gigaset_usb_driver);
957 if (result < 0) {
958 err("usb_gigaset: usb_register failed (error %d)",
959 -result);
960 goto error;
961 }
962
963 info(DRIVER_AUTHOR);
964 info(DRIVER_DESC);
965 return 0;
966
967error: if (cardstate)
968 gigaset_freecs(cardstate);
969 cardstate = NULL;
970 if (driver)
971 gigaset_freedriver(driver);
972 driver = NULL;
973 return -1;
974}
975
976
977/**
978 * usb_gigaset_exit
979 * This function is called while unloading the kernel-module
980 */
981static void __exit usb_gigaset_exit(void)
982{
983 gigaset_blockdriver(driver); /* => probe will fail
984 * => no gigaset_start any more
985 */
986
987 gigaset_shutdown(cardstate);
988 /* from now on, no isdn callback should be possible */
989
990 /* deregister this driver with the USB subsystem */
991 usb_deregister(&gigaset_usb_driver);
992 /* this will call the disconnect-callback */
993 /* from now on, no disconnect/probe callback should be running */
994
995 gigaset_freecs(cardstate);
996 cardstate = NULL;
997 gigaset_freedriver(driver);
998 driver = NULL;
999}
1000
1001
1002module_init(usb_gigaset_init);
1003module_exit(usb_gigaset_exit);
1004
1005MODULE_AUTHOR(DRIVER_AUTHOR);
1006MODULE_DESCRIPTION(DRIVER_DESC);
1007
1008MODULE_LICENSE("GPL");
diff --git a/drivers/isdn/hardware/avm/avmcard.h b/drivers/isdn/hardware/avm/avmcard.h
index 296d6a6f749f..3b431723c7cb 100644
--- a/drivers/isdn/hardware/avm/avmcard.h
+++ b/drivers/isdn/hardware/avm/avmcard.h
@@ -437,9 +437,7 @@ static inline unsigned int t1_get_slice(unsigned int base,
437#endif 437#endif
438 dp += i; 438 dp += i;
439 i = 0; 439 i = 0;
440 if (i == 0) 440 break;
441 break;
442 /* fall through */
443 default: 441 default:
444 *dp++ = b1_get_byte(base); 442 *dp++ = b1_get_byte(base);
445 i--; 443 i--;
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 1789b607f090..a4f7288a1fc8 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -139,3 +139,4 @@ source "drivers/isdn/hysdn/Kconfig"
139 139
140endmenu 140endmenu
141 141
142source "drivers/isdn/gigaset/Kconfig"
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 4eb05d7143d8..f4516ca7aa3a 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/sysdev.h> 36#include <linux/sysdev.h>
37#include <linux/poll.h> 37#include <linux/poll.h>
38#include <linux/mutex.h>
38 39
39#include <asm/byteorder.h> 40#include <asm/byteorder.h>
40#include <asm/io.h> 41#include <asm/io.h>
@@ -92,7 +93,7 @@ struct smu_device {
92 * for now, just hard code that 93 * for now, just hard code that
93 */ 94 */
94static struct smu_device *smu; 95static struct smu_device *smu;
95static DECLARE_MUTEX(smu_part_access); 96static DEFINE_MUTEX(smu_part_access);
96 97
97static void smu_i2c_retry(unsigned long data); 98static void smu_i2c_retry(unsigned long data);
98 99
@@ -976,11 +977,11 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
976 977
977 if (interruptible) { 978 if (interruptible) {
978 int rc; 979 int rc;
979 rc = down_interruptible(&smu_part_access); 980 rc = mutex_lock_interruptible(&smu_part_access);
980 if (rc) 981 if (rc)
981 return ERR_PTR(rc); 982 return ERR_PTR(rc);
982 } else 983 } else
983 down(&smu_part_access); 984 mutex_lock(&smu_part_access);
984 985
985 part = (struct smu_sdbp_header *)get_property(smu->of_node, 986 part = (struct smu_sdbp_header *)get_property(smu->of_node,
986 pname, size); 987 pname, size);
@@ -990,7 +991,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size,
990 if (part != NULL && size) 991 if (part != NULL && size)
991 *size = part->len << 2; 992 *size = part->len << 2;
992 } 993 }
993 up(&smu_part_access); 994 mutex_unlock(&smu_part_access);
994 return part; 995 return part;
995} 996}
996 997
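
The smu.c change above (like the kcopyd.c change further down) follows the standard semaphore-to-mutex conversion: DECLARE_MUTEX becomes DEFINE_MUTEX from <linux/mutex.h>, and down()/down_interruptible()/up() become mutex_lock()/mutex_lock_interruptible()/mutex_unlock(). A minimal sketch of the pattern, with placeholder names that are not taken from the tree:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* was: static DECLARE_MUTEX(example_lock); */

static int example_access(int interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&example_lock))	/* was: down_interruptible() */
			return -EINTR;
	} else {
		mutex_lock(&example_lock);			/* was: down() */
	}
	/* ... touch the shared state ... */
	mutex_unlock(&example_lock);				/* was: up() */
	return 0;
}
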
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index e1c18aa1d712..f8ffaee20ff8 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -89,16 +89,6 @@ int bitmap_active(struct bitmap *bitmap)
89} 89}
90 90
91#define WRITE_POOL_SIZE 256 91#define WRITE_POOL_SIZE 256
92/* mempool for queueing pending writes on the bitmap file */
93static void *write_pool_alloc(gfp_t gfp_flags, void *data)
94{
95 return kmalloc(sizeof(struct page_list), gfp_flags);
96}
97
98static void write_pool_free(void *ptr, void *data)
99{
100 kfree(ptr);
101}
102 92
103/* 93/*
104 * just a placeholder - calls kmalloc for bitmap pages 94 * just a placeholder - calls kmalloc for bitmap pages
@@ -1564,8 +1554,8 @@ int bitmap_create(mddev_t *mddev)
1564 spin_lock_init(&bitmap->write_lock); 1554 spin_lock_init(&bitmap->write_lock);
1565 INIT_LIST_HEAD(&bitmap->complete_pages); 1555 INIT_LIST_HEAD(&bitmap->complete_pages);
1566 init_waitqueue_head(&bitmap->write_wait); 1556 init_waitqueue_head(&bitmap->write_wait);
1567 bitmap->write_pool = mempool_create(WRITE_POOL_SIZE, write_pool_alloc, 1557 bitmap->write_pool = mempool_create_kmalloc_pool(WRITE_POOL_SIZE,
1568 write_pool_free, NULL); 1558 sizeof(struct page_list));
1569 err = -ENOMEM; 1559 err = -ENOMEM;
1570 if (!bitmap->write_pool) 1560 if (!bitmap->write_pool)
1571 goto error; 1561 goto error;
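
The bitmap.c hunk above, like the dm, md and i2o hunks that follow, replaces open-coded alloc/free callbacks passed to mempool_create() with the stock mempool helpers (mempool_create_kmalloc_pool, mempool_create_slab_pool, mempool_create_kzalloc_pool, mempool_create_page_pool). A minimal sketch of the before/after shape, with placeholder names that are not part of any of these drivers:

#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct my_io { int placeholder; };	/* stand-in for the pool element type */

static mempool_t *my_pool;

static int my_pool_init(void)
{
	/* before: my_pool = mempool_create(16, my_kmalloc_cb, my_kfree_cb, NULL);
	 * after:  the helper supplies the kmalloc/kfree callbacks itself */
	my_pool = mempool_create_kmalloc_pool(16, sizeof(struct my_io));
	return my_pool ? 0 : -ENOMEM;
}
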
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index e7a650f9ca07..259e86f26549 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -94,20 +94,6 @@ struct crypt_config {
94static kmem_cache_t *_crypt_io_pool; 94static kmem_cache_t *_crypt_io_pool;
95 95
96/* 96/*
97 * Mempool alloc and free functions for the page
98 */
99static void *mempool_alloc_page(gfp_t gfp_mask, void *data)
100{
101 return alloc_page(gfp_mask);
102}
103
104static void mempool_free_page(void *page, void *data)
105{
106 __free_page(page);
107}
108
109
110/*
111 * Different IV generation algorithms: 97 * Different IV generation algorithms:
112 * 98 *
113 * plain: the initial vector is the 32-bit low-endian version of the sector 99 * plain: the initial vector is the 32-bit low-endian version of the sector
@@ -630,15 +616,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
630 } 616 }
631 } 617 }
632 618
633 cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, 619 cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
634 mempool_free_slab, _crypt_io_pool);
635 if (!cc->io_pool) { 620 if (!cc->io_pool) {
636 ti->error = PFX "Cannot allocate crypt io mempool"; 621 ti->error = PFX "Cannot allocate crypt io mempool";
637 goto bad3; 622 goto bad3;
638 } 623 }
639 624
640 cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page, 625 cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
641 mempool_free_page, NULL);
642 if (!cc->page_pool) { 626 if (!cc->page_pool) {
643 ti->error = PFX "Cannot allocate page mempool"; 627 ti->error = PFX "Cannot allocate page mempool";
644 goto bad4; 628 goto bad4;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 4809b209fbb1..da663d2ff552 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -32,16 +32,6 @@ struct io {
32static unsigned _num_ios; 32static unsigned _num_ios;
33static mempool_t *_io_pool; 33static mempool_t *_io_pool;
34 34
35static void *alloc_io(gfp_t gfp_mask, void *pool_data)
36{
37 return kmalloc(sizeof(struct io), gfp_mask);
38}
39
40static void free_io(void *element, void *pool_data)
41{
42 kfree(element);
43}
44
45static unsigned int pages_to_ios(unsigned int pages) 35static unsigned int pages_to_ios(unsigned int pages)
46{ 36{
47 return 4 * pages; /* too many ? */ 37 return 4 * pages; /* too many ? */
@@ -65,7 +55,8 @@ static int resize_pool(unsigned int new_ios)
65 55
66 } else { 56 } else {
67 /* create new pool */ 57 /* create new pool */
68 _io_pool = mempool_create(new_ios, alloc_io, free_io, NULL); 58 _io_pool = mempool_create_kmalloc_pool(new_ios,
59 sizeof(struct io));
69 if (!_io_pool) 60 if (!_io_pool)
70 return -ENOMEM; 61 return -ENOMEM;
71 62
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index f72a82fb9434..1816f30678ed 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -179,8 +179,7 @@ static struct multipath *alloc_multipath(void)
179 m->queue_io = 1; 179 m->queue_io = 1;
180 INIT_WORK(&m->process_queued_ios, process_queued_ios, m); 180 INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
181 INIT_WORK(&m->trigger_event, trigger_event, m); 181 INIT_WORK(&m->trigger_event, trigger_event, m);
182 m->mpio_pool = mempool_create(MIN_IOS, mempool_alloc_slab, 182 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
183 mempool_free_slab, _mpio_cache);
184 if (!m->mpio_pool) { 183 if (!m->mpio_pool) {
185 kfree(m); 184 kfree(m);
186 return NULL; 185 return NULL;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 6cfa8d435d55..4e90f231fbfb 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -122,16 +122,6 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
122/* FIXME move this */ 122/* FIXME move this */
123static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); 123static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
124 124
125static void *region_alloc(gfp_t gfp_mask, void *pool_data)
126{
127 return kmalloc(sizeof(struct region), gfp_mask);
128}
129
130static void region_free(void *element, void *pool_data)
131{
132 kfree(element);
133}
134
135#define MIN_REGIONS 64 125#define MIN_REGIONS 64
136#define MAX_RECOVERY 1 126#define MAX_RECOVERY 1
137static int rh_init(struct region_hash *rh, struct mirror_set *ms, 127static int rh_init(struct region_hash *rh, struct mirror_set *ms,
@@ -173,8 +163,8 @@ static int rh_init(struct region_hash *rh, struct mirror_set *ms,
173 INIT_LIST_HEAD(&rh->quiesced_regions); 163 INIT_LIST_HEAD(&rh->quiesced_regions);
174 INIT_LIST_HEAD(&rh->recovered_regions); 164 INIT_LIST_HEAD(&rh->recovered_regions);
175 165
176 rh->region_pool = mempool_create(MIN_REGIONS, region_alloc, 166 rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
177 region_free, NULL); 167 sizeof(struct region));
178 if (!rh->region_pool) { 168 if (!rh->region_pool) {
179 vfree(rh->buckets); 169 vfree(rh->buckets);
180 rh->buckets = NULL; 170 rh->buckets = NULL;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f3759dd7828e..7401540086df 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1174,8 +1174,7 @@ static int __init dm_snapshot_init(void)
1174 goto bad4; 1174 goto bad4;
1175 } 1175 }
1176 1176
1177 pending_pool = mempool_create(128, mempool_alloc_slab, 1177 pending_pool = mempool_create_slab_pool(128, pending_cache);
1178 mempool_free_slab, pending_cache);
1179 if (!pending_pool) { 1178 if (!pending_pool) {
1180 DMERR("Couldn't create pending pool."); 1179 DMERR("Couldn't create pending pool.");
1181 r = -ENOMEM; 1180 r = -ENOMEM;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8c82373f7ff3..a64798ef481e 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -823,13 +823,11 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
823 md->queue->unplug_fn = dm_unplug_all; 823 md->queue->unplug_fn = dm_unplug_all;
824 md->queue->issue_flush_fn = dm_flush_all; 824 md->queue->issue_flush_fn = dm_flush_all;
825 825
826 md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, 826 md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
827 mempool_free_slab, _io_cache);
828 if (!md->io_pool) 827 if (!md->io_pool)
829 goto bad2; 828 goto bad2;
830 829
831 md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab, 830 md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
832 mempool_free_slab, _tio_cache);
833 if (!md->tio_pool) 831 if (!md->tio_pool)
834 goto bad3; 832 goto bad3;
835 833
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index 8b3515f394a6..9dcb2c8a3853 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -227,8 +227,7 @@ static int jobs_init(void)
227 if (!_job_cache) 227 if (!_job_cache)
228 return -ENOMEM; 228 return -ENOMEM;
229 229
230 _job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab, 230 _job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
231 mempool_free_slab, _job_cache);
232 if (!_job_pool) { 231 if (!_job_pool) {
233 kmem_cache_destroy(_job_cache); 232 kmem_cache_destroy(_job_cache);
234 return -ENOMEM; 233 return -ENOMEM;
@@ -590,51 +589,51 @@ static void client_del(struct kcopyd_client *kc)
590 up(&_client_lock); 589 up(&_client_lock);
591} 590}
592 591
593static DECLARE_MUTEX(kcopyd_init_lock); 592static DEFINE_MUTEX(kcopyd_init_lock);
594static int kcopyd_clients = 0; 593static int kcopyd_clients = 0;
595 594
596static int kcopyd_init(void) 595static int kcopyd_init(void)
597{ 596{
598 int r; 597 int r;
599 598
600 down(&kcopyd_init_lock); 599 mutex_lock(&kcopyd_init_lock);
601 600
602 if (kcopyd_clients) { 601 if (kcopyd_clients) {
603 /* Already initialized. */ 602 /* Already initialized. */
604 kcopyd_clients++; 603 kcopyd_clients++;
605 up(&kcopyd_init_lock); 604 mutex_unlock(&kcopyd_init_lock);
606 return 0; 605 return 0;
607 } 606 }
608 607
609 r = jobs_init(); 608 r = jobs_init();
610 if (r) { 609 if (r) {
611 up(&kcopyd_init_lock); 610 mutex_unlock(&kcopyd_init_lock);
612 return r; 611 return r;
613 } 612 }
614 613
615 _kcopyd_wq = create_singlethread_workqueue("kcopyd"); 614 _kcopyd_wq = create_singlethread_workqueue("kcopyd");
616 if (!_kcopyd_wq) { 615 if (!_kcopyd_wq) {
617 jobs_exit(); 616 jobs_exit();
618 up(&kcopyd_init_lock); 617 mutex_unlock(&kcopyd_init_lock);
619 return -ENOMEM; 618 return -ENOMEM;
620 } 619 }
621 620
622 kcopyd_clients++; 621 kcopyd_clients++;
623 INIT_WORK(&_kcopyd_work, do_work, NULL); 622 INIT_WORK(&_kcopyd_work, do_work, NULL);
624 up(&kcopyd_init_lock); 623 mutex_unlock(&kcopyd_init_lock);
625 return 0; 624 return 0;
626} 625}
627 626
628static void kcopyd_exit(void) 627static void kcopyd_exit(void)
629{ 628{
630 down(&kcopyd_init_lock); 629 mutex_lock(&kcopyd_init_lock);
631 kcopyd_clients--; 630 kcopyd_clients--;
632 if (!kcopyd_clients) { 631 if (!kcopyd_clients) {
633 jobs_exit(); 632 jobs_exit();
634 destroy_workqueue(_kcopyd_wq); 633 destroy_workqueue(_kcopyd_wq);
635 _kcopyd_wq = NULL; 634 _kcopyd_wq = NULL;
636 } 635 }
637 up(&kcopyd_init_lock); 636 mutex_unlock(&kcopyd_init_lock);
638} 637}
639 638
640int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result) 639int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 96f7af4ae400..1cc9de44ce86 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -35,18 +35,6 @@
35#define NR_RESERVED_BUFS 32 35#define NR_RESERVED_BUFS 32
36 36
37 37
38static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
39{
40 struct multipath_bh *mpb;
41 mpb = kzalloc(sizeof(*mpb), gfp_flags);
42 return mpb;
43}
44
45static void mp_pool_free(void *mpb, void *data)
46{
47 kfree(mpb);
48}
49
50static int multipath_map (multipath_conf_t *conf) 38static int multipath_map (multipath_conf_t *conf)
51{ 39{
52 int i, disks = conf->raid_disks; 40 int i, disks = conf->raid_disks;
@@ -494,9 +482,8 @@ static int multipath_run (mddev_t *mddev)
494 } 482 }
495 mddev->degraded = conf->raid_disks = conf->working_disks; 483 mddev->degraded = conf->raid_disks = conf->working_disks;
496 484
497 conf->pool = mempool_create(NR_RESERVED_BUFS, 485 conf->pool = mempool_create_kzalloc_pool(NR_RESERVED_BUFS,
498 mp_pool_alloc, mp_pool_free, 486 sizeof(struct multipath_bh));
499 NULL);
500 if (conf->pool == NULL) { 487 if (conf->pool == NULL) {
501 printk(KERN_ERR 488 printk(KERN_ERR
502 "multipath: couldn't allocate memory for %s\n", 489 "multipath: couldn't allocate memory for %s\n",
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index b09fb6307153..7d4c5497785b 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1179,10 +1179,9 @@ static int __init i2o_block_init(void)
1179 goto exit; 1179 goto exit;
1180 } 1180 }
1181 1181
1182 i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE, 1182 i2o_blk_req_pool.pool =
1183 mempool_alloc_slab, 1183 mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
1184 mempool_free_slab, 1184 i2o_blk_req_pool.slab);
1185 i2o_blk_req_pool.slab);
1186 if (!i2o_blk_req_pool.pool) { 1185 if (!i2o_blk_req_pool.pool) {
1187 osm_err("can't init request mempool\n"); 1186 osm_err("can't init request mempool\n");
1188 rc = -ENOMEM; 1187 rc = -ENOMEM;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index d339308539fa..70f63891b19c 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -196,8 +196,6 @@
196 196
197 197
198#define DRV_NAME "3c59x" 198#define DRV_NAME "3c59x"
199#define DRV_VERSION "LK1.1.19"
200#define DRV_RELDATE "10 Nov 2002"
201 199
202 200
203 201
@@ -275,10 +273,8 @@ static char version[] __devinitdata =
275DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n"; 273DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n";
276 274
277MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 275MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
278MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver " 276MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
279 DRV_VERSION " " DRV_RELDATE);
280MODULE_LICENSE("GPL"); 277MODULE_LICENSE("GPL");
281MODULE_VERSION(DRV_VERSION);
282 278
283 279
284/* Operational parameter that usually are not changed. */ 280/* Operational parameter that usually are not changed. */
@@ -904,7 +900,6 @@ static void acpi_set_WOL(struct net_device *dev);
904static struct ethtool_ops vortex_ethtool_ops; 900static struct ethtool_ops vortex_ethtool_ops;
905static void set_8021q_mode(struct net_device *dev, int enable); 901static void set_8021q_mode(struct net_device *dev, int enable);
906 902
907
908/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ 903/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
909/* Option count limit only -- unlimited interfaces are supported. */ 904/* Option count limit only -- unlimited interfaces are supported. */
910#define MAX_UNITS 8 905#define MAX_UNITS 8
@@ -919,8 +914,6 @@ static int global_full_duplex = -1;
919static int global_enable_wol = -1; 914static int global_enable_wol = -1;
920static int global_use_mmio = -1; 915static int global_use_mmio = -1;
921 916
922/* #define dev_alloc_skb dev_alloc_skb_debug */
923
924/* Variables to work-around the Compaq PCI BIOS32 problem. */ 917/* Variables to work-around the Compaq PCI BIOS32 problem. */
925static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900; 918static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
926static struct net_device *compaq_net_device; 919static struct net_device *compaq_net_device;
@@ -976,7 +969,7 @@ static void poll_vortex(struct net_device *dev)
976 969
977#ifdef CONFIG_PM 970#ifdef CONFIG_PM
978 971
979static int vortex_suspend (struct pci_dev *pdev, pm_message_t state) 972static int vortex_suspend(struct pci_dev *pdev, pm_message_t state)
980{ 973{
981 struct net_device *dev = pci_get_drvdata(pdev); 974 struct net_device *dev = pci_get_drvdata(pdev);
982 975
@@ -994,7 +987,7 @@ static int vortex_suspend (struct pci_dev *pdev, pm_message_t state)
994 return 0; 987 return 0;
995} 988}
996 989
997static int vortex_resume (struct pci_dev *pdev) 990static int vortex_resume(struct pci_dev *pdev)
998{ 991{
999 struct net_device *dev = pci_get_drvdata(pdev); 992 struct net_device *dev = pci_get_drvdata(pdev);
1000 struct vortex_private *vp = netdev_priv(dev); 993 struct vortex_private *vp = netdev_priv(dev);
@@ -1027,8 +1020,8 @@ static struct eisa_device_id vortex_eisa_ids[] = {
1027 { "" } 1020 { "" }
1028}; 1021};
1029 1022
1030static int vortex_eisa_probe (struct device *device); 1023static int vortex_eisa_probe(struct device *device);
1031static int vortex_eisa_remove (struct device *device); 1024static int vortex_eisa_remove(struct device *device);
1032 1025
1033static struct eisa_driver vortex_eisa_driver = { 1026static struct eisa_driver vortex_eisa_driver = {
1034 .id_table = vortex_eisa_ids, 1027 .id_table = vortex_eisa_ids,
@@ -1039,12 +1032,12 @@ static struct eisa_driver vortex_eisa_driver = {
1039 } 1032 }
1040}; 1033};
1041 1034
1042static int vortex_eisa_probe (struct device *device) 1035static int vortex_eisa_probe(struct device *device)
1043{ 1036{
1044 void __iomem *ioaddr; 1037 void __iomem *ioaddr;
1045 struct eisa_device *edev; 1038 struct eisa_device *edev;
1046 1039
1047 edev = to_eisa_device (device); 1040 edev = to_eisa_device(device);
1048 1041
1049 if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME)) 1042 if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
1050 return -EBUSY; 1043 return -EBUSY;
@@ -1053,7 +1046,7 @@ static int vortex_eisa_probe (struct device *device)
1053 1046
1054 if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12, 1047 if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
1055 edev->id.driver_data, vortex_cards_found)) { 1048 edev->id.driver_data, vortex_cards_found)) {
1056 release_region (edev->base_addr, VORTEX_TOTAL_SIZE); 1049 release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
1057 return -ENODEV; 1050 return -ENODEV;
1058 } 1051 }
1059 1052
@@ -1062,15 +1055,15 @@ static int vortex_eisa_probe (struct device *device)
1062 return 0; 1055 return 0;
1063} 1056}
1064 1057
1065static int vortex_eisa_remove (struct device *device) 1058static int vortex_eisa_remove(struct device *device)
1066{ 1059{
1067 struct eisa_device *edev; 1060 struct eisa_device *edev;
1068 struct net_device *dev; 1061 struct net_device *dev;
1069 struct vortex_private *vp; 1062 struct vortex_private *vp;
1070 void __iomem *ioaddr; 1063 void __iomem *ioaddr;
1071 1064
1072 edev = to_eisa_device (device); 1065 edev = to_eisa_device(device);
1073 dev = eisa_get_drvdata (edev); 1066 dev = eisa_get_drvdata(edev);
1074 1067
1075 if (!dev) { 1068 if (!dev) {
1076 printk("vortex_eisa_remove called for Compaq device!\n"); 1069 printk("vortex_eisa_remove called for Compaq device!\n");
@@ -1080,17 +1073,17 @@ static int vortex_eisa_remove (struct device *device)
1080 vp = netdev_priv(dev); 1073 vp = netdev_priv(dev);
1081 ioaddr = vp->ioaddr; 1074 ioaddr = vp->ioaddr;
1082 1075
1083 unregister_netdev (dev); 1076 unregister_netdev(dev);
1084 iowrite16 (TotalReset|0x14, ioaddr + EL3_CMD); 1077 iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
1085 release_region (dev->base_addr, VORTEX_TOTAL_SIZE); 1078 release_region(dev->base_addr, VORTEX_TOTAL_SIZE);
1086 1079
1087 free_netdev (dev); 1080 free_netdev(dev);
1088 return 0; 1081 return 0;
1089} 1082}
1090#endif 1083#endif
1091 1084
1092/* returns count found (>= 0), or negative on error */ 1085/* returns count found (>= 0), or negative on error */
1093static int __init vortex_eisa_init (void) 1086static int __init vortex_eisa_init(void)
1094{ 1087{
1095 int eisa_found = 0; 1088 int eisa_found = 0;
1096 int orig_cards_found = vortex_cards_found; 1089 int orig_cards_found = vortex_cards_found;
@@ -1121,7 +1114,7 @@ static int __init vortex_eisa_init (void)
1121} 1114}
1122 1115
1123/* returns count (>= 0), or negative on error */ 1116/* returns count (>= 0), or negative on error */
1124static int __devinit vortex_init_one (struct pci_dev *pdev, 1117static int __devinit vortex_init_one(struct pci_dev *pdev,
1125 const struct pci_device_id *ent) 1118 const struct pci_device_id *ent)
1126{ 1119{
1127 int rc, unit, pci_bar; 1120 int rc, unit, pci_bar;
@@ -1129,7 +1122,7 @@ static int __devinit vortex_init_one (struct pci_dev *pdev,
1129 void __iomem *ioaddr; 1122 void __iomem *ioaddr;
1130 1123
1131 /* wake up and enable device */ 1124 /* wake up and enable device */
1132 rc = pci_enable_device (pdev); 1125 rc = pci_enable_device(pdev);
1133 if (rc < 0) 1126 if (rc < 0)
1134 goto out; 1127 goto out;
1135 1128
@@ -1151,7 +1144,7 @@ static int __devinit vortex_init_one (struct pci_dev *pdev,
1151 rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, 1144 rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
1152 ent->driver_data, unit); 1145 ent->driver_data, unit);
1153 if (rc < 0) { 1146 if (rc < 0) {
1154 pci_disable_device (pdev); 1147 pci_disable_device(pdev);
1155 goto out; 1148 goto out;
1156 } 1149 }
1157 1150
@@ -1236,7 +1229,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1236 if (print_info) 1229 if (print_info)
1237 printk (KERN_INFO "See Documentation/networking/vortex.txt\n"); 1230 printk (KERN_INFO "See Documentation/networking/vortex.txt\n");
1238 1231
1239 printk(KERN_INFO "%s: 3Com %s %s at %p. Vers " DRV_VERSION "\n", 1232 printk(KERN_INFO "%s: 3Com %s %s at %p.\n",
1240 print_name, 1233 print_name,
1241 pdev ? "PCI" : "EISA", 1234 pdev ? "PCI" : "EISA",
1242 vci->name, 1235 vci->name,
@@ -1266,7 +1259,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1266 1259
1267 /* enable bus-mastering if necessary */ 1260 /* enable bus-mastering if necessary */
1268 if (vci->flags & PCI_USES_MASTER) 1261 if (vci->flags & PCI_USES_MASTER)
1269 pci_set_master (pdev); 1262 pci_set_master(pdev);
1270 1263
1271 if (vci->drv_flags & IS_VORTEX) { 1264 if (vci->drv_flags & IS_VORTEX) {
1272 u8 pci_latency; 1265 u8 pci_latency;
@@ -1310,7 +1303,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1310 if (pdev) 1303 if (pdev)
1311 pci_set_drvdata(pdev, dev); 1304 pci_set_drvdata(pdev, dev);
1312 if (edev) 1305 if (edev)
1313 eisa_set_drvdata (edev, dev); 1306 eisa_set_drvdata(edev, dev);
1314 1307
1315 vp->media_override = 7; 1308 vp->media_override = 7;
1316 if (option >= 0) { 1309 if (option >= 0) {
@@ -1335,7 +1328,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1335 vp->enable_wol = 1; 1328 vp->enable_wol = 1;
1336 } 1329 }
1337 1330
1338 vp->force_fd = vp->full_duplex; 1331 vp->mii.force_media = vp->full_duplex;
1339 vp->options = option; 1332 vp->options = option;
1340 /* Read the station address from the EEPROM. */ 1333 /* Read the station address from the EEPROM. */
1341 EL3WINDOW(0); 1334 EL3WINDOW(0);
@@ -1625,6 +1618,46 @@ issue_and_wait(struct net_device *dev, int cmd)
1625} 1618}
1626 1619
1627static void 1620static void
1621vortex_set_duplex(struct net_device *dev)
1622{
1623 struct vortex_private *vp = netdev_priv(dev);
1624 void __iomem *ioaddr = vp->ioaddr;
1625
1626 printk(KERN_INFO "%s: setting %s-duplex.\n",
1627 dev->name, (vp->full_duplex) ? "full" : "half");
1628
1629 EL3WINDOW(3);
1630 /* Set the full-duplex bit. */
1631 iowrite16(((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1632 (vp->large_frames ? 0x40 : 0) |
1633 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
1634 0x100 : 0),
1635 ioaddr + Wn3_MAC_Ctrl);
1636
1637 issue_and_wait(dev, TxReset);
1638 /*
1639 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
1640 */
1641 issue_and_wait(dev, RxReset|0x04);
1642}
1643
1644static void vortex_check_media(struct net_device *dev, unsigned int init)
1645{
1646 struct vortex_private *vp = netdev_priv(dev);
1647 unsigned int ok_to_print = 0;
1648
1649 if (vortex_debug > 3)
1650 ok_to_print = 1;
1651
1652 if (mii_check_media(&vp->mii, ok_to_print, init)) {
1653 vp->full_duplex = vp->mii.full_duplex;
1654 vortex_set_duplex(dev);
1655 } else if (init) {
1656 vortex_set_duplex(dev);
1657 }
1658}
1659
1660static void
1628vortex_up(struct net_device *dev) 1661vortex_up(struct net_device *dev)
1629{ 1662{
1630 struct vortex_private *vp = netdev_priv(dev); 1663 struct vortex_private *vp = netdev_priv(dev);
@@ -1684,53 +1717,20 @@ vortex_up(struct net_device *dev)
1684 printk(KERN_DEBUG "%s: Initial media type %s.\n", 1717 printk(KERN_DEBUG "%s: Initial media type %s.\n",
1685 dev->name, media_tbl[dev->if_port].name); 1718 dev->name, media_tbl[dev->if_port].name);
1686 1719
1687 vp->full_duplex = vp->force_fd; 1720 vp->full_duplex = vp->mii.force_media;
1688 config = BFINS(config, dev->if_port, 20, 4); 1721 config = BFINS(config, dev->if_port, 20, 4);
1689 if (vortex_debug > 6) 1722 if (vortex_debug > 6)
1690 printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config); 1723 printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
1691 iowrite32(config, ioaddr + Wn3_Config); 1724 iowrite32(config, ioaddr + Wn3_Config);
1692 1725
1726 netif_carrier_off(dev);
1693 if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { 1727 if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
1694 int mii_reg1, mii_reg5;
1695 EL3WINDOW(4); 1728 EL3WINDOW(4);
1696 /* Read BMSR (reg1) only to clear old status. */ 1729 vortex_check_media(dev, 1);
1697 mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
1698 mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
1699 if (mii_reg5 == 0xffff || mii_reg5 == 0x0000) {
1700 netif_carrier_off(dev); /* No MII device or no link partner report */
1701 } else {
1702 mii_reg5 &= vp->advertising;
1703 if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */
1704 || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
1705 vp->full_duplex = 1;
1706 netif_carrier_on(dev);
1707 }
1708 vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
1709 if (vortex_debug > 1)
1710 printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
1711 " info1 %04x, setting %s-duplex.\n",
1712 dev->name, vp->phys[0],
1713 mii_reg1, mii_reg5,
1714 vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
1715 EL3WINDOW(3);
1716 }
1717
1718 /* Set the full-duplex bit. */
1719 iowrite16( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1720 (vp->large_frames ? 0x40 : 0) |
1721 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
1722 ioaddr + Wn3_MAC_Ctrl);
1723
1724 if (vortex_debug > 1) {
1725 printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n",
1726 dev->name, config);
1727 } 1730 }
1731 else
1732 vortex_set_duplex(dev);
1728 1733
1729 issue_and_wait(dev, TxReset);
1730 /*
1731 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
1732 */
1733 issue_and_wait(dev, RxReset|0x04);
1734 1734
1735 iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); 1735 iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
1736 1736
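
The 3c59x hunks replace the driver's open-coded MII link polling with the generic mii_check_media() helper from <linux/mii.h>, wrapped by the new vortex_check_media()/vortex_set_duplex() pair. A conversion like this assumes the driver has filled in a struct mii_if_info up front; the sketch below illustrates that setup with placeholder values and is not taken from this patch:

#include <linux/mii.h>
#include <linux/netdevice.h>

/* The driver's own MDIO accessors (3c59x already provides these). */
extern int mdio_read(struct net_device *dev, int phy_id, int location);
extern void mdio_write(struct net_device *dev, int phy_id, int location, int value);

/* Typically done once in the probe path. */
static void example_mii_setup(struct net_device *dev, struct mii_if_info *mii,
			      int phy_id)
{
	mii->dev = dev;
	mii->phy_id = phy_id;
	mii->phy_id_mask = 0x1f;
	mii->reg_num_mask = 0x1f;
	mii->mdio_read = mdio_read;
	mii->mdio_write = mdio_write;
}

/* The timer/open paths can then simply do:
 *	if (mii_check_media(mii, ok_to_print, init_media))
 *		...reprogram the MAC duplex bit (vortex_set_duplex() here)...
 */
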
@@ -1805,7 +1805,6 @@ vortex_up(struct net_device *dev)
1805 set_8021q_mode(dev, 1); 1805 set_8021q_mode(dev, 1);
1806 iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ 1806 iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
1807 1807
1808// issue_and_wait(dev, SetTxStart|0x07ff);
1809 iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ 1808 iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
1810 iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ 1809 iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
1811 /* Allow status bits to be seen. */ 1810 /* Allow status bits to be seen. */
@@ -1892,7 +1891,7 @@ vortex_timer(unsigned long data)
1892 void __iomem *ioaddr = vp->ioaddr; 1891 void __iomem *ioaddr = vp->ioaddr;
1893 int next_tick = 60*HZ; 1892 int next_tick = 60*HZ;
1894 int ok = 0; 1893 int ok = 0;
1895 int media_status, mii_status, old_window; 1894 int media_status, old_window;
1896 1895
1897 if (vortex_debug > 2) { 1896 if (vortex_debug > 2) {
1898 printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n", 1897 printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n",
@@ -1900,8 +1899,6 @@ vortex_timer(unsigned long data)
1900 printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo); 1899 printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
1901 } 1900 }
1902 1901
1903 if (vp->medialock)
1904 goto leave_media_alone;
1905 disable_irq(dev->irq); 1902 disable_irq(dev->irq);
1906 old_window = ioread16(ioaddr + EL3_CMD) >> 13; 1903 old_window = ioread16(ioaddr + EL3_CMD) >> 13;
1907 EL3WINDOW(4); 1904 EL3WINDOW(4);
@@ -1924,44 +1921,9 @@ vortex_timer(unsigned long data)
1924 break; 1921 break;
1925 case XCVR_MII: case XCVR_NWAY: 1922 case XCVR_MII: case XCVR_NWAY:
1926 { 1923 {
1927 spin_lock_bh(&vp->lock);
1928 mii_status = mdio_read(dev, vp->phys[0], MII_BMSR);
1929 if (!(mii_status & BMSR_LSTATUS)) {
1930 /* Re-read to get actual link status */
1931 mii_status = mdio_read(dev, vp->phys[0], MII_BMSR);
1932 }
1933 ok = 1; 1924 ok = 1;
1934 if (vortex_debug > 2) 1925 spin_lock_bh(&vp->lock);
1935 printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n", 1926 vortex_check_media(dev, 0);
1936 dev->name, mii_status);
1937 if (mii_status & BMSR_LSTATUS) {
1938 int mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
1939 if (! vp->force_fd && mii_reg5 != 0xffff) {
1940 int duplex;
1941
1942 mii_reg5 &= vp->advertising;
1943 duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
1944 if (vp->full_duplex != duplex) {
1945 vp->full_duplex = duplex;
1946 printk(KERN_INFO "%s: Setting %s-duplex based on MII "
1947 "#%d link partner capability of %4.4x.\n",
1948 dev->name, vp->full_duplex ? "full" : "half",
1949 vp->phys[0], mii_reg5);
1950 /* Set the full-duplex bit. */
1951 EL3WINDOW(3);
1952 iowrite16( (vp->full_duplex ? 0x20 : 0) |
1953 (vp->large_frames ? 0x40 : 0) |
1954 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
1955 ioaddr + Wn3_MAC_Ctrl);
1956 if (vortex_debug > 1)
1957 printk(KERN_DEBUG "Setting duplex in Wn3_MAC_Ctrl\n");
1958 /* AKPM: bug: should reset Tx and Rx after setting Duplex. Page 180 */
1959 }
1960 }
1961 netif_carrier_on(dev);
1962 } else {
1963 netif_carrier_off(dev);
1964 }
1965 spin_unlock_bh(&vp->lock); 1927 spin_unlock_bh(&vp->lock);
1966 } 1928 }
1967 break; 1929 break;
@@ -1971,7 +1933,14 @@ vortex_timer(unsigned long data)
1971 dev->name, media_tbl[dev->if_port].name, media_status); 1933 dev->name, media_tbl[dev->if_port].name, media_status);
1972 ok = 1; 1934 ok = 1;
1973 } 1935 }
1974 if ( ! ok) { 1936
1937 if (!netif_carrier_ok(dev))
1938 next_tick = 5*HZ;
1939
1940 if (vp->medialock)
1941 goto leave_media_alone;
1942
1943 if (!ok) {
1975 unsigned int config; 1944 unsigned int config;
1976 1945
1977 do { 1946 do {
@@ -2004,14 +1973,14 @@ vortex_timer(unsigned long data)
2004 printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config); 1973 printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config);
2005 /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ 1974 /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */
2006 } 1975 }
2007 EL3WINDOW(old_window);
2008 enable_irq(dev->irq);
2009 1976
2010leave_media_alone: 1977leave_media_alone:
2011 if (vortex_debug > 2) 1978 if (vortex_debug > 2)
2012 printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n", 1979 printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
2013 dev->name, media_tbl[dev->if_port].name); 1980 dev->name, media_tbl[dev->if_port].name);
2014 1981
1982 EL3WINDOW(old_window);
1983 enable_irq(dev->irq);
2015 mod_timer(&vp->timer, RUN_AT(next_tick)); 1984 mod_timer(&vp->timer, RUN_AT(next_tick));
2016 if (vp->deferred) 1985 if (vp->deferred)
2017 iowrite16(FakeIntr, ioaddr + EL3_CMD); 1986 iowrite16(FakeIntr, ioaddr + EL3_CMD);
@@ -2206,7 +2175,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2206 if (vp->bus_master) { 2175 if (vp->bus_master) {
2207 /* Set the bus-master controller to transfer the packet. */ 2176 /* Set the bus-master controller to transfer the packet. */
2208 int len = (skb->len + 3) & ~3; 2177 int len = (skb->len + 3) & ~3;
2209 iowrite32( vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE), 2178 iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
2210 ioaddr + Wn7_MasterAddr); 2179 ioaddr + Wn7_MasterAddr);
2211 iowrite16(len, ioaddr + Wn7_MasterLen); 2180 iowrite16(len, ioaddr + Wn7_MasterLen);
2212 vp->tx_skb = skb; 2181 vp->tx_skb = skb;
@@ -2983,20 +2952,6 @@ static int vortex_nway_reset(struct net_device *dev)
2983 return rc; 2952 return rc;
2984} 2953}
2985 2954
2986static u32 vortex_get_link(struct net_device *dev)
2987{
2988 struct vortex_private *vp = netdev_priv(dev);
2989 void __iomem *ioaddr = vp->ioaddr;
2990 unsigned long flags;
2991 int rc;
2992
2993 spin_lock_irqsave(&vp->lock, flags);
2994 EL3WINDOW(4);
2995 rc = mii_link_ok(&vp->mii);
2996 spin_unlock_irqrestore(&vp->lock, flags);
2997 return rc;
2998}
2999
3000static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2955static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3001{ 2956{
3002 struct vortex_private *vp = netdev_priv(dev); 2957 struct vortex_private *vp = netdev_priv(dev);
@@ -3077,7 +3032,6 @@ static void vortex_get_drvinfo(struct net_device *dev,
3077 struct vortex_private *vp = netdev_priv(dev); 3032 struct vortex_private *vp = netdev_priv(dev);
3078 3033
3079 strcpy(info->driver, DRV_NAME); 3034 strcpy(info->driver, DRV_NAME);
3080 strcpy(info->version, DRV_VERSION);
3081 if (VORTEX_PCI(vp)) { 3035 if (VORTEX_PCI(vp)) {
3082 strcpy(info->bus_info, pci_name(VORTEX_PCI(vp))); 3036 strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
3083 } else { 3037 } else {
@@ -3098,9 +3052,9 @@ static struct ethtool_ops vortex_ethtool_ops = {
3098 .get_stats_count = vortex_get_stats_count, 3052 .get_stats_count = vortex_get_stats_count,
3099 .get_settings = vortex_get_settings, 3053 .get_settings = vortex_get_settings,
3100 .set_settings = vortex_set_settings, 3054 .set_settings = vortex_set_settings,
3101 .get_link = vortex_get_link, 3055 .get_link = ethtool_op_get_link,
3102 .nway_reset = vortex_nway_reset, 3056 .nway_reset = vortex_nway_reset,
3103 .get_perm_addr = ethtool_op_get_perm_addr, 3057 .get_perm_addr = ethtool_op_get_perm_addr,
3104}; 3058};
3105 3059
3106#ifdef CONFIG_PCI 3060#ifdef CONFIG_PCI
@@ -3301,7 +3255,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
3301 } 3255 }
3302 return; 3256 return;
3303} 3257}
3304 3258
3305/* ACPI: Advanced Configuration and Power Interface. */ 3259/* ACPI: Advanced Configuration and Power Interface. */
3306/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */ 3260/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
3307static void acpi_set_WOL(struct net_device *dev) 3261static void acpi_set_WOL(struct net_device *dev)
@@ -3325,7 +3279,7 @@ static void acpi_set_WOL(struct net_device *dev)
3325} 3279}
3326 3280
3327 3281
3328static void __devexit vortex_remove_one (struct pci_dev *pdev) 3282static void __devexit vortex_remove_one(struct pci_dev *pdev)
3329{ 3283{
3330 struct net_device *dev = pci_get_drvdata(pdev); 3284 struct net_device *dev = pci_get_drvdata(pdev);
3331 struct vortex_private *vp; 3285 struct vortex_private *vp;
@@ -3381,7 +3335,7 @@ static int vortex_have_pci;
3381static int vortex_have_eisa; 3335static int vortex_have_eisa;
3382 3336
3383 3337
3384static int __init vortex_init (void) 3338static int __init vortex_init(void)
3385{ 3339{
3386 int pci_rc, eisa_rc; 3340 int pci_rc, eisa_rc;
3387 3341
@@ -3397,14 +3351,14 @@ static int __init vortex_init (void)
3397} 3351}
3398 3352
3399 3353
3400static void __exit vortex_eisa_cleanup (void) 3354static void __exit vortex_eisa_cleanup(void)
3401{ 3355{
3402 struct vortex_private *vp; 3356 struct vortex_private *vp;
3403 void __iomem *ioaddr; 3357 void __iomem *ioaddr;
3404 3358
3405#ifdef CONFIG_EISA 3359#ifdef CONFIG_EISA
3406 /* Take care of the EISA devices */ 3360 /* Take care of the EISA devices */
3407 eisa_driver_unregister (&vortex_eisa_driver); 3361 eisa_driver_unregister(&vortex_eisa_driver);
3408#endif 3362#endif
3409 3363
3410 if (compaq_net_device) { 3364 if (compaq_net_device) {
@@ -3412,33 +3366,24 @@ static void __exit vortex_eisa_cleanup (void)
3412 ioaddr = ioport_map(compaq_net_device->base_addr, 3366 ioaddr = ioport_map(compaq_net_device->base_addr,
3413 VORTEX_TOTAL_SIZE); 3367 VORTEX_TOTAL_SIZE);
3414 3368
3415 unregister_netdev (compaq_net_device); 3369 unregister_netdev(compaq_net_device);
3416 iowrite16 (TotalReset, ioaddr + EL3_CMD); 3370 iowrite16(TotalReset, ioaddr + EL3_CMD);
3417 release_region(compaq_net_device->base_addr, 3371 release_region(compaq_net_device->base_addr,
3418 VORTEX_TOTAL_SIZE); 3372 VORTEX_TOTAL_SIZE);
3419 3373
3420 free_netdev (compaq_net_device); 3374 free_netdev(compaq_net_device);
3421 } 3375 }
3422} 3376}
3423 3377
3424 3378
3425static void __exit vortex_cleanup (void) 3379static void __exit vortex_cleanup(void)
3426{ 3380{
3427 if (vortex_have_pci) 3381 if (vortex_have_pci)
3428 pci_unregister_driver (&vortex_driver); 3382 pci_unregister_driver(&vortex_driver);
3429 if (vortex_have_eisa) 3383 if (vortex_have_eisa)
3430 vortex_eisa_cleanup (); 3384 vortex_eisa_cleanup();
3431} 3385}
3432 3386
3433 3387
3434module_init(vortex_init); 3388module_init(vortex_init);
3435module_exit(vortex_cleanup); 3389module_exit(vortex_cleanup);
3436
3437
3438/*
3439 * Local variables:
3440 * c-indent-level: 4
3441 * c-basic-offset: 4
3442 * tab-width: 4
3443 * End:
3444 */
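Editorial note on the 3c59x hunks above: the duplicated MII polling code in vortex_up() and vortex_timer() is folded into vortex_check_media()/vortex_set_duplex(), and the driver-private vortex_get_link() is dropped in favour of the generic ethtool_op_get_link() helper, which simply reports the carrier state the driver now keeps current with netif_carrier_on()/netif_carrier_off(). A minimal sketch of that pattern, with hypothetical names (my_check_media, my_ethtool_ops) that are not taken from the patch:

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>

/* Hypothetical periodic media check, in the spirit of vortex_check_media(). */
static void my_check_media(struct net_device *dev, struct mii_if_info *mii)
{
	if (mii_link_ok(mii))
		netif_carrier_on(dev);	/* ethtool_op_get_link() now reports 1 */
	else
		netif_carrier_off(dev);	/* ethtool_op_get_link() now reports 0 */
}

static struct ethtool_ops my_ethtool_ops = {
	.get_link = ethtool_op_get_link,	/* returns netif_carrier_ok(dev) */
};

Because the carrier flag is kept up to date from the timer, no extra register-window switching or locking is needed just to answer an ETHTOOL_GLINK query.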
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 253440a98022..8429ceb01389 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1693,7 +1693,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance, struct pt_regs
1693 * 1693 *
1694 * Process receive interrupt events, 1694 * Process receive interrupt events,
1695 * put buffer to higher layer and refill buffer pool 1695 * put buffer to higher layer and refill buffer pool
1696 * Note: This fucntion is called by interrupt handler, 1696 * Note: This function is called by interrupt handler,
1697 * don't do "too much" work here 1697 * don't do "too much" work here
1698 */ 1698 */
1699 1699
@@ -1840,7 +1840,7 @@ static int sis900_rx(struct net_device *net_dev)
1840 * 1840 *
1841 * Check for error condition and free socket buffer etc 1841 * Check for error condition and free socket buffer etc
1842 * schedule for more transmission as needed 1842 * schedule for more transmission as needed
1843 * Note: This fucntion is called by interrupt handler, 1843 * Note: This function is called by interrupt handler,
1844 * don't do "too much" work here 1844 * don't do "too much" work here
1845 */ 1845 */
1846 1846
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 1ff5de076d21..4505540e3c59 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -105,6 +105,7 @@
105#include <linux/delay.h> 105#include <linux/delay.h>
106#include <net/syncppp.h> 106#include <net/syncppp.h>
107#include <linux/hdlc.h> 107#include <linux/hdlc.h>
108#include <linux/mutex.h>
108 109
109/* Version */ 110/* Version */
110static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n"; 111static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n";
@@ -112,7 +113,7 @@ static int debug;
112static int quartz; 113static int quartz;
113 114
114#ifdef CONFIG_DSCC4_PCI_RST 115#ifdef CONFIG_DSCC4_PCI_RST
115static DECLARE_MUTEX(dscc4_sem); 116static DEFINE_MUTEX(dscc4_mutex);
116static u32 dscc4_pci_config_store[16]; 117static u32 dscc4_pci_config_store[16];
117#endif 118#endif
118 119
@@ -1018,7 +1019,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
1018{ 1019{
1019 int i; 1020 int i;
1020 1021
1021 down(&dscc4_sem); 1022 mutex_lock(&dscc4_mutex);
1022 for (i = 0; i < 16; i++) 1023 for (i = 0; i < 16; i++)
1023 pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i); 1024 pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);
1024 1025
@@ -1039,7 +1040,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
1039 1040
1040 for (i = 0; i < 16; i++) 1041 for (i = 0; i < 16; i++)
1041 pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); 1042 pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
1042 up(&dscc4_sem); 1043 mutex_unlock(&dscc4_mutex);
1043} 1044}
1044#else 1045#else
1045#define dscc4_pci_reset(pdev,ioaddr) do {} while (0) 1046#define dscc4_pci_reset(pdev,ioaddr) do {} while (0)
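The dscc4 change above (and the parport, rpadlpar, sgi_hotplug, isapnp, raw3270 and phonedev changes below) converts a semaphore that was only ever used for mutual exclusion (DECLARE_MUTEX()/down()/up()) to the dedicated mutex primitive from <linux/mutex.h>. A minimal sketch of the before/after pattern, with hypothetical names:

#include <linux/mutex.h>

static u32 cfg_store[16];		/* example data guarded by the lock */
static DEFINE_MUTEX(cfg_mutex);		/* was: static DECLARE_MUTEX(cfg_sem); */

static void cfg_update(int idx, u32 val)
{
	mutex_lock(&cfg_mutex);		/* was: down(&cfg_sem); may sleep */
	cfg_store[idx] = val;
	mutex_unlock(&cfg_mutex);	/* was: up(&cfg_sem); */
}

The locking rules are unchanged; the mutex type just makes the intent explicit and lets the kernel's mutex debugging checks apply.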
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index ea62bed6bc83..bbbfd79adbaf 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -32,6 +32,7 @@
32#include <linux/kmod.h> 32#include <linux/kmod.h>
33 33
34#include <linux/spinlock.h> 34#include <linux/spinlock.h>
35#include <linux/mutex.h>
35#include <asm/irq.h> 36#include <asm/irq.h>
36 37
37#undef PARPORT_PARANOID 38#undef PARPORT_PARANOID
@@ -50,7 +51,7 @@ static DEFINE_SPINLOCK(full_list_lock);
50 51
51static LIST_HEAD(drivers); 52static LIST_HEAD(drivers);
52 53
53static DECLARE_MUTEX(registration_lock); 54static DEFINE_MUTEX(registration_lock);
54 55
55/* What you can do to a port that's gone away.. */ 56/* What you can do to a port that's gone away.. */
56static void dead_write_lines (struct parport *p, unsigned char b){} 57static void dead_write_lines (struct parport *p, unsigned char b){}
@@ -158,11 +159,11 @@ int parport_register_driver (struct parport_driver *drv)
158 if (list_empty(&portlist)) 159 if (list_empty(&portlist))
159 get_lowlevel_driver (); 160 get_lowlevel_driver ();
160 161
161 down(&registration_lock); 162 mutex_lock(&registration_lock);
162 list_for_each_entry(port, &portlist, list) 163 list_for_each_entry(port, &portlist, list)
163 drv->attach(port); 164 drv->attach(port);
164 list_add(&drv->list, &drivers); 165 list_add(&drv->list, &drivers);
165 up(&registration_lock); 166 mutex_unlock(&registration_lock);
166 167
167 return 0; 168 return 0;
168} 169}
@@ -188,11 +189,11 @@ void parport_unregister_driver (struct parport_driver *drv)
188{ 189{
189 struct parport *port; 190 struct parport *port;
190 191
191 down(&registration_lock); 192 mutex_lock(&registration_lock);
192 list_del_init(&drv->list); 193 list_del_init(&drv->list);
193 list_for_each_entry(port, &portlist, list) 194 list_for_each_entry(port, &portlist, list)
194 drv->detach(port); 195 drv->detach(port);
195 up(&registration_lock); 196 mutex_unlock(&registration_lock);
196} 197}
197 198
198static void free_port (struct parport *port) 199static void free_port (struct parport *port)
@@ -366,7 +367,7 @@ void parport_announce_port (struct parport *port)
366#endif 367#endif
367 368
368 parport_proc_register(port); 369 parport_proc_register(port);
369 down(&registration_lock); 370 mutex_lock(&registration_lock);
370 spin_lock_irq(&parportlist_lock); 371 spin_lock_irq(&parportlist_lock);
371 list_add_tail(&port->list, &portlist); 372 list_add_tail(&port->list, &portlist);
372 for (i = 1; i < 3; i++) { 373 for (i = 1; i < 3; i++) {
@@ -383,7 +384,7 @@ void parport_announce_port (struct parport *port)
383 if (slave) 384 if (slave)
384 attach_driver_chain(slave); 385 attach_driver_chain(slave);
385 } 386 }
386 up(&registration_lock); 387 mutex_unlock(&registration_lock);
387} 388}
388 389
389/** 390/**
@@ -409,7 +410,7 @@ void parport_remove_port(struct parport *port)
409{ 410{
410 int i; 411 int i;
411 412
412 down(&registration_lock); 413 mutex_lock(&registration_lock);
413 414
414 /* Spread the word. */ 415 /* Spread the word. */
415 detach_driver_chain (port); 416 detach_driver_chain (port);
@@ -436,7 +437,7 @@ void parport_remove_port(struct parport *port)
436 } 437 }
437 spin_unlock(&parportlist_lock); 438 spin_unlock(&parportlist_lock);
438 439
439 up(&registration_lock); 440 mutex_unlock(&registration_lock);
440 441
441 parport_proc_unregister(port); 442 parport_proc_unregister(port);
442 443
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 3eefe2cec72d..46825fee3ae4 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -19,7 +19,7 @@
19#include <linux/string.h> 19#include <linux/string.h>
20 20
21#include <asm/pci-bridge.h> 21#include <asm/pci-bridge.h>
22#include <asm/semaphore.h> 22#include <linux/mutex.h>
23#include <asm/rtas.h> 23#include <asm/rtas.h>
24#include <asm/vio.h> 24#include <asm/vio.h>
25 25
@@ -27,7 +27,7 @@
27#include "rpaphp.h" 27#include "rpaphp.h"
28#include "rpadlpar.h" 28#include "rpadlpar.h"
29 29
30static DECLARE_MUTEX(rpadlpar_sem); 30static DEFINE_MUTEX(rpadlpar_mutex);
31 31
32#define DLPAR_MODULE_NAME "rpadlpar_io" 32#define DLPAR_MODULE_NAME "rpadlpar_io"
33 33
@@ -300,7 +300,7 @@ int dlpar_add_slot(char *drc_name)
300 int node_type; 300 int node_type;
301 int rc = -EIO; 301 int rc = -EIO;
302 302
303 if (down_interruptible(&rpadlpar_sem)) 303 if (mutex_lock_interruptible(&rpadlpar_mutex))
304 return -ERESTARTSYS; 304 return -ERESTARTSYS;
305 305
306 /* Find newly added node */ 306 /* Find newly added node */
@@ -324,7 +324,7 @@ int dlpar_add_slot(char *drc_name)
324 324
325 printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); 325 printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
326exit: 326exit:
327 up(&rpadlpar_sem); 327 mutex_unlock(&rpadlpar_mutex);
328 return rc; 328 return rc;
329} 329}
330 330
@@ -417,7 +417,7 @@ int dlpar_remove_slot(char *drc_name)
417 int node_type; 417 int node_type;
418 int rc = 0; 418 int rc = 0;
419 419
420 if (down_interruptible(&rpadlpar_sem)) 420 if (mutex_lock_interruptible(&rpadlpar_mutex))
421 return -ERESTARTSYS; 421 return -ERESTARTSYS;
422 422
423 dn = find_dlpar_node(drc_name, &node_type); 423 dn = find_dlpar_node(drc_name, &node_type);
@@ -439,7 +439,7 @@ int dlpar_remove_slot(char *drc_name)
439 } 439 }
440 printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); 440 printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
441exit: 441exit:
442 up(&rpadlpar_sem); 442 mutex_unlock(&rpadlpar_mutex);
443 return rc; 443 return rc;
444} 444}
445 445
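rpadlpar above uses the interruptible variant: mutex_lock_interruptible() returns non-zero if a signal arrives while waiting, mirroring the old down_interruptible(). A sketch of the error-handling shape used in dlpar_add_slot()/dlpar_remove_slot(), with a hypothetical lock and helper name:

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(slot_mutex);	/* hypothetical */

static int slot_op(void)
{
	if (mutex_lock_interruptible(&slot_mutex))
		return -ERESTARTSYS;	/* interrupted by a signal; caller may restart */

	/* ... work on the slot while holding the lock ... */

	mutex_unlock(&slot_mutex);
	return 0;
}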
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index c402da8e78ae..8cb9abde736b 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -15,6 +15,7 @@
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/proc_fs.h> 16#include <linux/proc_fs.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/mutex.h>
18 19
19#include <asm/sn/addrs.h> 20#include <asm/sn/addrs.h>
20#include <asm/sn/l1.h> 21#include <asm/sn/l1.h>
@@ -81,7 +82,7 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = {
81 .get_power_status = get_power_status, 82 .get_power_status = get_power_status,
82}; 83};
83 84
84static DECLARE_MUTEX(sn_hotplug_sem); 85static DEFINE_MUTEX(sn_hotplug_mutex);
85 86
86static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot, 87static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot,
87 char *buf) 88 char *buf)
@@ -346,7 +347,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
346 int rc; 347 int rc;
347 348
348 /* Serialize the Linux PCI infrastructure */ 349 /* Serialize the Linux PCI infrastructure */
349 down(&sn_hotplug_sem); 350 mutex_lock(&sn_hotplug_mutex);
350 351
351 /* 352 /*
352 * Power-on and initialize the slot in the SN 353 * Power-on and initialize the slot in the SN
@@ -354,7 +355,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
354 */ 355 */
355 rc = sn_slot_enable(bss_hotplug_slot, slot->device_num); 356 rc = sn_slot_enable(bss_hotplug_slot, slot->device_num);
356 if (rc) { 357 if (rc) {
357 up(&sn_hotplug_sem); 358 mutex_unlock(&sn_hotplug_mutex);
358 return rc; 359 return rc;
359 } 360 }
360 361
@@ -362,7 +363,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
362 PCI_DEVFN(slot->device_num + 1, 0)); 363 PCI_DEVFN(slot->device_num + 1, 0));
363 if (!num_funcs) { 364 if (!num_funcs) {
364 dev_dbg(slot->pci_bus->self, "no device in slot\n"); 365 dev_dbg(slot->pci_bus->self, "no device in slot\n");
365 up(&sn_hotplug_sem); 366 mutex_unlock(&sn_hotplug_mutex);
366 return -ENODEV; 367 return -ENODEV;
367 } 368 }
368 369
@@ -402,7 +403,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
402 if (new_ppb) 403 if (new_ppb)
403 pci_bus_add_devices(new_bus); 404 pci_bus_add_devices(new_bus);
404 405
405 up(&sn_hotplug_sem); 406 mutex_unlock(&sn_hotplug_mutex);
406 407
407 if (rc == 0) 408 if (rc == 0)
408 dev_dbg(slot->pci_bus->self, 409 dev_dbg(slot->pci_bus->self,
@@ -422,7 +423,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
422 int rc; 423 int rc;
423 424
424 /* Acquire update access to the bus */ 425 /* Acquire update access to the bus */
425 down(&sn_hotplug_sem); 426 mutex_lock(&sn_hotplug_mutex);
426 427
427 /* is it okay to bring this slot down? */ 428 /* is it okay to bring this slot down? */
428 rc = sn_slot_disable(bss_hotplug_slot, slot->device_num, 429 rc = sn_slot_disable(bss_hotplug_slot, slot->device_num,
@@ -450,7 +451,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
450 PCI_REQ_SLOT_DISABLE); 451 PCI_REQ_SLOT_DISABLE);
451 leaving: 452 leaving:
452 /* Release the bus lock */ 453 /* Release the bus lock */
453 up(&sn_hotplug_sem); 454 mutex_unlock(&sn_hotplug_mutex);
454 455
455 return rc; 456 return rc;
456} 457}
@@ -462,9 +463,9 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
462 struct pcibus_info *pcibus_info; 463 struct pcibus_info *pcibus_info;
463 464
464 pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); 465 pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
465 down(&sn_hotplug_sem); 466 mutex_lock(&sn_hotplug_mutex);
466 *value = pcibus_info->pbi_enabled_devices & (1 << slot->device_num); 467 *value = pcibus_info->pbi_enabled_devices & (1 << slot->device_num);
467 up(&sn_hotplug_sem); 468 mutex_unlock(&sn_hotplug_mutex);
468 return 0; 469 return 0;
469} 470}
470 471
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index b1b4b683cbdd..ac7c2bb6c69e 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -42,6 +42,7 @@
42#include <linux/delay.h> 42#include <linux/delay.h>
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/isapnp.h> 44#include <linux/isapnp.h>
45#include <linux/mutex.h>
45#include <asm/io.h> 46#include <asm/io.h>
46 47
47#if 0 48#if 0
@@ -92,7 +93,7 @@ MODULE_LICENSE("GPL");
92#define _LTAG_FIXEDMEM32RANGE 0x86 93#define _LTAG_FIXEDMEM32RANGE 0x86
93 94
94static unsigned char isapnp_checksum_value; 95static unsigned char isapnp_checksum_value;
95static DECLARE_MUTEX(isapnp_cfg_mutex); 96static DEFINE_MUTEX(isapnp_cfg_mutex);
96static int isapnp_detected; 97static int isapnp_detected;
97static int isapnp_csn_count; 98static int isapnp_csn_count;
98 99
@@ -903,7 +904,7 @@ int isapnp_cfg_begin(int csn, int logdev)
903{ 904{
904 if (csn < 1 || csn > isapnp_csn_count || logdev > 10) 905 if (csn < 1 || csn > isapnp_csn_count || logdev > 10)
905 return -EINVAL; 906 return -EINVAL;
906 down(&isapnp_cfg_mutex); 907 mutex_lock(&isapnp_cfg_mutex);
907 isapnp_wait(); 908 isapnp_wait();
908 isapnp_key(); 909 isapnp_key();
909 isapnp_wake(csn); 910 isapnp_wake(csn);
@@ -929,7 +930,7 @@ int isapnp_cfg_begin(int csn, int logdev)
929int isapnp_cfg_end(void) 930int isapnp_cfg_end(void)
930{ 931{
931 isapnp_wait(); 932 isapnp_wait();
932 up(&isapnp_cfg_mutex); 933 mutex_unlock(&isapnp_cfg_mutex);
933 return 0; 934 return 0;
934} 935}
935 936
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index bd06607a5dcc..eecb2afad5c2 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -28,6 +28,7 @@
28#include <linux/major.h> 28#include <linux/major.h>
29#include <linux/kdev_t.h> 29#include <linux/kdev_t.h>
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/mutex.h>
31 32
32struct class *class3270; 33struct class *class3270;
33 34
@@ -59,7 +60,7 @@ struct raw3270 {
59#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ 60#define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */
60 61
61/* Semaphore to protect global data of raw3270 (devices, views, etc). */ 62/* Semaphore to protect global data of raw3270 (devices, views, etc). */
62static DECLARE_MUTEX(raw3270_sem); 63static DEFINE_MUTEX(raw3270_mutex);
63 64
64/* List of 3270 devices. */ 65/* List of 3270 devices. */
65static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices); 66static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices);
@@ -815,7 +816,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
815 * number for it. Note: there is no device with minor 0, 816 * number for it. Note: there is no device with minor 0,
816 * see special case for fs3270.c:fs3270_open(). 817 * see special case for fs3270.c:fs3270_open().
817 */ 818 */
818 down(&raw3270_sem); 819 mutex_lock(&raw3270_mutex);
819 /* Keep the list sorted. */ 820 /* Keep the list sorted. */
820 minor = RAW3270_FIRSTMINOR; 821 minor = RAW3270_FIRSTMINOR;
821 rp->minor = -1; 822 rp->minor = -1;
@@ -832,7 +833,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
832 rp->minor = minor; 833 rp->minor = minor;
833 list_add_tail(&rp->list, &raw3270_devices); 834 list_add_tail(&rp->list, &raw3270_devices);
834 } 835 }
835 up(&raw3270_sem); 836 mutex_unlock(&raw3270_mutex);
836 /* No free minor number? Then give up. */ 837 /* No free minor number? Then give up. */
837 if (rp->minor == -1) 838 if (rp->minor == -1)
838 return -EUSERS; 839 return -EUSERS;
@@ -1003,7 +1004,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
1003 1004
1004 if (minor <= 0) 1005 if (minor <= 0)
1005 return -ENODEV; 1006 return -ENODEV;
1006 down(&raw3270_sem); 1007 mutex_lock(&raw3270_mutex);
1007 rc = -ENODEV; 1008 rc = -ENODEV;
1008 list_for_each_entry(rp, &raw3270_devices, list) { 1009 list_for_each_entry(rp, &raw3270_devices, list) {
1009 if (rp->minor != minor) 1010 if (rp->minor != minor)
@@ -1024,7 +1025,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor)
1024 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 1025 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
1025 break; 1026 break;
1026 } 1027 }
1027 up(&raw3270_sem); 1028 mutex_unlock(&raw3270_mutex);
1028 return rc; 1029 return rc;
1029} 1030}
1030 1031
@@ -1038,7 +1039,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor)
1038 struct raw3270_view *view, *tmp; 1039 struct raw3270_view *view, *tmp;
1039 unsigned long flags; 1040 unsigned long flags;
1040 1041
1041 down(&raw3270_sem); 1042 mutex_lock(&raw3270_mutex);
1042 view = ERR_PTR(-ENODEV); 1043 view = ERR_PTR(-ENODEV);
1043 list_for_each_entry(rp, &raw3270_devices, list) { 1044 list_for_each_entry(rp, &raw3270_devices, list) {
1044 if (rp->minor != minor) 1045 if (rp->minor != minor)
@@ -1057,7 +1058,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor)
1057 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); 1058 spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
1058 break; 1059 break;
1059 } 1060 }
1060 up(&raw3270_sem); 1061 mutex_unlock(&raw3270_mutex);
1061 return view; 1062 return view;
1062} 1063}
1063 1064
@@ -1104,7 +1105,7 @@ raw3270_delete_device(struct raw3270 *rp)
1104 struct ccw_device *cdev; 1105 struct ccw_device *cdev;
1105 1106
1106 /* Remove from device chain. */ 1107 /* Remove from device chain. */
1107 down(&raw3270_sem); 1108 mutex_lock(&raw3270_mutex);
1108 if (rp->clttydev) 1109 if (rp->clttydev)
1109 class_device_destroy(class3270, 1110 class_device_destroy(class3270,
1110 MKDEV(IBM_TTY3270_MAJOR, rp->minor)); 1111 MKDEV(IBM_TTY3270_MAJOR, rp->minor));
@@ -1112,7 +1113,7 @@ raw3270_delete_device(struct raw3270 *rp)
1112 class_device_destroy(class3270, 1113 class_device_destroy(class3270,
1113 MKDEV(IBM_FS3270_MAJOR, rp->minor)); 1114 MKDEV(IBM_FS3270_MAJOR, rp->minor));
1114 list_del_init(&rp->list); 1115 list_del_init(&rp->list);
1115 up(&raw3270_sem); 1116 mutex_unlock(&raw3270_mutex);
1116 1117
1117 /* Disconnect from ccw_device. */ 1118 /* Disconnect from ccw_device. */
1118 cdev = rp->cdev; 1119 cdev = rp->cdev;
@@ -1208,13 +1209,13 @@ int raw3270_register_notifier(void (*notifier)(int, int))
1208 if (!np) 1209 if (!np)
1209 return -ENOMEM; 1210 return -ENOMEM;
1210 np->notifier = notifier; 1211 np->notifier = notifier;
1211 down(&raw3270_sem); 1212 mutex_lock(&raw3270_mutex);
1212 list_add_tail(&np->list, &raw3270_notifier); 1213 list_add_tail(&np->list, &raw3270_notifier);
1213 list_for_each_entry(rp, &raw3270_devices, list) { 1214 list_for_each_entry(rp, &raw3270_devices, list) {
1214 get_device(&rp->cdev->dev); 1215 get_device(&rp->cdev->dev);
1215 notifier(rp->minor, 1); 1216 notifier(rp->minor, 1);
1216 } 1217 }
1217 up(&raw3270_sem); 1218 mutex_unlock(&raw3270_mutex);
1218 return 0; 1219 return 0;
1219} 1220}
1220 1221
@@ -1222,14 +1223,14 @@ void raw3270_unregister_notifier(void (*notifier)(int, int))
1222{ 1223{
1223 struct raw3270_notifier *np; 1224 struct raw3270_notifier *np;
1224 1225
1225 down(&raw3270_sem); 1226 mutex_lock(&raw3270_mutex);
1226 list_for_each_entry(np, &raw3270_notifier, list) 1227 list_for_each_entry(np, &raw3270_notifier, list)
1227 if (np->notifier == notifier) { 1228 if (np->notifier == notifier) {
1228 list_del(&np->list); 1229 list_del(&np->list);
1229 kfree(np); 1230 kfree(np);
1230 break; 1231 break;
1231 } 1232 }
1232 up(&raw3270_sem); 1233 mutex_unlock(&raw3270_mutex);
1233} 1234}
1234 1235
1235/* 1236/*
@@ -1256,10 +1257,10 @@ raw3270_set_online (struct ccw_device *cdev)
1256 goto failure; 1257 goto failure;
1257 raw3270_create_attributes(rp); 1258 raw3270_create_attributes(rp);
1258 set_bit(RAW3270_FLAGS_READY, &rp->flags); 1259 set_bit(RAW3270_FLAGS_READY, &rp->flags);
1259 down(&raw3270_sem); 1260 mutex_lock(&raw3270_mutex);
1260 list_for_each_entry(np, &raw3270_notifier, list) 1261 list_for_each_entry(np, &raw3270_notifier, list)
1261 np->notifier(rp->minor, 1); 1262 np->notifier(rp->minor, 1);
1262 up(&raw3270_sem); 1263 mutex_unlock(&raw3270_mutex);
1263 return 0; 1264 return 0;
1264 1265
1265failure: 1266failure:
@@ -1307,10 +1308,10 @@ raw3270_remove (struct ccw_device *cdev)
1307 } 1308 }
1308 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1309 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1309 1310
1310 down(&raw3270_sem); 1311 mutex_lock(&raw3270_mutex);
1311 list_for_each_entry(np, &raw3270_notifier, list) 1312 list_for_each_entry(np, &raw3270_notifier, list)
1312 np->notifier(rp->minor, 0); 1313 np->notifier(rp->minor, 0);
1313 up(&raw3270_sem); 1314 mutex_unlock(&raw3270_mutex);
1314 1315
1315 /* Reset 3270 device. */ 1316 /* Reset 3270 device. */
1316 raw3270_reset_device(rp); 1317 raw3270_reset_device(rp);
@@ -1370,13 +1371,13 @@ raw3270_init(void)
1370 rc = ccw_driver_register(&raw3270_ccw_driver); 1371 rc = ccw_driver_register(&raw3270_ccw_driver);
1371 if (rc == 0) { 1372 if (rc == 0) {
1372 /* Create attributes for early (= console) device. */ 1373 /* Create attributes for early (= console) device. */
1373 down(&raw3270_sem); 1374 mutex_lock(&raw3270_mutex);
1374 class3270 = class_create(THIS_MODULE, "3270"); 1375 class3270 = class_create(THIS_MODULE, "3270");
1375 list_for_each_entry(rp, &raw3270_devices, list) { 1376 list_for_each_entry(rp, &raw3270_devices, list) {
1376 get_device(&rp->cdev->dev); 1377 get_device(&rp->cdev->dev);
1377 raw3270_create_attributes(rp); 1378 raw3270_create_attributes(rp);
1378 } 1379 }
1379 up(&raw3270_sem); 1380 mutex_unlock(&raw3270_mutex);
1380 } 1381 }
1381 return rc; 1382 return rc;
1382} 1383}
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 95b92f317b6f..395cfc6a344f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -829,18 +829,6 @@ zfcp_unit_dequeue(struct zfcp_unit *unit)
829 device_unregister(&unit->sysfs_device); 829 device_unregister(&unit->sysfs_device);
830} 830}
831 831
832static void *
833zfcp_mempool_alloc(gfp_t gfp_mask, void *size)
834{
835 return kmalloc((size_t) size, gfp_mask);
836}
837
838static void
839zfcp_mempool_free(void *element, void *size)
840{
841 kfree(element);
842}
843
844/* 832/*
845 * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI 833 * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI
846 * commands. 834 * commands.
@@ -853,51 +841,39 @@ static int
853zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) 841zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
854{ 842{
855 adapter->pool.fsf_req_erp = 843 adapter->pool.fsf_req_erp =
856 mempool_create(ZFCP_POOL_FSF_REQ_ERP_NR, 844 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ERP_NR,
857 zfcp_mempool_alloc, zfcp_mempool_free, (void *) 845 sizeof(struct zfcp_fsf_req_pool_element));
858 sizeof(struct zfcp_fsf_req_pool_element)); 846 if (!adapter->pool.fsf_req_erp)
859
860 if (NULL == adapter->pool.fsf_req_erp)
861 return -ENOMEM; 847 return -ENOMEM;
862 848
863 adapter->pool.fsf_req_scsi = 849 adapter->pool.fsf_req_scsi =
864 mempool_create(ZFCP_POOL_FSF_REQ_SCSI_NR, 850 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_SCSI_NR,
865 zfcp_mempool_alloc, zfcp_mempool_free, (void *) 851 sizeof(struct zfcp_fsf_req_pool_element));
866 sizeof(struct zfcp_fsf_req_pool_element)); 852 if (!adapter->pool.fsf_req_scsi)
867
868 if (NULL == adapter->pool.fsf_req_scsi)
869 return -ENOMEM; 853 return -ENOMEM;
870 854
871 adapter->pool.fsf_req_abort = 855 adapter->pool.fsf_req_abort =
872 mempool_create(ZFCP_POOL_FSF_REQ_ABORT_NR, 856 mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ABORT_NR,
873 zfcp_mempool_alloc, zfcp_mempool_free, (void *) 857 sizeof(struct zfcp_fsf_req_pool_element));
874 sizeof(struct zfcp_fsf_req_pool_element)); 858 if (!adapter->pool.fsf_req_abort)
875
876 if (NULL == adapter->pool.fsf_req_abort)
877 return -ENOMEM; 859 return -ENOMEM;
878 860
879 adapter->pool.fsf_req_status_read = 861 adapter->pool.fsf_req_status_read =
880 mempool_create(ZFCP_POOL_STATUS_READ_NR, 862 mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
881 zfcp_mempool_alloc, zfcp_mempool_free, 863 sizeof(struct zfcp_fsf_req));
882 (void *) sizeof(struct zfcp_fsf_req)); 864 if (!adapter->pool.fsf_req_status_read)
883
884 if (NULL == adapter->pool.fsf_req_status_read)
885 return -ENOMEM; 865 return -ENOMEM;
886 866
887 adapter->pool.data_status_read = 867 adapter->pool.data_status_read =
888 mempool_create(ZFCP_POOL_STATUS_READ_NR, 868 mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR,
889 zfcp_mempool_alloc, zfcp_mempool_free, 869 sizeof(struct fsf_status_read_buffer));
890 (void *) sizeof(struct fsf_status_read_buffer)); 870 if (!adapter->pool.data_status_read)
891
892 if (NULL == adapter->pool.data_status_read)
893 return -ENOMEM; 871 return -ENOMEM;
894 872
895 adapter->pool.data_gid_pn = 873 adapter->pool.data_gid_pn =
896 mempool_create(ZFCP_POOL_DATA_GID_PN_NR, 874 mempool_create_kmalloc_pool(ZFCP_POOL_DATA_GID_PN_NR,
897 zfcp_mempool_alloc, zfcp_mempool_free, (void *) 875 sizeof(struct zfcp_gid_pn_data));
898 sizeof(struct zfcp_gid_pn_data)); 876 if (!adapter->pool.data_gid_pn)
899
900 if (NULL == adapter->pool.data_gid_pn)
901 return -ENOMEM; 877 return -ENOMEM;
902 878
903 return 0; 879 return 0;
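The zfcp change above (and the lpfc change below) removes hand-written kmalloc()/kfree() wrappers that were passed to mempool_create(); mempool_create_kmalloc_pool(min_nr, size) is shorthand for exactly that combination. A minimal sketch with a hypothetical element type:

#include <linux/types.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_req {				/* hypothetical pool element */
	u32 data[16];
};

static mempool_t *req_pool;

static int req_pool_init(void)
{
	/* keep at least 4 pre-allocated elements available under memory pressure */
	req_pool = mempool_create_kmalloc_pool(4, sizeof(struct my_req));
	if (!req_pool)
		return -ENOMEM;
	return 0;
}

/* elements are obtained with mempool_alloc(req_pool, GFP_KERNEL) and returned with mempool_free() */
static void req_pool_exit(void)
{
	mempool_destroy(req_pool);
}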
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7b82ff090d42..2068b66822b7 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -3200,8 +3200,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session)
3200 * Data-Out PDU's within R2T-sequence can be quite big; 3200 * Data-Out PDU's within R2T-sequence can be quite big;
3201 * using mempool 3201 * using mempool
3202 */ 3202 */
3203 ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX, 3203 ctask->datapool = mempool_create_slab_pool(ISCSI_DTASK_DEFAULT_MAX,
3204 mempool_alloc_slab, mempool_free_slab, taskcache); 3204 taskcache);
3205 if (ctask->datapool == NULL) { 3205 if (ctask->datapool == NULL) {
3206 kfifo_free(ctask->r2tqueue); 3206 kfifo_free(ctask->r2tqueue);
3207 iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts); 3207 iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 352df47bcaca..07017658ac56 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -38,18 +38,6 @@
38#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ 38#define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */
39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ 39#define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */
40 40
41static void *
42lpfc_pool_kmalloc(gfp_t gfp_flags, void *data)
43{
44 return kmalloc((unsigned long)data, gfp_flags);
45}
46
47static void
48lpfc_pool_kfree(void *obj, void *data)
49{
50 kfree(obj);
51}
52
53int 41int
54lpfc_mem_alloc(struct lpfc_hba * phba) 42lpfc_mem_alloc(struct lpfc_hba * phba)
55{ 43{
@@ -79,15 +67,13 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
79 pool->current_count++; 67 pool->current_count++;
80 } 68 }
81 69
82 phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE, 70 phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
83 lpfc_pool_kmalloc, lpfc_pool_kfree, 71 sizeof(LPFC_MBOXQ_t));
84 (void *)(unsigned long)sizeof(LPFC_MBOXQ_t));
85 if (!phba->mbox_mem_pool) 72 if (!phba->mbox_mem_pool)
86 goto fail_free_mbuf_pool; 73 goto fail_free_mbuf_pool;
87 74
88 phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE, 75 phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE,
89 lpfc_pool_kmalloc, lpfc_pool_kfree, 76 sizeof(struct lpfc_nodelist));
90 (void *)(unsigned long)sizeof(struct lpfc_nodelist));
91 if (!phba->nlp_mem_pool) 77 if (!phba->nlp_mem_pool)
92 goto fail_free_mbox_pool; 78 goto fail_free_mbox_pool;
93 79
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 029bbf461bb2..017729c59a49 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2154,8 +2154,7 @@ qla2x00_allocate_sp_pool(scsi_qla_host_t *ha)
2154 int rval; 2154 int rval;
2155 2155
2156 rval = QLA_SUCCESS; 2156 rval = QLA_SUCCESS;
2157 ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, 2157 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2158 mempool_free_slab, srb_cachep);
2159 if (ha->srb_mempool == NULL) { 2158 if (ha->srb_mempool == NULL) {
2160 qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n"); 2159 qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n");
2161 rval = QLA_FUNCTION_FAILED; 2160 rval = QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ede158d08d9d..8f010a314a3d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1787,9 +1787,8 @@ int __init scsi_init_queue(void)
1787 sgp->name); 1787 sgp->name);
1788 } 1788 }
1789 1789
1790 sgp->pool = mempool_create(SG_MEMPOOL_SIZE, 1790 sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1791 mempool_alloc_slab, mempool_free_slab, 1791 sgp->slab);
1792 sgp->slab);
1793 if (!sgp->pool) { 1792 if (!sgp->pool) {
1794 printk(KERN_ERR "SCSI: can't init sg mempool %s\n", 1793 printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1795 sgp->name); 1794 sgp->name);
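Where the pool elements come from a slab cache rather than kmalloc() (iscsi_tcp, qla2xxx and scsi_lib above), the mempool_alloc_slab/mempool_free_slab callback pair plus cache argument collapses into mempool_create_slab_pool(min_nr, cache). A sketch, assuming a cache pointer created elsewhere with kmem_cache_create():

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

extern struct kmem_cache *my_cmd_cache;	/* hypothetical, created elsewhere */

static mempool_t *cmd_pool;

static int cmd_pool_init(void)
{
	/* was: mempool_create(8, mempool_alloc_slab, mempool_free_slab, my_cmd_cache); */
	cmd_pool = mempool_create_slab_pool(8, my_cmd_cache);
	return cmd_pool ? 0 : -ENOMEM;
}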
diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c
index 3c987f49f6b4..7a6db1c5c8c5 100644
--- a/drivers/telephony/phonedev.c
+++ b/drivers/telephony/phonedev.c
@@ -29,6 +29,7 @@
29#include <linux/kmod.h> 29#include <linux/kmod.h>
30#include <linux/sem.h> 30#include <linux/sem.h>
31#include <linux/devfs_fs_kernel.h> 31#include <linux/devfs_fs_kernel.h>
32#include <linux/mutex.h>
32 33
33#define PHONE_NUM_DEVICES 256 34#define PHONE_NUM_DEVICES 256
34 35
@@ -37,7 +38,7 @@
37 */ 38 */
38 39
39static struct phone_device *phone_device[PHONE_NUM_DEVICES]; 40static struct phone_device *phone_device[PHONE_NUM_DEVICES];
40static DECLARE_MUTEX(phone_lock); 41static DEFINE_MUTEX(phone_lock);
41 42
42/* 43/*
43 * Open a phone device. 44 * Open a phone device.
@@ -53,14 +54,14 @@ static int phone_open(struct inode *inode, struct file *file)
53 if (minor >= PHONE_NUM_DEVICES) 54 if (minor >= PHONE_NUM_DEVICES)
54 return -ENODEV; 55 return -ENODEV;
55 56
56 down(&phone_lock); 57 mutex_lock(&phone_lock);
57 p = phone_device[minor]; 58 p = phone_device[minor];
58 if (p) 59 if (p)
59 new_fops = fops_get(p->f_op); 60 new_fops = fops_get(p->f_op);
60 if (!new_fops) { 61 if (!new_fops) {
61 up(&phone_lock); 62 mutex_unlock(&phone_lock);
62 request_module("char-major-%d-%d", PHONE_MAJOR, minor); 63 request_module("char-major-%d-%d", PHONE_MAJOR, minor);
63 down(&phone_lock); 64 mutex_lock(&phone_lock);
64 p = phone_device[minor]; 65 p = phone_device[minor];
65 if (p == NULL || (new_fops = fops_get(p->f_op)) == NULL) 66 if (p == NULL || (new_fops = fops_get(p->f_op)) == NULL)
66 { 67 {
@@ -78,7 +79,7 @@ static int phone_open(struct inode *inode, struct file *file)
78 } 79 }
79 fops_put(old_fops); 80 fops_put(old_fops);
80end: 81end:
81 up(&phone_lock); 82 mutex_unlock(&phone_lock);
82 return err; 83 return err;
83} 84}
84 85
@@ -100,18 +101,18 @@ int phone_register_device(struct phone_device *p, int unit)
100 end = unit + 1; /* enter the loop at least one time */ 101 end = unit + 1; /* enter the loop at least one time */
101 } 102 }
102 103
103 down(&phone_lock); 104 mutex_lock(&phone_lock);
104 for (i = base; i < end; i++) { 105 for (i = base; i < end; i++) {
105 if (phone_device[i] == NULL) { 106 if (phone_device[i] == NULL) {
106 phone_device[i] = p; 107 phone_device[i] = p;
107 p->minor = i; 108 p->minor = i;
108 devfs_mk_cdev(MKDEV(PHONE_MAJOR,i), 109 devfs_mk_cdev(MKDEV(PHONE_MAJOR,i),
109 S_IFCHR|S_IRUSR|S_IWUSR, "phone/%d", i); 110 S_IFCHR|S_IRUSR|S_IWUSR, "phone/%d", i);
110 up(&phone_lock); 111 mutex_unlock(&phone_lock);
111 return 0; 112 return 0;
112 } 113 }
113 } 114 }
114 up(&phone_lock); 115 mutex_unlock(&phone_lock);
115 return -ENFILE; 116 return -ENFILE;
116} 117}
117 118
@@ -121,12 +122,12 @@ int phone_register_device(struct phone_device *p, int unit)
121 122
122void phone_unregister_device(struct phone_device *pfd) 123void phone_unregister_device(struct phone_device *pfd)
123{ 124{
124 down(&phone_lock); 125 mutex_lock(&phone_lock);
125 if (phone_device[pfd->minor] != pfd) 126 if (phone_device[pfd->minor] != pfd)
126 panic("phone: bad unregister"); 127 panic("phone: bad unregister");
127 devfs_remove("phone/%d", pfd->minor); 128 devfs_remove("phone/%d", pfd->minor);
128 phone_device[pfd->minor] = NULL; 129 phone_device[pfd->minor] = NULL;
129 up(&phone_lock); 130 mutex_unlock(&phone_lock);
130} 131}
131 132
132 133