Diffstat (limited to 'drivers')
97 files changed, 7715 insertions, 5496 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 0dd96d1afd39..f28dcb4ec8b3 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_PARPORT) += parport/
 obj-y += base/ block/ misc/ mfd/ net/ media/
 obj-$(CONFIG_NUBUS) += nubus/
 obj-$(CONFIG_ATM) += atm/
-obj-$(CONFIG_PPC_PMAC) += macintosh/
+obj-y += macintosh/
 obj-$(CONFIG_IDE) += ide/
 obj-$(CONFIG_FC4) += fc4/
 obj-$(CONFIG_SCSI) += scsi/
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
index 667fa1dfa1a3..91082ce6f5d1 100644
--- a/drivers/acpi/bay.c
+++ b/drivers/acpi/bay.c
@@ -296,7 +296,7 @@ static int bay_add(acpi_handle handle, int id)
	/*
	 * Initialize bay device structure
	 */
-	new_bay = kzalloc(GFP_ATOMIC, sizeof(*new_bay));
+	new_bay = kzalloc(sizeof(*new_bay), GFP_ATOMIC);
	INIT_LIST_HEAD(&new_bay->list);
	new_bay->handle = handle;
	new_bay->name = (char *)nbuffer.pointer;
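The bay.c hunk swaps the kzalloc() arguments into their correct order: the size comes first, the GFP flags second. A minimal sketch of the pattern; the structure and field names here are only illustrative:

#include <linux/slab.h>
#include <linux/list.h>

struct example {
	struct list_head list;
	void *handle;
};

static struct example *example_alloc(void)
{
	/* size first, GFP flags second; kzalloc() returns zeroed memory */
	struct example *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;	/* always check for allocation failure */
	INIT_LIST_HEAD(&p->list);
	return p;
}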
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 48616c6fee9d..e2796fb40eb7 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1173,7 +1173,7 @@ static void ahci_host_intr(struct ata_port *ap)
		 * dangerous, we need to know more about them. Print
		 * more of it.
		 */
-		const u32 *f = pp->rx_fis + RX_FIS_SDB;
+		const __le32 *f = pp->rx_fis + RX_FIS_SDB;

		ata_port_printk(ap, KERN_INFO, "Spurious SDB FIS during NCQ "
			"issue=0x%x SAct=0x%x FIS=%08x:%08x%s\n",
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 46d8a94669b4..5f4e82ade6cd 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -116,7 +116,7 @@ static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
 {
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;
-	return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }


@@ -125,7 +125,7 @@ static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
 {
	if (sc_reg > SCR_CONTROL)
		return;
-	writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
+	writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
 }


@@ -262,7 +262,7 @@ static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)

 static u8 k2_stat_check_status(struct ata_port *ap)
 {
-	return readl((void *) ap->ioaddr.status_addr);
+	return readl((void __iomem *) ap->ioaddr.status_addr);
 }

 #ifdef CONFIG_PPC_OF
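These casts add the __iomem address-space annotation that sparse expects on pointers passed to readl()/writel(). A minimal sketch of the convention; the register name and offset are made up for illustration:

#include <linux/io.h>

#define EXAMPLE_STATUS_REG	0x10	/* hypothetical register offset */

static u32 example_read_status(void __iomem *base)
{
	/* readl()/writel() operate on __iomem pointers, never plain void * */
	return readl(base + EXAMPLE_STATUS_REG);
}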
diff --git a/drivers/char/watchdog/machzwd.c b/drivers/char/watchdog/machzwd.c
index 276577d08fba..4d730fdbd528 100644
--- a/drivers/char/watchdog/machzwd.c
+++ b/drivers/char/watchdog/machzwd.c
@@ -325,7 +325,7 @@ static int zf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
		return put_user(0, p);

	case WDIOC_KEEPALIVE:
-		zf_ping(0);
+		zf_ping(NULL);
		break;

	default:
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 43a68398656f..31ea405f2eeb 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -457,7 +457,7 @@ static struct pci_driver geode_aes_driver = {
 static int __init
 geode_aes_init(void)
 {
-	return pci_module_init(&geode_aes_driver);
+	return pci_register_driver(&geode_aes_driver);
 }

 static void __exit
diff --git a/drivers/hwmon/ams/ams-input.c b/drivers/hwmon/ams/ams-input.c
index f126aa485134..18210164e307 100644
--- a/drivers/hwmon/ams/ams-input.c
+++ b/drivers/hwmon/ams/ams-input.c
@@ -153,7 +153,7 @@ int ams_input_init(void)
 }

 /* Call with ams_info.lock held! */
-void ams_input_exit()
+void ams_input_exit(void)
 {
	ams_input_disable();
	device_remove_file(&ams_info.of_dev->dev, &dev_attr_joystick);
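The change matters because in C an empty parameter list leaves the arguments unspecified, whereas (void) declares a function that takes none; sparse warns about the former. A tiny illustration with made-up function names:

/* old style: "takes unspecified arguments" -- callers could pass anything */
void example_exit();

/* correct: explicitly takes no arguments */
void example_exit_fixed(void);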
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 0a7d1ab60e6d..89e37283c836 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -567,7 +567,7 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
	opcode = hdr->opcode & ISCSI_OPCODE_MASK;

	if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
-		itt = hdr->itt & ISCSI_ITT_MASK; /* mask out cid and age bits */
+		itt = get_itt(hdr->itt); /* mask out cid and age bits */
		if (!(itt < session->cmds_max))
			iser_err("itt can't be matched to task!!!"
				"conn %p opcode %d cmds_max %d itt %d\n",
@@ -625,7 +625,7 @@ void iser_snd_completion(struct iser_desc *tx_desc)
	/* this arithmetic is legal by libiscsi dd_data allocation */
	mtask = (void *) ((long)(void *)tx_desc -
			sizeof(struct iscsi_mgmt_task));
-	if (mtask->hdr->itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
+	if (mtask->hdr->itt == RESERVED_ITT) {
		struct iscsi_session *session = conn->session;

		spin_lock(&conn->session->lock);
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 4358a0a78eaa..c7db4032ef02 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -83,7 +83,7 @@


 struct ucb1400 {
-	ac97_t *ac97;
+	struct snd_ac97 *ac97;
	struct input_dev *ts_idev;

	int irq;
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index b10972ed0c9f..099f0afd394d 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -62,7 +62,7 @@ static struct kvm_stats_debugfs_item {
	{ "halt_exits", &kvm_stat.halt_exits },
	{ "request_irq", &kvm_stat.request_irq_exits },
	{ "irq_exits", &kvm_stat.irq_exits },
-	{ 0, 0 }
+	{ NULL, NULL }
 };

 static struct dentry *debugfs_dir;
@@ -205,7 +205,7 @@ static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
	mutex_lock(&vcpu->mutex);
	if (unlikely(!vcpu->vmcs)) {
		mutex_unlock(&vcpu->mutex);
-		return 0;
+		return NULL;
	}
	return kvm_arch_ops->vcpu_load(vcpu);
 }
@@ -257,9 +257,9 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

-	free->phys_mem = 0;
+	free->phys_mem = NULL;
	free->npages = 0;
-	free->dirty_bitmap = 0;
+	free->dirty_bitmap = NULL;
 }

 static void kvm_free_physmem(struct kvm *kvm)
@@ -267,7 +267,7 @@ static void kvm_free_physmem(struct kvm *kvm)
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
-		kvm_free_physmem_slot(&kvm->memslots[i], 0);
+		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
 }

 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
@@ -640,11 +640,11 @@ raced:

	/* Deallocate if slot is being removed */
	if (!npages)
-		new.phys_mem = 0;
+		new.phys_mem = NULL;

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
-		new.dirty_bitmap = 0;
+		new.dirty_bitmap = NULL;

	r = -ENOMEM;

@@ -799,14 +799,14 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
-	return 0;
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(gfn_to_memslot);

 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
	int i;
-	struct kvm_memory_slot *memslot = 0;
+	struct kvm_memory_slot *memslot = NULL;
	unsigned long rel_gfn;

	for (i = 0; i < kvm->nmemslots; ++i) {
@@ -1778,6 +1778,7 @@ static long kvm_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
 {
	struct kvm *kvm = filp->private_data;
+	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
@@ -1794,12 +1795,12 @@ static long kvm_dev_ioctl(struct file *filp,
		struct kvm_run kvm_run;

		r = -EFAULT;
-		if (copy_from_user(&kvm_run, (void *)arg, sizeof kvm_run))
+		if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
			goto out;
		r = kvm_dev_ioctl_run(kvm, &kvm_run);
		if (r < 0 && r != -EINTR)
			goto out;
-		if (copy_to_user((void *)arg, &kvm_run, sizeof kvm_run)) {
+		if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
			r = -EFAULT;
			goto out;
		}
@@ -1809,13 +1810,13 @@ static long kvm_dev_ioctl(struct file *filp,
		struct kvm_regs kvm_regs;

		r = -EFAULT;
-		if (copy_from_user(&kvm_regs, (void *)arg, sizeof kvm_regs))
+		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
-		if (copy_to_user((void *)arg, &kvm_regs, sizeof kvm_regs))
+		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
@@ -1824,7 +1825,7 @@ static long kvm_dev_ioctl(struct file *filp,
		struct kvm_regs kvm_regs;

		r = -EFAULT;
-		if (copy_from_user(&kvm_regs, (void *)arg, sizeof kvm_regs))
+		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs);
		if (r)
@@ -1836,13 +1837,13 @@ static long kvm_dev_ioctl(struct file *filp,
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
-		if (copy_from_user(&kvm_sregs, (void *)arg, sizeof kvm_sregs))
+		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
-		if (copy_to_user((void *)arg, &kvm_sregs, sizeof kvm_sregs))
+		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
@@ -1851,7 +1852,7 @@ static long kvm_dev_ioctl(struct file *filp,
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
-		if (copy_from_user(&kvm_sregs, (void *)arg, sizeof kvm_sregs))
+		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs);
		if (r)
@@ -1863,13 +1864,13 @@ static long kvm_dev_ioctl(struct file *filp,
		struct kvm_translation tr;

		r = -EFAULT;
-		if (copy_from_user(&tr, (void *)arg, sizeof tr))
+		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_dev_ioctl_translate(kvm, &tr);
		if (r)
			goto out;
		r = -EFAULT;
-		if (copy_to_user((void *)arg, &tr, sizeof tr))
+		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
@@ -1878,7 +1879,7 @@ static long kvm_dev_ioctl(struct file *filp,
		struct kvm_interrupt irq;

		r = -EFAULT;
-		if (copy_from_user(&irq, (void *)arg, sizeof irq))
+		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_dev_ioctl_interrupt(kvm, &irq);
		if (r)
@@ -1890,7 +1891,7 @@ static long kvm_dev_ioctl(struct file *filp,
		struct kvm_debug_guest dbg;

		r = -EFAULT;
-		if (copy_from_user(&dbg, (void *)arg, sizeof dbg))
+		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_dev_ioctl_debug_guest(kvm, &dbg);
		if (r)
@@ -1902,7 +1903,7 @@ static long kvm_dev_ioctl(struct file *filp,
		struct kvm_memory_region kvm_mem;

		r = -EFAULT;
-		if (copy_from_user(&kvm_mem, (void *)arg, sizeof kvm_mem))
+		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem);
		if (r)
@@ -1913,7 +1914,7 @@ static long kvm_dev_ioctl(struct file *filp,
		struct kvm_dirty_log log;

		r = -EFAULT;
-		if (copy_from_user(&log, (void *)arg, sizeof log))
+		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_dev_ioctl_get_dirty_log(kvm, &log);
		if (r)
@@ -1921,13 +1922,13 @@ static long kvm_dev_ioctl(struct file *filp,
		break;
	}
	case KVM_GET_MSRS:
-		r = msr_io(kvm, (void __user *)arg, get_msr, 1);
+		r = msr_io(kvm, argp, get_msr, 1);
		break;
	case KVM_SET_MSRS:
-		r = msr_io(kvm, (void __user *)arg, do_set_msr, 0);
+		r = msr_io(kvm, argp, do_set_msr, 0);
		break;
	case KVM_GET_MSR_INDEX_LIST: {
-		struct kvm_msr_list __user *user_msr_list = (void __user *)arg;
+		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

@@ -2014,7 +2015,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
-		on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
+		on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
 }
@@ -2028,7 +2029,7 @@ static __init void kvm_init_debug(void)
 {
	struct kvm_stats_debugfs_item *p;

-	debugfs_dir = debugfs_create_dir("kvm", 0);
+	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_u32(p->name, 0444, debugfs_dir,
					       p->data);
@@ -2069,7 +2070,7 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
	if (r < 0)
		return r;

-	on_each_cpu(kvm_arch_ops->hardware_enable, 0, 0, 1);
+	on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1);
	register_reboot_notifier(&kvm_reboot_notifier);

	kvm_chardev_ops.owner = module;
@@ -2084,7 +2085,7 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)

 out_free:
	unregister_reboot_notifier(&kvm_reboot_notifier);
-	on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
+	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
	return r;
 }
@@ -2094,7 +2095,7 @@ void kvm_exit_arch(void)
	misc_deregister(&kvm_dev);

	unregister_reboot_notifier(&kvm_reboot_notifier);
-	on_each_cpu(kvm_arch_ops->hardware_disable, 0, 0, 1);
+	on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
	kvm_arch_ops = NULL;
 }
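The repeated change in kvm_dev_ioctl() caches the user pointer once as void __user *argp and then hands it to copy_from_user()/copy_to_user(), which both silences sparse address-space warnings and avoids re-casting arg in every case. A minimal sketch of that pattern, with a made-up ioctl number and payload structure:

#include <linux/fs.h>
#include <linux/uaccess.h>

struct example_args {			/* hypothetical ioctl payload */
	unsigned long value;
};

#define EXAMPLE_IOCTL	0x4242		/* hypothetical command number */

static long example_ioctl(struct file *filp, unsigned int ioctl,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;	/* cast once, reuse below */
	struct example_args args;

	switch (ioctl) {
	case EXAMPLE_IOCTL:
		if (copy_from_user(&args, argp, sizeof(args)))
			return -EFAULT;
		args.value++;			/* stand-in for the real work */
		if (copy_to_user(argp, &args, sizeof(args)))
			return -EFAULT;
		return 0;
	default:
		return -EINVAL;
	}
}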
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 22c426cd8cb2..be793770f31b 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -333,7 +333,7 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
-	desc->shadow_ptes[j] = 0;
+	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index c79df79307ed..85f61dd1e936 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -274,7 +274,7 @@ static void svm_hardware_disable(void *garbage)
		wrmsrl(MSR_VM_HSAVE_PA, 0);
		rdmsrl(MSR_EFER, efer);
		wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
-		per_cpu(svm_data, raw_smp_processor_id()) = 0;
+		per_cpu(svm_data, raw_smp_processor_id()) = NULL;
		__free_page(svm_data->save_area);
		kfree(svm_data);
	}
@@ -642,7 +642,7 @@ static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
-	return 0;
+	return NULL;
 }

 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
@@ -934,7 +934,7 @@ static int io_get_override(struct kvm_vcpu *vcpu,
		return 0;

	*addr_override = 0;
-	*seg = 0;
+	*seg = NULL;
	for (i = 0; i < ins_length; i++)
		switch (inst[i]) {
		case 0xf0:
@@ -1087,7 +1087,7 @@ static int cpuid_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

 static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-	if (emulate_instruction(vcpu, 0, 0, 0) != EMULATE_DONE)
+	if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
		printk(KERN_ERR "%s: failed\n", __FUNCTION__);
	return 1;
 }
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 54c35c0b3181..27e05a77e21a 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -98,7 +98,7 @@ static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
	for (i = 0; i < vcpu->nmsrs; ++i)
		if (vcpu->guest_msrs[i].index == msr)
			return &vcpu->guest_msrs[i];
-	return 0;
+	return NULL;
 }

 static void vmcs_clear(struct vmcs *vmcs)
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index a9e747c39791..1a86387e23be 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -1,6 +1,6 @@

 menu "Macintosh device drivers"
-	depends on PPC || MAC
+	depends on PPC || MAC || X86

 config ADB
	bool "Apple Desktop Bus (ADB) support"
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 5ed41fe84e57..f83fad2a3ff4 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -171,11 +171,11 @@ static void rackmeter_setup_dbdma(struct rackmeter *rm)
	/* Make sure dbdma is reset */
	DBDMA_DO_RESET(rm->dma_regs);

-	pr_debug("rackmeter: mark offset=0x%lx\n",
+	pr_debug("rackmeter: mark offset=0x%zx\n",
		 offsetof(struct rackmeter_dma, mark));
-	pr_debug("rackmeter: buf1 offset=0x%lx\n",
+	pr_debug("rackmeter: buf1 offset=0x%zx\n",
		 offsetof(struct rackmeter_dma, buf1));
-	pr_debug("rackmeter: buf2 offset=0x%lx\n",
+	pr_debug("rackmeter: buf2 offset=0x%zx\n",
		 offsetof(struct rackmeter_dma, buf2));

	/* Prepare 4 dbdma commands for the 2 buffers */
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 11108165e264..059704fbb753 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1160,6 +1160,22 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
			return 0;
		}

+		if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) {
+			DEFINE_WAIT(__wait);
+			/* note that it is safe to do the prepare_to_wait
+			 * after the test as long as we do it before dropping
+			 * the spinlock.
+			 */
+			prepare_to_wait(&bitmap->overflow_wait, &__wait,
+					TASK_UNINTERRUPTIBLE);
+			spin_unlock_irq(&bitmap->lock);
+			bitmap->mddev->queue
+				->unplug_fn(bitmap->mddev->queue);
+			schedule();
+			finish_wait(&bitmap->overflow_wait, &__wait);
+			continue;
+		}
+
		switch(*bmc) {
		case 0:
			bitmap_file_set_bit(bitmap, offset);
@@ -1169,7 +1185,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
		case 1:
			*bmc = 2;
		}
-		BUG_ON((*bmc & COUNTER_MAX) == COUNTER_MAX);
+
		(*bmc)++;

		spin_unlock_irq(&bitmap->lock);
@@ -1207,6 +1223,9 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
	if (!success && ! (*bmc & NEEDED_MASK))
		*bmc |= NEEDED_MASK;

+	if ((*bmc & COUNTER_MAX) == COUNTER_MAX)
+		wake_up(&bitmap->overflow_wait);
+
	(*bmc)--;
	if (*bmc <= 2) {
		set_page_attr(bitmap,
@@ -1431,6 +1450,7 @@ int bitmap_create(mddev_t *mddev)
	spin_lock_init(&bitmap->lock);
	atomic_set(&bitmap->pending_writes, 0);
	init_waitqueue_head(&bitmap->write_wait);
+	init_waitqueue_head(&bitmap->overflow_wait);

	bitmap->mddev = mddev;

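The new overflow handling relies on the classic wait-queue idiom: the waiter calls prepare_to_wait() before releasing the lock, so a wake_up() that arrives between the unlock and schedule() cannot be lost. A minimal sketch of that idiom outside the md code; the condition, lock, and queue names are placeholders:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);	/* hypothetical wait queue */
static DEFINE_SPINLOCK(example_lock);
static int example_ready;	/* condition protected by example_lock */

static void example_wait_for_ready(void)
{
	DEFINE_WAIT(wait);

	spin_lock_irq(&example_lock);
	while (!example_ready) {
		/* queue ourselves before dropping the lock so a concurrent
		 * wake_up() cannot slip in unnoticed */
		prepare_to_wait(&example_wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&example_lock);
		schedule();
		finish_wait(&example_wq, &wait);
		spin_lock_irq(&example_lock);
	}
	spin_unlock_irq(&example_lock);
}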
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 467c16982d02..11c3d7bfa797 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2620,7 +2620,7 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
	}
	bi = conf->retry_read_aligned_list;
	if(bi) {
-		conf->retry_read_aligned = bi->bi_next;
+		conf->retry_read_aligned_list = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_phys_segments = 1; /* biased count of active stripes */
		bi->bi_hw_segments = 0; /* count of processed stripes */
@@ -2669,6 +2669,27 @@ static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
	return 0;
 }

+static int bio_fits_rdev(struct bio *bi)
+{
+	request_queue_t *q = bdev_get_queue(bi->bi_bdev);
+
+	if ((bi->bi_size>>9) > q->max_sectors)
+		return 0;
+	blk_recount_segments(q, bi);
+	if (bi->bi_phys_segments > q->max_phys_segments ||
+	    bi->bi_hw_segments > q->max_hw_segments)
+		return 0;
+
+	if (q->merge_bvec_fn)
+		/* it's too hard to apply the merge_bvec_fn at this stage,
+		 * just just give up
+		 */
+		return 0;
+
+	return 1;
+}
+
+
 static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
 {
	mddev_t *mddev = q->queuedata;
@@ -2715,6 +2736,13 @@ static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
		align_bi->bi_sector += rdev->data_offset;

+		if (!bio_fits_rdev(align_bi)) {
+			/* too big in some way */
+			bio_put(align_bi);
+			rdev_dec_pending(rdev, mddev);
+			return 0;
+		}
+
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
@@ -3107,7 +3135,9 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

	for (; logical_sector < last_sector;
-	     logical_sector += STRIPE_SECTORS, scnt++) {
+	     logical_sector += STRIPE_SECTORS,
+		     sector += STRIPE_SECTORS,
+		     scnt++) {

		if (scnt < raid_bio->bi_hw_segments)
			/* already done this stripe */
@@ -3123,7 +3153,13 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
		}

		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
-		add_stripe_bio(sh, raid_bio, dd_idx, 0);
+		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
+			release_stripe(sh);
+			raid_bio->bi_hw_segments = scnt;
+			conf->retry_read_aligned = raid_bio;
+			return handled;
+		}
+
		handle_stripe(sh, NULL);
		release_stripe(sh);
		handled++;
diff --git a/drivers/media/common/ir-keymaps.c b/drivers/media/common/ir-keymaps.c
index f51e02fe3655..0e948a5c5a03 100644
--- a/drivers/media/common/ir-keymaps.c
+++ b/drivers/media/common/ir-keymaps.c
@@ -698,7 +698,6 @@ IR_KEYTAB_TYPE ir_codes_pinnacle_grey[IR_KEYTAB_SIZE] = {
	[ 0x29 ] = KEY_TEXT,
	[ 0x2a ] = KEY_MEDIA,
	[ 0x18 ] = KEY_EPG,
-	[ 0x27 ] = KEY_RECORD,
 };

 EXPORT_SYMBOL_GPL(ir_codes_pinnacle_grey);
diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c
index 7243337b771a..bdd6301d2a47 100644
--- a/drivers/media/video/usbvision/usbvision-video.c
+++ b/drivers/media/video/usbvision/usbvision-video.c
@@ -1072,7 +1072,7 @@ static int usbvision_v4l2_ioctl(struct inode *inode, struct file *file,
 }


-static ssize_t usbvision_v4l2_read(struct file *file, char *buf,
+static ssize_t usbvision_v4l2_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
 {
	struct video_device *dev = video_devdata(file);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 89bba277da5f..bedae4ad3f74 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -42,7 +42,7 @@ config SGI_IOC4

 config TIFM_CORE
	tristate "TI Flash Media interface support (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	depends on EXPERIMENTAL && PCI
	help
	  If you want support for Texas Instruments(R) Flash Media adapters
	  you should select this option and then also choose an appropriate
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index db9d7df75ae0..552b7957a92a 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -108,8 +108,8 @@ static struct jprobe lkdtm;
 static int lkdtm_parse_commandline(void);
 static void lkdtm_handler(void);

-static char* cpoint_name = INVALID;
-static char* cpoint_type = NONE;
+static char* cpoint_name;
+static char* cpoint_type;
 static int cpoint_count = DEFAULT_COUNT;
 static int recur_count = REC_NUM_DEFAULT;

diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 4224686fdf2a..12af9c718764 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -111,7 +111,7 @@ config MMC_IMX

 config MMC_TIFM_SD
	tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)"
-	depends on MMC && EXPERIMENTAL
+	depends on MMC && EXPERIMENTAL && PCI
	select TIFM_CORE
	help
	  Say Y here if you want to be able to access MMC/SD cards with
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 7e34c4f07b70..bc7e906571d3 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -600,8 +600,7 @@ el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring
	count -= semi_count;
	memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count);
    } else {
-	/* Packet is in one chunk -- we can copy + cksum. */
-	eth_io_copy_and_sum(skb, base + ring_offset, count, 0);
+	memcpy_fromio(skb->data, base + ring_offset, count);
    }
    return;
 }
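Both this hunk and the ac3200.c one below drop the old eth_io_copy_and_sum() helper in favour of a plain memcpy_fromio() from the adapter's shared-memory ring into the skb, leaving checksumming to the normal receive path. A minimal sketch of the copy; the function and buffer names are illustrative:

#include <linux/io.h>
#include <linux/skbuff.h>

static void example_copy_rx_packet(struct sk_buff *skb,
				   void __iomem *ring_base, int count)
{
	/* copy the received frame out of the card's shared memory window */
	memcpy_fromio(skb->data, ring_base, count);
}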
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ad92b6a76ee6..38f41a593b12 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2335,6 +2335,17 @@ config QLA3XXX
	  To compile this driver as a module, choose M here: the module
	  will be called qla3xxx.

+config ATL1
+	tristate "Attansic L1 Gigabit Ethernet support (EXPERIMENTAL)"
+	depends on NET_PCI && PCI && EXPERIMENTAL
+	select CRC32
+	select MII
+	help
+	  This driver supports the Attansic L1 gigabit ethernet adapter.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called atl1.
+
 endmenu

 #
@@ -2534,7 +2545,7 @@ config RIONET_RX_SIZE

 config FDDI
	bool "FDDI driver support"
-	depends on (PCI || EISA)
+	depends on (PCI || EISA || TC)
	help
	  Fiber Distributed Data Interface is a high speed local area network
	  design; essentially a replacement for high speed Ethernet. FDDI can
@@ -2544,11 +2555,31 @@ config FDDI
	  will say N.

 config DEFXX
-	tristate "Digital DEFEA and DEFPA adapter support"
-	depends on FDDI && (PCI || EISA)
-	help
-	  This is support for the DIGITAL series of EISA (DEFEA) and PCI
-	  (DEFPA) controllers which can connect you to a local FDDI network.
+	tristate "Digital DEFTA/DEFEA/DEFPA adapter support"
+	depends on FDDI && (PCI || EISA || TC)
+	---help---
+	  This is support for the DIGITAL series of TURBOchannel (DEFTA),
+	  EISA (DEFEA) and PCI (DEFPA) controllers which can connect you
+	  to a local FDDI network.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called defxx. If unsure, say N.
+
+config DEFXX_MMIO
+	bool
+	prompt "Use MMIO instead of PIO" if PCI || EISA
+	depends on DEFXX
+	default n if PCI || EISA
+	default y
+	---help---
+	  This instructs the driver to use EISA or PCI memory-mapped I/O
+	  (MMIO) as appropriate instead of programmed I/O ports (PIO).
+	  Enabling this gives an improvement in processing time in parts
+	  of the driver, but it may cause problems with EISA (DEFEA)
+	  adapters. TURBOchannel does not have the concept of I/O ports,
+	  so MMIO is always used for these (DEFTA) adapters.
+
+	  If unsure, say N.

 config SKFP
	tristate "SysKonnect FDDI PCI support"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 0878e3df5174..33af833667da 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CHELSIO_T1) += chelsio/
 obj-$(CONFIG_CHELSIO_T3) += cxgb3/
 obj-$(CONFIG_EHEA) += ehea/
 obj-$(CONFIG_BONDING) += bonding/
+obj-$(CONFIG_ATL1) += atl1/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o

 gianfar_driver-objs := gianfar.o \
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index c01f87f5bed7..644c408515df 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -327,8 +327,7 @@ static void ac_block_input(struct net_device *dev, int count, struct sk_buff *sk
		memcpy_fromio(skb->data + semi_count,
			      ei_status.mem + TX_PAGES*256, count);
	} else {
-		/* Packet is in one chunk -- we can copy + cksum. */
-		eth_io_copy_and_sum(skb, start, count, 0);
+		memcpy_fromio(skb->data, start, count);
	}
 }

diff --git a/drivers/net/atl1/Makefile b/drivers/net/atl1/Makefile
new file mode 100644
index 000000000000..a6b707e4e69e
--- /dev/null
+++ b/drivers/net/atl1/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ATL1) += atl1.o
+atl1-y += atl1_main.o atl1_hw.o atl1_ethtool.o atl1_param.o
diff --git a/drivers/net/atl1/atl1.h b/drivers/net/atl1/atl1.h
new file mode 100644
index 000000000000..b1c6034e68fa
--- /dev/null
+++ b/drivers/net/atl1/atl1.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ATL1_H_
+#define _ATL1_H_
+
+#include <linux/types.h>
+#include <linux/if_vlan.h>
+
+#include "atl1_hw.h"
+
+/* function prototypes needed by multiple files */
+s32 atl1_up(struct atl1_adapter *adapter);
+void atl1_down(struct atl1_adapter *adapter);
+int atl1_reset(struct atl1_adapter *adapter);
+s32 atl1_setup_ring_resources(struct atl1_adapter *adapter);
+void atl1_free_ring_resources(struct atl1_adapter *adapter);
+
+extern char atl1_driver_name[];
+extern char atl1_driver_version[];
+extern const struct ethtool_ops atl1_ethtool_ops;
+
+struct atl1_adapter;
+
+#define ATL1_MAX_INTR		3
+
+#define ATL1_DEFAULT_TPD	256
+#define ATL1_MAX_TPD		1024
+#define ATL1_MIN_TPD		64
+#define ATL1_DEFAULT_RFD	512
+#define ATL1_MIN_RFD		128
+#define ATL1_MAX_RFD		2048
+
+#define ATL1_GET_DESC(R, i, type)	(&(((type *)((R)->desc))[i]))
+#define ATL1_RFD_DESC(R, i)	ATL1_GET_DESC(R, i, struct rx_free_desc)
+#define ATL1_TPD_DESC(R, i)	ATL1_GET_DESC(R, i, struct tx_packet_desc)
+#define ATL1_RRD_DESC(R, i)	ATL1_GET_DESC(R, i, struct rx_return_desc)
+
+/*
+ * Some workarounds require millisecond delays and are run during interrupt
+ * context. Most notably, when establishing link, the phy may need tweaking
+ * but cannot process phy register reads/writes faster than millisecond
+ * intervals...and we establish link due to a "link status change" interrupt.
+ */
+
+/*
+ * wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct atl1_buffer {
+	struct sk_buff *skb;
+	u16 length;
+	u16 alloced;
+	dma_addr_t dma;
+};
+
+#define MAX_TX_BUF_LEN		0x3000	/* 12KB */
+
+struct atl1_tpd_ring {
+	void *desc;		/* pointer to the descriptor ring memory */
+	dma_addr_t dma;		/* physical adress of the descriptor ring */
+	u16 size;		/* length of descriptor ring in bytes */
+	u16 count;		/* number of descriptors in the ring */
+	u16 hw_idx;		/* hardware index */
+	atomic_t next_to_clean;
+	atomic_t next_to_use;
+	struct atl1_buffer *buffer_info;
+};
+
+struct atl1_rfd_ring {
+	void *desc;
+	dma_addr_t dma;
+	u16 size;
+	u16 count;
+	atomic_t next_to_use;
+	u16 next_to_clean;
+	struct atl1_buffer *buffer_info;
+};
+
+struct atl1_rrd_ring {
+	void *desc;
+	dma_addr_t dma;
+	unsigned int size;
+	u16 count;
+	u16 next_to_use;
+	atomic_t next_to_clean;
+};
+
+struct atl1_ring_header {
+	void *desc;		/* pointer to the descriptor ring memory */
+	dma_addr_t dma;		/* physical adress of the descriptor ring */
+	unsigned int size;	/* length of descriptor ring in bytes */
+};
+
+struct atl1_cmb {
+	struct coals_msg_block *cmb;
+	dma_addr_t dma;
+};
+
+struct atl1_smb {
+	struct stats_msg_block *smb;
+	dma_addr_t dma;
+};
+
+/* Statistics counters */
+struct atl1_sft_stats {
+	u64 rx_packets;
+	u64 tx_packets;
+	u64 rx_bytes;
+	u64 tx_bytes;
+	u64 multicast;
+	u64 collisions;
+	u64 rx_errors;
+	u64 rx_length_errors;
+	u64 rx_crc_errors;
+	u64 rx_frame_errors;
+	u64 rx_fifo_errors;
+	u64 rx_missed_errors;
+	u64 tx_errors;
+	u64 tx_fifo_errors;
+	u64 tx_aborted_errors;
+	u64 tx_window_errors;
+	u64 tx_carrier_errors;
+
+	u64 tx_pause;		/* num Pause packet transmitted. */
+	u64 excecol;		/* num tx packets aborted due to excessive collisions. */
+	u64 deffer;		/* num deferred tx packets */
+	u64 scc;		/* num packets subsequently transmitted successfully w/ single prior collision. */
+	u64 mcc;		/* num packets subsequently transmitted successfully w/ multiple prior collisions. */
+	u64 latecol;		/* num tx packets w/ late collisions. */
+	u64 tx_underun;		/* num tx packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
+	u64 tx_trunc;		/* num tx packets truncated due to size exceeding MTU, regardless whether truncated by Selene or not. (The name doesn't really reflect the meaning in this case.) */
+	u64 rx_pause;		/* num Pause packets received. */
+	u64 rx_rrd_ov;
+	u64 rx_trunc;
+};
+
+/* board specific private data structure */
+#define ATL1_REGS_LEN	8
+
+/* Structure containing variables used by the shared code */
+struct atl1_hw {
+	u8 __iomem *hw_addr;
+	struct atl1_adapter *back;
+	enum atl1_dma_order dma_ord;
+	enum atl1_dma_rcb rcb_value;
+	enum atl1_dma_req_block dmar_block;
+	enum atl1_dma_req_block dmaw_block;
+	u8 preamble_len;
+	u8 max_retry;		/* Retransmission maximum, after which the packet will be discarded */
+	u8 jam_ipg;		/* IPG to start JAM for collision based flow control in half-duplex mode. In units of 8-bit time */
+	u8 ipgt;		/* Desired back to back inter-packet gap. The default is 96-bit time */
+	u8 min_ifg;		/* Minimum number of IFG to enforce in between RX frames. Frame gap below such IFP is dropped */
+	u8 ipgr1;		/* 64bit Carrier-Sense window */
+	u8 ipgr2;		/* 96-bit IPG window */
+	u8 tpd_burst;		/* Number of TPD to prefetch in cache-aligned burst. Each TPD is 16 bytes long */
+	u8 rfd_burst;		/* Number of RFD to prefetch in cache-aligned burst. Each RFD is 12 bytes long */
+	u8 rfd_fetch_gap;
+	u8 rrd_burst;		/* Threshold number of RRDs that can be retired in a burst. Each RRD is 16 bytes long */
+	u8 tpd_fetch_th;
+	u8 tpd_fetch_gap;
+	u16 tx_jumbo_task_th;
+	u16 txf_burst;		/* Number of data bytes to read in a cache-aligned burst. Each SRAM entry is
+				   8 bytes long */
+	u16 rx_jumbo_th;	/* Jumbo packet size for non-VLAN packet. VLAN packets should add 4 bytes */
+	u16 rx_jumbo_lkah;
+	u16 rrd_ret_timer;	/* RRD retirement timer. Decrement by 1 after every 512ns passes. */
+	u16 lcol;		/* Collision Window */
+
+	u16 cmb_tpd;
+	u16 cmb_rrd;
+	u16 cmb_rx_timer;
+	u16 cmb_tx_timer;
+	u32 smb_timer;
+	u16 media_type;
+	u16 autoneg_advertised;
+	u16 pci_cmd_word;
+
+	u16 mii_autoneg_adv_reg;
+	u16 mii_1000t_ctrl_reg;
+
+	u32 mem_rang;
+	u32 txcw;
+	u32 max_frame_size;
+	u32 min_frame_size;
+	u32 mc_filter_type;
+	u32 num_mc_addrs;
+	u32 collision_delta;
+	u32 tx_packet_delta;
+	u16 phy_spd_default;
+
+	u16 dev_rev;
+	u8 revision_id;
+
+	/* spi flash */
+	u8 flash_vendor;
+
+	u8 dma_fairness;
+	u8 mac_addr[ETH_ALEN];
+	u8 perm_mac_addr[ETH_ALEN];
+
+	/* bool phy_preamble_sup; */
+	bool phy_configured;
+};
+
+struct atl1_adapter {
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+	struct atl1_sft_stats soft_stats;
+
+	struct vlan_group *vlgrp;
+	u32 rx_buffer_len;
+	u32 wol;
+	u16 link_speed;
+	u16 link_duplex;
+	spinlock_t lock;
+	atomic_t irq_sem;
+	struct work_struct tx_timeout_task;
+	struct work_struct link_chg_task;
+	struct work_struct pcie_dma_to_rst_task;
+	struct timer_list watchdog_timer;
+	struct timer_list phy_config_timer;
+	bool phy_timer_pending;
+
+	bool mac_disabled;
+
+	/* All descriptor rings' memory */
+	struct atl1_ring_header ring_header;
+
+	/* TX */
+	struct atl1_tpd_ring tpd_ring;
+	spinlock_t mb_lock;
+
+	/* RX */
+	struct atl1_rfd_ring rfd_ring;
+	struct atl1_rrd_ring rrd_ring;
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+
+	u32 gorcl;
+	u64 gorcl_old;
+
+	/* Interrupt Moderator timer ( 2us resolution) */
+	u16 imt;
+	/* Interrupt Clear timer (2us resolution) */
+	u16 ict;
+
+	/* MII interface info */
+	struct mii_if_info mii;
+
+	/* structs defined in atl1_hw.h */
+	u32 bd_number;		/* board number */
+	bool pci_using_64;
+	struct atl1_hw hw;
+	struct atl1_smb smb;
+	struct atl1_cmb cmb;
+
+	u32 pci_state[16];
+};
+
+#endif /* _ATL1_H_ */
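The ATL1_GET_DESC() family of macros simply index a flat DMA-coherent descriptor array by casting the ring's void *desc to the requested descriptor type. A rough usage sketch under the same assumptions (struct tx_packet_desc is declared in atl1_hw.h, which is not part of this hunk, and the helper name is made up):

/* illustrative only: fetch the next TX packet descriptor to fill */
static struct tx_packet_desc *example_next_tpd(struct atl1_tpd_ring *tpd_ring)
{
	int idx = atomic_read(&tpd_ring->next_to_use);

	/* ATL1_TPD_DESC(ring, i) expands to
	 * &((struct tx_packet_desc *)ring->desc)[i] */
	return ATL1_TPD_DESC(tpd_ring, idx);
}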
diff --git a/drivers/net/atl1/atl1_ethtool.c b/drivers/net/atl1/atl1_ethtool.c new file mode 100644 index 000000000000..c11c27798e5c --- /dev/null +++ b/drivers/net/atl1/atl1_ethtool.c | |||
@@ -0,0 +1,508 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. | ||
3 | * Copyright(c) 2006 Chris Snook <csnook@redhat.com> | ||
4 | * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> | ||
5 | * | ||
6 | * Derived from Intel e1000 driver | ||
7 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along with | ||
20 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
21 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/types.h> | ||
25 | #include <linux/pci.h> | ||
26 | #include <linux/ethtool.h> | ||
27 | #include <linux/netdevice.h> | ||
28 | #include <linux/mii.h> | ||
29 | #include <asm/uaccess.h> | ||
30 | |||
31 | #include "atl1.h" | ||
32 | |||
33 | struct atl1_stats { | ||
34 | char stat_string[ETH_GSTRING_LEN]; | ||
35 | int sizeof_stat; | ||
36 | int stat_offset; | ||
37 | }; | ||
38 | |||
39 | #define ATL1_STAT(m) sizeof(((struct atl1_adapter *)0)->m), \ | ||
40 | offsetof(struct atl1_adapter, m) | ||
41 | |||
42 | static struct atl1_stats atl1_gstrings_stats[] = { | ||
43 | {"rx_packets", ATL1_STAT(soft_stats.rx_packets)}, | ||
44 | {"tx_packets", ATL1_STAT(soft_stats.tx_packets)}, | ||
45 | {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)}, | ||
46 | {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)}, | ||
47 | {"rx_errors", ATL1_STAT(soft_stats.rx_errors)}, | ||
48 | {"tx_errors", ATL1_STAT(soft_stats.tx_errors)}, | ||
49 | {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)}, | ||
50 | {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)}, | ||
51 | {"multicast", ATL1_STAT(soft_stats.multicast)}, | ||
52 | {"collisions", ATL1_STAT(soft_stats.collisions)}, | ||
53 | {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)}, | ||
54 | {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, | ||
55 | {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)}, | ||
56 | {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)}, | ||
57 | {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)}, | ||
58 | {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)}, | ||
59 | {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)}, | ||
60 | {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)}, | ||
61 | {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)}, | ||
62 | {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)}, | ||
63 | {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)}, | ||
64 | {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)}, | ||
65 | {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)}, | ||
66 | {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)}, | ||
67 | {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)}, | ||
68 | {"tx_underun", ATL1_STAT(soft_stats.tx_underun)}, | ||
69 | {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)}, | ||
70 | {"tx_pause", ATL1_STAT(soft_stats.tx_pause)}, | ||
71 | {"rx_pause", ATL1_STAT(soft_stats.rx_pause)}, | ||
72 | {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)}, | ||
73 | {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)} | ||
74 | }; | ||
75 | |||
76 | static void atl1_get_ethtool_stats(struct net_device *netdev, | ||
77 | struct ethtool_stats *stats, u64 *data) | ||
78 | { | ||
79 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
80 | int i; | ||
81 | char *p; | ||
82 | |||
83 | for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { | ||
84 | p = (char *)adapter+atl1_gstrings_stats[i].stat_offset; | ||
85 | data[i] = (atl1_gstrings_stats[i].sizeof_stat == | ||
86 | sizeof(u64)) ? *(u64 *)p : *(u32 *)p; | ||
87 | } | ||
88 | |||
89 | } | ||
90 | |||
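
The ATL1_STAT() macro above records a (sizeof, offsetof) pair for each statistic so that atl1_get_ethtool_stats() can walk struct atl1_adapter generically, choosing a 32- or 64-bit load by field size. Below is a minimal userspace sketch of the same technique; the toy_* names and sample values are illustrative and not part of the driver.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* toy stand-in for the adapter's statistics block */
struct toy_adapter {
	uint64_t rx_packets;
	uint32_t imt;		/* a 32-bit field, to exercise the size switch */
};

struct toy_stat {
	char name[32];
	int size;		/* sizeof() of the field */
	int offset;		/* offsetof() of the field within the struct */
};

/* same shape as ATL1_STAT(m): a sizeof/offsetof pair */
#define TOY_STAT(m) sizeof(((struct toy_adapter *)0)->m), \
	offsetof(struct toy_adapter, m)

static const struct toy_stat toy_stats[] = {
	{"rx_packets", TOY_STAT(rx_packets)},
	{"imt",        TOY_STAT(imt)},
};

int main(void)
{
	struct toy_adapter a = { .rx_packets = 12345, .imt = 100 };
	size_t i;

	for (i = 0; i < sizeof(toy_stats) / sizeof(toy_stats[0]); i++) {
		/* walk the struct the same way atl1_get_ethtool_stats() does */
		char *p = (char *)&a + toy_stats[i].offset;
		uint64_t v = (toy_stats[i].size == sizeof(uint64_t)) ?
			*(uint64_t *)p : *(uint32_t *)p;
		printf("%-12s %llu\n", toy_stats[i].name,
		       (unsigned long long)v);
	}
	return 0;
}
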
91 | static int atl1_get_stats_count(struct net_device *netdev) | ||
92 | { | ||
93 | return ARRAY_SIZE(atl1_gstrings_stats); | ||
94 | } | ||
95 | |||
96 | static int atl1_get_settings(struct net_device *netdev, | ||
97 | struct ethtool_cmd *ecmd) | ||
98 | { | ||
99 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
100 | struct atl1_hw *hw = &adapter->hw; | ||
101 | |||
102 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
103 | SUPPORTED_10baseT_Full | | ||
104 | SUPPORTED_100baseT_Half | | ||
105 | SUPPORTED_100baseT_Full | | ||
106 | SUPPORTED_1000baseT_Full | | ||
107 | SUPPORTED_Autoneg | SUPPORTED_TP); | ||
108 | ecmd->advertising = ADVERTISED_TP; | ||
109 | if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || | ||
110 | hw->media_type == MEDIA_TYPE_1000M_FULL) { | ||
111 | ecmd->advertising |= ADVERTISED_Autoneg; | ||
112 | if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) { | ||
113 | ecmd->advertising |= ADVERTISED_Autoneg; | ||
114 | ecmd->advertising |= | ||
115 | (ADVERTISED_10baseT_Half | | ||
116 | ADVERTISED_10baseT_Full | | ||
117 | ADVERTISED_100baseT_Half | | ||
118 | ADVERTISED_100baseT_Full | | ||
119 | ADVERTISED_1000baseT_Full); | ||
120 | } | ||
121 | else | ||
122 | ecmd->advertising |= (ADVERTISED_1000baseT_Full); | ||
123 | } | ||
124 | ecmd->port = PORT_TP; | ||
125 | ecmd->phy_address = 0; | ||
126 | ecmd->transceiver = XCVR_INTERNAL; | ||
127 | |||
128 | if (netif_carrier_ok(adapter->netdev)) { | ||
129 | u16 link_speed, link_duplex; | ||
130 | atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex); | ||
131 | ecmd->speed = link_speed; | ||
132 | if (link_duplex == FULL_DUPLEX) | ||
133 | ecmd->duplex = DUPLEX_FULL; | ||
134 | else | ||
135 | ecmd->duplex = DUPLEX_HALF; | ||
136 | } else { | ||
137 | ecmd->speed = -1; | ||
138 | ecmd->duplex = -1; | ||
139 | } | ||
140 | if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || | ||
141 | hw->media_type == MEDIA_TYPE_1000M_FULL) | ||
142 | ecmd->autoneg = AUTONEG_ENABLE; | ||
143 | else | ||
144 | ecmd->autoneg = AUTONEG_DISABLE; | ||
145 | |||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static int atl1_set_settings(struct net_device *netdev, | ||
150 | struct ethtool_cmd *ecmd) | ||
151 | { | ||
152 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
153 | struct atl1_hw *hw = &adapter->hw; | ||
154 | u16 phy_data; | ||
155 | int ret_val = 0; | ||
156 | u16 old_media_type = hw->media_type; | ||
157 | |||
158 | if (netif_running(adapter->netdev)) { | ||
159 | printk(KERN_DEBUG "%s: ethtool shutting down adapter\n", | ||
160 | atl1_driver_name); | ||
161 | atl1_down(adapter); | ||
162 | } | ||
163 | |||
164 | if (ecmd->autoneg == AUTONEG_ENABLE) | ||
165 | hw->media_type = MEDIA_TYPE_AUTO_SENSOR; | ||
166 | else { | ||
167 | if (ecmd->speed == SPEED_1000) { | ||
168 | if (ecmd->duplex != DUPLEX_FULL) { | ||
169 | printk(KERN_WARNING | ||
170 | "%s: can't force to 1000M half duplex\n", | ||
171 | atl1_driver_name); | ||
172 | ret_val = -EINVAL; | ||
173 | goto exit_sset; | ||
174 | } | ||
175 | hw->media_type = MEDIA_TYPE_1000M_FULL; | ||
176 | } else if (ecmd->speed == SPEED_100) { | ||
177 | if (ecmd->duplex == DUPLEX_FULL) { | ||
178 | hw->media_type = MEDIA_TYPE_100M_FULL; | ||
179 | } else | ||
180 | hw->media_type = MEDIA_TYPE_100M_HALF; | ||
181 | } else { | ||
182 | if (ecmd->duplex == DUPLEX_FULL) | ||
183 | hw->media_type = MEDIA_TYPE_10M_FULL; | ||
184 | else | ||
185 | hw->media_type = MEDIA_TYPE_10M_HALF; | ||
186 | } | ||
187 | } | ||
188 | switch (hw->media_type) { | ||
189 | case MEDIA_TYPE_AUTO_SENSOR: | ||
190 | ecmd->advertising = | ||
191 | ADVERTISED_10baseT_Half | | ||
192 | ADVERTISED_10baseT_Full | | ||
193 | ADVERTISED_100baseT_Half | | ||
194 | ADVERTISED_100baseT_Full | | ||
195 | ADVERTISED_1000baseT_Full | | ||
196 | ADVERTISED_Autoneg | ADVERTISED_TP; | ||
197 | break; | ||
198 | case MEDIA_TYPE_1000M_FULL: | ||
199 | ecmd->advertising = | ||
200 | ADVERTISED_1000baseT_Full | | ||
201 | ADVERTISED_Autoneg | ADVERTISED_TP; | ||
202 | break; | ||
203 | default: | ||
204 | ecmd->advertising = 0; | ||
205 | break; | ||
206 | } | ||
207 | if (atl1_phy_setup_autoneg_adv(hw)) { | ||
208 | ret_val = -EINVAL; | ||
209 | printk(KERN_WARNING | ||
210 | "%s: invalid ethtool speed/duplex setting\n", | ||
211 | atl1_driver_name); | ||
212 | goto exit_sset; | ||
213 | } | ||
214 | if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || | ||
215 | hw->media_type == MEDIA_TYPE_1000M_FULL) | ||
216 | phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; | ||
217 | else { | ||
218 | switch (hw->media_type) { | ||
219 | case MEDIA_TYPE_100M_FULL: | ||
220 | phy_data = | ||
221 | MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | | ||
222 | MII_CR_RESET; | ||
223 | break; | ||
224 | case MEDIA_TYPE_100M_HALF: | ||
225 | phy_data = MII_CR_SPEED_100 | MII_CR_RESET; | ||
226 | break; | ||
227 | case MEDIA_TYPE_10M_FULL: | ||
228 | phy_data = | ||
229 | MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; | ||
230 | break; | ||
231 | default: /* MEDIA_TYPE_10M_HALF: */ | ||
232 | phy_data = MII_CR_SPEED_10 | MII_CR_RESET; | ||
233 | break; | ||
234 | } | ||
235 | } | ||
236 | atl1_write_phy_reg(hw, MII_BMCR, phy_data); | ||
237 | exit_sset: | ||
238 | if (ret_val) | ||
239 | hw->media_type = old_media_type; | ||
240 | |||
241 | if (netif_running(adapter->netdev)) { | ||
242 | printk(KERN_DEBUG "%s: ethtool starting adapter\n", | ||
243 | atl1_driver_name); | ||
244 | atl1_up(adapter); | ||
245 | } else if (!ret_val) { | ||
246 | printk(KERN_DEBUG "%s: ethtool resetting adapter\n", | ||
247 | atl1_driver_name); | ||
248 | atl1_reset(adapter); | ||
249 | } | ||
250 | return ret_val; | ||
251 | } | ||
252 | |||
253 | static void atl1_get_drvinfo(struct net_device *netdev, | ||
254 | struct ethtool_drvinfo *drvinfo) | ||
255 | { | ||
256 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
257 | |||
258 | strncpy(drvinfo->driver, atl1_driver_name, sizeof(drvinfo->driver)); | ||
259 | strncpy(drvinfo->version, atl1_driver_version, | ||
260 | sizeof(drvinfo->version)); | ||
261 | strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); | ||
262 | strncpy(drvinfo->bus_info, pci_name(adapter->pdev), | ||
263 | sizeof(drvinfo->bus_info)); | ||
264 | drvinfo->eedump_len = ATL1_EEDUMP_LEN; | ||
265 | } | ||
266 | |||
267 | static void atl1_get_wol(struct net_device *netdev, | ||
268 | struct ethtool_wolinfo *wol) | ||
269 | { | ||
270 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
271 | |||
272 | wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; | ||
273 | wol->wolopts = 0; | ||
274 | if (adapter->wol & ATL1_WUFC_EX) | ||
275 | wol->wolopts |= WAKE_UCAST; | ||
276 | if (adapter->wol & ATL1_WUFC_MC) | ||
277 | wol->wolopts |= WAKE_MCAST; | ||
278 | if (adapter->wol & ATL1_WUFC_BC) | ||
279 | wol->wolopts |= WAKE_BCAST; | ||
280 | if (adapter->wol & ATL1_WUFC_MAG) | ||
281 | wol->wolopts |= WAKE_MAGIC; | ||
282 | return; | ||
283 | } | ||
284 | |||
285 | static int atl1_set_wol(struct net_device *netdev, | ||
286 | struct ethtool_wolinfo *wol) | ||
287 | { | ||
288 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
289 | |||
290 | if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) | ||
291 | return -EOPNOTSUPP; | ||
292 | adapter->wol = 0; | ||
293 | if (wol->wolopts & WAKE_UCAST) | ||
294 | adapter->wol |= ATL1_WUFC_EX; | ||
295 | if (wol->wolopts & WAKE_MCAST) | ||
296 | adapter->wol |= ATL1_WUFC_MC; | ||
297 | if (wol->wolopts & WAKE_BCAST) | ||
298 | adapter->wol |= ATL1_WUFC_BC; | ||
299 | if (wol->wolopts & WAKE_MAGIC) | ||
300 | adapter->wol |= ATL1_WUFC_MAG; | ||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | static void atl1_get_ringparam(struct net_device *netdev, | ||
305 | struct ethtool_ringparam *ring) | ||
306 | { | ||
307 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
308 | struct atl1_tpd_ring *txdr = &adapter->tpd_ring; | ||
309 | struct atl1_rfd_ring *rxdr = &adapter->rfd_ring; | ||
310 | |||
311 | ring->rx_max_pending = ATL1_MAX_RFD; | ||
312 | ring->tx_max_pending = ATL1_MAX_TPD; | ||
313 | ring->rx_mini_max_pending = 0; | ||
314 | ring->rx_jumbo_max_pending = 0; | ||
315 | ring->rx_pending = rxdr->count; | ||
316 | ring->tx_pending = txdr->count; | ||
317 | ring->rx_mini_pending = 0; | ||
318 | ring->rx_jumbo_pending = 0; | ||
319 | } | ||
320 | |||
321 | static int atl1_set_ringparam(struct net_device *netdev, | ||
322 | struct ethtool_ringparam *ring) | ||
323 | { | ||
324 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
325 | struct atl1_tpd_ring *tpdr = &adapter->tpd_ring; | ||
326 | struct atl1_rrd_ring *rrdr = &adapter->rrd_ring; | ||
327 | struct atl1_rfd_ring *rfdr = &adapter->rfd_ring; | ||
328 | |||
329 | struct atl1_tpd_ring tpd_old, tpd_new; | ||
330 | struct atl1_rfd_ring rfd_old, rfd_new; | ||
331 | struct atl1_rrd_ring rrd_old, rrd_new; | ||
332 | struct atl1_ring_header rhdr_old, rhdr_new; | ||
333 | int err; | ||
334 | |||
335 | tpd_old = adapter->tpd_ring; | ||
336 | rfd_old = adapter->rfd_ring; | ||
337 | rrd_old = adapter->rrd_ring; | ||
338 | rhdr_old = adapter->ring_header; | ||
339 | |||
340 | if (netif_running(adapter->netdev)) | ||
341 | atl1_down(adapter); | ||
342 | |||
343 | rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD); | ||
344 | rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD : | ||
345 | rfdr->count; | ||
346 | rfdr->count = (rfdr->count + 3) & ~3; | ||
347 | rrdr->count = rfdr->count; | ||
348 | |||
349 | tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD); | ||
350 | tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD : | ||
351 | tpdr->count; | ||
352 | tpdr->count = (tpdr->count + 3) & ~3; | ||
353 | |||
354 | if (netif_running(adapter->netdev)) { | ||
355 | /* try to get new resources before deleting old */ | ||
356 | err = atl1_setup_ring_resources(adapter); | ||
357 | if (err) | ||
358 | goto err_setup_ring; | ||
359 | |||
360 | /* | ||
361 | * save the new, restore the old in order to free it, | ||
362 | * then restore the new back again | ||
363 | */ | ||
364 | |||
365 | rfd_new = adapter->rfd_ring; | ||
366 | rrd_new = adapter->rrd_ring; | ||
367 | tpd_new = adapter->tpd_ring; | ||
368 | rhdr_new = adapter->ring_header; | ||
369 | adapter->rfd_ring = rfd_old; | ||
370 | adapter->rrd_ring = rrd_old; | ||
371 | adapter->tpd_ring = tpd_old; | ||
372 | adapter->ring_header = rhdr_old; | ||
373 | atl1_free_ring_resources(adapter); | ||
374 | adapter->rfd_ring = rfd_new; | ||
375 | adapter->rrd_ring = rrd_new; | ||
376 | adapter->tpd_ring = tpd_new; | ||
377 | adapter->ring_header = rhdr_new; | ||
378 | |||
379 | err = atl1_up(adapter); | ||
380 | if (err) | ||
381 | return err; | ||
382 | } | ||
383 | return 0; | ||
384 | |||
385 | err_setup_ring: | ||
386 | adapter->rfd_ring = rfd_old; | ||
387 | adapter->rrd_ring = rrd_old; | ||
388 | adapter->tpd_ring = tpd_old; | ||
389 | adapter->ring_header = rhdr_old; | ||
390 | atl1_up(adapter); | ||
391 | return err; | ||
392 | } | ||
393 | |||
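
atl1_set_ringparam() clamps the requested descriptor counts between the driver's minimum and maximum and then rounds up to a multiple of four with (count + 3) & ~3. A small standalone sketch of that arithmetic follows; the TOY_MIN_RFD/TOY_MAX_RFD limits are made up, not the real ATL1_MIN_RFD/ATL1_MAX_RFD values from atl1.h.

#include <stdio.h>
#include <stdint.h>

#define TOY_MIN_RFD 32u
#define TOY_MAX_RFD 2048u

static uint16_t clamp_ring(uint32_t requested)
{
	uint32_t n = requested < TOY_MIN_RFD ? TOY_MIN_RFD : requested;

	if (n > TOY_MAX_RFD)
		n = TOY_MAX_RFD;
	/* round up to a multiple of 4, as the driver does */
	return (uint16_t)((n + 3) & ~3u);
}

int main(void)
{
	printf("%u -> %u\n", 0u, clamp_ring(0));	/* 0    -> 32   */
	printf("%u -> %u\n", 250u, clamp_ring(250));	/* 250  -> 252  */
	printf("%u -> %u\n", 9999u, clamp_ring(9999));	/* 9999 -> 2048 */
	return 0;
}
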
394 | static void atl1_get_pauseparam(struct net_device *netdev, | ||
395 | struct ethtool_pauseparam *epause) | ||
396 | { | ||
397 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
398 | struct atl1_hw *hw = &adapter->hw; | ||
399 | |||
400 | if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || | ||
401 | hw->media_type == MEDIA_TYPE_1000M_FULL) { | ||
402 | epause->autoneg = AUTONEG_ENABLE; | ||
403 | } else { | ||
404 | epause->autoneg = AUTONEG_DISABLE; | ||
405 | } | ||
406 | epause->rx_pause = 1; | ||
407 | epause->tx_pause = 1; | ||
408 | } | ||
409 | |||
410 | static int atl1_set_pauseparam(struct net_device *netdev, | ||
411 | struct ethtool_pauseparam *epause) | ||
412 | { | ||
413 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
414 | struct atl1_hw *hw = &adapter->hw; | ||
415 | |||
416 | if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || | ||
417 | hw->media_type == MEDIA_TYPE_1000M_FULL) { | ||
418 | epause->autoneg = AUTONEG_ENABLE; | ||
419 | } else { | ||
420 | epause->autoneg = AUTONEG_DISABLE; | ||
421 | } | ||
422 | |||
423 | epause->rx_pause = 1; | ||
424 | epause->tx_pause = 1; | ||
425 | |||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | static u32 atl1_get_rx_csum(struct net_device *netdev) | ||
430 | { | ||
431 | return 1; | ||
432 | } | ||
433 | |||
434 | static void atl1_get_strings(struct net_device *netdev, u32 stringset, | ||
435 | u8 *data) | ||
436 | { | ||
437 | u8 *p = data; | ||
438 | int i; | ||
439 | |||
440 | switch (stringset) { | ||
441 | case ETH_SS_STATS: | ||
442 | for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { | ||
443 | memcpy(p, atl1_gstrings_stats[i].stat_string, | ||
444 | ETH_GSTRING_LEN); | ||
445 | p += ETH_GSTRING_LEN; | ||
446 | } | ||
447 | break; | ||
448 | } | ||
449 | } | ||
450 | |||
451 | static int atl1_nway_reset(struct net_device *netdev) | ||
452 | { | ||
453 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
454 | struct atl1_hw *hw = &adapter->hw; | ||
455 | |||
456 | if (netif_running(netdev)) { | ||
457 | u16 phy_data; | ||
458 | atl1_down(adapter); | ||
459 | |||
460 | if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || | ||
461 | hw->media_type == MEDIA_TYPE_1000M_FULL) { | ||
462 | phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; | ||
463 | } else { | ||
464 | switch (hw->media_type) { | ||
465 | case MEDIA_TYPE_100M_FULL: | ||
466 | phy_data = MII_CR_FULL_DUPLEX | | ||
467 | MII_CR_SPEED_100 | MII_CR_RESET; | ||
468 | break; | ||
469 | case MEDIA_TYPE_100M_HALF: | ||
470 | phy_data = MII_CR_SPEED_100 | MII_CR_RESET; | ||
471 | break; | ||
472 | case MEDIA_TYPE_10M_FULL: | ||
473 | phy_data = MII_CR_FULL_DUPLEX | | ||
474 | MII_CR_SPEED_10 | MII_CR_RESET; | ||
475 | break; | ||
476 | default: /* MEDIA_TYPE_10M_HALF */ | ||
477 | phy_data = MII_CR_SPEED_10 | MII_CR_RESET; | ||
478 | } | ||
479 | } | ||
480 | atl1_write_phy_reg(hw, MII_BMCR, phy_data); | ||
481 | atl1_up(adapter); | ||
482 | } | ||
483 | return 0; | ||
484 | } | ||
485 | |||
486 | const struct ethtool_ops atl1_ethtool_ops = { | ||
487 | .get_settings = atl1_get_settings, | ||
488 | .set_settings = atl1_set_settings, | ||
489 | .get_drvinfo = atl1_get_drvinfo, | ||
490 | .get_wol = atl1_get_wol, | ||
491 | .set_wol = atl1_set_wol, | ||
492 | .get_ringparam = atl1_get_ringparam, | ||
493 | .set_ringparam = atl1_set_ringparam, | ||
494 | .get_pauseparam = atl1_get_pauseparam, | ||
495 | .set_pauseparam = atl1_set_pauseparam, | ||
496 | .get_rx_csum = atl1_get_rx_csum, | ||
497 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
498 | .set_tx_csum = ethtool_op_set_tx_hw_csum, | ||
499 | .get_link = ethtool_op_get_link, | ||
500 | .get_sg = ethtool_op_get_sg, | ||
501 | .set_sg = ethtool_op_set_sg, | ||
502 | .get_strings = atl1_get_strings, | ||
503 | .nway_reset = atl1_nway_reset, | ||
504 | .get_ethtool_stats = atl1_get_ethtool_stats, | ||
505 | .get_stats_count = atl1_get_stats_count, | ||
506 | .get_tso = ethtool_op_get_tso, | ||
507 | .set_tso = ethtool_op_set_tso, | ||
508 | }; | ||
diff --git a/drivers/net/atl1/atl1_hw.c b/drivers/net/atl1/atl1_hw.c new file mode 100644 index 000000000000..08b2d785469d --- /dev/null +++ b/drivers/net/atl1/atl1_hw.c | |||
@@ -0,0 +1,718 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. | ||
3 | * Copyright(c) 2006 Chris Snook <csnook@redhat.com> | ||
4 | * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> | ||
5 | * | ||
6 | * Derived from Intel e1000 driver | ||
7 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along with | ||
20 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
21 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/types.h> | ||
25 | #include <linux/pci.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/if_vlan.h> | ||
28 | #include <linux/etherdevice.h> | ||
29 | #include <linux/crc32.h> | ||
30 | #include <asm/byteorder.h> | ||
31 | |||
32 | #include "atl1.h" | ||
33 | |||
34 | /* | ||
35 | * Reset the transmit and receive units; mask and clear all interrupts. | ||
36 | * hw - Struct containing variables accessed by shared code | ||
37 | * return : ATL1_SUCCESS or idle status (if error) | ||
38 | */ | ||
39 | s32 atl1_reset_hw(struct atl1_hw *hw) | ||
40 | { | ||
41 | u32 icr; | ||
42 | int i; | ||
43 | |||
44 | /* | ||
45 | * Clear Interrupt mask to stop board from generating | ||
46 | * interrupts & Clear any pending interrupt events | ||
47 | */ | ||
48 | /* | ||
49 | * iowrite32(0, hw->hw_addr + REG_IMR); | ||
50 | * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); | ||
51 | */ | ||
52 | |||
53 | /* | ||
54 | * Issue Soft Reset to the MAC. This will reset the chip's | ||
55 | * transmit, receive and DMA units. It will not affect | ||
56 | * the current PCI configuration. The global reset bit is self- | ||
57 | * clearing, and should clear within a microsecond. | ||
58 | */ | ||
59 | iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL); | ||
60 | ioread32(hw->hw_addr + REG_MASTER_CTRL); | ||
61 | |||
62 | iowrite16(1, hw->hw_addr + REG_GPHY_ENABLE); | ||
63 | ioread16(hw->hw_addr + REG_GPHY_ENABLE); | ||
64 | |||
65 | msleep(1); /* delay about 1ms */ | ||
66 | |||
67 | /* Wait at least 10ms for all modules to be idle */ | ||
68 | for (i = 0; i < 10; i++) { | ||
69 | icr = ioread32(hw->hw_addr + REG_IDLE_STATUS); | ||
70 | if (!icr) | ||
71 | break; | ||
72 | msleep(1); /* delay 1 ms */ | ||
73 | cpu_relax(); /* FIXME: is this still the right way to do this? */ | ||
74 | } | ||
75 | |||
76 | if (icr) { | ||
77 | printk (KERN_DEBUG "icr = %x\n", icr); | ||
78 | return icr; | ||
79 | } | ||
80 | |||
81 | return ATL1_SUCCESS; | ||
82 | } | ||
83 | |||
84 | /* EEPROM functions | ||
85 | * | ||
86 | * check_eeprom_exist | ||
87 | * returns 0 if the EEPROM exists | ||
88 | */ | ||
89 | static int atl1_check_eeprom_exist(struct atl1_hw *hw) | ||
90 | { | ||
91 | u32 value; | ||
92 | value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); | ||
93 | if (value & SPI_FLASH_CTRL_EN_VPD) { | ||
94 | value &= ~SPI_FLASH_CTRL_EN_VPD; | ||
95 | iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); | ||
96 | } | ||
97 | |||
98 | value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST); | ||
99 | return ((value & 0xFF00) == 0x6C00) ? 0 : 1; | ||
100 | } | ||
101 | |||
102 | static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value) | ||
103 | { | ||
104 | int i; | ||
105 | u32 control; | ||
106 | |||
107 | if (offset & 3) | ||
108 | return false; /* address is not dword-aligned */ | ||
109 | |||
110 | iowrite32(0, hw->hw_addr + REG_VPD_DATA); | ||
111 | control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; | ||
112 | iowrite32(control, hw->hw_addr + REG_VPD_CAP); | ||
113 | ioread32(hw->hw_addr + REG_VPD_CAP); | ||
114 | |||
115 | for (i = 0; i < 10; i++) { | ||
116 | msleep(2); | ||
117 | control = ioread32(hw->hw_addr + REG_VPD_CAP); | ||
118 | if (control & VPD_CAP_VPD_FLAG) | ||
119 | break; | ||
120 | } | ||
121 | if (control & VPD_CAP_VPD_FLAG) { | ||
122 | *p_value = ioread32(hw->hw_addr + REG_VPD_DATA); | ||
123 | return true; | ||
124 | } | ||
125 | return false; /* timeout */ | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Reads the value from a PHY register | ||
130 | * hw - Struct containing variables accessed by shared code | ||
131 | * reg_addr - address of the PHY register to read | ||
132 | */ | ||
133 | s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data) | ||
134 | { | ||
135 | u32 val; | ||
136 | int i; | ||
137 | |||
138 | val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | | ||
139 | MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << | ||
140 | MDIO_CLK_SEL_SHIFT; | ||
141 | iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); | ||
142 | ioread32(hw->hw_addr + REG_MDIO_CTRL); | ||
143 | |||
144 | for (i = 0; i < MDIO_WAIT_TIMES; i++) { | ||
145 | udelay(2); | ||
146 | val = ioread32(hw->hw_addr + REG_MDIO_CTRL); | ||
147 | if (!(val & (MDIO_START | MDIO_BUSY))) | ||
148 | break; | ||
149 | } | ||
150 | if (!(val & (MDIO_START | MDIO_BUSY))) { | ||
151 | *phy_data = (u16) val; | ||
152 | return ATL1_SUCCESS; | ||
153 | } | ||
154 | return ATL1_ERR_PHY; | ||
155 | } | ||
156 | |||
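
atl1_read_phy_reg() composes an MDIO control word, starts the transaction, and polls until both MDIO_START and MDIO_BUSY clear. Below is a hedged usage sketch: toy_phy_link_up() is a hypothetical helper, not part of the driver, and it assumes an initialized struct atl1_hw; it relies only on MII_BMSR and BMSR_LSTATUS from linux/mii.h, which this driver already includes. BMSR latches link-down events, so it is read twice to get the current state.

/*
 * Hypothetical helper, for illustration only: returns nonzero if the
 * PHY reports link up. Assumes 'hw' was set up by the driver.
 */
static int toy_phy_link_up(struct atl1_hw *hw)
{
	u16 bmsr;

	if (atl1_read_phy_reg(hw, MII_BMSR, &bmsr) != ATL1_SUCCESS)
		return 0;
	/* read twice: BMSR link status is latched-low */
	if (atl1_read_phy_reg(hw, MII_BMSR, &bmsr) != ATL1_SUCCESS)
		return 0;
	return (bmsr & BMSR_LSTATUS) != 0;
}
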
157 | #define CUSTOM_SPI_CS_SETUP 2 | ||
158 | #define CUSTOM_SPI_CLK_HI 2 | ||
159 | #define CUSTOM_SPI_CLK_LO 2 | ||
160 | #define CUSTOM_SPI_CS_HOLD 2 | ||
161 | #define CUSTOM_SPI_CS_HI 3 | ||
162 | |||
163 | static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf) | ||
164 | { | ||
165 | int i; | ||
166 | u32 value; | ||
167 | |||
168 | iowrite32(0, hw->hw_addr + REG_SPI_DATA); | ||
169 | iowrite32(addr, hw->hw_addr + REG_SPI_ADDR); | ||
170 | |||
171 | value = SPI_FLASH_CTRL_WAIT_READY | | ||
172 | (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << | ||
173 | SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & | ||
174 | SPI_FLASH_CTRL_CLK_HI_MASK) << | ||
175 | SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & | ||
176 | SPI_FLASH_CTRL_CLK_LO_MASK) << | ||
177 | SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & | ||
178 | SPI_FLASH_CTRL_CS_HOLD_MASK) << | ||
179 | SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & | ||
180 | SPI_FLASH_CTRL_CS_HI_MASK) << | ||
181 | SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) << | ||
182 | SPI_FLASH_CTRL_INS_SHIFT; | ||
183 | |||
184 | iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); | ||
185 | |||
186 | value |= SPI_FLASH_CTRL_START; | ||
187 | iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); | ||
188 | ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); | ||
189 | |||
190 | for (i = 0; i < 10; i++) { | ||
191 | msleep(1); /* 1ms */ | ||
192 | value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); | ||
193 | if (!(value & SPI_FLASH_CTRL_START)) | ||
194 | break; | ||
195 | } | ||
196 | |||
197 | if (value & SPI_FLASH_CTRL_START) | ||
198 | return false; | ||
199 | |||
200 | *buf = ioread32(hw->hw_addr + REG_SPI_DATA); | ||
201 | |||
202 | return true; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * get_permanent_address | ||
207 | * returns 0 if a valid MAC address was obtained | ||
208 | */ | ||
209 | static int atl1_get_permanent_address(struct atl1_hw *hw) | ||
210 | { | ||
211 | u32 addr[2]; | ||
212 | u32 i, control; | ||
213 | u16 reg; | ||
214 | u8 eth_addr[ETH_ALEN]; | ||
215 | bool key_valid; | ||
216 | |||
217 | if (is_valid_ether_addr(hw->perm_mac_addr)) | ||
218 | return 0; | ||
219 | |||
220 | /* init */ | ||
221 | addr[0] = addr[1] = 0; | ||
222 | |||
223 | if (!atl1_check_eeprom_exist(hw)) { /* EEPROM exists */ | ||
224 | reg = 0; | ||
225 | key_valid = false; | ||
226 | /* Read out all EEPROM content */ | ||
227 | i = 0; | ||
228 | while (1) { | ||
229 | if (atl1_read_eeprom(hw, i + 0x100, &control)) { | ||
230 | if (key_valid) { | ||
231 | if (reg == REG_MAC_STA_ADDR) | ||
232 | addr[0] = control; | ||
233 | else if (reg == (REG_MAC_STA_ADDR + 4)) | ||
234 | addr[1] = control; | ||
235 | key_valid = false; | ||
236 | } else if ((control & 0xff) == 0x5A) { | ||
237 | key_valid = true; | ||
238 | reg = (u16) (control >> 16); | ||
239 | } else | ||
240 | break; /* assume end of data on an invalid keyword */ | ||
241 | } else | ||
242 | break; /* read error */ | ||
243 | i += 4; | ||
244 | } | ||
245 | |||
246 | /* | ||
247 | * The following 2 lines are the Attansic originals. Saving for posterity. | ||
248 | * *(u32 *) & eth_addr[2] = LONGSWAP(addr[0]); | ||
249 | * *(u16 *) & eth_addr[0] = SHORTSWAP(*(u16 *) & addr[1]); | ||
250 | */ | ||
251 | *(u32 *) & eth_addr[2] = swab32(addr[0]); | ||
252 | *(u16 *) & eth_addr[0] = swab16(*(u16 *) & addr[1]); | ||
253 | |||
254 | if (is_valid_ether_addr(eth_addr)) { | ||
255 | memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); | ||
256 | return 0; | ||
257 | } | ||
258 | return 1; | ||
259 | } | ||
260 | |||
261 | /* otherwise, see if the MAC address is in SPI flash */ | ||
262 | addr[0] = addr[1] = 0; | ||
263 | reg = 0; | ||
264 | key_valid = false; | ||
265 | i = 0; | ||
266 | while (1) { | ||
267 | if (atl1_spi_read(hw, i + 0x1f000, &control)) { | ||
268 | if (key_valid) { | ||
269 | if (reg == REG_MAC_STA_ADDR) | ||
270 | addr[0] = control; | ||
271 | else if (reg == (REG_MAC_STA_ADDR + 4)) | ||
272 | addr[1] = control; | ||
273 | key_valid = false; | ||
274 | } else if ((control & 0xff) == 0x5A) { | ||
275 | key_valid = true; | ||
276 | reg = (u16) (control >> 16); | ||
277 | } else | ||
278 | break; /* data end */ | ||
279 | } else | ||
280 | break; /* read error */ | ||
281 | i += 4; | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * The following 2 lines are the Attansic originals. Saving for posterity. | ||
286 | * *(u32 *) & eth_addr[2] = LONGSWAP(addr[0]); | ||
287 | * *(u16 *) & eth_addr[0] = SHORTSWAP(*(u16 *) & addr[1]); | ||
288 | */ | ||
289 | *(u32 *) & eth_addr[2] = swab32(addr[0]); | ||
290 | *(u16 *) & eth_addr[0] = swab16(*(u16 *) & addr[1]); | ||
291 | if (is_valid_ether_addr(eth_addr)) { | ||
292 | memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); | ||
293 | return 0; | ||
294 | } | ||
295 | return 1; | ||
296 | } | ||
297 | |||
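
atl1_get_permanent_address() parses a simple key/value stream: a dword whose low byte is 0x5A is a key naming a register in its top 16 bits, the next dword read is the value for that register, and anything else ends the data. The standalone sketch below replays that parse over an in-memory array; the toy_eeprom contents and the TOY_MAC_STA_ADDR name are illustrative (the real offset, REG_MAC_STA_ADDR, is defined in atl1_hw.h).

#include <stdio.h>
#include <stdint.h>

#define TOY_MAC_STA_ADDR 0x1488u	/* illustrative register offset */

/* key dword (low byte 0x5A, register in top 16 bits) followed by value */
static const uint32_t toy_eeprom[] = {
	(TOY_MAC_STA_ADDR << 16) | 0x5A,	/* key: low MAC dword  */
	0x6AF600DC,				/* value               */
	((TOY_MAC_STA_ADDR + 4) << 16) | 0x5A,	/* key: high MAC word  */
	0x0000000B,				/* value               */
	0xFFFFFFFF,				/* invalid key: stop   */
};

int main(void)
{
	uint32_t addr[2] = { 0, 0 };
	uint16_t reg = 0;
	int key_valid = 0;
	size_t i;

	for (i = 0; i < sizeof(toy_eeprom) / sizeof(toy_eeprom[0]); i++) {
		uint32_t control = toy_eeprom[i];

		if (key_valid) {
			if (reg == TOY_MAC_STA_ADDR)
				addr[0] = control;
			else if (reg == TOY_MAC_STA_ADDR + 4)
				addr[1] = control;
			key_valid = 0;
		} else if ((control & 0xff) == 0x5A) {
			key_valid = 1;
			reg = (uint16_t)(control >> 16);
		} else {
			break;	/* end of data */
		}
	}
	printf("addr[0]=%#x addr[1]=%#x\n", addr[0], addr[1]);
	return 0;
}
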
298 | /* | ||
299 | * Reads the adapter's MAC address from the EEPROM | ||
300 | * hw - Struct containing variables accessed by shared code | ||
301 | */ | ||
302 | s32 atl1_read_mac_addr(struct atl1_hw *hw) | ||
303 | { | ||
304 | u16 i; | ||
305 | |||
306 | if (atl1_get_permanent_address(hw)) | ||
307 | random_ether_addr(hw->perm_mac_addr); | ||
308 | |||
309 | for (i = 0; i < ETH_ALEN; i++) | ||
310 | hw->mac_addr[i] = hw->perm_mac_addr[i]; | ||
311 | return ATL1_SUCCESS; | ||
312 | } | ||
313 | |||
314 | /* | ||
315 | * Hashes an address to determine its location in the multicast table | ||
316 | * hw - Struct containing variables accessed by shared code | ||
317 | * mc_addr - the multicast address to hash | ||
318 | * | ||
319 | * atl1_hash_mc_addr | ||
320 | * purpose | ||
321 | * compute the hash value for a multicast address | ||
322 | * hash calculation: | ||
323 | * 1. calculate the 32-bit CRC of the multicast address | ||
324 | * 2. reverse the CRC, MSB to LSB | ||
325 | */ | ||
326 | u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr) | ||
327 | { | ||
328 | u32 crc32, value = 0; | ||
329 | int i; | ||
330 | |||
331 | crc32 = ether_crc_le(6, mc_addr); | ||
332 | crc32 = ~crc32; | ||
333 | for (i = 0; i < 32; i++) | ||
334 | value |= (((crc32 >> i) & 1) << (31 - i)); | ||
335 | |||
336 | return value; | ||
337 | } | ||
338 | |||
339 | /* | ||
340 | * Sets the bit in the multicast table corresponding to the hash value. | ||
341 | * hw - Struct containing variables accessed by shared code | ||
342 | * hash_value - Multicast address hash value | ||
343 | */ | ||
344 | void atl1_hash_set(struct atl1_hw *hw, u32 hash_value) | ||
345 | { | ||
346 | u32 hash_bit, hash_reg; | ||
347 | u32 mta; | ||
348 | |||
349 | /* | ||
350 | * The HASH Table is a register array of 2 32-bit registers. | ||
351 | * It is treated like an array of 64 bits. We want to set | ||
352 | * bit BitArray[hash_value]. So we figure out what register | ||
353 | * the bit is in, read it, OR in the new bit, then write | ||
354 | * back the new value. The register is determined by the | ||
355 | * upper 7 bits of the hash value and the bit within that | ||
356 | * register are determined by the lower 5 bits of the value. | ||
357 | */ | ||
358 | hash_reg = (hash_value >> 31) & 0x1; | ||
359 | hash_bit = (hash_value >> 26) & 0x1F; | ||
360 | mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); | ||
361 | mta |= (1 << hash_bit); | ||
362 | iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); | ||
363 | } | ||
364 | |||
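
atl1_hash_mc_addr() bit-reverses the complemented CRC, and atl1_hash_set() then uses the top bit of the result to pick one of the two hash registers and bits 30:26 to pick the bit inside it. The standalone sketch below shows that arithmetic, using an arbitrary stand-in CRC value rather than a real ether_crc_le() result.

#include <stdio.h>
#include <stdint.h>

/* bit-reverse a 32-bit value, mirroring the loop in atl1_hash_mc_addr() */
static uint32_t bit_reverse32(uint32_t x)
{
	uint32_t value = 0;
	int i;

	for (i = 0; i < 32; i++)
		value |= ((x >> i) & 1) << (31 - i);
	return value;
}

int main(void)
{
	/* stand-in for ~ether_crc_le(6, mc_addr); any 32-bit value works */
	uint32_t crc = 0x12345678;
	uint32_t hash = bit_reverse32(crc);

	/* same split as atl1_hash_set(): top bit picks one of the two
	 * 32-bit hash registers, bits 30:26 pick the bit inside it */
	uint32_t hash_reg = (hash >> 31) & 0x1;
	uint32_t hash_bit = (hash >> 26) & 0x1F;

	printf("hash=%#010x reg=%u bit=%u\n", hash, hash_reg, hash_bit);
	return 0;
}
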
365 | /* | ||
366 | * Writes a value to a PHY register | ||
367 | * hw - Struct containing variables accessed by shared code | ||
368 | * reg_addr - address of the PHY register to write | ||
369 | * data - data to write to the PHY | ||
370 | */ | ||
371 | s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data) | ||
372 | { | ||
373 | int i; | ||
374 | u32 val; | ||
375 | |||
376 | val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | | ||
377 | (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | | ||
378 | MDIO_SUP_PREAMBLE | | ||
379 | MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; | ||
380 | iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); | ||
381 | ioread32(hw->hw_addr + REG_MDIO_CTRL); | ||
382 | |||
383 | for (i = 0; i < MDIO_WAIT_TIMES; i++) { | ||
384 | udelay(2); | ||
385 | val = ioread32(hw->hw_addr + REG_MDIO_CTRL); | ||
386 | if (!(val & (MDIO_START | MDIO_BUSY))) | ||
387 | break; | ||
388 | } | ||
389 | |||
390 | if (!(val & (MDIO_START | MDIO_BUSY))) | ||
391 | return ATL1_SUCCESS; | ||
392 | |||
393 | return ATL1_ERR_PHY; | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * Take the L001's PHY out of power-saving state (hardware bug) | ||
398 | * hw - Struct containing variables accessed by shared code | ||
399 | * At power-on, the L001's PHY is always in power-saving state, | ||
400 | * which forbids gigabit link. | ||
401 | */ | ||
402 | static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) | ||
403 | { | ||
404 | s32 ret; | ||
405 | ret = atl1_write_phy_reg(hw, 29, 0x0029); | ||
406 | if (ret) | ||
407 | return ret; | ||
408 | return atl1_write_phy_reg(hw, 30, 0); | ||
409 | } | ||
410 | |||
411 | /* | ||
412 | *TODO: do something or get rid of this | ||
413 | */ | ||
414 | s32 atl1_phy_enter_power_saving(struct atl1_hw *hw) | ||
415 | { | ||
416 | /* s32 ret_val; | ||
417 | * u16 phy_data; | ||
418 | */ | ||
419 | |||
420 | /* | ||
421 | ret_val = atl1_write_phy_reg(hw, ...); | ||
422 | ret_val = atl1_write_phy_reg(hw, ...); | ||
423 | .... | ||
424 | */ | ||
425 | return ATL1_SUCCESS; | ||
426 | } | ||
427 | |||
428 | /* | ||
429 | * Resets the PHY and makes all configuration take effect | ||
430 | * hw - Struct containing variables accessed by shared code | ||
431 | * | ||
432 | * Sets bits 15 and 12 of the MII Control register (for the F001 bug) | ||
433 | */ | ||
434 | static s32 atl1_phy_reset(struct atl1_hw *hw) | ||
435 | { | ||
436 | s32 ret_val; | ||
437 | u16 phy_data; | ||
438 | |||
439 | if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || | ||
440 | hw->media_type == MEDIA_TYPE_1000M_FULL) | ||
441 | phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; | ||
442 | else { | ||
443 | switch (hw->media_type) { | ||
444 | case MEDIA_TYPE_100M_FULL: | ||
445 | phy_data = | ||
446 | MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | | ||
447 | MII_CR_RESET; | ||
448 | break; | ||
449 | case MEDIA_TYPE_100M_HALF: | ||
450 | phy_data = MII_CR_SPEED_100 | MII_CR_RESET; | ||
451 | break; | ||
452 | case MEDIA_TYPE_10M_FULL: | ||
453 | phy_data = | ||
454 | MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; | ||
455 | break; | ||
456 | default: /* MEDIA_TYPE_10M_HALF: */ | ||
457 | phy_data = MII_CR_SPEED_10 | MII_CR_RESET; | ||
458 | break; | ||
459 | } | ||
460 | } | ||
461 | |||
462 | ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data); | ||
463 | if (ret_val) { | ||
464 | u32 val; | ||
465 | int i; | ||
466 | /* pcie serdes link may be down! */ | ||
467 | printk(KERN_DEBUG "%s: autoneg caused pcie phy link down\n", | ||
468 | atl1_driver_name); | ||
469 | |||
470 | for (i = 0; i < 25; i++) { | ||
471 | msleep(1); | ||
472 | val = ioread32(hw->hw_addr + REG_MDIO_CTRL); | ||
473 | if (!(val & (MDIO_START | MDIO_BUSY))) | ||
474 | break; | ||
475 | } | ||
476 | |||
477 | if ((val & (MDIO_START | MDIO_BUSY)) != 0) { | ||
478 | printk(KERN_WARNING | ||
479 | "%s: pcie link down at least for 25ms\n", | ||
480 | atl1_driver_name); | ||
481 | return ret_val; | ||
482 | } | ||
483 | } | ||
484 | return ATL1_SUCCESS; | ||
485 | } | ||
486 | |||
487 | /* | ||
488 | * Configures PHY autoneg and flow control advertisement settings | ||
489 | * hw - Struct containing variables accessed by shared code | ||
490 | */ | ||
491 | s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) | ||
492 | { | ||
493 | s32 ret_val; | ||
494 | s16 mii_autoneg_adv_reg; | ||
495 | s16 mii_1000t_ctrl_reg; | ||
496 | |||
497 | /* Read the MII Auto-Neg Advertisement Register (Address 4). */ | ||
498 | mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; | ||
499 | |||
500 | /* Read the MII 1000Base-T Control Register (Address 9). */ | ||
501 | mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK; | ||
502 | |||
503 | /* | ||
504 | * First we clear all the 10/100 mb speed bits in the Auto-Neg | ||
505 | * Advertisement Register (Address 4) and the 1000 mb speed bits in | ||
506 | * the 1000Base-T Control Register (Address 9). | ||
507 | */ | ||
508 | mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; | ||
509 | mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK; | ||
510 | |||
511 | /* | ||
512 | * Need to parse media_type and set up | ||
513 | * the appropriate PHY registers. | ||
514 | */ | ||
515 | switch (hw->media_type) { | ||
516 | case MEDIA_TYPE_AUTO_SENSOR: | ||
517 | mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | | ||
518 | MII_AR_10T_FD_CAPS | | ||
519 | MII_AR_100TX_HD_CAPS | | ||
520 | MII_AR_100TX_FD_CAPS); | ||
521 | mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS; | ||
522 | break; | ||
523 | |||
524 | case MEDIA_TYPE_1000M_FULL: | ||
525 | mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS; | ||
526 | break; | ||
527 | |||
528 | case MEDIA_TYPE_100M_FULL: | ||
529 | mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; | ||
530 | break; | ||
531 | |||
532 | case MEDIA_TYPE_100M_HALF: | ||
533 | mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; | ||
534 | break; | ||
535 | |||
536 | case MEDIA_TYPE_10M_FULL: | ||
537 | mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; | ||
538 | break; | ||
539 | |||
540 | default: | ||
541 | mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; | ||
542 | break; | ||
543 | } | ||
544 | |||
545 | /* flow control fixed to enable all */ | ||
546 | mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); | ||
547 | |||
548 | hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; | ||
549 | hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; | ||
550 | |||
551 | ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); | ||
552 | if (ret_val) | ||
553 | return ret_val; | ||
554 | |||
555 | ret_val = atl1_write_phy_reg(hw, MII_AT001_CR, mii_1000t_ctrl_reg); | ||
556 | if (ret_val) | ||
557 | return ret_val; | ||
558 | |||
559 | return ATL1_SUCCESS; | ||
560 | } | ||
561 | |||
562 | /* | ||
563 | * Configures link settings. | ||
564 | * hw - Struct containing variables accessed by shared code | ||
565 | * Assumes the hardware has previously been reset and the | ||
566 | * transmitter and receiver are not enabled. | ||
567 | */ | ||
568 | static s32 atl1_setup_link(struct atl1_hw *hw) | ||
569 | { | ||
570 | s32 ret_val; | ||
571 | |||
572 | /* | ||
573 | * Options: | ||
574 | * PHY will advertise value(s) parsed from | ||
575 | * autoneg_advertised and fc | ||
576 | * regardless of the autoneg setting; we do not wait for the link result. | ||
577 | */ | ||
578 | ret_val = atl1_phy_setup_autoneg_adv(hw); | ||
579 | if (ret_val) { | ||
580 | printk(KERN_DEBUG "%s: error setting up autonegotiation\n", | ||
581 | atl1_driver_name); | ||
582 | return ret_val; | ||
583 | } | ||
584 | /* SW reset, then enable auto-negotiation if needed */ | ||
585 | ret_val = atl1_phy_reset(hw); | ||
586 | if (ret_val) { | ||
587 | printk(KERN_DEBUG "%s: error resetting the phy\n", | ||
588 | atl1_driver_name); | ||
589 | return ret_val; | ||
590 | } | ||
591 | hw->phy_configured = true; | ||
592 | return ret_val; | ||
593 | } | ||
594 | |||
595 | static struct atl1_spi_flash_dev flash_table[] = { | ||
596 | /* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */ | ||
597 | {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62}, | ||
598 | {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60}, | ||
599 | {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7}, | ||
600 | }; | ||
601 | |||
602 | static void atl1_init_flash_opcode(struct atl1_hw *hw) | ||
603 | { | ||
604 | if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) | ||
605 | hw->flash_vendor = 0; /* ATMEL */ | ||
606 | |||
607 | /* Init OP table */ | ||
608 | iowrite8(flash_table[hw->flash_vendor].cmd_program, | ||
609 | hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM); | ||
610 | iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase, | ||
611 | hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE); | ||
612 | iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase, | ||
613 | hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE); | ||
614 | iowrite8(flash_table[hw->flash_vendor].cmd_rdid, | ||
615 | hw->hw_addr + REG_SPI_FLASH_OP_RDID); | ||
616 | iowrite8(flash_table[hw->flash_vendor].cmd_wren, | ||
617 | hw->hw_addr + REG_SPI_FLASH_OP_WREN); | ||
618 | iowrite8(flash_table[hw->flash_vendor].cmd_rdsr, | ||
619 | hw->hw_addr + REG_SPI_FLASH_OP_RDSR); | ||
620 | iowrite8(flash_table[hw->flash_vendor].cmd_wrsr, | ||
621 | hw->hw_addr + REG_SPI_FLASH_OP_WRSR); | ||
622 | iowrite8(flash_table[hw->flash_vendor].cmd_read, | ||
623 | hw->hw_addr + REG_SPI_FLASH_OP_READ); | ||
624 | } | ||
625 | |||
626 | /* | ||
627 | * Performs basic configuration of the adapter. | ||
628 | * hw - Struct containing variables accessed by shared code | ||
629 | * Assumes that the controller has previously been reset and is in a | ||
630 | * post-reset uninitialized state. Initializes the multicast table | ||
631 | * and calls routines to set up the link. | ||
632 | * Leaves the transmit and receive units disabled and uninitialized. | ||
633 | */ | ||
634 | s32 atl1_init_hw(struct atl1_hw *hw) | ||
635 | { | ||
636 | u32 ret_val = 0; | ||
637 | |||
638 | /* Zero out the Multicast HASH table */ | ||
639 | iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); | ||
640 | /* clear the old settings from the multicast hash table */ | ||
641 | iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); | ||
642 | |||
643 | atl1_init_flash_opcode(hw); | ||
644 | |||
645 | if (!hw->phy_configured) { | ||
646 | /* enable GPHY LinkChange Interrupt */ | ||
647 | ret_val = atl1_write_phy_reg(hw, 18, 0xC00); | ||
648 | if (ret_val) | ||
649 | return ret_val; | ||
650 | /* make PHY out of power-saving state */ | ||
651 | ret_val = atl1_phy_leave_power_saving(hw); | ||
652 | if (ret_val) | ||
653 | return ret_val; | ||
654 | /* Call a subroutine to configure the link */ | ||
655 | ret_val = atl1_setup_link(hw); | ||
656 | } | ||
657 | return ret_val; | ||
658 | } | ||
659 | |||
660 | /* | ||
661 | * Detects the current speed and duplex settings of the hardware. | ||
662 | * hw - Struct containing variables accessed by shared code | ||
663 | * speed - Speed of the connection | ||
664 | * duplex - Duplex setting of the connection | ||
665 | */ | ||
666 | s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) | ||
667 | { | ||
668 | s32 ret_val; | ||
669 | u16 phy_data; | ||
670 | |||
671 | /* Read PHY-Specific Status Register (register 17) */ | ||
672 | ret_val = atl1_read_phy_reg(hw, MII_AT001_PSSR, &phy_data); | ||
673 | if (ret_val) | ||
674 | return ret_val; | ||
675 | |||
676 | if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED)) | ||
677 | return ATL1_ERR_PHY_RES; | ||
678 | |||
679 | switch (phy_data & MII_AT001_PSSR_SPEED) { | ||
680 | case MII_AT001_PSSR_1000MBS: | ||
681 | *speed = SPEED_1000; | ||
682 | break; | ||
683 | case MII_AT001_PSSR_100MBS: | ||
684 | *speed = SPEED_100; | ||
685 | break; | ||
686 | case MII_AT001_PSSR_10MBS: | ||
687 | *speed = SPEED_10; | ||
688 | break; | ||
689 | default: | ||
690 | printk(KERN_DEBUG "%s: error getting speed\n", | ||
691 | atl1_driver_name); | ||
692 | return ATL1_ERR_PHY_SPEED; | ||
693 | break; | ||
694 | } | ||
695 | if (phy_data & MII_AT001_PSSR_DPLX) | ||
696 | *duplex = FULL_DUPLEX; | ||
697 | else | ||
698 | *duplex = HALF_DUPLEX; | ||
699 | |||
700 | return ATL1_SUCCESS; | ||
701 | } | ||
702 | |||
703 | void atl1_set_mac_addr(struct atl1_hw *hw) | ||
704 | { | ||
705 | u32 value; | ||
706 | /* | ||
707 | * 00-0B-6A-F6-00-DC | ||
708 | * 0: 6AF600DC 1: 000B | ||
709 | * low dword | ||
710 | */ | ||
711 | value = (((u32) hw->mac_addr[2]) << 24) | | ||
712 | (((u32) hw->mac_addr[3]) << 16) | | ||
713 | (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); | ||
714 | iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); | ||
715 | /* high dword */ | ||
716 | value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); | ||
717 | iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); | ||
718 | } | ||
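
atl1_set_mac_addr() packs the station address into two dwords exactly as the comment's example suggests. A standalone check of that packing for the example address 00-0B-6A-F6-00-DC:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* the example address from the driver comment: 00-0B-6A-F6-00-DC */
	uint8_t mac[6] = { 0x00, 0x0B, 0x6A, 0xF6, 0x00, 0xDC };

	/* low dword: bytes 2..5, as atl1_set_mac_addr() packs them */
	uint32_t lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
		      ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
	/* high dword: bytes 0..1 */
	uint32_t hi = ((uint32_t)mac[0] << 8) | (uint32_t)mac[1];

	printf("low=%#010x high=%#06x\n", lo, hi);	/* 0x6af600dc, 0x000b */
	return 0;
}
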
diff --git a/drivers/net/atl1/atl1_hw.h b/drivers/net/atl1/atl1_hw.h new file mode 100644 index 000000000000..100c09c66e64 --- /dev/null +++ b/drivers/net/atl1/atl1_hw.h | |||
@@ -0,0 +1,951 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. | ||
3 | * Copyright(c) 2006 Chris Snook <csnook@redhat.com> | ||
4 | * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> | ||
5 | * | ||
6 | * Derived from Intel e1000 driver | ||
7 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along with | ||
20 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
21 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
22 | * | ||
23 | * There are a lot of defines in here that are unused and/or have cryptic | ||
24 | * names. Please leave them alone, as they're the closest thing we have | ||
25 | * to a spec from Attansic at present. *ahem* -- CHS | ||
26 | */ | ||
27 | |||
28 | #ifndef _ATL1_HW_H_ | ||
29 | #define _ATL1_HW_H_ | ||
30 | |||
31 | #include <linux/types.h> | ||
32 | #include <linux/mii.h> | ||
33 | |||
34 | struct atl1_adapter; | ||
35 | struct atl1_hw; | ||
36 | |||
37 | /* function prototypes needed by multiple files */ | ||
38 | s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw); | ||
39 | s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data); | ||
40 | s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex); | ||
41 | s32 atl1_read_mac_addr(struct atl1_hw *hw); | ||
42 | s32 atl1_init_hw(struct atl1_hw *hw); | ||
43 | s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex); | ||
44 | s32 atl1_set_speed_and_duplex(struct atl1_hw *hw, u16 speed, u16 duplex); | ||
45 | u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr); | ||
46 | void atl1_hash_set(struct atl1_hw *hw, u32 hash_value); | ||
47 | s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data); | ||
48 | void atl1_set_mac_addr(struct atl1_hw *hw); | ||
49 | s32 atl1_phy_enter_power_saving(struct atl1_hw *hw); | ||
50 | s32 atl1_reset_hw(struct atl1_hw *hw); | ||
51 | void atl1_check_options(struct atl1_adapter *adapter); | ||
52 | |||
53 | /* register definitions */ | ||
54 | #define REG_PCIE_CAP_LIST 0x58 | ||
55 | |||
56 | #define REG_VPD_CAP 0x6C | ||
57 | #define VPD_CAP_ID_MASK 0xff | ||
58 | #define VPD_CAP_ID_SHIFT 0 | ||
59 | #define VPD_CAP_NEXT_PTR_MASK 0xFF | ||
60 | #define VPD_CAP_NEXT_PTR_SHIFT 8 | ||
61 | #define VPD_CAP_VPD_ADDR_MASK 0x7FFF | ||
62 | #define VPD_CAP_VPD_ADDR_SHIFT 16 | ||
63 | #define VPD_CAP_VPD_FLAG 0x80000000 | ||
64 | |||
65 | #define REG_VPD_DATA 0x70 | ||
66 | |||
67 | #define REG_SPI_FLASH_CTRL 0x200 | ||
68 | #define SPI_FLASH_CTRL_STS_NON_RDY 0x1 | ||
69 | #define SPI_FLASH_CTRL_STS_WEN 0x2 | ||
70 | #define SPI_FLASH_CTRL_STS_WPEN 0x80 | ||
71 | #define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF | ||
72 | #define SPI_FLASH_CTRL_DEV_STS_SHIFT 0 | ||
73 | #define SPI_FLASH_CTRL_INS_MASK 0x7 | ||
74 | #define SPI_FLASH_CTRL_INS_SHIFT 8 | ||
75 | #define SPI_FLASH_CTRL_START 0x800 | ||
76 | #define SPI_FLASH_CTRL_EN_VPD 0x2000 | ||
77 | #define SPI_FLASH_CTRL_LDSTART 0x8000 | ||
78 | #define SPI_FLASH_CTRL_CS_HI_MASK 0x3 | ||
79 | #define SPI_FLASH_CTRL_CS_HI_SHIFT 16 | ||
80 | #define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3 | ||
81 | #define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18 | ||
82 | #define SPI_FLASH_CTRL_CLK_LO_MASK 0x3 | ||
83 | #define SPI_FLASH_CTRL_CLK_LO_SHIFT 20 | ||
84 | #define SPI_FLASH_CTRL_CLK_HI_MASK 0x3 | ||
85 | #define SPI_FLASH_CTRL_CLK_HI_SHIFT 22 | ||
86 | #define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3 | ||
87 | #define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24 | ||
88 | #define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3 | ||
89 | #define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26 | ||
90 | #define SPI_FLASH_CTRL_WAIT_READY 0x10000000 | ||
91 | |||
92 | #define REG_SPI_ADDR 0x204 | ||
93 | |||
94 | #define REG_SPI_DATA 0x208 | ||
95 | |||
96 | #define REG_SPI_FLASH_CONFIG 0x20C | ||
97 | #define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF | ||
98 | #define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0 | ||
99 | #define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3 | ||
100 | #define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24 | ||
101 | #define SPI_FLASH_CONFIG_LD_EXIST 0x4000000 | ||
102 | |||
103 | #define REG_SPI_FLASH_OP_PROGRAM 0x210 | ||
104 | #define REG_SPI_FLASH_OP_SC_ERASE 0x211 | ||
105 | #define REG_SPI_FLASH_OP_CHIP_ERASE 0x212 | ||
106 | #define REG_SPI_FLASH_OP_RDID 0x213 | ||
107 | #define REG_SPI_FLASH_OP_WREN 0x214 | ||
108 | #define REG_SPI_FLASH_OP_RDSR 0x215 | ||
109 | #define REG_SPI_FLASH_OP_WRSR 0x216 | ||
110 | #define REG_SPI_FLASH_OP_READ 0x217 | ||
111 | |||
112 | #define REG_TWSI_CTRL 0x218 | ||
113 | #define TWSI_CTRL_LD_OFFSET_MASK 0xFF | ||
114 | #define TWSI_CTRL_LD_OFFSET_SHIFT 0 | ||
115 | #define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 | ||
116 | #define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 | ||
117 | #define TWSI_CTRL_SW_LDSTART 0x800 | ||
118 | #define TWSI_CTRL_HW_LDSTART 0x1000 | ||
119 | #define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F | ||
120 | #define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 | ||
121 | #define TWSI_CTRL_LD_EXIST 0x400000 | ||
122 | #define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 | ||
123 | #define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 | ||
124 | #define TWSI_CTRL_FREQ_SEL_100K 0 | ||
125 | #define TWSI_CTRL_FREQ_SEL_200K 1 | ||
126 | #define TWSI_CTRL_FREQ_SEL_300K 2 | ||
127 | #define TWSI_CTRL_FREQ_SEL_400K 3 | ||
128 | #define TWSI_CTRL_SMB_SLV_ADDR | ||
129 | #define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 | ||
130 | #define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 | ||
131 | |||
132 | #define REG_PCIE_DEV_MISC_CTRL 0x21C | ||
133 | #define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2 | ||
134 | #define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1 | ||
135 | #define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4 | ||
136 | #define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8 | ||
137 | #define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10 | ||
138 | |||
139 | /* Selene Master Control Register */ | ||
140 | #define REG_MASTER_CTRL 0x1400 | ||
141 | #define MASTER_CTRL_SOFT_RST 0x1 | ||
142 | #define MASTER_CTRL_MTIMER_EN 0x2 | ||
143 | #define MASTER_CTRL_ITIMER_EN 0x4 | ||
144 | #define MASTER_CTRL_MANUAL_INT 0x8 | ||
145 | #define MASTER_CTRL_REV_NUM_SHIFT 16 | ||
146 | #define MASTER_CTRL_REV_NUM_MASK 0xff | ||
147 | #define MASTER_CTRL_DEV_ID_SHIFT 24 | ||
148 | #define MASTER_CTRL_DEV_ID_MASK 0xff | ||
149 | |||
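
Most registers in this header follow the same _SHIFT/_MASK convention: shift the raw register value right, then mask, to extract a field. A standalone sketch using the MASTER_CTRL revision and device-ID fields; the master_ctrl sample value is made up.

#include <stdio.h>
#include <stdint.h>

/* SHIFT/MASK pairs copied from the definitions above */
#define MASTER_CTRL_REV_NUM_SHIFT 16
#define MASTER_CTRL_REV_NUM_MASK  0xff
#define MASTER_CTRL_DEV_ID_SHIFT  24
#define MASTER_CTRL_DEV_ID_MASK   0xff

int main(void)
{
	/* hypothetical raw value as read from REG_MASTER_CTRL */
	uint32_t master_ctrl = 0x10020006;

	uint32_t rev = (master_ctrl >> MASTER_CTRL_REV_NUM_SHIFT) &
		       MASTER_CTRL_REV_NUM_MASK;
	uint32_t dev = (master_ctrl >> MASTER_CTRL_DEV_ID_SHIFT) &
		       MASTER_CTRL_DEV_ID_MASK;

	printf("rev=%#x dev_id=%#x\n", rev, dev);	/* rev=0x2, dev_id=0x10 */
	return 0;
}
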
150 | /* Timer Initial Value Register */ | ||
151 | #define REG_MANUAL_TIMER_INIT 0x1404 | ||
152 | |||
153 | /* IRQ ModeratorTimer Initial Value Register */ | ||
154 | #define REG_IRQ_MODU_TIMER_INIT 0x1408 | ||
155 | |||
156 | #define REG_GPHY_ENABLE 0x140C | ||
157 | |||
158 | /* IRQ Anti-Lost Timer Initial Value Register */ | ||
159 | #define REG_CMBDISDMA_TIMER 0x140E | ||
160 | |||
161 | /* Block IDLE Status Register */ | ||
162 | #define REG_IDLE_STATUS 0x1410 | ||
163 | #define IDLE_STATUS_RXMAC 1 | ||
164 | #define IDLE_STATUS_TXMAC 2 | ||
165 | #define IDLE_STATUS_RXQ 4 | ||
166 | #define IDLE_STATUS_TXQ 8 | ||
167 | #define IDLE_STATUS_DMAR 0x10 | ||
168 | #define IDLE_STATUS_DMAW 0x20 | ||
169 | #define IDLE_STATUS_SMB 0x40 | ||
170 | #define IDLE_STATUS_CMB 0x80 | ||
171 | |||
172 | /* MDIO Control Register */ | ||
173 | #define REG_MDIO_CTRL 0x1414 | ||
174 | #define MDIO_DATA_MASK 0xffff | ||
175 | #define MDIO_DATA_SHIFT 0 | ||
176 | #define MDIO_REG_ADDR_MASK 0x1f | ||
177 | #define MDIO_REG_ADDR_SHIFT 16 | ||
178 | #define MDIO_RW 0x200000 | ||
179 | #define MDIO_SUP_PREAMBLE 0x400000 | ||
180 | #define MDIO_START 0x800000 | ||
181 | #define MDIO_CLK_SEL_SHIFT 24 | ||
182 | #define MDIO_CLK_25_4 0 | ||
183 | #define MDIO_CLK_25_6 2 | ||
184 | #define MDIO_CLK_25_8 3 | ||
185 | #define MDIO_CLK_25_10 4 | ||
186 | #define MDIO_CLK_25_14 5 | ||
187 | #define MDIO_CLK_25_20 6 | ||
188 | #define MDIO_CLK_25_28 7 | ||
189 | #define MDIO_BUSY 0x8000000 | ||
190 | #define MDIO_WAIT_TIMES 30 | ||
191 | |||
192 | /* MII PHY Status Register */ | ||
193 | #define REG_PHY_STATUS 0x1418 | ||
194 | |||
195 | /* BIST Control and Status Register0 (for the Packet Memory) */ | ||
196 | #define REG_BIST0_CTRL 0x141c | ||
197 | #define BIST0_NOW 0x1 | ||
198 | #define BIST0_SRAM_FAIL 0x2 | ||
199 | #define BIST0_FUSE_FLAG 0x4 | ||
200 | #define REG_BIST1_CTRL 0x1420 | ||
201 | #define BIST1_NOW 0x1 | ||
202 | #define BIST1_SRAM_FAIL 0x2 | ||
203 | #define BIST1_FUSE_FLAG 0x4 | ||
204 | |||
205 | /* MAC Control Register */ | ||
206 | #define REG_MAC_CTRL 0x1480 | ||
207 | #define MAC_CTRL_TX_EN 1 | ||
208 | #define MAC_CTRL_RX_EN 2 | ||
209 | #define MAC_CTRL_TX_FLOW 4 | ||
210 | #define MAC_CTRL_RX_FLOW 8 | ||
211 | #define MAC_CTRL_LOOPBACK 0x10 | ||
212 | #define MAC_CTRL_DUPLX 0x20 | ||
213 | #define MAC_CTRL_ADD_CRC 0x40 | ||
214 | #define MAC_CTRL_PAD 0x80 | ||
215 | #define MAC_CTRL_LENCHK 0x100 | ||
216 | #define MAC_CTRL_HUGE_EN 0x200 | ||
217 | #define MAC_CTRL_PRMLEN_SHIFT 10 | ||
218 | #define MAC_CTRL_PRMLEN_MASK 0xf | ||
219 | #define MAC_CTRL_RMV_VLAN 0x4000 | ||
220 | #define MAC_CTRL_PROMIS_EN 0x8000 | ||
221 | #define MAC_CTRL_TX_PAUSE 0x10000 | ||
222 | #define MAC_CTRL_SCNT 0x20000 | ||
223 | #define MAC_CTRL_SRST_TX 0x40000 | ||
224 | #define MAC_CTRL_TX_SIMURST 0x80000 | ||
225 | #define MAC_CTRL_SPEED_SHIFT 20 | ||
226 | #define MAC_CTRL_SPEED_MASK 0x300000 | ||
227 | #define MAC_CTRL_SPEED_1000 2 | ||
228 | #define MAC_CTRL_SPEED_10_100 1 | ||
229 | #define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 | ||
230 | #define MAC_CTRL_TX_HUGE 0x800000 | ||
231 | #define MAC_CTRL_RX_CHKSUM_EN 0x1000000 | ||
232 | #define MAC_CTRL_MC_ALL_EN 0x2000000 | ||
233 | #define MAC_CTRL_BC_EN 0x4000000 | ||
234 | #define MAC_CTRL_DBG 0x8000000 | ||
235 | |||
236 | /* MAC IPG/IFG Control Register */ | ||
237 | #define REG_MAC_IPG_IFG 0x1484 | ||
238 | #define MAC_IPG_IFG_IPGT_SHIFT 0 | ||
239 | #define MAC_IPG_IFG_IPGT_MASK 0x7f | ||
240 | #define MAC_IPG_IFG_MIFG_SHIFT 8 | ||
241 | #define MAC_IPG_IFG_MIFG_MASK 0xff | ||
242 | #define MAC_IPG_IFG_IPGR1_SHIFT 16 | ||
243 | #define MAC_IPG_IFG_IPGR1_MASK 0x7f | ||
244 | #define MAC_IPG_IFG_IPGR2_SHIFT 24 | ||
245 | #define MAC_IPG_IFG_IPGR2_MASK 0x7f | ||
246 | |||
247 | /* MAC STATION ADDRESS */ | ||
248 | #define REG_MAC_STA_ADDR 0x1488 | ||
249 | |||
250 | /* Hash table for multicast address */ | ||
251 | #define REG_RX_HASH_TABLE 0x1490 | ||
252 | |||
253 | /* MAC Half-Duplex Control Register */ | ||
254 | #define REG_MAC_HALF_DUPLX_CTRL 0x1498 | ||
255 | #define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 | ||
256 | #define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff | ||
257 | #define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 | ||
258 | #define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf | ||
259 | #define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 | ||
260 | #define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 | ||
261 | #define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 | ||
262 | #define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 | ||
263 | #define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 | ||
264 | #define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf | ||
265 | #define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 | ||
266 | #define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf | ||
267 | |||
268 | /* Maximum Frame Length Control Register */ | ||
269 | #define REG_MTU 0x149c | ||
270 | |||
271 | /* Wake-On-Lan control register */ | ||
272 | #define REG_WOL_CTRL 0x14a0 | ||
273 | #define WOL_PATTERN_EN 0x00000001 | ||
274 | #define WOL_PATTERN_PME_EN 0x00000002 | ||
275 | #define WOL_MAGIC_EN 0x00000004 | ||
276 | #define WOL_MAGIC_PME_EN 0x00000008 | ||
277 | #define WOL_LINK_CHG_EN 0x00000010 | ||
278 | #define WOL_LINK_CHG_PME_EN 0x00000020 | ||
279 | #define WOL_PATTERN_ST 0x00000100 | ||
280 | #define WOL_MAGIC_ST 0x00000200 | ||
281 | #define WOL_LINKCHG_ST 0x00000400 | ||
282 | #define WOL_CLK_SWITCH_EN 0x00008000 | ||
283 | #define WOL_PT0_EN 0x00010000 | ||
284 | #define WOL_PT1_EN 0x00020000 | ||
285 | #define WOL_PT2_EN 0x00040000 | ||
286 | #define WOL_PT3_EN 0x00080000 | ||
287 | #define WOL_PT4_EN 0x00100000 | ||
288 | #define WOL_PT5_EN 0x00200000 | ||
289 | #define WOL_PT6_EN 0x00400000 | ||
290 | |||
291 | /* WOL Length ( 2 DWORD ) */ | ||
292 | #define REG_WOL_PATTERN_LEN 0x14a4 | ||
293 | #define WOL_PT_LEN_MASK 0x7f | ||
294 | #define WOL_PT0_LEN_SHIFT 0 | ||
295 | #define WOL_PT1_LEN_SHIFT 8 | ||
296 | #define WOL_PT2_LEN_SHIFT 16 | ||
297 | #define WOL_PT3_LEN_SHIFT 24 | ||
298 | #define WOL_PT4_LEN_SHIFT 0 | ||
299 | #define WOL_PT5_LEN_SHIFT 8 | ||
300 | #define WOL_PT6_LEN_SHIFT 16 | ||
301 | |||
302 | /* Internal SRAM Partition Register */ | ||
303 | #define REG_SRAM_RFD_ADDR 0x1500 | ||
304 | #define REG_SRAM_RFD_LEN (REG_SRAM_RFD_ADDR+ 4) | ||
305 | #define REG_SRAM_RRD_ADDR (REG_SRAM_RFD_ADDR+ 8) | ||
306 | #define REG_SRAM_RRD_LEN (REG_SRAM_RFD_ADDR+12) | ||
307 | #define REG_SRAM_TPD_ADDR (REG_SRAM_RFD_ADDR+16) | ||
308 | #define REG_SRAM_TPD_LEN (REG_SRAM_RFD_ADDR+20) | ||
309 | #define REG_SRAM_TRD_ADDR (REG_SRAM_RFD_ADDR+24) | ||
310 | #define REG_SRAM_TRD_LEN (REG_SRAM_RFD_ADDR+28) | ||
311 | #define REG_SRAM_RXF_ADDR (REG_SRAM_RFD_ADDR+32) | ||
312 | #define REG_SRAM_RXF_LEN (REG_SRAM_RFD_ADDR+36) | ||
313 | #define REG_SRAM_TXF_ADDR (REG_SRAM_RFD_ADDR+40) | ||
314 | #define REG_SRAM_TXF_LEN (REG_SRAM_RFD_ADDR+44) | ||
315 | #define REG_SRAM_TCPH_PATH_ADDR (REG_SRAM_RFD_ADDR+48) | ||
316 | #define SRAM_TCPH_ADDR_MASK 0x0fff | ||
317 | #define SRAM_TCPH_ADDR_SHIFT 0 | ||
318 | #define SRAM_PATH_ADDR_MASK 0x0fff | ||
319 | #define SRAM_PATH_ADDR_SHIFT 16 | ||
320 | |||
321 | /* Load Ptr Register */ | ||
322 | #define REG_LOAD_PTR (REG_SRAM_RFD_ADDR+52) | ||
323 | |||
324 | /* Descriptor Control register */ | ||
325 | #define REG_DESC_BASE_ADDR_HI 0x1540 | ||
326 | #define REG_DESC_RFD_ADDR_LO (REG_DESC_BASE_ADDR_HI+4) | ||
327 | #define REG_DESC_RRD_ADDR_LO (REG_DESC_BASE_ADDR_HI+8) | ||
328 | #define REG_DESC_TPD_ADDR_LO (REG_DESC_BASE_ADDR_HI+12) | ||
329 | #define REG_DESC_CMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+16) | ||
330 | #define REG_DESC_SMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+20) | ||
331 | #define REG_DESC_RFD_RRD_RING_SIZE (REG_DESC_BASE_ADDR_HI+24) | ||
332 | #define DESC_RFD_RING_SIZE_MASK 0x7ff | ||
333 | #define DESC_RFD_RING_SIZE_SHIFT 0 | ||
334 | #define DESC_RRD_RING_SIZE_MASK 0x7ff | ||
335 | #define DESC_RRD_RING_SIZE_SHIFT 16 | ||
336 | #define REG_DESC_TPD_RING_SIZE (REG_DESC_BASE_ADDR_HI+28) | ||
337 | #define DESC_TPD_RING_SIZE_MASK 0x3ff | ||
338 | #define DESC_TPD_RING_SIZE_SHIFT 0 | ||
339 | |||
340 | /* TXQ Control Register */ | ||
341 | #define REG_TXQ_CTRL 0x1580 | ||
342 | #define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0 | ||
343 | #define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1f | ||
344 | #define TXQ_CTRL_EN 0x20 | ||
345 | #define TXQ_CTRL_ENH_MODE 0x40 | ||
346 | #define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8 | ||
347 | #define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3f | ||
348 | #define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 | ||
349 | #define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff | ||
350 | |||
351 | /* Jumbo packet Threshold for task offload */ | ||
352 | #define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584 | ||
353 | #define TX_JUMBO_TASK_TH_MASK 0x7ff | ||
354 | #define TX_JUMBO_TASK_TH_SHIFT 0 | ||
355 | #define TX_TPD_MIN_IPG_MASK 0x1f | ||
356 | #define TX_TPD_MIN_IPG_SHIFT 16 | ||
357 | |||
358 | /* RXQ Control Register */ | ||
359 | #define REG_RXQ_CTRL 0x15a0 | ||
360 | #define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0 | ||
361 | #define RXQ_CTRL_RFD_BURST_NUM_MASK 0xff | ||
362 | #define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8 | ||
363 | #define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xff | ||
364 | #define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16 | ||
365 | #define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1f | ||
366 | #define RXQ_CTRL_CUT_THRU_EN 0x40000000 | ||
367 | #define RXQ_CTRL_EN 0x80000000 | ||
368 | |||
369 | /* Rx jumbo packet threshold and rrd retirement timer */ | ||
370 | #define REG_RXQ_JMBOSZ_RRDTIM (REG_RXQ_CTRL+ 4) | ||
371 | #define RXQ_JMBOSZ_TH_MASK 0x7ff | ||
372 | #define RXQ_JMBOSZ_TH_SHIFT 0 | ||
373 | #define RXQ_JMBO_LKAH_MASK 0xf | ||
374 | #define RXQ_JMBO_LKAH_SHIFT 11 | ||
375 | #define RXQ_RRD_TIMER_MASK 0xffff | ||
376 | #define RXQ_RRD_TIMER_SHIFT 16 | ||
377 | |||
378 | /* RFD flow control register */ | ||
379 | #define REG_RXQ_RXF_PAUSE_THRESH (REG_RXQ_CTRL+ 8) | ||
380 | #define RXQ_RXF_PAUSE_TH_HI_SHIFT 16 | ||
381 | #define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff | ||
382 | #define RXQ_RXF_PAUSE_TH_LO_SHIFT 0 | ||
383 | #define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff | ||
384 | |||
385 | /* RRD flow control register */ | ||
386 | #define REG_RXQ_RRD_PAUSE_THRESH (REG_RXQ_CTRL+12) | ||
387 | #define RXQ_RRD_PAUSE_TH_HI_SHIFT 0 | ||
388 | #define RXQ_RRD_PAUSE_TH_HI_MASK 0xfff | ||
389 | #define RXQ_RRD_PAUSE_TH_LO_SHIFT 16 | ||
390 | #define RXQ_RRD_PAUSE_TH_LO_MASK 0xfff | ||
391 | |||
392 | /* DMA Engine Control Register */ | ||
393 | #define REG_DMA_CTRL 0x15c0 | ||
394 | #define DMA_CTRL_DMAR_IN_ORDER 0x1 | ||
395 | #define DMA_CTRL_DMAR_ENH_ORDER 0x2 | ||
396 | #define DMA_CTRL_DMAR_OUT_ORDER 0x4 | ||
397 | #define DMA_CTRL_RCB_VALUE 0x8 | ||
398 | #define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 | ||
399 | #define DMA_CTRL_DMAR_BURST_LEN_MASK 7 | ||
400 | #define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 | ||
401 | #define DMA_CTRL_DMAW_BURST_LEN_MASK 7 | ||
402 | #define DMA_CTRL_DMAR_EN 0x400 | ||
403 | #define DMA_CTRL_DMAW_EN 0x800 | ||
404 | |||
405 | /* CMB/SMB Control Register */ | ||
406 | #define REG_CSMB_CTRL 0x15d0 | ||
407 | #define CSMB_CTRL_CMB_NOW 1 | ||
408 | #define CSMB_CTRL_SMB_NOW 2 | ||
409 | #define CSMB_CTRL_CMB_EN 4 | ||
410 | #define CSMB_CTRL_SMB_EN 8 | ||
411 | |||
412 | /* CMB DMA Write Threshold Register */ | ||
413 | #define REG_CMB_WRITE_TH (REG_CSMB_CTRL+ 4) | ||
414 | #define CMB_RRD_TH_SHIFT 0 | ||
415 | #define CMB_RRD_TH_MASK 0x7ff | ||
416 | #define CMB_TPD_TH_SHIFT 16 | ||
417 | #define CMB_TPD_TH_MASK 0x7ff | ||
418 | |||
419 | /* RX/TX count-down timer to trigger CMB-write. 2us resolution. */ | ||
420 | #define REG_CMB_WRITE_TIMER (REG_CSMB_CTRL+ 8) | ||
421 | #define CMB_RX_TM_SHIFT 0 | ||
422 | #define CMB_RX_TM_MASK 0xffff | ||
423 | #define CMB_TX_TM_SHIFT 16 | ||
424 | #define CMB_TX_TM_MASK 0xffff | ||
425 | |||
426 | /* Number of packets received since the last CMB write */ | ||
427 | #define REG_CMB_RX_PKT_CNT (REG_CSMB_CTRL+12) | ||
428 | |||
429 | /* Number of packets transmitted since the last CMB write */ | ||
430 | #define REG_CMB_TX_PKT_CNT (REG_CSMB_CTRL+16) | ||
431 | |||
432 | /* SMB auto DMA timer register */ | ||
433 | #define REG_SMB_TIMER (REG_CSMB_CTRL+20) | ||
434 | |||
435 | /* Mailbox Register */ | ||
436 | #define REG_MAILBOX 0x15f0 | ||
437 | #define MB_RFD_PROD_INDX_SHIFT 0 | ||
438 | #define MB_RFD_PROD_INDX_MASK 0x7ff | ||
439 | #define MB_RRD_CONS_INDX_SHIFT 11 | ||
440 | #define MB_RRD_CONS_INDX_MASK 0x7ff | ||
441 | #define MB_TPD_PROD_INDX_SHIFT 22 | ||
442 | #define MB_TPD_PROD_INDX_MASK 0x3ff | ||
443 | |||
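The three producer/consumer indices share a single 32-bit mailbox register; a minimal sketch of the packing, using the shift/mask pairs above (the helper name is illustrative only; the same composition appears later in atl1_intr_rx() in atl1_main.c):

/* Illustrative only: pack the ring indices into one mailbox write. */
static inline u32 atl1_mailbox_value(u32 rfd_prod, u32 rrd_cons, u32 tpd_prod)
{
	return ((rfd_prod & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT) |
	       ((rrd_cons & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
	       ((tpd_prod & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT);
}
/* The value is then written with iowrite32(value, hw_addr + REG_MAILBOX). */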
444 | /* Interrupt Status Register */ | ||
445 | #define REG_ISR 0x1600 | ||
446 | #define ISR_SMB 1 | ||
447 | #define ISR_TIMER 2 | ||
448 | #define ISR_MANUAL 4 | ||
449 | #define ISR_RXF_OV 8 | ||
450 | #define ISR_RFD_UNRUN 0x10 | ||
451 | #define ISR_RRD_OV 0x20 | ||
452 | #define ISR_TXF_UNRUN 0x40 | ||
453 | #define ISR_LINK 0x80 | ||
454 | #define ISR_HOST_RFD_UNRUN 0x100 | ||
455 | #define ISR_HOST_RRD_OV 0x200 | ||
456 | #define ISR_DMAR_TO_RST 0x400 | ||
457 | #define ISR_DMAW_TO_RST 0x800 | ||
458 | #define ISR_GPHY 0x1000 | ||
459 | #define ISR_RX_PKT 0x10000 | ||
460 | #define ISR_TX_PKT 0x20000 | ||
461 | #define ISR_TX_DMA 0x40000 | ||
462 | #define ISR_RX_DMA 0x80000 | ||
463 | #define ISR_CMB_RX 0x100000 | ||
464 | #define ISR_CMB_TX 0x200000 | ||
465 | #define ISR_MAC_RX 0x400000 | ||
466 | #define ISR_MAC_TX 0x800000 | ||
467 | #define ISR_UR_DETECTED 0x1000000 | ||
468 | #define ISR_FERR_DETECTED 0x2000000 | ||
469 | #define ISR_NFERR_DETECTED 0x4000000 | ||
470 | #define ISR_CERR_DETECTED 0x8000000 | ||
471 | #define ISR_PHY_LINKDOWN 0x10000000 | ||
472 | #define ISR_DIS_SMB 0x20000000 | ||
473 | #define ISR_DIS_DMA 0x40000000 | ||
474 | #define ISR_DIS_INT 0x80000000 | ||
475 | |||
476 | /* Interrupt Mask Register */ | ||
477 | #define REG_IMR 0x1604 | ||
478 | |||
479 | /* Normal Interrupt mask */ | ||
480 | #define IMR_NORMAL_MASK (\ | ||
481 | ISR_SMB |\ | ||
482 | ISR_GPHY |\ | ||
483 | ISR_PHY_LINKDOWN|\ | ||
484 | ISR_DMAR_TO_RST |\ | ||
485 | ISR_DMAW_TO_RST |\ | ||
486 | ISR_CMB_TX |\ | ||
487 | ISR_CMB_RX ) | ||
488 | |||
489 | /* Debug Interrupt Mask (enables all interrupts) */ | ||
490 | #define IMR_DEBUG_MASK (\ | ||
491 | ISR_SMB |\ | ||
492 | ISR_TIMER |\ | ||
493 | ISR_MANUAL |\ | ||
494 | ISR_RXF_OV |\ | ||
495 | ISR_RFD_UNRUN |\ | ||
496 | ISR_RRD_OV |\ | ||
497 | ISR_TXF_UNRUN |\ | ||
498 | ISR_LINK |\ | ||
499 | ISR_CMB_TX |\ | ||
500 | ISR_CMB_RX |\ | ||
501 | ISR_RX_PKT |\ | ||
502 | ISR_TX_PKT |\ | ||
503 | ISR_MAC_RX |\ | ||
504 | ISR_MAC_TX ) | ||
505 | |||
506 | /* RFD/RRD and TPD index registers */ | ||
507 | #define REG_RFD_RRD_IDX 0x1800 | ||
508 | #define REG_TPD_IDX 0x1804 | ||
509 | |||
510 | /* MII definition */ | ||
511 | /* PHY Common Register */ | ||
512 | #define MII_AT001_CR 0x09 | ||
513 | #define MII_AT001_SR 0x0A | ||
514 | #define MII_AT001_ESR 0x0F | ||
515 | #define MII_AT001_PSCR 0x10 | ||
516 | #define MII_AT001_PSSR 0x11 | ||
517 | |||
518 | /* PHY Control Register */ | ||
519 | #define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ | ||
520 | #define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ | ||
521 | #define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ | ||
522 | #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ | ||
523 | #define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ | ||
524 | #define MII_CR_POWER_DOWN 0x0800 /* Power down */ | ||
525 | #define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ | ||
526 | #define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ | ||
527 | #define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ | ||
528 | #define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ | ||
529 | #define MII_CR_SPEED_MASK 0x2040 | ||
530 | #define MII_CR_SPEED_1000 0x0040 | ||
531 | #define MII_CR_SPEED_100 0x2000 | ||
532 | #define MII_CR_SPEED_10 0x0000 | ||
533 | |||
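The forced-speed encoding splits across two non-adjacent control-register bits (bit 6 as MSB, bit 13 as LSB), which is why both a mask and three pre-combined values are defined above. A small hedged sketch of selecting the bits for a forced speed (the helper is illustrative only):

/* Illustrative only: choose the split speed-select bits for a forced speed. */
static inline u16 atl1_mii_speed_bits(int speed_mbps)
{
	switch (speed_mbps) {
	case 1000:
		return MII_CR_SPEED_1000;	/* MSB (bit 6) set, LSB (bit 13) clear */
	case 100:
		return MII_CR_SPEED_100;	/* LSB (bit 13) set, MSB (bit 6) clear */
	default:
		return MII_CR_SPEED_10;		/* both bits clear */
	}
}
/* The chosen bits would be merged into the PHY control register after
 * clearing MII_CR_SPEED_MASK. */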
534 | /* PHY Status Register */ | ||
535 | #define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ | ||
536 | #define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ | ||
537 | #define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ | ||
538 | #define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ | ||
539 | #define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ | ||
540 | #define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ | ||
541 | #define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ | ||
542 | #define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ | ||
543 | #define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ | ||
544 | #define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ | ||
545 | #define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ | ||
546 | #define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ | ||
547 | #define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ | ||
548 | #define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ | ||
549 | #define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ | ||
550 | |||
551 | /* Link partner ability register. */ | ||
552 | #define MII_LPA_SLCT 0x001f /* Same as advertise selector */ | ||
553 | #define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ | ||
554 | #define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ | ||
555 | #define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ | ||
556 | #define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ | ||
557 | #define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */ | ||
558 | #define MII_LPA_PAUSE 0x0400 /* PAUSE */ | ||
559 | #define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */ | ||
560 | #define MII_LPA_RFAULT 0x2000 /* Link partner faulted */ | ||
561 | #define MII_LPA_LPACK 0x4000 /* Link partner acked us */ | ||
562 | #define MII_LPA_NPAGE 0x8000 /* Next page bit */ | ||
563 | |||
564 | /* Autoneg Advertisement Register */ | ||
565 | #define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ | ||
566 | #define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ | ||
567 | #define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ | ||
568 | #define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ | ||
569 | #define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ | ||
570 | #define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ | ||
571 | #define MII_AR_PAUSE 0x0400 /* Pause operation desired */ | ||
572 | #define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ | ||
573 | #define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ | ||
574 | #define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ | ||
575 | #define MII_AR_SPEED_MASK 0x01E0 | ||
576 | #define MII_AR_DEFAULT_CAP_MASK 0x0DE0 | ||
577 | |||
578 | /* 1000BASE-T Control Register */ | ||
579 | #define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ | ||
580 | #define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ | ||
581 | #define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port, 0=DTE device */ | ||
582 | #define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master, 0=Configure PHY as Slave */ | ||
583 | #define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value, 0=Automatic Master/Slave config */ | ||
584 | #define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ | ||
585 | #define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ | ||
586 | #define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ | ||
587 | #define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ | ||
588 | #define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ | ||
589 | #define MII_AT001_CR_1000T_SPEED_MASK 0x0300 | ||
590 | #define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300 | ||
591 | |||
592 | /* 1000BASE-T Status Register */ | ||
593 | #define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ | ||
594 | #define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ | ||
595 | #define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ | ||
596 | #define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ | ||
597 | #define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ | ||
598 | #define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ | ||
599 | #define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12 | ||
600 | #define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13 | ||
601 | |||
602 | /* Extended Status Register */ | ||
603 | #define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ | ||
604 | #define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ | ||
605 | #define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ | ||
606 | #define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ | ||
607 | |||
608 | /* AT001 PHY Specific Control Register */ | ||
609 | #define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ | ||
610 | #define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ | ||
611 | #define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ | ||
612 | #define MII_AT001_PSCR_MAC_POWERDOWN 0x0008 | ||
613 | #define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, 0=CLK125 toggling */ | ||
614 | #define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5, Manual MDI configuration */ | ||
615 | #define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ | ||
616 | #define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ | ||
617 | #define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled all speeds. */ | ||
618 | #define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T RX Threshold), 0=Normal 10BASE-T RX Threshold */ | ||
619 | #define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ | ||
620 | #define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ | ||
621 | #define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ | ||
622 | #define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ | ||
623 | #define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1 | ||
624 | #define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5 | ||
625 | #define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 | ||
626 | |||
627 | /* AT001 PHY Specific Status Register */ | ||
628 | #define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ | ||
629 | #define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ | ||
630 | #define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ | ||
631 | #define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */ | ||
632 | #define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */ | ||
633 | #define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ | ||
634 | |||
635 | /* PCI Command Register Bit Definitions */ | ||
636 | #define PCI_REG_COMMAND 0x04 /* PCI Command Register */ | ||
637 | #define CMD_IO_SPACE 0x0001 | ||
638 | #define CMD_MEMORY_SPACE 0x0002 | ||
639 | #define CMD_BUS_MASTER 0x0004 | ||
640 | |||
641 | /* Wake Up Filter Control */ | ||
642 | #define ATL1_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ | ||
643 | #define ATL1_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ | ||
644 | #define ATL1_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ | ||
645 | #define ATL1_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ | ||
646 | #define ATL1_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ | ||
647 | |||
648 | /* Error Codes */ | ||
649 | #define ATL1_SUCCESS 0 | ||
650 | #define ATL1_ERR_EEPROM 1 | ||
651 | #define ATL1_ERR_PHY 2 | ||
652 | #define ATL1_ERR_CONFIG 3 | ||
653 | #define ATL1_ERR_PARAM 4 | ||
654 | #define ATL1_ERR_MAC_TYPE 5 | ||
655 | #define ATL1_ERR_PHY_TYPE 6 | ||
656 | #define ATL1_ERR_PHY_SPEED 7 | ||
657 | #define ATL1_ERR_PHY_RES 8 | ||
658 | |||
659 | #define SPEED_0 0xffff | ||
660 | #define SPEED_10 10 | ||
661 | #define SPEED_100 100 | ||
662 | #define SPEED_1000 1000 | ||
663 | #define HALF_DUPLEX 1 | ||
664 | #define FULL_DUPLEX 2 | ||
665 | |||
666 | #define MEDIA_TYPE_AUTO_SENSOR 0 | ||
667 | #define MEDIA_TYPE_1000M_FULL 1 | ||
668 | #define MEDIA_TYPE_100M_FULL 2 | ||
669 | #define MEDIA_TYPE_100M_HALF 3 | ||
670 | #define MEDIA_TYPE_10M_FULL 4 | ||
671 | #define MEDIA_TYPE_10M_HALF 5 | ||
672 | |||
673 | #define ADVERTISE_10_HALF 0x0001 | ||
674 | #define ADVERTISE_10_FULL 0x0002 | ||
675 | #define ADVERTISE_100_HALF 0x0004 | ||
676 | #define ADVERTISE_100_FULL 0x0008 | ||
677 | #define ADVERTISE_1000_HALF 0x0010 | ||
678 | #define ADVERTISE_1000_FULL 0x0020 | ||
679 | #define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ | ||
680 | #define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ | ||
681 | #define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ | ||
682 | |||
683 | /* The size (in bytes) of an Ethernet packet */ | ||
684 | #define ENET_HEADER_SIZE 14 | ||
685 | #define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */ | ||
686 | #define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */ | ||
687 | #define ETHERNET_FCS_SIZE 4 | ||
688 | #define MAX_JUMBO_FRAME_SIZE 0x2800 | ||
689 | |||
690 | #define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ | ||
691 | #define PHY_FORCE_TIME 20 /* 2.0 Seconds */ | ||
692 | |||
693 | /* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */ | ||
694 | #define EEPROM_SUM 0xBABA | ||
695 | |||
696 | #define ATL1_EEDUMP_LEN 48 | ||
697 | |||
698 | /* Statistics counters collected by the MAC */ | ||
699 | struct stats_msg_block { | ||
700 | /* rx */ | ||
701 | u32 rx_ok; /* The number of good packets received. */ | ||
702 | u32 rx_bcast; /* The number of good broadcast packets received. */ | ||
703 | u32 rx_mcast; /* The number of good multicast packets received. */ | ||
704 | u32 rx_pause; /* The number of Pause packets received. */ | ||
705 | u32 rx_ctrl; /* The number of control packets received, other than Pause frames. */ | ||
706 | u32 rx_fcs_err; /* The number of packets with bad FCS. */ | ||
707 | u32 rx_len_err; /* The number of packets whose length field does not match their actual size. */ | ||
708 | u32 rx_byte_cnt; /* The number of bytes of good packets received. FCS is NOT included. */ | ||
709 | u32 rx_runt; /* The number of packets received that are less than 64 bytes long and have good FCS. */ | ||
710 | u32 rx_frag; /* The number of packets received that are less than 64 bytes long and have bad FCS. */ | ||
711 | u32 rx_sz_64; /* The number of good and bad packets received that are 64 bytes long. */ | ||
712 | u32 rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127 bytes long. */ | ||
713 | u32 rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255 bytes long. */ | ||
714 | u32 rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511 bytes long. */ | ||
715 | u32 rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023 bytes long. */ | ||
716 | u32 rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518 bytes long. */ | ||
717 | u32 rx_sz_1519_max; /* The number of good and bad packets received that are between 1519 bytes and the MTU. */ | ||
718 | u32 rx_sz_ov; /* The number of good and bad packets received that are larger than the MTU, truncated by Selene. */ | ||
719 | u32 rx_rxf_ov; /* The number of frames dropped due to RX FIFO overflow. */ | ||
720 | u32 rx_rrd_ov; /* The number of frames dropped due to RRD overflow. */ | ||
721 | u32 rx_align_err; /* Alignment Error */ | ||
722 | u32 rx_bcast_byte_cnt; /* The byte count of broadcast packets received, excluding FCS. */ | ||
723 | u32 rx_mcast_byte_cnt; /* The byte count of multicast packets received, excluding FCS. */ | ||
724 | u32 rx_err_addr; /* The number of packets dropped due to address filtering. */ | ||
725 | |||
726 | /* tx */ | ||
727 | u32 tx_ok; /* The number of good packets transmitted. */ | ||
728 | u32 tx_bcast; /* The number of good broadcast packets transmitted. */ | ||
729 | u32 tx_mcast; /* The number of good multicast packets transmitted. */ | ||
730 | u32 tx_pause; /* The number of Pause packets transmitted. */ | ||
731 | u32 tx_exc_defer; /* The number of packets transmitted with excessive deferral. */ | ||
732 | u32 tx_ctrl; /* The number of control packets transmitted, excluding Pause frames. */ | ||
733 | u32 tx_defer; /* The number of packets transmitted that were deferred. */ | ||
734 | u32 tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */ | ||
735 | u32 tx_sz_64; /* The number of good and bad packets transmitted that are 64 bytes long. */ | ||
736 | u32 tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127 bytes long. */ | ||
737 | u32 tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255 bytes long. */ | ||
738 | u32 tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511 bytes long. */ | ||
739 | u32 tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023 bytes long. */ | ||
740 | u32 tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518 bytes long. */ | ||
741 | u32 tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519 bytes and the MTU. */ | ||
742 | u32 tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */ | ||
743 | u32 tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */ | ||
744 | u32 tx_late_col; /* The number of packets transmitted with late collisions. */ | ||
745 | u32 tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */ | ||
746 | u32 tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun or TRD FIFO underrun. */ | ||
747 | u32 tx_rd_eop; /* The number of times a read went beyond the EOP into the next frame area because the TRD was not written in time. */ | ||
748 | u32 tx_len_err; /* The number of transmit packets whose length field does not match the actual frame size. */ | ||
749 | u32 tx_trunc; /* The number of transmit packets truncated because their size exceeded the MTU. */ | ||
750 | u32 tx_bcast_byte; /* The byte count of broadcast packets transmitted, excluding FCS. */ | ||
751 | u32 tx_mcast_byte; /* The byte count of multicast packets transmitted, excluding FCS. */ | ||
752 | u32 smb_updated; /* 1: SMB updated. Used by software as the indication that the statistics have been updated. | ||
753 | * Software should clear this bit after retrieving the statistics information. */ | ||
754 | }; | ||
755 | |||
756 | /* Coalescing Message Block */ | ||
757 | struct coals_msg_block { | ||
758 | u32 int_stats; /* interrupt status */ | ||
759 | u16 rrd_prod_idx; /* RRD Producer Index. */ | ||
760 | u16 rfd_cons_idx; /* RFD Consumer Index. */ | ||
761 | u16 update; /* Selene sets this bit every time it DMAs the CMB to host memory. | ||
762 | * Software is supposed to clear this bit when the CMB information has been processed. */ | ||
763 | u16 tpd_cons_idx; /* TPD Consumer Index. */ | ||
764 | }; | ||
765 | |||
766 | /* RRD descriptor */ | ||
767 | struct rx_return_desc { | ||
768 | u8 num_buf; /* Number of RFD buffers used by the received packet */ | ||
769 | u8 resved; | ||
770 | u16 buf_indx; /* RFD Index of the first buffer */ | ||
771 | union { | ||
772 | u32 valid; | ||
773 | struct { | ||
774 | u16 rx_chksum; | ||
775 | u16 pkt_size; | ||
776 | } xsum_sz; | ||
777 | } xsz; | ||
778 | |||
779 | u16 pkt_flg; /* Packet flags */ | ||
780 | u16 err_flg; /* Error flags */ | ||
781 | u16 resved2; | ||
782 | u16 vlan_tag; /* VLAN TAG */ | ||
783 | }; | ||
784 | |||
785 | #define PACKET_FLAG_ETH_TYPE 0x0080 | ||
786 | #define PACKET_FLAG_VLAN_INS 0x0100 | ||
787 | #define PACKET_FLAG_ERR 0x0200 | ||
788 | #define PACKET_FLAG_IPV4 0x0400 | ||
789 | #define PACKET_FLAG_UDP 0x0800 | ||
790 | #define PACKET_FLAG_TCP 0x1000 | ||
791 | #define PACKET_FLAG_BCAST 0x2000 | ||
792 | #define PACKET_FLAG_MCAST 0x4000 | ||
793 | #define PACKET_FLAG_PAUSE 0x8000 | ||
794 | |||
795 | #define ERR_FLAG_CRC 0x0001 | ||
796 | #define ERR_FLAG_CODE 0x0002 | ||
797 | #define ERR_FLAG_DRIBBLE 0x0004 | ||
798 | #define ERR_FLAG_RUNT 0x0008 | ||
799 | #define ERR_FLAG_OV 0x0010 | ||
800 | #define ERR_FLAG_TRUNC 0x0020 | ||
801 | #define ERR_FLAG_IP_CHKSUM 0x0040 | ||
802 | #define ERR_FLAG_L4_CHKSUM 0x0080 | ||
803 | #define ERR_FLAG_LEN 0x0100 | ||
804 | #define ERR_FLAG_DES_ADDR 0x0200 | ||
805 | |||
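A minimal sketch of how the packet/error flag bits above are typically tested on a completed RRD, mirroring the receive path later in atl1_intr_rx() (the helper name is illustrative only):

/* Illustrative only: decide whether a completed RRD describes a usable frame. */
static inline int atl1_rrd_frame_ok(const struct rx_return_desc *rrd)
{
	if (!(rrd->pkt_flg & PACKET_FLAG_ERR))
		return 1;	/* no error flagged at all */
	/* checksum/length errors are still passed up; see atl1_rx_checksum() */
	return !!(rrd->err_flg &
		  (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM | ERR_FLAG_LEN));
}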
806 | /* RFD descriptor */ | ||
807 | struct rx_free_desc { | ||
808 | __le64 buffer_addr; /* Address of the descriptor's data buffer */ | ||
809 | __le16 buf_len; /* Size of the receive buffer in host memory, in bytes */ | ||
810 | u16 coalese; /* Update consumer index to host after the reception of this frame */ | ||
811 | /* __attribute__ ((packed)) is required */ | ||
812 | } __attribute__ ((packed)); | ||
813 | |||
814 | /* tsopu defines */ | ||
815 | #define TSO_PARAM_BUFLEN_MASK 0x3FFF | ||
816 | #define TSO_PARAM_BUFLEN_SHIFT 0 | ||
817 | #define TSO_PARAM_DMAINT_MASK 0x0001 | ||
818 | #define TSO_PARAM_DMAINT_SHIFT 14 | ||
819 | #define TSO_PARAM_PKTNT_MASK 0x0001 | ||
820 | #define TSO_PARAM_PKTINT_SHIFT 15 | ||
821 | #define TSO_PARAM_VLANTAG_MASK 0xFFFF | ||
822 | #define TSO_PARAM_VLAN_SHIFT 16 | ||
823 | |||
824 | /* tsopl defines */ | ||
825 | #define TSO_PARAM_EOP_MASK 0x0001 | ||
826 | #define TSO_PARAM_EOP_SHIFT 0 | ||
827 | #define TSO_PARAM_COALESCE_MASK 0x0001 | ||
828 | #define TSO_PARAM_COALESCE_SHIFT 1 | ||
829 | #define TSO_PARAM_INSVLAG_MASK 0x0001 | ||
830 | #define TSO_PARAM_INSVLAG_SHIFT 2 | ||
831 | #define TSO_PARAM_CUSTOMCKSUM_MASK 0x0001 | ||
832 | #define TSO_PARAM_CUSTOMCKSUM_SHIFT 3 | ||
833 | #define TSO_PARAM_SEGMENT_MASK 0x0001 | ||
834 | #define TSO_PARAM_SEGMENT_SHIFT 4 | ||
835 | #define TSO_PARAM_IPCKSUM_MASK 0x0001 | ||
836 | #define TSO_PARAM_IPCKSUM_SHIFT 5 | ||
837 | #define TSO_PARAM_TCPCKSUM_MASK 0x0001 | ||
838 | #define TSO_PARAM_TCPCKSUM_SHIFT 6 | ||
839 | #define TSO_PARAM_UDPCKSUM_MASK 0x0001 | ||
840 | #define TSO_PARAM_UDPCKSUM_SHIFT 7 | ||
841 | #define TSO_PARAM_VLANTAGGED_MASK 0x0001 | ||
842 | #define TSO_PARAM_VLANTAGGED_SHIFT 8 | ||
843 | #define TSO_PARAM_ETHTYPE_MASK 0x0001 | ||
844 | #define TSO_PARAM_ETHTYPE_SHIFT 9 | ||
845 | #define TSO_PARAM_IPHL_MASK 0x000F | ||
846 | #define TSO_PARAM_IPHL_SHIFT 10 | ||
847 | #define TSO_PARAM_TCPHDRLEN_MASK 0x000F | ||
848 | #define TSO_PARAM_TCPHDRLEN_SHIFT 14 | ||
849 | #define TSO_PARAM_HDRFLAG_MASK 0x0001 | ||
850 | #define TSO_PARAM_HDRFLAG_SHIFT 18 | ||
851 | #define TSO_PARAM_MSS_MASK 0x1FFF | ||
852 | #define TSO_PARAM_MSS_SHIFT 19 | ||
853 | |||
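The tsopl word is built by shifting each field into place with the masks above; a hedged sketch (the helper name and field values are placeholders) of packing the IP header length, TCP header length and MSS for a segmented send:

/* Illustrative only: pack TSO parameters into the lower descriptor word. */
static inline u32 atl1_build_tsopl(u32 iphl, u32 tcphl, u32 mss)
{
	u32 tsopl = 0;

	tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;		/* enable segmentation */
	tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;		/* offload IP checksum */
	tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT;		/* offload TCP checksum */
	tsopl |= (iphl & TSO_PARAM_IPHL_MASK) << TSO_PARAM_IPHL_SHIFT;
	tsopl |= (tcphl & TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT;
	tsopl |= (mss & TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
	return tsopl;
}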
854 | /* csumpu defines */ | ||
855 | #define CSUM_PARAM_BUFLEN_MASK 0x3FFF | ||
856 | #define CSUM_PARAM_BUFLEN_SHIFT 0 | ||
857 | #define CSUM_PARAM_DMAINT_MASK 0x0001 | ||
858 | #define CSUM_PARAM_DMAINT_SHIFT 14 | ||
859 | #define CSUM_PARAM_PKTINT_MASK 0x0001 | ||
860 | #define CSUM_PARAM_PKTINT_SHIFT 15 | ||
861 | #define CSUM_PARAM_VALANTAG_MASK 0xFFFF | ||
862 | #define CSUM_PARAM_VALAN_SHIFT 16 | ||
863 | |||
864 | /* csumpl defines*/ | ||
865 | #define CSUM_PARAM_EOP_MASK 0x0001 | ||
866 | #define CSUM_PARAM_EOP_SHIFT 0 | ||
867 | #define CSUM_PARAM_COALESCE_MASK 0x0001 | ||
868 | #define CSUM_PARAM_COALESCE_SHIFT 1 | ||
869 | #define CSUM_PARAM_INSVLAG_MASK 0x0001 | ||
870 | #define CSUM_PARAM_INSVLAG_SHIFT 2 | ||
871 | #define CSUM_PARAM_CUSTOMCKSUM_MASK 0x0001 | ||
872 | #define CSUM_PARAM_CUSTOMCKSUM_SHIFT 3 | ||
873 | #define CSUM_PARAM_SEGMENT_MASK 0x0001 | ||
874 | #define CSUM_PARAM_SEGMENT_SHIFT 4 | ||
875 | #define CSUM_PARAM_IPCKSUM_MASK 0x0001 | ||
876 | #define CSUM_PARAM_IPCKSUM_SHIFT 5 | ||
877 | #define CSUM_PARAM_TCPCKSUM_MASK 0x0001 | ||
878 | #define CSUM_PARAM_TCPCKSUM_SHIFT 6 | ||
879 | #define CSUM_PARAM_UDPCKSUM_MASK 0x0001 | ||
880 | #define CSUM_PARAM_UDPCKSUM_SHIFT 7 | ||
881 | #define CSUM_PARAM_VLANTAGGED_MASK 0x0001 | ||
882 | #define CSUM_PARAM_VLANTAGGED_SHIFT 8 | ||
883 | #define CSUM_PARAM_ETHTYPE_MASK 0x0001 | ||
884 | #define CSUM_PARAM_ETHTYPE_SHIFT 9 | ||
885 | #define CSUM_PARAM_IPHL_MASK 0x000F | ||
886 | #define CSUM_PARAM_IPHL_SHIFT 10 | ||
887 | #define CSUM_PARAM_PLOADOFFSET_MASK 0x00FF | ||
888 | #define CSUM_PARAM_PLOADOFFSET_SHIFT 16 | ||
889 | #define CSUM_PARAM_XSUMOFFSET_MASK 0x00FF | ||
890 | #define CSUM_PARAM_XSUMOFFSET_SHIFT 24 | ||
891 | |||
892 | /* TPD descriptor */ | ||
893 | struct tso_param { | ||
894 | /* The order of these declarations is important -- don't change it */ | ||
895 | u32 tsopu; /* tso_param upper word */ | ||
896 | u32 tsopl; /* tso_param lower word */ | ||
897 | }; | ||
898 | |||
899 | struct csum_param { | ||
900 | /* The order of these declarations is important -- don't change it */ | ||
901 | u32 csumpu; /* csum_param upper word */ | ||
902 | u32 csumpl; /* csum_param lower word */ | ||
903 | }; | ||
904 | |||
905 | union tpd_descr { | ||
906 | u64 data; | ||
907 | struct csum_param csum; | ||
908 | struct tso_param tso; | ||
909 | }; | ||
910 | |||
911 | struct tx_packet_desc { | ||
912 | __le64 buffer_addr; | ||
913 | union tpd_descr desc; | ||
914 | }; | ||
915 | |||
916 | /* DMA Order Settings */ | ||
917 | enum atl1_dma_order { | ||
918 | atl1_dma_ord_in = 1, | ||
919 | atl1_dma_ord_enh = 2, | ||
920 | atl1_dma_ord_out = 4 | ||
921 | }; | ||
922 | |||
923 | enum atl1_dma_rcb { | ||
924 | atl1_rcb_64 = 0, | ||
925 | atl1_rcb_128 = 1 | ||
926 | }; | ||
927 | |||
928 | enum atl1_dma_req_block { | ||
929 | atl1_dma_req_128 = 0, | ||
930 | atl1_dma_req_256 = 1, | ||
931 | atl1_dma_req_512 = 2, | ||
932 | atl1_dam_req_1024 = 3, | ||
933 | atl1_dam_req_2048 = 4, | ||
934 | atl1_dma_req_4096 = 5 | ||
935 | }; | ||
936 | |||
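The request-block encodings step in powers of two from 128 bytes. Assuming, as the field widths suggest, that these are the values programmed into the DMAR/DMAW burst-length fields of REG_DMA_CTRL, a hedged sketch of merging them into the control word (the helper name is illustrative only):

/* Illustrative only: program read/write burst lengths into the DMA control word. */
static inline u32 atl1_dma_ctrl_bursts(u32 dma_ctrl,
					enum atl1_dma_req_block rd,
					enum atl1_dma_req_block wr)
{
	dma_ctrl &= ~(DMA_CTRL_DMAR_BURST_LEN_MASK << DMA_CTRL_DMAR_BURST_LEN_SHIFT);
	dma_ctrl &= ~(DMA_CTRL_DMAW_BURST_LEN_MASK << DMA_CTRL_DMAW_BURST_LEN_SHIFT);
	dma_ctrl |= (rd & DMA_CTRL_DMAR_BURST_LEN_MASK) << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
	dma_ctrl |= (wr & DMA_CTRL_DMAW_BURST_LEN_MASK) << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
	return dma_ctrl;
}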
937 | struct atl1_spi_flash_dev { | ||
938 | const char *manu_name; /* manufacturer id */ | ||
939 | /* op-code */ | ||
940 | u8 cmd_wrsr; | ||
941 | u8 cmd_read; | ||
942 | u8 cmd_program; | ||
943 | u8 cmd_wren; | ||
944 | u8 cmd_wrdi; | ||
945 | u8 cmd_rdsr; | ||
946 | u8 cmd_rdid; | ||
947 | u8 cmd_sector_erase; | ||
948 | u8 cmd_chip_erase; | ||
949 | }; | ||
950 | |||
951 | #endif /* _ATL1_HW_H_ */ | ||
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c new file mode 100644 index 000000000000..6655640eb4ca --- /dev/null +++ b/drivers/net/atl1/atl1_main.c | |||
@@ -0,0 +1,2468 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. | ||
3 | * Copyright(c) 2006 Chris Snook <csnook@redhat.com> | ||
4 | * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> | ||
5 | * | ||
6 | * Derived from Intel e1000 driver | ||
7 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along with | ||
20 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
21 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
22 | * | ||
23 | * The full GNU General Public License is included in this distribution in the | ||
24 | * file called COPYING. | ||
25 | * | ||
26 | * Contact Information: | ||
27 | * Xiong Huang <xiong_huang@attansic.com> | ||
28 | * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei, | ||
29 | * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA | ||
30 | * | ||
31 | * Chris Snook <csnook@redhat.com> | ||
32 | * Jay Cliburn <jcliburn@gmail.com> | ||
33 | * | ||
34 | * This version is adapted from the Attansic reference driver for | ||
35 | * inclusion in the Linux kernel. It is currently under heavy development. | ||
36 | * A very incomplete list of things that need to be dealt with: | ||
37 | * | ||
38 | * TODO: | ||
39 | * Fix TSO; tx performance is horrible with TSO enabled. | ||
40 | * Wake on LAN. | ||
41 | * Add more ethtool functions, including set ring parameters. | ||
42 | * Fix abstruse irq enable/disable condition described here: | ||
43 | * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 | ||
44 | * | ||
45 | * NEEDS TESTING: | ||
46 | * VLAN | ||
47 | * multicast | ||
48 | * promiscuous mode | ||
49 | * interrupt coalescing | ||
50 | * SMP torture testing | ||
51 | */ | ||
52 | |||
53 | #include <linux/types.h> | ||
54 | #include <linux/netdevice.h> | ||
55 | #include <linux/pci.h> | ||
56 | #include <linux/spinlock.h> | ||
57 | #include <linux/slab.h> | ||
58 | #include <linux/string.h> | ||
59 | #include <linux/skbuff.h> | ||
60 | #include <linux/etherdevice.h> | ||
61 | #include <linux/if_vlan.h> | ||
62 | #include <linux/irqreturn.h> | ||
63 | #include <linux/workqueue.h> | ||
64 | #include <linux/timer.h> | ||
65 | #include <linux/jiffies.h> | ||
66 | #include <linux/hardirq.h> | ||
67 | #include <linux/interrupt.h> | ||
68 | #include <linux/irqflags.h> | ||
69 | #include <linux/dma-mapping.h> | ||
70 | #include <linux/net.h> | ||
71 | #include <linux/pm.h> | ||
72 | #include <linux/in.h> | ||
73 | #include <linux/ip.h> | ||
74 | #include <linux/tcp.h> | ||
75 | #include <linux/compiler.h> | ||
76 | #include <linux/delay.h> | ||
77 | #include <linux/mii.h> | ||
78 | #include <net/checksum.h> | ||
79 | |||
80 | #include <asm/atomic.h> | ||
81 | #include <asm/byteorder.h> | ||
82 | |||
83 | #include "atl1.h" | ||
84 | |||
85 | #define RUN_REALTIME 0 | ||
86 | #define DRIVER_VERSION "2.0.6" | ||
87 | |||
88 | char atl1_driver_name[] = "atl1"; | ||
89 | static const char atl1_driver_string[] = "Attansic L1 Ethernet Network Driver"; | ||
90 | static const char atl1_copyright[] = "Copyright(c) 2005-2006 Attansic Corporation."; | ||
91 | char atl1_driver_version[] = DRIVER_VERSION; | ||
92 | |||
93 | MODULE_AUTHOR | ||
94 | ("Attansic Corporation <xiong_huang@attansic.com>, Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>"); | ||
95 | MODULE_DESCRIPTION("Attansic 1000M Ethernet Network Driver"); | ||
96 | MODULE_LICENSE("GPL"); | ||
97 | MODULE_VERSION(DRIVER_VERSION); | ||
98 | |||
99 | /* | ||
100 | * atl1_pci_tbl - PCI Device ID Table | ||
101 | */ | ||
102 | static const struct pci_device_id atl1_pci_tbl[] = { | ||
103 | {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1048)}, | ||
104 | /* required last entry */ | ||
105 | {0,} | ||
106 | }; | ||
107 | |||
108 | MODULE_DEVICE_TABLE(pci, atl1_pci_tbl); | ||
109 | |||
110 | /* | ||
111 | * atl1_sw_init - Initialize general software structures (struct atl1_adapter) | ||
112 | * @adapter: board private structure to initialize | ||
113 | * | ||
114 | * atl1_sw_init initializes the Adapter private data structure. | ||
115 | * Fields are initialized based on PCI device information and | ||
116 | * OS network device settings (MTU size). | ||
117 | */ | ||
118 | static int __devinit atl1_sw_init(struct atl1_adapter *adapter) | ||
119 | { | ||
120 | struct atl1_hw *hw = &adapter->hw; | ||
121 | struct net_device *netdev = adapter->netdev; | ||
122 | struct pci_dev *pdev = adapter->pdev; | ||
123 | |||
124 | /* PCI config space info */ | ||
125 | pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); | ||
126 | |||
127 | hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | ||
128 | hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; | ||
129 | |||
130 | adapter->wol = 0; | ||
131 | adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; | ||
132 | adapter->ict = 50000; /* 100ms */ | ||
133 | adapter->link_speed = SPEED_0; /* hardware init */ | ||
134 | adapter->link_duplex = FULL_DUPLEX; | ||
135 | |||
136 | hw->phy_configured = false; | ||
137 | hw->preamble_len = 7; | ||
138 | hw->ipgt = 0x60; | ||
139 | hw->min_ifg = 0x50; | ||
140 | hw->ipgr1 = 0x40; | ||
141 | hw->ipgr2 = 0x60; | ||
142 | hw->max_retry = 0xf; | ||
143 | hw->lcol = 0x37; | ||
144 | hw->jam_ipg = 7; | ||
145 | hw->rfd_burst = 8; | ||
146 | hw->rrd_burst = 8; | ||
147 | hw->rfd_fetch_gap = 1; | ||
148 | hw->rx_jumbo_th = adapter->rx_buffer_len / 8; | ||
149 | hw->rx_jumbo_lkah = 1; | ||
150 | hw->rrd_ret_timer = 16; | ||
151 | hw->tpd_burst = 4; | ||
152 | hw->tpd_fetch_th = 16; | ||
153 | hw->txf_burst = 0x100; | ||
154 | hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3; | ||
155 | hw->tpd_fetch_gap = 1; | ||
156 | hw->rcb_value = atl1_rcb_64; | ||
157 | hw->dma_ord = atl1_dma_ord_enh; | ||
158 | hw->dmar_block = atl1_dma_req_256; | ||
159 | hw->dmaw_block = atl1_dma_req_256; | ||
160 | hw->cmb_rrd = 4; | ||
161 | hw->cmb_tpd = 4; | ||
162 | hw->cmb_rx_timer = 1; /* about 2us */ | ||
163 | hw->cmb_tx_timer = 1; /* about 2us */ | ||
164 | hw->smb_timer = 100000; /* about 200ms */ | ||
165 | |||
166 | atomic_set(&adapter->irq_sem, 0); | ||
167 | spin_lock_init(&adapter->lock); | ||
168 | spin_lock_init(&adapter->mb_lock); | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * atl1_setup_ring_resources - allocate Tx / RX descriptor resources | ||
175 | * @adapter: board private structure | ||
176 | * | ||
177 | * Return 0 on success, negative on failure | ||
178 | */ | ||
179 | s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) | ||
180 | { | ||
181 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | ||
182 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
183 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | ||
184 | struct atl1_ring_header *ring_header = &adapter->ring_header; | ||
185 | struct pci_dev *pdev = adapter->pdev; | ||
186 | int size; | ||
187 | u8 offset = 0; | ||
188 | |||
189 | size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); | ||
190 | tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); | ||
191 | if (unlikely(!tpd_ring->buffer_info)) { | ||
192 | printk(KERN_WARNING "%s: kzalloc failed, size = %d\n", | ||
193 | atl1_driver_name, size); | ||
194 | goto err_nomem; | ||
195 | } | ||
196 | rfd_ring->buffer_info = | ||
197 | (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); | ||
198 | |||
199 | /* real ring DMA buffer */ | ||
200 | ring_header->size = size = sizeof(struct tx_packet_desc) * | ||
201 | tpd_ring->count | ||
202 | + sizeof(struct rx_free_desc) * rfd_ring->count | ||
203 | + sizeof(struct rx_return_desc) * rrd_ring->count | ||
204 | + sizeof(struct coals_msg_block) | ||
205 | + sizeof(struct stats_msg_block) | ||
206 | + 40; /* "40: for 8 bytes align" huh? -- CHS */ | ||
207 | |||
208 | ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, | ||
209 | &ring_header->dma); | ||
210 | if (unlikely(!ring_header->desc)) { | ||
211 | printk(KERN_WARNING | ||
212 | "%s: pci_alloc_consistent failed, size = %d\n", | ||
213 | atl1_driver_name, size); | ||
214 | goto err_nomem; | ||
215 | } | ||
216 | |||
217 | memset(ring_header->desc, 0, ring_header->size); | ||
218 | |||
219 | /* init TPD ring */ | ||
220 | tpd_ring->dma = ring_header->dma; | ||
221 | offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0; | ||
222 | tpd_ring->dma += offset; | ||
223 | tpd_ring->desc = (u8 *) ring_header->desc + offset; | ||
224 | tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; | ||
225 | atomic_set(&tpd_ring->next_to_use, 0); | ||
226 | atomic_set(&tpd_ring->next_to_clean, 0); | ||
227 | |||
228 | /* init RFD ring */ | ||
229 | rfd_ring->dma = tpd_ring->dma + tpd_ring->size; | ||
230 | offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0; | ||
231 | rfd_ring->dma += offset; | ||
232 | rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); | ||
233 | rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; | ||
234 | rfd_ring->next_to_clean = 0; | ||
235 | /* rfd_ring->next_to_use = rfd_ring->count - 1; */ | ||
236 | atomic_set(&rfd_ring->next_to_use, 0); | ||
237 | |||
238 | /* init RRD ring */ | ||
239 | rrd_ring->dma = rfd_ring->dma + rfd_ring->size; | ||
240 | offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0; | ||
241 | rrd_ring->dma += offset; | ||
242 | rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); | ||
243 | rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; | ||
244 | rrd_ring->next_to_use = 0; | ||
245 | atomic_set(&rrd_ring->next_to_clean, 0); | ||
246 | |||
247 | /* init CMB */ | ||
248 | adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; | ||
249 | offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; | ||
250 | adapter->cmb.dma += offset; | ||
251 | adapter->cmb.cmb = | ||
252 | (struct coals_msg_block *) ((u8 *) rrd_ring->desc + | ||
253 | (rrd_ring->size + offset)); | ||
254 | |||
255 | /* init SMB */ | ||
256 | adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); | ||
257 | offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0; | ||
258 | adapter->smb.dma += offset; | ||
259 | adapter->smb.smb = (struct stats_msg_block *) | ||
260 | ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset)); | ||
261 | |||
262 | return ATL1_SUCCESS; | ||
263 | |||
264 | err_nomem: | ||
265 | kfree(tpd_ring->buffer_info); | ||
266 | return -ENOMEM; | ||
267 | } | ||
268 | |||
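Each ring/CMB/SMB base address above is bumped to an 8-byte boundary with the same (dma & 0x7) ? (8 - (dma & 0x7)) : 0 pattern. For clarity, this is simply an align-up by 8, as the hypothetical helper below shows (not part of the driver):

/* Illustrative only: the per-ring offset computed above is equivalent to
 * aligning the DMA address up to the next 8-byte boundary. */
static inline dma_addr_t atl1_align8(dma_addr_t dma)
{
	/* same result as dma + ((dma & 7) ? 8 - (dma & 7) : 0) */
	return (dma + 7) & ~((dma_addr_t)7);
}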
269 | /* | ||
270 | * atl1_irq_enable - Enable default interrupt generation settings | ||
271 | * @adapter: board private structure | ||
272 | */ | ||
273 | static void atl1_irq_enable(struct atl1_adapter *adapter) | ||
274 | { | ||
275 | if (likely(!atomic_dec_and_test(&adapter->irq_sem))) | ||
276 | iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); | ||
277 | } | ||
278 | |||
279 | static void atl1_clear_phy_int(struct atl1_adapter *adapter) | ||
280 | { | ||
281 | u16 phy_data; | ||
282 | unsigned long flags; | ||
283 | |||
284 | spin_lock_irqsave(&adapter->lock, flags); | ||
285 | atl1_read_phy_reg(&adapter->hw, 19, &phy_data); | ||
286 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
287 | } | ||
288 | |||
289 | static void atl1_inc_smb(struct atl1_adapter *adapter) | ||
290 | { | ||
291 | struct stats_msg_block *smb = adapter->smb.smb; | ||
292 | |||
293 | /* Fill out the OS statistics structure */ | ||
294 | adapter->soft_stats.rx_packets += smb->rx_ok; | ||
295 | adapter->soft_stats.tx_packets += smb->tx_ok; | ||
296 | adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; | ||
297 | adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; | ||
298 | adapter->soft_stats.multicast += smb->rx_mcast; | ||
299 | adapter->soft_stats.collisions += (smb->tx_1_col + | ||
300 | smb->tx_2_col * 2 + | ||
301 | smb->tx_late_col + | ||
302 | smb->tx_abort_col * | ||
303 | adapter->hw.max_retry); | ||
304 | |||
305 | /* Rx Errors */ | ||
306 | adapter->soft_stats.rx_errors += (smb->rx_frag + | ||
307 | smb->rx_fcs_err + | ||
308 | smb->rx_len_err + | ||
309 | smb->rx_sz_ov + | ||
310 | smb->rx_rxf_ov + | ||
311 | smb->rx_rrd_ov + smb->rx_align_err); | ||
312 | adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; | ||
313 | adapter->soft_stats.rx_length_errors += smb->rx_len_err; | ||
314 | adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; | ||
315 | adapter->soft_stats.rx_frame_errors += smb->rx_align_err; | ||
316 | adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + | ||
317 | smb->rx_rxf_ov); | ||
318 | |||
319 | adapter->soft_stats.rx_pause += smb->rx_pause; | ||
320 | adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; | ||
321 | adapter->soft_stats.rx_trunc += smb->rx_sz_ov; | ||
322 | |||
323 | /* Tx Errors */ | ||
324 | adapter->soft_stats.tx_errors += (smb->tx_late_col + | ||
325 | smb->tx_abort_col + | ||
326 | smb->tx_underrun + smb->tx_trunc); | ||
327 | adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; | ||
328 | adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; | ||
329 | adapter->soft_stats.tx_window_errors += smb->tx_late_col; | ||
330 | |||
331 | adapter->soft_stats.excecol += smb->tx_abort_col; | ||
332 | adapter->soft_stats.deffer += smb->tx_defer; | ||
333 | adapter->soft_stats.scc += smb->tx_1_col; | ||
334 | adapter->soft_stats.mcc += smb->tx_2_col; | ||
335 | adapter->soft_stats.latecol += smb->tx_late_col; | ||
336 | adapter->soft_stats.tx_underun += smb->tx_underrun; | ||
337 | adapter->soft_stats.tx_trunc += smb->tx_trunc; | ||
338 | adapter->soft_stats.tx_pause += smb->tx_pause; | ||
339 | |||
340 | adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; | ||
341 | adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; | ||
342 | adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; | ||
343 | adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; | ||
344 | adapter->net_stats.multicast = adapter->soft_stats.multicast; | ||
345 | adapter->net_stats.collisions = adapter->soft_stats.collisions; | ||
346 | adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; | ||
347 | adapter->net_stats.rx_over_errors = | ||
348 | adapter->soft_stats.rx_missed_errors; | ||
349 | adapter->net_stats.rx_length_errors = | ||
350 | adapter->soft_stats.rx_length_errors; | ||
351 | adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; | ||
352 | adapter->net_stats.rx_frame_errors = | ||
353 | adapter->soft_stats.rx_frame_errors; | ||
354 | adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; | ||
355 | adapter->net_stats.rx_missed_errors = | ||
356 | adapter->soft_stats.rx_missed_errors; | ||
357 | adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; | ||
358 | adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; | ||
359 | adapter->net_stats.tx_aborted_errors = | ||
360 | adapter->soft_stats.tx_aborted_errors; | ||
361 | adapter->net_stats.tx_window_errors = | ||
362 | adapter->soft_stats.tx_window_errors; | ||
363 | adapter->net_stats.tx_carrier_errors = | ||
364 | adapter->soft_stats.tx_carrier_errors; | ||
365 | } | ||
366 | |||
367 | static void atl1_rx_checksum(struct atl1_adapter *adapter, | ||
368 | struct rx_return_desc *rrd, | ||
369 | struct sk_buff *skb) | ||
370 | { | ||
371 | skb->ip_summed = CHECKSUM_NONE; | ||
372 | |||
373 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { | ||
374 | if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | | ||
375 | ERR_FLAG_CODE | ERR_FLAG_OV)) { | ||
376 | adapter->hw_csum_err++; | ||
377 | printk(KERN_DEBUG "%s: rx checksum error\n", | ||
378 | atl1_driver_name); | ||
379 | return; | ||
380 | } | ||
381 | } | ||
382 | |||
383 | /* not IPv4 */ | ||
384 | if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) | ||
385 | /* checksum is invalid, but it's not an IPv4 pkt, so ok */ | ||
386 | return; | ||
387 | |||
388 | /* IPv4 packet */ | ||
389 | if (likely(!(rrd->err_flg & | ||
390 | (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { | ||
391 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
392 | adapter->hw_csum_good++; | ||
393 | return; | ||
394 | } | ||
395 | |||
396 | /* IPv4, but hardware thinks its checksum is wrong */ | ||
397 | printk(KERN_DEBUG "%s: hw csum wrong pkt_flag:%x, err_flag:%x\n", | ||
398 | atl1_driver_name, rrd->pkt_flg, rrd->err_flg); | ||
399 | skb->ip_summed = CHECKSUM_COMPLETE; | ||
400 | skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); | ||
401 | adapter->hw_csum_err++; | ||
402 | return; | ||
403 | } | ||
404 | |||
405 | /* | ||
406 | * atl1_alloc_rx_buffers - Replace used receive buffers | ||
407 | * @adapter: address of board private structure | ||
408 | */ | ||
409 | static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) | ||
410 | { | ||
411 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
412 | struct net_device *netdev = adapter->netdev; | ||
413 | struct pci_dev *pdev = adapter->pdev; | ||
414 | struct page *page; | ||
415 | unsigned long offset; | ||
416 | struct atl1_buffer *buffer_info, *next_info; | ||
417 | struct sk_buff *skb; | ||
418 | u16 num_alloc = 0; | ||
419 | u16 rfd_next_to_use, next_next; | ||
420 | struct rx_free_desc *rfd_desc; | ||
421 | |||
422 | next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); | ||
423 | if (++next_next == rfd_ring->count) | ||
424 | next_next = 0; | ||
425 | buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; | ||
426 | next_info = &rfd_ring->buffer_info[next_next]; | ||
427 | |||
428 | while (!buffer_info->alloced && !next_info->alloced) { | ||
429 | if (buffer_info->skb) { | ||
430 | buffer_info->alloced = 1; | ||
431 | goto next; | ||
432 | } | ||
433 | |||
434 | rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); | ||
435 | |||
436 | skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); | ||
437 | if (unlikely(!skb)) { /* Better luck next round */ | ||
438 | adapter->net_stats.rx_dropped++; | ||
439 | break; | ||
440 | } | ||
441 | |||
442 | /* | ||
443 | * Make buffer alignment 2 beyond a 16 byte boundary | ||
444 | * this will result in a 16 byte aligned IP header after | ||
445 | * the 14 byte MAC header is removed | ||
446 | */ | ||
447 | skb_reserve(skb, NET_IP_ALIGN); | ||
448 | skb->dev = netdev; | ||
449 | |||
450 | buffer_info->alloced = 1; | ||
451 | buffer_info->skb = skb; | ||
452 | buffer_info->length = (u16) adapter->rx_buffer_len; | ||
453 | page = virt_to_page(skb->data); | ||
454 | offset = (unsigned long)skb->data & ~PAGE_MASK; | ||
455 | buffer_info->dma = pci_map_page(pdev, page, offset, | ||
456 | adapter->rx_buffer_len, | ||
457 | PCI_DMA_FROMDEVICE); | ||
458 | rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); | ||
459 | rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); | ||
460 | rfd_desc->coalese = 0; | ||
461 | |||
462 | next: | ||
463 | rfd_next_to_use = next_next; | ||
464 | if (unlikely(++next_next == rfd_ring->count)) | ||
465 | next_next = 0; | ||
466 | |||
467 | buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; | ||
468 | next_info = &rfd_ring->buffer_info[next_next]; | ||
469 | num_alloc++; | ||
470 | } | ||
471 | |||
472 | if (num_alloc) { | ||
473 | /* | ||
474 | * Force memory writes to complete before letting h/w | ||
475 | * know there are new descriptors to fetch. (Only | ||
476 | * applicable for weak-ordered memory model archs, | ||
477 | * such as IA-64). | ||
478 | */ | ||
479 | wmb(); | ||
480 | atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); | ||
481 | } | ||
482 | return num_alloc; | ||
483 | } | ||
484 | |||
485 | static void atl1_intr_rx(struct atl1_adapter *adapter) | ||
486 | { | ||
487 | int i, count; | ||
488 | u16 length; | ||
489 | u16 rrd_next_to_clean; | ||
490 | u32 value; | ||
491 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
492 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | ||
493 | struct atl1_buffer *buffer_info; | ||
494 | struct rx_return_desc *rrd; | ||
495 | struct sk_buff *skb; | ||
496 | |||
497 | count = 0; | ||
498 | |||
499 | rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); | ||
500 | |||
501 | while (1) { | ||
502 | rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); | ||
503 | i = 1; | ||
504 | if (likely(rrd->xsz.valid)) { /* packet valid */ | ||
505 | chk_rrd: | ||
506 | /* check rrd status */ | ||
507 | if (likely(rrd->num_buf == 1)) | ||
508 | goto rrd_ok; | ||
509 | |||
510 | /* rrd seems to be bad */ | ||
511 | if (unlikely(i-- > 0)) { | ||
512 | /* rrd may not be DMAed completely */ | ||
513 | printk(KERN_DEBUG | ||
514 | "%s: RRD may not be DMAed completely\n", | ||
515 | atl1_driver_name); | ||
516 | udelay(1); | ||
517 | goto chk_rrd; | ||
518 | } | ||
519 | /* bad rrd */ | ||
520 | printk(KERN_DEBUG "%s: bad RRD\n", atl1_driver_name); | ||
521 | /* see if update RFD index */ | ||
522 | if (rrd->num_buf > 1) { | ||
523 | u16 num_buf; | ||
524 | num_buf = | ||
525 | (rrd->xsz.xsum_sz.pkt_size + | ||
526 | adapter->rx_buffer_len - | ||
527 | 1) / adapter->rx_buffer_len; | ||
528 | if (rrd->num_buf == num_buf) { | ||
529 | /* clean alloc flag for bad rrd */ | ||
530 | while (rfd_ring->next_to_clean != | ||
531 | (rrd->buf_indx + num_buf)) { | ||
532 | rfd_ring->buffer_info[rfd_ring-> | ||
533 | next_to_clean].alloced = 0; | ||
534 | if (++rfd_ring->next_to_clean == | ||
535 | rfd_ring->count) { | ||
536 | rfd_ring-> | ||
537 | next_to_clean = 0; | ||
538 | } | ||
539 | } | ||
540 | } | ||
541 | } | ||
542 | |||
543 | /* update rrd */ | ||
544 | rrd->xsz.valid = 0; | ||
545 | if (++rrd_next_to_clean == rrd_ring->count) | ||
546 | rrd_next_to_clean = 0; | ||
547 | count++; | ||
548 | continue; | ||
549 | } else { /* current rrd has not been updated yet */ | ||
550 | |||
551 | break; | ||
552 | } | ||
553 | rrd_ok: | ||
554 | /* clear alloc flags for RFDs preceding this packet's buffer */ | ||
555 | while (rfd_ring->next_to_clean != rrd->buf_indx) { | ||
556 | rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = | ||
557 | 0; | ||
558 | if (++rfd_ring->next_to_clean == rfd_ring->count) | ||
559 | rfd_ring->next_to_clean = 0; | ||
560 | } | ||
561 | |||
562 | buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; | ||
563 | if (++rfd_ring->next_to_clean == rfd_ring->count) | ||
564 | rfd_ring->next_to_clean = 0; | ||
565 | |||
566 | /* update rrd next to clean */ | ||
567 | if (++rrd_next_to_clean == rrd_ring->count) | ||
568 | rrd_next_to_clean = 0; | ||
569 | count++; | ||
570 | |||
571 | if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { | ||
572 | if (!(rrd->err_flg & | ||
573 | (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM | ||
574 | | ERR_FLAG_LEN))) { | ||
575 | /* packet error, don't need upstream */ | ||
576 | buffer_info->alloced = 0; | ||
577 | rrd->xsz.valid = 0; | ||
578 | continue; | ||
579 | } | ||
580 | } | ||
581 | |||
582 | /* Good Receive */ | ||
583 | pci_unmap_page(adapter->pdev, buffer_info->dma, | ||
584 | buffer_info->length, PCI_DMA_FROMDEVICE); | ||
585 | skb = buffer_info->skb; | ||
586 | length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); | ||
587 | |||
588 | skb_put(skb, length - ETHERNET_FCS_SIZE); | ||
589 | |||
590 | /* Receive Checksum Offload */ | ||
591 | atl1_rx_checksum(adapter, rrd, skb); | ||
592 | skb->protocol = eth_type_trans(skb, adapter->netdev); | ||
593 | |||
594 | if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) { | ||
595 | u16 vlan_tag = (rrd->vlan_tag >> 4) | | ||
596 | ((rrd->vlan_tag & 7) << 13) | | ||
597 | ((rrd->vlan_tag & 8) << 9); | ||
598 | vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag); | ||
599 | } else | ||
600 | netif_rx(skb); | ||
601 | |||
602 | /* let protocol layer free skb */ | ||
603 | buffer_info->skb = NULL; | ||
604 | buffer_info->alloced = 0; | ||
605 | rrd->xsz.valid = 0; | ||
606 | |||
607 | adapter->netdev->last_rx = jiffies; | ||
608 | } | ||
609 | |||
610 | atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); | ||
611 | |||
612 | atl1_alloc_rx_buffers(adapter); | ||
613 | |||
614 | /* update mailbox ? */ | ||
615 | if (count) { | ||
616 | u32 tpd_next_to_use; | ||
617 | u32 rfd_next_to_use; | ||
618 | u32 rrd_next_to_clean; | ||
619 | |||
620 | spin_lock(&adapter->mb_lock); | ||
621 | |||
622 | tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); | ||
623 | rfd_next_to_use = | ||
624 | atomic_read(&adapter->rfd_ring.next_to_use); | ||
625 | rrd_next_to_clean = | ||
626 | atomic_read(&adapter->rrd_ring.next_to_clean); | ||
627 | value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << | ||
628 | MB_RFD_PROD_INDX_SHIFT) | | ||
629 | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << | ||
630 | MB_RRD_CONS_INDX_SHIFT) | | ||
631 | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << | ||
632 | MB_TPD_PROD_INDX_SHIFT); | ||
633 | iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); | ||
634 | spin_unlock(&adapter->mb_lock); | ||
635 | } | ||
636 | } | ||
637 | |||
638 | static void atl1_intr_tx(struct atl1_adapter *adapter) | ||
639 | { | ||
640 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | ||
641 | struct atl1_buffer *buffer_info; | ||
642 | u16 sw_tpd_next_to_clean; | ||
643 | u16 cmb_tpd_next_to_clean; | ||
644 | u8 update = 0; | ||
645 | |||
646 | sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); | ||
647 | cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); | ||
648 | |||
649 | while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) { | ||
650 | struct tx_packet_desc *tpd; | ||
651 | update = 1; | ||
652 | tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean); | ||
653 | buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean]; | ||
654 | if (buffer_info->dma) { | ||
655 | pci_unmap_page(adapter->pdev, buffer_info->dma, | ||
656 | buffer_info->length, PCI_DMA_TODEVICE); | ||
657 | buffer_info->dma = 0; | ||
658 | } | ||
659 | |||
660 | if (buffer_info->skb) { | ||
661 | dev_kfree_skb_irq(buffer_info->skb); | ||
662 | buffer_info->skb = NULL; | ||
663 | } | ||
664 | tpd->buffer_addr = 0; | ||
665 | tpd->desc.data = 0; | ||
666 | |||
667 | if (++sw_tpd_next_to_clean == tpd_ring->count) | ||
668 | sw_tpd_next_to_clean = 0; | ||
669 | } | ||
670 | atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); | ||
671 | |||
672 | if (netif_queue_stopped(adapter->netdev) | ||
673 | && netif_carrier_ok(adapter->netdev)) | ||
674 | netif_wake_queue(adapter->netdev); | ||
675 | } | ||
676 | |||
677 | static void atl1_check_for_link(struct atl1_adapter *adapter) | ||
678 | { | ||
679 | struct net_device *netdev = adapter->netdev; | ||
680 | u16 phy_data = 0; | ||
681 | |||
682 | spin_lock(&adapter->lock); | ||
683 | adapter->phy_timer_pending = false; | ||
684 | atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); | ||
685 | atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); | ||
686 | spin_unlock(&adapter->lock); | ||
687 | |||
688 | /* notify upper layer link down ASAP */ | ||
689 | if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ | ||
690 | if (netif_carrier_ok(netdev)) { /* old link state: Up */ | ||
691 | printk(KERN_INFO "%s: %s link is down\n", | ||
692 | atl1_driver_name, netdev->name); | ||
693 | adapter->link_speed = SPEED_0; | ||
694 | netif_carrier_off(netdev); | ||
695 | netif_stop_queue(netdev); | ||
696 | } | ||
697 | } | ||
698 | schedule_work(&adapter->link_chg_task); | ||
699 | } | ||
700 | |||
701 | /* | ||
702 | * atl1_intr - Interrupt Handler | ||
703 | * @irq: interrupt number | ||
704 | * @data: pointer to a network interface device structure | ||
706 | */ | ||
707 | static irqreturn_t atl1_intr(int irq, void *data) | ||
708 | { | ||
709 | /*struct atl1_adapter *adapter = ((struct net_device *)data)->priv;*/ | ||
710 | struct atl1_adapter *adapter = netdev_priv(data); | ||
711 | u32 status; | ||
712 | u8 update_rx; | ||
713 | int max_ints = 10; | ||
714 | |||
715 | status = adapter->cmb.cmb->int_stats; | ||
716 | if (!status) | ||
717 | return IRQ_NONE; | ||
718 | |||
719 | update_rx = 0; | ||
720 | |||
721 | do { | ||
722 | /* clear CMB interrupt status at once */ | ||
723 | adapter->cmb.cmb->int_stats = 0; | ||
724 | |||
725 | if (status & ISR_GPHY) /* clear phy status */ | ||
726 | atl1_clear_phy_int(adapter); | ||
727 | |||
728 | /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ | ||
729 | iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); | ||
730 | |||
731 | /* check if SMB intr */ | ||
732 | if (status & ISR_SMB) | ||
733 | atl1_inc_smb(adapter); | ||
734 | |||
735 | /* check if PCIE PHY Link down */ | ||
736 | if (status & ISR_PHY_LINKDOWN) { | ||
737 | printk(KERN_DEBUG "%s: pcie phy link down %x\n", | ||
738 | atl1_driver_name, status); | ||
739 | if (netif_running(adapter->netdev)) { /* reset MAC */ | ||
740 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
741 | schedule_work(&adapter->pcie_dma_to_rst_task); | ||
742 | return IRQ_HANDLED; | ||
743 | } | ||
744 | } | ||
745 | |||
746 | /* check if DMA read/write error ? */ | ||
747 | if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { | ||
748 | printk(KERN_DEBUG | ||
749 | "%s: pcie DMA r/w error (status = 0x%x)\n", | ||
750 | atl1_driver_name, status); | ||
751 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
752 | schedule_work(&adapter->pcie_dma_to_rst_task); | ||
753 | return IRQ_HANDLED; | ||
754 | } | ||
755 | |||
756 | /* link event */ | ||
757 | if (status & ISR_GPHY) { | ||
758 | adapter->soft_stats.tx_carrier_errors++; | ||
759 | atl1_check_for_link(adapter); | ||
760 | } | ||
761 | |||
762 | /* transmit event */ | ||
763 | if (status & ISR_CMB_TX) | ||
764 | atl1_intr_tx(adapter); | ||
765 | |||
766 | /* rx exception */ | ||
767 | if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | | ||
768 | ISR_RRD_OV | ISR_HOST_RFD_UNRUN | | ||
769 | ISR_HOST_RRD_OV | ISR_CMB_RX))) { | ||
770 | if (status & | ||
771 | (ISR_RXF_OV | ISR_RFD_UNRUN | ISR_RRD_OV | | ||
772 | ISR_HOST_RFD_UNRUN | ISR_HOST_RRD_OV)) | ||
773 | printk(KERN_INFO | ||
774 | "%s: rx exception: status = 0x%x\n", | ||
775 | atl1_driver_name, status); | ||
776 | atl1_intr_rx(adapter); | ||
777 | } | ||
778 | |||
779 | if (--max_ints < 0) | ||
780 | break; | ||
781 | |||
782 | } while ((status = adapter->cmb.cmb->int_stats)); | ||
783 | |||
784 | /* re-enable Interrupt */ | ||
785 | iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); | ||
786 | return IRQ_HANDLED; | ||
787 | } | ||
788 | |||
789 | /* | ||
790 | * atl1_set_multi - Multicast and Promiscuous mode set | ||
791 | * @netdev: network interface device structure | ||
792 | * | ||
793 | * The set_multi entry point is called whenever the multicast address | ||
794 | * list or the network interface flags are updated. This routine is | ||
795 | * responsible for configuring the hardware for proper multicast, | ||
796 | * promiscuous mode, and all-multi behavior. | ||
797 | */ | ||
798 | static void atl1_set_multi(struct net_device *netdev) | ||
799 | { | ||
800 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
801 | struct atl1_hw *hw = &adapter->hw; | ||
802 | struct dev_mc_list *mc_ptr; | ||
803 | u32 rctl; | ||
804 | u32 hash_value; | ||
805 | |||
806 | /* Check for Promiscuous and All Multicast modes */ | ||
807 | rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); | ||
808 | if (netdev->flags & IFF_PROMISC) | ||
809 | rctl |= MAC_CTRL_PROMIS_EN; | ||
810 | else if (netdev->flags & IFF_ALLMULTI) { | ||
811 | rctl |= MAC_CTRL_MC_ALL_EN; | ||
812 | rctl &= ~MAC_CTRL_PROMIS_EN; | ||
813 | } else | ||
814 | rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); | ||
815 | |||
816 | iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); | ||
817 | |||
818 | /* clear the old settings from the multicast hash table */ | ||
819 | iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); | ||
820 | iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); | ||
821 | |||
822 | /* compute each mc address's hash value and put it into the hash table */ | ||
823 | for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { | ||
824 | hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr); | ||
825 | atl1_hash_set(hw, hash_value); | ||
826 | } | ||
827 | } | ||
828 | |||
829 | static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) | ||
830 | { | ||
831 | u32 value; | ||
832 | struct atl1_hw *hw = &adapter->hw; | ||
833 | struct net_device *netdev = adapter->netdev; | ||
834 | /* Config MAC CTRL Register */ | ||
835 | value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; | ||
836 | /* duplex */ | ||
837 | if (FULL_DUPLEX == adapter->link_duplex) | ||
838 | value |= MAC_CTRL_DUPLX; | ||
839 | /* speed */ | ||
840 | value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? | ||
841 | MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << | ||
842 | MAC_CTRL_SPEED_SHIFT); | ||
843 | /* flow control */ | ||
844 | value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); | ||
845 | /* PAD & CRC */ | ||
846 | value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); | ||
847 | /* preamble length */ | ||
848 | value |= (((u32) adapter->hw.preamble_len | ||
849 | & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); | ||
850 | /* vlan */ | ||
851 | if (adapter->vlgrp) | ||
852 | value |= MAC_CTRL_RMV_VLAN; | ||
853 | /* rx checksum | ||
854 | if (adapter->rx_csum) | ||
855 | value |= MAC_CTRL_RX_CHKSUM_EN; | ||
856 | */ | ||
857 | /* filter mode */ | ||
858 | value |= MAC_CTRL_BC_EN; | ||
859 | if (netdev->flags & IFF_PROMISC) | ||
860 | value |= MAC_CTRL_PROMIS_EN; | ||
861 | else if (netdev->flags & IFF_ALLMULTI) | ||
862 | value |= MAC_CTRL_MC_ALL_EN; | ||
863 | /* value |= MAC_CTRL_LOOPBACK; */ | ||
864 | iowrite32(value, hw->hw_addr + REG_MAC_CTRL); | ||
865 | } | ||
866 | |||
867 | static u32 atl1_check_link(struct atl1_adapter *adapter) | ||
868 | { | ||
869 | struct atl1_hw *hw = &adapter->hw; | ||
870 | struct net_device *netdev = adapter->netdev; | ||
871 | u32 ret_val; | ||
872 | u16 speed, duplex, phy_data; | ||
873 | int reconfig = 0; | ||
874 | |||
875 | /* MII_BMSR must be read twice */ | ||
876 | atl1_read_phy_reg(hw, MII_BMSR, &phy_data); | ||
877 | atl1_read_phy_reg(hw, MII_BMSR, &phy_data); | ||
878 | if (!(phy_data & BMSR_LSTATUS)) { /* link down */ | ||
879 | if (netif_carrier_ok(netdev)) { /* old link state: Up */ | ||
880 | printk(KERN_INFO "%s: link is down\n", | ||
881 | atl1_driver_name); | ||
882 | adapter->link_speed = SPEED_0; | ||
883 | netif_carrier_off(netdev); | ||
884 | netif_stop_queue(netdev); | ||
885 | } | ||
886 | return ATL1_SUCCESS; | ||
887 | } | ||
888 | |||
889 | /* Link Up */ | ||
890 | ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); | ||
891 | if (ret_val) | ||
892 | return ret_val; | ||
893 | |||
894 | switch (hw->media_type) { | ||
895 | case MEDIA_TYPE_1000M_FULL: | ||
896 | if (speed != SPEED_1000 || duplex != FULL_DUPLEX) | ||
897 | reconfig = 1; | ||
898 | break; | ||
899 | case MEDIA_TYPE_100M_FULL: | ||
900 | if (speed != SPEED_100 || duplex != FULL_DUPLEX) | ||
901 | reconfig = 1; | ||
902 | break; | ||
903 | case MEDIA_TYPE_100M_HALF: | ||
904 | if (speed != SPEED_100 || duplex != HALF_DUPLEX) | ||
905 | reconfig = 1; | ||
906 | break; | ||
907 | case MEDIA_TYPE_10M_FULL: | ||
908 | if (speed != SPEED_10 || duplex != FULL_DUPLEX) | ||
909 | reconfig = 1; | ||
910 | break; | ||
911 | case MEDIA_TYPE_10M_HALF: | ||
912 | if (speed != SPEED_10 || duplex != HALF_DUPLEX) | ||
913 | reconfig = 1; | ||
914 | break; | ||
915 | } | ||
916 | |||
917 | /* link result matches our setting */ | ||
918 | if (!reconfig) { | ||
919 | if (adapter->link_speed != speed | ||
920 | || adapter->link_duplex != duplex) { | ||
921 | adapter->link_speed = speed; | ||
922 | adapter->link_duplex = duplex; | ||
923 | atl1_setup_mac_ctrl(adapter); | ||
924 | printk(KERN_INFO "%s: %s link is up %d Mbps %s\n", | ||
925 | atl1_driver_name, netdev->name, | ||
926 | adapter->link_speed, | ||
927 | adapter->link_duplex == | ||
928 | FULL_DUPLEX ? "full duplex" : "half duplex"); | ||
929 | } | ||
930 | if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ | ||
931 | netif_carrier_on(netdev); | ||
932 | netif_wake_queue(netdev); | ||
933 | } | ||
934 | return ATL1_SUCCESS; | ||
935 | } | ||
936 | |||
937 | /* change original link status */ | ||
938 | if (netif_carrier_ok(netdev)) { | ||
939 | adapter->link_speed = SPEED_0; | ||
940 | netif_carrier_off(netdev); | ||
941 | netif_stop_queue(netdev); | ||
942 | } | ||
943 | |||
944 | if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && | ||
945 | hw->media_type != MEDIA_TYPE_1000M_FULL) { | ||
946 | switch (hw->media_type) { | ||
947 | case MEDIA_TYPE_100M_FULL: | ||
948 | phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | | ||
949 | MII_CR_RESET; | ||
950 | break; | ||
951 | case MEDIA_TYPE_100M_HALF: | ||
952 | phy_data = MII_CR_SPEED_100 | MII_CR_RESET; | ||
953 | break; | ||
954 | case MEDIA_TYPE_10M_FULL: | ||
955 | phy_data = | ||
956 | MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; | ||
957 | break; | ||
958 | default: /* MEDIA_TYPE_10M_HALF: */ | ||
959 | phy_data = MII_CR_SPEED_10 | MII_CR_RESET; | ||
960 | break; | ||
961 | } | ||
962 | atl1_write_phy_reg(hw, MII_BMCR, phy_data); | ||
963 | return ATL1_SUCCESS; | ||
964 | } | ||
965 | |||
966 | /* auto-neg, insert timer to re-config phy */ | ||
967 | if (!adapter->phy_timer_pending) { | ||
968 | adapter->phy_timer_pending = true; | ||
969 | mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ); | ||
970 | } | ||
971 | |||
972 | return ATL1_SUCCESS; | ||
973 | } | ||
974 | |||
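| /* | ||
|  * The two helpers below program the RX flow-control pause thresholds. | ||
|  * Each register packs a high and a low watermark; per the register names, | ||
|  * the MAC presumably starts and stops sending pause frames as the RX FIFO | ||
|  * (RXF) or RRD ring occupancy crosses these thresholds. | ||
|  */ | ||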
975 | static void set_flow_ctrl_old(struct atl1_adapter *adapter) | ||
976 | { | ||
977 | u32 hi, lo, value; | ||
978 | |||
979 | /* RFD Flow Control */ | ||
980 | value = adapter->rfd_ring.count; | ||
981 | hi = value / 16; | ||
982 | if (hi < 2) | ||
983 | hi = 2; | ||
984 | lo = value * 7 / 8; | ||
985 | |||
986 | value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | | ||
987 | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); | ||
988 | iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); | ||
989 | |||
990 | /* RRD Flow Control */ | ||
991 | value = adapter->rrd_ring.count; | ||
992 | lo = value / 16; | ||
993 | hi = value * 7 / 8; | ||
994 | if (lo < 2) | ||
995 | lo = 2; | ||
996 | value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | | ||
997 | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); | ||
998 | iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); | ||
999 | } | ||
1000 | |||
1001 | static void set_flow_ctrl_new(struct atl1_hw *hw) | ||
1002 | { | ||
1003 | u32 hi, lo, value; | ||
1004 | |||
1005 | /* RXF Flow Control */ | ||
1006 | value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); | ||
1007 | lo = value / 16; | ||
1008 | if (lo < 192) | ||
1009 | lo = 192; | ||
1010 | hi = value * 7 / 8; | ||
1011 | if (hi < lo) | ||
1012 | hi = lo + 16; | ||
1013 | value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | | ||
1014 | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); | ||
1015 | iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); | ||
1016 | |||
1017 | /* RRD Flow Control */ | ||
1018 | value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); | ||
1019 | lo = value / 8; | ||
1020 | hi = value * 7 / 8; | ||
1021 | if (lo < 2) | ||
1022 | lo = 2; | ||
1023 | if (hi < lo) | ||
1024 | hi = lo + 3; | ||
1025 | value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | | ||
1026 | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); | ||
1027 | iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); | ||
1028 | } | ||
1029 | |||
1030 | /* | ||
1031 | * atl1_configure - Configure Transmit & Receive Unit after Reset | ||
1032 | * @adapter: board private structure | ||
1033 | * | ||
1034 | * Configure the Tx/Rx unit of the MAC after a reset. | ||
1035 | */ | ||
1036 | static u32 atl1_configure(struct atl1_adapter *adapter) | ||
1037 | { | ||
1038 | struct atl1_hw *hw = &adapter->hw; | ||
1039 | u32 value; | ||
1040 | |||
1041 | /* clear interrupt status */ | ||
1042 | iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); | ||
1043 | |||
1044 | /* set MAC Address */ | ||
1045 | value = (((u32) hw->mac_addr[2]) << 24) | | ||
1046 | (((u32) hw->mac_addr[3]) << 16) | | ||
1047 | (((u32) hw->mac_addr[4]) << 8) | | ||
1048 | (((u32) hw->mac_addr[5])); | ||
1049 | iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); | ||
1050 | value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); | ||
1051 | iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); | ||
1052 | |||
1053 | /* tx / rx ring */ | ||
1054 | |||
1055 | /* HI base address */ | ||
1056 | iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), | ||
1057 | hw->hw_addr + REG_DESC_BASE_ADDR_HI); | ||
1058 | /* LO base address */ | ||
1059 | iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), | ||
1060 | hw->hw_addr + REG_DESC_RFD_ADDR_LO); | ||
1061 | iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), | ||
1062 | hw->hw_addr + REG_DESC_RRD_ADDR_LO); | ||
1063 | iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), | ||
1064 | hw->hw_addr + REG_DESC_TPD_ADDR_LO); | ||
1065 | iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), | ||
1066 | hw->hw_addr + REG_DESC_CMB_ADDR_LO); | ||
1067 | iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), | ||
1068 | hw->hw_addr + REG_DESC_SMB_ADDR_LO); | ||
1069 | |||
1070 | /* element count */ | ||
1071 | value = adapter->rrd_ring.count; | ||
1072 | value <<= 16; | ||
1073 | value += adapter->rfd_ring.count; | ||
1074 | iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); | ||
1075 | iowrite32(adapter->tpd_ring.count, hw->hw_addr + REG_DESC_TPD_RING_SIZE); | ||
1076 | |||
1077 | /* Load Ptr */ | ||
1078 | iowrite32(1, hw->hw_addr + REG_LOAD_PTR); | ||
1079 | |||
1080 | /* config Mailbox */ | ||
1081 | value = ((atomic_read(&adapter->tpd_ring.next_to_use) | ||
1082 | & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | | ||
1083 | ((atomic_read(&adapter->rrd_ring.next_to_clean) | ||
1084 | & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | | ||
1085 | ((atomic_read(&adapter->rfd_ring.next_to_use) | ||
1086 | & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); | ||
1087 | iowrite32(value, hw->hw_addr + REG_MAILBOX); | ||
1088 | |||
1089 | /* config IPG/IFG */ | ||
1090 | value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) | ||
1091 | << MAC_IPG_IFG_IPGT_SHIFT) | | ||
1092 | (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) | ||
1093 | << MAC_IPG_IFG_MIFG_SHIFT) | | ||
1094 | (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) | ||
1095 | << MAC_IPG_IFG_IPGR1_SHIFT) | | ||
1096 | (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) | ||
1097 | << MAC_IPG_IFG_IPGR2_SHIFT); | ||
1098 | iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); | ||
1099 | |||
1100 | /* config Half-Duplex Control */ | ||
1101 | value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | | ||
1102 | (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) | ||
1103 | << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | | ||
1104 | MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | | ||
1105 | (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | | ||
1106 | (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) | ||
1107 | << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); | ||
1108 | iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); | ||
1109 | |||
1110 | /* set Interrupt Moderator Timer */ | ||
1111 | iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); | ||
1112 | iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); | ||
1113 | |||
1114 | /* set Interrupt Clear Timer */ | ||
1115 | iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); | ||
1116 | |||
1117 | /* set MTU: add 4 bytes for the VLAN tag */ | ||
1118 | iowrite32(hw->max_frame_size + 4, hw->hw_addr + REG_MTU); | ||
1119 | |||
1120 | /* jumbo size & rrd retirement timer */ | ||
1121 | value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) | ||
1122 | << RXQ_JMBOSZ_TH_SHIFT) | | ||
1123 | (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) | ||
1124 | << RXQ_JMBO_LKAH_SHIFT) | | ||
1125 | (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) | ||
1126 | << RXQ_RRD_TIMER_SHIFT); | ||
1127 | iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); | ||
1128 | |||
1129 | /* Flow Control */ | ||
1130 | switch (hw->dev_rev) { | ||
1131 | case 0x8001: | ||
1132 | case 0x9001: | ||
1133 | case 0x9002: | ||
1134 | case 0x9003: | ||
1135 | set_flow_ctrl_old(adapter); | ||
1136 | break; | ||
1137 | default: | ||
1138 | set_flow_ctrl_new(hw); | ||
1139 | break; | ||
1140 | } | ||
1141 | |||
1142 | /* config TXQ */ | ||
1143 | value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) | ||
1144 | << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | | ||
1145 | (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) | ||
1146 | << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | | ||
1147 | (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) | ||
1148 | << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN; | ||
1149 | iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); | ||
1150 | |||
1151 | /* min tpd fetch gap & tx jumbo packet size threshold for task offload */ | ||
1152 | value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) | ||
1153 | << TX_JUMBO_TASK_TH_SHIFT) | | ||
1154 | (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) | ||
1155 | << TX_TPD_MIN_IPG_SHIFT); | ||
1156 | iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); | ||
1157 | |||
1158 | /* config RXQ */ | ||
1159 | value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) | ||
1160 | << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | | ||
1161 | (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) | ||
1162 | << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | | ||
1163 | (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) | ||
1164 | << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | | ||
1165 | RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; | ||
1166 | iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); | ||
1167 | |||
1168 | /* config DMA Engine */ | ||
1169 | value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) | ||
1170 | << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | | ||
1171 | ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) | ||
1172 | << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | | ||
1173 | DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN; | ||
1174 | value |= (u32) hw->dma_ord; | ||
1175 | if (atl1_rcb_128 == hw->rcb_value) | ||
1176 | value |= DMA_CTRL_RCB_VALUE; | ||
1177 | iowrite32(value, hw->hw_addr + REG_DMA_CTRL); | ||
1178 | |||
1179 | /* config CMB / SMB */ | ||
1180 | value = hw->cmb_rrd | ((u32) hw->cmb_tpd << 16); | ||
1181 | iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); | ||
1182 | value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); | ||
1183 | iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); | ||
1184 | iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); | ||
1185 | |||
1186 | /* --- enable CMB / SMB */ | ||
1187 | value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; | ||
1188 | iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); | ||
1189 | |||
1190 | value = ioread32(adapter->hw.hw_addr + REG_ISR); | ||
1191 | if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) | ||
1192 | value = 1; /* config failed */ | ||
1193 | else | ||
1194 | value = 0; | ||
1195 | |||
1196 | /* clear all interrupt status */ | ||
1197 | iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); | ||
1198 | iowrite32(0, adapter->hw.hw_addr + REG_ISR); | ||
1199 | return value; | ||
1200 | } | ||
1201 | |||
1202 | /* | ||
1203 | * atl1_irq_disable - Mask off interrupt generation on the NIC | ||
1204 | * @adapter: board private structure | ||
1205 | */ | ||
1206 | static void atl1_irq_disable(struct atl1_adapter *adapter) | ||
1207 | { | ||
1208 | atomic_inc(&adapter->irq_sem); | ||
1209 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
1210 | ioread32(adapter->hw.hw_addr + REG_IMR); | ||
1211 | synchronize_irq(adapter->pdev->irq); | ||
1212 | } | ||
1213 | |||
1214 | static void atl1_vlan_rx_register(struct net_device *netdev, | ||
1215 | struct vlan_group *grp) | ||
1216 | { | ||
1217 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1218 | unsigned long flags; | ||
1219 | u32 ctrl; | ||
1220 | |||
1221 | spin_lock_irqsave(&adapter->lock, flags); | ||
1222 | /* atl1_irq_disable(adapter); */ | ||
1223 | adapter->vlgrp = grp; | ||
1224 | |||
1225 | if (grp) { | ||
1226 | /* enable VLAN tag insert/strip */ | ||
1227 | ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); | ||
1228 | ctrl |= MAC_CTRL_RMV_VLAN; | ||
1229 | iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); | ||
1230 | } else { | ||
1231 | /* disable VLAN tag insert/strip */ | ||
1232 | ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); | ||
1233 | ctrl &= ~MAC_CTRL_RMV_VLAN; | ||
1234 | iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); | ||
1235 | } | ||
1236 | |||
1237 | /* atl1_irq_enable(adapter); */ | ||
1238 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1239 | } | ||
1240 | |||
1241 | /* FIXME: justify or remove -- CHS */ | ||
1242 | static void atl1_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | ||
1243 | { | ||
1244 | /* We don't do Vlan filtering */ | ||
1245 | return; | ||
1246 | } | ||
1247 | |||
1248 | /* FIXME: this looks wrong too -- CHS */ | ||
1249 | static void atl1_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | ||
1250 | { | ||
1251 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1252 | unsigned long flags; | ||
1253 | |||
1254 | spin_lock_irqsave(&adapter->lock, flags); | ||
1255 | /* atl1_irq_disable(adapter); */ | ||
1256 | if (adapter->vlgrp) | ||
1257 | adapter->vlgrp->vlan_devices[vid] = NULL; | ||
1258 | /* atl1_irq_enable(adapter); */ | ||
1259 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1260 | /* We don't do Vlan filtering */ | ||
1261 | return; | ||
1262 | } | ||
1263 | |||
1264 | static void atl1_restore_vlan(struct atl1_adapter *adapter) | ||
1265 | { | ||
1266 | atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp); | ||
1267 | if (adapter->vlgrp) { | ||
1268 | u16 vid; | ||
1269 | for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { | ||
1270 | if (!adapter->vlgrp->vlan_devices[vid]) | ||
1271 | continue; | ||
1272 | atl1_vlan_rx_add_vid(adapter->netdev, vid); | ||
1273 | } | ||
1274 | } | ||
1275 | } | ||
1276 | |||
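| /* | ||
|  * tpd_avail - number of free transmit descriptors in the TPD ring. | ||
|  * The trailing "- 1" keeps one slot unused so that a completely full | ||
|  * ring can be distinguished from an empty one. | ||
|  */ | ||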
1277 | static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring) | ||
1278 | { | ||
1279 | u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); | ||
1280 | u16 next_to_use = atomic_read(&tpd_ring->next_to_use); | ||
1281 | return (next_to_clean > next_to_use) ? | ||
1282 | next_to_clean - next_to_use - 1 : | ||
1283 | tpd_ring->count + next_to_clean - next_to_use - 1; | ||
1284 | } | ||
1285 | |||
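| /* | ||
|  * atl1_tso - set up the TSO parameters for a GSO frame. | ||
|  * Returns a negative errno if a cloned header cannot be expanded, | ||
|  * true once the TSO fields have been filled in for an IPv4 frame, | ||
|  * and false when the frame does not need segmentation offload. | ||
|  */ | ||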
1286 | static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, | ||
1287 | struct tso_param *tso) | ||
1288 | { | ||
1289 | /* We enter this function holding a spinlock. */ | ||
1290 | u8 ipofst; | ||
1291 | int err; | ||
1292 | |||
1293 | if (skb_shinfo(skb)->gso_size) { | ||
1294 | if (skb_header_cloned(skb)) { | ||
1295 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
1296 | if (unlikely(err)) | ||
1297 | return err; | ||
1298 | } | ||
1299 | |||
1300 | if (skb->protocol == htons(ETH_P_IP)) { | ||
1301 | skb->nh.iph->tot_len = 0; | ||
1302 | skb->nh.iph->check = 0; | ||
1303 | skb->h.th->check = | ||
1304 | ~csum_tcpudp_magic(skb->nh.iph->saddr, | ||
1305 | skb->nh.iph->daddr, 0, | ||
1306 | IPPROTO_TCP, 0); | ||
1307 | ipofst = skb->nh.raw - skb->data; | ||
1308 | if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ | ||
1309 | tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; | ||
1310 | |||
1311 | tso->tsopl |= (skb->nh.iph->ihl & | ||
1312 | CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; | ||
1313 | tso->tsopl |= ((skb->h.th->doff << 2) & | ||
1314 | TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; | ||
1315 | tso->tsopl |= (skb_shinfo(skb)->gso_size & | ||
1316 | TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; | ||
1317 | tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; | ||
1318 | tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT; | ||
1319 | tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT; | ||
1320 | return true; | ||
1321 | } | ||
1322 | } | ||
1323 | return false; | ||
1324 | } | ||
1325 | |||
1326 | static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, | ||
1327 | struct csum_param *csum) | ||
1328 | { | ||
1329 | u8 css, cso; | ||
1330 | |||
1331 | if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { | ||
1332 | cso = skb->h.raw - skb->data; | ||
1333 | css = (skb->h.raw + skb->csum) - skb->data; | ||
1334 | if (unlikely(cso & 0x1)) { | ||
1335 | printk(KERN_DEBUG "%s: payload offset is not an even number\n", | ||
1336 | atl1_driver_name); | ||
1337 | return -1; | ||
1338 | } | ||
1339 | csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) << | ||
1340 | CSUM_PARAM_PLOADOFFSET_SHIFT; | ||
1341 | csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) << | ||
1342 | CSUM_PARAM_XSUMOFFSET_SHIFT; | ||
1343 | csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT; | ||
1344 | return true; | ||
1345 | } | ||
1346 | |||
1347 | return true; | ||
1348 | } | ||
1349 | |||
1350 | static void atl1_tx_map(struct atl1_adapter *adapter, | ||
1351 | struct sk_buff *skb, bool tcp_seg) | ||
1352 | { | ||
1353 | /* We enter this function holding a spinlock. */ | ||
1354 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | ||
1355 | struct atl1_buffer *buffer_info; | ||
1356 | struct page *page; | ||
1357 | int first_buf_len = skb->len; | ||
1358 | unsigned long offset; | ||
1359 | unsigned int nr_frags; | ||
1360 | unsigned int f; | ||
1361 | u16 tpd_next_to_use; | ||
1362 | u16 proto_hdr_len; | ||
1363 | u16 i, m, len12; | ||
1364 | |||
1365 | first_buf_len -= skb->data_len; | ||
1366 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
1367 | tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); | ||
1368 | buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; | ||
1369 | if (unlikely(buffer_info->skb)) | ||
1370 | BUG(); | ||
1371 | buffer_info->skb = NULL; /* put skb in last TPD */ | ||
1372 | |||
1373 | if (tcp_seg) { | ||
1374 | /* TSO/GSO */ | ||
1375 | proto_hdr_len = | ||
1376 | ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); | ||
1377 | buffer_info->length = proto_hdr_len; | ||
1378 | page = virt_to_page(skb->data); | ||
1379 | offset = (unsigned long)skb->data & ~PAGE_MASK; | ||
1380 | buffer_info->dma = pci_map_page(adapter->pdev, page, | ||
1381 | offset, proto_hdr_len, | ||
1382 | PCI_DMA_TODEVICE); | ||
1383 | |||
1384 | if (++tpd_next_to_use == tpd_ring->count) | ||
1385 | tpd_next_to_use = 0; | ||
1386 | |||
1387 | if (first_buf_len > proto_hdr_len) { | ||
1388 | len12 = first_buf_len - proto_hdr_len; | ||
1389 | m = (len12 + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; | ||
1390 | for (i = 0; i < m; i++) { | ||
1391 | buffer_info = | ||
1392 | &tpd_ring->buffer_info[tpd_next_to_use]; | ||
1393 | buffer_info->skb = NULL; | ||
1394 | buffer_info->length = | ||
1395 | (MAX_TX_BUF_LEN >= | ||
1396 | len12) ? MAX_TX_BUF_LEN : len12; | ||
1397 | len12 -= buffer_info->length; | ||
1398 | page = virt_to_page(skb->data + | ||
1399 | (proto_hdr_len + | ||
1400 | i * MAX_TX_BUF_LEN)); | ||
1401 | offset = (unsigned long)(skb->data + | ||
1402 | (proto_hdr_len + | ||
1403 | i * MAX_TX_BUF_LEN)) & | ||
1404 | ~PAGE_MASK; | ||
1405 | buffer_info->dma = | ||
1406 | pci_map_page(adapter->pdev, page, offset, | ||
1407 | buffer_info->length, | ||
1408 | PCI_DMA_TODEVICE); | ||
1409 | if (++tpd_next_to_use == tpd_ring->count) | ||
1410 | tpd_next_to_use = 0; | ||
1411 | } | ||
1412 | } | ||
1413 | } else { | ||
1414 | /* not TSO/GSO */ | ||
1415 | buffer_info->length = first_buf_len; | ||
1416 | page = virt_to_page(skb->data); | ||
1417 | offset = (unsigned long)skb->data & ~PAGE_MASK; | ||
1418 | buffer_info->dma = pci_map_page(adapter->pdev, page, | ||
1419 | offset, first_buf_len, | ||
1420 | PCI_DMA_TODEVICE); | ||
1421 | if (++tpd_next_to_use == tpd_ring->count) | ||
1422 | tpd_next_to_use = 0; | ||
1423 | } | ||
1424 | |||
1425 | for (f = 0; f < nr_frags; f++) { | ||
1426 | struct skb_frag_struct *frag; | ||
1427 | u16 lenf, i, m; | ||
1428 | |||
1429 | frag = &skb_shinfo(skb)->frags[f]; | ||
1430 | lenf = frag->size; | ||
1431 | |||
1432 | m = (lenf + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; | ||
1433 | for (i = 0; i < m; i++) { | ||
1434 | buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; | ||
1435 | if (unlikely(buffer_info->skb)) | ||
1436 | BUG(); | ||
1437 | buffer_info->skb = NULL; | ||
1438 | buffer_info->length = | ||
1439 | (lenf > MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : lenf; | ||
1440 | lenf -= buffer_info->length; | ||
1441 | buffer_info->dma = | ||
1442 | pci_map_page(adapter->pdev, frag->page, | ||
1443 | frag->page_offset + i * MAX_TX_BUF_LEN, | ||
1444 | buffer_info->length, PCI_DMA_TODEVICE); | ||
1445 | |||
1446 | if (++tpd_next_to_use == tpd_ring->count) | ||
1447 | tpd_next_to_use = 0; | ||
1448 | } | ||
1449 | } | ||
1450 | |||
1451 | /* last tpd's buffer-info */ | ||
1452 | buffer_info->skb = skb; | ||
1453 | } | ||
1454 | |||
1455 | static void atl1_tx_queue(struct atl1_adapter *adapter, int count, | ||
1456 | union tpd_descr *descr) | ||
1457 | { | ||
1458 | /* We enter this function holding a spinlock. */ | ||
1459 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | ||
1460 | int j; | ||
1461 | u32 val; | ||
1462 | struct atl1_buffer *buffer_info; | ||
1463 | struct tx_packet_desc *tpd; | ||
1464 | u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); | ||
1465 | |||
1466 | for (j = 0; j < count; j++) { | ||
1467 | buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; | ||
1468 | tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use); | ||
1469 | tpd->desc.csum.csumpu = descr->csum.csumpu; | ||
1470 | tpd->desc.csum.csumpl = descr->csum.csumpl; | ||
1471 | tpd->desc.tso.tsopu = descr->tso.tsopu; | ||
1472 | tpd->desc.tso.tsopl = descr->tso.tsopl; | ||
1473 | tpd->buffer_addr = cpu_to_le64(buffer_info->dma); | ||
1474 | tpd->desc.data = descr->data; | ||
1475 | tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) & | ||
1476 | CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT; | ||
1477 | |||
1478 | val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) & | ||
1479 | TSO_PARAM_SEGMENT_MASK; | ||
1480 | if (val && !j) | ||
1481 | tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT; | ||
1482 | |||
1483 | if (j == (count - 1)) | ||
1484 | tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT; | ||
1485 | |||
1486 | if (++tpd_next_to_use == tpd_ring->count) | ||
1487 | tpd_next_to_use = 0; | ||
1488 | } | ||
1489 | /* | ||
1490 | * Force memory writes to complete before letting h/w | ||
1491 | * know there are new descriptors to fetch. (Only | ||
1492 | * applicable for weak-ordered memory model archs, | ||
1493 | * such as IA-64). | ||
1494 | */ | ||
1495 | wmb(); | ||
1496 | |||
1497 | atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); | ||
1498 | } | ||
1499 | |||
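| /* | ||
|  * atl1_update_mailbox - write the current TPD/RFD producer and RRD consumer | ||
|  * indices into the mailbox register so the hardware picks up newly queued | ||
|  * descriptors. | ||
|  */ | ||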
1500 | static void atl1_update_mailbox(struct atl1_adapter *adapter) | ||
1501 | { | ||
1502 | unsigned long flags; | ||
1503 | u32 tpd_next_to_use; | ||
1504 | u32 rfd_next_to_use; | ||
1505 | u32 rrd_next_to_clean; | ||
1506 | u32 value; | ||
1507 | |||
1508 | spin_lock_irqsave(&adapter->mb_lock, flags); | ||
1509 | |||
1510 | tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); | ||
1511 | rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); | ||
1512 | rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); | ||
1513 | |||
1514 | value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << | ||
1515 | MB_RFD_PROD_INDX_SHIFT) | | ||
1516 | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << | ||
1517 | MB_RRD_CONS_INDX_SHIFT) | | ||
1518 | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << | ||
1519 | MB_TPD_PROD_INDX_SHIFT); | ||
1520 | iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); | ||
1521 | |||
1522 | spin_unlock_irqrestore(&adapter->mb_lock, flags); | ||
1523 | } | ||
1524 | |||
1525 | static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) | ||
1526 | { | ||
1527 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1528 | int len = skb->len; | ||
1529 | int tso; | ||
1530 | int count = 1; | ||
1531 | int ret_val; | ||
1532 | u32 val; | ||
1533 | union tpd_descr param; | ||
1534 | u16 frag_size; | ||
1535 | u16 vlan_tag; | ||
1536 | unsigned long flags; | ||
1537 | unsigned int nr_frags = 0; | ||
1538 | unsigned int mss = 0; | ||
1539 | unsigned int f; | ||
1540 | unsigned int proto_hdr_len; | ||
1541 | |||
1542 | len -= skb->data_len; | ||
1543 | |||
1544 | if (unlikely(skb->len == 0)) { | ||
1545 | dev_kfree_skb_any(skb); | ||
1546 | return NETDEV_TX_OK; | ||
1547 | } | ||
1548 | |||
1549 | param.data = 0; | ||
1550 | param.tso.tsopu = 0; | ||
1551 | param.tso.tsopl = 0; | ||
1552 | param.csum.csumpu = 0; | ||
1553 | param.csum.csumpl = 0; | ||
1554 | |||
1555 | /* nr_frags will be nonzero if we're doing scatter/gather (SG) */ | ||
1556 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
1557 | for (f = 0; f < nr_frags; f++) { | ||
1558 | frag_size = skb_shinfo(skb)->frags[f].size; | ||
1559 | if (frag_size) | ||
1560 | count += | ||
1561 | (frag_size + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; | ||
1562 | } | ||
1563 | |||
1564 | /* mss will be nonzero if we're doing segment offload (TSO/GSO) */ | ||
1565 | mss = skb_shinfo(skb)->gso_size; | ||
1566 | if (mss) { | ||
1567 | if (skb->protocol == htons(ETH_P_IP)) { | ||
1568 | proto_hdr_len = ((skb->h.raw - skb->data) + | ||
1569 | (skb->h.th->doff << 2)); | ||
1570 | if (unlikely(proto_hdr_len > len)) { | ||
1571 | dev_kfree_skb_any(skb); | ||
1572 | return NETDEV_TX_OK; | ||
1573 | } | ||
1574 | /* need additional TPD ? */ | ||
1575 | if (proto_hdr_len != len) | ||
1576 | count += (len - proto_hdr_len + | ||
1577 | MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; | ||
1578 | } | ||
1579 | } | ||
1580 | |||
1581 | local_irq_save(flags); | ||
1582 | if (!spin_trylock(&adapter->lock)) { | ||
1583 | /* Can't get lock - tell upper layer to requeue */ | ||
1584 | local_irq_restore(flags); | ||
1585 | printk(KERN_DEBUG "%s: TX locked\n", atl1_driver_name); | ||
1586 | return NETDEV_TX_LOCKED; | ||
1587 | } | ||
1588 | |||
1589 | if (tpd_avail(&adapter->tpd_ring) < count) { | ||
1590 | /* not enough descriptors */ | ||
1591 | netif_stop_queue(netdev); | ||
1592 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1593 | printk(KERN_DEBUG "%s: TX busy\n", atl1_driver_name); | ||
1594 | return NETDEV_TX_BUSY; | ||
1595 | } | ||
1596 | |||
1597 | param.data = 0; | ||
1598 | |||
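| /* Rearrange the VLAN TCI into the layout the hardware expects; this is | ||
|  * the inverse of the mapping used on receive in atl1_intr_rx(). */ | ||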
1599 | if (adapter->vlgrp && vlan_tx_tag_present(skb)) { | ||
1600 | vlan_tag = vlan_tx_tag_get(skb); | ||
1601 | vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | | ||
1602 | ((vlan_tag >> 9) & 0x8); | ||
1603 | param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT; | ||
1604 | param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) << | ||
1605 | CSUM_PARAM_VALAN_SHIFT; | ||
1606 | } | ||
1607 | |||
1608 | tso = atl1_tso(adapter, skb, ¶m.tso); | ||
1609 | if (tso < 0) { | ||
1610 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1611 | dev_kfree_skb_any(skb); | ||
1612 | return NETDEV_TX_OK; | ||
1613 | } | ||
1614 | |||
1615 | if (!tso) { | ||
1616 | ret_val = atl1_tx_csum(adapter, skb, ¶m.csum); | ||
1617 | if (ret_val < 0) { | ||
1618 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1619 | dev_kfree_skb_any(skb); | ||
1620 | return NETDEV_TX_OK; | ||
1621 | } | ||
1622 | } | ||
1623 | |||
1624 | val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) & | ||
1625 | CSUM_PARAM_SEGMENT_MASK; | ||
1626 | atl1_tx_map(adapter, skb, 1 == val); | ||
1627 | atl1_tx_queue(adapter, count, ¶m); | ||
1628 | netdev->trans_start = jiffies; | ||
1629 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1630 | atl1_update_mailbox(adapter); | ||
1631 | return NETDEV_TX_OK; | ||
1632 | } | ||
1633 | |||
1634 | /* | ||
1635 | * atl1_get_stats - Get System Network Statistics | ||
1636 | * @netdev: network interface device structure | ||
1637 | * | ||
1638 | * Returns the address of the device statistics structure. | ||
1639 | * The statistics are actually updated from the timer callback. | ||
1640 | */ | ||
1641 | static struct net_device_stats *atl1_get_stats(struct net_device *netdev) | ||
1642 | { | ||
1643 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1644 | return &adapter->net_stats; | ||
1645 | } | ||
1646 | |||
1647 | /* | ||
1648 | * atl1_clean_rx_ring - Free RFD Buffers | ||
1649 | * @adapter: board private structure | ||
1650 | */ | ||
1651 | static void atl1_clean_rx_ring(struct atl1_adapter *adapter) | ||
1652 | { | ||
1653 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
1654 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | ||
1655 | struct atl1_buffer *buffer_info; | ||
1656 | struct pci_dev *pdev = adapter->pdev; | ||
1657 | unsigned long size; | ||
1658 | unsigned int i; | ||
1659 | |||
1660 | /* Free all the Rx ring sk_buffs */ | ||
1661 | for (i = 0; i < rfd_ring->count; i++) { | ||
1662 | buffer_info = &rfd_ring->buffer_info[i]; | ||
1663 | if (buffer_info->dma) { | ||
1664 | pci_unmap_page(pdev, | ||
1665 | buffer_info->dma, | ||
1666 | buffer_info->length, | ||
1667 | PCI_DMA_FROMDEVICE); | ||
1668 | buffer_info->dma = 0; | ||
1669 | } | ||
1670 | if (buffer_info->skb) { | ||
1671 | dev_kfree_skb(buffer_info->skb); | ||
1672 | buffer_info->skb = NULL; | ||
1673 | } | ||
1674 | } | ||
1675 | |||
1676 | size = sizeof(struct atl1_buffer) * rfd_ring->count; | ||
1677 | memset(rfd_ring->buffer_info, 0, size); | ||
1678 | |||
1679 | /* Zero out the descriptor ring */ | ||
1680 | memset(rfd_ring->desc, 0, rfd_ring->size); | ||
1681 | |||
1682 | rfd_ring->next_to_clean = 0; | ||
1683 | atomic_set(&rfd_ring->next_to_use, 0); | ||
1684 | |||
1685 | rrd_ring->next_to_use = 0; | ||
1686 | atomic_set(&rrd_ring->next_to_clean, 0); | ||
1687 | } | ||
1688 | |||
1689 | /* | ||
1690 | * atl1_clean_tx_ring - Free Tx Buffers | ||
1691 | * @adapter: board private structure | ||
1692 | */ | ||
1693 | static void atl1_clean_tx_ring(struct atl1_adapter *adapter) | ||
1694 | { | ||
1695 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | ||
1696 | struct atl1_buffer *buffer_info; | ||
1697 | struct pci_dev *pdev = adapter->pdev; | ||
1698 | unsigned long size; | ||
1699 | unsigned int i; | ||
1700 | |||
1701 | /* Free all the Tx ring sk_buffs */ | ||
1702 | for (i = 0; i < tpd_ring->count; i++) { | ||
1703 | buffer_info = &tpd_ring->buffer_info[i]; | ||
1704 | if (buffer_info->dma) { | ||
1705 | pci_unmap_page(pdev, buffer_info->dma, | ||
1706 | buffer_info->length, PCI_DMA_TODEVICE); | ||
1707 | buffer_info->dma = 0; | ||
1708 | } | ||
1709 | } | ||
1710 | |||
1711 | for (i = 0; i < tpd_ring->count; i++) { | ||
1712 | buffer_info = &tpd_ring->buffer_info[i]; | ||
1713 | if (buffer_info->skb) { | ||
1714 | dev_kfree_skb_any(buffer_info->skb); | ||
1715 | buffer_info->skb = NULL; | ||
1716 | } | ||
1717 | } | ||
1718 | |||
1719 | size = sizeof(struct atl1_buffer) * tpd_ring->count; | ||
1720 | memset(tpd_ring->buffer_info, 0, size); | ||
1721 | |||
1722 | /* Zero out the descriptor ring */ | ||
1723 | memset(tpd_ring->desc, 0, tpd_ring->size); | ||
1724 | |||
1725 | atomic_set(&tpd_ring->next_to_use, 0); | ||
1726 | atomic_set(&tpd_ring->next_to_clean, 0); | ||
1727 | } | ||
1728 | |||
1729 | /* | ||
1730 | * atl1_free_ring_resources - Free Tx / RX descriptor Resources | ||
1731 | * @adapter: board private structure | ||
1732 | * | ||
1733 | * Free all transmit software resources | ||
1734 | */ | ||
1735 | void atl1_free_ring_resources(struct atl1_adapter *adapter) | ||
1736 | { | ||
1737 | struct pci_dev *pdev = adapter->pdev; | ||
1738 | struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; | ||
1739 | struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; | ||
1740 | struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; | ||
1741 | struct atl1_ring_header *ring_header = &adapter->ring_header; | ||
1742 | |||
1743 | atl1_clean_tx_ring(adapter); | ||
1744 | atl1_clean_rx_ring(adapter); | ||
1745 | |||
1746 | kfree(tpd_ring->buffer_info); | ||
1747 | pci_free_consistent(pdev, ring_header->size, ring_header->desc, | ||
1748 | ring_header->dma); | ||
1749 | |||
1750 | tpd_ring->buffer_info = NULL; | ||
1751 | tpd_ring->desc = NULL; | ||
1752 | tpd_ring->dma = 0; | ||
1753 | |||
1754 | rfd_ring->buffer_info = NULL; | ||
1755 | rfd_ring->desc = NULL; | ||
1756 | rfd_ring->dma = 0; | ||
1757 | |||
1758 | rrd_ring->desc = NULL; | ||
1759 | rrd_ring->dma = 0; | ||
1760 | } | ||
1761 | |||
1762 | s32 atl1_up(struct atl1_adapter *adapter) | ||
1763 | { | ||
1764 | struct net_device *netdev = adapter->netdev; | ||
1765 | int err; | ||
1766 | int irq_flags = IRQF_SAMPLE_RANDOM; | ||
1767 | |||
1768 | /* hardware has been reset, we need to reload some things */ | ||
1769 | atl1_set_multi(netdev); | ||
1770 | atl1_restore_vlan(adapter); | ||
1771 | err = atl1_alloc_rx_buffers(adapter); | ||
1772 | if (unlikely(!err)) /* no RX BUFFER allocated */ | ||
1773 | return -ENOMEM; | ||
1774 | |||
1775 | if (unlikely(atl1_configure(adapter))) { | ||
1776 | err = -EIO; | ||
1777 | goto err_up; | ||
1778 | } | ||
1779 | |||
1780 | err = pci_enable_msi(adapter->pdev); | ||
1781 | if (err) { | ||
1782 | dev_info(&adapter->pdev->dev, | ||
1783 | "Unable to enable MSI: %d\n", err); | ||
1784 | irq_flags |= IRQF_SHARED; | ||
1785 | } | ||
1786 | |||
1787 | err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags, | ||
1788 | netdev->name, netdev); | ||
1789 | if (unlikely(err)) | ||
1790 | goto err_up; | ||
1791 | |||
1792 | mod_timer(&adapter->watchdog_timer, jiffies); | ||
1793 | atl1_irq_enable(adapter); | ||
1794 | atl1_check_link(adapter); | ||
1795 | return 0; | ||
1796 | |||
1797 | /* FIXME: unreachable code! -- CHS */ | ||
1798 | /* free irq disable any interrupt */ | ||
1799 | iowrite32(0, adapter->hw.hw_addr + REG_IMR); | ||
1800 | free_irq(adapter->pdev->irq, netdev); | ||
1801 | |||
1802 | err_up: | ||
1803 | pci_disable_msi(adapter->pdev); | ||
1804 | /* free rx_buffers */ | ||
1805 | atl1_clean_rx_ring(adapter); | ||
1806 | return err; | ||
1807 | } | ||
1808 | |||
1809 | void atl1_down(struct atl1_adapter *adapter) | ||
1810 | { | ||
1811 | struct net_device *netdev = adapter->netdev; | ||
1812 | |||
1813 | del_timer_sync(&adapter->watchdog_timer); | ||
1814 | del_timer_sync(&adapter->phy_config_timer); | ||
1815 | adapter->phy_timer_pending = false; | ||
1816 | |||
1817 | atl1_irq_disable(adapter); | ||
1818 | free_irq(adapter->pdev->irq, netdev); | ||
1819 | pci_disable_msi(adapter->pdev); | ||
1820 | atl1_reset_hw(&adapter->hw); | ||
1821 | adapter->cmb.cmb->int_stats = 0; | ||
1822 | |||
1823 | adapter->link_speed = SPEED_0; | ||
1824 | adapter->link_duplex = -1; | ||
1825 | netif_carrier_off(netdev); | ||
1826 | netif_stop_queue(netdev); | ||
1827 | |||
1828 | atl1_clean_tx_ring(adapter); | ||
1829 | atl1_clean_rx_ring(adapter); | ||
1830 | } | ||
1831 | |||
1832 | /* | ||
1833 | * atl1_change_mtu - Change the Maximum Transfer Unit | ||
1834 | * @netdev: network interface device structure | ||
1835 | * @new_mtu: new value for maximum frame size | ||
1836 | * | ||
1837 | * Returns 0 on success, negative on failure | ||
1838 | */ | ||
1839 | static int atl1_change_mtu(struct net_device *netdev, int new_mtu) | ||
1840 | { | ||
1841 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1842 | int old_mtu = netdev->mtu; | ||
1843 | int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; | ||
1844 | |||
1845 | if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || | ||
1846 | (max_frame > MAX_JUMBO_FRAME_SIZE)) { | ||
1847 | printk(KERN_WARNING "%s: invalid MTU setting\n", | ||
1848 | atl1_driver_name); | ||
1849 | return -EINVAL; | ||
1850 | } | ||
1851 | |||
1852 | adapter->hw.max_frame_size = max_frame; | ||
1853 | adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; | ||
1854 | adapter->rx_buffer_len = (max_frame + 7) & ~7; | ||
1855 | adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; | ||
1856 | |||
1857 | netdev->mtu = new_mtu; | ||
1858 | if ((old_mtu != new_mtu) && netif_running(netdev)) { | ||
1859 | atl1_down(adapter); | ||
1860 | atl1_up(adapter); | ||
1861 | } | ||
1862 | |||
1863 | return 0; | ||
1864 | } | ||
1865 | |||
1866 | /* | ||
1867 | * atl1_set_mac - Change the Ethernet Address of the NIC | ||
1868 | * @netdev: network interface device structure | ||
1869 | * @p: pointer to an address structure | ||
1870 | * | ||
1871 | * Returns 0 on success, negative on failure | ||
1872 | */ | ||
1873 | static int atl1_set_mac(struct net_device *netdev, void *p) | ||
1874 | { | ||
1875 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1876 | struct sockaddr *addr = p; | ||
1877 | |||
1878 | if (netif_running(netdev)) | ||
1879 | return -EBUSY; | ||
1880 | |||
1881 | if (!is_valid_ether_addr(addr->sa_data)) | ||
1882 | return -EADDRNOTAVAIL; | ||
1883 | |||
1884 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); | ||
1885 | memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); | ||
1886 | |||
1887 | atl1_set_mac_addr(&adapter->hw); | ||
1888 | return 0; | ||
1889 | } | ||
1890 | |||
1891 | /* | ||
1892 | * atl1_watchdog - Timer Call-back | ||
1893 | * @data: pointer to the adapter structure cast into an unsigned long | ||
1894 | */ | ||
1895 | static void atl1_watchdog(unsigned long data) | ||
1896 | { | ||
1897 | struct atl1_adapter *adapter = (struct atl1_adapter *)data; | ||
1898 | |||
1899 | /* Reset the timer */ | ||
1900 | mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); | ||
1901 | } | ||
1902 | |||
1903 | static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) | ||
1904 | { | ||
1905 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1906 | u16 result; | ||
1907 | |||
1908 | atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); | ||
1909 | |||
1910 | return result; | ||
1911 | } | ||
1912 | |||
1913 | static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val) | ||
1914 | { | ||
1915 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1916 | |||
1917 | atl1_write_phy_reg(&adapter->hw, reg_num, val); | ||
1918 | } | ||
1919 | |||
1920 | /* | ||
1921 | * atl1_mii_ioctl - handle MII register ioctls | ||
1922 | * @netdev: network interface device structure | ||
1923 | * @ifr: pointer to the ifreq carrying the MII data | ||
1924 | * @cmd: ioctl command | ||
1925 | */ | ||
1926 | static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
1927 | { | ||
1928 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1929 | unsigned long flags; | ||
1930 | int retval; | ||
1931 | |||
1932 | if (!netif_running(netdev)) | ||
1933 | return -EINVAL; | ||
1934 | |||
1935 | spin_lock_irqsave(&adapter->lock, flags); | ||
1936 | retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); | ||
1937 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1938 | |||
1939 | return retval; | ||
1940 | } | ||
1941 | |||
1942 | /* | ||
1943 | * atl1_ioctl - dispatch ioctl commands | ||
1944 | * @netdev: network interface device structure | ||
1945 | * @ifr: pointer to the interface request structure | ||
1946 | * @cmd: ioctl command | ||
1947 | */ | ||
1948 | static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) | ||
1949 | { | ||
1950 | switch (cmd) { | ||
1951 | case SIOCGMIIPHY: | ||
1952 | case SIOCGMIIREG: | ||
1953 | case SIOCSMIIREG: | ||
1954 | return atl1_mii_ioctl(netdev, ifr, cmd); | ||
1955 | default: | ||
1956 | return -EOPNOTSUPP; | ||
1957 | } | ||
1958 | } | ||
1959 | |||
1960 | /* | ||
1961 | * atl1_tx_timeout - Respond to a Tx Hang | ||
1962 | * @netdev: network interface device structure | ||
1963 | */ | ||
1964 | static void atl1_tx_timeout(struct net_device *netdev) | ||
1965 | { | ||
1966 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
1967 | /* Do the reset outside of interrupt context */ | ||
1968 | schedule_work(&adapter->tx_timeout_task); | ||
1969 | } | ||
1970 | |||
1971 | /* | ||
1972 | * atl1_phy_config - Timer Call-back | ||
1973 | * @data: pointer to the adapter structure cast into an unsigned long | ||
1974 | */ | ||
1975 | static void atl1_phy_config(unsigned long data) | ||
1976 | { | ||
1977 | struct atl1_adapter *adapter = (struct atl1_adapter *)data; | ||
1978 | struct atl1_hw *hw = &adapter->hw; | ||
1979 | unsigned long flags; | ||
1980 | |||
1981 | spin_lock_irqsave(&adapter->lock, flags); | ||
1982 | adapter->phy_timer_pending = false; | ||
1983 | atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); | ||
1984 | atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); | ||
1985 | atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); | ||
1986 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
1987 | } | ||
1988 | |||
1989 | int atl1_reset(struct atl1_adapter *adapter) | ||
1990 | { | ||
1991 | int ret; | ||
1992 | |||
1993 | ret = atl1_reset_hw(&adapter->hw); | ||
1994 | if (ret != ATL1_SUCCESS) | ||
1995 | return ret; | ||
1996 | return atl1_init_hw(&adapter->hw); | ||
1997 | } | ||
1998 | |||
1999 | /* | ||
2000 | * atl1_open - Called when a network interface is made active | ||
2001 | * @netdev: network interface device structure | ||
2002 | * | ||
2003 | * Returns 0 on success, negative value on failure | ||
2004 | * | ||
2005 | * The open entry point is called when a network interface is made | ||
2006 | * active by the system (IFF_UP). At this point all resources needed | ||
2007 | * for transmit and receive operations are allocated, the interrupt | ||
2008 | * handler is registered with the OS, the watchdog timer is started, | ||
2009 | * and the stack is notified that the interface is ready. | ||
2010 | */ | ||
2011 | static int atl1_open(struct net_device *netdev) | ||
2012 | { | ||
2013 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
2014 | int err; | ||
2015 | |||
2016 | /* allocate transmit and receive descriptor resources */ | ||
2017 | err = atl1_setup_ring_resources(adapter); | ||
2018 | if (err) | ||
2019 | return err; | ||
2020 | |||
2021 | err = atl1_up(adapter); | ||
2022 | if (err) | ||
2023 | goto err_up; | ||
2024 | |||
2025 | return 0; | ||
2026 | |||
2027 | err_up: | ||
2028 | atl1_reset(adapter); | ||
2029 | return err; | ||
2030 | } | ||
2031 | |||
2032 | /* | ||
2033 | * atl1_close - Disables a network interface | ||
2034 | * @netdev: network interface device structure | ||
2035 | * | ||
2036 | * Returns 0, this is not allowed to fail | ||
2037 | * | ||
2038 | * The close entry point is called when an interface is de-activated | ||
2039 | * by the OS. The hardware is still under the driver's control, but | ||
2040 | * needs to be disabled. A global MAC reset is issued to stop the | ||
2041 | * hardware, and all transmit and receive resources are freed. | ||
2042 | */ | ||
2043 | static int atl1_close(struct net_device *netdev) | ||
2044 | { | ||
2045 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
2046 | atl1_down(adapter); | ||
2047 | atl1_free_ring_resources(adapter); | ||
2048 | return 0; | ||
2049 | } | ||
2050 | |||
2051 | /* | ||
2052 | * If the TPD buffer size is 0, PCIE DMAR_TO_INT | ||
2053 | * will assert. We do a soft reset <0x1400=1> according | ||
2054 | * to the spec, but it seems that the PCIE or DMA | ||
2055 | * state machine is not reset, so DMAR_TO_INT will | ||
2056 | * assert again and again. | ||
2057 | */ | ||
2058 | static void atl1_tx_timeout_task(struct work_struct *work) | ||
2059 | { | ||
2060 | struct atl1_adapter *adapter = | ||
2061 | container_of(work, struct atl1_adapter, tx_timeout_task); | ||
2062 | struct net_device *netdev = adapter->netdev; | ||
2063 | |||
2064 | netif_device_detach(netdev); | ||
2065 | atl1_down(adapter); | ||
2066 | atl1_up(adapter); | ||
2067 | netif_device_attach(netdev); | ||
2068 | } | ||
2069 | |||
2070 | /* | ||
2071 | * atl1_link_chg_task - deal with a link change event out of interrupt context | ||
2072 | */ | ||
2073 | static void atl1_link_chg_task(struct work_struct *work) | ||
2074 | { | ||
2075 | struct atl1_adapter *adapter = | ||
2076 | container_of(work, struct atl1_adapter, link_chg_task); | ||
2077 | unsigned long flags; | ||
2078 | |||
2079 | spin_lock_irqsave(&adapter->lock, flags); | ||
2080 | atl1_check_link(adapter); | ||
2081 | spin_unlock_irqrestore(&adapter->lock, flags); | ||
2082 | } | ||
2083 | |||
2084 | /* | ||
2085 | * atl1_pcie_patch - Patch for PCIE module | ||
2086 | */ | ||
2087 | static void atl1_pcie_patch(struct atl1_adapter *adapter) | ||
2088 | { | ||
2089 | u32 value; | ||
2090 | value = 0x6500; | ||
2091 | iowrite32(value, adapter->hw.hw_addr + 0x12FC); | ||
2092 | /* pcie flow control mode change */ | ||
2093 | value = ioread32(adapter->hw.hw_addr + 0x1008); | ||
2094 | value |= 0x8000; | ||
2095 | iowrite32(value, adapter->hw.hw_addr + 0x1008); | ||
2096 | } | ||
2097 | |||
2098 | /* | ||
2099 | * On ACPI resume, some VIA motherboards leave the Interrupt Disable bit | ||
2100 | * (0x400) in the PCI Command register set, keeping INTx masked. | ||
2101 | * This function clears that bit to re-enable interrupts. | ||
2102 | * Brackett, 2006/03/15 | ||
2103 | */ | ||
2104 | static void atl1_via_workaround(struct atl1_adapter *adapter) | ||
2105 | { | ||
2106 | unsigned long value; | ||
2107 | |||
2108 | value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); | ||
2109 | if (value & PCI_COMMAND_INTX_DISABLE) | ||
2110 | value &= ~PCI_COMMAND_INTX_DISABLE; | ||
2111 | iowrite16(value, adapter->hw.hw_addr + PCI_COMMAND); | ||
2112 | } | ||
2113 | |||
2114 | /* | ||
2115 | * atl1_probe - Device Initialization Routine | ||
2116 | * @pdev: PCI device information struct | ||
2117 | * @ent: entry in atl1_pci_tbl | ||
2118 | * | ||
2119 | * Returns 0 on success, negative on failure | ||
2120 | * | ||
2121 | * atl1_probe initializes an adapter identified by a pci_dev structure. | ||
2122 | * The OS initialization, configuring of the adapter private structure, | ||
2123 | * and a hardware reset occur. | ||
2124 | */ | ||
2125 | static int __devinit atl1_probe(struct pci_dev *pdev, | ||
2126 | const struct pci_device_id *ent) | ||
2127 | { | ||
2128 | struct net_device *netdev; | ||
2129 | struct atl1_adapter *adapter; | ||
2130 | static int cards_found = 0; | ||
2131 | bool pci_using_64 = true; | ||
2132 | int err; | ||
2133 | |||
2134 | err = pci_enable_device(pdev); | ||
2135 | if (err) | ||
2136 | return err; | ||
2137 | |||
2138 | err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | ||
2139 | if (err) { | ||
2140 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
2141 | if (err) { | ||
2142 | printk(KERN_DEBUG | ||
2143 | "%s: no usable DMA configuration, aborting\n", | ||
2144 | atl1_driver_name); | ||
2145 | goto err_dma; | ||
2146 | } | ||
2147 | pci_using_64 = false; | ||
2148 | } | ||
2149 | /* Mark all PCI regions associated with PCI device | ||
2150 | * pdev as being reserved by owner atl1_driver_name | ||
2151 | */ | ||
2152 | err = pci_request_regions(pdev, atl1_driver_name); | ||
2153 | if (err) | ||
2154 | goto err_request_regions; | ||
2155 | |||
2156 | /* Enables bus-mastering on the device and calls | ||
2157 | * pcibios_set_master to do the needed arch specific settings | ||
2158 | */ | ||
2159 | pci_set_master(pdev); | ||
2160 | |||
2161 | netdev = alloc_etherdev(sizeof(struct atl1_adapter)); | ||
2162 | if (!netdev) { | ||
2163 | err = -ENOMEM; | ||
2164 | goto err_alloc_etherdev; | ||
2165 | } | ||
2166 | SET_MODULE_OWNER(netdev); | ||
2167 | SET_NETDEV_DEV(netdev, &pdev->dev); | ||
2168 | |||
2169 | pci_set_drvdata(pdev, netdev); | ||
2170 | adapter = netdev_priv(netdev); | ||
2171 | adapter->netdev = netdev; | ||
2172 | adapter->pdev = pdev; | ||
2173 | adapter->hw.back = adapter; | ||
2174 | |||
2175 | adapter->hw.hw_addr = pci_iomap(pdev, 0, 0); | ||
2176 | if (!adapter->hw.hw_addr) { | ||
2177 | err = -EIO; | ||
2178 | goto err_pci_iomap; | ||
2179 | } | ||
2180 | /* get device revision number */ | ||
2181 | adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + (REG_MASTER_CTRL + 2)); | ||
2182 | |||
2183 | /* set default ring resource counts */ | ||
2184 | adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD; | ||
2185 | adapter->tpd_ring.count = ATL1_DEFAULT_TPD; | ||
2186 | |||
2187 | adapter->mii.dev = netdev; | ||
2188 | adapter->mii.mdio_read = mdio_read; | ||
2189 | adapter->mii.mdio_write = mdio_write; | ||
2190 | adapter->mii.phy_id_mask = 0x1f; | ||
2191 | adapter->mii.reg_num_mask = 0x1f; | ||
2192 | |||
2193 | netdev->open = &atl1_open; | ||
2194 | netdev->stop = &atl1_close; | ||
2195 | netdev->hard_start_xmit = &atl1_xmit_frame; | ||
2196 | netdev->get_stats = &atl1_get_stats; | ||
2197 | netdev->set_multicast_list = &atl1_set_multi; | ||
2198 | netdev->set_mac_address = &atl1_set_mac; | ||
2199 | netdev->change_mtu = &atl1_change_mtu; | ||
2200 | netdev->do_ioctl = &atl1_ioctl; | ||
2201 | netdev->tx_timeout = &atl1_tx_timeout; | ||
2202 | netdev->watchdog_timeo = 5 * HZ; | ||
2203 | netdev->vlan_rx_register = atl1_vlan_rx_register; | ||
2204 | netdev->vlan_rx_add_vid = atl1_vlan_rx_add_vid; | ||
2205 | netdev->vlan_rx_kill_vid = atl1_vlan_rx_kill_vid; | ||
2206 | netdev->ethtool_ops = &atl1_ethtool_ops; | ||
2207 | adapter->bd_number = cards_found; | ||
2208 | adapter->pci_using_64 = pci_using_64; | ||
2209 | |||
2210 | /* setup the private structure */ | ||
2211 | err = atl1_sw_init(adapter); | ||
2212 | if (err) | ||
2213 | goto err_common; | ||
2214 | |||
2215 | netdev->features = NETIF_F_HW_CSUM; | ||
2216 | netdev->features |= NETIF_F_SG; | ||
2217 | netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); | ||
2218 | |||
2219 | /* | ||
2220 | * FIXME - Until tso performance gets fixed, disable the feature. | ||
2221 | * Enable it with ethtool -K if desired. | ||
2222 | */ | ||
2223 | /* netdev->features |= NETIF_F_TSO; */ | ||
2224 | |||
2225 | if (pci_using_64) | ||
2226 | netdev->features |= NETIF_F_HIGHDMA; | ||
2227 | |||
2228 | netdev->features |= NETIF_F_LLTX; | ||
2229 | |||
2230 | /* | ||
2231 | * patch for some early-revision L1 chips; | ||
2232 | * the final L1 revision may not need these | ||
2233 | * patches | ||
2234 | */ | ||
2235 | /* atl1_pcie_patch(adapter); */ | ||
2236 | |||
2237 | /* really reset GPHY core */ | ||
2238 | iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE); | ||
2239 | |||
2240 | /* | ||
2241 | * reset the controller to | ||
2242 | * put the device in a known good starting state | ||
2243 | */ | ||
2244 | if (atl1_reset_hw(&adapter->hw)) { | ||
2245 | err = -EIO; | ||
2246 | goto err_common; | ||
2247 | } | ||
2248 | |||
2249 | /* copy the MAC address out of the EEPROM */ | ||
2250 | atl1_read_mac_addr(&adapter->hw); | ||
2251 | memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); | ||
2252 | |||
2253 | if (!is_valid_ether_addr(netdev->dev_addr)) { | ||
2254 | err = -EIO; | ||
2255 | goto err_common; | ||
2256 | } | ||
2257 | |||
2258 | atl1_check_options(adapter); | ||
2259 | |||
2260 | /* pre-init the MAC, and setup link */ | ||
2261 | err = atl1_init_hw(&adapter->hw); | ||
2262 | if (err) { | ||
2263 | err = -EIO; | ||
2264 | goto err_common; | ||
2265 | } | ||
2266 | |||
2267 | atl1_pcie_patch(adapter); | ||
2268 | /* assume we have no link for now */ | ||
2269 | netif_carrier_off(netdev); | ||
2270 | netif_stop_queue(netdev); | ||
2271 | |||
2272 | init_timer(&adapter->watchdog_timer); | ||
2273 | adapter->watchdog_timer.function = &atl1_watchdog; | ||
2274 | adapter->watchdog_timer.data = (unsigned long)adapter; | ||
2275 | |||
2276 | init_timer(&adapter->phy_config_timer); | ||
2277 | adapter->phy_config_timer.function = &atl1_phy_config; | ||
2278 | adapter->phy_config_timer.data = (unsigned long)adapter; | ||
2279 | adapter->phy_timer_pending = false; | ||
2280 | |||
2281 | INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); | ||
2282 | |||
2283 | INIT_WORK(&adapter->link_chg_task, atl1_link_chg_task); | ||
2284 | |||
2285 | INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task); | ||
2286 | |||
2287 | err = register_netdev(netdev); | ||
2288 | if (err) | ||
2289 | goto err_common; | ||
2290 | |||
2291 | cards_found++; | ||
2292 | atl1_via_workaround(adapter); | ||
2293 | return 0; | ||
2294 | |||
2295 | err_common: | ||
2296 | pci_iounmap(pdev, adapter->hw.hw_addr); | ||
2297 | err_pci_iomap: | ||
2298 | free_netdev(netdev); | ||
2299 | err_alloc_etherdev: | ||
2300 | pci_release_regions(pdev); | ||
2301 | err_dma: | ||
2302 | err_request_regions: | ||
2303 | pci_disable_device(pdev); | ||
2304 | return err; | ||
2305 | } | ||
2306 | |||
2307 | /* | ||
2308 | * atl1_remove - Device Removal Routine | ||
2309 | * @pdev: PCI device information struct | ||
2310 | * | ||
2311 | * atl1_remove is called by the PCI subsystem to alert the driver | ||
2312 | * that it should release a PCI device. This could be caused by a | ||
2313 | * Hot-Plug event, or because the driver is going to be removed from | ||
2314 | * memory. | ||
2315 | */ | ||
2316 | static void __devexit atl1_remove(struct pci_dev *pdev) | ||
2317 | { | ||
2318 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2319 | struct atl1_adapter *adapter; | ||
2320 | /* Device not available. Return. */ | ||
2321 | if (!netdev) | ||
2322 | return; | ||
2323 | |||
2324 | adapter = netdev_priv(netdev); | ||
2325 | iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE); | ||
2326 | unregister_netdev(netdev); | ||
2327 | pci_iounmap(pdev, adapter->hw.hw_addr); | ||
2328 | pci_release_regions(pdev); | ||
2329 | free_netdev(netdev); | ||
2330 | pci_disable_device(pdev); | ||
2331 | } | ||
2332 | |||
2333 | #ifdef CONFIG_PM | ||
2334 | static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) | ||
2335 | { | ||
2336 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2337 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
2338 | struct atl1_hw *hw = &adapter->hw; | ||
2339 | u32 ctrl = 0; | ||
2340 | u32 wufc = adapter->wol; | ||
2341 | |||
2342 | netif_device_detach(netdev); | ||
2343 | if (netif_running(netdev)) | ||
2344 | atl1_down(adapter); | ||
2345 | |||
2346 | atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); | ||
2347 | atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); | ||
2348 | if (ctrl & BMSR_LSTATUS) | ||
2349 | wufc &= ~ATL1_WUFC_LNKC; | ||
2350 | |||
2351 | /* reduce speed to 10/100M */ | ||
2352 | if (wufc) { | ||
2353 | atl1_phy_enter_power_saving(hw); | ||
2354 | /* upon resume, let the driver set up the link again */ | ||
2355 | hw->phy_configured = false; | ||
2356 | atl1_set_mac_addr(hw); | ||
2357 | atl1_set_multi(netdev); | ||
2358 | |||
2359 | ctrl = 0; | ||
2360 | /* turn on magic packet wol */ | ||
2361 | if (wufc & ATL1_WUFC_MAG) | ||
2362 | ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; | ||
2363 | |||
2364 | /* turn on Link change WOL */ | ||
2365 | if (wufc & ATL1_WUFC_LNKC) | ||
2366 | ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); | ||
2367 | iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); | ||
2368 | |||
2369 | /* turn on all-multi mode if wake on multicast is enabled */ | ||
2370 | ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); | ||
2371 | ctrl &= ~MAC_CTRL_DBG; | ||
2372 | ctrl &= ~MAC_CTRL_PROMIS_EN; | ||
2373 | if (wufc & ATL1_WUFC_MC) | ||
2374 | ctrl |= MAC_CTRL_MC_ALL_EN; | ||
2375 | else | ||
2376 | ctrl &= ~MAC_CTRL_MC_ALL_EN; | ||
2377 | |||
2378 | /* turn on broadcast mode if wake-on-broadcast is enabled */ | ||
2379 | if (wufc & ATL1_WUFC_BC) | ||
2380 | ctrl |= MAC_CTRL_BC_EN; | ||
2381 | else | ||
2382 | ctrl &= ~MAC_CTRL_BC_EN; | ||
2383 | |||
2384 | /* enable RX */ | ||
2385 | ctrl |= MAC_CTRL_RX_EN; | ||
2386 | iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); | ||
2387 | pci_enable_wake(pdev, PCI_D3hot, 1); | ||
2388 | pci_enable_wake(pdev, PCI_D3cold, 1); /* 4 == D3 cold */ | ||
2389 | } else { | ||
2390 | iowrite32(0, hw->hw_addr + REG_WOL_CTRL); | ||
2391 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
2392 | pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */ | ||
2393 | } | ||
2394 | |||
2395 | pci_save_state(pdev); | ||
2396 | pci_disable_device(pdev); | ||
2397 | |||
2398 | pci_set_power_state(pdev, PCI_D3hot); | ||
2399 | |||
2400 | return 0; | ||
2401 | } | ||
2402 | |||
2403 | static int atl1_resume(struct pci_dev *pdev) | ||
2404 | { | ||
2405 | struct net_device *netdev = pci_get_drvdata(pdev); | ||
2406 | struct atl1_adapter *adapter = netdev_priv(netdev); | ||
2407 | u32 ret_val; | ||
2408 | |||
2409 | pci_set_power_state(pdev, 0); | ||
2410 | pci_restore_state(pdev); | ||
2411 | |||
2412 | ret_val = pci_enable_device(pdev); | ||
2413 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
2414 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
2415 | |||
2416 | iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); | ||
2417 | atl1_reset(adapter); | ||
2418 | |||
2419 | if (netif_running(netdev)) | ||
2420 | atl1_up(adapter); | ||
2421 | netif_device_attach(netdev); | ||
2422 | |||
2423 | atl1_via_workaround(adapter); | ||
2424 | |||
2425 | return 0; | ||
2426 | } | ||
2427 | #else | ||
2428 | #define atl1_suspend NULL | ||
2429 | #define atl1_resume NULL | ||
2430 | #endif | ||
2431 | |||
2432 | static struct pci_driver atl1_driver = { | ||
2433 | .name = atl1_driver_name, | ||
2434 | .id_table = atl1_pci_tbl, | ||
2435 | .probe = atl1_probe, | ||
2436 | .remove = __devexit_p(atl1_remove), | ||
2437 | /* Power Management Hooks */ | ||
2438 | /* probably broken right now -- CHS */ | ||
2439 | .suspend = atl1_suspend, | ||
2440 | .resume = atl1_resume | ||
2441 | }; | ||
2442 | |||
2443 | /* | ||
2444 | * atl1_exit_module - Driver Exit Cleanup Routine | ||
2445 | * | ||
2446 | * atl1_exit_module is called just before the driver is removed | ||
2447 | * from memory. | ||
2448 | */ | ||
2449 | static void __exit atl1_exit_module(void) | ||
2450 | { | ||
2451 | pci_unregister_driver(&atl1_driver); | ||
2452 | } | ||
2453 | |||
2454 | /* | ||
2455 | * atl1_init_module - Driver Registration Routine | ||
2456 | * | ||
2457 | * atl1_init_module is the first routine called when the driver is | ||
2458 | * loaded. All it does is register with the PCI subsystem. | ||
2459 | */ | ||
2460 | static int __init atl1_init_module(void) | ||
2461 | { | ||
2462 | printk(KERN_INFO "%s - version %s\n", atl1_driver_string, DRIVER_VERSION); | ||
2463 | printk(KERN_INFO "%s\n", atl1_copyright); | ||
2464 | return pci_register_driver(&atl1_driver); | ||
2465 | } | ||
2466 | |||
2467 | module_init(atl1_init_module); | ||
2468 | module_exit(atl1_exit_module); | ||
diff --git a/drivers/net/atl1/atl1_param.c b/drivers/net/atl1/atl1_param.c new file mode 100644 index 000000000000..c407214339f6 --- /dev/null +++ b/drivers/net/atl1/atl1_param.c | |||
@@ -0,0 +1,206 @@ | |||
1 | /* | ||
2 | * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. | ||
3 | * Copyright(c) 2006 Chris Snook <csnook@redhat.com> | ||
4 | * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> | ||
5 | * | ||
6 | * Derived from Intel e1000 driver | ||
7 | * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the Free | ||
11 | * Software Foundation; either version 2 of the License, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
17 | * more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along with | ||
20 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
21 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/types.h> | ||
25 | #include <linux/pci.h> | ||
26 | #include <linux/moduleparam.h> | ||
27 | #include "atl1.h" | ||
28 | |||
29 | /* | ||
30 | * This is the only thing that needs to be changed to adjust the | ||
31 | * maximum number of ports that the driver can manage. | ||
32 | */ | ||
33 | #define ATL1_MAX_NIC 4 | ||
34 | |||
35 | #define OPTION_UNSET -1 | ||
36 | #define OPTION_DISABLED 0 | ||
37 | #define OPTION_ENABLED 1 | ||
38 | |||
39 | #define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET } | ||
40 | |||
41 | /* | ||
42 | * Interrupt Moderate Timer in units of 2 us | ||
43 | * | ||
44 | * Valid Range: 10-65535 | ||
45 | * | ||
46 | * Default Value: 100 (200us) | ||
47 | */ | ||
48 | static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; | ||
49 | static int num_int_mod_timer = 0; | ||
50 | module_param_array_named(int_mod_timer, int_mod_timer, int, &num_int_mod_timer, 0); | ||
51 | MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer"); | ||
52 | |||
53 | /* | ||
54 | * flash_vendor | ||
55 | * | ||
56 | * Valid Range: 0-2 | ||
57 | * | ||
58 | * 0 - Atmel | ||
59 | * 1 - SST | ||
60 | * 2 - ST | ||
61 | * | ||
62 | * Default Value: 0 | ||
63 | */ | ||
64 | static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; | ||
65 | static int num_flash_vendor = 0; | ||
66 | module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0); | ||
67 | MODULE_PARM_DESC(flash_vendor, "SPI flash vendor"); | ||
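Both parameters are per-board arrays indexed in probe order, so a system with two adapters could, for example, be loaded with "modprobe atl1 int_mod_timer=100,200 flash_vendor=0,1" (values chosen purely for illustration).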
68 | |||
69 | #define DEFAULT_INT_MOD_CNT 100 /* 200us */ | ||
70 | #define MAX_INT_MOD_CNT 65000 | ||
71 | #define MIN_INT_MOD_CNT 50 | ||
72 | |||
73 | #define FLASH_VENDOR_DEFAULT 0 | ||
74 | #define FLASH_VENDOR_MIN 0 | ||
75 | #define FLASH_VENDOR_MAX 2 | ||
76 | |||
77 | struct atl1_option { | ||
78 | enum { enable_option, range_option, list_option } type; | ||
79 | char *name; | ||
80 | char *err; | ||
81 | int def; | ||
82 | union { | ||
83 | struct { /* range_option info */ | ||
84 | int min; | ||
85 | int max; | ||
86 | } r; | ||
87 | struct { /* list_option info */ | ||
88 | int nr; | ||
89 | struct atl1_opt_list { | ||
90 | int i; | ||
91 | char *str; | ||
92 | } *p; | ||
93 | } l; | ||
94 | } arg; | ||
95 | }; | ||
96 | |||
97 | static int __devinit atl1_validate_option(int *value, struct atl1_option *opt) | ||
98 | { | ||
99 | if (*value == OPTION_UNSET) { | ||
100 | *value = opt->def; | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | switch (opt->type) { | ||
105 | case enable_option: | ||
106 | switch (*value) { | ||
107 | case OPTION_ENABLED: | ||
108 | printk(KERN_INFO "%s: %s Enabled\n", atl1_driver_name, | ||
109 | opt->name); | ||
110 | return 0; | ||
111 | case OPTION_DISABLED: | ||
112 | printk(KERN_INFO "%s: %s Disabled\n", atl1_driver_name, | ||
113 | opt->name); | ||
114 | return 0; | ||
115 | } | ||
116 | break; | ||
117 | case range_option: | ||
118 | if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { | ||
119 | printk(KERN_INFO "%s: %s set to %i\n", | ||
120 | atl1_driver_name, opt->name, *value); | ||
121 | return 0; | ||
122 | } | ||
123 | break; | ||
124 | case list_option:{ | ||
125 | int i; | ||
126 | struct atl1_opt_list *ent; | ||
127 | |||
128 | for (i = 0; i < opt->arg.l.nr; i++) { | ||
129 | ent = &opt->arg.l.p[i]; | ||
130 | if (*value == ent->i) { | ||
131 | if (ent->str[0] != '\0') | ||
132 | printk(KERN_INFO "%s: %s\n", | ||
133 | atl1_driver_name, ent->str); | ||
134 | return 0; | ||
135 | } | ||
136 | } | ||
137 | } | ||
138 | break; | ||
139 | |||
140 | default: | ||
141 | break; | ||
142 | } | ||
143 | |||
144 | printk(KERN_INFO "%s: invalid %s specified (%i) %s\n", | ||
145 | atl1_driver_name, opt->name, *value, opt->err); | ||
146 | *value = opt->def; | ||
147 | return -1; | ||
148 | } | ||
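atl1_check_options() below exercises only range options; purely as an illustration of the list_option arm (hypothetical code, not part of the driver), a table-driven flash-vendor option could be declared like this:

	static struct atl1_opt_list flash_list[] = {
		{ .i = 0, .str = "Atmel SPI flash selected" },
		{ .i = 1, .str = "SST SPI flash selected" },
		{ .i = 2, .str = "ST SPI flash selected" },
	};
	struct atl1_option opt = {
		.type = list_option,
		.name = "SPI Flash Vendor",
		.err  = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT),
		.def  = FLASH_VENDOR_DEFAULT,
		.arg  = { .l = { .nr = ARRAY_SIZE(flash_list),
				 .p  = flash_list } }
	};

With such a table, atl1_validate_option() would take the list arm and print the matching .str for the selected value.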
149 | |||
150 | /* | ||
151 | * atl1_check_options - Range Checking for Command Line Parameters | ||
152 | * @adapter: board private structure | ||
153 | * | ||
154 | * This routine checks all command line parameters for valid user | ||
155 | * input. If an invalid value is given, or if no user specified | ||
156 | * value exists, a default value is used. The final value is stored | ||
157 | * in a variable in the adapter structure. | ||
158 | */ | ||
159 | void __devinit atl1_check_options(struct atl1_adapter *adapter) | ||
160 | { | ||
161 | int bd = adapter->bd_number; | ||
162 | if (bd >= ATL1_MAX_NIC) { | ||
163 | printk(KERN_NOTICE "%s: warning: no configuration for board #%i\n", | ||
164 | atl1_driver_name, bd); | ||
165 | printk(KERN_NOTICE "%s: using defaults for all values\n", | ||
166 | atl1_driver_name); | ||
167 | } | ||
168 | { /* Interrupt Moderate Timer */ | ||
169 | struct atl1_option opt = { | ||
170 | .type = range_option, | ||
171 | .name = "Interrupt Moderator Timer", | ||
172 | .err = "using default of " | ||
173 | __MODULE_STRING(DEFAULT_INT_MOD_CNT), | ||
174 | .def = DEFAULT_INT_MOD_CNT, | ||
175 | .arg = {.r = | ||
176 | {.min = MIN_INT_MOD_CNT,.max = MAX_INT_MOD_CNT}} | ||
177 | }; | ||
178 | int val; | ||
179 | if (num_int_mod_timer > bd) { | ||
180 | val = int_mod_timer[bd]; | ||
181 | atl1_validate_option(&val, &opt); | ||
182 | adapter->imt = (u16) val; | ||
183 | } else | ||
184 | adapter->imt = (u16) (opt.def); | ||
185 | } | ||
186 | |||
187 | { /* Flash Vendor */ | ||
188 | struct atl1_option opt = { | ||
189 | .type = range_option, | ||
190 | .name = "SPI Flash Vendor", | ||
191 | .err = "using default of " | ||
192 | __MODULE_STRING(FLASH_VENDOR_DEFAULT), | ||
193 | .def = FLASH_VENDOR_DEFAULT, | ||
194 | .arg = {.r = | ||
195 | {.min = FLASH_VENDOR_MIN,.max = | ||
196 | FLASH_VENDOR_MAX}} | ||
197 | }; | ||
198 | int val; | ||
199 | if (num_flash_vendor > bd) { | ||
200 | val = flash_vendor[bd]; | ||
201 | atl1_validate_option(&val, &opt); | ||
202 | adapter->hw.flash_vendor = (u8) val; | ||
203 | } else | ||
204 | adapter->hw.flash_vendor = (u8) (opt.def); | ||
205 | } | ||
206 | } | ||
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 32923162179e..217a2eedee0a 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -184,7 +184,7 @@ static int tlb_initialize(struct bonding *bond) | |||
184 | 184 | ||
185 | spin_lock_init(&(bond_info->tx_hashtbl_lock)); | 185 | spin_lock_init(&(bond_info->tx_hashtbl_lock)); |
186 | 186 | ||
187 | new_hashtbl = kmalloc(size, GFP_KERNEL); | 187 | new_hashtbl = kzalloc(size, GFP_KERNEL); |
188 | if (!new_hashtbl) { | 188 | if (!new_hashtbl) { |
189 | printk(KERN_ERR DRV_NAME | 189 | printk(KERN_ERR DRV_NAME |
190 | ": %s: Error: Failed to allocate TLB hash table\n", | 190 | ": %s: Error: Failed to allocate TLB hash table\n", |
@@ -195,8 +195,6 @@ static int tlb_initialize(struct bonding *bond) | |||
195 | 195 | ||
196 | bond_info->tx_hashtbl = new_hashtbl; | 196 | bond_info->tx_hashtbl = new_hashtbl; |
197 | 197 | ||
198 | memset(bond_info->tx_hashtbl, 0, size); | ||
199 | |||
200 | for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { | 198 | for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) { |
201 | tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1); | 199 | tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1); |
202 | } | 200 | } |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d3801a00d3d5..8ce8fec615ba 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1343,14 +1343,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1343 | "inaccurate.\n", bond_dev->name, slave_dev->name); | 1343 | "inaccurate.\n", bond_dev->name, slave_dev->name); |
1344 | } | 1344 | } |
1345 | 1345 | ||
1346 | new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL); | 1346 | new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); |
1347 | if (!new_slave) { | 1347 | if (!new_slave) { |
1348 | res = -ENOMEM; | 1348 | res = -ENOMEM; |
1349 | goto err_undo_flags; | 1349 | goto err_undo_flags; |
1350 | } | 1350 | } |
1351 | 1351 | ||
1352 | memset(new_slave, 0, sizeof(struct slave)); | ||
1353 | |||
1354 | /* save slave's original flags before calling | 1352 | /* save slave's original flags before calling |
1355 | * netdev_set_master and dev_open | 1353 | * netdev_set_master and dev_open |
1356 | */ | 1354 | */ |
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index dfa035a1ad45..c67f7d3c2f92 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c | |||
@@ -74,8 +74,6 @@ enum { | |||
74 | 74 | ||
75 | #define EEPROM_MAGIC 0x38E2F10C | 75 | #define EEPROM_MAGIC 0x38E2F10C |
76 | 76 | ||
77 | #define to_net_dev(class) container_of(class, struct net_device, class_dev) | ||
78 | |||
79 | #define CH_DEVICE(devid, ssid, idx) \ | 77 | #define CH_DEVICE(devid, ssid, idx) \ |
80 | { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx } | 78 | { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx } |
81 | 79 | ||
@@ -434,11 +432,12 @@ static int setup_sge_qsets(struct adapter *adap) | |||
434 | return 0; | 432 | return 0; |
435 | } | 433 | } |
436 | 434 | ||
437 | static ssize_t attr_show(struct class_device *cd, char *buf, | 435 | static ssize_t attr_show(struct device *d, struct device_attribute *attr, |
436 | char *buf, | ||
438 | ssize_t(*format) (struct adapter *, char *)) | 437 | ssize_t(*format) (struct adapter *, char *)) |
439 | { | 438 | { |
440 | ssize_t len; | 439 | ssize_t len; |
441 | struct adapter *adap = to_net_dev(cd)->priv; | 440 | struct adapter *adap = to_net_dev(d)->priv; |
442 | 441 | ||
443 | /* Synchronize with ioctls that may shut down the device */ | 442 | /* Synchronize with ioctls that may shut down the device */ |
444 | rtnl_lock(); | 443 | rtnl_lock(); |
@@ -447,14 +446,15 @@ static ssize_t attr_show(struct class_device *cd, char *buf, | |||
447 | return len; | 446 | return len; |
448 | } | 447 | } |
449 | 448 | ||
450 | static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len, | 449 | static ssize_t attr_store(struct device *d, struct device_attribute *attr, |
450 | const char *buf, size_t len, | ||
451 | ssize_t(*set) (struct adapter *, unsigned int), | 451 | ssize_t(*set) (struct adapter *, unsigned int), |
452 | unsigned int min_val, unsigned int max_val) | 452 | unsigned int min_val, unsigned int max_val) |
453 | { | 453 | { |
454 | char *endp; | 454 | char *endp; |
455 | ssize_t ret; | 455 | ssize_t ret; |
456 | unsigned int val; | 456 | unsigned int val; |
457 | struct adapter *adap = to_net_dev(cd)->priv; | 457 | struct adapter *adap = to_net_dev(d)->priv; |
458 | 458 | ||
459 | if (!capable(CAP_NET_ADMIN)) | 459 | if (!capable(CAP_NET_ADMIN)) |
460 | return -EPERM; | 460 | return -EPERM; |
@@ -476,9 +476,10 @@ static ssize_t format_##name(struct adapter *adap, char *buf) \ | |||
476 | { \ | 476 | { \ |
477 | return sprintf(buf, "%u\n", val_expr); \ | 477 | return sprintf(buf, "%u\n", val_expr); \ |
478 | } \ | 478 | } \ |
479 | static ssize_t show_##name(struct class_device *cd, char *buf) \ | 479 | static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ |
480 | char *buf) \ | ||
480 | { \ | 481 | { \ |
481 | return attr_show(cd, buf, format_##name); \ | 482 | return attr_show(d, attr, buf, format_##name); \ |
482 | } | 483 | } |
483 | 484 | ||
484 | static ssize_t set_nfilters(struct adapter *adap, unsigned int val) | 485 | static ssize_t set_nfilters(struct adapter *adap, unsigned int val) |
@@ -493,10 +494,10 @@ static ssize_t set_nfilters(struct adapter *adap, unsigned int val) | |||
493 | return 0; | 494 | return 0; |
494 | } | 495 | } |
495 | 496 | ||
496 | static ssize_t store_nfilters(struct class_device *cd, const char *buf, | 497 | static ssize_t store_nfilters(struct device *d, struct device_attribute *attr, |
497 | size_t len) | 498 | const char *buf, size_t len) |
498 | { | 499 | { |
499 | return attr_store(cd, buf, len, set_nfilters, 0, ~0); | 500 | return attr_store(d, attr, buf, len, set_nfilters, 0, ~0); |
500 | } | 501 | } |
501 | 502 | ||
502 | static ssize_t set_nservers(struct adapter *adap, unsigned int val) | 503 | static ssize_t set_nservers(struct adapter *adap, unsigned int val) |
@@ -509,38 +510,39 @@ static ssize_t set_nservers(struct adapter *adap, unsigned int val) | |||
509 | return 0; | 510 | return 0; |
510 | } | 511 | } |
511 | 512 | ||
512 | static ssize_t store_nservers(struct class_device *cd, const char *buf, | 513 | static ssize_t store_nservers(struct device *d, struct device_attribute *attr, |
513 | size_t len) | 514 | const char *buf, size_t len) |
514 | { | 515 | { |
515 | return attr_store(cd, buf, len, set_nservers, 0, ~0); | 516 | return attr_store(d, attr, buf, len, set_nservers, 0, ~0); |
516 | } | 517 | } |
517 | 518 | ||
518 | #define CXGB3_ATTR_R(name, val_expr) \ | 519 | #define CXGB3_ATTR_R(name, val_expr) \ |
519 | CXGB3_SHOW(name, val_expr) \ | 520 | CXGB3_SHOW(name, val_expr) \ |
520 | static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) | 521 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) |
521 | 522 | ||
522 | #define CXGB3_ATTR_RW(name, val_expr, store_method) \ | 523 | #define CXGB3_ATTR_RW(name, val_expr, store_method) \ |
523 | CXGB3_SHOW(name, val_expr) \ | 524 | CXGB3_SHOW(name, val_expr) \ |
524 | static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method) | 525 | static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method) |
525 | 526 | ||
526 | CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5)); | 527 | CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5)); |
527 | CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters); | 528 | CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters); |
528 | CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers); | 529 | CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers); |
529 | 530 | ||
530 | static struct attribute *cxgb3_attrs[] = { | 531 | static struct attribute *cxgb3_attrs[] = { |
531 | &class_device_attr_cam_size.attr, | 532 | &dev_attr_cam_size.attr, |
532 | &class_device_attr_nfilters.attr, | 533 | &dev_attr_nfilters.attr, |
533 | &class_device_attr_nservers.attr, | 534 | &dev_attr_nservers.attr, |
534 | NULL | 535 | NULL |
535 | }; | 536 | }; |
536 | 537 | ||
537 | static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs }; | 538 | static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs }; |
538 | 539 | ||
539 | static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched) | 540 | static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr, |
541 | char *buf, int sched) | ||
540 | { | 542 | { |
541 | ssize_t len; | 543 | ssize_t len; |
542 | unsigned int v, addr, bpt, cpt; | 544 | unsigned int v, addr, bpt, cpt; |
543 | struct adapter *adap = to_net_dev(cd)->priv; | 545 | struct adapter *adap = to_net_dev(d)->priv; |
544 | 546 | ||
545 | addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; | 547 | addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2; |
546 | rtnl_lock(); | 548 | rtnl_lock(); |
@@ -560,13 +562,13 @@ static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched) | |||
560 | return len; | 562 | return len; |
561 | } | 563 | } |
562 | 564 | ||
563 | static ssize_t tm_attr_store(struct class_device *cd, const char *buf, | 565 | static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr, |
564 | size_t len, int sched) | 566 | const char *buf, size_t len, int sched) |
565 | { | 567 | { |
566 | char *endp; | 568 | char *endp; |
567 | ssize_t ret; | 569 | ssize_t ret; |
568 | unsigned int val; | 570 | unsigned int val; |
569 | struct adapter *adap = to_net_dev(cd)->priv; | 571 | struct adapter *adap = to_net_dev(d)->priv; |
570 | 572 | ||
571 | if (!capable(CAP_NET_ADMIN)) | 573 | if (!capable(CAP_NET_ADMIN)) |
572 | return -EPERM; | 574 | return -EPERM; |
@@ -584,15 +586,17 @@ static ssize_t tm_attr_store(struct class_device *cd, const char *buf, | |||
584 | } | 586 | } |
585 | 587 | ||
586 | #define TM_ATTR(name, sched) \ | 588 | #define TM_ATTR(name, sched) \ |
587 | static ssize_t show_##name(struct class_device *cd, char *buf) \ | 589 | static ssize_t show_##name(struct device *d, struct device_attribute *attr, \ |
590 | char *buf) \ | ||
588 | { \ | 591 | { \ |
589 | return tm_attr_show(cd, buf, sched); \ | 592 | return tm_attr_show(d, attr, buf, sched); \ |
590 | } \ | 593 | } \ |
591 | static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \ | 594 | static ssize_t store_##name(struct device *d, struct device_attribute *attr, \ |
595 | const char *buf, size_t len) \ | ||
592 | { \ | 596 | { \ |
593 | return tm_attr_store(cd, buf, len, sched); \ | 597 | return tm_attr_store(d, attr, buf, len, sched); \ |
594 | } \ | 598 | } \ |
595 | static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name) | 599 | static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name) |
596 | 600 | ||
597 | TM_ATTR(sched0, 0); | 601 | TM_ATTR(sched0, 0); |
598 | TM_ATTR(sched1, 1); | 602 | TM_ATTR(sched1, 1); |
@@ -604,14 +608,14 @@ TM_ATTR(sched6, 6); | |||
604 | TM_ATTR(sched7, 7); | 608 | TM_ATTR(sched7, 7); |
605 | 609 | ||
606 | static struct attribute *offload_attrs[] = { | 610 | static struct attribute *offload_attrs[] = { |
607 | &class_device_attr_sched0.attr, | 611 | &dev_attr_sched0.attr, |
608 | &class_device_attr_sched1.attr, | 612 | &dev_attr_sched1.attr, |
609 | &class_device_attr_sched2.attr, | 613 | &dev_attr_sched2.attr, |
610 | &class_device_attr_sched3.attr, | 614 | &dev_attr_sched3.attr, |
611 | &class_device_attr_sched4.attr, | 615 | &dev_attr_sched4.attr, |
612 | &class_device_attr_sched5.attr, | 616 | &dev_attr_sched5.attr, |
613 | &class_device_attr_sched6.attr, | 617 | &dev_attr_sched6.attr, |
614 | &class_device_attr_sched7.attr, | 618 | &dev_attr_sched7.attr, |
615 | NULL | 619 | NULL |
616 | }; | 620 | }; |
617 | 621 | ||
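For orientation, the class_device-to-device conversion in these hunks moves every callback onto the standard struct device sysfs prototypes. A minimal, self-contained sketch of that pattern (the example names and values are hypothetical) is:

static ssize_t show_example(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%u\n", 42);	/* placeholder value */
}

static ssize_t store_example(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return len;	/* accept and discard the input */
}

static DEVICE_ATTR(example, S_IRUGO | S_IWUSR, show_example, store_example);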
@@ -836,7 +840,7 @@ static int offload_open(struct net_device *dev) | |||
836 | init_smt(adapter); | 840 | init_smt(adapter); |
837 | 841 | ||
838 | /* Never mind if the next step fails */ | 842 | /* Never mind if the next step fails */ |
839 | sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group); | 843 | sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group); |
840 | 844 | ||
841 | /* Call back all registered clients */ | 845 | /* Call back all registered clients */ |
842 | cxgb3_add_clients(tdev); | 846 | cxgb3_add_clients(tdev); |
@@ -861,7 +865,7 @@ static int offload_close(struct t3cdev *tdev) | |||
861 | /* Call back all registered clients */ | 865 | /* Call back all registered clients */ |
862 | cxgb3_remove_clients(tdev); | 866 | cxgb3_remove_clients(tdev); |
863 | 867 | ||
864 | sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group); | 868 | sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group); |
865 | 869 | ||
866 | tdev->lldev = NULL; | 870 | tdev->lldev = NULL; |
867 | cxgb3_set_dummy_ops(tdev); | 871 | cxgb3_set_dummy_ops(tdev); |
@@ -2420,7 +2424,7 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
2420 | else if (msi > 0 && pci_enable_msi(pdev) == 0) | 2424 | else if (msi > 0 && pci_enable_msi(pdev) == 0) |
2421 | adapter->flags |= USING_MSI; | 2425 | adapter->flags |= USING_MSI; |
2422 | 2426 | ||
2423 | err = sysfs_create_group(&adapter->port[0]->class_dev.kobj, | 2427 | err = sysfs_create_group(&adapter->port[0]->dev.kobj, |
2424 | &cxgb3_attr_group); | 2428 | &cxgb3_attr_group); |
2425 | 2429 | ||
2426 | print_port_info(adapter, ai); | 2430 | print_port_info(adapter, ai); |
@@ -2452,7 +2456,7 @@ static void __devexit remove_one(struct pci_dev *pdev) | |||
2452 | struct adapter *adapter = dev->priv; | 2456 | struct adapter *adapter = dev->priv; |
2453 | 2457 | ||
2454 | t3_sge_stop(adapter); | 2458 | t3_sge_stop(adapter); |
2455 | sysfs_remove_group(&adapter->port[0]->class_dev.kobj, | 2459 | sysfs_remove_group(&adapter->port[0]->dev.kobj, |
2456 | &cxgb3_attr_group); | 2460 | &cxgb3_attr_group); |
2457 | 2461 | ||
2458 | for_each_port(adapter, i) | 2462 | for_each_port(adapter, i) |
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index c3a02d613382..c6b726643185 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c | |||
@@ -396,7 +396,7 @@ static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs, | |||
396 | int n) | 396 | int n) |
397 | { | 397 | { |
398 | CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n", | 398 | CH_ERR(tdev2adap(dev), "%d unexpected offload packets, first data %u\n", |
399 | n, ntohl(*(u32 *)skbs[0]->data)); | 399 | n, ntohl(*(__be32 *)skbs[0]->data)); |
400 | while (n--) | 400 | while (n--) |
401 | dev_kfree_skb_any(skbs[n]); | 401 | dev_kfree_skb_any(skbs[n]); |
402 | return 0; | 402 | return 0; |
@@ -755,7 +755,7 @@ static int do_trace(struct t3cdev *dev, struct sk_buff *skb) | |||
755 | { | 755 | { |
756 | struct cpl_trace_pkt *p = cplhdr(skb); | 756 | struct cpl_trace_pkt *p = cplhdr(skb); |
757 | 757 | ||
758 | skb->protocol = 0xffff; | 758 | skb->protocol = htons(0xffff); |
759 | skb->dev = dev->lldev; | 759 | skb->dev = dev->lldev; |
760 | skb_pull(skb, sizeof(*p)); | 760 | skb_pull(skb, sizeof(*p)); |
761 | skb->mac.raw = skb->data; | 761 | skb->mac.raw = skb->data; |
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c index dc3ab3b5c8cb..07d2731c1aa8 100644 --- a/drivers/net/defxx.c +++ b/drivers/net/defxx.c | |||
@@ -10,10 +10,12 @@ | |||
10 | * | 10 | * |
11 | * Abstract: | 11 | * Abstract: |
12 | * A Linux device driver supporting the Digital Equipment Corporation | 12 | * A Linux device driver supporting the Digital Equipment Corporation |
13 | * FDDI EISA and PCI controller families. Supported adapters include: | 13 | * FDDI TURBOchannel, EISA and PCI controller families. Supported |
14 | * adapters include: | ||
14 | * | 15 | * |
15 | * DEC FDDIcontroller/EISA (DEFEA) | 16 | * DEC FDDIcontroller/TURBOchannel (DEFTA) |
16 | * DEC FDDIcontroller/PCI (DEFPA) | 17 | * DEC FDDIcontroller/EISA (DEFEA) |
18 | * DEC FDDIcontroller/PCI (DEFPA) | ||
17 | * | 19 | * |
18 | * The original author: | 20 | * The original author: |
19 | * LVS Lawrence V. Stefani <lstefani@yahoo.com> | 21 | * LVS Lawrence V. Stefani <lstefani@yahoo.com> |
@@ -193,24 +195,27 @@ | |||
193 | * 14 Aug 2004 macro Fix device names reported. | 195 | * 14 Aug 2004 macro Fix device names reported. |
194 | * 14 Jun 2005 macro Use irqreturn_t. | 196 | * 14 Jun 2005 macro Use irqreturn_t. |
195 | * 23 Oct 2006 macro Big-endian host support. | 197 | * 23 Oct 2006 macro Big-endian host support. |
198 | * 14 Dec 2006 macro TURBOchannel support. | ||
196 | */ | 199 | */ |
197 | 200 | ||
198 | /* Include files */ | 201 | /* Include files */ |
199 | 202 | #include <linux/bitops.h> | |
200 | #include <linux/module.h> | ||
201 | #include <linux/kernel.h> | ||
202 | #include <linux/string.h> | ||
203 | #include <linux/errno.h> | ||
204 | #include <linux/ioport.h> | ||
205 | #include <linux/slab.h> | ||
206 | #include <linux/interrupt.h> | ||
207 | #include <linux/pci.h> | ||
208 | #include <linux/delay.h> | 203 | #include <linux/delay.h> |
204 | #include <linux/dma-mapping.h> | ||
205 | #include <linux/eisa.h> | ||
206 | #include <linux/errno.h> | ||
207 | #include <linux/fddidevice.h> | ||
209 | #include <linux/init.h> | 208 | #include <linux/init.h> |
209 | #include <linux/interrupt.h> | ||
210 | #include <linux/ioport.h> | ||
211 | #include <linux/kernel.h> | ||
212 | #include <linux/module.h> | ||
210 | #include <linux/netdevice.h> | 213 | #include <linux/netdevice.h> |
211 | #include <linux/fddidevice.h> | 214 | #include <linux/pci.h> |
212 | #include <linux/skbuff.h> | 215 | #include <linux/skbuff.h> |
213 | #include <linux/bitops.h> | 216 | #include <linux/slab.h> |
217 | #include <linux/string.h> | ||
218 | #include <linux/tc.h> | ||
214 | 219 | ||
215 | #include <asm/byteorder.h> | 220 | #include <asm/byteorder.h> |
216 | #include <asm/io.h> | 221 | #include <asm/io.h> |
@@ -219,8 +224,8 @@ | |||
219 | 224 | ||
220 | /* Version information string should be updated prior to each new release! */ | 225 | /* Version information string should be updated prior to each new release! */ |
221 | #define DRV_NAME "defxx" | 226 | #define DRV_NAME "defxx" |
222 | #define DRV_VERSION "v1.09" | 227 | #define DRV_VERSION "v1.10" |
223 | #define DRV_RELDATE "2006/10/23" | 228 | #define DRV_RELDATE "2006/12/14" |
224 | 229 | ||
225 | static char version[] __devinitdata = | 230 | static char version[] __devinitdata = |
226 | DRV_NAME ": " DRV_VERSION " " DRV_RELDATE | 231 | DRV_NAME ": " DRV_VERSION " " DRV_RELDATE |
@@ -235,12 +240,41 @@ static char version[] __devinitdata = | |||
235 | */ | 240 | */ |
236 | #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128) | 241 | #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128) |
237 | 242 | ||
243 | #define __unused __attribute__ ((unused)) | ||
244 | |||
245 | #ifdef CONFIG_PCI | ||
246 | #define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type) | ||
247 | #else | ||
248 | #define DFX_BUS_PCI(dev) 0 | ||
249 | #endif | ||
250 | |||
251 | #ifdef CONFIG_EISA | ||
252 | #define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type) | ||
253 | #else | ||
254 | #define DFX_BUS_EISA(dev) 0 | ||
255 | #endif | ||
256 | |||
257 | #ifdef CONFIG_TC | ||
258 | #define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type) | ||
259 | #else | ||
260 | #define DFX_BUS_TC(dev) 0 | ||
261 | #endif | ||
262 | |||
263 | #ifdef CONFIG_DEFXX_MMIO | ||
264 | #define DFX_MMIO 1 | ||
265 | #else | ||
266 | #define DFX_MMIO 0 | ||
267 | #endif | ||
268 | |||
238 | /* Define module-wide (static) routines */ | 269 | /* Define module-wide (static) routines */ |
239 | 270 | ||
240 | static void dfx_bus_init(struct net_device *dev); | 271 | static void dfx_bus_init(struct net_device *dev); |
272 | static void dfx_bus_uninit(struct net_device *dev); | ||
241 | static void dfx_bus_config_check(DFX_board_t *bp); | 273 | static void dfx_bus_config_check(DFX_board_t *bp); |
242 | 274 | ||
243 | static int dfx_driver_init(struct net_device *dev, const char *print_name); | 275 | static int dfx_driver_init(struct net_device *dev, |
276 | const char *print_name, | ||
277 | resource_size_t bar_start); | ||
244 | static int dfx_adap_init(DFX_board_t *bp, int get_buffers); | 278 | static int dfx_adap_init(DFX_board_t *bp, int get_buffers); |
245 | 279 | ||
246 | static int dfx_open(struct net_device *dev); | 280 | static int dfx_open(struct net_device *dev); |
@@ -273,13 +307,13 @@ static void dfx_xmt_flush(DFX_board_t *bp); | |||
273 | 307 | ||
274 | /* Define module-wide (static) variables */ | 308 | /* Define module-wide (static) variables */ |
275 | 309 | ||
276 | static struct net_device *root_dfx_eisa_dev; | 310 | static struct pci_driver dfx_pci_driver; |
311 | static struct eisa_driver dfx_eisa_driver; | ||
312 | static struct tc_driver dfx_tc_driver; | ||
277 | 313 | ||
278 | 314 | ||
279 | /* | 315 | /* |
280 | * ======================= | 316 | * ======================= |
281 | * = dfx_port_write_byte = | ||
282 | * = dfx_port_read_byte = | ||
283 | * = dfx_port_write_long = | 317 | * = dfx_port_write_long = |
284 | * = dfx_port_read_long = | 318 | * = dfx_port_read_long = |
285 | * ======================= | 319 | * ======================= |
@@ -291,12 +325,11 @@ static struct net_device *root_dfx_eisa_dev; | |||
291 | * None | 325 | * None |
292 | * | 326 | * |
293 | * Arguments: | 327 | * Arguments: |
294 | * bp - pointer to board information | 328 | * bp - pointer to board information |
295 | * offset - register offset from base I/O address | 329 | * offset - register offset from base I/O address |
296 | * data - for dfx_port_write_byte and dfx_port_write_long, this | 330 | * data - for dfx_port_write_long, this is a value to write; |
297 | * is a value to write. | 331 | * for dfx_port_read_long, this is a pointer to store |
298 | * for dfx_port_read_byte and dfx_port_read_byte, this | 332 | * the read value |
299 | * is a pointer to store the read value. | ||
300 | * | 333 | * |
301 | * Functional Description: | 334 | * Functional Description: |
302 | * These routines perform the correct operation to read or write | 335 | * These routines perform the correct operation to read or write |
@@ -310,7 +343,7 @@ static struct net_device *root_dfx_eisa_dev; | |||
310 | * registers using the register offsets defined in DEFXX.H. | 343 | * registers using the register offsets defined in DEFXX.H. |
311 | * | 344 | * |
312 | * PCI port block base addresses are assigned by the PCI BIOS or system | 345 | * PCI port block base addresses are assigned by the PCI BIOS or system |
313 | * firmware. There is one 128 byte port block which can be accessed. It | 346 | * firmware. There is one 128 byte port block which can be accessed. It |
314 | * allows for I/O mapping of both PDQ and PFI registers using the register | 347 | * allows for I/O mapping of both PDQ and PFI registers using the register |
315 | * offsets defined in DEFXX.H. | 348 | * offsets defined in DEFXX.H. |
316 | * | 349 | * |
@@ -318,7 +351,7 @@ static struct net_device *root_dfx_eisa_dev; | |||
318 | * None | 351 | * None |
319 | * | 352 | * |
320 | * Assumptions: | 353 | * Assumptions: |
321 | * bp->base_addr is a valid base I/O address for this adapter. | 354 | * bp->base is a valid base I/O address for this adapter. |
322 | * offset is a valid register offset for this adapter. | 355 | * offset is a valid register offset for this adapter. |
323 | * | 356 | * |
324 | * Side Effects: | 357 | * Side Effects: |
@@ -329,69 +362,135 @@ static struct net_device *root_dfx_eisa_dev; | |||
329 | * advantage of strict data type checking. | 362 | * advantage of strict data type checking. |
330 | */ | 363 | */ |
331 | 364 | ||
332 | static inline void dfx_port_write_byte( | 365 | static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data) |
333 | DFX_board_t *bp, | 366 | { |
334 | int offset, | 367 | writel(data, bp->base.mem + offset); |
335 | u8 data | 368 | mb(); |
336 | ) | 369 | } |
337 | 370 | ||
338 | { | 371 | static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data) |
339 | u16 port = bp->base_addr + offset; | 372 | { |
373 | outl(data, bp->base.port + offset); | ||
374 | } | ||
340 | 375 | ||
341 | outb(data, port); | 376 | static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data) |
342 | } | 377 | { |
378 | struct device __unused *bdev = bp->bus_dev; | ||
379 | int dfx_bus_tc = DFX_BUS_TC(bdev); | ||
380 | int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; | ||
343 | 381 | ||
344 | static inline void dfx_port_read_byte( | 382 | if (dfx_use_mmio) |
345 | DFX_board_t *bp, | 383 | dfx_writel(bp, offset, data); |
346 | int offset, | 384 | else |
347 | u8 *data | 385 | dfx_outl(bp, offset, data); |
348 | ) | 386 | } |
349 | 387 | ||
350 | { | ||
351 | u16 port = bp->base_addr + offset; | ||
352 | 388 | ||
353 | *data = inb(port); | 389 | static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data) |
354 | } | 390 | { |
391 | mb(); | ||
392 | *data = readl(bp->base.mem + offset); | ||
393 | } | ||
355 | 394 | ||
356 | static inline void dfx_port_write_long( | 395 | static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data) |
357 | DFX_board_t *bp, | 396 | { |
358 | int offset, | 397 | *data = inl(bp->base.port + offset); |
359 | u32 data | 398 | } |
360 | ) | ||
361 | 399 | ||
362 | { | 400 | static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) |
363 | u16 port = bp->base_addr + offset; | 401 | { |
402 | struct device __unused *bdev = bp->bus_dev; | ||
403 | int dfx_bus_tc = DFX_BUS_TC(bdev); | ||
404 | int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; | ||
364 | 405 | ||
365 | outl(data, port); | 406 | if (dfx_use_mmio) |
366 | } | 407 | dfx_readl(bp, offset, data); |
408 | else | ||
409 | dfx_inl(bp, offset, data); | ||
410 | } | ||
367 | 411 | ||
368 | static inline void dfx_port_read_long( | ||
369 | DFX_board_t *bp, | ||
370 | int offset, | ||
371 | u32 *data | ||
372 | ) | ||
373 | 412 | ||
374 | { | 413 | /* |
375 | u16 port = bp->base_addr + offset; | 414 | * ================ |
415 | * = dfx_get_bars = | ||
416 | * ================ | ||
417 | * | ||
418 | * Overview: | ||
419 | * Retrieves the address range used to access control and status | ||
420 | * registers. | ||
421 | * | ||
422 | * Returns: | ||
423 | * None | ||
424 | * | ||
425 | * Arguments: | ||
426 | * bdev - pointer to device information | ||
427 | * bar_start - pointer to store the start address | ||
428 | * bar_len - pointer to store the length of the area | ||
429 | * | ||
430 | * Assumptions: | ||
431 | * I am sure there are some. | ||
432 | * | ||
433 | * Side Effects: | ||
434 | * None | ||
435 | */ | ||
436 | static void dfx_get_bars(struct device *bdev, | ||
437 | resource_size_t *bar_start, resource_size_t *bar_len) | ||
438 | { | ||
439 | int dfx_bus_pci = DFX_BUS_PCI(bdev); | ||
440 | int dfx_bus_eisa = DFX_BUS_EISA(bdev); | ||
441 | int dfx_bus_tc = DFX_BUS_TC(bdev); | ||
442 | int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; | ||
376 | 443 | ||
377 | *data = inl(port); | 444 | if (dfx_bus_pci) { |
378 | } | 445 | int num = dfx_use_mmio ? 0 : 1; |
379 | 446 | ||
447 | *bar_start = pci_resource_start(to_pci_dev(bdev), num); | ||
448 | *bar_len = pci_resource_len(to_pci_dev(bdev), num); | ||
449 | } | ||
450 | if (dfx_bus_eisa) { | ||
451 | unsigned long base_addr = to_eisa_device(bdev)->base_addr; | ||
452 | resource_size_t bar; | ||
453 | |||
454 | if (dfx_use_mmio) { | ||
455 | bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2); | ||
456 | bar <<= 8; | ||
457 | bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1); | ||
458 | bar <<= 8; | ||
459 | bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0); | ||
460 | bar <<= 16; | ||
461 | *bar_start = bar; | ||
462 | bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2); | ||
463 | bar <<= 8; | ||
464 | bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1); | ||
465 | bar <<= 8; | ||
466 | bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0); | ||
467 | bar <<= 16; | ||
468 | *bar_len = (bar | PI_MEM_ADD_MASK_M) + 1; | ||
469 | } else { | ||
470 | *bar_start = base_addr; | ||
471 | *bar_len = PI_ESIC_K_CSR_IO_LEN; | ||
472 | } | ||
473 | } | ||
474 | if (dfx_bus_tc) { | ||
475 | *bar_start = to_tc_dev(bdev)->resource.start + | ||
476 | PI_TC_K_CSR_OFFSET; | ||
477 | *bar_len = PI_TC_K_CSR_LEN; | ||
478 | } | ||
479 | } | ||
380 | 480 | ||
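A worked example of the EISA decode above, using hypothetical register contents: if MEM_ADD_CMP_2, _1 and _0 read back 0x00, 0x0d and 0x00, the bytes are assembled as (0x00 << 16) | (0x0d << 8) | 0x00 and then shifted left by 16, so the window base becomes 0x0d000000; the window length is built the same way from the MEM_ADD_MASK bytes, with PI_MEM_ADD_MASK_M filling in the low-order bits before the final "+ 1".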
381 | /* | 481 | /* |
382 | * ============= | 482 | * ================ |
383 | * = dfx_init_one_pci_or_eisa = | 483 | * = dfx_register = |
384 | * ============= | 484 | * ================ |
385 | * | 485 | * |
386 | * Overview: | 486 | * Overview: |
387 | * Initializes a supported FDDI EISA or PCI controller | 487 | * Initializes a supported FDDI controller |
388 | * | 488 | * |
389 | * Returns: | 489 | * Returns: |
390 | * Condition code | 490 | * Condition code |
391 | * | 491 | * |
392 | * Arguments: | 492 | * Arguments: |
393 | * pdev - pointer to pci device information (NULL for EISA) | 493 | * bdev - pointer to device information |
394 | * ioaddr - pointer to port (NULL for PCI) | ||
395 | * | 494 | * |
396 | * Functional Description: | 495 | * Functional Description: |
397 | * | 496 | * |
@@ -407,56 +506,74 @@ static inline void dfx_port_read_long( | |||
407 | * initialized and the board resources are read and stored in | 506 | * initialized and the board resources are read and stored in |
408 | * the device structure. | 507 | * the device structure. |
409 | */ | 508 | */ |
410 | static int __devinit dfx_init_one_pci_or_eisa(struct pci_dev *pdev, long ioaddr) | 509 | static int __devinit dfx_register(struct device *bdev) |
411 | { | 510 | { |
412 | static int version_disp; | 511 | static int version_disp; |
413 | char *print_name = DRV_NAME; | 512 | int dfx_bus_pci = DFX_BUS_PCI(bdev); |
513 | int dfx_bus_tc = DFX_BUS_TC(bdev); | ||
514 | int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; | ||
515 | char *print_name = bdev->bus_id; | ||
414 | struct net_device *dev; | 516 | struct net_device *dev; |
415 | DFX_board_t *bp; /* board pointer */ | 517 | DFX_board_t *bp; /* board pointer */ |
518 | resource_size_t bar_start = 0; /* pointer to port */ | ||
519 | resource_size_t bar_len = 0; /* resource length */ | ||
416 | int alloc_size; /* total buffer size used */ | 520 | int alloc_size; /* total buffer size used */ |
417 | int err; | 521 | struct resource *region; |
522 | int err = 0; | ||
418 | 523 | ||
419 | if (!version_disp) { /* display version info if adapter is found */ | 524 | if (!version_disp) { /* display version info if adapter is found */ |
420 | version_disp = 1; /* set display flag to TRUE so that */ | 525 | version_disp = 1; /* set display flag to TRUE so that */ |
421 | printk(version); /* we only display this string ONCE */ | 526 | printk(version); /* we only display this string ONCE */ |
422 | } | 527 | } |
423 | 528 | ||
424 | if (pdev != NULL) | ||
425 | print_name = pci_name(pdev); | ||
426 | |||
427 | dev = alloc_fddidev(sizeof(*bp)); | 529 | dev = alloc_fddidev(sizeof(*bp)); |
428 | if (!dev) { | 530 | if (!dev) { |
429 | printk(KERN_ERR "%s: unable to allocate fddidev, aborting\n", | 531 | printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n", |
430 | print_name); | 532 | print_name); |
431 | return -ENOMEM; | 533 | return -ENOMEM; |
432 | } | 534 | } |
433 | 535 | ||
434 | /* Enable PCI device. */ | 536 | /* Enable PCI device. */ |
435 | if (pdev != NULL) { | 537 | if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) { |
436 | err = pci_enable_device (pdev); | 538 | printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n", |
437 | if (err) goto err_out; | 539 | print_name); |
438 | ioaddr = pci_resource_start (pdev, 1); | 540 | goto err_out; |
439 | } | 541 | } |
440 | 542 | ||
441 | SET_MODULE_OWNER(dev); | 543 | SET_MODULE_OWNER(dev); |
442 | if (pdev != NULL) | 544 | SET_NETDEV_DEV(dev, bdev); |
443 | SET_NETDEV_DEV(dev, &pdev->dev); | 545 | |
546 | bp = netdev_priv(dev); | ||
547 | bp->bus_dev = bdev; | ||
548 | dev_set_drvdata(bdev, dev); | ||
444 | 549 | ||
445 | bp = dev->priv; | 550 | dfx_get_bars(bdev, &bar_start, &bar_len); |
446 | 551 | ||
447 | if (!request_region(ioaddr, | 552 | if (dfx_use_mmio) |
448 | pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN, | 553 | region = request_mem_region(bar_start, bar_len, print_name); |
449 | print_name)) { | 554 | else |
555 | region = request_region(bar_start, bar_len, print_name); | ||
556 | if (!region) { | ||
450 | printk(KERN_ERR "%s: Cannot reserve I/O resource " | 557 | printk(KERN_ERR "%s: Cannot reserve I/O resource " |
451 | "0x%x @ 0x%lx, aborting\n", print_name, | 558 | "0x%lx @ 0x%lx, aborting\n", |
452 | pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN, ioaddr); | 559 | print_name, (long)bar_len, (long)bar_start); |
453 | err = -EBUSY; | 560 | err = -EBUSY; |
454 | goto err_out; | 561 | goto err_out_disable; |
455 | } | 562 | } |
456 | 563 | ||
457 | /* Initialize new device structure */ | 564 | /* Set up I/O base address. */ |
565 | if (dfx_use_mmio) { | ||
566 | bp->base.mem = ioremap_nocache(bar_start, bar_len); | ||
567 | if (!bp->base.mem) { | ||
568 | printk(KERN_ERR "%s: Cannot map MMIO\n", print_name); | ||
569 | goto err_out_region; | ||
570 | } | ||
571 | } else { | ||
572 | bp->base.port = bar_start; | ||
573 | dev->base_addr = bar_start; | ||
574 | } | ||
458 | 575 | ||
459 | dev->base_addr = ioaddr; /* save port (I/O) base address */ | 576 | /* Initialize new device structure */ |
460 | 577 | ||
461 | dev->get_stats = dfx_ctl_get_stats; | 578 | dev->get_stats = dfx_ctl_get_stats; |
462 | dev->open = dfx_open; | 579 | dev->open = dfx_open; |
@@ -465,22 +582,12 @@ static int __devinit dfx_init_one_pci_or_eisa(struct pci_dev *pdev, long ioaddr) | |||
465 | dev->set_multicast_list = dfx_ctl_set_multicast_list; | 582 | dev->set_multicast_list = dfx_ctl_set_multicast_list; |
466 | dev->set_mac_address = dfx_ctl_set_mac_address; | 583 | dev->set_mac_address = dfx_ctl_set_mac_address; |
467 | 584 | ||
468 | if (pdev == NULL) { | 585 | if (dfx_bus_pci) |
469 | /* EISA board */ | 586 | pci_set_master(to_pci_dev(bdev)); |
470 | bp->bus_type = DFX_BUS_TYPE_EISA; | ||
471 | bp->next = root_dfx_eisa_dev; | ||
472 | root_dfx_eisa_dev = dev; | ||
473 | } else { | ||
474 | /* PCI board */ | ||
475 | bp->bus_type = DFX_BUS_TYPE_PCI; | ||
476 | bp->pci_dev = pdev; | ||
477 | pci_set_drvdata (pdev, dev); | ||
478 | pci_set_master (pdev); | ||
479 | } | ||
480 | 587 | ||
481 | if (dfx_driver_init(dev, print_name) != DFX_K_SUCCESS) { | 588 | if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) { |
482 | err = -ENODEV; | 589 | err = -ENODEV; |
483 | goto err_out_region; | 590 | goto err_out_unmap; |
484 | } | 591 | } |
485 | 592 | ||
486 | err = register_netdev(dev); | 593 | err = register_netdev(dev); |
@@ -499,44 +606,28 @@ err_out_kfree: | |||
499 | sizeof(PI_CONSUMER_BLOCK) + | 606 | sizeof(PI_CONSUMER_BLOCK) + |
500 | (PI_ALIGN_K_DESC_BLK - 1); | 607 | (PI_ALIGN_K_DESC_BLK - 1); |
501 | if (bp->kmalloced) | 608 | if (bp->kmalloced) |
502 | pci_free_consistent(pdev, alloc_size, | 609 | dma_free_coherent(bdev, alloc_size, |
503 | bp->kmalloced, bp->kmalloced_dma); | 610 | bp->kmalloced, bp->kmalloced_dma); |
611 | |||
612 | err_out_unmap: | ||
613 | if (dfx_use_mmio) | ||
614 | iounmap(bp->base.mem); | ||
615 | |||
504 | err_out_region: | 616 | err_out_region: |
505 | release_region(ioaddr, pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN); | 617 | if (dfx_use_mmio) |
618 | release_mem_region(bar_start, bar_len); | ||
619 | else | ||
620 | release_region(bar_start, bar_len); | ||
621 | |||
622 | err_out_disable: | ||
623 | if (dfx_bus_pci) | ||
624 | pci_disable_device(to_pci_dev(bdev)); | ||
625 | |||
506 | err_out: | 626 | err_out: |
507 | free_netdev(dev); | 627 | free_netdev(dev); |
508 | return err; | 628 | return err; |
509 | } | 629 | } |
510 | 630 | ||
511 | static int __devinit dfx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
512 | { | ||
513 | return dfx_init_one_pci_or_eisa(pdev, 0); | ||
514 | } | ||
515 | |||
516 | static int __init dfx_eisa_init(void) | ||
517 | { | ||
518 | int rc = -ENODEV; | ||
519 | int i; /* used in for loops */ | ||
520 | u16 port; /* temporary I/O (port) address */ | ||
521 | u32 slot_id; /* EISA hardware (slot) ID read from adapter */ | ||
522 | |||
523 | DBG_printk("In dfx_eisa_init...\n"); | ||
524 | |||
525 | /* Scan for FDDI EISA controllers */ | ||
526 | |||
527 | for (i=0; i < DFX_MAX_EISA_SLOTS; i++) /* only scan for up to 16 EISA slots */ | ||
528 | { | ||
529 | port = (i << 12) + PI_ESIC_K_SLOT_ID; /* port = I/O address for reading slot ID */ | ||
530 | slot_id = inl(port); /* read EISA HW (slot) ID */ | ||
531 | if ((slot_id & 0xF0FFFFFF) == DEFEA_PRODUCT_ID) | ||
532 | { | ||
533 | port = (i << 12); /* recalc base addr */ | ||
534 | |||
535 | if (dfx_init_one_pci_or_eisa(NULL, port) == 0) rc = 0; | ||
536 | } | ||
537 | } | ||
538 | return rc; | ||
539 | } | ||
540 | 631 | ||
541 | /* | 632 | /* |
542 | * ================ | 633 | * ================ |
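
The reworked probe error path above unwinds in reverse order of acquisition: free the coherent DMA block, unmap the MMIO window, release the I/O or memory region, then disable the PCI device. A minimal sketch of that goto-unwind idiom, with purely illustrative helper names standing in for the real acquisition steps:

    #include <linux/device.h>

    /* Stubs standing in for the real acquisition/release steps. */
    static int example_enable(struct device *d) { return 0; }
    static void example_disable(struct device *d) { }
    static int example_claim_region(struct device *d) { return 0; }
    static void example_release_region(struct device *d) { }
    static int example_map(struct device *d) { return 0; }
    static void example_unmap(struct device *d) { }
    static int example_start(struct device *d) { return 0; }

    static int example_probe(struct device *bdev)
    {
            int err;

            err = example_enable(bdev);          /* e.g. pci_enable_device() */
            if (err)
                    goto err_out;
            err = example_claim_region(bdev);    /* request_[mem_]region() */
            if (err)
                    goto err_out_disable;
            err = example_map(bdev);             /* ioremap_nocache() for MMIO */
            if (err)
                    goto err_out_region;
            err = example_start(bdev);
            if (err)
                    goto err_out_unmap;
            return 0;

    err_out_unmap:
            example_unmap(bdev);
    err_out_region:
            example_release_region(bdev);
    err_out_disable:
            example_disable(bdev);
    err_out:
            return err;
    }
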
@@ -544,7 +635,7 @@ static int __init dfx_eisa_init(void) | |||
544 | * ================ | 635 | * ================ |
545 | * | 636 | * |
546 | * Overview: | 637 | * Overview: |
547 | * Initializes EISA and PCI controller bus-specific logic. | 638 | * Initializes the bus-specific controller logic. |
548 | * | 639 | * |
549 | * Returns: | 640 | * Returns: |
550 | * None | 641 | * None |
@@ -560,7 +651,7 @@ static int __init dfx_eisa_init(void) | |||
560 | * None | 651 | * None |
561 | * | 652 | * |
562 | * Assumptions: | 653 | * Assumptions: |
563 | * dev->base_addr has already been set with the proper | 654 | * bp->base has already been set with the proper |
564 | * base I/O address for this device. | 655 | * base I/O address for this device. |
565 | * | 656 | * |
566 | * Side Effects: | 657 | * Side Effects: |
@@ -571,87 +662,103 @@ static int __init dfx_eisa_init(void) | |||
571 | 662 | ||
572 | static void __devinit dfx_bus_init(struct net_device *dev) | 663 | static void __devinit dfx_bus_init(struct net_device *dev) |
573 | { | 664 | { |
574 | DFX_board_t *bp = dev->priv; | 665 | DFX_board_t *bp = netdev_priv(dev); |
575 | u8 val; /* used for I/O read/writes */ | 666 | struct device *bdev = bp->bus_dev; |
667 | int dfx_bus_pci = DFX_BUS_PCI(bdev); | ||
668 | int dfx_bus_eisa = DFX_BUS_EISA(bdev); | ||
669 | int dfx_bus_tc = DFX_BUS_TC(bdev); | ||
670 | int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; | ||
671 | u8 val; | ||
576 | 672 | ||
577 | DBG_printk("In dfx_bus_init...\n"); | 673 | DBG_printk("In dfx_bus_init...\n"); |
578 | 674 | ||
579 | /* | 675 | /* Initialize a pointer back to the net_device struct */ |
580 | * Initialize base I/O address field in bp structure | ||
581 | * | ||
582 | * Note: bp->base_addr is the same as dev->base_addr. | ||
583 | * It's useful because often we'll need to read | ||
584 | * or write registers where we already have the | ||
585 | * bp pointer instead of the dev pointer. Having | ||
586 | * the base address in the bp structure will | ||
587 | * save a pointer dereference. | ||
588 | * | ||
589 | * IMPORTANT!! This field must be defined before | ||
590 | * any of the dfx_port_* inline functions are | ||
591 | * called. | ||
592 | */ | ||
593 | |||
594 | bp->base_addr = dev->base_addr; | ||
595 | |||
596 | /* And a pointer back to the net_device struct */ | ||
597 | bp->dev = dev; | 676 | bp->dev = dev; |
598 | 677 | ||
599 | /* Initialize adapter based on bus type */ | 678 | /* Initialize adapter based on bus type */ |
600 | 679 | ||
601 | if (bp->bus_type == DFX_BUS_TYPE_EISA) | 680 | if (dfx_bus_tc) |
602 | { | 681 | dev->irq = to_tc_dev(bdev)->interrupt; |
603 | /* Get the interrupt level from the ESIC chip */ | 682 | if (dfx_bus_eisa) { |
604 | 683 | unsigned long base_addr = to_eisa_device(bdev)->base_addr; | |
605 | dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &val); | ||
606 | switch ((val & PI_CONFIG_STAT_0_M_IRQ) >> PI_CONFIG_STAT_0_V_IRQ) | ||
607 | { | ||
608 | case PI_CONFIG_STAT_0_IRQ_K_9: | ||
609 | dev->irq = 9; | ||
610 | break; | ||
611 | |||
612 | case PI_CONFIG_STAT_0_IRQ_K_10: | ||
613 | dev->irq = 10; | ||
614 | break; | ||
615 | 684 | ||
616 | case PI_CONFIG_STAT_0_IRQ_K_11: | 685 | /* Get the interrupt level from the ESIC chip. */ |
617 | dev->irq = 11; | 686 | val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); |
618 | break; | 687 | val &= PI_CONFIG_STAT_0_M_IRQ; |
688 | val >>= PI_CONFIG_STAT_0_V_IRQ; | ||
619 | 689 | ||
620 | case PI_CONFIG_STAT_0_IRQ_K_15: | 690 | switch (val) { |
621 | dev->irq = 15; | 691 | case PI_CONFIG_STAT_0_IRQ_K_9: |
622 | break; | 692 | dev->irq = 9; |
623 | } | 693 | break; |
624 | |||
625 | /* Enable access to I/O on the board by writing 0x03 to Function Control Register */ | ||
626 | 694 | ||
627 | dfx_port_write_byte(bp, PI_ESIC_K_FUNCTION_CNTRL, PI_ESIC_K_FUNCTION_CNTRL_IO_ENB); | 695 | case PI_CONFIG_STAT_0_IRQ_K_10: |
696 | dev->irq = 10; | ||
697 | break; | ||
628 | 698 | ||
629 | /* Set the I/O decode range of the board */ | 699 | case PI_CONFIG_STAT_0_IRQ_K_11: |
700 | dev->irq = 11; | ||
701 | break; | ||
630 | 702 | ||
631 | val = ((dev->base_addr >> 12) << PI_IO_CMP_V_SLOT); | 703 | case PI_CONFIG_STAT_0_IRQ_K_15: |
632 | dfx_port_write_byte(bp, PI_ESIC_K_IO_CMP_0_1, val); | 704 | dev->irq = 15; |
633 | dfx_port_write_byte(bp, PI_ESIC_K_IO_CMP_1_1, val); | 705 | break; |
706 | } | ||
634 | 707 | ||
635 | /* Enable access to rest of module (including PDQ and packet memory) */ | 708 | /* |
709 | * Enable memory decoding (MEMCS0) and/or port decoding | ||
710 | * (IOCS1/IOCS0) as appropriate in Function Control | ||
711 | * Register. One of the port chip selects seems to be | ||
712 | * used for the Burst Holdoff register, but this bit of | ||
713 | * documentation is missing and as yet it has not been | ||
714 | * determined which of the two. This is also the reason | ||
715 | * the size of the decoded port range is twice as large | ||
716 | * as the one required by the PDQ. | ||
717 | */ | ||
636 | 718 | ||
637 | dfx_port_write_byte(bp, PI_ESIC_K_SLOT_CNTRL, PI_SLOT_CNTRL_M_ENB); | 719 | /* Set the decode range of the board. */ |
720 | val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT); | ||
721 | outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1); | ||
722 | outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0); | ||
723 | outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1); | ||
724 | outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0); | ||
725 | val = PI_ESIC_K_CSR_IO_LEN - 1; | ||
726 | outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1); | ||
727 | outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0); | ||
728 | outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1); | ||
729 | outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0); | ||
730 | |||
731 | /* Enable the decoders. */ | ||
732 | val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0; | ||
733 | if (dfx_use_mmio) | ||
734 | val |= PI_FUNCTION_CNTRL_M_MEMCS0; | ||
735 | outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL); | ||
638 | 736 | ||
639 | /* | 737 | /* |
640 | * Map PDQ registers into I/O space. This is done by clearing a bit | 738 | * Enable access to the rest of the module |
641 | * in Burst Holdoff register. | 739 | * (including PDQ and packet memory). |
642 | */ | 740 | */ |
741 | val = PI_SLOT_CNTRL_M_ENB; | ||
742 | outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL); | ||
643 | 743 | ||
644 | dfx_port_read_byte(bp, PI_ESIC_K_BURST_HOLDOFF, &val); | 744 | /* |
645 | dfx_port_write_byte(bp, PI_ESIC_K_BURST_HOLDOFF, (val & ~PI_BURST_HOLDOFF_M_MEM_MAP)); | 745 | * Map PDQ registers into memory or port space. This is |
746 | * done with a bit in the Burst Holdoff register. | ||
747 | */ | ||
748 | val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF); | ||
749 | if (dfx_use_mmio) | ||
750 | val |= PI_BURST_HOLDOFF_M_MEM_MAP; | ||
751 | else | ||
752 | val &= ~PI_BURST_HOLDOFF_M_MEM_MAP; | ||
753 | outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF); | ||
646 | 754 | ||
647 | /* Enable interrupts at EISA bus interface chip (ESIC) */ | 755 | /* Enable interrupts at EISA bus interface chip (ESIC) */ |
648 | 756 | val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); | |
649 | dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &val); | 757 | val |= PI_CONFIG_STAT_0_M_INT_ENB; |
650 | dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, (val | PI_CONFIG_STAT_0_M_INT_ENB)); | 758 | outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); |
651 | } | 759 | } |
652 | else | 760 | if (dfx_bus_pci) { |
653 | { | 761 | struct pci_dev *pdev = to_pci_dev(bdev); |
654 | struct pci_dev *pdev = bp->pci_dev; | ||
655 | 762 | ||
656 | /* Get the interrupt level from the PCI Configuration Table */ | 763 | /* Get the interrupt level from the PCI Configuration Table */ |
657 | 764 | ||
@@ -660,17 +767,70 @@ static void __devinit dfx_bus_init(struct net_device *dev) | |||
660 | /* Check Latency Timer and set if less than minimal */ | 767 | /* Check Latency Timer and set if less than minimal */ |
661 | 768 | ||
662 | pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val); | 769 | pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val); |
663 | if (val < PFI_K_LAT_TIMER_MIN) /* if less than min, override with default */ | 770 | if (val < PFI_K_LAT_TIMER_MIN) { |
664 | { | ||
665 | val = PFI_K_LAT_TIMER_DEF; | 771 | val = PFI_K_LAT_TIMER_DEF; |
666 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val); | 772 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val); |
667 | } | 773 | } |
668 | 774 | ||
669 | /* Enable interrupts at PCI bus interface chip (PFI) */ | 775 | /* Enable interrupts at PCI bus interface chip (PFI) */ |
776 | val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB; | ||
777 | dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val); | ||
778 | } | ||
779 | } | ||
670 | 780 | ||
671 | dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, (PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB)); | 781 | /* |
672 | } | 782 | * ================== |
783 | * = dfx_bus_uninit = | ||
784 | * ================== | ||
785 | * | ||
786 | * Overview: | ||
787 | * Uninitializes the bus-specific controller logic. | ||
788 | * | ||
789 | * Returns: | ||
790 | * None | ||
791 | * | ||
792 | * Arguments: | ||
793 | * dev - pointer to device information | ||
794 | * | ||
795 | * Functional Description: | ||
796 | * Perform bus-specific logic uninitialization. | ||
797 | * | ||
798 | * Return Codes: | ||
799 | * None | ||
800 | * | ||
801 | * Assumptions: | ||
802 | * bp->base has already been set with the proper | ||
803 | * base I/O address for this device. | ||
804 | * | ||
805 | * Side Effects: | ||
806 | * Interrupts are disabled at the adapter bus-specific logic. | ||
807 | */ | ||
808 | |||
809 | static void __devexit dfx_bus_uninit(struct net_device *dev) | ||
810 | { | ||
811 | DFX_board_t *bp = netdev_priv(dev); | ||
812 | struct device *bdev = bp->bus_dev; | ||
813 | int dfx_bus_pci = DFX_BUS_PCI(bdev); | ||
814 | int dfx_bus_eisa = DFX_BUS_EISA(bdev); | ||
815 | u8 val; | ||
816 | |||
817 | DBG_printk("In dfx_bus_uninit...\n"); | ||
818 | |||
819 | /* Uninitialize adapter based on bus type */ | ||
820 | |||
821 | if (dfx_bus_eisa) { | ||
822 | unsigned long base_addr = to_eisa_device(bdev)->base_addr; | ||
823 | |||
824 | /* Disable interrupts at EISA bus interface chip (ESIC) */ | ||
825 | val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); | ||
826 | val &= ~PI_CONFIG_STAT_0_M_INT_ENB; | ||
827 | outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); | ||
828 | } | ||
829 | if (dfx_bus_pci) { | ||
830 | /* Disable interrupts at PCI bus interface chip (PFI) */ | ||
831 | dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0); | ||
673 | } | 832 | } |
833 | } | ||
674 | 834 | ||
675 | 835 | ||
676 | /* | 836 | /* |
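
The bus tests used throughout the new code (DFX_BUS_PCI, DFX_BUS_EISA, DFX_BUS_TC) and the DFX_MMIO default are defined earlier in the full patch, outside this excerpt. The sketch below shows one plausible definition in terms of the generic driver model; the exact form, and the CONFIG_DEFXX_MMIO option name, are assumptions here:

    #include <linux/device.h>
    #include <linux/pci.h>
    #include <linux/eisa.h>
    #include <linux/tc.h>

    #ifdef CONFIG_PCI
    #define DFX_BUS_PCI(dev)  ((dev)->bus == &pci_bus_type)
    #else
    #define DFX_BUS_PCI(dev)  0
    #endif

    #ifdef CONFIG_EISA
    #define DFX_BUS_EISA(dev) ((dev)->bus == &eisa_bus_type)
    #else
    #define DFX_BUS_EISA(dev) 0
    #endif

    #ifdef CONFIG_TC
    #define DFX_BUS_TC(dev)   ((dev)->bus == &tc_bus_type)
    #else
    #define DFX_BUS_TC(dev)   0
    #endif

    /* DEFPA (PCI) and DEFTA (TC) registers are reached through MMIO;
     * for DEFEA (EISA) the choice is a build-time default.
     */
    #ifdef CONFIG_DEFXX_MMIO
    #define DFX_MMIO 1
    #else
    #define DFX_MMIO 0
    #endif
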
@@ -705,18 +865,16 @@ static void __devinit dfx_bus_init(struct net_device *dev) | |||
705 | 865 | ||
706 | static void __devinit dfx_bus_config_check(DFX_board_t *bp) | 866 | static void __devinit dfx_bus_config_check(DFX_board_t *bp) |
707 | { | 867 | { |
868 | struct device __unused *bdev = bp->bus_dev; | ||
869 | int dfx_bus_eisa = DFX_BUS_EISA(bdev); | ||
708 | int status; /* return code from adapter port control call */ | 870 | int status; /* return code from adapter port control call */ |
709 | u32 slot_id; /* EISA-bus hardware id (DEC3001, DEC3002,...) */ | ||
710 | u32 host_data; /* LW data returned from port control call */ | 871 | u32 host_data; /* LW data returned from port control call */ |
711 | 872 | ||
712 | DBG_printk("In dfx_bus_config_check...\n"); | 873 | DBG_printk("In dfx_bus_config_check...\n"); |
713 | 874 | ||
714 | /* Configuration check only valid for EISA adapter */ | 875 | /* Configuration check only valid for EISA adapter */ |
715 | 876 | ||
716 | if (bp->bus_type == DFX_BUS_TYPE_EISA) | 877 | if (dfx_bus_eisa) { |
717 | { | ||
718 | dfx_port_read_long(bp, PI_ESIC_K_SLOT_ID, &slot_id); | ||
719 | |||
720 | /* | 878 | /* |
721 | * First check if revision 2 EISA controller. Rev. 1 cards used | 879 | * First check if revision 2 EISA controller. Rev. 1 cards used |
722 | * PDQ revision B, so no workaround needed in this case. Rev. 3 | 880 | * PDQ revision B, so no workaround needed in this case. Rev. 3 |
@@ -724,14 +882,11 @@ static void __devinit dfx_bus_config_check(DFX_board_t *bp) | |||
724 | * case, either. Only Rev. 2 cards used either Rev. D or E | 882 | * case, either. Only Rev. 2 cards used either Rev. D or E |
725 | * chips, so we must verify the chip revision on Rev. 2 cards. | 883 | * chips, so we must verify the chip revision on Rev. 2 cards. |
726 | */ | 884 | */ |
727 | 885 | if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) { | |
728 | if (slot_id == DEFEA_PROD_ID_2) | ||
729 | { | ||
730 | /* | 886 | /* |
731 | * Revision 2 FDDI EISA controller found, so let's check PDQ | 887 | * Revision 2 FDDI EISA controller found, |
732 | * revision of adapter. | 888 | * so let's check PDQ revision of adapter. |
733 | */ | 889 | */ |
734 | |||
735 | status = dfx_hw_port_ctrl_req(bp, | 890 | status = dfx_hw_port_ctrl_req(bp, |
736 | PI_PCTRL_M_SUB_CMD, | 891 | PI_PCTRL_M_SUB_CMD, |
737 | PI_SUB_CMD_K_PDQ_REV_GET, | 892 | PI_SUB_CMD_K_PDQ_REV_GET, |
@@ -805,13 +960,20 @@ static void __devinit dfx_bus_config_check(DFX_board_t *bp) | |||
805 | */ | 960 | */ |
806 | 961 | ||
807 | static int __devinit dfx_driver_init(struct net_device *dev, | 962 | static int __devinit dfx_driver_init(struct net_device *dev, |
808 | const char *print_name) | 963 | const char *print_name, |
964 | resource_size_t bar_start) | ||
809 | { | 965 | { |
810 | DFX_board_t *bp = dev->priv; | 966 | DFX_board_t *bp = netdev_priv(dev); |
811 | int alloc_size; /* total buffer size needed */ | 967 | struct device *bdev = bp->bus_dev; |
812 | char *top_v, *curr_v; /* virtual addrs into memory block */ | 968 | int dfx_bus_pci = DFX_BUS_PCI(bdev); |
813 | dma_addr_t top_p, curr_p; /* physical addrs into memory block */ | 969 | int dfx_bus_eisa = DFX_BUS_EISA(bdev); |
814 | u32 data; /* host data register value */ | 970 | int dfx_bus_tc = DFX_BUS_TC(bdev); |
971 | int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; | ||
972 | int alloc_size; /* total buffer size needed */ | ||
973 | char *top_v, *curr_v; /* virtual addrs into memory block */ | ||
974 | dma_addr_t top_p, curr_p; /* physical addrs into memory block */ | ||
975 | u32 data, le32; /* host data register value */ | ||
976 | char *board_name = NULL; | ||
815 | 977 | ||
816 | DBG_printk("In dfx_driver_init...\n"); | 978 | DBG_printk("In dfx_driver_init...\n"); |
817 | 979 | ||
@@ -860,8 +1022,8 @@ static int __devinit dfx_driver_init(struct net_device *dev, | |||
860 | print_name); | 1022 | print_name); |
861 | return(DFX_K_FAILURE); | 1023 | return(DFX_K_FAILURE); |
862 | } | 1024 | } |
863 | data = cpu_to_le32(data); | 1025 | le32 = cpu_to_le32(data); |
864 | memcpy(&bp->factory_mac_addr[0], &data, sizeof(u32)); | 1026 | memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32)); |
865 | 1027 | ||
866 | if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0, | 1028 | if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0, |
867 | &data) != DFX_K_SUCCESS) { | 1029 | &data) != DFX_K_SUCCESS) { |
@@ -869,8 +1031,8 @@ static int __devinit dfx_driver_init(struct net_device *dev, | |||
869 | print_name); | 1031 | print_name); |
870 | return(DFX_K_FAILURE); | 1032 | return(DFX_K_FAILURE); |
871 | } | 1033 | } |
872 | data = cpu_to_le32(data); | 1034 | le32 = cpu_to_le32(data); |
873 | memcpy(&bp->factory_mac_addr[4], &data, sizeof(u16)); | 1035 | memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16)); |
874 | 1036 | ||
875 | /* | 1037 | /* |
876 | * Set current address to factory address | 1038 | * Set current address to factory address |
@@ -880,20 +1042,18 @@ static int __devinit dfx_driver_init(struct net_device *dev, | |||
880 | */ | 1042 | */ |
881 | 1043 | ||
882 | memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN); | 1044 | memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN); |
883 | if (bp->bus_type == DFX_BUS_TYPE_EISA) | 1045 | if (dfx_bus_tc) |
884 | printk("%s: DEFEA at I/O addr = 0x%lX, IRQ = %d, " | 1046 | board_name = "DEFTA"; |
885 | "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n", | 1047 | if (dfx_bus_eisa) |
886 | print_name, dev->base_addr, dev->irq, | 1048 | board_name = "DEFEA"; |
887 | dev->dev_addr[0], dev->dev_addr[1], | 1049 | if (dfx_bus_pci) |
888 | dev->dev_addr[2], dev->dev_addr[3], | 1050 | board_name = "DEFPA"; |
889 | dev->dev_addr[4], dev->dev_addr[5]); | 1051 | pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, " |
890 | else | 1052 | "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n", |
891 | printk("%s: DEFPA at I/O addr = 0x%lX, IRQ = %d, " | 1053 | print_name, board_name, dfx_use_mmio ? "" : "I/O ", |
892 | "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n", | 1054 | (long long)bar_start, dev->irq, |
893 | print_name, dev->base_addr, dev->irq, | 1055 | dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], |
894 | dev->dev_addr[0], dev->dev_addr[1], | 1056 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); |
895 | dev->dev_addr[2], dev->dev_addr[3], | ||
896 | dev->dev_addr[4], dev->dev_addr[5]); | ||
897 | 1057 | ||
898 | /* | 1058 | /* |
899 | * Get memory for descriptor block, consumer block, and other buffers | 1059 | * Get memory for descriptor block, consumer block, and other buffers |
@@ -908,8 +1068,9 @@ static int __devinit dfx_driver_init(struct net_device *dev, | |||
908 | #endif | 1068 | #endif |
909 | sizeof(PI_CONSUMER_BLOCK) + | 1069 | sizeof(PI_CONSUMER_BLOCK) + |
910 | (PI_ALIGN_K_DESC_BLK - 1); | 1070 | (PI_ALIGN_K_DESC_BLK - 1); |
911 | bp->kmalloced = top_v = pci_alloc_consistent(bp->pci_dev, alloc_size, | 1071 | bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size, |
912 | &bp->kmalloced_dma); | 1072 | &bp->kmalloced_dma, |
1073 | GFP_ATOMIC); | ||
913 | if (top_v == NULL) { | 1074 | if (top_v == NULL) { |
914 | printk("%s: Could not allocate memory for host buffers " | 1075 | printk("%s: Could not allocate memory for host buffers " |
915 | "and structures!\n", print_name); | 1076 | "and structures!\n", print_name); |
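
With EISA and TURBOchannel boards handled by the same probe path, the allocation above moves from the PCI-only pci_alloc_consistent() to the generic DMA API keyed on a struct device. A minimal sketch of the pattern, with illustrative helper names:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    static void *example_alloc_block(struct device *bdev, size_t size,
                                     dma_addr_t *dma)
    {
            /* GFP_ATOMIC mirrors the driver; GFP_KERNEL would also do in
             * sleepable probe context.
             */
            return dma_alloc_coherent(bdev, size, dma, GFP_ATOMIC);
    }

    static void example_free_block(struct device *bdev, size_t size,
                                   void *cpu_addr, dma_addr_t dma)
    {
            dma_free_coherent(bdev, size, cpu_addr, dma);
    }
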
@@ -1219,14 +1380,15 @@ static int dfx_adap_init(DFX_board_t *bp, int get_buffers) | |||
1219 | 1380 | ||
1220 | static int dfx_open(struct net_device *dev) | 1381 | static int dfx_open(struct net_device *dev) |
1221 | { | 1382 | { |
1383 | DFX_board_t *bp = netdev_priv(dev); | ||
1222 | int ret; | 1384 | int ret; |
1223 | DFX_board_t *bp = dev->priv; | ||
1224 | 1385 | ||
1225 | DBG_printk("In dfx_open...\n"); | 1386 | DBG_printk("In dfx_open...\n"); |
1226 | 1387 | ||
1227 | /* Register IRQ - support shared interrupts by passing device ptr */ | 1388 | /* Register IRQ - support shared interrupts by passing device ptr */ |
1228 | 1389 | ||
1229 | ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name, dev); | 1390 | ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name, |
1391 | dev); | ||
1230 | if (ret) { | 1392 | if (ret) { |
1231 | printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq); | 1393 | printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq); |
1232 | return ret; | 1394 | return ret; |
@@ -1309,7 +1471,7 @@ static int dfx_open(struct net_device *dev) | |||
1309 | 1471 | ||
1310 | static int dfx_close(struct net_device *dev) | 1472 | static int dfx_close(struct net_device *dev) |
1311 | { | 1473 | { |
1312 | DFX_board_t *bp = dev->priv; | 1474 | DFX_board_t *bp = netdev_priv(dev); |
1313 | 1475 | ||
1314 | DBG_printk("In dfx_close...\n"); | 1476 | DBG_printk("In dfx_close...\n"); |
1315 | 1477 | ||
@@ -1645,7 +1807,7 @@ static void dfx_int_type_0_process(DFX_board_t *bp) | |||
1645 | 1807 | ||
1646 | static void dfx_int_common(struct net_device *dev) | 1808 | static void dfx_int_common(struct net_device *dev) |
1647 | { | 1809 | { |
1648 | DFX_board_t *bp = dev->priv; | 1810 | DFX_board_t *bp = netdev_priv(dev); |
1649 | PI_UINT32 port_status; /* Port Status register */ | 1811 | PI_UINT32 port_status; /* Port Status register */ |
1650 | 1812 | ||
1651 | /* Process xmt interrupts - frequent case, so always call this routine */ | 1813 | /* Process xmt interrupts - frequent case, so always call this routine */ |
@@ -1715,18 +1877,16 @@ static void dfx_int_common(struct net_device *dev) | |||
1715 | 1877 | ||
1716 | static irqreturn_t dfx_interrupt(int irq, void *dev_id) | 1878 | static irqreturn_t dfx_interrupt(int irq, void *dev_id) |
1717 | { | 1879 | { |
1718 | struct net_device *dev = dev_id; | 1880 | struct net_device *dev = dev_id; |
1719 | DFX_board_t *bp; /* private board structure pointer */ | 1881 | DFX_board_t *bp = netdev_priv(dev); |
1720 | 1882 | struct device *bdev = bp->bus_dev; | |
1721 | /* Get board pointer only if device structure is valid */ | 1883 | int dfx_bus_pci = DFX_BUS_PCI(bdev); |
1722 | 1884 | int dfx_bus_eisa = DFX_BUS_EISA(bdev); | |
1723 | bp = dev->priv; | 1885 | int dfx_bus_tc = DFX_BUS_TC(bdev); |
1724 | |||
1725 | /* See if we're already servicing an interrupt */ | ||
1726 | 1886 | ||
1727 | /* Service adapter interrupts */ | 1887 | /* Service adapter interrupts */ |
1728 | 1888 | ||
1729 | if (bp->bus_type == DFX_BUS_TYPE_PCI) { | 1889 | if (dfx_bus_pci) { |
1730 | u32 status; | 1890 | u32 status; |
1731 | 1891 | ||
1732 | dfx_port_read_long(bp, PFI_K_REG_STATUS, &status); | 1892 | dfx_port_read_long(bp, PFI_K_REG_STATUS, &status); |
@@ -1750,10 +1910,12 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id) | |||
1750 | PFI_MODE_M_DMA_ENB)); | 1910 | PFI_MODE_M_DMA_ENB)); |
1751 | 1911 | ||
1752 | spin_unlock(&bp->lock); | 1912 | spin_unlock(&bp->lock); |
1753 | } else { | 1913 | } |
1914 | if (dfx_bus_eisa) { | ||
1915 | unsigned long base_addr = to_eisa_device(bdev)->base_addr; | ||
1754 | u8 status; | 1916 | u8 status; |
1755 | 1917 | ||
1756 | dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &status); | 1918 | status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); |
1757 | if (!(status & PI_CONFIG_STAT_0_M_PEND)) | 1919 | if (!(status & PI_CONFIG_STAT_0_M_PEND)) |
1758 | return IRQ_NONE; | 1920 | return IRQ_NONE; |
1759 | 1921 | ||
@@ -1761,15 +1923,35 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id) | |||
1761 | 1923 | ||
1762 | /* Disable interrupts at the ESIC */ | 1924 | /* Disable interrupts at the ESIC */ |
1763 | status &= ~PI_CONFIG_STAT_0_M_INT_ENB; | 1925 | status &= ~PI_CONFIG_STAT_0_M_INT_ENB; |
1764 | dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, status); | 1926 | outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); |
1765 | 1927 | ||
1766 | /* Call interrupt service routine for this adapter */ | 1928 | /* Call interrupt service routine for this adapter */ |
1767 | dfx_int_common(dev); | 1929 | dfx_int_common(dev); |
1768 | 1930 | ||
1769 | /* Reenable interrupts at the ESIC */ | 1931 | /* Reenable interrupts at the ESIC */ |
1770 | dfx_port_read_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, &status); | 1932 | status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); |
1771 | status |= PI_CONFIG_STAT_0_M_INT_ENB; | 1933 | status |= PI_CONFIG_STAT_0_M_INT_ENB; |
1772 | dfx_port_write_byte(bp, PI_ESIC_K_IO_CONFIG_STAT_0, status); | 1934 | outb(status, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); |
1935 | |||
1936 | spin_unlock(&bp->lock); | ||
1937 | } | ||
1938 | if (dfx_bus_tc) { | ||
1939 | u32 status; | ||
1940 | |||
1941 | dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status); | ||
1942 | if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING | | ||
1943 | PI_PSTATUS_M_XMT_DATA_PENDING | | ||
1944 | PI_PSTATUS_M_SMT_HOST_PENDING | | ||
1945 | PI_PSTATUS_M_UNSOL_PENDING | | ||
1946 | PI_PSTATUS_M_CMD_RSP_PENDING | | ||
1947 | PI_PSTATUS_M_CMD_REQ_PENDING | | ||
1948 | PI_PSTATUS_M_TYPE_0_PENDING))) | ||
1949 | return IRQ_NONE; | ||
1950 | |||
1951 | spin_lock(&bp->lock); | ||
1952 | |||
1953 | /* Call interrupt service routine for this adapter */ | ||
1954 | dfx_int_common(dev); | ||
1773 | 1955 | ||
1774 | spin_unlock(&bp->lock); | 1956 | spin_unlock(&bp->lock); |
1775 | } | 1957 | } |
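
The interrupt handler now keys the IRQ_NONE/IRQ_HANDLED decision and the pending-status read on the bus type, since the line may be shared. The skeleton below is a hedged distillation of that discipline using a hypothetical status register; the real driver reads PFI, ESIC or PDQ status as shown above:

    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    struct example_priv {
            spinlock_t lock;
            void __iomem *csr;              /* hypothetical status register */
    };

    #define EXAMPLE_INT_PENDING 0x1         /* hypothetical pending bit */

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            struct net_device *dev = dev_id;
            struct example_priv *priv = netdev_priv(dev);

            /* On a shared line, claim the interrupt only if this board
             * is actually asserting it.
             */
            if (!(readl(priv->csr) & EXAMPLE_INT_PENDING))
                    return IRQ_NONE;

            spin_lock(&priv->lock);
            /* ... service transmit/receive/unsolicited events here ... */
            spin_unlock(&priv->lock);

            return IRQ_HANDLED;
    }
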
@@ -1823,7 +2005,7 @@ static irqreturn_t dfx_interrupt(int irq, void *dev_id) | |||
1823 | 2005 | ||
1824 | static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev) | 2006 | static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev) |
1825 | { | 2007 | { |
1826 | DFX_board_t *bp = dev->priv; | 2008 | DFX_board_t *bp = netdev_priv(dev); |
1827 | 2009 | ||
1828 | /* Fill the bp->stats structure with driver-maintained counters */ | 2010 | /* Fill the bp->stats structure with driver-maintained counters */ |
1829 | 2011 | ||
@@ -2009,8 +2191,8 @@ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev) | |||
2009 | */ | 2191 | */ |
2010 | 2192 | ||
2011 | static void dfx_ctl_set_multicast_list(struct net_device *dev) | 2193 | static void dfx_ctl_set_multicast_list(struct net_device *dev) |
2012 | { | 2194 | { |
2013 | DFX_board_t *bp = dev->priv; | 2195 | DFX_board_t *bp = netdev_priv(dev); |
2014 | int i; /* used as index in for loop */ | 2196 | int i; /* used as index in for loop */ |
2015 | struct dev_mc_list *dmi; /* ptr to multicast addr entry */ | 2197 | struct dev_mc_list *dmi; /* ptr to multicast addr entry */ |
2016 | 2198 | ||
@@ -2124,8 +2306,8 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev) | |||
2124 | 2306 | ||
2125 | static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr) | 2307 | static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr) |
2126 | { | 2308 | { |
2127 | DFX_board_t *bp = dev->priv; | ||
2128 | struct sockaddr *p_sockaddr = (struct sockaddr *)addr; | 2309 | struct sockaddr *p_sockaddr = (struct sockaddr *)addr; |
2310 | DFX_board_t *bp = netdev_priv(dev); | ||
2129 | 2311 | ||
2130 | /* Copy unicast address to driver-maintained structs and update count */ | 2312 | /* Copy unicast address to driver-maintained structs and update count */ |
2131 | 2313 | ||
@@ -2764,9 +2946,9 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers) | |||
2764 | 2946 | ||
2765 | my_skb_align(newskb, 128); | 2947 | my_skb_align(newskb, 128); |
2766 | bp->descr_block_virt->rcv_data[i + j].long_1 = | 2948 | bp->descr_block_virt->rcv_data[i + j].long_1 = |
2767 | (u32)pci_map_single(bp->pci_dev, newskb->data, | 2949 | (u32)dma_map_single(bp->bus_dev, newskb->data, |
2768 | NEW_SKB_SIZE, | 2950 | NEW_SKB_SIZE, |
2769 | PCI_DMA_FROMDEVICE); | 2951 | DMA_FROM_DEVICE); |
2770 | /* | 2952 | /* |
2771 | * p_rcv_buff_va is only used inside the | 2953 | * p_rcv_buff_va is only used inside the |
2772 | * kernel so we put the skb pointer here. | 2954 | * kernel so we put the skb pointer here. |
@@ -2880,17 +3062,17 @@ static void dfx_rcv_queue_process( | |||
2880 | 3062 | ||
2881 | my_skb_align(newskb, 128); | 3063 | my_skb_align(newskb, 128); |
2882 | skb = (struct sk_buff *)bp->p_rcv_buff_va[entry]; | 3064 | skb = (struct sk_buff *)bp->p_rcv_buff_va[entry]; |
2883 | pci_unmap_single(bp->pci_dev, | 3065 | dma_unmap_single(bp->bus_dev, |
2884 | bp->descr_block_virt->rcv_data[entry].long_1, | 3066 | bp->descr_block_virt->rcv_data[entry].long_1, |
2885 | NEW_SKB_SIZE, | 3067 | NEW_SKB_SIZE, |
2886 | PCI_DMA_FROMDEVICE); | 3068 | DMA_FROM_DEVICE); |
2887 | skb_reserve(skb, RCV_BUFF_K_PADDING); | 3069 | skb_reserve(skb, RCV_BUFF_K_PADDING); |
2888 | bp->p_rcv_buff_va[entry] = (char *)newskb; | 3070 | bp->p_rcv_buff_va[entry] = (char *)newskb; |
2889 | bp->descr_block_virt->rcv_data[entry].long_1 = | 3071 | bp->descr_block_virt->rcv_data[entry].long_1 = |
2890 | (u32)pci_map_single(bp->pci_dev, | 3072 | (u32)dma_map_single(bp->bus_dev, |
2891 | newskb->data, | 3073 | newskb->data, |
2892 | NEW_SKB_SIZE, | 3074 | NEW_SKB_SIZE, |
2893 | PCI_DMA_FROMDEVICE); | 3075 | DMA_FROM_DEVICE); |
2894 | } else | 3076 | } else |
2895 | skb = NULL; | 3077 | skb = NULL; |
2896 | } else | 3078 | } else |
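
The receive-path hunks switch the streaming mappings from the pci_* wrappers to dma_map_single()/dma_unmap_single() against bp->bus_dev, so the same code serves all three buses. A sketch of the pairing, with illustrative names and `len` standing in for the driver's NEW_SKB_SIZE:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    static dma_addr_t example_map_rx(struct device *bdev, struct sk_buff *skb,
                                     size_t len)
    {
            /* Map before handing the buffer to the adapter ... */
            return dma_map_single(bdev, skb->data, len, DMA_FROM_DEVICE);
    }

    static void example_unmap_rx(struct device *bdev, dma_addr_t dma, size_t len)
    {
            /* ... and unmap with the same length and direction before the
             * CPU looks at the received frame.
             */
            dma_unmap_single(bdev, dma, len, DMA_FROM_DEVICE);
    }
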
@@ -3010,7 +3192,7 @@ static int dfx_xmt_queue_pkt( | |||
3010 | ) | 3192 | ) |
3011 | 3193 | ||
3012 | { | 3194 | { |
3013 | DFX_board_t *bp = dev->priv; | 3195 | DFX_board_t *bp = netdev_priv(dev); |
3014 | u8 prod; /* local transmit producer index */ | 3196 | u8 prod; /* local transmit producer index */ |
3015 | PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */ | 3197 | PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */ |
3016 | XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */ | 3198 | XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */ |
@@ -3116,8 +3298,8 @@ static int dfx_xmt_queue_pkt( | |||
3116 | */ | 3298 | */ |
3117 | 3299 | ||
3118 | p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN)); | 3300 | p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN)); |
3119 | p_xmt_descr->long_1 = (u32)pci_map_single(bp->pci_dev, skb->data, | 3301 | p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data, |
3120 | skb->len, PCI_DMA_TODEVICE); | 3302 | skb->len, DMA_TO_DEVICE); |
3121 | 3303 | ||
3122 | /* | 3304 | /* |
3123 | * Verify that descriptor is actually available | 3305 | * Verify that descriptor is actually available |
@@ -3220,10 +3402,10 @@ static int dfx_xmt_done(DFX_board_t *bp) | |||
3220 | 3402 | ||
3221 | /* Return skb to operating system */ | 3403 | /* Return skb to operating system */ |
3222 | comp = bp->rcv_xmt_reg.index.xmt_comp; | 3404 | comp = bp->rcv_xmt_reg.index.xmt_comp; |
3223 | pci_unmap_single(bp->pci_dev, | 3405 | dma_unmap_single(bp->bus_dev, |
3224 | bp->descr_block_virt->xmt_data[comp].long_1, | 3406 | bp->descr_block_virt->xmt_data[comp].long_1, |
3225 | p_xmt_drv_descr->p_skb->len, | 3407 | p_xmt_drv_descr->p_skb->len, |
3226 | PCI_DMA_TODEVICE); | 3408 | DMA_TO_DEVICE); |
3227 | dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); | 3409 | dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); |
3228 | 3410 | ||
3229 | /* | 3411 | /* |
@@ -3344,10 +3526,10 @@ static void dfx_xmt_flush( DFX_board_t *bp ) | |||
3344 | 3526 | ||
3345 | /* Return skb to operating system */ | 3527 | /* Return skb to operating system */ |
3346 | comp = bp->rcv_xmt_reg.index.xmt_comp; | 3528 | comp = bp->rcv_xmt_reg.index.xmt_comp; |
3347 | pci_unmap_single(bp->pci_dev, | 3529 | dma_unmap_single(bp->bus_dev, |
3348 | bp->descr_block_virt->xmt_data[comp].long_1, | 3530 | bp->descr_block_virt->xmt_data[comp].long_1, |
3349 | p_xmt_drv_descr->p_skb->len, | 3531 | p_xmt_drv_descr->p_skb->len, |
3350 | PCI_DMA_TODEVICE); | 3532 | DMA_TO_DEVICE); |
3351 | dev_kfree_skb(p_xmt_drv_descr->p_skb); | 3533 | dev_kfree_skb(p_xmt_drv_descr->p_skb); |
3352 | 3534 | ||
3353 | /* Increment transmit error counter */ | 3535 | /* Increment transmit error counter */ |
@@ -3375,13 +3557,44 @@ static void dfx_xmt_flush( DFX_board_t *bp ) | |||
3375 | bp->cons_block_virt->xmt_rcv_data = prod_cons; | 3557 | bp->cons_block_virt->xmt_rcv_data = prod_cons; |
3376 | } | 3558 | } |
3377 | 3559 | ||
3378 | static void __devexit dfx_remove_one_pci_or_eisa(struct pci_dev *pdev, struct net_device *dev) | 3560 | /* |
3561 | * ================== | ||
3562 | * = dfx_unregister = | ||
3563 | * ================== | ||
3564 | * | ||
3565 | * Overview: | ||
3566 | * Shuts down an FDDI controller | ||
3567 | * | ||
3568 | * Returns: | ||
3569 | * None | ||
3570 | * | ||
3571 | * Arguments: | ||
3572 | * bdev - pointer to device information | ||
3573 | * | ||
3574 | * Functional Description: | ||
3575 | * | ||
3576 | * Return Codes: | ||
3577 | * None | ||
3578 | * | ||
3579 | * Assumptions: | ||
3580 | * It compiles so it should work :-( (PCI cards do :-) | ||
3581 | * | ||
3582 | * Side Effects: | ||
3583 | * Device structures for FDDI adapters (fddi0, fddi1, etc) are | ||
3584 | * freed. | ||
3585 | */ | ||
3586 | static void __devexit dfx_unregister(struct device *bdev) | ||
3379 | { | 3587 | { |
3380 | DFX_board_t *bp = dev->priv; | 3588 | struct net_device *dev = dev_get_drvdata(bdev); |
3589 | DFX_board_t *bp = netdev_priv(dev); | ||
3590 | int dfx_bus_pci = DFX_BUS_PCI(bdev); | ||
3591 | int dfx_bus_tc = DFX_BUS_TC(bdev); | ||
3592 | int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; | ||
3593 | resource_size_t bar_start = 0; /* resource start address */ | ||
3594 | resource_size_t bar_len = 0; /* resource length */ | ||
3381 | int alloc_size; /* total buffer size used */ | 3595 | int alloc_size; /* total buffer size used */ |
3382 | 3596 | ||
3383 | unregister_netdev(dev); | 3597 | unregister_netdev(dev); |
3384 | release_region(dev->base_addr, pdev ? PFI_K_CSR_IO_LEN : PI_ESIC_K_CSR_IO_LEN ); | ||
3385 | 3598 | ||
3386 | alloc_size = sizeof(PI_DESCR_BLOCK) + | 3599 | alloc_size = sizeof(PI_DESCR_BLOCK) + |
3387 | PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX + | 3600 | PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX + |
@@ -3391,78 +3604,141 @@ static void __devexit dfx_remove_one_pci_or_eisa(struct pci_dev *pdev, struct ne | |||
3391 | sizeof(PI_CONSUMER_BLOCK) + | 3604 | sizeof(PI_CONSUMER_BLOCK) + |
3392 | (PI_ALIGN_K_DESC_BLK - 1); | 3605 | (PI_ALIGN_K_DESC_BLK - 1); |
3393 | if (bp->kmalloced) | 3606 | if (bp->kmalloced) |
3394 | pci_free_consistent(pdev, alloc_size, bp->kmalloced, | 3607 | dma_free_coherent(bdev, alloc_size, |
3395 | bp->kmalloced_dma); | 3608 | bp->kmalloced, bp->kmalloced_dma); |
3609 | |||
3610 | dfx_bus_uninit(dev); | ||
3611 | |||
3612 | dfx_get_bars(bdev, &bar_start, &bar_len); | ||
3613 | if (dfx_use_mmio) { | ||
3614 | iounmap(bp->base.mem); | ||
3615 | release_mem_region(bar_start, bar_len); | ||
3616 | } else | ||
3617 | release_region(bar_start, bar_len); | ||
3618 | |||
3619 | if (dfx_bus_pci) | ||
3620 | pci_disable_device(to_pci_dev(bdev)); | ||
3621 | |||
3396 | free_netdev(dev); | 3622 | free_netdev(dev); |
3397 | } | 3623 | } |
3398 | 3624 | ||
3399 | static void __devexit dfx_remove_one (struct pci_dev *pdev) | ||
3400 | { | ||
3401 | struct net_device *dev = pci_get_drvdata(pdev); | ||
3402 | 3625 | ||
3403 | dfx_remove_one_pci_or_eisa(pdev, dev); | 3626 | static int __devinit __unused dfx_dev_register(struct device *); |
3404 | pci_set_drvdata(pdev, NULL); | 3627 | static int __devexit __unused dfx_dev_unregister(struct device *); |
3405 | } | ||
3406 | 3628 | ||
3407 | static struct pci_device_id dfx_pci_tbl[] = { | 3629 | #ifdef CONFIG_PCI |
3408 | { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI, PCI_ANY_ID, PCI_ANY_ID, }, | 3630 | static int __devinit dfx_pci_register(struct pci_dev *, |
3409 | { 0, } | 3631 | const struct pci_device_id *); |
3632 | static void __devexit dfx_pci_unregister(struct pci_dev *); | ||
3633 | |||
3634 | static struct pci_device_id dfx_pci_table[] = { | ||
3635 | { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) }, | ||
3636 | { } | ||
3410 | }; | 3637 | }; |
3411 | MODULE_DEVICE_TABLE(pci, dfx_pci_tbl); | 3638 | MODULE_DEVICE_TABLE(pci, dfx_pci_table); |
3412 | 3639 | ||
3413 | static struct pci_driver dfx_driver = { | 3640 | static struct pci_driver dfx_pci_driver = { |
3414 | .name = "defxx", | 3641 | .name = "defxx", |
3415 | .probe = dfx_init_one, | 3642 | .id_table = dfx_pci_table, |
3416 | .remove = __devexit_p(dfx_remove_one), | 3643 | .probe = dfx_pci_register, |
3417 | .id_table = dfx_pci_tbl, | 3644 | .remove = __devexit_p(dfx_pci_unregister), |
3418 | }; | 3645 | }; |
3419 | 3646 | ||
3420 | static int dfx_have_pci; | 3647 | static __devinit int dfx_pci_register(struct pci_dev *pdev, |
3421 | static int dfx_have_eisa; | 3648 | const struct pci_device_id *ent) |
3422 | 3649 | { | |
3650 | return dfx_register(&pdev->dev); | ||
3651 | } | ||
3423 | 3652 | ||
3424 | static void __exit dfx_eisa_cleanup(void) | 3653 | static void __devexit dfx_pci_unregister(struct pci_dev *pdev) |
3425 | { | 3654 | { |
3426 | struct net_device *dev = root_dfx_eisa_dev; | 3655 | dfx_unregister(&pdev->dev); |
3656 | } | ||
3657 | #endif /* CONFIG_PCI */ | ||
3658 | |||
3659 | #ifdef CONFIG_EISA | ||
3660 | static struct eisa_device_id dfx_eisa_table[] = { | ||
3661 | { "DEC3001", DEFEA_PROD_ID_1 }, | ||
3662 | { "DEC3002", DEFEA_PROD_ID_2 }, | ||
3663 | { "DEC3003", DEFEA_PROD_ID_3 }, | ||
3664 | { "DEC3004", DEFEA_PROD_ID_4 }, | ||
3665 | { } | ||
3666 | }; | ||
3667 | MODULE_DEVICE_TABLE(eisa, dfx_eisa_table); | ||
3668 | |||
3669 | static struct eisa_driver dfx_eisa_driver = { | ||
3670 | .id_table = dfx_eisa_table, | ||
3671 | .driver = { | ||
3672 | .name = "defxx", | ||
3673 | .bus = &eisa_bus_type, | ||
3674 | .probe = dfx_dev_register, | ||
3675 | .remove = __devexit_p(dfx_dev_unregister), | ||
3676 | }, | ||
3677 | }; | ||
3678 | #endif /* CONFIG_EISA */ | ||
3679 | |||
3680 | #ifdef CONFIG_TC | ||
3681 | static struct tc_device_id const dfx_tc_table[] = { | ||
3682 | { "DEC ", "PMAF-FA " }, | ||
3683 | { "DEC ", "PMAF-FD " }, | ||
3684 | { "DEC ", "PMAF-FS " }, | ||
3685 | { "DEC ", "PMAF-FU " }, | ||
3686 | { } | ||
3687 | }; | ||
3688 | MODULE_DEVICE_TABLE(tc, dfx_tc_table); | ||
3689 | |||
3690 | static struct tc_driver dfx_tc_driver = { | ||
3691 | .id_table = dfx_tc_table, | ||
3692 | .driver = { | ||
3693 | .name = "defxx", | ||
3694 | .bus = &tc_bus_type, | ||
3695 | .probe = dfx_dev_register, | ||
3696 | .remove = __devexit_p(dfx_dev_unregister), | ||
3697 | }, | ||
3698 | }; | ||
3699 | #endif /* CONFIG_TC */ | ||
3427 | 3700 | ||
3428 | while (dev) | 3701 | static int __devinit __unused dfx_dev_register(struct device *dev) |
3429 | { | 3702 | { |
3430 | struct net_device *tmp; | 3703 | int status; |
3431 | DFX_board_t *bp; | ||
3432 | 3704 | ||
3433 | bp = (DFX_board_t*)dev->priv; | 3705 | status = dfx_register(dev); |
3434 | tmp = bp->next; | 3706 | if (!status) |
3435 | dfx_remove_one_pci_or_eisa(NULL, dev); | 3707 | get_device(dev); |
3436 | dev = tmp; | 3708 | return status; |
3437 | } | ||
3438 | } | 3709 | } |
3439 | 3710 | ||
3440 | static int __init dfx_init(void) | 3711 | static int __devexit __unused dfx_dev_unregister(struct device *dev) |
3441 | { | 3712 | { |
3442 | int rc_pci, rc_eisa; | 3713 | put_device(dev); |
3443 | 3714 | dfx_unregister(dev); | |
3444 | rc_pci = pci_register_driver(&dfx_driver); | 3715 | return 0; |
3445 | if (rc_pci >= 0) dfx_have_pci = 1; | 3716 | } |
3446 | 3717 | ||
3447 | rc_eisa = dfx_eisa_init(); | ||
3448 | if (rc_eisa >= 0) dfx_have_eisa = 1; | ||
3449 | 3718 | ||
3450 | return ((rc_eisa < 0) ? 0 : rc_eisa) + ((rc_pci < 0) ? 0 : rc_pci); | 3719 | static int __init dfx_init(void) |
3720 | { | ||
3721 | int status; | ||
3722 | |||
3723 | status = pci_register_driver(&dfx_pci_driver); | ||
3724 | if (!status) | ||
3725 | status = eisa_driver_register(&dfx_eisa_driver); | ||
3726 | if (!status) | ||
3727 | status = tc_register_driver(&dfx_tc_driver); | ||
3728 | return status; | ||
3451 | } | 3729 | } |
3452 | 3730 | ||
3453 | static void __exit dfx_cleanup(void) | 3731 | static void __exit dfx_cleanup(void) |
3454 | { | 3732 | { |
3455 | if (dfx_have_pci) | 3733 | tc_unregister_driver(&dfx_tc_driver); |
3456 | pci_unregister_driver(&dfx_driver); | 3734 | eisa_driver_unregister(&dfx_eisa_driver); |
3457 | if (dfx_have_eisa) | 3735 | pci_unregister_driver(&dfx_pci_driver); |
3458 | dfx_eisa_cleanup(); | ||
3459 | |||
3460 | } | 3736 | } |
3461 | 3737 | ||
3462 | module_init(dfx_init); | 3738 | module_init(dfx_init); |
3463 | module_exit(dfx_cleanup); | 3739 | module_exit(dfx_cleanup); |
3464 | MODULE_AUTHOR("Lawrence V. Stefani"); | 3740 | MODULE_AUTHOR("Lawrence V. Stefani"); |
3465 | MODULE_DESCRIPTION("DEC FDDIcontroller EISA/PCI (DEFEA/DEFPA) driver " | 3741 | MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver " |
3466 | DRV_VERSION " " DRV_RELDATE); | 3742 | DRV_VERSION " " DRV_RELDATE); |
3467 | MODULE_LICENSE("GPL"); | 3743 | MODULE_LICENSE("GPL"); |
3468 | 3744 | ||
diff --git a/drivers/net/defxx.h b/drivers/net/defxx.h index 2ce8f97253eb..19a6f64df198 100644 --- a/drivers/net/defxx.h +++ b/drivers/net/defxx.h | |||
@@ -26,6 +26,7 @@ | |||
26 | * 12-Sep-96 LVS Removed packet request header pointers. | 26 | * 12-Sep-96 LVS Removed packet request header pointers. |
27 | * 04 Aug 2003 macro Converted to the DMA API. | 27 | * 04 Aug 2003 macro Converted to the DMA API. |
28 | * 23 Oct 2006 macro Big-endian host support. | 28 | * 23 Oct 2006 macro Big-endian host support. |
29 | * 14 Dec 2006 macro TURBOchannel support. | ||
29 | */ | 30 | */ |
30 | 31 | ||
31 | #ifndef _DEFXX_H_ | 32 | #ifndef _DEFXX_H_ |
@@ -1471,9 +1472,17 @@ typedef union | |||
1471 | 1472 | ||
1472 | #endif /* __BIG_ENDIAN */ | 1473 | #endif /* __BIG_ENDIAN */ |
1473 | 1474 | ||
1475 | /* Define TC PDQ CSR offset and length */ | ||
1476 | |||
1477 | #define PI_TC_K_CSR_OFFSET 0x100000 | ||
1478 | #define PI_TC_K_CSR_LEN 0x40 /* 64 bytes */ | ||
1479 | |||
1474 | /* Define EISA controller register offsets */ | 1480 | /* Define EISA controller register offsets */ |
1475 | 1481 | ||
1476 | #define PI_ESIC_K_BURST_HOLDOFF 0x040 | 1482 | #define PI_ESIC_K_CSR_IO_LEN 0x80 /* 128 bytes */ |
1483 | |||
1484 | #define PI_DEFEA_K_BURST_HOLDOFF 0x040 | ||
1485 | |||
1477 | #define PI_ESIC_K_SLOT_ID 0xC80 | 1486 | #define PI_ESIC_K_SLOT_ID 0xC80 |
1478 | #define PI_ESIC_K_SLOT_CNTRL 0xC84 | 1487 | #define PI_ESIC_K_SLOT_CNTRL 0xC84 |
1479 | #define PI_ESIC_K_MEM_ADD_CMP_0 0xC85 | 1488 | #define PI_ESIC_K_MEM_ADD_CMP_0 0xC85 |
@@ -1488,14 +1497,14 @@ typedef union | |||
1488 | #define PI_ESIC_K_MEM_ADD_LO_CMP_0 0xC8E | 1497 | #define PI_ESIC_K_MEM_ADD_LO_CMP_0 0xC8E |
1489 | #define PI_ESIC_K_MEM_ADD_LO_CMP_1 0xC8F | 1498 | #define PI_ESIC_K_MEM_ADD_LO_CMP_1 0xC8F |
1490 | #define PI_ESIC_K_MEM_ADD_LO_CMP_2 0xC90 | 1499 | #define PI_ESIC_K_MEM_ADD_LO_CMP_2 0xC90 |
1491 | #define PI_ESIC_K_IO_CMP_0_0 0xC91 | 1500 | #define PI_ESIC_K_IO_ADD_CMP_0_0 0xC91 |
1492 | #define PI_ESIC_K_IO_CMP_0_1 0xC92 | 1501 | #define PI_ESIC_K_IO_ADD_CMP_0_1 0xC92 |
1493 | #define PI_ESIC_K_IO_CMP_1_0 0xC93 | 1502 | #define PI_ESIC_K_IO_ADD_CMP_1_0 0xC93 |
1494 | #define PI_ESIC_K_IO_CMP_1_1 0xC94 | 1503 | #define PI_ESIC_K_IO_ADD_CMP_1_1 0xC94 |
1495 | #define PI_ESIC_K_IO_CMP_2_0 0xC95 | 1504 | #define PI_ESIC_K_IO_ADD_CMP_2_0 0xC95 |
1496 | #define PI_ESIC_K_IO_CMP_2_1 0xC96 | 1505 | #define PI_ESIC_K_IO_ADD_CMP_2_1 0xC96 |
1497 | #define PI_ESIC_K_IO_CMP_3_0 0xC97 | 1506 | #define PI_ESIC_K_IO_ADD_CMP_3_0 0xC97 |
1498 | #define PI_ESIC_K_IO_CMP_3_1 0xC98 | 1507 | #define PI_ESIC_K_IO_ADD_CMP_3_1 0xC98 |
1499 | #define PI_ESIC_K_IO_ADD_MASK_0_0 0xC99 | 1508 | #define PI_ESIC_K_IO_ADD_MASK_0_0 0xC99 |
1500 | #define PI_ESIC_K_IO_ADD_MASK_0_1 0xC9A | 1509 | #define PI_ESIC_K_IO_ADD_MASK_0_1 0xC9A |
1501 | #define PI_ESIC_K_IO_ADD_MASK_1_0 0xC9B | 1510 | #define PI_ESIC_K_IO_ADD_MASK_1_0 0xC9B |
@@ -1518,11 +1527,16 @@ typedef union | |||
1518 | #define PI_ESIC_K_INPUT_PORT 0xCAC | 1527 | #define PI_ESIC_K_INPUT_PORT 0xCAC |
1519 | #define PI_ESIC_K_OUTPUT_PORT 0xCAD | 1528 | #define PI_ESIC_K_OUTPUT_PORT 0xCAD |
1520 | #define PI_ESIC_K_FUNCTION_CNTRL 0xCAE | 1529 | #define PI_ESIC_K_FUNCTION_CNTRL 0xCAE |
1521 | #define PI_ESIC_K_CSR_IO_LEN PI_ESIC_K_FUNCTION_CNTRL+1 /* always last reg + 1 */ | ||
1522 | 1530 | ||
1523 | /* Define the value all drivers must write to the function control register. */ | 1531 | /* Define the bits in the function control register. */ |
1524 | 1532 | ||
1525 | #define PI_ESIC_K_FUNCTION_CNTRL_IO_ENB 0x03 | 1533 | #define PI_FUNCTION_CNTRL_M_IOCS0 0x01 |
1534 | #define PI_FUNCTION_CNTRL_M_IOCS1 0x02 | ||
1535 | #define PI_FUNCTION_CNTRL_M_IOCS2 0x04 | ||
1536 | #define PI_FUNCTION_CNTRL_M_IOCS3 0x08 | ||
1537 | #define PI_FUNCTION_CNTRL_M_MEMCS0 0x10 | ||
1538 | #define PI_FUNCTION_CNTRL_M_MEMCS1 0x20 | ||
1539 | #define PI_FUNCTION_CNTRL_M_DMA 0x80 | ||
1526 | 1540 | ||
1527 | /* Define the bits in the slot control register. */ | 1541 | /* Define the bits in the slot control register. */ |
1528 | 1542 | ||
@@ -1540,6 +1554,10 @@ typedef union | |||
1540 | #define PI_BURST_HOLDOFF_V_RESERVED 1 | 1554 | #define PI_BURST_HOLDOFF_V_RESERVED 1 |
1541 | #define PI_BURST_HOLDOFF_V_MEM_MAP 0 | 1555 | #define PI_BURST_HOLDOFF_V_MEM_MAP 0 |
1542 | 1556 | ||
1557 | /* Define the implicit mask of the Memory Address Mask Register. */ | ||
1558 | |||
1559 | #define PI_MEM_ADD_MASK_M 0x3ff | ||
1560 | |||
1543 | /* | 1561 | /* |
1544 | * Define the fields in the IO Compare registers. | 1562 | * Define the fields in the IO Compare registers. |
1545 | * The driver must initialize the slot field with the slot ID shifted by the | 1563 | * The driver must initialize the slot field with the slot ID shifted by the |
@@ -1577,6 +1595,7 @@ typedef union | |||
1577 | #define DEFEA_PROD_ID_1 0x0130A310 /* DEC product 300, rev 1 */ | 1595 | #define DEFEA_PROD_ID_1 0x0130A310 /* DEC product 300, rev 1 */ |
1578 | #define DEFEA_PROD_ID_2 0x0230A310 /* DEC product 300, rev 2 */ | 1596 | #define DEFEA_PROD_ID_2 0x0230A310 /* DEC product 300, rev 2 */ |
1579 | #define DEFEA_PROD_ID_3 0x0330A310 /* DEC product 300, rev 3 */ | 1597 | #define DEFEA_PROD_ID_3 0x0330A310 /* DEC product 300, rev 3 */ |
1598 | #define DEFEA_PROD_ID_4 0x0430A310 /* DEC product 300, rev 4 */ | ||
1580 | 1599 | ||
1581 | /**********************************************/ | 1600 | /**********************************************/ |
1582 | /* Digital PFI Specification v1.0 Definitions */ | 1601 | /* Digital PFI Specification v1.0 Definitions */ |
@@ -1633,12 +1652,6 @@ typedef union | |||
1633 | #define PFI_STATUS_V_FIFO_EMPTY 1 | 1652 | #define PFI_STATUS_V_FIFO_EMPTY 1 |
1634 | #define PFI_STATUS_V_DMA_IN_PROGRESS 0 | 1653 | #define PFI_STATUS_V_DMA_IN_PROGRESS 0 |
1635 | 1654 | ||
1636 | #define DFX_MAX_EISA_SLOTS 16 /* maximum number of EISA slots to scan */ | ||
1637 | #define DFX_MAX_NUM_BOARDS 8 /* maximum number of adapters supported */ | ||
1638 | |||
1639 | #define DFX_BUS_TYPE_PCI 0 /* type code for DEC FDDIcontroller/PCI */ | ||
1640 | #define DFX_BUS_TYPE_EISA 1 /* type code for DEC FDDIcontroller/EISA */ | ||
1641 | |||
1642 | #define DFX_FC_PRH2_PRH1_PRH0 0x54003820 /* Packet Request Header bytes + FC */ | 1655 | #define DFX_FC_PRH2_PRH1_PRH0 0x54003820 /* Packet Request Header bytes + FC */ |
1643 | #define DFX_PRH0_BYTE 0x20 /* Packet Request Header byte 0 */ | 1656 | #define DFX_PRH0_BYTE 0x20 /* Packet Request Header byte 0 */ |
1644 | #define DFX_PRH1_BYTE 0x38 /* Packet Request Header byte 1 */ | 1657 | #define DFX_PRH1_BYTE 0x38 /* Packet Request Header byte 1 */ |
@@ -1756,10 +1769,11 @@ typedef struct DFX_board_tag | |||
1756 | /* Store device, bus-specific, and parameter information for this adapter */ | 1769 | /* Store device, bus-specific, and parameter information for this adapter */ |
1757 | 1770 | ||
1758 | struct net_device *dev; /* pointer to device structure */ | 1771 | struct net_device *dev; /* pointer to device structure */ |
1759 | struct net_device *next; | 1772 | union { |
1760 | u32 bus_type; /* bus type (0 == PCI, 1 == EISA) */ | 1773 | void __iomem *mem; |
1761 | u16 base_addr; /* base I/O address (same as dev->base_addr) */ | 1774 | int port; |
1762 | struct pci_dev * pci_dev; | 1775 | } base; /* base address */ |
1776 | struct device *bus_dev; | ||
1763 | u32 full_duplex_enb; /* FDDI Full Duplex enable (1 == on, 2 == off) */ | 1777 | u32 full_duplex_enb; /* FDDI Full Duplex enable (1 == on, 2 == off) */ |
1764 | u32 req_ttrt; /* requested TTRT value (in 80ns units) */ | 1778 | u32 req_ttrt; /* requested TTRT value (in 80ns units) */ |
1765 | u32 burst_size; /* adapter burst size (enumerated) */ | 1779 | u32 burst_size; /* adapter burst size (enumerated) */ |
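
The defxx.h hunk above replaces the old base_addr/bus_type/pci_dev fields with a bus_dev pointer and a mem/port union, so one board structure can describe either an MMIO-mapped or a port-mapped PDQ. A hypothetical sketch of how a CSR accessor can dispatch on that union; the driver's real dfx_port_read_long()/dfx_port_write_long() helpers may differ in detail:

    #include <linux/io.h>

    static inline u32 example_csr_read(DFX_board_t *bp, int use_mmio, int offset)
    {
            if (use_mmio)
                    return readl(bp->base.mem + offset);
            return inl(bp->base.port + offset);
    }

    static inline void example_csr_write(DFX_board_t *bp, int use_mmio,
                                         int offset, u32 data)
    {
            if (use_mmio)
                    writel(data, bp->base.mem + offset);
            else
                    outl(data, bp->base.port + offset);
    }
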
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c index c62d9c6363c6..b2b0a96218ca 100644 --- a/drivers/net/e2100.c +++ b/drivers/net/e2100.c | |||
@@ -355,8 +355,7 @@ e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring | |||
355 | 355 | ||
356 | mem_on(ioaddr, shared_mem, (ring_offset>>8)); | 356 | mem_on(ioaddr, shared_mem, (ring_offset>>8)); |
357 | 357 | ||
358 | /* Packet is always in one chunk -- we can copy + cksum. */ | 358 | memcpy_fromio(skb->data, ei_status.mem + (ring_offset & 0xff), count); |
359 | eth_io_copy_and_sum(skb, ei_status.mem + (ring_offset & 0xff), count, 0); | ||
360 | 359 | ||
361 | mem_off(ioaddr); | 360 | mem_off(ioaddr); |
362 | } | 361 | } |
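
This and the following SMC/ES3210 hunks drop the old eth_io_copy_and_sum() helper, which copied from the shared-memory window and checksummed in one pass, in favour of a plain memcpy_fromio(); checksumming is left to the normal receive path. A sketch of the ring copy with wrap-around handling, as done in these block_input routines (names and layout are illustrative):

    #include <linux/io.h>
    #include <linux/skbuff.h>

    static void example_block_input(struct sk_buff *skb, void __iomem *ring,
                                    unsigned int ring_size, unsigned int offset,
                                    int count)
    {
            if (offset + count > ring_size) {
                    /* Frame wraps past the end of the on-board ring. */
                    int semi_count = ring_size - offset;

                    memcpy_fromio(skb->data, ring + offset, semi_count);
                    memcpy_fromio(skb->data + semi_count, ring,
                                  count - semi_count);
            } else {
                    memcpy_fromio(skb->data, ring + offset, count);
            }
    }
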
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c index 2d2ea94a00bb..822e5bfd1a71 100644 --- a/drivers/net/es3210.c +++ b/drivers/net/es3210.c | |||
@@ -375,7 +375,7 @@ static void es_block_input(struct net_device *dev, int count, struct sk_buff *sk | |||
375 | memcpy_fromio(skb->data + semi_count, ei_status.mem, count); | 375 | memcpy_fromio(skb->data + semi_count, ei_status.mem, count); |
376 | } else { | 376 | } else { |
377 | /* Packet is in one chunk. */ | 377 | /* Packet is in one chunk. */ |
378 | eth_io_copy_and_sum(skb, xfer_start, count, 0); | 378 | memcpy_fromio(skb->data, xfer_start, count); |
379 | } | 379 | } |
380 | } | 380 | } |
381 | 381 | ||
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index 24f6050fbf33..8ca57a0a4c11 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/skbuff.h> | 49 | #include <linux/skbuff.h> |
50 | #include <linux/platform_device.h> | 50 | #include <linux/platform_device.h> |
51 | #include <linux/dma-mapping.h> | 51 | #include <linux/dma-mapping.h> |
52 | #include <linux/bitrev.h> | ||
52 | 53 | ||
53 | #include <asm/bootinfo.h> | 54 | #include <asm/bootinfo.h> |
54 | #include <asm/system.h> | 55 | #include <asm/system.h> |
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index b3bf86422734..d98e53efa2ef 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -2780,7 +2780,6 @@ static const struct ethtool_ops mv643xx_ethtool_ops = { | |||
2780 | .get_link = mv643xx_eth_get_link, | 2780 | .get_link = mv643xx_eth_get_link, |
2781 | .get_sg = ethtool_op_get_sg, | 2781 | .get_sg = ethtool_op_get_sg, |
2782 | .set_sg = ethtool_op_set_sg, | 2782 | .set_sg = ethtool_op_set_sg, |
2783 | .get_strings = mv643xx_get_strings, | ||
2784 | .get_stats_count = mv643xx_get_stats_count, | 2783 | .get_stats_count = mv643xx_get_stats_count, |
2785 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | 2784 | .get_ethtool_stats = mv643xx_get_ethtool_stats, |
2786 | .get_strings = mv643xx_get_strings, | 2785 | .get_strings = mv643xx_get_strings, |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 577babd4c938..5598d86380b4 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -2016,7 +2016,7 @@ static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff, | |||
2016 | if (!skb) | 2016 | if (!skb) |
2017 | goto err_out; | 2017 | goto err_out; |
2018 | 2018 | ||
2019 | skb_reserve(skb, (align - 1) & (u32)skb->data); | 2019 | skb_reserve(skb, (align - 1) & (unsigned long)skb->data); |
2020 | *sk_buff = skb; | 2020 | *sk_buff = skb; |
2021 | 2021 | ||
2022 | mapping = pci_map_single(pdev, skb->data, rx_buf_sz, | 2022 | mapping = pci_map_single(pdev, skb->data, rx_buf_sz, |
@@ -2487,7 +2487,7 @@ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size, | |||
2487 | 2487 | ||
2488 | skb = dev_alloc_skb(pkt_size + align); | 2488 | skb = dev_alloc_skb(pkt_size + align); |
2489 | if (skb) { | 2489 | if (skb) { |
2490 | skb_reserve(skb, (align - 1) & (u32)skb->data); | 2490 | skb_reserve(skb, (align - 1) & (unsigned long)skb->data); |
2491 | eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0); | 2491 | eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0); |
2492 | *sk_buff = skb; | 2492 | *sk_buff = skb; |
2493 | rtl8169_mark_to_asic(desc, rx_buf_sz); | 2493 | rtl8169_mark_to_asic(desc, rx_buf_sz); |
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 639fbc0f16f3..8646b64994ab 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -7298,7 +7298,7 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro) | |||
7298 | { | 7298 | { |
7299 | struct iphdr *ip = lro->iph; | 7299 | struct iphdr *ip = lro->iph; |
7300 | struct tcphdr *tcp = lro->tcph; | 7300 | struct tcphdr *tcp = lro->tcph; |
7301 | u16 nchk; | 7301 | __sum16 nchk; |
7302 | struct stat_block *statinfo = sp->mac_control.stats_info; | 7302 | struct stat_block *statinfo = sp->mac_control.stats_info; |
7303 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); | 7303 | DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); |
7304 | 7304 | ||
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index a5e1a513deb5..0de0c65f945a 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -727,12 +727,12 @@ struct lro { | |||
727 | struct iphdr *iph; | 727 | struct iphdr *iph; |
728 | struct tcphdr *tcph; | 728 | struct tcphdr *tcph; |
729 | u32 tcp_next_seq; | 729 | u32 tcp_next_seq; |
730 | u32 tcp_ack; | 730 | __be32 tcp_ack; |
731 | int total_len; | 731 | int total_len; |
732 | int frags_len; | 732 | int frags_len; |
733 | int sg_num; | 733 | int sg_num; |
734 | int in_use; | 734 | int in_use; |
735 | u16 window; | 735 | __be16 window; |
736 | u32 cur_tsval; | 736 | u32 cur_tsval; |
737 | u32 cur_tsecr; | 737 | u32 cur_tsecr; |
738 | u8 saw_ts; | 738 | u8 saw_ts; |
@@ -1005,7 +1005,7 @@ static int s2io_set_swapper(struct s2io_nic * sp); | |||
1005 | static void s2io_card_down(struct s2io_nic *nic); | 1005 | static void s2io_card_down(struct s2io_nic *nic); |
1006 | static int s2io_card_up(struct s2io_nic *nic); | 1006 | static int s2io_card_up(struct s2io_nic *nic); |
1007 | static int get_xena_rev_id(struct pci_dev *pdev); | 1007 | static int get_xena_rev_id(struct pci_dev *pdev); |
1008 | static int wait_for_cmd_complete(void *addr, u64 busy_bit); | 1008 | static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit); |
1009 | static int s2io_add_isr(struct s2io_nic * sp); | 1009 | static int s2io_add_isr(struct s2io_nic * sp); |
1010 | static void s2io_rem_isr(struct s2io_nic * sp); | 1010 | static void s2io_rem_isr(struct s2io_nic * sp); |
1011 | 1011 | ||
diff --git a/drivers/net/slip.c b/drivers/net/slip.c index a0806d262fc6..2f4b1de7a2b4 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c | |||
@@ -1343,15 +1343,12 @@ static int __init slip_init(void) | |||
1343 | printk(KERN_INFO "SLIP linefill/keepalive option.\n"); | 1343 | printk(KERN_INFO "SLIP linefill/keepalive option.\n"); |
1344 | #endif | 1344 | #endif |
1345 | 1345 | ||
1346 | slip_devs = kmalloc(sizeof(struct net_device *)*slip_maxdev, GFP_KERNEL); | 1346 | slip_devs = kzalloc(sizeof(struct net_device *)*slip_maxdev, GFP_KERNEL); |
1347 | if (!slip_devs) { | 1347 | if (!slip_devs) { |
1348 | printk(KERN_ERR "SLIP: Can't allocate slip devices array! Uaargh! (-> No SLIP available)\n"); | 1348 | printk(KERN_ERR "SLIP: Can't allocate slip devices array! Uaargh! (-> No SLIP available)\n"); |
1349 | return -ENOMEM; | 1349 | return -ENOMEM; |
1350 | } | 1350 | } |
1351 | 1351 | ||
1352 | /* Clear the pointer array, we allocate devices when we need them */ | ||
1353 | memset(slip_devs, 0, sizeof(struct net_device *)*slip_maxdev); | ||
1354 | |||
1355 | /* Fill in our line protocol discipline, and register it */ | 1352 | /* Fill in our line protocol discipline, and register it */ |
1356 | if ((status = tty_register_ldisc(N_SLIP, &sl_ldisc)) != 0) { | 1353 | if ((status = tty_register_ldisc(N_SLIP, &sl_ldisc)) != 0) { |
1357 | printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status); | 1354 | printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status); |
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c index 7122932eac90..ae1ae343beed 100644 --- a/drivers/net/smc-mca.c +++ b/drivers/net/smc-mca.c | |||
@@ -482,8 +482,7 @@ static void ultramca_block_input(struct net_device *dev, int count, struct sk_bu | |||
482 | count -= semi_count; | 482 | count -= semi_count; |
483 | memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); | 483 | memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); |
484 | } else { | 484 | } else { |
485 | /* Packet is in one chunk -- we can copy + cksum. */ | 485 | memcpy_fromio(skb->data, xfer_start, count); |
486 | eth_io_copy_and_sum(skb, xfer_start, count, 0); | ||
487 | } | 486 | } |
488 | 487 | ||
489 | } | 488 | } |
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c index d70bc9795346..a52b22d7db65 100644 --- a/drivers/net/smc-ultra.c +++ b/drivers/net/smc-ultra.c | |||
@@ -454,8 +454,7 @@ ultra_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ri | |||
454 | count -= semi_count; | 454 | count -= semi_count; |
455 | memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); | 455 | memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); |
456 | } else { | 456 | } else { |
457 | /* Packet is in one chunk -- we can copy + cksum. */ | 457 | memcpy_fromio(skb->data, xfer_start, count); |
458 | eth_io_copy_and_sum(skb, xfer_start, count, 0); | ||
459 | } | 458 | } |
460 | 459 | ||
461 | outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */ | 460 | outb(0x00, dev->base_addr - ULTRA_NIC_OFFSET); /* Disable memory. */ |
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c index 2c5319c62fa5..88a30e56c64c 100644 --- a/drivers/net/smc-ultra32.c +++ b/drivers/net/smc-ultra32.c | |||
@@ -395,8 +395,7 @@ static void ultra32_block_input(struct net_device *dev, | |||
395 | memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); | 395 | memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); |
396 | } | 396 | } |
397 | } else { | 397 | } else { |
398 | /* Packet is in one chunk -- we can copy + cksum. */ | 398 | memcpy_fromio(skb->data, xfer_start, count); |
399 | eth_io_copy_and_sum(skb, xfer_start, count, 0); | ||
400 | } | 399 | } |
401 | } | 400 | } |
402 | 401 | ||
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index bf6ff39e02bb..64ed8ff5b03a 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -1907,7 +1907,7 @@ spider_net_stop(struct net_device *netdev) | |||
1907 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); | 1907 | spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0); |
1908 | 1908 | ||
1909 | /* free_irq(netdev->irq, netdev);*/ | 1909 | /* free_irq(netdev->irq, netdev);*/ |
1910 | free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev); | 1910 | free_irq(to_pci_dev(netdev->dev.parent)->irq, netdev); |
1911 | 1911 | ||
1912 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, | 1912 | spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR, |
1913 | SPIDER_NET_DMA_TX_FEND_VALUE); | 1913 | SPIDER_NET_DMA_TX_FEND_VALUE); |
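struct net_device no longer exposes a class_dev; the embedded device's parent is the bus device, so the PCI device behind a netdev is reached through dev.parent instead. A sketch of the lookup, assuming the driver set the parent with SET_NETDEV_DEV() at probe time, as PCI network drivers normally do:

/* netdev->dev.parent was set via SET_NETDEV_DEV(netdev, &pdev->dev). */
struct pci_dev *pdev = to_pci_dev(netdev->dev.parent);

free_irq(pdev->irq, netdev);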
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 135c0987deae..e136bae61970 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -3380,7 +3380,7 @@ next_pkt: | |||
3380 | } | 3380 | } |
3381 | next_pkt_nopost: | 3381 | next_pkt_nopost: |
3382 | sw_idx++; | 3382 | sw_idx++; |
3383 | sw_idx %= TG3_RX_RCB_RING_SIZE(tp); | 3383 | sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1); |
3384 | 3384 | ||
3385 | /* Refresh hw_idx to see if there is new work */ | 3385 | /* Refresh hw_idx to see if there is new work */ |
3386 | if (sw_idx == hw_idx) { | 3386 | if (sw_idx == hw_idx) { |
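Replacing the modulo with a mask avoids a division in the receive hot path; `idx & (size - 1)` only wraps correctly while TG3_RX_RCB_RING_SIZE() is a power of two, which is the implicit assumption here. A small worked example:

unsigned int size = 512;		/* must be a power of two */
unsigned int idx  = 513;		/* one past the end of the ring */
unsigned int a = idx % size;		/* 1 */
unsigned int b = idx & (size - 1);	/* 513 & 0x1ff = 1: same wrap, no division */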
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index abb8611c5a91..31c97a6591a4 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -1709,75 +1709,13 @@ static void adjust_link(struct net_device *dev) | |||
1709 | if (mii_info->speed != ugeth->oldspeed) { | 1709 | if (mii_info->speed != ugeth->oldspeed) { |
1710 | switch (mii_info->speed) { | 1710 | switch (mii_info->speed) { |
1711 | case 1000: | 1711 | case 1000: |
1712 | #ifdef CONFIG_PPC_MPC836x | 1712 | ugeth->ug_info->enet_interface = ENET_1000_RGMII; |
1713 | /* FIXME: This code is for 100Mbs BUG fixing, | ||
1714 | remove this when it is fixed!!! */ | ||
1715 | if (ugeth->ug_info->enet_interface == | ||
1716 | ENET_1000_GMII) | ||
1717 | /* Run the commands which initialize the PHY */ | ||
1718 | { | ||
1719 | tempval = | ||
1720 | (u32) mii_info->mdio_read(ugeth-> | ||
1721 | dev, mii_info->mii_id, 0x1b); | ||
1722 | tempval |= 0x000f; | ||
1723 | mii_info->mdio_write(ugeth->dev, | ||
1724 | mii_info->mii_id, 0x1b, | ||
1725 | (u16) tempval); | ||
1726 | tempval = | ||
1727 | (u32) mii_info->mdio_read(ugeth-> | ||
1728 | dev, mii_info->mii_id, | ||
1729 | MII_BMCR); | ||
1730 | mii_info->mdio_write(ugeth->dev, | ||
1731 | mii_info->mii_id, MII_BMCR, | ||
1732 | (u16) (tempval | BMCR_RESET)); | ||
1733 | } else if (ugeth->ug_info->enet_interface == | ||
1734 | ENET_1000_RGMII) | ||
1735 | /* Run the commands which initialize the PHY */ | ||
1736 | { | ||
1737 | tempval = | ||
1738 | (u32) mii_info->mdio_read(ugeth-> | ||
1739 | dev, mii_info->mii_id, 0x1b); | ||
1740 | tempval = (tempval & ~0x000f) | 0x000b; | ||
1741 | mii_info->mdio_write(ugeth->dev, | ||
1742 | mii_info->mii_id, 0x1b, | ||
1743 | (u16) tempval); | ||
1744 | tempval = | ||
1745 | (u32) mii_info->mdio_read(ugeth-> | ||
1746 | dev, mii_info->mii_id, | ||
1747 | MII_BMCR); | ||
1748 | mii_info->mdio_write(ugeth->dev, | ||
1749 | mii_info->mii_id, MII_BMCR, | ||
1750 | (u16) (tempval | BMCR_RESET)); | ||
1751 | } | ||
1752 | msleep(4000); | ||
1753 | #endif /* CONFIG_MPC8360 */ | ||
1754 | adjust_enet_interface(ugeth); | ||
1755 | break; | 1713 | break; |
1756 | case 100: | 1714 | case 100: |
1757 | case 10: | ||
1758 | #ifdef CONFIG_PPC_MPC836x | ||
1759 | /* FIXME: This code is for 100Mbs BUG fixing, | ||
1760 | remove this lines when it will be fixed!!! */ | ||
1761 | ugeth->ug_info->enet_interface = ENET_100_RGMII; | 1715 | ugeth->ug_info->enet_interface = ENET_100_RGMII; |
1762 | tempval = | 1716 | break; |
1763 | (u32) mii_info->mdio_read(ugeth->dev, | 1717 | case 10: |
1764 | mii_info->mii_id, | 1718 | ugeth->ug_info->enet_interface = ENET_10_RGMII; |
1765 | 0x1b); | ||
1766 | tempval = (tempval & ~0x000f) | 0x000b; | ||
1767 | mii_info->mdio_write(ugeth->dev, | ||
1768 | mii_info->mii_id, 0x1b, | ||
1769 | (u16) tempval); | ||
1770 | tempval = | ||
1771 | (u32) mii_info->mdio_read(ugeth->dev, | ||
1772 | mii_info->mii_id, | ||
1773 | MII_BMCR); | ||
1774 | mii_info->mdio_write(ugeth->dev, | ||
1775 | mii_info->mii_id, MII_BMCR, | ||
1776 | (u16) (tempval | | ||
1777 | BMCR_RESET)); | ||
1778 | msleep(4000); | ||
1779 | #endif /* CONFIG_MPC8360 */ | ||
1780 | adjust_enet_interface(ugeth); | ||
1781 | break; | 1719 | break; |
1782 | default: | 1720 | default: |
1783 | ugeth_warn | 1721 | ugeth_warn |
@@ -1785,6 +1723,7 @@ remove this lines when it will be fixed!!! */ | |||
1785 | dev->name, mii_info->speed); | 1723 | dev->name, mii_info->speed); |
1786 | break; | 1724 | break; |
1787 | } | 1725 | } |
1726 | adjust_enet_interface(ugeth); | ||
1788 | 1727 | ||
1789 | ugeth_info("%s: Speed %dBT", dev->name, | 1728 | ugeth_info("%s: Speed %dBT", dev->name, |
1790 | mii_info->speed); | 1729 | mii_info->speed); |
@@ -4133,6 +4072,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
4133 | static int mii_mng_configured = 0; | 4072 | static int mii_mng_configured = 0; |
4134 | const phandle *ph; | 4073 | const phandle *ph; |
4135 | const unsigned int *prop; | 4074 | const unsigned int *prop; |
4075 | const void *mac_addr; | ||
4136 | 4076 | ||
4137 | ugeth_vdbg("%s: IN", __FUNCTION__); | 4077 | ugeth_vdbg("%s: IN", __FUNCTION__); |
4138 | 4078 | ||
@@ -4258,7 +4198,12 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma | |||
4258 | 4198 | ||
4259 | ugeth->ug_info = ug_info; | 4199 | ugeth->ug_info = ug_info; |
4260 | ugeth->dev = dev; | 4200 | ugeth->dev = dev; |
4261 | memcpy(dev->dev_addr, get_property(np, "mac-address", NULL), 6); | 4201 | |
4202 | mac_addr = get_property(np, "mac-address", NULL); | ||
4203 | if (mac_addr == NULL) | ||
4204 | mac_addr = get_property(np, "local-mac-address", NULL); | ||
4205 | if (mac_addr) | ||
4206 | memcpy(dev->dev_addr, mac_addr, 6); | ||
4262 | 4207 | ||
4263 | return 0; | 4208 | return 0; |
4264 | } | 4209 | } |
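The probe above used to memcpy() straight from the "mac-address" property and would dereference NULL on device trees that only carry the older "local-mac-address" name. The new lookup tries both and copies only when one of them exists; roughly:

/* Prefer "mac-address", fall back to "local-mac-address"; leave the
 * device address untouched if the device tree provides neither. */
mac_addr = get_property(np, "mac-address", NULL);
if (!mac_addr)
	mac_addr = get_property(np, "local-mac-address", NULL);
if (mac_addr)
	memcpy(dev->dev_addr, mac_addr, ETH_ALEN);	/* ETH_ALEN == 6 */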
diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c index 3c86592ce03c..6fda6d88be49 100644 --- a/drivers/net/ucc_geth_phy.c +++ b/drivers/net/ucc_geth_phy.c | |||
@@ -376,6 +376,8 @@ static int marvell_init(struct ugeth_mii_info *mii_info) | |||
376 | ugphy_vdbg("%s: IN", __FUNCTION__); | 376 | ugphy_vdbg("%s: IN", __FUNCTION__); |
377 | 377 | ||
378 | ucc_geth_phy_write(mii_info, 0x14, 0x0cd2); | 378 | ucc_geth_phy_write(mii_info, 0x14, 0x0cd2); |
379 | ucc_geth_phy_write(mii_info, 0x1b, | ||
380 | (ucc_geth_phy_read(mii_info, 0x1b) & ~0x000f) | 0x000b); | ||
379 | ucc_geth_phy_write(mii_info, MII_BMCR, | 381 | ucc_geth_phy_write(mii_info, MII_BMCR, |
380 | ucc_geth_phy_read(mii_info, MII_BMCR) | BMCR_RESET); | 382 | ucc_geth_phy_read(mii_info, MII_BMCR) | BMCR_RESET); |
381 | msleep(4000); | 383 | msleep(4000); |
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index 79b2d5454d6b..bc156b51678a 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c | |||
@@ -101,8 +101,8 @@ typedef struct port_s { | |||
101 | typedef struct card_s { | 101 | typedef struct card_s { |
102 | int type; /* RSV, X21, etc. */ | 102 | int type; /* RSV, X21, etc. */ |
103 | int n_ports; /* 1 or 2 ports */ | 103 | int n_ports; /* 1 or 2 ports */ |
104 | u8* __iomem rambase; /* buffer memory base (virtual) */ | 104 | u8 __iomem *rambase; /* buffer memory base (virtual) */ |
105 | u8* __iomem scabase; /* SCA memory base (virtual) */ | 105 | u8 __iomem *scabase; /* SCA memory base (virtual) */ |
106 | plx9050 __iomem *plxbase; /* PLX registers memory base (virtual) */ | 106 | plx9050 __iomem *plxbase; /* PLX registers memory base (virtual) */ |
107 | u32 init_ctrl_value; /* Saved value - 9050 bug workaround */ | 107 | u32 init_ctrl_value; /* Saved value - 9050 bug workaround */ |
108 | u16 rx_ring_buffers; /* number of buffers in a ring */ | 108 | u16 rx_ring_buffers; /* number of buffers in a ring */ |
@@ -134,7 +134,7 @@ typedef struct card_s { | |||
134 | static void pc300_set_iface(port_t *port) | 134 | static void pc300_set_iface(port_t *port) |
135 | { | 135 | { |
136 | card_t *card = port->card; | 136 | card_t *card = port->card; |
137 | u32* init_ctrl = &card->plxbase->init_ctrl; | 137 | u32 __iomem * init_ctrl = &card->plxbase->init_ctrl; |
138 | u16 msci = get_msci(port); | 138 | u16 msci = get_msci(port); |
139 | u8 rxs = port->rxs & CLK_BRG_MASK; | 139 | u8 rxs = port->rxs & CLK_BRG_MASK; |
140 | u8 txs = port->txs & CLK_BRG_MASK; | 140 | u8 txs = port->txs & CLK_BRG_MASK; |
@@ -393,7 +393,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev, | |||
393 | 393 | ||
394 | /* PLX PCI 9050 workaround for local configuration register read bug */ | 394 | /* PLX PCI 9050 workaround for local configuration register read bug */ |
395 | pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys); | 395 | pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys); |
396 | card->init_ctrl_value = readl(&((plx9050*)card->scabase)->init_ctrl); | 396 | card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl); |
397 | pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys); | 397 | pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys); |
398 | 398 | ||
399 | /* Reset PLX */ | 399 | /* Reset PLX */ |
@@ -519,10 +519,10 @@ static struct pci_device_id pc300_pci_tbl[] __devinitdata = { | |||
519 | 519 | ||
520 | 520 | ||
521 | static struct pci_driver pc300_pci_driver = { | 521 | static struct pci_driver pc300_pci_driver = { |
522 | name: "PC300", | 522 | .name = "PC300", |
523 | id_table: pc300_pci_tbl, | 523 | .id_table = pc300_pci_tbl, |
524 | probe: pc300_pci_init_one, | 524 | .probe = pc300_pci_init_one, |
525 | remove: pc300_pci_remove_one, | 525 | .remove = pc300_pci_remove_one, |
526 | }; | 526 | }; |
527 | 527 | ||
528 | 528 | ||
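The `name:` form in pc300too was the old GCC-only initializer extension; C99 designated initializers (`.field = value`) mean the same thing, are order-independent, and are the kernel's preferred spelling. Side by side, using a hypothetical file_operations instance (example_open is a placeholder):

/* Old GNU extension: */
static struct file_operations example_fops_old = {
	owner:	THIS_MODULE,
	open:	example_open,
};

/* C99 designated initializers, the preferred form: */
static struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= example_open,
};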
diff --git a/drivers/net/wd.c b/drivers/net/wd.c index 7f38012b9c92..a0326818ff2f 100644 --- a/drivers/net/wd.c +++ b/drivers/net/wd.c | |||
@@ -433,7 +433,7 @@ wd_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_ | |||
433 | memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); | 433 | memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count); |
434 | } else { | 434 | } else { |
435 | /* Packet is in one chunk -- we can copy + cksum. */ | 435 | /* Packet is in one chunk -- we can copy + cksum. */ |
436 | eth_io_copy_and_sum(skb, xfer_start, count, 0); | 436 | memcpy_fromio(skb->data, xfer_start, count); |
437 | } | 437 | } |
438 | 438 | ||
439 | /* Turn off 16 bit access so that reboot works. ISA brain-damage */ | 439 | /* Turn off 16 bit access so that reboot works. ISA brain-damage */ |
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c index bbf025874d0c..4dbef0762376 100644 --- a/drivers/pcmcia/m32r_pcc.c +++ b/drivers/pcmcia/m32r_pcc.c | |||
@@ -722,7 +722,7 @@ static int __init init_m32r_pcc(void) | |||
722 | /* Set up interrupt handler(s) */ | 722 | /* Set up interrupt handler(s) */ |
723 | 723 | ||
724 | for (i = 0 ; i < pcc_sockets ; i++) { | 724 | for (i = 0 ; i < pcc_sockets ; i++) { |
725 | socket[i].socket.dev.dev = &pcc_device.dev; | 725 | socket[i].socket.dev.parent = &pcc_device.dev; |
726 | socket[i].socket.ops = &pcc_operations; | 726 | socket[i].socket.ops = &pcc_operations; |
727 | socket[i].socket.resource_ops = &pccard_static_ops; | 727 | socket[i].socket.resource_ops = &pccard_static_ops; |
728 | socket[i].socket.owner = THIS_MODULE; | 728 | socket[i].socket.owner = THIS_MODULE; |
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 94d3df62a5fa..82f2ac87ccd4 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -305,7 +305,7 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file, | |||
305 | 305 | ||
306 | case RTC_IRQP_READ: | 306 | case RTC_IRQP_READ: |
307 | if (ops->irq_set_freq) | 307 | if (ops->irq_set_freq) |
308 | err = put_user(rtc->irq_freq, (unsigned long *) arg); | 308 | err = put_user(rtc->irq_freq, (unsigned long __user *)uarg); |
309 | break; | 309 | break; |
310 | 310 | ||
311 | case RTC_IRQP_SET: | 311 | case RTC_IRQP_SET: |
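User-space pointers carry the __user annotation so sparse can flag direct dereferences; put_user() and copy_to_user() are the sanctioned accessors, and in the rtc-dev hunk the ioctl argument is already available as the annotated `uarg`. The pattern in isolation, with a hypothetical helper name:

/* Hand a value back through a user-space pointer; put_user() returns
 * 0 on success or -EFAULT if the address is bad. */
static int report_freq(unsigned long freq, void __user *uarg)
{
	return put_user(freq, (unsigned long __user *)uarg);
}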
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c index 4b72b8ef5d66..038118bbfaea 100644 --- a/drivers/rtc/rtc-pcf8563.c +++ b/drivers/rtc/rtc-pcf8563.c | |||
@@ -53,6 +53,25 @@ I2C_CLIENT_INSMOD; | |||
53 | #define PCF8563_SC_LV 0x80 /* low voltage */ | 53 | #define PCF8563_SC_LV 0x80 /* low voltage */ |
54 | #define PCF8563_MO_C 0x80 /* century */ | 54 | #define PCF8563_MO_C 0x80 /* century */ |
55 | 55 | ||
56 | struct pcf8563 { | ||
57 | struct i2c_client client; | ||
58 | /* | ||
59 | * The meaning of MO_C bit varies by the chip type. | ||
60 | * From PCF8563 datasheet: this bit is toggled when the years | ||
61 | * register overflows from 99 to 00 | ||
62 | * 0 indicates the century is 20xx | ||
63 | * 1 indicates the century is 19xx | ||
64 | * From RTC8564 datasheet: this bit indicates change of | ||
65 | * century. When the year digit data overflows from 99 to 00, | ||
66 | * this bit is set. By presetting it to 0 while still in the | ||
67 | * 20th century, it will be set in year 2000, ... | ||
68 | * There seems no reliable way to know how the system use this | ||
69 | * bit. So let's do it heuristically, assuming we are live in | ||
70 | * 1970...2069. | ||
71 | */ | ||
72 | int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */ | ||
73 | }; | ||
74 | |||
56 | static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind); | 75 | static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind); |
57 | static int pcf8563_detach(struct i2c_client *client); | 76 | static int pcf8563_detach(struct i2c_client *client); |
58 | 77 | ||
@@ -62,6 +81,7 @@ static int pcf8563_detach(struct i2c_client *client); | |||
62 | */ | 81 | */ |
63 | static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm) | 82 | static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm) |
64 | { | 83 | { |
84 | struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client); | ||
65 | unsigned char buf[13] = { PCF8563_REG_ST1 }; | 85 | unsigned char buf[13] = { PCF8563_REG_ST1 }; |
66 | 86 | ||
67 | struct i2c_msg msgs[] = { | 87 | struct i2c_msg msgs[] = { |
@@ -94,8 +114,12 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
94 | tm->tm_mday = BCD2BIN(buf[PCF8563_REG_DM] & 0x3F); | 114 | tm->tm_mday = BCD2BIN(buf[PCF8563_REG_DM] & 0x3F); |
95 | tm->tm_wday = buf[PCF8563_REG_DW] & 0x07; | 115 | tm->tm_wday = buf[PCF8563_REG_DW] & 0x07; |
96 | tm->tm_mon = BCD2BIN(buf[PCF8563_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */ | 116 | tm->tm_mon = BCD2BIN(buf[PCF8563_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */ |
97 | tm->tm_year = BCD2BIN(buf[PCF8563_REG_YR]) | 117 | tm->tm_year = BCD2BIN(buf[PCF8563_REG_YR]); |
98 | + (buf[PCF8563_REG_MO] & PCF8563_MO_C ? 0 : 100); | 118 | if (tm->tm_year < 70) |
119 | tm->tm_year += 100; /* assume we are in 1970...2069 */ | ||
120 | /* detect the polarity heuristically. see note above. */ | ||
121 | pcf8563->c_polarity = (buf[PCF8563_REG_MO] & PCF8563_MO_C) ? | ||
122 | (tm->tm_year >= 100) : (tm->tm_year < 100); | ||
99 | 123 | ||
100 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " | 124 | dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, " |
101 | "mday=%d, mon=%d, year=%d, wday=%d\n", | 125 | "mday=%d, mon=%d, year=%d, wday=%d\n", |
@@ -114,6 +138,7 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
114 | 138 | ||
115 | static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm) | 139 | static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm) |
116 | { | 140 | { |
141 | struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client); | ||
117 | int i, err; | 142 | int i, err; |
118 | unsigned char buf[9]; | 143 | unsigned char buf[9]; |
119 | 144 | ||
@@ -135,7 +160,7 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm) | |||
135 | 160 | ||
136 | /* year and century */ | 161 | /* year and century */ |
137 | buf[PCF8563_REG_YR] = BIN2BCD(tm->tm_year % 100); | 162 | buf[PCF8563_REG_YR] = BIN2BCD(tm->tm_year % 100); |
138 | if (tm->tm_year < 100) | 163 | if (pcf8563->c_polarity ? (tm->tm_year >= 100) : (tm->tm_year < 100)) |
139 | buf[PCF8563_REG_MO] |= PCF8563_MO_C; | 164 | buf[PCF8563_REG_MO] |= PCF8563_MO_C; |
140 | 165 | ||
141 | buf[PCF8563_REG_DW] = tm->tm_wday & 0x07; | 166 | buf[PCF8563_REG_DW] = tm->tm_wday & 0x07; |
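The PCF8563 and RTC8564 datasheets give the MO_C century flag opposite meanings, so the driver stops interpreting it directly: the two BCD year digits are mapped into a 1970..2069 window, and c_polarity records how this particular chip appears to use the flag so the write path can mirror it. A minimal sketch of the heuristic on its own, outside the driver:

/* Map the BCD year register into 1970..2069 and note the apparent MO_C
 * polarity: 1 means "MO_C set == 20xx" on this chip, 0 means the opposite. */
static int pcf8563_year_heuristic(unsigned int bcd_year, int mo_c_set,
				  int *c_polarity)
{
	int year = BCD2BIN(bcd_year);		/* 0..99 */

	if (year < 70)
		year += 100;			/* tm_year counts from 1900 */
	*c_polarity = mo_c_set ? (year >= 100) : (year < 100);
	return year;	/* e.g. 0x07 -> 107 (2007), 0x85 -> 85 (1985) */
}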
@@ -248,6 +273,7 @@ static struct i2c_driver pcf8563_driver = { | |||
248 | 273 | ||
249 | static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind) | 274 | static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind) |
250 | { | 275 | { |
276 | struct pcf8563 *pcf8563; | ||
251 | struct i2c_client *client; | 277 | struct i2c_client *client; |
252 | struct rtc_device *rtc; | 278 | struct rtc_device *rtc; |
253 | 279 | ||
@@ -260,11 +286,12 @@ static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind) | |||
260 | goto exit; | 286 | goto exit; |
261 | } | 287 | } |
262 | 288 | ||
263 | if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) { | 289 | if (!(pcf8563 = kzalloc(sizeof(struct pcf8563), GFP_KERNEL))) { |
264 | err = -ENOMEM; | 290 | err = -ENOMEM; |
265 | goto exit; | 291 | goto exit; |
266 | } | 292 | } |
267 | 293 | ||
294 | client = &pcf8563->client; | ||
268 | client->addr = address; | 295 | client->addr = address; |
269 | client->driver = &pcf8563_driver; | 296 | client->driver = &pcf8563_driver; |
270 | client->adapter = adapter; | 297 | client->adapter = adapter; |
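Embedding the i2c_client in a driver-private struct pcf8563 gives the century-polarity flag somewhere to live; code that only holds the client pointer walks back to the wrapper with container_of(), and it is the wrapper, not the embedded member, that gets kfree()d on the error and detach paths. The pattern reduced to its essentials, with hypothetical names:

struct wrapper {
	struct i2c_client client;	/* embedded by value, not a pointer */
	int private_state;
};

static struct wrapper *to_wrapper(struct i2c_client *client)
{
	/* Walk from the embedded member back to the enclosing structure. */
	return container_of(client, struct wrapper, client);
}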
@@ -301,7 +328,7 @@ exit_detach: | |||
301 | i2c_detach_client(client); | 328 | i2c_detach_client(client); |
302 | 329 | ||
303 | exit_kfree: | 330 | exit_kfree: |
304 | kfree(client); | 331 | kfree(pcf8563); |
305 | 332 | ||
306 | exit: | 333 | exit: |
307 | return err; | 334 | return err; |
@@ -309,6 +336,7 @@ exit: | |||
309 | 336 | ||
310 | static int pcf8563_detach(struct i2c_client *client) | 337 | static int pcf8563_detach(struct i2c_client *client) |
311 | { | 338 | { |
339 | struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client); | ||
312 | int err; | 340 | int err; |
313 | struct rtc_device *rtc = i2c_get_clientdata(client); | 341 | struct rtc_device *rtc = i2c_get_clientdata(client); |
314 | 342 | ||
@@ -318,7 +346,7 @@ static int pcf8563_detach(struct i2c_client *client) | |||
318 | if ((err = i2c_detach_client(client))) | 346 | if ((err = i2c_detach_client(client))) |
319 | return err; | 347 | return err; |
320 | 348 | ||
321 | kfree(client); | 349 | kfree(pcf8563); |
322 | 350 | ||
323 | return 0; | 351 | return 0; |
324 | } | 352 | } |
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index a138b1510093..3a1a958fb5f2 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Character device driver for reading z/VM *MONITOR service records. | 4 | * Character device driver for reading z/VM *MONITOR service records. |
5 | * | 5 | * |
6 | * Copyright (C) 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. | 6 | * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH. |
7 | * | 7 | * |
8 | * Author: Gerald Schaefer <geraldsc@de.ibm.com> | 8 | * Author: Gerald Schaefer <geraldsc@de.ibm.com> |
9 | */ | 9 | */ |
@@ -22,7 +22,7 @@ | |||
22 | #include <asm/ebcdic.h> | 22 | #include <asm/ebcdic.h> |
23 | #include <asm/extmem.h> | 23 | #include <asm/extmem.h> |
24 | #include <linux/poll.h> | 24 | #include <linux/poll.h> |
25 | #include "../net/iucv.h" | 25 | #include <net/iucv/iucv.h> |
26 | 26 | ||
27 | 27 | ||
28 | //#define MON_DEBUG /* Debug messages on/off */ | 28 | //#define MON_DEBUG /* Debug messages on/off */ |
@@ -50,14 +50,13 @@ static char mon_dcss_name[9] = "MONDCSS\0"; | |||
50 | struct mon_msg { | 50 | struct mon_msg { |
51 | u32 pos; | 51 | u32 pos; |
52 | u32 mca_offset; | 52 | u32 mca_offset; |
53 | iucv_MessagePending local_eib; | 53 | struct iucv_message msg; |
54 | char msglim_reached; | 54 | char msglim_reached; |
55 | char replied_msglim; | 55 | char replied_msglim; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | struct mon_private { | 58 | struct mon_private { |
59 | u16 pathid; | 59 | struct iucv_path *path; |
60 | iucv_handle_t iucv_handle; | ||
61 | struct mon_msg *msg_array[MON_MSGLIM]; | 60 | struct mon_msg *msg_array[MON_MSGLIM]; |
62 | unsigned int write_index; | 61 | unsigned int write_index; |
63 | unsigned int read_index; | 62 | unsigned int read_index; |
@@ -75,8 +74,6 @@ static unsigned long mon_dcss_end; | |||
75 | static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue); | 74 | static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue); |
76 | static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue); | 75 | static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue); |
77 | 76 | ||
78 | static u8 iucv_host[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; | ||
79 | |||
80 | static u8 user_data_connect[16] = { | 77 | static u8 user_data_connect[16] = { |
81 | /* Version code, must be 0x01 for shared mode */ | 78 | /* Version code, must be 0x01 for shared mode */ |
82 | 0x01, | 79 | 0x01, |
@@ -100,8 +97,7 @@ static u8 user_data_sever[16] = { | |||
100 | * Create the 8 bytes EBCDIC DCSS segment name from | 97 | * Create the 8 bytes EBCDIC DCSS segment name from |
101 | * an ASCII name, incl. padding | 98 | * an ASCII name, incl. padding |
102 | */ | 99 | */ |
103 | static inline void | 100 | static inline void dcss_mkname(char *ascii_name, char *ebcdic_name) |
104 | dcss_mkname(char *ascii_name, char *ebcdic_name) | ||
105 | { | 101 | { |
106 | int i; | 102 | int i; |
107 | 103 | ||
@@ -119,8 +115,7 @@ dcss_mkname(char *ascii_name, char *ebcdic_name) | |||
119 | * print appropriate error message for segment_load()/segment_type() | 115 | * print appropriate error message for segment_load()/segment_type() |
120 | * return code | 116 | * return code |
121 | */ | 117 | */ |
122 | static void | 118 | static void mon_segment_warn(int rc, char* seg_name) |
123 | mon_segment_warn(int rc, char* seg_name) | ||
124 | { | 119 | { |
125 | switch (rc) { | 120 | switch (rc) { |
126 | case -ENOENT: | 121 | case -ENOENT: |
@@ -166,44 +161,37 @@ mon_segment_warn(int rc, char* seg_name) | |||
166 | } | 161 | } |
167 | } | 162 | } |
168 | 163 | ||
169 | static inline unsigned long | 164 | static inline unsigned long mon_mca_start(struct mon_msg *monmsg) |
170 | mon_mca_start(struct mon_msg *monmsg) | ||
171 | { | 165 | { |
172 | return monmsg->local_eib.ln1msg1.iprmmsg1_u32; | 166 | return *(u32 *) &monmsg->msg.rmmsg; |
173 | } | 167 | } |
174 | 168 | ||
175 | static inline unsigned long | 169 | static inline unsigned long mon_mca_end(struct mon_msg *monmsg) |
176 | mon_mca_end(struct mon_msg *monmsg) | ||
177 | { | 170 | { |
178 | return monmsg->local_eib.ln1msg2.ipbfln1f; | 171 | return *(u32 *) &monmsg->msg.rmmsg[4]; |
179 | } | 172 | } |
180 | 173 | ||
181 | static inline u8 | 174 | static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index) |
182 | mon_mca_type(struct mon_msg *monmsg, u8 index) | ||
183 | { | 175 | { |
184 | return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index); | 176 | return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index); |
185 | } | 177 | } |
186 | 178 | ||
187 | static inline u32 | 179 | static inline u32 mon_mca_size(struct mon_msg *monmsg) |
188 | mon_mca_size(struct mon_msg *monmsg) | ||
189 | { | 180 | { |
190 | return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1; | 181 | return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1; |
191 | } | 182 | } |
192 | 183 | ||
193 | static inline u32 | 184 | static inline u32 mon_rec_start(struct mon_msg *monmsg) |
194 | mon_rec_start(struct mon_msg *monmsg) | ||
195 | { | 185 | { |
196 | return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4)); | 186 | return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4)); |
197 | } | 187 | } |
198 | 188 | ||
199 | static inline u32 | 189 | static inline u32 mon_rec_end(struct mon_msg *monmsg) |
200 | mon_rec_end(struct mon_msg *monmsg) | ||
201 | { | 190 | { |
202 | return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8)); | 191 | return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8)); |
203 | } | 192 | } |
204 | 193 | ||
205 | static inline int | 194 | static inline int mon_check_mca(struct mon_msg *monmsg) |
206 | mon_check_mca(struct mon_msg *monmsg) | ||
207 | { | 195 | { |
208 | if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) || | 196 | if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) || |
209 | (mon_rec_start(monmsg) < mon_dcss_start) || | 197 | (mon_rec_start(monmsg) < mon_dcss_start) || |
@@ -221,20 +209,17 @@ mon_check_mca(struct mon_msg *monmsg) | |||
221 | return 0; | 209 | return 0; |
222 | } | 210 | } |
223 | 211 | ||
224 | static inline int | 212 | static inline int mon_send_reply(struct mon_msg *monmsg, |
225 | mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv) | 213 | struct mon_private *monpriv) |
226 | { | 214 | { |
227 | u8 prmmsg[8]; | ||
228 | int rc; | 215 | int rc; |
229 | 216 | ||
230 | P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = " | 217 | P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = " |
231 | "0x%08X\n\n", | 218 | "0x%08X\n\n", |
232 | monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid, | 219 | monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class); |
233 | monmsg->local_eib.iptrgcls); | 220 | |
234 | rc = iucv_reply_prmmsg(monmsg->local_eib.ippathid, | 221 | rc = iucv_message_reply(monpriv->path, &monmsg->msg, |
235 | monmsg->local_eib.ipmsgid, | 222 | IUCV_IPRMDATA, NULL, 0); |
236 | monmsg->local_eib.iptrgcls, | ||
237 | 0, prmmsg); | ||
238 | atomic_dec(&monpriv->msglim_count); | 223 | atomic_dec(&monpriv->msglim_count); |
239 | if (likely(!monmsg->msglim_reached)) { | 224 | if (likely(!monmsg->msglim_reached)) { |
240 | monmsg->pos = 0; | 225 | monmsg->pos = 0; |
@@ -251,10 +236,19 @@ mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv) | |||
251 | return 0; | 236 | return 0; |
252 | } | 237 | } |
253 | 238 | ||
254 | static inline struct mon_private * | 239 | static inline void mon_free_mem(struct mon_private *monpriv) |
255 | mon_alloc_mem(void) | 240 | { |
241 | int i; | ||
242 | |||
243 | for (i = 0; i < MON_MSGLIM; i++) | ||
244 | if (monpriv->msg_array[i]) | ||
245 | kfree(monpriv->msg_array[i]); | ||
246 | kfree(monpriv); | ||
247 | } | ||
248 | |||
249 | static inline struct mon_private *mon_alloc_mem(void) | ||
256 | { | 250 | { |
257 | int i,j; | 251 | int i; |
258 | struct mon_private *monpriv; | 252 | struct mon_private *monpriv; |
259 | 253 | ||
260 | monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); | 254 | monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL); |
@@ -267,16 +261,15 @@ mon_alloc_mem(void) | |||
267 | GFP_KERNEL); | 261 | GFP_KERNEL); |
268 | if (!monpriv->msg_array[i]) { | 262 | if (!monpriv->msg_array[i]) { |
269 | P_ERROR("open, no memory for msg_array\n"); | 263 | P_ERROR("open, no memory for msg_array\n"); |
270 | for (j = 0; j < i; j++) | 264 | mon_free_mem(monpriv); |
271 | kfree(monpriv->msg_array[j]); | ||
272 | return NULL; | 265 | return NULL; |
273 | } | 266 | } |
274 | } | 267 | } |
275 | return monpriv; | 268 | return monpriv; |
276 | } | 269 | } |
277 | 270 | ||
278 | static inline void | 271 | static inline void mon_read_debug(struct mon_msg *monmsg, |
279 | mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) | 272 | struct mon_private *monpriv) |
280 | { | 273 | { |
281 | #ifdef MON_DEBUG | 274 | #ifdef MON_DEBUG |
282 | u8 msg_type[2], mca_type; | 275 | u8 msg_type[2], mca_type; |
@@ -284,7 +277,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) | |||
284 | 277 | ||
285 | records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1; | 278 | records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1; |
286 | 279 | ||
287 | memcpy(msg_type, &monmsg->local_eib.iptrgcls, 2); | 280 | memcpy(msg_type, &monmsg->msg.class, 2); |
288 | EBCASC(msg_type, 2); | 281 | EBCASC(msg_type, 2); |
289 | mca_type = mon_mca_type(monmsg, 0); | 282 | mca_type = mon_mca_type(monmsg, 0); |
290 | EBCASC(&mca_type, 1); | 283 | EBCASC(&mca_type, 1); |
@@ -292,8 +285,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) | |||
292 | P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n", | 285 | P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n", |
293 | monpriv->read_index, monpriv->write_index); | 286 | monpriv->read_index, monpriv->write_index); |
294 | P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n", | 287 | P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n", |
295 | monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid, | 288 | monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class); |
296 | monmsg->local_eib.iptrgcls); | ||
297 | P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n", | 289 | P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n", |
298 | msg_type[0], msg_type[1], mca_type ? mca_type : 'X', | 290 | msg_type[0], msg_type[1], mca_type ? mca_type : 'X', |
299 | mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2)); | 291 | mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2)); |
@@ -306,8 +298,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv) | |||
306 | #endif | 298 | #endif |
307 | } | 299 | } |
308 | 300 | ||
309 | static inline void | 301 | static inline void mon_next_mca(struct mon_msg *monmsg) |
310 | mon_next_mca(struct mon_msg *monmsg) | ||
311 | { | 302 | { |
312 | if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) | 303 | if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12)) |
313 | return; | 304 | return; |
@@ -316,8 +307,7 @@ mon_next_mca(struct mon_msg *monmsg) | |||
316 | monmsg->pos = 0; | 307 | monmsg->pos = 0; |
317 | } | 308 | } |
318 | 309 | ||
319 | static inline struct mon_msg * | 310 | static inline struct mon_msg *mon_next_message(struct mon_private *monpriv) |
320 | mon_next_message(struct mon_private *monpriv) | ||
321 | { | 311 | { |
322 | struct mon_msg *monmsg; | 312 | struct mon_msg *monmsg; |
323 | 313 | ||
@@ -342,39 +332,37 @@ mon_next_message(struct mon_private *monpriv) | |||
342 | /****************************************************************************** | 332 | /****************************************************************************** |
343 | * IUCV handler * | 333 | * IUCV handler * |
344 | *****************************************************************************/ | 334 | *****************************************************************************/ |
345 | static void | 335 | static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16]) |
346 | mon_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data) | ||
347 | { | 336 | { |
348 | struct mon_private *monpriv = (struct mon_private *) pgm_data; | 337 | struct mon_private *monpriv = path->private; |
349 | 338 | ||
350 | P_DEBUG("IUCV connection completed\n"); | 339 | P_DEBUG("IUCV connection completed\n"); |
351 | P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = " | 340 | P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = " |
352 | "0x%02X, Sample = 0x%02X\n", | 341 | "0x%02X, Sample = 0x%02X\n", |
353 | eib->ipuser[0], eib->ipuser[1], eib->ipuser[2]); | 342 | ipuser[0], ipuser[1], ipuser[2]); |
354 | atomic_set(&monpriv->iucv_connected, 1); | 343 | atomic_set(&monpriv->iucv_connected, 1); |
355 | wake_up(&mon_conn_wait_queue); | 344 | wake_up(&mon_conn_wait_queue); |
356 | } | 345 | } |
357 | 346 | ||
358 | static void | 347 | static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) |
359 | mon_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data) | ||
360 | { | 348 | { |
361 | struct mon_private *monpriv = (struct mon_private *) pgm_data; | 349 | struct mon_private *monpriv = path->private; |
362 | 350 | ||
363 | P_ERROR("IUCV connection severed with rc = 0x%X\n", | 351 | P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]); |
364 | (u8) eib->ipuser[0]); | 352 | iucv_path_sever(path, NULL); |
365 | atomic_set(&monpriv->iucv_severed, 1); | 353 | atomic_set(&monpriv->iucv_severed, 1); |
366 | wake_up(&mon_conn_wait_queue); | 354 | wake_up(&mon_conn_wait_queue); |
367 | wake_up_interruptible(&mon_read_wait_queue); | 355 | wake_up_interruptible(&mon_read_wait_queue); |
368 | } | 356 | } |
369 | 357 | ||
370 | static void | 358 | static void mon_iucv_message_pending(struct iucv_path *path, |
371 | mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data) | 359 | struct iucv_message *msg) |
372 | { | 360 | { |
373 | struct mon_private *monpriv = (struct mon_private *) pgm_data; | 361 | struct mon_private *monpriv = path->private; |
374 | 362 | ||
375 | P_DEBUG("IUCV message pending\n"); | 363 | P_DEBUG("IUCV message pending\n"); |
376 | memcpy(&monpriv->msg_array[monpriv->write_index]->local_eib, eib, | 364 | memcpy(&monpriv->msg_array[monpriv->write_index]->msg, |
377 | sizeof(iucv_MessagePending)); | 365 | msg, sizeof(*msg)); |
378 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { | 366 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { |
379 | P_WARNING("IUCV message pending, message limit (%i) reached\n", | 367 | P_WARNING("IUCV message pending, message limit (%i) reached\n", |
380 | MON_MSGLIM); | 368 | MON_MSGLIM); |
@@ -385,54 +373,45 @@ mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data) | |||
385 | wake_up_interruptible(&mon_read_wait_queue); | 373 | wake_up_interruptible(&mon_read_wait_queue); |
386 | } | 374 | } |
387 | 375 | ||
388 | static iucv_interrupt_ops_t mon_iucvops = { | 376 | static struct iucv_handler monreader_iucv_handler = { |
389 | .ConnectionComplete = mon_iucv_ConnectionComplete, | 377 | .path_complete = mon_iucv_path_complete, |
390 | .ConnectionSevered = mon_iucv_ConnectionSevered, | 378 | .path_severed = mon_iucv_path_severed, |
391 | .MessagePending = mon_iucv_MessagePending, | 379 | .message_pending = mon_iucv_message_pending, |
392 | }; | 380 | }; |
393 | 381 | ||
394 | /****************************************************************************** | 382 | /****************************************************************************** |
395 | * file operations * | 383 | * file operations * |
396 | *****************************************************************************/ | 384 | *****************************************************************************/ |
397 | static int | 385 | static int mon_open(struct inode *inode, struct file *filp) |
398 | mon_open(struct inode *inode, struct file *filp) | ||
399 | { | 386 | { |
400 | int rc, i; | ||
401 | struct mon_private *monpriv; | 387 | struct mon_private *monpriv; |
388 | int rc; | ||
402 | 389 | ||
403 | /* | 390 | /* |
404 | * only one user allowed | 391 | * only one user allowed |
405 | */ | 392 | */ |
393 | rc = -EBUSY; | ||
406 | if (test_and_set_bit(MON_IN_USE, &mon_in_use)) | 394 | if (test_and_set_bit(MON_IN_USE, &mon_in_use)) |
407 | return -EBUSY; | 395 | goto out; |
408 | 396 | ||
397 | rc = -ENOMEM; | ||
409 | monpriv = mon_alloc_mem(); | 398 | monpriv = mon_alloc_mem(); |
410 | if (!monpriv) | 399 | if (!monpriv) |
411 | return -ENOMEM; | 400 | goto out_use; |
412 | 401 | ||
413 | /* | 402 | /* |
414 | * Register with IUCV and connect to *MONITOR service | 403 | * Connect to *MONITOR service |
415 | */ | 404 | */ |
416 | monpriv->iucv_handle = iucv_register_program("my_monreader ", | 405 | monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL); |
417 | MON_SERVICE, | 406 | if (!monpriv->path) |
418 | NULL, | 407 | goto out_priv; |
419 | &mon_iucvops, | 408 | rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, |
420 | monpriv); | 409 | MON_SERVICE, NULL, user_data_connect, monpriv); |
421 | if (!monpriv->iucv_handle) { | ||
422 | P_ERROR("failed to register with iucv driver\n"); | ||
423 | rc = -EIO; | ||
424 | goto out_error; | ||
425 | } | ||
426 | P_INFO("open, registered with IUCV\n"); | ||
427 | |||
428 | rc = iucv_connect(&monpriv->pathid, MON_MSGLIM, user_data_connect, | ||
429 | MON_SERVICE, iucv_host, IPRMDATA, NULL, NULL, | ||
430 | monpriv->iucv_handle, NULL); | ||
431 | if (rc) { | 410 | if (rc) { |
432 | P_ERROR("iucv connection to *MONITOR failed with " | 411 | P_ERROR("iucv connection to *MONITOR failed with " |
433 | "IPUSER SEVER code = %i\n", rc); | 412 | "IPUSER SEVER code = %i\n", rc); |
434 | rc = -EIO; | 413 | rc = -EIO; |
435 | goto out_unregister; | 414 | goto out_path; |
436 | } | 415 | } |
437 | /* | 416 | /* |
438 | * Wait for connection confirmation | 417 | * Wait for connection confirmation |
@@ -444,24 +423,23 @@ mon_open(struct inode *inode, struct file *filp) | |||
444 | atomic_set(&monpriv->iucv_severed, 0); | 423 | atomic_set(&monpriv->iucv_severed, 0); |
445 | atomic_set(&monpriv->iucv_connected, 0); | 424 | atomic_set(&monpriv->iucv_connected, 0); |
446 | rc = -EIO; | 425 | rc = -EIO; |
447 | goto out_unregister; | 426 | goto out_path; |
448 | } | 427 | } |
449 | P_INFO("open, established connection to *MONITOR service\n\n"); | 428 | P_INFO("open, established connection to *MONITOR service\n\n"); |
450 | filp->private_data = monpriv; | 429 | filp->private_data = monpriv; |
451 | return nonseekable_open(inode, filp); | 430 | return nonseekable_open(inode, filp); |
452 | 431 | ||
453 | out_unregister: | 432 | out_path: |
454 | iucv_unregister_program(monpriv->iucv_handle); | 433 | kfree(monpriv->path); |
455 | out_error: | 434 | out_priv: |
456 | for (i = 0; i < MON_MSGLIM; i++) | 435 | mon_free_mem(monpriv); |
457 | kfree(monpriv->msg_array[i]); | 436 | out_use: |
458 | kfree(monpriv); | ||
459 | clear_bit(MON_IN_USE, &mon_in_use); | 437 | clear_bit(MON_IN_USE, &mon_in_use); |
438 | out: | ||
460 | return rc; | 439 | return rc; |
461 | } | 440 | } |
462 | 441 | ||
463 | static int | 442 | static int mon_close(struct inode *inode, struct file *filp) |
464 | mon_close(struct inode *inode, struct file *filp) | ||
465 | { | 443 | { |
466 | int rc, i; | 444 | int rc, i; |
467 | struct mon_private *monpriv = filp->private_data; | 445 | struct mon_private *monpriv = filp->private_data; |
@@ -469,18 +447,12 @@ mon_close(struct inode *inode, struct file *filp) | |||
469 | /* | 447 | /* |
470 | * Close IUCV connection and unregister | 448 | * Close IUCV connection and unregister |
471 | */ | 449 | */ |
472 | rc = iucv_sever(monpriv->pathid, user_data_sever); | 450 | rc = iucv_path_sever(monpriv->path, user_data_sever); |
473 | if (rc) | 451 | if (rc) |
474 | P_ERROR("close, iucv_sever failed with rc = %i\n", rc); | 452 | P_ERROR("close, iucv_sever failed with rc = %i\n", rc); |
475 | else | 453 | else |
476 | P_INFO("close, terminated connection to *MONITOR service\n"); | 454 | P_INFO("close, terminated connection to *MONITOR service\n"); |
477 | 455 | ||
478 | rc = iucv_unregister_program(monpriv->iucv_handle); | ||
479 | if (rc) | ||
480 | P_ERROR("close, iucv_unregister failed with rc = %i\n", rc); | ||
481 | else | ||
482 | P_INFO("close, unregistered with IUCV\n"); | ||
483 | |||
484 | atomic_set(&monpriv->iucv_severed, 0); | 456 | atomic_set(&monpriv->iucv_severed, 0); |
485 | atomic_set(&monpriv->iucv_connected, 0); | 457 | atomic_set(&monpriv->iucv_connected, 0); |
486 | atomic_set(&monpriv->read_ready, 0); | 458 | atomic_set(&monpriv->read_ready, 0); |
@@ -495,8 +467,8 @@ mon_close(struct inode *inode, struct file *filp) | |||
495 | return 0; | 467 | return 0; |
496 | } | 468 | } |
497 | 469 | ||
498 | static ssize_t | 470 | static ssize_t mon_read(struct file *filp, char __user *data, |
499 | mon_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) | 471 | size_t count, loff_t *ppos) |
500 | { | 472 | { |
501 | struct mon_private *monpriv = filp->private_data; | 473 | struct mon_private *monpriv = filp->private_data; |
502 | struct mon_msg *monmsg; | 474 | struct mon_msg *monmsg; |
@@ -563,8 +535,7 @@ out_copy: | |||
563 | return count; | 535 | return count; |
564 | } | 536 | } |
565 | 537 | ||
566 | static unsigned int | 538 | static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p) |
567 | mon_poll(struct file *filp, struct poll_table_struct *p) | ||
568 | { | 539 | { |
569 | struct mon_private *monpriv = filp->private_data; | 540 | struct mon_private *monpriv = filp->private_data; |
570 | 541 | ||
@@ -593,8 +564,7 @@ static struct miscdevice mon_dev = { | |||
593 | /****************************************************************************** | 564 | /****************************************************************************** |
594 | * module init/exit * | 565 | * module init/exit * |
595 | *****************************************************************************/ | 566 | *****************************************************************************/ |
596 | static int __init | 567 | static int __init mon_init(void) |
597 | mon_init(void) | ||
598 | { | 568 | { |
599 | int rc; | 569 | int rc; |
600 | 570 | ||
@@ -603,22 +573,34 @@ mon_init(void) | |||
603 | return -ENODEV; | 573 | return -ENODEV; |
604 | } | 574 | } |
605 | 575 | ||
576 | /* | ||
577 | * Register with IUCV and connect to *MONITOR service | ||
578 | */ | ||
579 | rc = iucv_register(&monreader_iucv_handler, 1); | ||
580 | if (rc) { | ||
581 | P_ERROR("failed to register with iucv driver\n"); | ||
582 | return rc; | ||
583 | } | ||
584 | P_INFO("open, registered with IUCV\n"); | ||
585 | |||
606 | rc = segment_type(mon_dcss_name); | 586 | rc = segment_type(mon_dcss_name); |
607 | if (rc < 0) { | 587 | if (rc < 0) { |
608 | mon_segment_warn(rc, mon_dcss_name); | 588 | mon_segment_warn(rc, mon_dcss_name); |
609 | return rc; | 589 | goto out_iucv; |
610 | } | 590 | } |
611 | if (rc != SEG_TYPE_SC) { | 591 | if (rc != SEG_TYPE_SC) { |
612 | P_ERROR("segment %s has unsupported type, should be SC\n", | 592 | P_ERROR("segment %s has unsupported type, should be SC\n", |
613 | mon_dcss_name); | 593 | mon_dcss_name); |
614 | return -EINVAL; | 594 | rc = -EINVAL; |
595 | goto out_iucv; | ||
615 | } | 596 | } |
616 | 597 | ||
617 | rc = segment_load(mon_dcss_name, SEGMENT_SHARED, | 598 | rc = segment_load(mon_dcss_name, SEGMENT_SHARED, |
618 | &mon_dcss_start, &mon_dcss_end); | 599 | &mon_dcss_start, &mon_dcss_end); |
619 | if (rc < 0) { | 600 | if (rc < 0) { |
620 | mon_segment_warn(rc, mon_dcss_name); | 601 | mon_segment_warn(rc, mon_dcss_name); |
621 | return -EINVAL; | 602 | rc = -EINVAL; |
603 | goto out_iucv; | ||
622 | } | 604 | } |
623 | dcss_mkname(mon_dcss_name, &user_data_connect[8]); | 605 | dcss_mkname(mon_dcss_name, &user_data_connect[8]); |
624 | 606 | ||
@@ -634,14 +616,16 @@ mon_init(void) | |||
634 | 616 | ||
635 | out: | 617 | out: |
636 | segment_unload(mon_dcss_name); | 618 | segment_unload(mon_dcss_name); |
619 | out_iucv: | ||
620 | iucv_unregister(&monreader_iucv_handler, 1); | ||
637 | return rc; | 621 | return rc; |
638 | } | 622 | } |
639 | 623 | ||
640 | static void __exit | 624 | static void __exit mon_exit(void) |
641 | mon_exit(void) | ||
642 | { | 625 | { |
643 | segment_unload(mon_dcss_name); | 626 | segment_unload(mon_dcss_name); |
644 | WARN_ON(misc_deregister(&mon_dev) != 0); | 627 | WARN_ON(misc_deregister(&mon_dev) != 0); |
628 | iucv_unregister(&monreader_iucv_handler, 1); | ||
645 | return; | 629 | return; |
646 | } | 630 | } |
647 | 631 | ||
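Taken together, the monreader hunks follow the newer s390 IUCV API: a single struct iucv_handler with path_complete/path_severed/message_pending callbacks is registered once at module load, each open builds its own struct iucv_path with iucv_path_alloc()/iucv_path_connect(), and replies go through iucv_message_reply() on that path. Condensed from the hunks above (my_handler, the my_* callbacks and private_data are placeholder names):

static struct iucv_handler my_handler = {
	.path_complete   = my_path_complete,
	.path_severed    = my_path_severed,
	.message_pending = my_message_pending,
};

/* module init: register the handler once */
rc = iucv_register(&my_handler, 1);

/* per open: allocate a path and connect it to the *MONITOR service */
path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
rc = iucv_path_connect(path, &my_handler, MON_SERVICE, NULL,
		       user_data_connect, private_data);

/* acknowledge a message whose payload travels in the parameter list */
rc = iucv_message_reply(path, &msg, IUCV_IPRMDATA, NULL, 0);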
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 4f894dc2373b..8432a76b961e 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * character device driver for reading z/VM system service records | 3 | * character device driver for reading z/VM system service records |
4 | * | 4 | * |
5 | * | 5 | * |
6 | * Copyright (C) 2004 IBM Corporation | 6 | * Copyright 2004 IBM Corporation |
7 | * character device driver for reading z/VM system service records, | 7 | * character device driver for reading z/VM system service records, |
8 | * Version 1.0 | 8 | * Version 1.0 |
9 | * Author(s): Xenia Tkatschow <xenia@us.ibm.com> | 9 | * Author(s): Xenia Tkatschow <xenia@us.ibm.com> |
@@ -21,7 +21,7 @@ | |||
21 | #include <asm/cpcmd.h> | 21 | #include <asm/cpcmd.h> |
22 | #include <asm/debug.h> | 22 | #include <asm/debug.h> |
23 | #include <asm/ebcdic.h> | 23 | #include <asm/ebcdic.h> |
24 | #include "../net/iucv.h" | 24 | #include <net/iucv/iucv.h> |
25 | #include <linux/kmod.h> | 25 | #include <linux/kmod.h> |
26 | #include <linux/cdev.h> | 26 | #include <linux/cdev.h> |
27 | #include <linux/device.h> | 27 | #include <linux/device.h> |
@@ -60,12 +60,11 @@ struct vmlogrdr_priv_t { | |||
60 | char system_service[8]; | 60 | char system_service[8]; |
61 | char internal_name[8]; | 61 | char internal_name[8]; |
62 | char recording_name[8]; | 62 | char recording_name[8]; |
63 | u16 pathid; | 63 | struct iucv_path *path; |
64 | int connection_established; | 64 | int connection_established; |
65 | int iucv_path_severed; | 65 | int iucv_path_severed; |
66 | iucv_MessagePending local_interrupt_buffer; | 66 | struct iucv_message local_interrupt_buffer; |
67 | atomic_t receive_ready; | 67 | atomic_t receive_ready; |
68 | iucv_handle_t iucv_handle; | ||
69 | int minor_num; | 68 | int minor_num; |
70 | char * buffer; | 69 | char * buffer; |
71 | char * current_position; | 70 | char * current_position; |
@@ -97,37 +96,19 @@ static struct file_operations vmlogrdr_fops = { | |||
97 | }; | 96 | }; |
98 | 97 | ||
99 | 98 | ||
100 | static u8 iucvMagic[16] = { | 99 | static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]); |
101 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, | 100 | static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]); |
102 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 | 101 | static void vmlogrdr_iucv_message_pending(struct iucv_path *, |
103 | }; | 102 | struct iucv_message *); |
104 | 103 | ||
105 | 104 | ||
106 | static u8 mask[] = { | 105 | static struct iucv_handler vmlogrdr_iucv_handler = { |
107 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | 106 | .path_complete = vmlogrdr_iucv_path_complete, |
108 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | 107 | .path_severed = vmlogrdr_iucv_path_severed, |
109 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | 108 | .message_pending = vmlogrdr_iucv_message_pending, |
110 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff | ||
111 | }; | 109 | }; |
112 | 110 | ||
113 | 111 | ||
114 | static u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; | ||
115 | |||
116 | |||
117 | static void | ||
118 | vmlogrdr_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data); | ||
119 | static void | ||
120 | vmlogrdr_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data); | ||
121 | static void | ||
122 | vmlogrdr_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data); | ||
123 | |||
124 | |||
125 | static iucv_interrupt_ops_t vmlogrdr_iucvops = { | ||
126 | .ConnectionComplete = vmlogrdr_iucv_ConnectionComplete, | ||
127 | .ConnectionSevered = vmlogrdr_iucv_ConnectionSevered, | ||
128 | .MessagePending = vmlogrdr_iucv_MessagePending, | ||
129 | }; | ||
130 | |||
131 | static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); | 112 | static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); |
132 | static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); | 113 | static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); |
133 | 114 | ||
@@ -176,28 +157,29 @@ static struct cdev *vmlogrdr_cdev = NULL; | |||
176 | static int recording_class_AB; | 157 | static int recording_class_AB; |
177 | 158 | ||
178 | 159 | ||
179 | static void | 160 | static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16]) |
180 | vmlogrdr_iucv_ConnectionComplete (iucv_ConnectionComplete * eib, | ||
181 | void * pgm_data) | ||
182 | { | 161 | { |
183 | struct vmlogrdr_priv_t * logptr = pgm_data; | 162 | struct vmlogrdr_priv_t * logptr = path->private; |
163 | |||
184 | spin_lock(&logptr->priv_lock); | 164 | spin_lock(&logptr->priv_lock); |
185 | logptr->connection_established = 1; | 165 | logptr->connection_established = 1; |
186 | spin_unlock(&logptr->priv_lock); | 166 | spin_unlock(&logptr->priv_lock); |
187 | wake_up(&conn_wait_queue); | 167 | wake_up(&conn_wait_queue); |
188 | return; | ||
189 | } | 168 | } |
190 | 169 | ||
191 | 170 | ||
192 | static void | 171 | static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) |
193 | vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data) | ||
194 | { | 172 | { |
195 | u8 reason = (u8) eib->ipuser[8]; | 173 | struct vmlogrdr_priv_t * logptr = path->private; |
196 | struct vmlogrdr_priv_t * logptr = pgm_data; | 174 | u8 reason = (u8) ipuser[8]; |
197 | 175 | ||
198 | printk (KERN_ERR "vmlogrdr: connection severed with" | 176 | printk (KERN_ERR "vmlogrdr: connection severed with" |
199 | " reason %i\n", reason); | 177 | " reason %i\n", reason); |
200 | 178 | ||
179 | iucv_path_sever(path, NULL); | ||
180 | kfree(path); | ||
181 | logptr->path = NULL; | ||
182 | |||
201 | spin_lock(&logptr->priv_lock); | 183 | spin_lock(&logptr->priv_lock); |
202 | logptr->connection_established = 0; | 184 | logptr->connection_established = 0; |
203 | logptr->iucv_path_severed = 1; | 185 | logptr->iucv_path_severed = 1; |
@@ -209,10 +191,10 @@ vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data) | |||
209 | } | 191 | } |
210 | 192 | ||
211 | 193 | ||
212 | static void | 194 | static void vmlogrdr_iucv_message_pending(struct iucv_path *path, |
213 | vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data) | 195 | struct iucv_message *msg) |
214 | { | 196 | { |
215 | struct vmlogrdr_priv_t * logptr = pgm_data; | 197 | struct vmlogrdr_priv_t * logptr = path->private; |
216 | 198 | ||
217 | /* | 199 | /* |
218 | * This function is the bottom half so it should be quick. | 200 | * This function is the bottom half so it should be quick. |
@@ -220,15 +202,15 @@ vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data) | |||
220 | * the usage count | 202 | * the usage count |
221 | */ | 203 | */ |
222 | spin_lock(&logptr->priv_lock); | 204 | spin_lock(&logptr->priv_lock); |
223 | memcpy(&(logptr->local_interrupt_buffer), eib, sizeof(*eib)); | 205 | memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg)); |
224 | atomic_inc(&logptr->receive_ready); | 206 | atomic_inc(&logptr->receive_ready); |
225 | spin_unlock(&logptr->priv_lock); | 207 | spin_unlock(&logptr->priv_lock); |
226 | wake_up_interruptible(&read_wait_queue); | 208 | wake_up_interruptible(&read_wait_queue); |
227 | } | 209 | } |
228 | 210 | ||
229 | 211 | ||
230 | static int | 212 | static int vmlogrdr_get_recording_class_AB(void) |
231 | vmlogrdr_get_recording_class_AB(void) { | 213 | { |
232 | char cp_command[]="QUERY COMMAND RECORDING "; | 214 | char cp_command[]="QUERY COMMAND RECORDING "; |
233 | char cp_response[80]; | 215 | char cp_response[80]; |
234 | char *tail; | 216 | char *tail; |
@@ -258,8 +240,9 @@ vmlogrdr_get_recording_class_AB(void) { | |||
258 | } | 240 | } |
259 | 241 | ||
260 | 242 | ||
261 | static int | 243 | static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, |
262 | vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) { | 244 | int action, int purge) |
245 | { | ||
263 | 246 | ||
264 | char cp_command[80]; | 247 | char cp_command[80]; |
265 | char cp_response[160]; | 248 | char cp_response[160]; |
@@ -317,8 +300,7 @@ vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) { | |||
317 | } | 300 | } |
318 | 301 | ||
319 | 302 | ||
320 | static int | 303 | static int vmlogrdr_open (struct inode *inode, struct file *filp) |
321 | vmlogrdr_open (struct inode *inode, struct file *filp) | ||
322 | { | 304 | { |
323 | int dev_num = 0; | 305 | int dev_num = 0; |
324 | struct vmlogrdr_priv_t * logptr = NULL; | 306 | struct vmlogrdr_priv_t * logptr = NULL; |
@@ -328,10 +310,7 @@ vmlogrdr_open (struct inode *inode, struct file *filp) | |||
328 | dev_num = iminor(inode); | 310 | dev_num = iminor(inode); |
329 | if (dev_num > MAXMINOR) | 311 | if (dev_num > MAXMINOR) |
330 | return -ENODEV; | 312 | return -ENODEV; |
331 | |||
332 | logptr = &sys_ser[dev_num]; | 313 | logptr = &sys_ser[dev_num]; |
333 | if (logptr == NULL) | ||
334 | return -ENODEV; | ||
335 | 314 | ||
336 | /* | 315 | /* |
337 | * only allow for blocking reads to be open | 316 | * only allow for blocking reads to be open |
@@ -344,52 +323,38 @@ vmlogrdr_open (struct inode *inode, struct file *filp) | |||
344 | if (logptr->dev_in_use) { | 323 | if (logptr->dev_in_use) { |
345 | spin_unlock_bh(&logptr->priv_lock); | 324 | spin_unlock_bh(&logptr->priv_lock); |
346 | return -EBUSY; | 325 | return -EBUSY; |
347 | } else { | ||
348 | logptr->dev_in_use = 1; | ||
349 | spin_unlock_bh(&logptr->priv_lock); | ||
350 | } | 326 | } |
351 | 327 | logptr->dev_in_use = 1; | |
328 | logptr->connection_established = 0; | ||
329 | logptr->iucv_path_severed = 0; | ||
352 | atomic_set(&logptr->receive_ready, 0); | 330 | atomic_set(&logptr->receive_ready, 0); |
353 | logptr->buffer_free = 1; | 331 | logptr->buffer_free = 1; |
332 | spin_unlock_bh(&logptr->priv_lock); | ||
354 | 333 | ||
355 | /* set the file options */ | 334 | /* set the file options */ |
356 | filp->private_data = logptr; | 335 | filp->private_data = logptr; |
357 | filp->f_op = &vmlogrdr_fops; | 336 | filp->f_op = &vmlogrdr_fops; |
358 | 337 | ||
359 | /* start recording for this service*/ | 338 | /* start recording for this service*/ |
360 | ret=0; | 339 | if (logptr->autorecording) { |
361 | if (logptr->autorecording) | ||
362 | ret = vmlogrdr_recording(logptr,1,logptr->autopurge); | 340 | ret = vmlogrdr_recording(logptr,1,logptr->autopurge); |
363 | if (ret) | 341 | if (ret) |
364 | printk (KERN_WARNING "vmlogrdr: failed to start " | 342 | printk (KERN_WARNING "vmlogrdr: failed to start " |
365 | "recording automatically\n"); | 343 | "recording automatically\n"); |
366 | |||
367 | /* Register with iucv driver */ | ||
368 | logptr->iucv_handle = iucv_register_program(iucvMagic, | ||
369 | logptr->system_service, mask, &vmlogrdr_iucvops, | ||
370 | logptr); | ||
371 | |||
372 | if (logptr->iucv_handle == NULL) { | ||
373 | printk (KERN_ERR "vmlogrdr: failed to register with" | ||
374 | "iucv driver\n"); | ||
375 | goto not_registered; | ||
376 | } | 344 | } |
377 | 345 | ||
378 | /* create connection to the system service */ | 346 | /* create connection to the system service */ |
379 | spin_lock_bh(&logptr->priv_lock); | 347 | logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL); |
380 | logptr->connection_established = 0; | 348 | if (!logptr->path) |
381 | logptr->iucv_path_severed = 0; | 349 | goto out_dev; |
382 | spin_unlock_bh(&logptr->priv_lock); | 350 | connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler, |
383 | 351 | logptr->system_service, NULL, NULL, | |
384 | connect_rc = iucv_connect (&(logptr->pathid), 10, iucvMagic, | 352 | logptr); |
385 | logptr->system_service, iucv_host, 0, | ||
386 | NULL, NULL, | ||
387 | logptr->iucv_handle, NULL); | ||
388 | if (connect_rc) { | 353 | if (connect_rc) { |
389 | printk (KERN_ERR "vmlogrdr: iucv connection to %s " | 354 | printk (KERN_ERR "vmlogrdr: iucv connection to %s " |
390 | "failed with rc %i \n", logptr->system_service, | 355 | "failed with rc %i \n", logptr->system_service, |
391 | connect_rc); | 356 | connect_rc); |
392 | goto not_connected; | 357 | goto out_path; |
393 | } | 358 | } |
394 | 359 | ||
395 | /* We've issued the connect and now we must wait for a | 360 | /* We've issued the connect and now we must wait for a |
@@ -398,35 +363,28 @@ vmlogrdr_open (struct inode *inode, struct file *filp) | |||
398 | */ | 363 | */ |
399 | wait_event(conn_wait_queue, (logptr->connection_established) | 364 | wait_event(conn_wait_queue, (logptr->connection_established) |
400 | || (logptr->iucv_path_severed)); | 365 | || (logptr->iucv_path_severed)); |
401 | if (logptr->iucv_path_severed) { | 366 | if (logptr->iucv_path_severed) |
402 | goto not_connected; | 367 | goto out_record; |
403 | } | ||
404 | |||
405 | return nonseekable_open(inode, filp); | 368 | return nonseekable_open(inode, filp); |
406 | 369 | ||
407 | not_connected: | 370 | out_record: |
408 | iucv_unregister_program(logptr->iucv_handle); | ||
409 | logptr->iucv_handle = NULL; | ||
410 | not_registered: | ||
411 | if (logptr->autorecording) | 371 | if (logptr->autorecording) |
412 | vmlogrdr_recording(logptr,0,logptr->autopurge); | 372 | vmlogrdr_recording(logptr,0,logptr->autopurge); |
373 | out_path: | ||
374 | kfree(logptr->path); /* kfree(NULL) is ok. */ | ||
375 | logptr->path = NULL; | ||
376 | out_dev: | ||
413 | logptr->dev_in_use = 0; | 377 | logptr->dev_in_use = 0; |
414 | return -EIO; | 378 | return -EIO; |
415 | |||
416 | |||
417 | } | 379 | } |
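The reworked vmlogrdr_open() above replaces the old nested if/else error handling with the kernel's usual goto-unwind idiom: the labels out_record, out_path and out_dev each undo exactly what had been acquired before the failing step, in reverse order. A minimal, generic sketch of that shape (example_setup() is hypothetical, not the driver's code):

#include <linux/slab.h>
#include <linux/errno.h>

/*
 * Acquire two resources in order; on failure, unwind only what has
 * actually been acquired, in reverse order.
 */
static int example_setup(void **a, void **b)
{
        *a = kmalloc(128, GFP_KERNEL);
        if (!*a)
                goto out;               /* nothing to undo yet */

        *b = kmalloc(256, GFP_KERNEL);
        if (!*b)
                goto out_a;             /* only *a needs to be released */

        return 0;

out_a:
        kfree(*a);
        *a = NULL;
out:
        return -ENOMEM;
}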
418 | 380 | ||
419 | 381 | ||
420 | static int | 382 | static int vmlogrdr_release (struct inode *inode, struct file *filp) |
421 | vmlogrdr_release (struct inode *inode, struct file *filp) | ||
422 | { | 383 | { |
423 | int ret; | 384 | int ret; |
424 | 385 | ||
425 | struct vmlogrdr_priv_t * logptr = filp->private_data; | 386 | struct vmlogrdr_priv_t * logptr = filp->private_data; |
426 | 387 | ||
427 | iucv_unregister_program(logptr->iucv_handle); | ||
428 | logptr->iucv_handle = NULL; | ||
429 | |||
430 | if (logptr->autorecording) { | 388 | if (logptr->autorecording) { |
431 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); | 389 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); |
432 | if (ret) | 390 | if (ret) |
@@ -439,8 +397,8 @@ vmlogrdr_release (struct inode *inode, struct file *filp) | |||
439 | } | 397 | } |
440 | 398 | ||
441 | 399 | ||
442 | static int | 400 | static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) |
443 | vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { | 401 | { |
444 | int rc, *temp; | 402 | int rc, *temp; |
445 | /* we need to keep track of two data sizes here: | 403 | /* we need to keep track of two data sizes here: |
446 | * The number of bytes we need to receive from iucv and | 404 | * The number of bytes we need to receive from iucv and |
@@ -461,8 +419,7 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { | |||
461 | * We need to return the total length of the record | 419 | * We need to return the total length of the record |
462 | * + size of FENCE in the first 4 bytes of the buffer. | 420 | * + size of FENCE in the first 4 bytes of the buffer. |
463 | */ | 421 | */ |
464 | iucv_data_count = | 422 | iucv_data_count = priv->local_interrupt_buffer.length; |
465 | priv->local_interrupt_buffer.ln1msg2.ipbfln1f; | ||
466 | user_data_count = sizeof(int); | 423 | user_data_count = sizeof(int); |
467 | temp = (int*)priv->buffer; | 424 | temp = (int*)priv->buffer; |
468 | *temp= iucv_data_count + sizeof(FENCE); | 425 | *temp= iucv_data_count + sizeof(FENCE); |
@@ -474,14 +431,10 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { | |||
474 | */ | 431 | */ |
475 | if (iucv_data_count > NET_BUFFER_SIZE) | 432 | if (iucv_data_count > NET_BUFFER_SIZE) |
476 | iucv_data_count = NET_BUFFER_SIZE; | 433 | iucv_data_count = NET_BUFFER_SIZE; |
477 | rc = iucv_receive(priv->pathid, | 434 | rc = iucv_message_receive(priv->path, |
478 | priv->local_interrupt_buffer.ipmsgid, | 435 | &priv->local_interrupt_buffer, |
479 | priv->local_interrupt_buffer.iptrgcls, | 436 | 0, buffer, iucv_data_count, |
480 | buffer, | 437 | &priv->residual_length); |
481 | iucv_data_count, | ||
482 | NULL, | ||
483 | NULL, | ||
484 | &priv->residual_length); | ||
485 | spin_unlock_bh(&priv->priv_lock); | 438 | spin_unlock_bh(&priv->priv_lock); |
486 | /* An rc of 5 indicates that the record was bigger than | 439 | ||
487 | * the buffer, which is OK for us. A 9 indicates that the | 440 | * the buffer, which is OK for us. A 9 indicates that the |
@@ -513,8 +466,8 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { | |||
513 | } | 466 | } |
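As the comments in vmlogrdr_receive_data() describe, every record handed back to user space is prefixed with a native int holding the record length plus sizeof(FENCE). A hypothetical user-space consumer could therefore parse the stream as below; the device node name is a placeholder, and for brevity the sketch assumes each read() returns the full amount requested, which a robust reader would not rely on.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
        int fd = open("/dev/vmlogrdr0", O_RDONLY);      /* placeholder node name */
        int len;
        char *buf;

        if (fd < 0)
                return 1;
        /* Each record starts with a 4-byte length: record data + FENCE terminator. */
        while (read(fd, &len, sizeof(len)) == (ssize_t) sizeof(len)) {
                buf = malloc(len);
                if (!buf || read(fd, buf, len) != len) {
                        free(buf);
                        break;
                }
                fwrite(buf, 1, len, stdout);
                free(buf);
        }
        close(fd);
        return 0;
}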
514 | 467 | ||
515 | 468 | ||
516 | static ssize_t | 469 | static ssize_t vmlogrdr_read(struct file *filp, char __user *data, |
517 | vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos) | 470 | size_t count, loff_t * ppos) |
518 | { | 471 | { |
519 | int rc; | 472 | int rc; |
520 | struct vmlogrdr_priv_t * priv = filp->private_data; | 473 | struct vmlogrdr_priv_t * priv = filp->private_data; |
@@ -546,8 +499,10 @@ vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos) | |||
546 | return count; | 499 | return count; |
547 | } | 500 | } |
548 | 501 | ||
549 | static ssize_t | 502 | static ssize_t vmlogrdr_autopurge_store(struct device * dev, |
550 | vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { | 503 | struct device_attribute *attr, |
504 | const char * buf, size_t count) | ||
505 | { | ||
551 | struct vmlogrdr_priv_t *priv = dev->driver_data; | 506 | struct vmlogrdr_priv_t *priv = dev->driver_data; |
552 | ssize_t ret = count; | 507 | ssize_t ret = count; |
553 | 508 | ||
@@ -565,8 +520,10 @@ vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, con | |||
565 | } | 520 | } |
566 | 521 | ||
567 | 522 | ||
568 | static ssize_t | 523 | static ssize_t vmlogrdr_autopurge_show(struct device *dev, |
569 | vmlogrdr_autopurge_show(struct device *dev, struct device_attribute *attr, char *buf) { | 524 | struct device_attribute *attr, |
525 | char *buf) | ||
526 | { | ||
570 | struct vmlogrdr_priv_t *priv = dev->driver_data; | 527 | struct vmlogrdr_priv_t *priv = dev->driver_data; |
571 | return sprintf(buf, "%u\n", priv->autopurge); | 528 | return sprintf(buf, "%u\n", priv->autopurge); |
572 | } | 529 | } |
@@ -576,8 +533,10 @@ static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show, | |||
576 | vmlogrdr_autopurge_store); | 533 | vmlogrdr_autopurge_store); |
577 | 534 | ||
578 | 535 | ||
579 | static ssize_t | 536 | static ssize_t vmlogrdr_purge_store(struct device * dev, |
580 | vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { | 537 | struct device_attribute *attr, |
538 | const char * buf, size_t count) | ||
539 | { | ||
581 | 540 | ||
582 | char cp_command[80]; | 541 | char cp_command[80]; |
583 | char cp_response[80]; | 542 | char cp_response[80]; |
@@ -617,9 +576,10 @@ vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const c | |||
617 | static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store); | 576 | static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store); |
618 | 577 | ||
619 | 578 | ||
620 | static ssize_t | 579 | static ssize_t vmlogrdr_autorecording_store(struct device *dev, |
621 | vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr, const char *buf, | 580 | struct device_attribute *attr, |
622 | size_t count) { | 581 | const char *buf, size_t count) |
582 | { | ||
623 | struct vmlogrdr_priv_t *priv = dev->driver_data; | 583 | struct vmlogrdr_priv_t *priv = dev->driver_data; |
624 | ssize_t ret = count; | 584 | ssize_t ret = count; |
625 | 585 | ||
@@ -637,8 +597,10 @@ vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr, | |||
637 | } | 597 | } |
638 | 598 | ||
639 | 599 | ||
640 | static ssize_t | 600 | static ssize_t vmlogrdr_autorecording_show(struct device *dev, |
641 | vmlogrdr_autorecording_show(struct device *dev, struct device_attribute *attr, char *buf) { | 601 | struct device_attribute *attr, |
602 | char *buf) | ||
603 | { | ||
642 | struct vmlogrdr_priv_t *priv = dev->driver_data; | 604 | struct vmlogrdr_priv_t *priv = dev->driver_data; |
643 | return sprintf(buf, "%u\n", priv->autorecording); | 605 | return sprintf(buf, "%u\n", priv->autorecording); |
644 | } | 606 | } |
@@ -648,9 +610,10 @@ static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show, | |||
648 | vmlogrdr_autorecording_store); | 610 | vmlogrdr_autorecording_store); |
649 | 611 | ||
650 | 612 | ||
651 | static ssize_t | 613 | static ssize_t vmlogrdr_recording_store(struct device * dev, |
652 | vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) { | 614 | struct device_attribute *attr, |
653 | 615 | const char * buf, size_t count) | |
616 | { | ||
654 | struct vmlogrdr_priv_t *priv = dev->driver_data; | 617 | struct vmlogrdr_priv_t *priv = dev->driver_data; |
655 | ssize_t ret; | 618 | ssize_t ret; |
656 | 619 | ||
@@ -675,8 +638,9 @@ vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, con | |||
675 | static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store); | 638 | static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store); |
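Each of the attribute hunks above follows the same sysfs pattern of this kernel generation: a show and/or store callback plus a DEVICE_ATTR() declaration that produces a dev_attr_<name> object, later collected into vmlogrdr_attr_group. A sketch of one more, purely hypothetical, attribute wired the same way (the driver_data and autopurge usage mirrors the code above):

/* Hypothetical additional attribute, following the same pattern. */
static ssize_t vmlogrdr_example_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct vmlogrdr_priv_t *priv = dev->driver_data;

        return sprintf(buf, "%u\n", priv->autopurge);
}

static ssize_t vmlogrdr_example_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct vmlogrdr_priv_t *priv = dev->driver_data;

        if (buf[0] != '0' && buf[0] != '1')
                return -EINVAL;
        priv->autopurge = buf[0] - '0';
        return count;
}

/* Generates dev_attr_example; 0644 = world-readable, root-writable. */
static DEVICE_ATTR(example, 0644, vmlogrdr_example_show, vmlogrdr_example_store);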
676 | 639 | ||
677 | 640 | ||
678 | static ssize_t | 641 | static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver, |
679 | vmlogrdr_recording_status_show(struct device_driver *driver, char *buf) { | 642 | char *buf) |
643 | { | ||
680 | 644 | ||
681 | char cp_command[] = "QUERY RECORDING "; | 645 | char cp_command[] = "QUERY RECORDING "; |
682 | int len; | 646 | int len; |
@@ -709,52 +673,63 @@ static struct device_driver vmlogrdr_driver = { | |||
709 | }; | 673 | }; |
710 | 674 | ||
711 | 675 | ||
712 | static int | 676 | static int vmlogrdr_register_driver(void) |
713 | vmlogrdr_register_driver(void) { | 677 | { |
714 | int ret; | 678 | int ret; |
715 | 679 | ||
680 | /* Register with iucv driver */ | ||
681 | ret = iucv_register(&vmlogrdr_iucv_handler, 1); | ||
682 | if (ret) { | ||
683 | printk (KERN_ERR "vmlogrdr: failed to register with" | ||
684 | "iucv driver\n"); | ||
685 | goto out; | ||
686 | } | ||
687 | |||
716 | ret = driver_register(&vmlogrdr_driver); | 688 | ret = driver_register(&vmlogrdr_driver); |
717 | if (ret) { | 689 | if (ret) { |
718 | printk(KERN_ERR "vmlogrdr: failed to register driver.\n"); | 690 | printk(KERN_ERR "vmlogrdr: failed to register driver.\n"); |
719 | return ret; | 691 | goto out_iucv; |
720 | } | 692 | } |
721 | 693 | ||
722 | ret = driver_create_file(&vmlogrdr_driver, | 694 | ret = driver_create_file(&vmlogrdr_driver, |
723 | &driver_attr_recording_status); | 695 | &driver_attr_recording_status); |
724 | if (ret) { | 696 | if (ret) { |
725 | printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n"); | 697 | printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n"); |
726 | goto unregdriver; | 698 | goto out_driver; |
727 | } | 699 | } |
728 | 700 | ||
729 | vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); | 701 | vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr"); |
730 | if (IS_ERR(vmlogrdr_class)) { | 702 | if (IS_ERR(vmlogrdr_class)) { |
731 | printk(KERN_ERR "vmlogrdr: failed to create class.\n"); | 703 | printk(KERN_ERR "vmlogrdr: failed to create class.\n"); |
732 | ret=PTR_ERR(vmlogrdr_class); | 704 | ret = PTR_ERR(vmlogrdr_class); |
733 | vmlogrdr_class=NULL; | 705 | vmlogrdr_class = NULL; |
734 | goto unregattr; | 706 | goto out_attr; |
735 | } | 707 | } |
736 | return 0; | 708 | return 0; |
737 | 709 | ||
738 | unregattr: | 710 | out_attr: |
739 | driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); | 711 | driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); |
740 | unregdriver: | 712 | out_driver: |
741 | driver_unregister(&vmlogrdr_driver); | 713 | driver_unregister(&vmlogrdr_driver); |
714 | out_iucv: | ||
715 | iucv_unregister(&vmlogrdr_iucv_handler, 1); | ||
716 | out: | ||
742 | return ret; | 717 | return ret; |
743 | } | 718 | } |
744 | 719 | ||
745 | 720 | ||
746 | static void | 721 | static void vmlogrdr_unregister_driver(void) |
747 | vmlogrdr_unregister_driver(void) { | 722 | { |
748 | class_destroy(vmlogrdr_class); | 723 | class_destroy(vmlogrdr_class); |
749 | vmlogrdr_class = NULL; | 724 | vmlogrdr_class = NULL; |
750 | driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); | 725 | driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status); |
751 | driver_unregister(&vmlogrdr_driver); | 726 | driver_unregister(&vmlogrdr_driver); |
752 | return; | 727 | iucv_unregister(&vmlogrdr_iucv_handler, 1); |
753 | } | 728 | } |
754 | 729 | ||
755 | 730 | ||
756 | static int | 731 | static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) |
757 | vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) { | 732 | { |
758 | struct device *dev; | 733 | struct device *dev; |
759 | int ret; | 734 | int ret; |
760 | 735 | ||
@@ -803,9 +778,10 @@ vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) { | |||
803 | } | 778 | } |
804 | 779 | ||
805 | 780 | ||
806 | static int | 781 | static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv) |
807 | vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) { | 782 | { |
808 | class_device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num)); | 783 | class_device_destroy(vmlogrdr_class, |
784 | MKDEV(vmlogrdr_major, priv->minor_num)); | ||
809 | if (priv->device != NULL) { | 785 | if (priv->device != NULL) { |
810 | sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group); | 786 | sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group); |
811 | device_unregister(priv->device); | 787 | device_unregister(priv->device); |
@@ -815,8 +791,8 @@ vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) { | |||
815 | } | 791 | } |
816 | 792 | ||
817 | 793 | ||
818 | static int | 794 | static int vmlogrdr_register_cdev(dev_t dev) |
819 | vmlogrdr_register_cdev(dev_t dev) { | 795 | { |
820 | int rc = 0; | 796 | int rc = 0; |
821 | vmlogrdr_cdev = cdev_alloc(); | 797 | vmlogrdr_cdev = cdev_alloc(); |
822 | if (!vmlogrdr_cdev) { | 798 | if (!vmlogrdr_cdev) { |
@@ -836,9 +812,10 @@ vmlogrdr_register_cdev(dev_t dev) { | |||
836 | } | 812 | } |
837 | 813 | ||
838 | 814 | ||
839 | static void | 815 | static void vmlogrdr_cleanup(void) |
840 | vmlogrdr_cleanup(void) { | 816 | { |
841 | int i; | 817 | int i; |
818 | |||
842 | if (vmlogrdr_cdev) { | 819 | if (vmlogrdr_cdev) { |
843 | cdev_del(vmlogrdr_cdev); | 820 | cdev_del(vmlogrdr_cdev); |
844 | vmlogrdr_cdev=NULL; | 821 | vmlogrdr_cdev=NULL; |
@@ -855,8 +832,7 @@ vmlogrdr_cleanup(void) { | |||
855 | } | 832 | } |
856 | 833 | ||
857 | 834 | ||
858 | static int | 835 | static int vmlogrdr_init(void) |
859 | vmlogrdr_init(void) | ||
860 | { | 836 | { |
861 | int rc; | 837 | int rc; |
862 | int i; | 838 | int i; |
@@ -906,8 +882,7 @@ cleanup: | |||
906 | } | 882 | } |
907 | 883 | ||
908 | 884 | ||
909 | static void | 885 | static void vmlogrdr_exit(void) |
910 | vmlogrdr_exit(void) | ||
911 | { | 886 | { |
912 | vmlogrdr_cleanup(); | 887 | vmlogrdr_cleanup(); |
913 | printk (KERN_INFO "vmlogrdr: driver unloaded\n"); | 888 | printk (KERN_INFO "vmlogrdr: driver unloaded\n"); |
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 52625153a4f0..f98fa465df0a 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig | |||
@@ -22,13 +22,6 @@ config CTC | |||
22 | available. This option is also available as a module which will be | 22 | available. This option is also available as a module which will be |
23 | called ctc.ko. If you do not know what it is, it's safe to say "Y". | 23 | called ctc.ko. If you do not know what it is, it's safe to say "Y". |
24 | 24 | ||
25 | config IUCV | ||
26 | tristate "IUCV support (VM only)" | ||
27 | help | ||
28 | Select this option if you want to use inter-user communication | ||
29 | under VM or VIF. If unsure, say "Y" to enable a fast communication | ||
30 | link between VM guests. | ||
31 | |||
32 | config NETIUCV | 25 | config NETIUCV |
33 | tristate "IUCV network device support (VM only)" | 26 | tristate "IUCV network device support (VM only)" |
34 | depends on IUCV && NETDEVICES | 27 | depends on IUCV && NETDEVICES |
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index 4777e36a922f..bbe3ab2e93d9 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile | |||
@@ -4,7 +4,6 @@ | |||
4 | 4 | ||
5 | ctc-objs := ctcmain.o ctcdbug.o | 5 | ctc-objs := ctcmain.o ctcdbug.o |
6 | 6 | ||
7 | obj-$(CONFIG_IUCV) += iucv.o | ||
8 | obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o | 7 | obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o |
9 | obj-$(CONFIG_SMSGIUCV) += smsgiucv.o | 8 | obj-$(CONFIG_SMSGIUCV) += smsgiucv.o |
10 | obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o | 9 | obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o |
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c deleted file mode 100644 index 229aeb5fc399..000000000000 --- a/drivers/s390/net/iucv.c +++ /dev/null | |||
@@ -1,2540 +0,0 @@ | |||
1 | /* | ||
2 | * IUCV network driver | ||
3 | * | ||
4 | * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
5 | * Author(s): | ||
6 | * Original source: | ||
7 | * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 | ||
8 | * Xenia Tkatschow (xenia@us.ibm.com) | ||
9 | * 2Gb awareness and general cleanup: | ||
10 | * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | ||
11 | * | ||
12 | * Documentation used: | ||
13 | * The original source | ||
14 | * CP Programming Service, IBM document # SC24-5760 | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or modify | ||
17 | * it under the terms of the GNU General Public License as published by | ||
18 | * the Free Software Foundation; either version 2, or (at your option) | ||
19 | * any later version. | ||
20 | * | ||
21 | * This program is distributed in the hope that it will be useful, | ||
22 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
23 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
24 | * GNU General Public License for more details. | ||
25 | * | ||
26 | * You should have received a copy of the GNU General Public License | ||
27 | * along with this program; if not, write to the Free Software | ||
28 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | /* #define DEBUG */ | ||
33 | |||
34 | #include <linux/module.h> | ||
35 | #include <linux/moduleparam.h> | ||
36 | |||
37 | #include <linux/spinlock.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/interrupt.h> | ||
42 | #include <linux/list.h> | ||
43 | #include <linux/errno.h> | ||
44 | #include <linux/err.h> | ||
45 | #include <linux/device.h> | ||
46 | #include <asm/atomic.h> | ||
47 | #include "iucv.h" | ||
48 | #include <asm/io.h> | ||
49 | #include <asm/s390_ext.h> | ||
50 | #include <asm/ebcdic.h> | ||
51 | #include <asm/smp.h> | ||
52 | #include <asm/s390_rdev.h> | ||
53 | |||
54 | /* FLAGS: | ||
55 | * All flags are defined in the field IPFLAGS1 of each function | ||
56 | * and can be found in CP Programming Services. | ||
57 | * IPSRCCLS - Indicates you have specified a source class | ||
58 | * IPFGMCL - Indicates you have specified a target class | ||
59 | * IPFGPID - Indicates you have specified a pathid | ||
60 | * IPFGMID - Indicates you have specified a message ID | ||
61 | * IPANSLST - Indicates that you are using an address list for | ||
62 | * reply data | ||
63 | * IPBUFLST - Indicates that you are using an address list for | ||
64 | * message data | ||
65 | */ | ||
66 | |||
67 | #define IPSRCCLS 0x01 | ||
68 | #define IPFGMCL 0x01 | ||
69 | #define IPFGPID 0x02 | ||
70 | #define IPFGMID 0x04 | ||
71 | #define IPANSLST 0x08 | ||
72 | #define IPBUFLST 0x40 | ||
73 | |||
74 | static int | ||
75 | iucv_bus_match (struct device *dev, struct device_driver *drv) | ||
76 | { | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | struct bus_type iucv_bus = { | ||
81 | .name = "iucv", | ||
82 | .match = iucv_bus_match, | ||
83 | }; | ||
84 | |||
85 | struct device *iucv_root; | ||
86 | |||
87 | /* General IUCV interrupt structure */ | ||
88 | typedef struct { | ||
89 | __u16 ippathid; | ||
90 | __u8 res1; | ||
91 | __u8 iptype; | ||
92 | __u32 res2; | ||
93 | __u8 ipvmid[8]; | ||
94 | __u8 res3[24]; | ||
95 | } iucv_GeneralInterrupt; | ||
96 | |||
97 | static iucv_GeneralInterrupt *iucv_external_int_buffer = NULL; | ||
98 | |||
99 | /* Spin Lock declaration */ | ||
100 | |||
101 | static DEFINE_SPINLOCK(iucv_lock); | ||
102 | |||
103 | static int messagesDisabled = 0; | ||
104 | |||
105 | /***************INTERRUPT HANDLING ***************/ | ||
106 | |||
107 | typedef struct { | ||
108 | struct list_head queue; | ||
109 | iucv_GeneralInterrupt data; | ||
110 | } iucv_irqdata; | ||
111 | |||
112 | static struct list_head iucv_irq_queue; | ||
113 | static DEFINE_SPINLOCK(iucv_irq_queue_lock); | ||
114 | |||
115 | /* | ||
116 | * Internal function prototypes | ||
117 | */ | ||
118 | static void iucv_tasklet_handler(unsigned long); | ||
119 | static void iucv_irq_handler(__u16); | ||
120 | |||
121 | static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0); | ||
122 | |||
123 | /************ FUNCTION ID'S ****************************/ | ||
124 | |||
125 | #define ACCEPT 10 | ||
126 | #define CONNECT 11 | ||
127 | #define DECLARE_BUFFER 12 | ||
128 | #define PURGE 9 | ||
129 | #define QUERY 0 | ||
130 | #define QUIESCE 13 | ||
131 | #define RECEIVE 5 | ||
132 | #define REJECT 8 | ||
133 | #define REPLY 6 | ||
134 | #define RESUME 14 | ||
135 | #define RETRIEVE_BUFFER 2 | ||
136 | #define SEND 4 | ||
137 | #define SETMASK 16 | ||
138 | #define SEVER 15 | ||
139 | |||
140 | /** | ||
141 | * Structure: handler | ||
142 | * members: list - list management. | ||
143 | * structure: id | ||
144 | * userid - 8 char array of machine identification | ||
145 | * user_data - 16 char array for user identification | ||
146 | * mask - 24 char array used to compare the 2 previous | ||
147 | * interrupt_table - vector of interrupt functions. | ||
148 | * pgm_data - ulong, application data that is passed | ||
149 | * to the interrupt handlers | ||
150 | */ | ||
151 | typedef struct handler_t { | ||
152 | struct list_head list; | ||
153 | struct { | ||
154 | __u8 userid[8]; | ||
155 | __u8 user_data[16]; | ||
156 | __u8 mask[24]; | ||
157 | } id; | ||
158 | iucv_interrupt_ops_t *interrupt_table; | ||
159 | void *pgm_data; | ||
160 | } handler; | ||
161 | |||
162 | /** | ||
163 | * iucv_handler_table: List of registered handlers. | ||
164 | */ | ||
165 | static struct list_head iucv_handler_table; | ||
166 | |||
167 | /** | ||
168 | * iucv_pathid_table: an array of *handler pointing into | ||
169 | * iucv_handler_table for fast indexing by pathid; | ||
170 | */ | ||
171 | static handler **iucv_pathid_table; | ||
172 | |||
173 | static unsigned long max_connections; | ||
174 | |||
175 | /** | ||
176 | * iucv_cpuid: contains the logical cpu number of the cpu which | ||
177 | * has declared the iucv buffer by issuing DECLARE_BUFFER. | ||
178 | * If no cpu has done the initialization iucv_cpuid contains -1. | ||
179 | */ | ||
180 | static int iucv_cpuid = -1; | ||
181 | /** | ||
182 | * register_flag: is 0 when external interrupt has not been registered | ||
183 | */ | ||
184 | static int register_flag; | ||
185 | |||
186 | /****************FIVE 40-BYTE PARAMETER STRUCTURES******************/ | ||
187 | /* Data struct 1: iparml_control | ||
188 | * Used for iucv_accept | ||
189 | * iucv_connect | ||
190 | * iucv_quiesce | ||
191 | * iucv_resume | ||
192 | * iucv_sever | ||
193 | * iucv_retrieve_buffer | ||
194 | * Data struct 2: iparml_dpl (data in parameter list) | ||
195 | * Used for iucv_send_prmmsg | ||
196 | * iucv_send2way_prmmsg | ||
197 | * iucv_send2way_prmmsg_array | ||
198 | * iucv_reply_prmmsg | ||
199 | * Data struct 3: iparml_db (data in a buffer) | ||
200 | * Used for iucv_receive | ||
201 | * iucv_receive_array | ||
202 | * iucv_reject | ||
203 | * iucv_reply | ||
204 | * iucv_reply_array | ||
205 | * iucv_send | ||
206 | * iucv_send_array | ||
207 | * iucv_send2way | ||
208 | * iucv_send2way_array | ||
209 | * iucv_declare_buffer | ||
210 | * Data struct 4: iparml_purge | ||
211 | * Used for iucv_purge | ||
212 | * iucv_query | ||
213 | * Data struct 5: iparml_set_mask | ||
214 | * Used for iucv_set_mask | ||
215 | */ | ||
216 | |||
217 | typedef struct { | ||
218 | __u16 ippathid; | ||
219 | __u8 ipflags1; | ||
220 | __u8 iprcode; | ||
221 | __u16 ipmsglim; | ||
222 | __u16 res1; | ||
223 | __u8 ipvmid[8]; | ||
224 | __u8 ipuser[16]; | ||
225 | __u8 iptarget[8]; | ||
226 | } iparml_control; | ||
227 | |||
228 | typedef struct { | ||
229 | __u16 ippathid; | ||
230 | __u8 ipflags1; | ||
231 | __u8 iprcode; | ||
232 | __u32 ipmsgid; | ||
233 | __u32 iptrgcls; | ||
234 | __u8 iprmmsg[8]; | ||
235 | __u32 ipsrccls; | ||
236 | __u32 ipmsgtag; | ||
237 | __u32 ipbfadr2; | ||
238 | __u32 ipbfln2f; | ||
239 | __u32 res; | ||
240 | } iparml_dpl; | ||
241 | |||
242 | typedef struct { | ||
243 | __u16 ippathid; | ||
244 | __u8 ipflags1; | ||
245 | __u8 iprcode; | ||
246 | __u32 ipmsgid; | ||
247 | __u32 iptrgcls; | ||
248 | __u32 ipbfadr1; | ||
249 | __u32 ipbfln1f; | ||
250 | __u32 ipsrccls; | ||
251 | __u32 ipmsgtag; | ||
252 | __u32 ipbfadr2; | ||
253 | __u32 ipbfln2f; | ||
254 | __u32 res; | ||
255 | } iparml_db; | ||
256 | |||
257 | typedef struct { | ||
258 | __u16 ippathid; | ||
259 | __u8 ipflags1; | ||
260 | __u8 iprcode; | ||
261 | __u32 ipmsgid; | ||
262 | __u8 ipaudit[3]; | ||
263 | __u8 res1[5]; | ||
264 | __u32 res2; | ||
265 | __u32 ipsrccls; | ||
266 | __u32 ipmsgtag; | ||
267 | __u32 res3[3]; | ||
268 | } iparml_purge; | ||
269 | |||
270 | typedef struct { | ||
271 | __u8 ipmask; | ||
272 | __u8 res1[2]; | ||
273 | __u8 iprcode; | ||
274 | __u32 res2[9]; | ||
275 | } iparml_set_mask; | ||
276 | |||
277 | typedef struct { | ||
278 | union { | ||
279 | iparml_control p_ctrl; | ||
280 | iparml_dpl p_dpl; | ||
281 | iparml_db p_db; | ||
282 | iparml_purge p_purge; | ||
283 | iparml_set_mask p_set_mask; | ||
284 | } param; | ||
285 | atomic_t in_use; | ||
286 | __u32 res; | ||
287 | } __attribute__ ((aligned(8))) iucv_param; | ||
288 | #define PARAM_POOL_SIZE (PAGE_SIZE / sizeof(iucv_param)) | ||
289 | |||
290 | static iucv_param * iucv_param_pool; | ||
291 | |||
292 | MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); | ||
293 | MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); | ||
294 | MODULE_LICENSE("GPL"); | ||
295 | |||
296 | /* | ||
297 | * Debugging stuff | ||
298 | *******************************************************************************/ | ||
299 | |||
300 | |||
301 | #ifdef DEBUG | ||
302 | static int debuglevel = 0; | ||
303 | |||
304 | module_param(debuglevel, int, 0); | ||
305 | MODULE_PARM_DESC(debuglevel, | ||
306 | "Specifies the debug level (0=off ... 3=all)"); | ||
307 | |||
308 | static void | ||
309 | iucv_dumpit(char *title, void *buf, int len) | ||
310 | { | ||
311 | int i; | ||
312 | __u8 *p = (__u8 *)buf; | ||
313 | |||
314 | if (debuglevel < 3) | ||
315 | return; | ||
316 | |||
317 | printk(KERN_DEBUG "%s\n", title); | ||
318 | printk(" "); | ||
319 | for (i = 0; i < len; i++) { | ||
320 | if (!(i % 16) && i != 0) | ||
321 | printk ("\n "); | ||
322 | else if (!(i % 4) && i != 0) | ||
323 | printk(" "); | ||
324 | printk("%02X", *p++); | ||
325 | } | ||
326 | if (len % 16) | ||
327 | printk ("\n"); | ||
328 | return; | ||
329 | } | ||
330 | #define iucv_debug(lvl, fmt, args...) \ | ||
331 | do { \ | ||
332 | if (debuglevel >= lvl) \ | ||
333 | printk(KERN_DEBUG "%s: " fmt "\n", __FUNCTION__ , ## args); \ | ||
334 | } while (0) | ||
335 | |||
336 | #else | ||
337 | |||
338 | #define iucv_debug(lvl, fmt, args...) do { } while (0) | ||
339 | #define iucv_dumpit(title, buf, len) do { } while (0) | ||
340 | |||
341 | #endif | ||
342 | |||
343 | /* | ||
344 | * Internal functions | ||
345 | *******************************************************************************/ | ||
346 | |||
347 | /** | ||
348 | * print start banner | ||
349 | */ | ||
350 | static void | ||
351 | iucv_banner(void) | ||
352 | { | ||
353 | printk(KERN_INFO "IUCV lowlevel driver initialized\n"); | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * iucv_init - Initialization | ||
358 | * | ||
359 | * Allocates and initializes various data structures. | ||
360 | */ | ||
361 | static int | ||
362 | iucv_init(void) | ||
363 | { | ||
364 | int ret; | ||
365 | |||
366 | if (iucv_external_int_buffer) | ||
367 | return 0; | ||
368 | |||
369 | if (!MACHINE_IS_VM) { | ||
370 | printk(KERN_ERR "IUCV: IUCV connection needs VM as base\n"); | ||
371 | return -EPROTONOSUPPORT; | ||
372 | } | ||
373 | |||
374 | ret = bus_register(&iucv_bus); | ||
375 | if (ret) { | ||
376 | printk(KERN_ERR "IUCV: failed to register bus.\n"); | ||
377 | return ret; | ||
378 | } | ||
379 | |||
380 | iucv_root = s390_root_dev_register("iucv"); | ||
381 | if (IS_ERR(iucv_root)) { | ||
382 | printk(KERN_ERR "IUCV: failed to register iucv root.\n"); | ||
383 | bus_unregister(&iucv_bus); | ||
384 | return PTR_ERR(iucv_root); | ||
385 | } | ||
386 | |||
387 | /* Note: GFP_DMA is used to get memory below 2G */ | ||
388 | iucv_external_int_buffer = kzalloc(sizeof(iucv_GeneralInterrupt), | ||
389 | GFP_KERNEL|GFP_DMA); | ||
390 | if (!iucv_external_int_buffer) { | ||
391 | printk(KERN_WARNING | ||
392 | "%s: Could not allocate external interrupt buffer\n", | ||
393 | __FUNCTION__); | ||
394 | s390_root_dev_unregister(iucv_root); | ||
395 | bus_unregister(&iucv_bus); | ||
396 | return -ENOMEM; | ||
397 | } | ||
398 | |||
399 | /* Initialize parameter pool */ | ||
400 | iucv_param_pool = kzalloc(sizeof(iucv_param) * PARAM_POOL_SIZE, | ||
401 | GFP_KERNEL|GFP_DMA); | ||
402 | if (!iucv_param_pool) { | ||
403 | printk(KERN_WARNING "%s: Could not allocate param pool\n", | ||
404 | __FUNCTION__); | ||
405 | kfree(iucv_external_int_buffer); | ||
406 | iucv_external_int_buffer = NULL; | ||
407 | s390_root_dev_unregister(iucv_root); | ||
408 | bus_unregister(&iucv_bus); | ||
409 | return -ENOMEM; | ||
410 | } | ||
411 | |||
412 | /* Initialize irq queue */ | ||
413 | INIT_LIST_HEAD(&iucv_irq_queue); | ||
414 | |||
415 | /* Initialize handler table */ | ||
416 | INIT_LIST_HEAD(&iucv_handler_table); | ||
417 | |||
418 | iucv_banner(); | ||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | /** | ||
423 | * iucv_exit - De-Initialization | ||
424 | * | ||
425 | * Frees everything allocated from iucv_init. | ||
426 | */ | ||
427 | static int iucv_retrieve_buffer (void); | ||
428 | |||
429 | static void | ||
430 | iucv_exit(void) | ||
431 | { | ||
432 | iucv_retrieve_buffer(); | ||
433 | kfree(iucv_external_int_buffer); | ||
434 | iucv_external_int_buffer = NULL; | ||
435 | kfree(iucv_param_pool); | ||
436 | iucv_param_pool = NULL; | ||
437 | s390_root_dev_unregister(iucv_root); | ||
438 | bus_unregister(&iucv_bus); | ||
439 | printk(KERN_INFO "IUCV lowlevel driver unloaded\n"); | ||
440 | } | ||
441 | |||
442 | /** | ||
443 | * grab_param: - Get a parameter buffer from the pre-allocated pool. | ||
444 | * | ||
445 | * This function searches for an unused element in the pre-allocated pool | ||
446 | * of parameter buffers. If one is found, it marks it "in use" and returns | ||
447 | * a pointer to it. The calling function is responsible for releasing it | ||
448 | * when it has finished its usage. | ||
449 | * | ||
450 | * Returns: A pointer to iucv_param. | ||
451 | */ | ||
452 | static __inline__ iucv_param * | ||
453 | grab_param(void) | ||
454 | { | ||
455 | iucv_param *ptr; | ||
456 | static int hint = 0; | ||
457 | |||
458 | ptr = iucv_param_pool + hint; | ||
459 | do { | ||
460 | ptr++; | ||
461 | if (ptr >= iucv_param_pool + PARAM_POOL_SIZE) | ||
462 | ptr = iucv_param_pool; | ||
463 | } while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0); | ||
464 | hint = ptr - iucv_param_pool; | ||
465 | |||
466 | memset(&ptr->param, 0, sizeof(ptr->param)); | ||
467 | return ptr; | ||
468 | } | ||
469 | |||
470 | /** | ||
471 | * release_param - Release a parameter buffer. | ||
472 | * @p: A pointer to a struct iucv_param, previously obtained by calling | ||
473 | * grab_param(). | ||
474 | * | ||
475 | * This function marks the specified parameter buffer "unused". | ||
476 | */ | ||
477 | static __inline__ void | ||
478 | release_param(void *p) | ||
479 | { | ||
480 | atomic_set(&((iucv_param *)p)->in_use, 0); | ||
481 | } | ||
482 | |||
483 | /** | ||
484 | * iucv_add_handler: - Add a new handler | ||
485 | * @new_handler: handle that is being entered into chain. | ||
486 | * | ||
487 | * Places new handle on iucv_handler_table, if identical handler is not | ||
488 | * found. | ||
489 | * | ||
490 | * Returns: 0 on success, !0 on failure (handler already in chain). | ||
491 | */ | ||
492 | static int | ||
493 | iucv_add_handler (handler *new) | ||
494 | { | ||
495 | ulong flags; | ||
496 | |||
497 | iucv_debug(1, "entering"); | ||
498 | iucv_dumpit("handler:", new, sizeof(handler)); | ||
499 | |||
500 | spin_lock_irqsave (&iucv_lock, flags); | ||
501 | if (!list_empty(&iucv_handler_table)) { | ||
502 | struct list_head *lh; | ||
503 | |||
504 | /** | ||
505 | * Search list for handler with identical id. If one | ||
506 | * is found, the new handler is _not_ added. | ||
507 | */ | ||
508 | list_for_each(lh, &iucv_handler_table) { | ||
509 | handler *h = list_entry(lh, handler, list); | ||
510 | if (!memcmp(&new->id, &h->id, sizeof(h->id))) { | ||
511 | iucv_debug(1, "ret 1"); | ||
512 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
513 | return 1; | ||
514 | } | ||
515 | } | ||
516 | } | ||
517 | /** | ||
518 | * If we get here, no handler was found. | ||
519 | */ | ||
520 | INIT_LIST_HEAD(&new->list); | ||
521 | list_add(&new->list, &iucv_handler_table); | ||
522 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
523 | |||
524 | iucv_debug(1, "exiting"); | ||
525 | return 0; | ||
526 | } | ||
527 | |||
528 | /** | ||
529 | * b2f0: | ||
530 | * @code: identifier of IUCV call to CP. | ||
531 | * @parm: pointer to 40 byte iparml area passed to CP | ||
532 | * | ||
533 | * Calls CP to execute IUCV commands. | ||
534 | * | ||
535 | * Returns: return code from CP's IUCV call | ||
536 | */ | ||
537 | static inline ulong b2f0(__u32 code, void *parm) | ||
538 | { | ||
539 | register unsigned long reg0 asm ("0"); | ||
540 | register unsigned long reg1 asm ("1"); | ||
541 | iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param)); | ||
542 | |||
543 | reg0 = code; | ||
544 | reg1 = virt_to_phys(parm); | ||
545 | asm volatile(".long 0xb2f01000" : : "d" (reg0), "a" (reg1)); | ||
546 | |||
547 | iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param)); | ||
548 | |||
549 | return (unsigned long)*((__u8 *)(parm + 3)); | ||
550 | } | ||
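Nearly every IUCV primitive below follows the same sequence around b2f0(): claim a parameter block from the pool, fill in the fields that particular function needs, issue the instruction, read CP's return code, and release the block. A condensed sketch (example_purge is hypothetical, but the fields and flags mirror the real iucv_purge further down):

/* Condensed illustration of the grab/fill/call/release pattern. */
static ulong example_purge(__u16 pathid, __u32 msgid, __u32 srccls)
{
        iparml_purge *parm;
        ulong rc;

        parm = (iparml_purge *) grab_param();   /* claim a zeroed 40-byte block */
        parm->ippathid = pathid;
        parm->ipmsgid  = msgid;
        parm->ipsrccls = srccls;
        parm->ipflags1 |= (IPSRCCLS | IPFGMID | IPFGPID);
        rc = b2f0(PURGE, parm);                 /* CP return code, 0 on success */
        release_param(parm);                    /* hand the pool slot back */
        return rc;
}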
551 | |||
552 | /* | ||
553 | * Name: iucv_add_pathid | ||
554 | * Purpose: Adds a path id to the system. | ||
555 | * Input: pathid - pathid that is going to be entered into system | ||
556 | * handle - address of handler that the pathid will be associated | ||
557 | * with. | ||
558 | * pgm_data - token passed in by application. | ||
559 | * Output: 0: successful addition of pathid | ||
560 | * - EINVAL - pathid entry is being used by another application | ||
561 | * - ENOMEM - storage allocation for a new pathid table failed | ||
562 | */ | ||
563 | static int | ||
564 | __iucv_add_pathid(__u16 pathid, handler *handler) | ||
565 | { | ||
566 | |||
567 | iucv_debug(1, "entering"); | ||
568 | |||
569 | iucv_debug(1, "handler is pointing to %p", handler); | ||
570 | |||
571 | if (pathid > (max_connections - 1)) | ||
572 | return -EINVAL; | ||
573 | |||
574 | if (iucv_pathid_table[pathid]) { | ||
575 | iucv_debug(1, "pathid entry is %p", iucv_pathid_table[pathid]); | ||
576 | printk(KERN_WARNING | ||
577 | "%s: Pathid being used, error.\n", __FUNCTION__); | ||
578 | return -EINVAL; | ||
579 | } | ||
580 | iucv_pathid_table[pathid] = handler; | ||
581 | |||
582 | iucv_debug(1, "exiting"); | ||
583 | return 0; | ||
584 | } /* end of add_pathid function */ | ||
585 | |||
586 | static int | ||
587 | iucv_add_pathid(__u16 pathid, handler *handler) | ||
588 | { | ||
589 | ulong flags; | ||
590 | int rc; | ||
591 | |||
592 | spin_lock_irqsave (&iucv_lock, flags); | ||
593 | rc = __iucv_add_pathid(pathid, handler); | ||
594 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
595 | return rc; | ||
596 | } | ||
597 | |||
598 | static void | ||
599 | iucv_remove_pathid(__u16 pathid) | ||
600 | { | ||
601 | ulong flags; | ||
602 | |||
603 | if (pathid > (max_connections - 1)) | ||
604 | return; | ||
605 | |||
606 | spin_lock_irqsave (&iucv_lock, flags); | ||
607 | iucv_pathid_table[pathid] = NULL; | ||
608 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
609 | } | ||
610 | |||
611 | /** | ||
612 | * iucv_declare_buffer_cpuid | ||
613 | * Register at VM for subsequent IUCV operations. This is executed | ||
614 | * on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer(). | ||
615 | */ | ||
616 | static void | ||
617 | iucv_declare_buffer_cpuid (void *result) | ||
618 | { | ||
619 | iparml_db *parm; | ||
620 | |||
621 | parm = (iparml_db *)grab_param(); | ||
622 | parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer); | ||
623 | if ((*((ulong *)result) = b2f0(DECLARE_BUFFER, parm)) == 1) | ||
624 | *((ulong *)result) = parm->iprcode; | ||
625 | release_param(parm); | ||
626 | } | ||
627 | |||
628 | /** | ||
629 | * iucv_retrieve_buffer_cpuid: | ||
630 | * Unregister IUCV usage at VM. This is always executed on the same | ||
631 | * cpu that registered the buffer to VM. | ||
632 | * Called from iucv_retrieve_buffer(). | ||
633 | */ | ||
634 | static void | ||
635 | iucv_retrieve_buffer_cpuid (void *cpu) | ||
636 | { | ||
637 | iparml_control *parm; | ||
638 | |||
639 | parm = (iparml_control *)grab_param(); | ||
640 | b2f0(RETRIEVE_BUFFER, parm); | ||
641 | release_param(parm); | ||
642 | } | ||
643 | |||
644 | /** | ||
645 | * Name: iucv_declare_buffer | ||
646 | * Purpose: Specifies the guest's real address of an external | ||
647 | * interrupt. | ||
648 | * Input: void | ||
649 | * Output: iprcode - return code from b2f0 call | ||
650 | */ | ||
651 | static int | ||
652 | iucv_declare_buffer (void) | ||
653 | { | ||
654 | unsigned long flags; | ||
655 | ulong b2f0_result; | ||
656 | |||
657 | iucv_debug(1, "entering"); | ||
658 | b2f0_result = -ENODEV; | ||
659 | spin_lock_irqsave (&iucv_lock, flags); | ||
660 | if (iucv_cpuid == -1) { | ||
661 | /* Reserve any cpu for use by iucv. */ | ||
662 | iucv_cpuid = smp_get_cpu(CPU_MASK_ALL); | ||
663 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
664 | smp_call_function_on(iucv_declare_buffer_cpuid, | ||
665 | &b2f0_result, 0, 1, iucv_cpuid); | ||
666 | if (b2f0_result) { | ||
667 | smp_put_cpu(iucv_cpuid); | ||
668 | iucv_cpuid = -1; | ||
669 | } | ||
670 | iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer); | ||
671 | } else { | ||
672 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
673 | b2f0_result = 0; | ||
674 | } | ||
675 | iucv_debug(1, "exiting"); | ||
676 | return b2f0_result; | ||
677 | } | ||
678 | |||
679 | /** | ||
680 | * iucv_retrieve_buffer: | ||
681 | * | ||
682 | * Terminates all use of IUCV. | ||
683 | * Returns: return code from CP | ||
684 | */ | ||
685 | static int | ||
686 | iucv_retrieve_buffer (void) | ||
687 | { | ||
688 | iucv_debug(1, "entering"); | ||
689 | if (iucv_cpuid != -1) { | ||
690 | smp_call_function_on(iucv_retrieve_buffer_cpuid, | ||
691 | NULL, 0, 1, iucv_cpuid); | ||
692 | /* Release the cpu reserved by iucv_declare_buffer. */ | ||
693 | smp_put_cpu(iucv_cpuid); | ||
694 | iucv_cpuid = -1; | ||
695 | } | ||
696 | iucv_debug(1, "exiting"); | ||
697 | return 0; | ||
698 | } | ||
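A brief note on the design above: DECLARE_BUFFER and RETRIEVE_BUFFER have to execute on the same CPU, so the driver reserves one CPU at declare time (iucv_cpuid) and routes both calls there using the old s390 helpers this file already relies on. The bare pattern looks roughly like this (some_setup and some_teardown are placeholders):

        int cpu;

        cpu = smp_get_cpu(CPU_MASK_ALL);                /* reserve any online cpu */
        smp_call_function_on(some_setup, arg, 0, 1, cpu);   /* run it there, wait */
        /* ... later, the matching teardown must run on the very same cpu ... */
        smp_call_function_on(some_teardown, NULL, 0, 1, cpu);
        smp_put_cpu(cpu);                               /* release the reservation */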
699 | |||
700 | /** | ||
701 | * iucv_remove_handler: | ||
702 | * @users_handler: handler to be removed | ||
703 | * | ||
704 | * Remove handler when application unregisters. | ||
705 | */ | ||
706 | static void | ||
707 | iucv_remove_handler(handler *handler) | ||
708 | { | ||
709 | unsigned long flags; | ||
710 | |||
711 | if ((!iucv_pathid_table) || (!handler)) | ||
712 | return; | ||
713 | |||
714 | iucv_debug(1, "entering"); | ||
715 | |||
716 | spin_lock_irqsave (&iucv_lock, flags); | ||
717 | list_del(&handler->list); | ||
718 | if (list_empty(&iucv_handler_table)) { | ||
719 | if (register_flag) { | ||
720 | unregister_external_interrupt(0x4000, iucv_irq_handler); | ||
721 | register_flag = 0; | ||
722 | } | ||
723 | } | ||
724 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
725 | |||
726 | iucv_debug(1, "exiting"); | ||
727 | return; | ||
728 | } | ||
729 | |||
730 | /** | ||
731 | * iucv_register_program: | ||
732 | * @pgmname: user identification | ||
733 | * @userid: machine identification | ||
734 | * @pgmmask: Indicates which bits in the pgmname and userid combined will be | ||
735 | * used to determine who is given control. | ||
736 | * @ops: Address of interrupt handler table. | ||
737 | * @pgm_data: Application data to be passed to interrupt handlers. | ||
738 | * | ||
739 | * Registers an application with IUCV. | ||
740 | * Returns: | ||
741 | * The address of handler, or NULL on failure. | ||
742 | * NOTE on pgmmask: | ||
743 | * If pgmname, userid and pgmmask are provided, pgmmask is entered into the | ||
744 | * handler as is. | ||
745 | * If pgmmask is NULL, the internal mask is set to all 0xff's | ||
746 | * When userid is NULL, the first 8 bytes of the internal mask are forced | ||
747 | * to 0x00. | ||
748 | * If pgmmask and userid are NULL, the first 8 bytes of the internal mask | ||
749 | * are forced to 0x00 and the last 16 bytes to 0xff. | ||
750 | */ | ||
751 | |||
752 | iucv_handle_t | ||
753 | iucv_register_program (__u8 pgmname[16], | ||
754 | __u8 userid[8], | ||
755 | __u8 pgmmask[24], | ||
756 | iucv_interrupt_ops_t * ops, void *pgm_data) | ||
757 | { | ||
758 | ulong rc = 0; /* return code from function calls */ | ||
759 | handler *new_handler; | ||
760 | |||
761 | iucv_debug(1, "entering"); | ||
762 | |||
763 | if (ops == NULL) { | ||
764 | /* interrupt table is not defined */ | ||
765 | printk(KERN_WARNING "%s: Interrupt table is not defined, " | ||
766 | "exiting\n", __FUNCTION__); | ||
767 | return NULL; | ||
768 | } | ||
769 | if (!pgmname) { | ||
770 | printk(KERN_WARNING "%s: pgmname not provided\n", __FUNCTION__); | ||
771 | return NULL; | ||
772 | } | ||
773 | |||
774 | /* Allocate handler entry */ | ||
775 | new_handler = kmalloc(sizeof(handler), GFP_ATOMIC); | ||
776 | if (new_handler == NULL) { | ||
777 | printk(KERN_WARNING "%s: storage allocation for new handler " | ||
778 | "failed.\n", __FUNCTION__); | ||
779 | return NULL; | ||
780 | } | ||
781 | |||
782 | if (!iucv_pathid_table) { | ||
783 | if (iucv_init()) { | ||
784 | kfree(new_handler); | ||
785 | return NULL; | ||
786 | } | ||
787 | |||
788 | max_connections = iucv_query_maxconn(); | ||
789 | iucv_pathid_table = kcalloc(max_connections, sizeof(handler *), | ||
790 | GFP_ATOMIC); | ||
791 | if (iucv_pathid_table == NULL) { | ||
792 | printk(KERN_WARNING "%s: iucv_pathid_table storage " | ||
793 | "allocation failed\n", __FUNCTION__); | ||
794 | kfree(new_handler); | ||
795 | return NULL; | ||
796 | } | ||
797 | } | ||
798 | memset(new_handler, 0, sizeof (handler)); | ||
799 | memcpy(new_handler->id.user_data, pgmname, | ||
800 | sizeof (new_handler->id.user_data)); | ||
801 | if (userid) { | ||
802 | memcpy (new_handler->id.userid, userid, | ||
803 | sizeof (new_handler->id.userid)); | ||
804 | ASCEBC (new_handler->id.userid, | ||
805 | sizeof (new_handler->id.userid)); | ||
806 | EBC_TOUPPER (new_handler->id.userid, | ||
807 | sizeof (new_handler->id.userid)); | ||
808 | |||
809 | if (pgmmask) { | ||
810 | memcpy (new_handler->id.mask, pgmmask, | ||
811 | sizeof (new_handler->id.mask)); | ||
812 | } else { | ||
813 | memset (new_handler->id.mask, 0xFF, | ||
814 | sizeof (new_handler->id.mask)); | ||
815 | } | ||
816 | } else { | ||
817 | if (pgmmask) { | ||
818 | memcpy (new_handler->id.mask, pgmmask, | ||
819 | sizeof (new_handler->id.mask)); | ||
820 | } else { | ||
821 | memset (new_handler->id.mask, 0xFF, | ||
822 | sizeof (new_handler->id.mask)); | ||
823 | } | ||
824 | memset (new_handler->id.userid, 0x00, | ||
825 | sizeof (new_handler->id.userid)); | ||
826 | } | ||
827 | /* fill in the rest of handler */ | ||
828 | new_handler->pgm_data = pgm_data; | ||
829 | new_handler->interrupt_table = ops; | ||
830 | |||
831 | /* | ||
832 | * Check if someone else is registered with same pgmname, userid | ||
833 | * and mask. If someone is already registered with same pgmname, | ||
834 | * userid and mask, registration will fail and NULL will be returned | ||
835 | * to the application. | ||
836 | * If identical handler not found, then handler is added to list. | ||
837 | */ | ||
838 | rc = iucv_add_handler(new_handler); | ||
839 | if (rc) { | ||
840 | printk(KERN_WARNING "%s: Someone already registered with same " | ||
841 | "pgmname, userid, pgmmask\n", __FUNCTION__); | ||
842 | kfree (new_handler); | ||
843 | return NULL; | ||
844 | } | ||
845 | |||
846 | rc = iucv_declare_buffer(); | ||
847 | if (rc) { | ||
848 | char *err = "Unknown"; | ||
849 | iucv_remove_handler(new_handler); | ||
850 | kfree(new_handler); | ||
851 | switch(rc) { | ||
852 | case 0x03: | ||
853 | err = "Directory error"; | ||
854 | break; | ||
855 | case 0x0a: | ||
856 | err = "Invalid length"; | ||
857 | break; | ||
858 | case 0x13: | ||
859 | err = "Buffer already exists"; | ||
860 | break; | ||
861 | case 0x3e: | ||
862 | err = "Buffer overlap"; | ||
863 | break; | ||
864 | case 0x5c: | ||
865 | err = "Paging or storage error"; | ||
866 | break; | ||
867 | } | ||
868 | printk(KERN_WARNING "%s: iucv_declare_buffer " | ||
869 | "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err); | ||
870 | return NULL; | ||
871 | } | ||
872 | if (!register_flag) { | ||
873 | /* request the 0x4000 external interrupt */ | ||
874 | rc = register_external_interrupt (0x4000, iucv_irq_handler); | ||
875 | if (rc) { | ||
876 | iucv_remove_handler(new_handler); | ||
877 | kfree (new_handler); | ||
878 | printk(KERN_WARNING "%s: " | ||
879 | "register_external_interrupt returned %ld\n", | ||
880 | __FUNCTION__, rc); | ||
881 | return NULL; | ||
882 | |||
883 | } | ||
884 | register_flag = 1; | ||
885 | } | ||
886 | iucv_debug(1, "exiting"); | ||
887 | return new_handler; | ||
888 | } /* end of register function */ | ||
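To make the masking rules documented above concrete, a caller of this (now removed) interface typically registered roughly as follows; the names and the 16-byte tag are placeholders, and passing NULL for pgmmask relies on the documented default of an all-0xff mask:

static iucv_interrupt_ops_t my_ops;     /* fill in .ConnectionComplete, .MessagePending, ... */

static int example_register(void *my_private_data)
{
        static __u8 my_tag[16]   = "EXAMPLE-CLIENT  ";  /* pgmname / user_data, 16 bytes */
        static __u8 my_userid[8] = "*LOGREC ";          /* partner service, 8 bytes      */
        iucv_handle_t handle;

        handle = iucv_register_program(my_tag, my_userid, NULL /* all-0xff mask */,
                                       &my_ops, my_private_data);
        if (!handle)
                return -EIO;    /* duplicate registration or IUCV init failure */
        return 0;
}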
889 | |||
890 | /** | ||
891 | * iucv_unregister_program: | ||
892 | * @handle: address of handler | ||
893 | * | ||
894 | * Unregister application with IUCV. | ||
895 | * Returns: | ||
896 | * 0 on success, -EINVAL, if specified handle is invalid. | ||
897 | */ | ||
898 | |||
899 | int | ||
900 | iucv_unregister_program (iucv_handle_t handle) | ||
901 | { | ||
902 | handler *h = NULL; | ||
903 | struct list_head *lh; | ||
904 | int i; | ||
905 | ulong flags; | ||
906 | |||
907 | iucv_debug(1, "entering"); | ||
908 | iucv_debug(1, "address of handler is %p", h); | ||
909 | |||
910 | /* Checking if handle is valid */ | ||
911 | spin_lock_irqsave (&iucv_lock, flags); | ||
912 | list_for_each(lh, &iucv_handler_table) { | ||
913 | if ((handler *)handle == list_entry(lh, handler, list)) { | ||
914 | h = (handler *)handle; | ||
915 | break; | ||
916 | } | ||
917 | } | ||
918 | if (!h) { | ||
919 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
920 | if (handle) | ||
921 | printk(KERN_WARNING | ||
922 | "%s: Handler not found in iucv_handler_table.\n", | ||
923 | __FUNCTION__); | ||
924 | else | ||
925 | printk(KERN_WARNING | ||
926 | "%s: NULL handle passed by application.\n", | ||
927 | __FUNCTION__); | ||
928 | return -EINVAL; | ||
929 | } | ||
930 | |||
931 | /** | ||
932 | * First, walk thru iucv_pathid_table and sever any pathid which is | ||
933 | * still pointing to the handler to be removed. | ||
934 | */ | ||
935 | for (i = 0; i < max_connections; i++) | ||
936 | if (iucv_pathid_table[i] == h) { | ||
937 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
938 | iucv_sever(i, h->id.user_data); | ||
939 | spin_lock_irqsave(&iucv_lock, flags); | ||
940 | } | ||
941 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
942 | |||
943 | iucv_remove_handler(h); | ||
944 | kfree(h); | ||
945 | |||
946 | iucv_debug(1, "exiting"); | ||
947 | return 0; | ||
948 | } | ||
949 | |||
950 | /** | ||
951 | * iucv_accept: | ||
952 | * @pathid: Path identification number | ||
953 | * @msglim_reqstd: The number of outstanding messages requested. | ||
954 | * @user_data: Data specified by the iucv_connect function. | ||
955 | * @flags1: Contains options for this path. | ||
956 | * - IPPRTY (0x20) Specifies if you want to send priority message. | ||
957 | * - IPRMDATA (0x80) Specifies whether your program can handle a message | ||
958 | * in the parameter list. | ||
959 | * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being | ||
960 | * established. | ||
961 | * @handle: Address of handler. | ||
962 | * @pgm_data: Application data passed to interrupt handlers. | ||
963 | * @flags1_out: Pointer to an int. If not NULL, on return the options for | ||
964 | * the path are stored at the given location: | ||
965 | * - IPPRTY (0x20) Indicates you may send a priority message. | ||
966 | * @msglim: Pointer to an __u16. If not NULL, on return the maximum | ||
967 | * number of outstanding messages is stored at the given | ||
968 | * location. | ||
969 | * | ||
970 | * This function is issued after the user receives a Connection Pending external | ||
971 | * interrupt and now wishes to complete the IUCV communication path. | ||
972 | * Returns: | ||
973 | * return code from CP | ||
974 | */ | ||
975 | int | ||
976 | iucv_accept(__u16 pathid, __u16 msglim_reqstd, | ||
977 | __u8 user_data[16], int flags1, | ||
978 | iucv_handle_t handle, void *pgm_data, | ||
979 | int *flags1_out, __u16 * msglim) | ||
980 | { | ||
981 | ulong b2f0_result = 0; | ||
982 | ulong flags; | ||
983 | struct list_head *lh; | ||
984 | handler *h = NULL; | ||
985 | iparml_control *parm; | ||
986 | |||
987 | iucv_debug(1, "entering"); | ||
988 | iucv_debug(1, "pathid = %d", pathid); | ||
989 | |||
990 | /* Checking if handle is valid */ | ||
991 | spin_lock_irqsave (&iucv_lock, flags); | ||
992 | list_for_each(lh, &iucv_handler_table) { | ||
993 | if ((handler *)handle == list_entry(lh, handler, list)) { | ||
994 | h = (handler *)handle; | ||
995 | break; | ||
996 | } | ||
997 | } | ||
998 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
999 | |||
1000 | if (!h) { | ||
1001 | if (handle) | ||
1002 | printk(KERN_WARNING | ||
1003 | "%s: Handler not found in iucv_handler_table.\n", | ||
1004 | __FUNCTION__); | ||
1005 | else | ||
1006 | printk(KERN_WARNING | ||
1007 | "%s: NULL handle passed by application.\n", | ||
1008 | __FUNCTION__); | ||
1009 | return -EINVAL; | ||
1010 | } | ||
1011 | |||
1012 | parm = (iparml_control *)grab_param(); | ||
1013 | |||
1014 | parm->ippathid = pathid; | ||
1015 | parm->ipmsglim = msglim_reqstd; | ||
1016 | if (user_data) | ||
1017 | memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); | ||
1018 | |||
1019 | parm->ipflags1 = (__u8)flags1; | ||
1020 | b2f0_result = b2f0(ACCEPT, parm); | ||
1021 | |||
1022 | if (!b2f0_result) { | ||
1023 | if (msglim) | ||
1024 | *msglim = parm->ipmsglim; | ||
1025 | if (pgm_data) | ||
1026 | h->pgm_data = pgm_data; | ||
1027 | if (flags1_out) | ||
1028 | *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0; | ||
1029 | } | ||
1030 | release_param(parm); | ||
1031 | |||
1032 | iucv_debug(1, "exiting"); | ||
1033 | return b2f0_result; | ||
1034 | } | ||
1035 | |||
1036 | /** | ||
1037 | * iucv_connect: | ||
1038 | * @pathid: Path identification number | ||
1039 | * @msglim_reqstd: Number of outstanding messages requested | ||
1040 | * @user_data: 16-byte user data | ||
1041 | * @userid: 8-byte of user identification | ||
1042 | * @system_name: 8-byte identifying the system name | ||
1043 | * @flags1: Specifies options for this path: | ||
1044 | * - IPPRTY (0x20) Specifies if you want to send priority message. | ||
1045 | * - IPRMDATA (0x80) Specifies whether your program can handle a message | ||
1046 | * in the parameter list. | ||
1047 | * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being | ||
1048 | * established. | ||
1049 | * - IPLOCAL (0x01) Allows an application to force the partner to be on the | ||
1050 | * local system. If local is specified then target class | ||
1051 | * cannot be specified. | ||
1052 | * @flags1_out: Pointer to an int. If not NULL, on return the options for | ||
1053 | * the path are stored at the given location: | ||
1054 | * - IPPRTY (0x20) Indicates you may send a priority message. | ||
1055 | * @msglim: Pointer to an __u16. If not NULL, on return the maximum | ||
1056 | * number of outstanding messages is stored at the given | ||
1057 | * location. | ||
1058 | * @handle: Address of handler. | ||
1059 | * @pgm_data: Application data to be passed to interrupt handlers. | ||
1060 | * | ||
1061 | * This function establishes an IUCV path. Although the connect may complete | ||
1062 | * successfully, you are not able to use the path until you receive an IUCV | ||
1063 | * Connection Complete external interrupt. | ||
1064 | * Returns: return code from CP, or one of the following | ||
1065 | * - ENOMEM | ||
1066 | * - return code from iucv_declare_buffer | ||
1067 | * - EINVAL - invalid handle passed by application | ||
1068 | * - EINVAL - pathid address is NULL | ||
1069 | * - ENOMEM - pathid table storage allocation failed | ||
1070 | * - return code from internal function add_pathid | ||
1071 | */ | ||
1072 | int | ||
1073 | iucv_connect (__u16 *pathid, __u16 msglim_reqstd, | ||
1074 | __u8 user_data[16], __u8 userid[8], | ||
1075 | __u8 system_name[8], int flags1, | ||
1076 | int *flags1_out, __u16 * msglim, | ||
1077 | iucv_handle_t handle, void *pgm_data) | ||
1078 | { | ||
1079 | iparml_control *parm; | ||
1080 | iparml_control local_parm; | ||
1081 | struct list_head *lh; | ||
1082 | ulong b2f0_result = 0; | ||
1083 | ulong flags; | ||
1084 | int add_pathid_result = 0; | ||
1085 | handler *h = NULL; | ||
1086 | __u8 no_memory[16] = "NO MEMORY"; | ||
1087 | |||
1088 | iucv_debug(1, "entering"); | ||
1089 | |||
1090 | /* Checking if handle is valid */ | ||
1091 | spin_lock_irqsave (&iucv_lock, flags); | ||
1092 | list_for_each(lh, &iucv_handler_table) { | ||
1093 | if ((handler *)handle == list_entry(lh, handler, list)) { | ||
1094 | h = (handler *)handle; | ||
1095 | break; | ||
1096 | } | ||
1097 | } | ||
1098 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
1099 | |||
1100 | if (!h) { | ||
1101 | if (handle) | ||
1102 | printk(KERN_WARNING | ||
1103 | "%s: Handler not found in iucv_handler_table.\n", | ||
1104 | __FUNCTION__); | ||
1105 | else | ||
1106 | printk(KERN_WARNING | ||
1107 | "%s: NULL handle passed by application.\n", | ||
1108 | __FUNCTION__); | ||
1109 | return -EINVAL; | ||
1110 | } | ||
1111 | |||
1112 | if (pathid == NULL) { | ||
1113 | printk(KERN_WARNING "%s: NULL pathid pointer\n", | ||
1114 | __FUNCTION__); | ||
1115 | return -EINVAL; | ||
1116 | } | ||
1117 | |||
1118 | parm = (iparml_control *)grab_param(); | ||
1119 | |||
1120 | parm->ipmsglim = msglim_reqstd; | ||
1121 | |||
1122 | if (user_data) | ||
1123 | memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); | ||
1124 | |||
1125 | if (userid) { | ||
1126 | memcpy(parm->ipvmid, userid, sizeof(parm->ipvmid)); | ||
1127 | ASCEBC(parm->ipvmid, sizeof(parm->ipvmid)); | ||
1128 | EBC_TOUPPER(parm->ipvmid, sizeof(parm->ipvmid)); | ||
1129 | } | ||
1130 | |||
1131 | if (system_name) { | ||
1132 | memcpy(parm->iptarget, system_name, sizeof(parm->iptarget)); | ||
1133 | ASCEBC(parm->iptarget, sizeof(parm->iptarget)); | ||
1134 | EBC_TOUPPER(parm->iptarget, sizeof(parm->iptarget)); | ||
1135 | } | ||
1136 | |||
1137 | /* In order to establish an IUCV connection, the procedure is: | ||
1138 | * | ||
1139 | * b2f0(CONNECT) | ||
1140 | * take the ippathid from the b2f0 call | ||
1141 | * register the handler to the ippathid | ||
1142 | * | ||
1143 | * Unfortunately, the ConnectionEstablished message gets sent after the | ||
1144 | * b2f0(CONNECT) call but before the register is handled. | ||
1145 | * | ||
1146 | * In order for this race condition to be eliminated, the IUCV Control | ||
1147 | * Interrupts must be disabled for the above procedure. | ||
1148 | * | ||
1149 | * David Kennedy <dkennedy@linuxcare.com> | ||
1150 | */ | ||
1151 | |||
1152 | /* Enable everything but IUCV Control messages */ | ||
1153 | iucv_setmask(~(AllInterrupts)); | ||
1154 | messagesDisabled = 1; | ||
1155 | |||
1156 | spin_lock_irqsave (&iucv_lock, flags); | ||
1157 | parm->ipflags1 = (__u8)flags1; | ||
1158 | b2f0_result = b2f0(CONNECT, parm); | ||
1159 | memcpy(&local_parm, parm, sizeof(local_parm)); | ||
1160 | release_param(parm); | ||
1161 | parm = &local_parm; | ||
1162 | if (!b2f0_result) | ||
1163 | add_pathid_result = __iucv_add_pathid(parm->ippathid, h); | ||
1164 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
1165 | |||
1166 | if (b2f0_result) { | ||
1167 | iucv_setmask(~0); | ||
1168 | messagesDisabled = 0; | ||
1169 | return b2f0_result; | ||
1170 | } | ||
1171 | |||
1172 | *pathid = parm->ippathid; | ||
1173 | |||
1174 | /* Enable everything again */ | ||
1175 | iucv_setmask(IUCVControlInterruptsFlag); | ||
1176 | |||
1177 | if (msglim) | ||
1178 | *msglim = parm->ipmsglim; | ||
1179 | if (flags1_out) | ||
1180 | *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0; | ||
1181 | |||
1182 | if (add_pathid_result) { | ||
1183 | iucv_sever(*pathid, no_memory); | ||
1184 | printk(KERN_WARNING "%s: add_pathid failed with rc =" | ||
1185 | " %d\n", __FUNCTION__, add_pathid_result); | ||
1186 | return(add_pathid_result); | ||
1187 | } | ||
1188 | |||
1189 | iucv_debug(1, "exiting"); | ||
1190 | return b2f0_result; | ||
1191 | } | ||
1192 | |||
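A minimal usage sketch, assuming my_handle was returned by an earlier iucv_register_program() call and that PEERVM names the partner guest (both names are illustrative, not part of this source):

	__u8 peer[8] = { 'P', 'E', 'E', 'R', 'V', 'M', ' ', ' ' };
	__u16 pathid, msglim;
	int flags1_out;
	int rc;

	rc = iucv_connect(&pathid, 10 /* msglim_reqstd */, NULL, peer,
			  NULL /* any system */, 0, &flags1_out, &msglim,
			  my_handle, NULL);
	if (rc)
		printk(KERN_WARNING "iucv_connect failed, rc = %d\n", rc);
	/* The path becomes usable once the ConnectionComplete interrupt arrives. */
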
1193 | /** | ||
1194 | * iucv_purge: | ||
1195 | * @pathid: Path identification number | ||
1196 | * @msgid: Message ID of message to purge. | ||
1197 | * @srccls: Message class of the message to purge. | ||
1198 | * @audit: Pointer to an __u32. If not NULL, on return, information about | ||
1199 | * asynchronous errors that may have affected the normal completion | ||
1200 | * of this message is stored at the given location. | ||
1201 | * | ||
1202 | * Cancels a message you have sent. | ||
1203 | * Returns: return code from CP | ||
1204 | */ | ||
1205 | int | ||
1206 | iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit) | ||
1207 | { | ||
1208 | iparml_purge *parm; | ||
1209 | ulong b2f0_result = 0; | ||
1210 | |||
1211 | iucv_debug(1, "entering"); | ||
1212 | iucv_debug(1, "pathid = %d", pathid); | ||
1213 | |||
1214 | parm = (iparml_purge *)grab_param(); | ||
1215 | |||
1216 | parm->ipmsgid = msgid; | ||
1217 | parm->ippathid = pathid; | ||
1218 | parm->ipsrccls = srccls; | ||
1219 | parm->ipflags1 |= (IPSRCCLS | IPFGMID | IPFGPID); | ||
1220 | b2f0_result = b2f0(PURGE, parm); | ||
1221 | |||
1222 | if (!b2f0_result && audit) { | ||
1223 | memcpy(audit, parm->ipaudit, sizeof(parm->ipaudit)); | ||
1224 | /* parm->ipaudit has only 3 bytes */ | ||
1225 | *audit >>= 8; | ||
1226 | } | ||
1227 | |||
1228 | release_param(parm); | ||
1229 | |||
1230 | iucv_debug(1, "b2f0_result = %ld", b2f0_result); | ||
1231 | iucv_debug(1, "exiting"); | ||
1232 | return b2f0_result; | ||
1233 | } | ||
1234 | |||
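Because parm->ipaudit holds only three bytes, the copy above places them in the high-order bytes of the caller's word on this big-endian machine, and the shift right-aligns them. Roughly:

	/* A0 A1 A2 = the three audit bytes, xx = the caller's untouched low byte */
	/* after memcpy:        *audit == 0xA0A1A2xx */
	/* after *audit >>= 8:  *audit == 0x00A0A1A2 */
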
1235 | /** | ||
1236 | * iucv_query_generic: | ||
1237 | * @want_maxconn: Flag, describing which value is to be returned. | ||
1238 | * | ||
1239 | * Helper function for iucv_query_maxconn() and iucv_query_bufsize(). | ||
1240 | * | ||
1241 | * Returns: The buffersize, if want_maxconn is 0; the maximum number of | ||
1242 | * connections, if want_maxconn is 1 or an error-code < 0 on failure. | ||
1243 | */ | ||
1244 | static int | ||
1245 | iucv_query_generic(int want_maxconn) | ||
1246 | { | ||
1247 | register unsigned long reg0 asm ("0"); | ||
1248 | register unsigned long reg1 asm ("1"); | ||
1249 | iparml_purge *parm = (iparml_purge *)grab_param(); | ||
1250 | int bufsize, maxconn; | ||
1251 | int ccode; | ||
1252 | |||
1253 | /** | ||
1254 | * Call b2f0 and store R0 (max buffer size), | ||
1255 | * R1 (max connections) and CC. | ||
1256 | */ | ||
1257 | reg0 = QUERY; | ||
1258 | reg1 = virt_to_phys(parm); | ||
1259 | asm volatile( | ||
1260 | " .long 0xb2f01000\n" | ||
1261 | " ipm %0\n" | ||
1262 | " srl %0,28\n" | ||
1263 | : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc"); | ||
1264 | bufsize = reg0; | ||
1265 | maxconn = reg1; | ||
1266 | release_param(parm); | ||
1267 | |||
1268 | if (ccode) | ||
1269 | return -EPERM; | ||
1270 | if (want_maxconn) | ||
1271 | return maxconn; | ||
1272 | return bufsize; | ||
1273 | } | ||
1274 | |||
1275 | /** | ||
1276 | * iucv_query_maxconn: | ||
1277 | * | ||
1278 | * Determines the maximum number of connections that may be established. | ||
1279 | * | ||
1280 | * Returns: Maximum number of connections that can be established. | ||
1281 | */ | ||
1282 | ulong | ||
1283 | iucv_query_maxconn(void) | ||
1284 | { | ||
1285 | return iucv_query_generic(1); | ||
1286 | } | ||
1287 | |||
1288 | /** | ||
1289 | * iucv_query_bufsize: | ||
1290 | * | ||
1291 | * Determines the size of the external interrupt buffer. | ||
1292 | * | ||
1293 | * Returns: Size of external interrupt buffer. | ||
1294 | */ | ||
1295 | ulong | ||
1296 | iucv_query_bufsize (void) | ||
1297 | { | ||
1298 | return iucv_query_generic(0); | ||
1299 | } | ||
1300 | |||
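A hedged sketch of how these wrappers could be used during setup; either call yields -EPERM (as ulong) if the QUERY instruction fails:

	ulong max_conn = iucv_query_maxconn();
	ulong buf_size = iucv_query_bufsize();

	printk(KERN_INFO "iucv: up to %ld connections, %ld byte interrupt buffer\n",
	       max_conn, buf_size);
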
1301 | /** | ||
1302 | * iucv_quiesce: | ||
1303 | * @pathid: Path identification number | ||
1304 | * @user_data: 16-byte user data | ||
1305 | * | ||
1306 | * Temporarily suspends incoming messages on an IUCV path. | ||
1307 | * You can later reactivate the path by invoking the iucv_resume function. | ||
1308 | * Returns: return code from CP | ||
1309 | */ | ||
1310 | int | ||
1311 | iucv_quiesce (__u16 pathid, __u8 user_data[16]) | ||
1312 | { | ||
1313 | iparml_control *parm; | ||
1314 | ulong b2f0_result = 0; | ||
1315 | |||
1316 | iucv_debug(1, "entering"); | ||
1317 | iucv_debug(1, "pathid = %d", pathid); | ||
1318 | |||
1319 | parm = (iparml_control *)grab_param(); | ||
1320 | |||
1321 | memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); | ||
1322 | parm->ippathid = pathid; | ||
1323 | |||
1324 | b2f0_result = b2f0(QUIESCE, parm); | ||
1325 | release_param(parm); | ||
1326 | |||
1327 | iucv_debug(1, "b2f0_result = %ld", b2f0_result); | ||
1328 | iucv_debug(1, "exiting"); | ||
1329 | |||
1330 | return b2f0_result; | ||
1331 | } | ||
1332 | |||
1333 | /** | ||
1334 | * iucv_receive: | ||
1335 | * @pathid: Path identification number. | ||
1336 | * @buffer: Address of buffer to receive. Must be below 2G. | ||
1337 | * @buflen: Length of buffer to receive. | ||
1338 | * @msgid: Specifies the message ID. | ||
1339 | * @trgcls: Specifies target class. | ||
1340 | * @flags1_out: Receives options for path on return. | ||
1341 | * - IPNORPY (0x10) Specifies whether a reply is required | ||
1342 | * - IPPRTY (0x20) Specifies if you want to send priority message | ||
1343 | * - IPRMDATA (0x80) Specifies the data is contained in the parameter list | ||
1344 | * @residual_buffer: Receives the address of buffer updated by the number | ||
1345 | * of bytes you have received on return. | ||
1346 | * @residual_length: On return, receives one of the following values: | ||
1347 | * - 0 If the receive buffer is the same length as | ||
1348 | * the message. | ||
1349 | * - Remaining bytes in buffer If the receive buffer is longer than the | ||
1350 | * message. | ||
1351 | * - Remaining bytes in message If the receive buffer is shorter than the | ||
1352 | * message. | ||
1353 | * | ||
1354 | * This function receives messages that are being sent to you over established | ||
1355 | * paths. | ||
1356 | * Returns: return code from CP IUCV call; if the receive buffer is shorter | ||
1357 | * than the message, always 5 | ||
1358 | * -EINVAL - buffer address is pointing to NULL | ||
1359 | */ | ||
1360 | int | ||
1361 | iucv_receive (__u16 pathid, __u32 msgid, __u32 trgcls, | ||
1362 | void *buffer, ulong buflen, | ||
1363 | int *flags1_out, ulong * residual_buffer, ulong * residual_length) | ||
1364 | { | ||
1365 | iparml_db *parm; | ||
1366 | ulong b2f0_result; | ||
1367 | int moved = 0; /* number of bytes moved from parmlist to buffer */ | ||
1368 | |||
1369 | iucv_debug(2, "entering"); | ||
1370 | |||
1371 | if (!buffer) | ||
1372 | return -EINVAL; | ||
1373 | |||
1374 | parm = (iparml_db *)grab_param(); | ||
1375 | |||
1376 | parm->ipbfadr1 = (__u32) (addr_t) buffer; | ||
1377 | parm->ipbfln1f = (__u32) ((ulong) buflen); | ||
1378 | parm->ipmsgid = msgid; | ||
1379 | parm->ippathid = pathid; | ||
1380 | parm->iptrgcls = trgcls; | ||
1381 | parm->ipflags1 = (IPFGPID | IPFGMID | IPFGMCL); | ||
1382 | |||
1383 | b2f0_result = b2f0(RECEIVE, parm); | ||
1384 | |||
1385 | if (!b2f0_result || b2f0_result == 5) { | ||
1386 | if (flags1_out) { | ||
1387 | iucv_debug(2, "*flags1_out = %d", *flags1_out); | ||
1388 | *flags1_out = (parm->ipflags1 & (~0x07)); | ||
1389 | iucv_debug(2, "*flags1_out = %d", *flags1_out); | ||
1390 | } | ||
1391 | |||
1392 | if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */ | ||
1393 | if (residual_length) | ||
1394 | *residual_length = parm->ipbfln1f; | ||
1395 | |||
1396 | if (residual_buffer) | ||
1397 | *residual_buffer = parm->ipbfadr1; | ||
1398 | } else { | ||
1399 | moved = min_t (unsigned long, buflen, 8); | ||
1400 | |||
1401 | memcpy ((char *) buffer, | ||
1402 | (char *) &parm->ipbfadr1, moved); | ||
1403 | |||
1404 | if (buflen < 8) | ||
1405 | b2f0_result = 5; | ||
1406 | |||
1407 | if (residual_length) | ||
1408 | *residual_length = abs (buflen - 8); | ||
1409 | |||
1410 | if (residual_buffer) | ||
1411 | *residual_buffer = (ulong) (buffer + moved); | ||
1412 | } | ||
1413 | } | ||
1414 | release_param(parm); | ||
1415 | |||
1416 | iucv_debug(2, "exiting"); | ||
1417 | return b2f0_result; | ||
1418 | } | ||
1419 | |||
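A hedged sketch: pull a pending message into a private buffer. pathid, msgid and trgcls are assumed to come from the MessagePending interrupt that announced the message, and buf/buflen are assumed to describe storage allocated below 2G (for example via kmalloc with GFP_DMA):

	int flags1_out;
	ulong resid_adr, resid_len;
	int rc;

	rc = iucv_receive(pathid, msgid, trgcls, buf, buflen,
			  &flags1_out, &resid_adr, &resid_len);
	if (rc == 5)
		printk(KERN_WARNING "message truncated, %ld bytes left\n", resid_len);
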
1420 | /* | ||
1421 | * Name: iucv_receive_array | ||
1422 | * Purpose: This function receives messages that are being sent to you | ||
1423 | * over established paths. | ||
1424 | * Input: pathid - path identification number | ||
1425 | * buffer - address of array of buffers | ||
1426 | * buflen - total length of buffers | ||
1427 | * msgid - specifies the message ID. | ||
1428 | * trgcls - specifies target class | ||
1429 | * Output: | ||
1430 | * flags1_out: Options for path. | ||
1431 | * IPNORPY - 0x10 specifies whether a reply is required | ||
1432 | * IPPRTY - 0x20 specifies if you want to send priority message | ||
1433 | * IPRMDATA - 0x80 specifies the data is contained in the parameter list | ||
1434 | * residual_buffer - address points to the current list entry IUCV | ||
1435 | * is working on. | ||
1436 | * residual_length - | ||
1437 | * Contains one of the following values, if the receive buffer is: | ||
1438 | * The same length as the message, this field is zero. | ||
1439 | * Longer than the message, this field contains the number of | ||
1440 | * bytes remaining in the buffer. | ||
1441 | * Shorter than the message, this field contains the residual | ||
1442 | * count (that is, the number of bytes remaining in the | ||
1443 | * message that does not fit into the buffer). In this case | ||
1444 | * b2f0_result = 5. | ||
1445 | * Return: b2f0_result - return code from CP | ||
1446 | * (-EINVAL) - buffer address is NULL | ||
1447 | */ | ||
1448 | int | ||
1449 | iucv_receive_array (__u16 pathid, | ||
1450 | __u32 msgid, __u32 trgcls, | ||
1451 | iucv_array_t * buffer, ulong buflen, | ||
1452 | int *flags1_out, | ||
1453 | ulong * residual_buffer, ulong * residual_length) | ||
1454 | { | ||
1455 | iparml_db *parm; | ||
1456 | ulong b2f0_result; | ||
1457 | int i = 0, moved = 0, need_to_move = 8, dyn_len; | ||
1458 | |||
1459 | iucv_debug(2, "entering"); | ||
1460 | |||
1461 | if (!buffer) | ||
1462 | return -EINVAL; | ||
1463 | |||
1464 | parm = (iparml_db *)grab_param(); | ||
1465 | |||
1466 | parm->ipbfadr1 = (__u32) ((ulong) buffer); | ||
1467 | parm->ipbfln1f = (__u32) buflen; | ||
1468 | parm->ipmsgid = msgid; | ||
1469 | parm->ippathid = pathid; | ||
1470 | parm->iptrgcls = trgcls; | ||
1471 | parm->ipflags1 = (IPBUFLST | IPFGPID | IPFGMID | IPFGMCL); | ||
1472 | |||
1473 | b2f0_result = b2f0(RECEIVE, parm); | ||
1474 | |||
1475 | if (!b2f0_result || b2f0_result == 5) { | ||
1476 | |||
1477 | if (flags1_out) { | ||
1478 | iucv_debug(2, "*flags1_out = %d", *flags1_out); | ||
1479 | *flags1_out = (parm->ipflags1 & (~0x07)); | ||
1480 | iucv_debug(2, "*flags1_out = %d", *flags1_out); | ||
1481 | } | ||
1482 | |||
1483 | if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */ | ||
1484 | |||
1485 | if (residual_length) | ||
1486 | *residual_length = parm->ipbfln1f; | ||
1487 | |||
1488 | if (residual_buffer) | ||
1489 | *residual_buffer = parm->ipbfadr1; | ||
1490 | |||
1491 | } else { | ||
1492 | /* copy msg from parmlist to users array. */ | ||
1493 | |||
1494 | while ((moved < 8) && (moved < buflen)) { | ||
1495 | dyn_len = | ||
1496 | min_t (unsigned int, | ||
1497 | (buffer + i)->length, need_to_move); | ||
1498 | |||
1499 | memcpy ((char *)((ulong)((buffer + i)->address)), | ||
1500 | ((char *) &parm->ipbfadr1) + moved, | ||
1501 | dyn_len); | ||
1502 | |||
1503 | moved += dyn_len; | ||
1504 | need_to_move -= dyn_len; | ||
1505 | |||
1506 | (buffer + i)->address = | ||
1507 | (__u32) | ||
1508 | ((ulong)(__u8 *) ((ulong)(buffer + i)->address) | ||
1509 | + dyn_len); | ||
1510 | |||
1511 | (buffer + i)->length -= dyn_len; | ||
1512 | i++; | ||
1513 | } | ||
1514 | |||
1515 | if (need_to_move) /* buflen < 8 bytes */ | ||
1516 | b2f0_result = 5; | ||
1517 | |||
1518 | if (residual_length) | ||
1519 | *residual_length = abs (buflen - 8); | ||
1520 | |||
1521 | if (residual_buffer) { | ||
1522 | if (!moved) | ||
1523 | *residual_buffer = (ulong) buffer; | ||
1524 | else | ||
1525 | *residual_buffer = | ||
1526 | (ulong) (buffer + (i - 1)); | ||
1527 | } | ||
1528 | |||
1529 | } | ||
1530 | } | ||
1531 | release_param(parm); | ||
1532 | |||
1533 | iucv_debug(2, "exiting"); | ||
1534 | return b2f0_result; | ||
1535 | } | ||
1536 | |||
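A hedged sketch: receive one message into two discontiguous fragments. frag1/frag2 and their lengths are illustrative and assumed to be allocated below 2G:

	iucv_array_t vec[2];
	int flags1_out;
	ulong resid_adr, resid_len;
	int rc;

	vec[0].address = (__u32)(ulong) frag1;
	vec[0].length  = frag1_len;
	vec[1].address = (__u32)(ulong) frag2;
	vec[1].length  = frag2_len;

	rc = iucv_receive_array(pathid, msgid, trgcls, vec,
				frag1_len + frag2_len,
				&flags1_out, &resid_adr, &resid_len);
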
1537 | /** | ||
1538 | * iucv_reject: | ||
1539 | * @pathid: Path identification number. | ||
1540 | * @msgid: Message ID of the message to reject. | ||
1541 | * @trgcls: Target class of the message to reject. | ||
1542 | * Returns: return code from CP | ||
1543 | * | ||
1544 | * Refuses a specified message. Between the time you are notified of a | ||
1545 | * message and the time that you complete the message, the message may | ||
1546 | * be rejected. | ||
1547 | */ | ||
1548 | int | ||
1549 | iucv_reject (__u16 pathid, __u32 msgid, __u32 trgcls) | ||
1550 | { | ||
1551 | iparml_db *parm; | ||
1552 | ulong b2f0_result = 0; | ||
1553 | |||
1554 | iucv_debug(1, "entering"); | ||
1555 | iucv_debug(1, "pathid = %d", pathid); | ||
1556 | |||
1557 | parm = (iparml_db *)grab_param(); | ||
1558 | |||
1559 | parm->ippathid = pathid; | ||
1560 | parm->ipmsgid = msgid; | ||
1561 | parm->iptrgcls = trgcls; | ||
1562 | parm->ipflags1 = (IPFGMCL | IPFGMID | IPFGPID); | ||
1563 | |||
1564 | b2f0_result = b2f0(REJECT, parm); | ||
1565 | release_param(parm); | ||
1566 | |||
1567 | iucv_debug(1, "b2f0_result = %ld", b2f0_result); | ||
1568 | iucv_debug(1, "exiting"); | ||
1569 | |||
1570 | return b2f0_result; | ||
1571 | } | ||
1572 | |||
1573 | /* | ||
1574 | * Name: iucv_reply | ||
1575 | * Purpose: This function responds to the two-way messages that you | ||
1576 | * receive. You must identify completely the message to | ||
1577 | * which you wish to reply, i.e. pathid, msgid, and trgcls. | ||
1578 | * Input: pathid - path identification number | ||
1579 | * msgid - specifies the message ID. | ||
1580 | * trgcls - specifies target class | ||
1581 | * flags1 - option for path | ||
1582 | * IPPRTY- 0x20 - specifies if you want to send priority message | ||
1583 | * buffer - address of reply buffer | ||
1584 | * buflen - length of reply buffer | ||
1585 | * Output: ipbfadr2 - Address of buffer updated by the number | ||
1586 | * of bytes you have moved. | ||
1587 | * ipbfln2f - Contains one of the following values: | ||
1588 | * If the answer buffer is the same length as the reply, this field | ||
1589 | * contains zero. | ||
1590 | * If the answer buffer is longer than the reply, this field contains | ||
1591 | * the number of bytes remaining in the buffer. | ||
1592 | * If the answer buffer is shorter than the reply, this field contains | ||
1593 | * a residual count (that is, the number of bytes remaining in the | ||
1594 | * reply that does not fit into the buffer). In this | ||
1595 | * case b2f0_result = 5. | ||
1596 | * Return: b2f0_result - return code from CP | ||
1597 | * (-EINVAL) - buffer address is NULL | ||
1598 | */ | ||
1599 | int | ||
1600 | iucv_reply (__u16 pathid, | ||
1601 | __u32 msgid, __u32 trgcls, | ||
1602 | int flags1, | ||
1603 | void *buffer, ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f) | ||
1604 | { | ||
1605 | iparml_db *parm; | ||
1606 | ulong b2f0_result; | ||
1607 | |||
1608 | iucv_debug(2, "entering"); | ||
1609 | |||
1610 | if (!buffer) | ||
1611 | return -EINVAL; | ||
1612 | |||
1613 | parm = (iparml_db *)grab_param(); | ||
1614 | |||
1615 | parm->ipbfadr2 = (__u32) ((ulong) buffer); | ||
1616 | parm->ipbfln2f = (__u32) buflen; /* length of message */ | ||
1617 | parm->ippathid = pathid; | ||
1618 | parm->ipmsgid = msgid; | ||
1619 | parm->iptrgcls = trgcls; | ||
1620 | parm->ipflags1 = (__u8) flags1; /* priority message */ | ||
1621 | |||
1622 | b2f0_result = b2f0(REPLY, parm); | ||
1623 | |||
1624 | if ((!b2f0_result) || (b2f0_result == 5)) { | ||
1625 | if (ipbfadr2) | ||
1626 | *ipbfadr2 = parm->ipbfadr2; | ||
1627 | if (ipbfln2f) | ||
1628 | *ipbfln2f = parm->ipbfln2f; | ||
1629 | } | ||
1630 | release_param(parm); | ||
1631 | |||
1632 | iucv_debug(2, "exiting"); | ||
1633 | |||
1634 | return b2f0_result; | ||
1635 | } | ||
1636 | |||
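A hedged sketch: answer a two-way message identified by pathid, msgid and trgcls (taken from the MessagePending interrupt); reply_buf/reply_len are illustrative:

	ulong answered_adr, answered_left;
	int rc;

	rc = iucv_reply(pathid, msgid, trgcls, 0 /* flags1 */,
			reply_buf, reply_len, &answered_adr, &answered_left);
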
1637 | /* | ||
1638 | * Name: iucv_reply_array | ||
1639 | * Purpose: This function responds to the two-way messages that you | ||
1640 | * receive. You must identify completely the message to | ||
1641 | * which you wish to reply, i.e. pathid, msgid, and trgcls. | ||
1642 | * The array identifies a list of addresses and lengths of | ||
1643 | * discontiguous buffers that contains the reply data. | ||
1644 | * Input: pathid - path identification number | ||
1645 | * msgid - specifies the message ID. | ||
1646 | * trgcls - specifies target class | ||
1647 | * flags1 - option for path | ||
1648 | * IPPRTY- specifies if you want to send priority message | ||
1649 | * buffer - address of array of reply buffers | ||
1650 | * buflen - total length of reply buffers | ||
1651 | * Output: ipbfadr2 - Address of buffer which IUCV is currently working on. | ||
1652 | * ipbfln2f - Contains one of the following values: | ||
1653 | * If the answer buffer is the same length as the reply, this field | ||
1654 | * contains zero. | ||
1655 | * If the answer buffer is longer than the reply, this field contains | ||
1656 | * the number of bytes remaining in the buffer. | ||
1657 | * If the answer buffer is shorter than the reply, this field contains | ||
1658 | * a residual count (that is, the number of bytes remaining in the | ||
1659 | * reply that does not fit into the buffer). In this | ||
1660 | * case b2f0_result = 5. | ||
1661 | * Return: b2f0_result - return code from CP | ||
1662 | * (-EINVAL) - buffer address is NULL | ||
1663 | */ | ||
1664 | int | ||
1665 | iucv_reply_array (__u16 pathid, | ||
1666 | __u32 msgid, __u32 trgcls, | ||
1667 | int flags1, | ||
1668 | iucv_array_t * buffer, | ||
1669 | ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f) | ||
1670 | { | ||
1671 | iparml_db *parm; | ||
1672 | ulong b2f0_result; | ||
1673 | |||
1674 | iucv_debug(2, "entering"); | ||
1675 | |||
1676 | if (!buffer) | ||
1677 | return -EINVAL; | ||
1678 | |||
1679 | parm = (iparml_db *)grab_param(); | ||
1680 | |||
1681 | parm->ipbfadr2 = (__u32) ((ulong) buffer); | ||
1682 | parm->ipbfln2f = buflen; /* length of message */ | ||
1683 | parm->ippathid = pathid; | ||
1684 | parm->ipmsgid = msgid; | ||
1685 | parm->iptrgcls = trgcls; | ||
1686 | parm->ipflags1 = (IPANSLST | flags1); | ||
1687 | |||
1688 | b2f0_result = b2f0(REPLY, parm); | ||
1689 | |||
1690 | if ((!b2f0_result) || (b2f0_result == 5)) { | ||
1691 | |||
1692 | if (ipbfadr2) | ||
1693 | *ipbfadr2 = parm->ipbfadr2; | ||
1694 | if (ipbfln2f) | ||
1695 | *ipbfln2f = parm->ipbfln2f; | ||
1696 | } | ||
1697 | release_param(parm); | ||
1698 | |||
1699 | iucv_debug(2, "exiting"); | ||
1700 | |||
1701 | return b2f0_result; | ||
1702 | } | ||
1703 | |||
1704 | /* | ||
1705 | * Name: iucv_reply_prmmsg | ||
1706 | * Purpose: This function responds to the two-way messages that you | ||
1707 | * receive. You must identify completely the message to | ||
1708 | * which you wish to reply, i.e. pathid, msgid, and trgcls. | ||
1709 | * Prmmsg signifies the data is moved into the | ||
1710 | * parameter list. | ||
1711 | * Input: pathid - path identification number | ||
1712 | * msgid - specifies the message ID. | ||
1713 | * trgcls - specifies target class | ||
1714 | * flags1 - option for path | ||
1715 | * IPPRTY- specifies if you want to send priority message | ||
1716 | * prmmsg - 8-bytes of data to be placed into the parameter | ||
1717 | * list. | ||
1718 | * Output: NA | ||
1719 | * Return: b2f0_result - return code from CP | ||
1720 | */ | ||
1721 | int | ||
1722 | iucv_reply_prmmsg (__u16 pathid, | ||
1723 | __u32 msgid, __u32 trgcls, int flags1, __u8 prmmsg[8]) | ||
1724 | { | ||
1725 | iparml_dpl *parm; | ||
1726 | ulong b2f0_result; | ||
1727 | |||
1728 | iucv_debug(2, "entering"); | ||
1729 | |||
1730 | parm = (iparml_dpl *)grab_param(); | ||
1731 | |||
1732 | parm->ippathid = pathid; | ||
1733 | parm->ipmsgid = msgid; | ||
1734 | parm->iptrgcls = trgcls; | ||
1735 | memcpy(parm->iprmmsg, prmmsg, sizeof (parm->iprmmsg)); | ||
1736 | parm->ipflags1 = (IPRMDATA | flags1); | ||
1737 | |||
1738 | b2f0_result = b2f0(REPLY, parm); | ||
1739 | release_param(parm); | ||
1740 | |||
1741 | iucv_debug(2, "exiting"); | ||
1742 | |||
1743 | return b2f0_result; | ||
1744 | } | ||
1745 | |||
1746 | /** | ||
1747 | * iucv_resume: | ||
1748 | * @pathid: Path identification number | ||
1749 | * @user_data: 16-byte of user data | ||
1750 | * | ||
1751 | * This function restores communication over a quiesced path. | ||
1752 | * Returns: return code from CP | ||
1753 | */ | ||
1754 | int | ||
1755 | iucv_resume (__u16 pathid, __u8 user_data[16]) | ||
1756 | { | ||
1757 | iparml_control *parm; | ||
1758 | ulong b2f0_result = 0; | ||
1759 | |||
1760 | iucv_debug(1, "entering"); | ||
1761 | iucv_debug(1, "pathid = %d", pathid); | ||
1762 | |||
1763 | parm = (iparml_control *)grab_param(); | ||
1764 | |||
1765 | memcpy (parm->ipuser, user_data, sizeof (parm->ipuser)); | ||
1766 | parm->ippathid = pathid; | ||
1767 | |||
1768 | b2f0_result = b2f0(RESUME, parm); | ||
1769 | release_param(parm); | ||
1770 | |||
1771 | iucv_debug(1, "exiting"); | ||
1772 | |||
1773 | return b2f0_result; | ||
1774 | } | ||
1775 | |||
1776 | /* | ||
1777 | * Name: iucv_send | ||
1778 | * Purpose: sends messages | ||
1779 | * Input: pathid - ushort, pathid | ||
1780 | * msgid - ulong *, id of message returned to caller | ||
1781 | * trgcls - ulong, target message class | ||
1782 | * srccls - ulong, source message class | ||
1783 | * msgtag - ulong, message tag | ||
1784 | * flags1 - Contains options for this path. | ||
1785 | * IPPRTY - 0x20 - specifies if you want to send a priority message. | ||
1786 | * buffer - pointer to buffer | ||
1787 | * buflen - ulong, length of buffer | ||
1788 | * Output: b2f0_result - return code from b2f0 call | ||
1789 | * msgid - returns message id | ||
1790 | */ | ||
1791 | int | ||
1792 | iucv_send (__u16 pathid, __u32 * msgid, | ||
1793 | __u32 trgcls, __u32 srccls, | ||
1794 | __u32 msgtag, int flags1, void *buffer, ulong buflen) | ||
1795 | { | ||
1796 | iparml_db *parm; | ||
1797 | ulong b2f0_result; | ||
1798 | |||
1799 | iucv_debug(2, "entering"); | ||
1800 | |||
1801 | if (!buffer) | ||
1802 | return -EINVAL; | ||
1803 | |||
1804 | parm = (iparml_db *)grab_param(); | ||
1805 | |||
1806 | parm->ipbfadr1 = (__u32) ((ulong) buffer); | ||
1807 | parm->ippathid = pathid; | ||
1808 | parm->iptrgcls = trgcls; | ||
1809 | parm->ipbfln1f = (__u32) buflen; /* length of message */ | ||
1810 | parm->ipsrccls = srccls; | ||
1811 | parm->ipmsgtag = msgtag; | ||
1812 | parm->ipflags1 = (IPNORPY | flags1); /* one way priority message */ | ||
1813 | |||
1814 | b2f0_result = b2f0(SEND, parm); | ||
1815 | |||
1816 | if ((!b2f0_result) && (msgid)) | ||
1817 | *msgid = parm->ipmsgid; | ||
1818 | release_param(parm); | ||
1819 | |||
1820 | iucv_debug(2, "exiting"); | ||
1821 | |||
1822 | return b2f0_result; | ||
1823 | } | ||
1824 | |||
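A hedged sketch: one-way send of a small buffer on an established path. send_buf/send_len are illustrative and assumed to be allocated below 2G; the class and tag values are arbitrary:

	__u32 msgid;
	int rc;

	rc = iucv_send(pathid, &msgid, 0 /* trgcls */, 0 /* srccls */,
		       42 /* msgtag */, 0 /* flags1 */, send_buf, send_len);
	if (!rc)
		iucv_debug(2, "sent message id %08x", msgid);
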
1825 | /* | ||
1826 | * Name: iucv_send_array | ||
1827 | * Purpose: This function transmits data to another application. | ||
1828 | * The contents of buffer is the address of the array of | ||
1829 | * addresses and lengths of discontiguous buffers that hold | ||
1830 | * the message text. This is a one-way message and the | ||
1831 | * receiver will not reply to the message. | ||
1832 | * Input: pathid - path identification number | ||
1833 | * trgcls - specifies target class | ||
1834 | * srccls - specifies the source message class | ||
1835 | * msgtag - specifies a tag to be associated with the message | ||
1836 | * flags1 - option for path | ||
1837 | * IPPRTY- specifies if you want to send priority message | ||
1838 | * buffer - address of array of send buffers | ||
1839 | * buflen - total length of send buffers | ||
1840 | * Output: msgid - specifies the message ID. | ||
1841 | * Return: b2f0_result - return code from CP | ||
1842 | * (-EINVAL) - buffer address is NULL | ||
1843 | */ | ||
1844 | int | ||
1845 | iucv_send_array (__u16 pathid, | ||
1846 | __u32 * msgid, | ||
1847 | __u32 trgcls, | ||
1848 | __u32 srccls, | ||
1849 | __u32 msgtag, int flags1, iucv_array_t * buffer, ulong buflen) | ||
1850 | { | ||
1851 | iparml_db *parm; | ||
1852 | ulong b2f0_result; | ||
1853 | |||
1854 | iucv_debug(2, "entering"); | ||
1855 | |||
1856 | if (!buffer) | ||
1857 | return -EINVAL; | ||
1858 | |||
1859 | parm = (iparml_db *)grab_param(); | ||
1860 | |||
1861 | parm->ippathid = pathid; | ||
1862 | parm->iptrgcls = trgcls; | ||
1863 | parm->ipbfadr1 = (__u32) ((ulong) buffer); | ||
1864 | parm->ipbfln1f = (__u32) buflen; /* length of message */ | ||
1865 | parm->ipsrccls = srccls; | ||
1866 | parm->ipmsgtag = msgtag; | ||
1867 | parm->ipflags1 = (IPNORPY | IPBUFLST | flags1); | ||
1868 | b2f0_result = b2f0(SEND, parm); | ||
1869 | |||
1870 | if ((!b2f0_result) && (msgid)) | ||
1871 | *msgid = parm->ipmsgid; | ||
1872 | release_param(parm); | ||
1873 | |||
1874 | iucv_debug(2, "exiting"); | ||
1875 | return b2f0_result; | ||
1876 | } | ||
1877 | |||
1878 | /* | ||
1879 | * Name: iucv_send_prmmsg | ||
1880 | * Purpose: This function transmits data to another application. | ||
1881 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
1882 | * into the parameter list. This is a one-way message and the | ||
1883 | * receiver will not reply to the message. | ||
1884 | * Input: pathid - path identification number | ||
1885 | * trgcls - specifies target class | ||
1886 | * srccls - specifies the source message class | ||
1887 | * msgtag - specifies a tag to be associated with the message | ||
1888 | * flags1 - option for path | ||
1889 | * IPPRTY- specifies if you want to send priority message | ||
1890 | * prmmsg - 8-bytes of data to be placed into parameter list | ||
1891 | * Output: msgid - specifies the message ID. | ||
1892 | * Return: b2f0_result - return code from CP | ||
1893 | */ | ||
1894 | int | ||
1895 | iucv_send_prmmsg (__u16 pathid, | ||
1896 | __u32 * msgid, | ||
1897 | __u32 trgcls, | ||
1898 | __u32 srccls, __u32 msgtag, int flags1, __u8 prmmsg[8]) | ||
1899 | { | ||
1900 | iparml_dpl *parm; | ||
1901 | ulong b2f0_result; | ||
1902 | |||
1903 | iucv_debug(2, "entering"); | ||
1904 | |||
1905 | parm = (iparml_dpl *)grab_param(); | ||
1906 | |||
1907 | parm->ippathid = pathid; | ||
1908 | parm->iptrgcls = trgcls; | ||
1909 | parm->ipsrccls = srccls; | ||
1910 | parm->ipmsgtag = msgtag; | ||
1911 | parm->ipflags1 = (IPRMDATA | IPNORPY | flags1); | ||
1912 | memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg)); | ||
1913 | |||
1914 | b2f0_result = b2f0(SEND, parm); | ||
1915 | |||
1916 | if ((!b2f0_result) && (msgid)) | ||
1917 | *msgid = parm->ipmsgid; | ||
1918 | release_param(parm); | ||
1919 | |||
1920 | iucv_debug(2, "exiting"); | ||
1921 | |||
1922 | return b2f0_result; | ||
1923 | } | ||
1924 | |||
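A hedged sketch: push eight bytes through the parameter list with no send buffer at all; the token contents and tag are illustrative:

	__u8 token[8] = { 'P', 'I', 'N', 'G', ' ', ' ', ' ', ' ' };
	__u32 msgid;
	int rc;

	rc = iucv_send_prmmsg(pathid, &msgid, 0 /* trgcls */, 0 /* srccls */,
			      1 /* msgtag */, 0 /* flags1 */, token);
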
1925 | /* | ||
1926 | * Name: iucv_send2way | ||
1927 | * Purpose: This function transmits data to another application. | ||
1928 | * Data to be transmitted is in a buffer. The receiver | ||
1929 | * of the send is expected to reply to the message and | ||
1930 | * a buffer is provided into which IUCV moves the reply | ||
1931 | * to this message. | ||
1932 | * Input: pathid - path identification number | ||
1933 | * trgcls - specifies target class | ||
1934 | * srccls - specifies the source message class | ||
1935 | * msgtag - specifies a tag associated with the message | ||
1936 | * flags1 - option for path | ||
1937 | * IPPRTY- specifies if you want to send priority message | ||
1938 | * buffer - address of send buffer | ||
1939 | * buflen - length of send buffer | ||
1940 | * ansbuf - address of buffer to reply with | ||
1941 | * anslen - length of buffer to reply with | ||
1942 | * Output: msgid - specifies the message ID. | ||
1943 | * Return: b2f0_result - return code from CP | ||
1944 | * (-EINVAL) - buffer or ansbuf address is NULL | ||
1945 | */ | ||
1946 | int | ||
1947 | iucv_send2way (__u16 pathid, | ||
1948 | __u32 * msgid, | ||
1949 | __u32 trgcls, | ||
1950 | __u32 srccls, | ||
1951 | __u32 msgtag, | ||
1952 | int flags1, | ||
1953 | void *buffer, ulong buflen, void *ansbuf, ulong anslen) | ||
1954 | { | ||
1955 | iparml_db *parm; | ||
1956 | ulong b2f0_result; | ||
1957 | |||
1958 | iucv_debug(2, "entering"); | ||
1959 | |||
1960 | if (!buffer || !ansbuf) | ||
1961 | return -EINVAL; | ||
1962 | |||
1963 | parm = (iparml_db *)grab_param(); | ||
1964 | |||
1965 | parm->ippathid = pathid; | ||
1966 | parm->iptrgcls = trgcls; | ||
1967 | parm->ipbfadr1 = (__u32) ((ulong) buffer); | ||
1968 | parm->ipbfln1f = (__u32) buflen; /* length of message */ | ||
1969 | parm->ipbfadr2 = (__u32) ((ulong) ansbuf); | ||
1970 | parm->ipbfln2f = (__u32) anslen; | ||
1971 | parm->ipsrccls = srccls; | ||
1972 | parm->ipmsgtag = msgtag; | ||
1973 | parm->ipflags1 = flags1; /* priority message */ | ||
1974 | |||
1975 | b2f0_result = b2f0(SEND, parm); | ||
1976 | |||
1977 | if ((!b2f0_result) && (msgid)) | ||
1978 | *msgid = parm->ipmsgid; | ||
1979 | release_param(parm); | ||
1980 | |||
1981 | iucv_debug(2, "exiting"); | ||
1982 | |||
1983 | return b2f0_result; | ||
1984 | } | ||
1985 | |||
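A hedged sketch of a two-way send: IUCV moves the partner's reply into ans_buf, and completion is typically reported through the MessageComplete interrupt for this msgid. req_buf/req_len and ans_buf/ans_len are illustrative:

	__u32 msgid;
	int rc;

	rc = iucv_send2way(pathid, &msgid, 0 /* trgcls */, 0 /* srccls */,
			   7 /* msgtag */, 0 /* flags1 */,
			   req_buf, req_len, ans_buf, ans_len);
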
1986 | /* | ||
1987 | * Name: iucv_send2way_array | ||
1988 | * Purpose: This function transmits data to another application. | ||
1989 | * The contents of buffer is the address of the array of | ||
1990 | * addresses and lengths of discontiguous buffers that hold | ||
1991 | * the message text. The receiver of the send is expected to | ||
1992 | * reply to the message and a buffer is provided into which | ||
1993 | * IUCV moves the reply to this message. | ||
1994 | * Input: pathid - path identification number | ||
1995 | * trgcls - specifies target class | ||
1996 | * srccls - specifies the source message class | ||
1997 | * msgtag - specifies a tag to be associated with the message | ||
1998 | * flags1 - option for path | ||
1999 | * IPPRTY- specifies if you want to send priority message | ||
2000 | * buffer - address of array of send buffers | ||
2001 | * buflen - total length of send buffers | ||
2002 | * ansbuf - address of buffer to reply with | ||
2003 | * anslen - length of buffer to reply with | ||
2004 | * Output: msgid - specifies the message ID. | ||
2005 | * Return: b2f0_result - return code from CP | ||
2006 | * (-EINVAL) - buffer address is NULL | ||
2007 | */ | ||
2008 | int | ||
2009 | iucv_send2way_array (__u16 pathid, | ||
2010 | __u32 * msgid, | ||
2011 | __u32 trgcls, | ||
2012 | __u32 srccls, | ||
2013 | __u32 msgtag, | ||
2014 | int flags1, | ||
2015 | iucv_array_t * buffer, | ||
2016 | ulong buflen, iucv_array_t * ansbuf, ulong anslen) | ||
2017 | { | ||
2018 | iparml_db *parm; | ||
2019 | ulong b2f0_result; | ||
2020 | |||
2021 | iucv_debug(2, "entering"); | ||
2022 | |||
2023 | if (!buffer || !ansbuf) | ||
2024 | return -EINVAL; | ||
2025 | |||
2026 | parm = (iparml_db *)grab_param(); | ||
2027 | |||
2028 | parm->ippathid = pathid; | ||
2029 | parm->iptrgcls = trgcls; | ||
2030 | parm->ipbfadr1 = (__u32) ((ulong) buffer); | ||
2031 | parm->ipbfln1f = (__u32) buflen; /* length of message */ | ||
2032 | parm->ipbfadr2 = (__u32) ((ulong) ansbuf); | ||
2033 | parm->ipbfln2f = (__u32) anslen; | ||
2034 | parm->ipsrccls = srccls; | ||
2035 | parm->ipmsgtag = msgtag; | ||
2036 | parm->ipflags1 = (IPBUFLST | IPANSLST | flags1); | ||
2037 | b2f0_result = b2f0(SEND, parm); | ||
2038 | if ((!b2f0_result) && (msgid)) | ||
2039 | *msgid = parm->ipmsgid; | ||
2040 | release_param(parm); | ||
2041 | |||
2042 | iucv_debug(2, "exiting"); | ||
2043 | return b2f0_result; | ||
2044 | } | ||
2045 | |||
2046 | /* | ||
2047 | * Name: iucv_send2way_prmmsg | ||
2048 | * Purpose: This function transmits data to another application. | ||
2049 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
2050 | * into the parameter list. This is a two-way message and the | ||
2051 | * receiver of the message is expected to reply. A buffer | ||
2052 | * is provided into which IUCV moves the reply to this | ||
2053 | * message. | ||
2054 | * Input: pathid - path identification number | ||
2055 | * trgcls - specifies target class | ||
2056 | * srccls - specifies the source message class | ||
2057 | * msgtag - specifies a tag to be associated with the message | ||
2058 | * flags1 - option for path | ||
2059 | * IPPRTY- specifies if you want to send priority message | ||
2060 | * prmmsg - 8-bytes of data to be placed in parameter list | ||
2061 | * ansbuf - address of buffer to reply with | ||
2062 | * anslen - length of buffer to reply with | ||
2063 | * Output: msgid - specifies the message ID. | ||
2064 | * Return: b2f0_result - return code from CP | ||
2065 | * (-EINVAL) - buffer address is NULL | ||
2066 | */ | ||
2067 | int | ||
2068 | iucv_send2way_prmmsg (__u16 pathid, | ||
2069 | __u32 * msgid, | ||
2070 | __u32 trgcls, | ||
2071 | __u32 srccls, | ||
2072 | __u32 msgtag, | ||
2073 | ulong flags1, __u8 prmmsg[8], void *ansbuf, ulong anslen) | ||
2074 | { | ||
2075 | iparml_dpl *parm; | ||
2076 | ulong b2f0_result; | ||
2077 | |||
2078 | iucv_debug(2, "entering"); | ||
2079 | |||
2080 | if (!ansbuf) | ||
2081 | return -EINVAL; | ||
2082 | |||
2083 | parm = (iparml_dpl *)grab_param(); | ||
2084 | |||
2085 | parm->ippathid = pathid; | ||
2086 | parm->iptrgcls = trgcls; | ||
2087 | parm->ipsrccls = srccls; | ||
2088 | parm->ipmsgtag = msgtag; | ||
2089 | parm->ipbfadr2 = (__u32) ((ulong) ansbuf); | ||
2090 | parm->ipbfln2f = (__u32) anslen; | ||
2091 | parm->ipflags1 = (IPRMDATA | flags1); /* message in prmlist */ | ||
2092 | memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg)); | ||
2093 | |||
2094 | b2f0_result = b2f0(SEND, parm); | ||
2095 | |||
2096 | if ((!b2f0_result) && (msgid)) | ||
2097 | *msgid = parm->ipmsgid; | ||
2098 | release_param(parm); | ||
2099 | |||
2100 | iucv_debug(2, "exiting"); | ||
2101 | |||
2102 | return b2f0_result; | ||
2103 | } | ||
2104 | |||
2105 | /* | ||
2106 | * Name: iucv_send2way_prmmsg_array | ||
2107 | * Purpose: This function transmits data to another application. | ||
2108 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
2109 | * into the parameter list. This is a two-way message and the | ||
2110 | * receiver of the message is expected to reply. A buffer | ||
2111 | * is provided into which IUCV moves the reply to this | ||
2112 | * message. The contents of ansbuf is the address of the | ||
2113 | * array of addresses and lengths of discontiguous buffers | ||
2114 | * that contain the reply. | ||
2115 | * Input: pathid - path identification number | ||
2116 | * trgcls - specifies target class | ||
2117 | * srccls - specifies the source message class | ||
2118 | * msgtag - specifies a tag to be associated with the message | ||
2119 | * flags1 - option for path | ||
2120 | * IPPRTY- specifies if you want to send priority message | ||
2121 | * prmmsg - 8-bytes of data to be placed into the parameter list | ||
2122 | * ansbuf - address of buffer to reply with | ||
2123 | * anslen - length of buffer to reply with | ||
2124 | * Output: msgid - specifies the message ID. | ||
2125 | * Return: b2f0_result - return code from CP | ||
2126 | * (-EINVAL) - ansbuf address is NULL | ||
2127 | */ | ||
2128 | int | ||
2129 | iucv_send2way_prmmsg_array (__u16 pathid, | ||
2130 | __u32 * msgid, | ||
2131 | __u32 trgcls, | ||
2132 | __u32 srccls, | ||
2133 | __u32 msgtag, | ||
2134 | int flags1, | ||
2135 | __u8 prmmsg[8], | ||
2136 | iucv_array_t * ansbuf, ulong anslen) | ||
2137 | { | ||
2138 | iparml_dpl *parm; | ||
2139 | ulong b2f0_result; | ||
2140 | |||
2141 | iucv_debug(2, "entering"); | ||
2142 | |||
2143 | if (!ansbuf) | ||
2144 | return -EINVAL; | ||
2145 | |||
2146 | parm = (iparml_dpl *)grab_param(); | ||
2147 | |||
2148 | parm->ippathid = pathid; | ||
2149 | parm->iptrgcls = trgcls; | ||
2150 | parm->ipsrccls = srccls; | ||
2151 | parm->ipmsgtag = msgtag; | ||
2152 | parm->ipbfadr2 = (__u32) ((ulong) ansbuf); | ||
2153 | parm->ipbfln2f = (__u32) anslen; | ||
2154 | parm->ipflags1 = (IPRMDATA | IPANSLST | flags1); | ||
2155 | memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg)); | ||
2156 | b2f0_result = b2f0(SEND, parm); | ||
2157 | if ((!b2f0_result) && (msgid)) | ||
2158 | *msgid = parm->ipmsgid; | ||
2159 | release_param(parm); | ||
2160 | |||
2161 | iucv_debug(2, "exiting"); | ||
2162 | return b2f0_result; | ||
2163 | } | ||
2164 | |||
2165 | void | ||
2166 | iucv_setmask_cpuid (void *result) | ||
2167 | { | ||
2168 | iparml_set_mask *parm; | ||
2169 | |||
2170 | iucv_debug(1, "entering"); | ||
2171 | parm = (iparml_set_mask *)grab_param(); | ||
2172 | parm->ipmask = *((__u8*)result); | ||
2173 | *((ulong *)result) = b2f0(SETMASK, parm); | ||
2174 | release_param(parm); | ||
2175 | |||
2176 | iucv_debug(1, "b2f0_result = %ld", *((ulong *)result)); | ||
2177 | iucv_debug(1, "exiting"); | ||
2178 | } | ||
2179 | |||
2180 | /* | ||
2181 | * Name: iucv_setmask | ||
2182 | * Purpose: This function enables or disables the following IUCV | ||
2183 | * external interruptions: Nonpriority and priority message | ||
2184 | * interrupts, nonpriority and priority reply interrupts. | ||
2185 | * Input: SetMaskFlag - options for interrupts | ||
2186 | * 0x80 - Nonpriority_MessagePendingInterruptsFlag | ||
2187 | * 0x40 - Priority_MessagePendingInterruptsFlag | ||
2188 | * 0x20 - Nonpriority_MessageCompletionInterruptsFlag | ||
2189 | * 0x10 - Priority_MessageCompletionInterruptsFlag | ||
2190 | * 0x08 - IUCVControlInterruptsFlag | ||
2191 | * Output: NA | ||
2192 | * Return: b2f0_result - return code from CP | ||
2193 | */ | ||
2194 | int | ||
2195 | iucv_setmask (int SetMaskFlag) | ||
2196 | { | ||
2197 | union { | ||
2198 | ulong result; | ||
2199 | __u8 param; | ||
2200 | } u; | ||
2201 | int cpu; | ||
2202 | |||
2203 | u.param = SetMaskFlag; | ||
2204 | cpu = get_cpu(); | ||
2205 | smp_call_function_on(iucv_setmask_cpuid, &u, 0, 1, iucv_cpuid); | ||
2206 | put_cpu(); | ||
2207 | |||
2208 | return u.result; | ||
2209 | } | ||
2210 | |||
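A hedged sketch: enable only the IUCV control interrupts for a window, then restore all five interrupt classes defined in iucv.h:

	int rc;

	rc = iucv_setmask(IUCVControlInterruptsFlag);
	/* ... window in which only control interrupts are delivered ... */
	rc = iucv_setmask(AllInterrupts);	/* 0xf8: all classes enabled */
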
2211 | /** | ||
2212 | * iucv_sever: | ||
2213 | * @pathid: Path identification number | ||
2214 | * @user_data: 16-byte of user data | ||
2215 | * | ||
2216 | * This function terminates an iucv path. | ||
2217 | * Returns: return code from CP | ||
2218 | */ | ||
2219 | int | ||
2220 | iucv_sever(__u16 pathid, __u8 user_data[16]) | ||
2221 | { | ||
2222 | iparml_control *parm; | ||
2223 | ulong b2f0_result = 0; | ||
2224 | |||
2225 | iucv_debug(1, "entering"); | ||
2226 | parm = (iparml_control *)grab_param(); | ||
2227 | |||
2228 | memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); | ||
2229 | parm->ippathid = pathid; | ||
2230 | |||
2231 | b2f0_result = b2f0(SEVER, parm); | ||
2232 | |||
2233 | if (!b2f0_result) | ||
2234 | iucv_remove_pathid(pathid); | ||
2235 | release_param(parm); | ||
2236 | |||
2237 | iucv_debug(1, "exiting"); | ||
2238 | return b2f0_result; | ||
2239 | } | ||
2240 | |||
2241 | /* | ||
2242 | * Interrupt Handlers | ||
2243 | *******************************************************************************/ | ||
2244 | |||
2245 | /** | ||
2246 | * iucv_irq_handler: | ||
2248 | * @code: irq code | ||
2249 | * | ||
2250 | * Handles external interrupts coming in from CP. | ||
2251 | * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler(). | ||
2252 | */ | ||
2253 | static void | ||
2254 | iucv_irq_handler(__u16 code) | ||
2255 | { | ||
2256 | iucv_irqdata *irqdata; | ||
2257 | |||
2258 | irqdata = kmalloc(sizeof(iucv_irqdata), GFP_ATOMIC); | ||
2259 | if (!irqdata) { | ||
2260 | printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__); | ||
2261 | return; | ||
2262 | } | ||
2263 | |||
2264 | memcpy(&irqdata->data, iucv_external_int_buffer, | ||
2265 | sizeof(iucv_GeneralInterrupt)); | ||
2266 | |||
2267 | spin_lock(&iucv_irq_queue_lock); | ||
2268 | list_add_tail(&irqdata->queue, &iucv_irq_queue); | ||
2269 | spin_unlock(&iucv_irq_queue_lock); | ||
2270 | |||
2271 | tasklet_schedule(&iucv_tasklet); | ||
2272 | } | ||
2273 | |||
2274 | /** | ||
2275 | * iucv_do_int: | ||
2276 | * @int_buf: Pointer to copy of external interrupt buffer | ||
2277 | * | ||
2278 | * The workhorse for handling interrupts queued by iucv_irq_handler(). | ||
2279 | * This function is called from the bottom half iucv_tasklet_handler(). | ||
2280 | */ | ||
2281 | static void | ||
2282 | iucv_do_int(iucv_GeneralInterrupt * int_buf) | ||
2283 | { | ||
2284 | handler *h = NULL; | ||
2285 | struct list_head *lh; | ||
2286 | ulong flags; | ||
2287 | iucv_interrupt_ops_t *interrupt = NULL; /* interrupt addresses */ | ||
2288 | __u8 temp_buff1[24], temp_buff2[24]; /* masked handler id. */ | ||
2289 | int rc = 0, j = 0; | ||
2290 | __u8 no_listener[16] = "NO LISTENER"; | ||
2291 | |||
2292 | iucv_debug(2, "entering, pathid %d, type %02X", | ||
2293 | int_buf->ippathid, int_buf->iptype); | ||
2294 | iucv_dumpit("External Interrupt Buffer:", | ||
2295 | int_buf, sizeof(iucv_GeneralInterrupt)); | ||
2296 | |||
2297 | ASCEBC (no_listener, 16); | ||
2298 | |||
2299 | if (int_buf->iptype != 01) { | ||
2300 | if ((int_buf->ippathid) > (max_connections - 1)) { | ||
2301 | printk(KERN_WARNING "%s: Got interrupt with pathid %d" | ||
2302 | " > max_connections (%ld)\n", __FUNCTION__, | ||
2303 | int_buf->ippathid, max_connections - 1); | ||
2304 | } else { | ||
2305 | h = iucv_pathid_table[int_buf->ippathid]; | ||
2306 | interrupt = h->interrupt_table; | ||
2307 | iucv_dumpit("Handler:", h, sizeof(handler)); | ||
2308 | } | ||
2309 | } | ||
2310 | |||
2311 | /* end of if statement */ | ||
2312 | switch (int_buf->iptype) { | ||
2313 | case 0x01: /* connection pending */ | ||
2314 | if (messagesDisabled) { | ||
2315 | iucv_setmask(~0); | ||
2316 | messagesDisabled = 0; | ||
2317 | } | ||
2318 | spin_lock_irqsave(&iucv_lock, flags); | ||
2319 | list_for_each(lh, &iucv_handler_table) { | ||
2320 | h = list_entry(lh, handler, list); | ||
2321 | memcpy(temp_buff1, &(int_buf->ipvmid), 24); | ||
2322 | memcpy(temp_buff2, &(h->id.userid), 24); | ||
2323 | for (j = 0; j < 24; j++) { | ||
2324 | temp_buff1[j] &= (h->id.mask)[j]; | ||
2325 | temp_buff2[j] &= (h->id.mask)[j]; | ||
2326 | } | ||
2327 | |||
2328 | iucv_dumpit("temp_buff1:", | ||
2329 | temp_buff1, sizeof(temp_buff1)); | ||
2330 | iucv_dumpit("temp_buff2", | ||
2331 | temp_buff2, sizeof(temp_buff2)); | ||
2332 | |||
2333 | if (!memcmp (temp_buff1, temp_buff2, 24)) { | ||
2334 | |||
2335 | iucv_debug(2, | ||
2336 | "found a matching handler"); | ||
2337 | break; | ||
2338 | } else | ||
2339 | h = NULL; | ||
2340 | } | ||
2341 | spin_unlock_irqrestore (&iucv_lock, flags); | ||
2342 | if (h) { | ||
2343 | /* ADD PATH TO PATHID TABLE */ | ||
2344 | rc = iucv_add_pathid(int_buf->ippathid, h); | ||
2345 | if (rc) { | ||
2346 | iucv_sever (int_buf->ippathid, | ||
2347 | no_listener); | ||
2348 | iucv_debug(1, | ||
2349 | "add_pathid failed, rc = %d", | ||
2350 | rc); | ||
2351 | } else { | ||
2352 | interrupt = h->interrupt_table; | ||
2353 | if (interrupt->ConnectionPending) { | ||
2354 | EBCASC (int_buf->ipvmid, 8); | ||
2355 | interrupt->ConnectionPending( | ||
2356 | (iucv_ConnectionPending *)int_buf, | ||
2357 | h->pgm_data); | ||
2358 | } else | ||
2359 | iucv_sever(int_buf->ippathid, | ||
2360 | no_listener); | ||
2361 | } | ||
2362 | } else | ||
2363 | iucv_sever(int_buf->ippathid, no_listener); | ||
2364 | break; | ||
2365 | |||
2366 | case 0x02: /*connection complete */ | ||
2367 | if (messagesDisabled) { | ||
2368 | iucv_setmask(~0); | ||
2369 | messagesDisabled = 0; | ||
2370 | } | ||
2371 | if (h) { | ||
2372 | if (interrupt->ConnectionComplete) | ||
2373 | { | ||
2374 | interrupt->ConnectionComplete( | ||
2375 | (iucv_ConnectionComplete *)int_buf, | ||
2376 | h->pgm_data); | ||
2377 | } | ||
2378 | else | ||
2379 | iucv_debug(1, | ||
2380 | "ConnectionComplete not called"); | ||
2381 | } else | ||
2382 | iucv_sever(int_buf->ippathid, no_listener); | ||
2383 | break; | ||
2384 | |||
2385 | case 0x03: /* connection severed */ | ||
2386 | if (messagesDisabled) { | ||
2387 | iucv_setmask(~0); | ||
2388 | messagesDisabled = 0; | ||
2389 | } | ||
2390 | if (h) { | ||
2391 | if (interrupt->ConnectionSevered) | ||
2392 | interrupt->ConnectionSevered( | ||
2393 | (iucv_ConnectionSevered *)int_buf, | ||
2394 | h->pgm_data); | ||
2395 | |||
2396 | else | ||
2397 | iucv_sever (int_buf->ippathid, no_listener); | ||
2398 | } else | ||
2399 | iucv_sever(int_buf->ippathid, no_listener); | ||
2400 | break; | ||
2401 | |||
2402 | case 0x04: /* connection quiesced */ | ||
2403 | if (messagesDisabled) { | ||
2404 | iucv_setmask(~0); | ||
2405 | messagesDisabled = 0; | ||
2406 | } | ||
2407 | if (h) { | ||
2408 | if (interrupt->ConnectionQuiesced) | ||
2409 | interrupt->ConnectionQuiesced( | ||
2410 | (iucv_ConnectionQuiesced *)int_buf, | ||
2411 | h->pgm_data); | ||
2412 | else | ||
2413 | iucv_debug(1, | ||
2414 | "ConnectionQuiesced not called"); | ||
2415 | } | ||
2416 | break; | ||
2417 | |||
2418 | case 0x05: /* connection resumed */ | ||
2419 | if (messagesDisabled) { | ||
2420 | iucv_setmask(~0); | ||
2421 | messagesDisabled = 0; | ||
2422 | } | ||
2423 | if (h) { | ||
2424 | if (interrupt->ConnectionResumed) | ||
2425 | interrupt->ConnectionResumed( | ||
2426 | (iucv_ConnectionResumed *)int_buf, | ||
2427 | h->pgm_data); | ||
2428 | else | ||
2429 | iucv_debug(1, | ||
2430 | "ConnectionResumed not called"); | ||
2431 | } | ||
2432 | break; | ||
2433 | |||
2434 | case 0x06: /* priority message complete */ | ||
2435 | case 0x07: /* nonpriority message complete */ | ||
2436 | if (h) { | ||
2437 | if (interrupt->MessageComplete) | ||
2438 | interrupt->MessageComplete( | ||
2439 | (iucv_MessageComplete *)int_buf, | ||
2440 | h->pgm_data); | ||
2441 | else | ||
2442 | iucv_debug(2, | ||
2443 | "MessageComplete not called"); | ||
2444 | } | ||
2445 | break; | ||
2446 | |||
2447 | case 0x08: /* priority message pending */ | ||
2448 | case 0x09: /* nonpriority message pending */ | ||
2449 | if (h) { | ||
2450 | if (interrupt->MessagePending) | ||
2451 | interrupt->MessagePending( | ||
2452 | (iucv_MessagePending *) int_buf, | ||
2453 | h->pgm_data); | ||
2454 | else | ||
2455 | iucv_debug(2, | ||
2456 | "MessagePending not called"); | ||
2457 | } | ||
2458 | break; | ||
2459 | default: /* unknown iucv type */ | ||
2460 | printk(KERN_WARNING "%s: unknown iucv interrupt\n", | ||
2461 | __FUNCTION__); | ||
2462 | break; | ||
2463 | } /* end switch */ | ||
2464 | |||
2465 | iucv_debug(2, "exiting pathid %d, type %02X", | ||
2466 | int_buf->ippathid, int_buf->iptype); | ||
2467 | |||
2468 | return; | ||
2469 | } | ||
2470 | |||
2471 | /** | ||
2472 | * iucv_tasklet_handler: | ||
2473 | * | ||
2474 | * This function loops over the queue of irq buffers and runs iucv_do_int() | ||
2475 | * on every queue element. | ||
2476 | */ | ||
2477 | static void | ||
2478 | iucv_tasklet_handler(unsigned long ignored) | ||
2479 | { | ||
2480 | struct list_head head; | ||
2481 | struct list_head *next; | ||
2482 | ulong flags; | ||
2483 | |||
2484 | spin_lock_irqsave(&iucv_irq_queue_lock, flags); | ||
2485 | list_add(&head, &iucv_irq_queue); | ||
2486 | list_del_init(&iucv_irq_queue); | ||
2487 | spin_unlock_irqrestore (&iucv_irq_queue_lock, flags); | ||
2488 | |||
2489 | next = head.next; | ||
2490 | while (next != &head) { | ||
2491 | iucv_irqdata *p = list_entry(next, iucv_irqdata, queue); | ||
2492 | |||
2493 | next = next->next; | ||
2494 | iucv_do_int(&p->data); | ||
2495 | kfree(p); | ||
2496 | } | ||
2497 | |||
2498 | return; | ||
2499 | } | ||
2500 | |||
2501 | subsys_initcall(iucv_init); | ||
2502 | module_exit(iucv_exit); | ||
2503 | |||
2504 | /** | ||
2505 | * Export all public stuff | ||
2506 | */ | ||
2507 | EXPORT_SYMBOL (iucv_bus); | ||
2508 | EXPORT_SYMBOL (iucv_root); | ||
2509 | EXPORT_SYMBOL (iucv_accept); | ||
2510 | EXPORT_SYMBOL (iucv_connect); | ||
2511 | #if 0 | ||
2512 | EXPORT_SYMBOL (iucv_purge); | ||
2513 | EXPORT_SYMBOL (iucv_query_maxconn); | ||
2514 | EXPORT_SYMBOL (iucv_query_bufsize); | ||
2515 | EXPORT_SYMBOL (iucv_quiesce); | ||
2516 | #endif | ||
2517 | EXPORT_SYMBOL (iucv_receive); | ||
2518 | #if 0 | ||
2519 | EXPORT_SYMBOL (iucv_receive_array); | ||
2520 | #endif | ||
2521 | EXPORT_SYMBOL (iucv_reject); | ||
2522 | #if 0 | ||
2523 | EXPORT_SYMBOL (iucv_reply); | ||
2524 | EXPORT_SYMBOL (iucv_reply_array); | ||
2525 | EXPORT_SYMBOL (iucv_resume); | ||
2526 | #endif | ||
2527 | EXPORT_SYMBOL (iucv_reply_prmmsg); | ||
2528 | EXPORT_SYMBOL (iucv_send); | ||
2529 | EXPORT_SYMBOL (iucv_send2way); | ||
2530 | EXPORT_SYMBOL (iucv_send2way_array); | ||
2531 | EXPORT_SYMBOL (iucv_send2way_prmmsg); | ||
2532 | EXPORT_SYMBOL (iucv_send2way_prmmsg_array); | ||
2533 | #if 0 | ||
2534 | EXPORT_SYMBOL (iucv_send_array); | ||
2535 | EXPORT_SYMBOL (iucv_send_prmmsg); | ||
2536 | EXPORT_SYMBOL (iucv_setmask); | ||
2537 | #endif | ||
2538 | EXPORT_SYMBOL (iucv_sever); | ||
2539 | EXPORT_SYMBOL (iucv_register_program); | ||
2540 | EXPORT_SYMBOL (iucv_unregister_program); | ||
diff --git a/drivers/s390/net/iucv.h b/drivers/s390/net/iucv.h deleted file mode 100644 index 5b6b1b7241c9..000000000000 --- a/drivers/s390/net/iucv.h +++ /dev/null | |||
@@ -1,849 +0,0 @@ | |||
1 | /* | ||
2 | * drivers/s390/net/iucv.h | ||
3 | * IUCV base support. | ||
4 | * | ||
5 | * S390 version | ||
6 | * Copyright (C) 2000 IBM Corporation | ||
7 | * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com) | ||
8 | * Xenia Tkatschow (xenia@us.ibm.com) | ||
9 | * | ||
10 | * | ||
11 | * Functionality: | ||
12 | * To explore any of the IUCV functions, one must first register | ||
13 | * their program using iucv_register_program(). Once your program has | ||
14 | * successfully completed a register, it can exploit the other functions. | ||
15 | * For further reference on all IUCV functionality, refer to the | ||
16 | * CP Programming Services book, also available on the web | ||
17 | * through www.ibm.com/s390/vm/pubs, manual # SC24-5760 | ||
18 | * | ||
19 | * Definition of Return Codes | ||
20 | * -All positive return codes including zero are reflected back | ||
21 | * from CP except for iucv_register_program. The definition of each | ||
22 | * return code can be found in CP Programming Services book. | ||
23 | * Also available on the web through www.ibm.com/s390/vm/pubs, manual # SC24-5760 | ||
24 | * - Return Code of: | ||
25 | * (-EINVAL) Invalid value | ||
26 | * (-ENOMEM) storage allocation failed | ||
27 | * pgmask defined in iucv_register_program will be set depending on input | ||
28 | * parameters. | ||
29 | * | ||
30 | */ | ||
31 | |||
32 | #include <linux/types.h> | ||
33 | #include <asm/debug.h> | ||
34 | |||
35 | /** | ||
36 | * Debug Facility stuff | ||
37 | */ | ||
38 | #define IUCV_DBF_SETUP_NAME "iucv_setup" | ||
39 | #define IUCV_DBF_SETUP_LEN 32 | ||
40 | #define IUCV_DBF_SETUP_PAGES 2 | ||
41 | #define IUCV_DBF_SETUP_NR_AREAS 1 | ||
42 | #define IUCV_DBF_SETUP_LEVEL 3 | ||
43 | |||
44 | #define IUCV_DBF_DATA_NAME "iucv_data" | ||
45 | #define IUCV_DBF_DATA_LEN 128 | ||
46 | #define IUCV_DBF_DATA_PAGES 2 | ||
47 | #define IUCV_DBF_DATA_NR_AREAS 1 | ||
48 | #define IUCV_DBF_DATA_LEVEL 2 | ||
49 | |||
50 | #define IUCV_DBF_TRACE_NAME "iucv_trace" | ||
51 | #define IUCV_DBF_TRACE_LEN 16 | ||
52 | #define IUCV_DBF_TRACE_PAGES 4 | ||
53 | #define IUCV_DBF_TRACE_NR_AREAS 1 | ||
54 | #define IUCV_DBF_TRACE_LEVEL 3 | ||
55 | |||
56 | #define IUCV_DBF_TEXT(name,level,text) \ | ||
57 | do { \ | ||
58 | debug_text_event(iucv_dbf_##name,level,text); \ | ||
59 | } while (0) | ||
60 | |||
61 | #define IUCV_DBF_HEX(name,level,addr,len) \ | ||
62 | do { \ | ||
63 | debug_event(iucv_dbf_##name,level,(void*)(addr),len); \ | ||
64 | } while (0) | ||
65 | |||
66 | DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); | ||
67 | |||
68 | #define IUCV_DBF_TEXT_(name,level,text...) \ | ||
69 | do { \ | ||
70 | char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \ | ||
71 | sprintf(iucv_dbf_txt_buf, text); \ | ||
72 | debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \ | ||
73 | put_cpu_var(iucv_dbf_txt_buf); \ | ||
74 | } while (0) | ||
75 | |||
76 | #define IUCV_DBF_SPRINTF(name,level,text...) \ | ||
77 | do { \ | ||
78 | debug_sprintf_event(iucv_dbf_trace, level, ##text ); \ | ||
79 | debug_sprintf_event(iucv_dbf_trace, level, text ); \ | ||
80 | } while (0) | ||
81 | |||
82 | /** | ||
83 | * some more debug stuff | ||
84 | */ | ||
85 | #define IUCV_HEXDUMP16(importance,header,ptr) \ | ||
86 | PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ | ||
87 | "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ | ||
88 | *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ | ||
89 | *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ | ||
90 | *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ | ||
91 | *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ | ||
92 | *(((char*)ptr)+12),*(((char*)ptr)+13), \ | ||
93 | *(((char*)ptr)+14),*(((char*)ptr)+15)); \ | ||
94 | PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ | ||
95 | "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ | ||
96 | *(((char*)ptr)+16),*(((char*)ptr)+17), \ | ||
97 | *(((char*)ptr)+18),*(((char*)ptr)+19), \ | ||
98 | *(((char*)ptr)+20),*(((char*)ptr)+21), \ | ||
99 | *(((char*)ptr)+22),*(((char*)ptr)+23), \ | ||
100 | *(((char*)ptr)+24),*(((char*)ptr)+25), \ | ||
101 | *(((char*)ptr)+26),*(((char*)ptr)+27), \ | ||
102 | *(((char*)ptr)+28),*(((char*)ptr)+29), \ | ||
103 | *(((char*)ptr)+30),*(((char*)ptr)+31)); | ||
104 | |||
105 | static inline void | ||
106 | iucv_hex_dump(unsigned char *buf, size_t len) | ||
107 | { | ||
108 | size_t i; | ||
109 | |||
110 | for (i = 0; i < len; i++) { | ||
111 | if (i && !(i % 16)) | ||
112 | printk("\n"); | ||
113 | printk("%02x ", *(buf + i)); | ||
114 | } | ||
115 | printk("\n"); | ||
116 | } | ||
117 | /** | ||
118 | * end of debug stuff | ||
119 | */ | ||
120 | |||
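A hedged example of how the macros above are typically invoked; iucv_dbf_trace, iucv_dbf_setup and iucv_dbf_data are assumed to be the debug_info_t handles that the ##name token pasting resolves to, registered elsewhere in the driver:

	IUCV_DBF_TEXT(trace, 3, "connect");		/* fixed-text event */
	IUCV_DBF_TEXT_(setup, 2, "rc=%d", rc);		/* formatted via per-CPU buffer */
	IUCV_DBF_HEX(data, 2, buf, len);		/* raw hex dump of buf */
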
121 | #define uchar unsigned char | ||
122 | #define ushort unsigned short | ||
123 | #define ulong unsigned long | ||
124 | #define iucv_handle_t void * | ||
125 | |||
126 | /* flags1: | ||
127 | * All flags are defined in the field IPFLAGS1 of each function | ||
128 | * and can be found in CP Programming Services. | ||
129 | * IPLOCAL - Indicates the connect can only be satisfied on the | ||
130 | * local system | ||
131 | * IPPRTY - Indicates a priority message | ||
132 | * IPQUSCE - Indicates you do not want to receive messages on a | ||
133 | * path until an iucv_resume is issued | ||
134 | * IPRMDATA - Indicates that the message is in the parameter list | ||
135 | */ | ||
136 | #define IPLOCAL 0x01 | ||
137 | #define IPPRTY 0x20 | ||
138 | #define IPQUSCE 0x40 | ||
139 | #define IPRMDATA 0x80 | ||
140 | |||
141 | /* flags1_out: | ||
142 | * All flags are defined in the output field of IPFLAGS1 for each function | ||
143 | * and can be found in CP Programming Services. | ||
144 | * IPNORPY - Specifies this is a one-way message and no reply is expected. | ||
145 | * IPPRTY - Indicates a priority message is permitted. Defined in flags1. | ||
146 | */ | ||
147 | #define IPNORPY 0x10 | ||
148 | |||
149 | #define Nonpriority_MessagePendingInterruptsFlag 0x80 | ||
150 | #define Priority_MessagePendingInterruptsFlag 0x40 | ||
151 | #define Nonpriority_MessageCompletionInterruptsFlag 0x20 | ||
152 | #define Priority_MessageCompletionInterruptsFlag 0x10 | ||
153 | #define IUCVControlInterruptsFlag 0x08 | ||
154 | #define AllInterrupts 0xf8 | ||
155 | /* | ||
156 | * Mapping of external interrupt buffers should be used with the corresponding | ||
157 | * interrupt types. | ||
158 | * Names: iucv_ConnectionPending -> connection pending | ||
159 | * iucv_ConnectionComplete -> connection complete | ||
160 | * iucv_ConnectionSevered -> connection severed | ||
161 | * iucv_ConnectionQuiesced -> connection quiesced | ||
162 | * iucv_ConnectionResumed -> connection resumed | ||
163 | * iucv_MessagePending -> message pending | ||
164 | * iucv_MessageComplete -> message complete | ||
165 | */ | ||
166 | typedef struct { | ||
167 | u16 ippathid; | ||
168 | uchar ipflags1; | ||
169 | uchar iptype; | ||
170 | u16 ipmsglim; | ||
171 | u16 res1; | ||
172 | uchar ipvmid[8]; | ||
173 | uchar ipuser[16]; | ||
174 | u32 res3; | ||
175 | uchar ippollfg; | ||
176 | uchar res4[3]; | ||
177 | } iucv_ConnectionPending; | ||
178 | |||
179 | typedef struct { | ||
180 | u16 ippathid; | ||
181 | uchar ipflags1; | ||
182 | uchar iptype; | ||
183 | u16 ipmsglim; | ||
184 | u16 res1; | ||
185 | uchar res2[8]; | ||
186 | uchar ipuser[16]; | ||
187 | u32 res3; | ||
188 | uchar ippollfg; | ||
189 | uchar res4[3]; | ||
190 | } iucv_ConnectionComplete; | ||
191 | |||
192 | typedef struct { | ||
193 | u16 ippathid; | ||
194 | uchar res1; | ||
195 | uchar iptype; | ||
196 | u32 res2; | ||
197 | uchar res3[8]; | ||
198 | uchar ipuser[16]; | ||
199 | u32 res4; | ||
200 | uchar ippollfg; | ||
201 | uchar res5[3]; | ||
202 | } iucv_ConnectionSevered; | ||
203 | |||
204 | typedef struct { | ||
205 | u16 ippathid; | ||
206 | uchar res1; | ||
207 | uchar iptype; | ||
208 | u32 res2; | ||
209 | uchar res3[8]; | ||
210 | uchar ipuser[16]; | ||
211 | u32 res4; | ||
212 | uchar ippollfg; | ||
213 | uchar res5[3]; | ||
214 | } iucv_ConnectionQuiesced; | ||
215 | |||
216 | typedef struct { | ||
217 | u16 ippathid; | ||
218 | uchar res1; | ||
219 | uchar iptype; | ||
220 | u32 res2; | ||
221 | uchar res3[8]; | ||
222 | uchar ipuser[16]; | ||
223 | u32 res4; | ||
224 | uchar ippollfg; | ||
225 | uchar res5[3]; | ||
226 | } iucv_ConnectionResumed; | ||
227 | |||
228 | typedef struct { | ||
229 | u16 ippathid; | ||
230 | uchar ipflags1; | ||
231 | uchar iptype; | ||
232 | u32 ipmsgid; | ||
233 | u32 iptrgcls; | ||
234 | union u2 { | ||
235 | u32 iprmmsg1_u32; | ||
236 | uchar iprmmsg1[4]; | ||
237 | } ln1msg1; | ||
238 | union u1 { | ||
239 | u32 ipbfln1f; | ||
240 | uchar iprmmsg2[4]; | ||
241 | } ln1msg2; | ||
242 | u32 res1[3]; | ||
243 | u32 ipbfln2f; | ||
244 | uchar ippollfg; | ||
245 | uchar res2[3]; | ||
246 | } iucv_MessagePending; | ||
247 | |||
248 | typedef struct { | ||
249 | u16 ippathid; | ||
250 | uchar ipflags1; | ||
251 | uchar iptype; | ||
252 | u32 ipmsgid; | ||
253 | u32 ipaudit; | ||
254 | uchar iprmmsg[8]; | ||
255 | u32 ipsrccls; | ||
256 | u32 ipmsgtag; | ||
257 | u32 res; | ||
258 | u32 ipbfln2f; | ||
259 | uchar ippollfg; | ||
260 | uchar res2[3]; | ||
261 | } iucv_MessageComplete; | ||
262 | |||
263 | /* | ||
264 | * iucv_interrupt_ops_t: Is a vector of functions that handle | ||
265 | * IUCV interrupts. | ||
266 | * Parameter list: | ||
267 | * eib - is a pointer to a 40-byte area described | ||
268 | * with one of the structures above. | ||
269 | * pgm_data - this data is strictly for the | ||
270 | * interrupt handler that is passed by | ||
271 | * the application. This may be an address | ||
272 | * or token. | ||
273 | */ | ||
274 | typedef struct { | ||
275 | void (*ConnectionPending) (iucv_ConnectionPending * eib, | ||
276 | void *pgm_data); | ||
277 | void (*ConnectionComplete) (iucv_ConnectionComplete * eib, | ||
278 | void *pgm_data); | ||
279 | void (*ConnectionSevered) (iucv_ConnectionSevered * eib, | ||
280 | void *pgm_data); | ||
281 | void (*ConnectionQuiesced) (iucv_ConnectionQuiesced * eib, | ||
282 | void *pgm_data); | ||
283 | void (*ConnectionResumed) (iucv_ConnectionResumed * eib, | ||
284 | void *pgm_data); | ||
285 | void (*MessagePending) (iucv_MessagePending * eib, void *pgm_data); | ||
286 | void (*MessageComplete) (iucv_MessageComplete * eib, void *pgm_data); | ||
287 | } iucv_interrupt_ops_t; | ||
288 | |||
289 | /* | ||
290 | * iucv_array_t: Defines a buffer array. | ||
291 | * Inside the array may be 31-bit addresses and 31-bit lengths. | ||
292 | */ | ||
293 | typedef struct { | ||
294 | u32 address; | ||
295 | u32 length; | ||
296 | } iucv_array_t __attribute__ ((aligned (8))); | ||
297 | |||
298 | extern struct bus_type iucv_bus; | ||
299 | extern struct device *iucv_root; | ||
300 | |||
301 | /* -prototypes- */ | ||
302 | /* | ||
303 | * Name: iucv_register_program | ||
304 | * Purpose: Registers an application with IUCV | ||
305 | * Input: pgmname - user identification | ||
306 | * userid - machine identification | ||
307 | * pgmmask - indicates which bits in the pgmname and userid combined will be | ||
308 | * used to determine who is given control | ||
309 | * ops - address of vector of interrupt handlers | ||
310 | * pgm_data- application data passed to interrupt handlers | ||
311 | * Output: NA | ||
312 | * Return: address of handler | ||
313 | * (0) - Error occurred, registration not completed. | ||
314 | * NOTE: Exact cause of failure will be recorded in syslog. | ||
315 | */ | ||
316 | iucv_handle_t iucv_register_program (uchar pgmname[16], | ||
317 | uchar userid[8], | ||
318 | uchar pgmmask[24], | ||
319 | iucv_interrupt_ops_t * ops, | ||
320 | void *pgm_data); | ||
321 | |||
322 | /* | ||
323 | * Name: iucv_unregister_program | ||
324 | * Purpose: Unregister application with IUCV | ||
325 | * Input: address of handler | ||
326 | * Output: NA | ||
327 | * Return: (0) - Normal return | ||
328 | * (-EINVAL) - Internal error, wild pointer | ||
329 | */ | ||
330 | int iucv_unregister_program (iucv_handle_t handle); | ||
331 | |||
332 | /* | ||
333 | * Name: iucv_accept | ||
334 | * Purpose: This function is issued after the user receives a Connection Pending external | ||
335 | * interrupt and now wishes to complete the IUCV communication path. | ||
336 | * Input: pathid - u16 , Path identification number | ||
337 | * msglim_reqstd - u16, The number of outstanding messages requested. | ||
338 | * user_data - uchar[16], Data specified by the iucv_connect function. | ||
339 | * flags1 - int, Contains options for this path. | ||
340 | * -IPPRTY - 0x20, Specifies if you want to send priority message. | ||
341 | * -IPRMDATA - 0x80, Specifies whether your program can handle a message | ||
342 | * in the parameter list. | ||
343 | * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being | ||
344 | * established. | ||
345 | * handle - iucv_handle_t, Address of handler. | ||
346 | * pgm_data - void *, Application data passed to interrupt handlers. | ||
347 | * flags1_out - int * Contains information about the path | ||
348 | * - IPPRTY - 0x20, Indicates you may send priority messages. | ||
349 | * msglim - *u16, Number of outstanding messages. | ||
350 | * Output: return code from CP IUCV call. | ||
351 | */ | ||
352 | |||
353 | int iucv_accept (u16 pathid, | ||
354 | u16 msglim_reqstd, | ||
355 | uchar user_data[16], | ||
356 | int flags1, | ||
357 | iucv_handle_t handle, | ||
358 | void *pgm_data, int *flags1_out, u16 * msglim); | ||
359 | |||
360 | /* | ||
361 | * Name: iucv_connect | ||
362 | * Purpose: This function establishes an IUCV path. Although the connect may complete | ||
363 | * successfully, you are not able to use the path until you receive an IUCV | ||
364 | * Connection Complete external interrupt. | ||
365 | * Input: pathid - u16 *, Path identification number | ||
366 | * msglim_reqstd - u16, Number of outstanding messages requested | ||
367 | * user_data - uchar[16], 16-byte user data | ||
368 | * userid - uchar[8], User identification | ||
369 | * system_name - uchar[8], 8 bytes identifying the system name | ||
370 | * flags1 - int, Contains options for this path. | ||
371 | * -IPPRTY - 0x20, Specifies if you want to send priority message. | ||
372 | * -IPRMDATA - 0x80, Specifies whether your program can handle a message | ||
373 | * in the parameter list. | ||
374 | * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being | ||
375 | * established. | ||
376 | * -IPLOCAL - 0x01, Allows an application to force the partner to be on | ||
377 | * the local system. If local is specified then target class cannot be | ||
378 | * specified. | ||
379 | * flags1_out - int * Contains information about the path | ||
380 | * - IPPRTY - 0x20, Indicates you may send priority messages. | ||
381 | * msglim - * u16, Number of outstanding messages | ||
382 | * handle - iucv_handle_t, Address of handler | ||
383 | * pgm_data - void *, Application data passed to interrupt handlers | ||
384 | * Output: return code from CP IUCV call | ||
385 | * rc - return code from iucv_declare_buffer | ||
386 | * -EINVAL - Invalid handle passed by application | ||
387 | * -EINVAL - Pathid address is NULL | ||
388 | * add_pathid_result - Return code from internal function add_pathid | ||
389 | */ | ||
390 | int | ||
391 | iucv_connect (u16 * pathid, | ||
392 | u16 msglim_reqstd, | ||
393 | uchar user_data[16], | ||
394 | uchar userid[8], | ||
395 | uchar system_name[8], | ||
396 | int flags1, | ||
397 | int *flags1_out, | ||
398 | u16 * msglim, iucv_handle_t handle, void *pgm_data); | ||
399 | |||
400 | /* | ||
401 | * Name: iucv_purge | ||
402 | * Purpose: This function cancels a message that you have sent. | ||
403 | * Input: pathid - Path identification number. | ||
404 | * msgid - Specifies the message ID of the message to be purged. | ||
405 | * srccls - Specifies the source message class. | ||
406 | * Output: audit - Contains information about asynchronous error | ||
407 | * that may have affected the normal completion | ||
408 | * of this message. | ||
409 | * Return: Return code from CP IUCV call. | ||
410 | */ | ||
411 | int iucv_purge (u16 pathid, u32 msgid, u32 srccls, __u32 *audit); | ||
412 | /* | ||
413 | * Name: iucv_query_maxconn | ||
414 | * Purpose: This function determines the maximum number of communication paths you | ||
415 | * may establish. | ||
416 | * Return: maxconn - ulong, Maximum number of connections the virtual machine may | ||
417 | * establish. | ||
418 | */ | ||
419 | ulong iucv_query_maxconn (void); | ||
420 | |||
421 | /* | ||
422 | * Name: iucv_query_bufsize | ||
423 | * Purpose: This function determines how large an external interrupt | ||
424 | * buffer IUCV requires to store information. | ||
425 | * Return: bufsize - ulong, Size of external interrupt buffer. | ||
426 | */ | ||
427 | ulong iucv_query_bufsize (void); | ||
428 | |||
429 | /* | ||
430 | * Name: iucv_quiesce | ||
431 | * Purpose: This function temporarily suspends incoming messages on an | ||
432 | * IUCV path. You can later reactivate the path by invoking | ||
433 | * the iucv_resume function. | ||
434 | * Input: pathid - Path identification number | ||
435 | * user_data - 16-bytes of user data | ||
436 | * Output: NA | ||
437 | * Return: Return code from CP IUCV call. | ||
438 | */ | ||
439 | int iucv_quiesce (u16 pathid, uchar user_data[16]); | ||
440 | |||
441 | /* | ||
442 | * Name: iucv_receive | ||
443 | * Purpose: This function receives messages that are being sent to you | ||
444 | * over established paths. Data will be returned in buffer for length of | ||
445 | * buflen. | ||
446 | * Input: | ||
447 | * pathid - Path identification number. | ||
448 | * buffer - Address of buffer to receive. | ||
449 | * buflen - Length of buffer to receive. | ||
450 | * msgid - Specifies the message ID. | ||
451 | * trgcls - Specifies target class. | ||
452 | * Output: | ||
453 | * flags1_out: int *, Contains information about this path. | ||
454 | * IPNORPY - 0x10 Specifies this is a one-way message and no reply is | ||
455 | * expected. | ||
456 | * IPPRTY - 0x20 Specifies if you want to send priority message. | ||
457 | * IPRMDATA - 0x80 specifies the data is contained in the parameter list | ||
458 | * residual_buffer - address of buffer updated by the number | ||
459 | * of bytes you have received. | ||
460 | * residual_length - | ||
461 | * Contains one of the following values, if the receive buffer is: | ||
462 | * The same length as the message, this field is zero. | ||
463 | * Longer than the message, this field contains the number of | ||
464 | * bytes remaining in the buffer. | ||
465 | * Shorter than the message, this field contains the residual | ||
466 | * count (that is, the number of bytes remaining in the | ||
467 | * message that does not fit into the buffer). In this | ||
468 | * case b2f0_result = 5. | ||
469 | * Return: Return code from CP IUCV call. | ||
470 | * (-EINVAL) - buffer address is pointing to NULL | ||
471 | */ | ||
472 | int iucv_receive (u16 pathid, | ||
473 | u32 msgid, | ||
474 | u32 trgcls, | ||
475 | void *buffer, | ||
476 | ulong buflen, | ||
477 | int *flags1_out, | ||
478 | ulong * residual_buffer, ulong * residual_length); | ||
479 | |||
480 | /* | ||
481 | * Name: iucv_receive_array | ||
482 | * Purpose: This function receives messages that are being sent to you | ||
483 | * over established paths. Data will be returned in first buffer for | ||
484 | * length of first buffer. | ||
485 | * Input: pathid - Path identification number. | ||
486 | * msgid - specifies the message ID. | ||
487 | * trgcls - Specifies target class. | ||
488 | * buffer - Address of array of buffers. | ||
489 | * buflen - Total length of buffers. | ||
490 | * Output: | ||
491 | * flags1_out: int *, Contains information about this path. | ||
492 | * IPNORPY - 0x10 Specifies this is a one-way message and no reply is | ||
493 | * expected. | ||
494 | * IPPRTY - 0x20 Specifies if you want to send priority message. | ||
495 | * IPRMDATA - 0x80 specifies the data is contained in the parameter list | ||
496 | * residual_buffer - address points to the current list entry IUCV | ||
497 | * is working on. | ||
498 | * residual_length - | ||
499 | * Contains one of the following values, if the receive buffer is: | ||
500 | * The same length as the message, this field is zero. | ||
501 | * Longer than the message, this field contains the number of | ||
502 | * bytes remaining in the buffer. | ||
503 | * Shorter than the message, this field contains the residual | ||
504 | * count (that is, the number of bytes remaining in the | ||
505 | * message that does not fit into the buffer). In this | ||
506 | * case b2f0_result = 5. | ||
507 | * Return: Return code from CP IUCV call. | ||
508 | * (-EINVAL) - Buffer address is NULL. | ||
509 | */ | ||
510 | int iucv_receive_array (u16 pathid, | ||
511 | u32 msgid, | ||
512 | u32 trgcls, | ||
513 | iucv_array_t * buffer, | ||
514 | ulong buflen, | ||
515 | int *flags1_out, | ||
516 | ulong * residual_buffer, ulong * residual_length); | ||
517 | |||
518 | /* | ||
519 | * Name: iucv_reject | ||
520 | * Purpose: The reject function refuses a specified message. Between the | ||
521 | * time you are notified of a message and the time that you | ||
522 | * complete the message, the message may be rejected. | ||
523 | * Input: pathid - Path identification number. | ||
524 | * msgid - Specifies the message ID. | ||
525 | * trgcls - Specifies target class. | ||
526 | * Output: NA | ||
527 | * Return: Return code from CP IUCV call. | ||
528 | */ | ||
529 | int iucv_reject (u16 pathid, u32 msgid, u32 trgcls); | ||
530 | |||
531 | /* | ||
532 | * Name: iucv_reply | ||
533 | * Purpose: This function responds to the two-way messages that you | ||
534 | * receive. You must identify completely the message to | ||
535 | * which you wish to reply, i.e. pathid, msgid, and trgcls. | ||
536 | * Input: pathid - Path identification number. | ||
537 | * msgid - Specifies the message ID. | ||
538 | * trgcls - Specifies target class. | ||
539 | * flags1 - Option for path. | ||
540 | * IPPRTY- 0x20, Specifies if you want to send priority message. | ||
541 | * buffer - Address of reply buffer. | ||
542 | * buflen - Length of reply buffer. | ||
543 | * Output: residual_buffer - Address of buffer updated by the number | ||
544 | * of bytes you have moved. | ||
545 | * residual_length - Contains one of the following values: | ||
546 | * If the answer buffer is the same length as the reply, this field | ||
547 | * contains zero. | ||
548 | * If the answer buffer is longer than the reply, this field contains | ||
549 | * the number of bytes remaining in the buffer. | ||
550 | * If the answer buffer is shorter than the reply, this field contains | ||
551 | * a residual count (that is, the number of bytes remaining in the | ||
552 | * reply that does not fit into the buffer). In this | ||
553 | * case b2f0_result = 5. | ||
554 | * Return: Return code from CP IUCV call. | ||
555 | * (-EINVAL) - Buffer address is NULL. | ||
556 | */ | ||
557 | int iucv_reply (u16 pathid, | ||
558 | u32 msgid, | ||
559 | u32 trgcls, | ||
560 | int flags1, | ||
561 | void *buffer, ulong buflen, ulong * residual_buffer, | ||
562 | ulong * residual_length); | ||
563 | |||
564 | /* | ||
565 | * Name: iucv_reply_array | ||
566 | * Purpose: This function responds to the two-way messages that you | ||
567 | * receive. You must identify completely the message to | ||
568 | * which you wish to reply, i.e. pathid, msgid, and trgcls. | ||
569 | * The array identifies a list of addresses and lengths of | ||
570 | * discontiguous buffers that contains the reply data. | ||
571 | * Input: pathid - Path identification number | ||
572 | * msgid - Specifies the message ID. | ||
573 | * trgcls - Specifies target class. | ||
574 | * flags1 - Option for path. | ||
575 | * IPPRTY- 0x20, Specifies if you want to send priority message. | ||
576 | * buffer - Address of array of reply buffers. | ||
577 | * buflen - Total length of reply buffers. | ||
578 | * Output: residual_buffer - Address of buffer which IUCV is currently working on. | ||
579 | * residual_length - Contains one of the following values: | ||
580 | * If the answer buffer is the same length as the reply, this field | ||
581 | * contains zero. | ||
582 | * If the answer buffer is longer than the reply, this field contains | ||
583 | * the number of bytes remaining in the buffer. | ||
584 | * If the answer buffer is shorter than the reply, this field contains | ||
585 | * a residual count (that is, the number of bytes remaining in the | ||
586 | * reply that does not fit into the buffer). In this | ||
587 | * case b2f0_result = 5. | ||
588 | * Return: Return code from CP IUCV call. | ||
589 | * (-EINVAL) - Buffer address is NULL. | ||
590 | */ | ||
591 | int iucv_reply_array (u16 pathid, | ||
592 | u32 msgid, | ||
593 | u32 trgcls, | ||
594 | int flags1, | ||
595 | iucv_array_t * buffer, | ||
596 | ulong buflen, ulong * residual_address, | ||
597 | ulong * residual_length); | ||
598 | |||
599 | /* | ||
600 | * Name: iucv_reply_prmmsg | ||
601 | * Purpose: This function responds to the two-way messages that you | ||
602 | * receive. You must identify completely the message to | ||
603 | * which you wish to reply, i.e. pathid, msgid, and trgcls. | ||
604 | * Prmmsg signifies the data is moved into the | ||
605 | * parameter list. | ||
606 | * Input: pathid - Path identification number. | ||
607 | * msgid - Specifies the message ID. | ||
608 | * trgcls - Specifies target class. | ||
609 | * flags1 - Option for path. | ||
610 | * IPPRTY- 0x20 Specifies if you want to send priority message. | ||
611 | * prmmsg - 8-bytes of data to be placed into the parameter | ||
612 | * list. | ||
613 | * Output: NA | ||
614 | * Return: Return code from CP IUCV call. | ||
615 | */ | ||
616 | int iucv_reply_prmmsg (u16 pathid, | ||
617 | u32 msgid, u32 trgcls, int flags1, uchar prmmsg[8]); | ||
618 | |||
619 | /* | ||
620 | * Name: iucv_resume | ||
621 | * Purpose: This function restores communications over a quiesced path | ||
622 | * Input: pathid - Path identification number. | ||
623 | * user_data - 16-bytes of user data. | ||
624 | * Output: NA | ||
625 | * Return: Return code from CP IUCV call. | ||
626 | */ | ||
627 | int iucv_resume (u16 pathid, uchar user_data[16]); | ||
628 | |||
629 | /* | ||
630 | * Name: iucv_send | ||
631 | * Purpose: This function transmits data to another application. | ||
632 | * Data to be transmitted is in a buffer and this is a | ||
633 | * one-way message and the receiver will not reply to the | ||
634 | * message. | ||
635 | * Input: pathid - Path identification number. | ||
636 | * trgcls - Specifies target class. | ||
637 | * srccls - Specifies the source message class. | ||
638 | * msgtag - Specifies a tag to be associated with the message. | ||
639 | * flags1 - Option for path. | ||
640 | * IPPRTY- 0x20 Specifies if you want to send priority message. | ||
641 | * buffer - Address of send buffer. | ||
642 | * buflen - Length of send buffer. | ||
643 | * Output: msgid - Specifies the message ID. | ||
644 | * Return: Return code from CP IUCV call. | ||
645 | * (-EINVAL) - Buffer address is NULL. | ||
646 | */ | ||
647 | int iucv_send (u16 pathid, | ||
648 | u32 * msgid, | ||
649 | u32 trgcls, | ||
650 | u32 srccls, u32 msgtag, int flags1, void *buffer, ulong buflen); | ||
651 | |||
652 | /* | ||
653 | * Name: iucv_send_array | ||
654 | * Purpose: This function transmits data to another application. | ||
655 | * buffer contains the address of the array of | ||
656 | * addresses and lengths of discontiguous buffers that hold | ||
657 | * the message text. This is a one-way message and the | ||
658 | * receiver will not reply to the message. | ||
659 | * Input: pathid - Path identification number. | ||
660 | * trgcls - Specifies target class. | ||
661 | * srccls - Specifies the source message class. | ||
662 | * msgtag - Specifies a tag to be associated with the message. | ||
663 | * flags1 - Option for path. | ||
664 | * IPPRTY- specifies if you want to send priority message. | ||
665 | * buffer - Address of array of send buffers. | ||
666 | * buflen - Total length of send buffers. | ||
667 | * Output: msgid - Specifies the message ID. | ||
668 | * Return: Return code from CP IUCV call. | ||
669 | * (-EINVAL) - Buffer address is NULL. | ||
670 | */ | ||
671 | int iucv_send_array (u16 pathid, | ||
672 | u32 * msgid, | ||
673 | u32 trgcls, | ||
674 | u32 srccls, | ||
675 | u32 msgtag, | ||
676 | int flags1, iucv_array_t * buffer, ulong buflen); | ||
677 | |||
678 | /* | ||
679 | * Name: iucv_send_prmmsg | ||
680 | * Purpose: This function transmits data to another application. | ||
681 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
682 | * into the parameter list. This is a one-way message and the | ||
683 | * receiver will not reply to the message. | ||
684 | * Input: pathid - Path identification number. | ||
685 | * trgcls - Specifies target class. | ||
686 | * srccls - Specifies the source message class. | ||
687 | * msgtag - Specifies a tag to be associated with the message. | ||
688 | * flags1 - Option for path. | ||
689 | * IPPRTY- 0x20 specifies if you want to send priority message. | ||
690 | * prmmsg - 8-bytes of data to be placed into parameter list. | ||
691 | * Output: msgid - Specifies the message ID. | ||
692 | * Return: Return code from CP IUCV call. | ||
693 | */ | ||
694 | int iucv_send_prmmsg (u16 pathid, | ||
695 | u32 * msgid, | ||
696 | u32 trgcls, | ||
697 | u32 srccls, u32 msgtag, int flags1, uchar prmmsg[8]); | ||
698 | |||
699 | /* | ||
700 | * Name: iucv_send2way | ||
701 | * Purpose: This function transmits data to another application. | ||
702 | * Data to be transmitted is in a buffer. The receiver | ||
703 | * of the send is expected to reply to the message and | ||
704 | * a buffer is provided into which IUCV moves the reply | ||
705 | * to this message. | ||
706 | * Input: pathid - Path identification number. | ||
707 | * trgcls - Specifies target class. | ||
708 | * srccls - Specifies the source message class. | ||
709 | * msgtag - Specifies a tag associated with the message. | ||
710 | * flags1 - Option for path. | ||
711 | * IPPRTY- 0x20 Specifies if you want to send priority message. | ||
712 | * buffer - Address of send buffer. | ||
713 | * buflen - Length of send buffer. | ||
714 | * ansbuf - Address of buffer into which IUCV moves the reply of | ||
715 | * this message. | ||
716 | * anslen - Address of length of buffer. | ||
717 | * Output: msgid - Specifies the message ID. | ||
718 | * Return: Return code from CP IUCV call. | ||
719 | * (-EINVAL) - Buffer or ansbuf address is NULL. | ||
720 | */ | ||
721 | int iucv_send2way (u16 pathid, | ||
722 | u32 * msgid, | ||
723 | u32 trgcls, | ||
724 | u32 srccls, | ||
725 | u32 msgtag, | ||
726 | int flags1, | ||
727 | void *buffer, ulong buflen, void *ansbuf, ulong anslen); | ||
728 | |||
729 | /* | ||
730 | * Name: iucv_send2way_array | ||
731 | * Purpose: This function transmits data to another application. | ||
732 | * buffer contains the address of the array of | ||
733 | * addresses and lengths of discontiguous buffers that hold | ||
734 | * the message text. The receiver of the send is expected to | ||
735 | * reply to the message and a buffer is provided into which | ||
736 | * IUCV moves the reply to this message. | ||
737 | * Input: pathid - Path identification number. | ||
738 | * trgcls - Specifies target class. | ||
739 | * srccls - Specifies the source message class. | ||
740 | * msgtag - Specifies a tag to be associated with the message. | ||
741 | * flags1 - Option for path. | ||
742 | * IPPRTY- 0x20 Specifies if you want to send priority message. | ||
743 | * buffer - Address of array of send buffers. | ||
744 | * buflen - Total length of send buffers. | ||
745 | * ansbuf - Address of array of buffer into which IUCV moves the reply | ||
746 | * of this message. | ||
747 | * anslen - Address of length of reply buffers. | ||
748 | * Output: msgid - Specifies the message ID. | ||
749 | * Return: Return code from CP IUCV call. | ||
750 | * (-EINVAL) - Buffer address is NULL. | ||
751 | */ | ||
752 | int iucv_send2way_array (u16 pathid, | ||
753 | u32 * msgid, | ||
754 | u32 trgcls, | ||
755 | u32 srccls, | ||
756 | u32 msgtag, | ||
757 | int flags1, | ||
758 | iucv_array_t * buffer, | ||
759 | ulong buflen, iucv_array_t * ansbuf, ulong anslen); | ||
760 | |||
761 | /* | ||
762 | * Name: iucv_send2way_prmmsg | ||
763 | * Purpose: This function transmits data to another application. | ||
764 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
765 | * into the parameter list. This is a two-way message and the | ||
766 | * receiver of the message is expected to reply. A buffer | ||
767 | * is provided into which IUCV moves the reply to this | ||
768 | * message. | ||
769 | * Input: pathid - Path identification number. | ||
770 | * trgcls - Specifies target class. | ||
771 | * srccls - Specifies the source message class. | ||
772 | * msgtag - Specifies a tag to be associated with the message. | ||
773 | * flags1 - Option for path. | ||
774 | * IPPRTY- 0x20 Specifies if you want to send priority message. | ||
775 | * prmmsg - 8-bytes of data to be placed in parameter list. | ||
776 | * ansbuf - Address of buffer into which IUCV moves the reply of | ||
777 | * this message. | ||
778 | * anslen - Address of length of buffer. | ||
779 | * Output: msgid - Specifies the message ID. | ||
780 | * Return: Return code from CP IUCV call. | ||
781 | * (-EINVAL) - Buffer address is NULL. | ||
782 | */ | ||
783 | int iucv_send2way_prmmsg (u16 pathid, | ||
784 | u32 * msgid, | ||
785 | u32 trgcls, | ||
786 | u32 srccls, | ||
787 | u32 msgtag, | ||
788 | ulong flags1, | ||
789 | uchar prmmsg[8], void *ansbuf, ulong anslen); | ||
790 | |||
791 | /* | ||
792 | * Name: iucv_send2way_prmmsg_array | ||
793 | * Purpose: This function transmits data to another application. | ||
794 | * Prmmsg specifies that the 8-bytes of data are to be moved | ||
795 | * into the parameter list. This is a two-way message and the | ||
796 | * receiver of the message is expected to reply. A buffer | ||
797 | * is provided into which IUCV moves the reply to this | ||
798 | * message. ansbuf contains the address of the | ||
799 | * array of addresses and lengths of discontiguous buffers | ||
800 | * that contain the reply. | ||
801 | * Input: pathid - Path identification number. | ||
802 | * trgcls - Specifies target class. | ||
803 | * srccls - Specifies the source message class. | ||
804 | * msgtag - Specifies a tag to be associated with the message. | ||
805 | * flags1 - Option for path. | ||
806 | * IPPRTY- 0x20 specifies if you want to send priority message. | ||
807 | * prmmsg - 8-bytes of data to be placed into the parameter list. | ||
808 | * ansbuf - Address of array of buffer into which IUCV moves the reply | ||
809 | * of this message. | ||
810 | * anslen - Address of length of reply buffers. | ||
811 | * Output: msgid - Specifies the message ID. | ||
812 | * Return: Return code from CP IUCV call. | ||
813 | * (-EINVAL) - Ansbuf address is NULL. | ||
814 | */ | ||
815 | int iucv_send2way_prmmsg_array (u16 pathid, | ||
816 | u32 * msgid, | ||
817 | u32 trgcls, | ||
818 | u32 srccls, | ||
819 | u32 msgtag, | ||
820 | int flags1, | ||
821 | uchar prmmsg[8], | ||
822 | iucv_array_t * ansbuf, ulong anslen); | ||
823 | |||
824 | /* | ||
825 | * Name: iucv_setmask | ||
826 | * Purpose: This function enables or disables the following IUCV | ||
827 | * external interruptions: Nonpriority and priority message | ||
828 | * interrupts, nonpriority and priority reply interrupts. | ||
829 | * Input: SetMaskFlag - options for interrupts | ||
830 | * 0x80 - Nonpriority_MessagePendingInterruptsFlag | ||
831 | * 0x40 - Priority_MessagePendingInterruptsFlag | ||
832 | * 0x20 - Nonpriority_MessageCompletionInterruptsFlag | ||
833 | * 0x10 - Priority_MessageCompletionInterruptsFlag | ||
834 | * 0x08 - IUCVControlInterruptsFlag | ||
835 | * Output: NA | ||
836 | * Return: Return code from CP IUCV call. | ||
837 | */ | ||
838 | int iucv_setmask (int SetMaskFlag); | ||
839 | |||
840 | /* | ||
841 | * Name: iucv_sever | ||
842 | * Purpose: This function terminates an IUCV path. | ||
843 | * Input: pathid - Path identification number. | ||
844 | * user_data - 16-bytes of user data. | ||
845 | * Output: NA | ||
846 | * Return: Return code from CP IUCV call. | ||
847 | * (-EINVAL) - Internal error, wild pointer. | ||
848 | */ | ||
849 | int iucv_sever (u16 pathid, uchar user_data[16]); | ||
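
For orientation only, here is a minimal, hypothetical sketch of how a caller would have used the legacy interface declared above (register, connect, send a one-way message, tear down). It uses only the functions and types from this removed header; the callback bodies, placeholder names, the zeroed ids/mask, and the message-limit value 16 are illustrative assumptions and are not part of this patch.

/* Hypothetical usage sketch of the legacy IUCV API above; names, ids and
 * limits are illustrative, not taken from this patch. */
static void example_connack(iucv_ConnectionComplete *eib, void *pgm_data)
{
	/* The path is usable once this interrupt has been delivered. */
}

static void example_msgpending(iucv_MessagePending *eib, void *pgm_data)
{
	/* A message is waiting; it would be fetched with iucv_receive(). */
}

static iucv_interrupt_ops_t example_ops = {
	.ConnectionComplete = example_connack,
	.MessagePending     = example_msgpending,
	/* Remaining callbacks left NULL: those events are not handled here. */
};

static int example_send_once(void)
{
	uchar pgmname[16] = { 0 };	/* blank-padded EBCDIC name in practice */
	uchar userid[8] = { 0 };	/* partner VM user id, blank-padded */
	uchar pgmmask[24] = { 0 };	/* zero mask: match any pgmname/userid */
	uchar system_name[8] = { 0 };
	uchar user_data[16] = { 0 };
	iucv_handle_t handle;
	int flags1_out, rc;
	u16 pathid, msglim;
	u32 msgid;

	handle = iucv_register_program(pgmname, userid, pgmmask,
				       &example_ops, NULL);
	if (!handle)
		return -EIO;

	rc = iucv_connect(&pathid, 16 /* msglim_reqstd */, user_data, userid,
			  system_name, 0 /* flags1 */, &flags1_out, &msglim,
			  handle, NULL);
	if (rc)
		goto out_unregister;

	/* One-way message: no IPPRTY, data not in the parameter list. */
	rc = iucv_send(pathid, &msgid, 0 /* trgcls */, 0 /* srccls */,
		       0 /* msgtag */, 0 /* flags1 */, "hello", 5);

	iucv_sever(pathid, user_data);
out_unregister:
	iucv_unregister_program(handle);
	return rc;
}

The netiucv.c changes below replace exactly this pattern: the iucv_interrupt_ops_t vector becomes a struct iucv_handler, and path setup/teardown moves to the iucv_path calls provided by net/iucv/iucv.h.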
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 3346088f47e0..6387b483f2bf 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * IUCV network driver | 2 | * IUCV network driver |
3 | * | 3 | * |
4 | * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation |
5 | * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | 5 | * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) |
6 | * | 6 | * |
7 | * Sysfs integration and all bugs therein by Cornelia Huck | 7 | * Sysfs integration and all bugs therein by Cornelia Huck |
@@ -58,13 +58,94 @@ | |||
58 | #include <asm/io.h> | 58 | #include <asm/io.h> |
59 | #include <asm/uaccess.h> | 59 | #include <asm/uaccess.h> |
60 | 60 | ||
61 | #include "iucv.h" | 61 | #include <net/iucv/iucv.h> |
62 | #include "fsm.h" | 62 | #include "fsm.h" |
63 | 63 | ||
64 | MODULE_AUTHOR | 64 | MODULE_AUTHOR |
65 | ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); | 65 | ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); |
66 | MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); | 66 | MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); |
67 | 67 | ||
68 | /** | ||
69 | * Debug Facility stuff | ||
70 | */ | ||
71 | #define IUCV_DBF_SETUP_NAME "iucv_setup" | ||
72 | #define IUCV_DBF_SETUP_LEN 32 | ||
73 | #define IUCV_DBF_SETUP_PAGES 2 | ||
74 | #define IUCV_DBF_SETUP_NR_AREAS 1 | ||
75 | #define IUCV_DBF_SETUP_LEVEL 3 | ||
76 | |||
77 | #define IUCV_DBF_DATA_NAME "iucv_data" | ||
78 | #define IUCV_DBF_DATA_LEN 128 | ||
79 | #define IUCV_DBF_DATA_PAGES 2 | ||
80 | #define IUCV_DBF_DATA_NR_AREAS 1 | ||
81 | #define IUCV_DBF_DATA_LEVEL 2 | ||
82 | |||
83 | #define IUCV_DBF_TRACE_NAME "iucv_trace" | ||
84 | #define IUCV_DBF_TRACE_LEN 16 | ||
85 | #define IUCV_DBF_TRACE_PAGES 4 | ||
86 | #define IUCV_DBF_TRACE_NR_AREAS 1 | ||
87 | #define IUCV_DBF_TRACE_LEVEL 3 | ||
88 | |||
89 | #define IUCV_DBF_TEXT(name,level,text) \ | ||
90 | do { \ | ||
91 | debug_text_event(iucv_dbf_##name,level,text); \ | ||
92 | } while (0) | ||
93 | |||
94 | #define IUCV_DBF_HEX(name,level,addr,len) \ | ||
95 | do { \ | ||
96 | debug_event(iucv_dbf_##name,level,(void*)(addr),len); \ | ||
97 | } while (0) | ||
98 | |||
99 | DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); | ||
100 | |||
101 | #define IUCV_DBF_TEXT_(name,level,text...) \ | ||
102 | do { \ | ||
103 | char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \ | ||
104 | sprintf(iucv_dbf_txt_buf, text); \ | ||
105 | debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \ | ||
106 | put_cpu_var(iucv_dbf_txt_buf); \ | ||
107 | } while (0) | ||
108 | |||
109 | #define IUCV_DBF_SPRINTF(name,level,text...) \ | ||
110 | do { \ | ||
111 | debug_sprintf_event(iucv_dbf_trace, level, ##text ); \ | ||
112 | debug_sprintf_event(iucv_dbf_trace, level, text ); \ | ||
113 | } while (0) | ||
114 | |||
115 | /** | ||
116 | * some more debug stuff | ||
117 | */ | ||
118 | #define IUCV_HEXDUMP16(importance,header,ptr) \ | ||
119 | PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ | ||
120 | "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ | ||
121 | *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ | ||
122 | *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ | ||
123 | *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ | ||
124 | *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ | ||
125 | *(((char*)ptr)+12),*(((char*)ptr)+13), \ | ||
126 | *(((char*)ptr)+14),*(((char*)ptr)+15)); \ | ||
127 | PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ | ||
128 | "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ | ||
129 | *(((char*)ptr)+16),*(((char*)ptr)+17), \ | ||
130 | *(((char*)ptr)+18),*(((char*)ptr)+19), \ | ||
131 | *(((char*)ptr)+20),*(((char*)ptr)+21), \ | ||
132 | *(((char*)ptr)+22),*(((char*)ptr)+23), \ | ||
133 | *(((char*)ptr)+24),*(((char*)ptr)+25), \ | ||
134 | *(((char*)ptr)+26),*(((char*)ptr)+27), \ | ||
135 | *(((char*)ptr)+28),*(((char*)ptr)+29), \ | ||
136 | *(((char*)ptr)+30),*(((char*)ptr)+31)); | ||
137 | |||
138 | static inline void iucv_hex_dump(unsigned char *buf, size_t len) | ||
139 | { | ||
140 | size_t i; | ||
141 | |||
142 | for (i = 0; i < len; i++) { | ||
143 | if (i && !(i % 16)) | ||
144 | printk("\n"); | ||
145 | printk("%02x ", *(buf + i)); | ||
146 | } | ||
147 | printk("\n"); | ||
148 | } | ||
68 | 149 | ||
69 | #define PRINTK_HEADER " iucv: " /* for debugging */ | 150 | #define PRINTK_HEADER " iucv: " /* for debugging */ |
70 | 151 | ||
@@ -73,6 +154,25 @@ static struct device_driver netiucv_driver = { | |||
73 | .bus = &iucv_bus, | 154 | .bus = &iucv_bus, |
74 | }; | 155 | }; |
75 | 156 | ||
157 | static int netiucv_callback_connreq(struct iucv_path *, | ||
158 | u8 ipvmid[8], u8 ipuser[16]); | ||
159 | static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]); | ||
160 | static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]); | ||
161 | static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]); | ||
162 | static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]); | ||
163 | static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *); | ||
164 | static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *); | ||
165 | |||
166 | static struct iucv_handler netiucv_handler = { | ||
167 | .path_pending = netiucv_callback_connreq, | ||
168 | .path_complete = netiucv_callback_connack, | ||
169 | .path_severed = netiucv_callback_connrej, | ||
170 | .path_quiesced = netiucv_callback_connsusp, | ||
171 | .path_resumed = netiucv_callback_connres, | ||
172 | .message_pending = netiucv_callback_rx, | ||
173 | .message_complete = netiucv_callback_txdone | ||
174 | }; | ||
175 | |||
76 | /** | 176 | /** |
77 | * Per connection profiling data | 177 | * Per connection profiling data |
78 | */ | 178 | */ |
@@ -92,9 +192,8 @@ struct connection_profile { | |||
92 | * Representation of one iucv connection | 192 | * Representation of one iucv connection |
93 | */ | 193 | */ |
94 | struct iucv_connection { | 194 | struct iucv_connection { |
95 | struct iucv_connection *next; | 195 | struct list_head list; |
96 | iucv_handle_t handle; | 196 | struct iucv_path *path; |
97 | __u16 pathid; | ||
98 | struct sk_buff *rx_buff; | 197 | struct sk_buff *rx_buff; |
99 | struct sk_buff *tx_buff; | 198 | struct sk_buff *tx_buff; |
100 | struct sk_buff_head collect_queue; | 199 | struct sk_buff_head collect_queue; |
@@ -112,12 +211,9 @@ struct iucv_connection { | |||
112 | /** | 211 | /** |
113 | * Linked list of all connection structs. | 212 | * Linked list of all connection structs. |
114 | */ | 213 | */ |
115 | struct iucv_connection_struct { | 214 | static struct list_head iucv_connection_list = |
116 | struct iucv_connection *iucv_connections; | 215 | LIST_HEAD_INIT(iucv_connection_list); |
117 | rwlock_t iucv_rwlock; | 216 | static rwlock_t iucv_connection_rwlock = RW_LOCK_UNLOCKED; |
118 | }; | ||
119 | |||
120 | static struct iucv_connection_struct iucv_conns; | ||
121 | 217 | ||
122 | /** | 218 | /** |
123 | * Representation of event-data for the | 219 | * Representation of event-data for the |
@@ -142,11 +238,11 @@ struct netiucv_priv { | |||
142 | /** | 238 | /** |
143 | * Link level header for a packet. | 239 | * Link level header for a packet. |
144 | */ | 240 | */ |
145 | typedef struct ll_header_t { | 241 | struct ll_header { |
146 | __u16 next; | 242 | u16 next; |
147 | } ll_header; | 243 | }; |
148 | 244 | ||
149 | #define NETIUCV_HDRLEN (sizeof(ll_header)) | 245 | #define NETIUCV_HDRLEN (sizeof(struct ll_header)) |
150 | #define NETIUCV_BUFSIZE_MAX 32768 | 246 | #define NETIUCV_BUFSIZE_MAX 32768 |
151 | #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX | 247 | #define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX |
152 | #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) | 248 | #define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) |
@@ -158,36 +254,26 @@ typedef struct ll_header_t { | |||
158 | * Compatibility macros for busy handling | 254 | * Compatibility macros for busy handling |
159 | * of network devices. | 255 | * of network devices. |
160 | */ | 256 | */ |
161 | static __inline__ void netiucv_clear_busy(struct net_device *dev) | 257 | static inline void netiucv_clear_busy(struct net_device *dev) |
162 | { | 258 | { |
163 | clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy)); | 259 | struct netiucv_priv *priv = netdev_priv(dev); |
260 | clear_bit(0, &priv->tbusy); | ||
164 | netif_wake_queue(dev); | 261 | netif_wake_queue(dev); |
165 | } | 262 | } |
166 | 263 | ||
167 | static __inline__ int netiucv_test_and_set_busy(struct net_device *dev) | 264 | static inline int netiucv_test_and_set_busy(struct net_device *dev) |
168 | { | 265 | { |
266 | struct netiucv_priv *priv = netdev_priv(dev); | ||
169 | netif_stop_queue(dev); | 267 | netif_stop_queue(dev); |
170 | return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy); | 268 | return test_and_set_bit(0, &priv->tbusy); |
171 | } | 269 | } |
172 | 270 | ||
173 | static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; | 271 | static u8 iucvMagic[16] = { |
174 | static __u8 iucvMagic[16] = { | ||
175 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, | 272 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, |
176 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 | 273 | 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 |
177 | }; | 274 | }; |
178 | 275 | ||
179 | /** | 276 | /** |
180 | * This mask means the 16-byte IUCV "magic" and the origin userid must | ||
181 | * match exactly as specified in order to give connection_pending() | ||
182 | * control. | ||
183 | */ | ||
184 | static __u8 netiucv_mask[] = { | ||
185 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
186 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
187 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff | ||
188 | }; | ||
189 | |||
190 | /** | ||
191 | * Convert an iucv userId to its printable | 277 | * Convert an iucv userId to its printable |
192 | * form (strip whitespace at end). | 278 | * form (strip whitespace at end). |
193 | * | 279 | * |
@@ -195,8 +281,7 @@ static __u8 netiucv_mask[] = { | |||
195 | * | 281 | * |
196 | * @returns The printable string (static data!!) | 282 | * @returns The printable string (static data!!) |
197 | */ | 283 | */ |
198 | static __inline__ char * | 284 | static inline char *netiucv_printname(char *name) |
199 | netiucv_printname(char *name) | ||
200 | { | 285 | { |
201 | static char tmp[9]; | 286 | static char tmp[9]; |
202 | char *p = tmp; | 287 | char *p = tmp; |
@@ -379,8 +464,7 @@ static debug_info_t *iucv_dbf_trace = NULL; | |||
379 | 464 | ||
380 | DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); | 465 | DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); |
381 | 466 | ||
382 | static void | 467 | static void iucv_unregister_dbf_views(void) |
383 | iucv_unregister_dbf_views(void) | ||
384 | { | 468 | { |
385 | if (iucv_dbf_setup) | 469 | if (iucv_dbf_setup) |
386 | debug_unregister(iucv_dbf_setup); | 470 | debug_unregister(iucv_dbf_setup); |
@@ -389,8 +473,7 @@ iucv_unregister_dbf_views(void) | |||
389 | if (iucv_dbf_trace) | 473 | if (iucv_dbf_trace) |
390 | debug_unregister(iucv_dbf_trace); | 474 | debug_unregister(iucv_dbf_trace); |
391 | } | 475 | } |
392 | static int | 476 | static int iucv_register_dbf_views(void) |
393 | iucv_register_dbf_views(void) | ||
394 | { | 477 | { |
395 | iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, | 478 | iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, |
396 | IUCV_DBF_SETUP_PAGES, | 479 | IUCV_DBF_SETUP_PAGES, |
@@ -422,125 +505,111 @@ iucv_register_dbf_views(void) | |||
422 | return 0; | 505 | return 0; |
423 | } | 506 | } |
424 | 507 | ||
425 | /** | 508 | /* |
426 | * Callback-wrappers, called from lowlevel iucv layer. | 509 | * Callback-wrappers, called from lowlevel iucv layer. |
427 | *****************************************************************************/ | 510 | */ |
428 | 511 | ||
429 | static void | 512 | static void netiucv_callback_rx(struct iucv_path *path, |
430 | netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data) | 513 | struct iucv_message *msg) |
431 | { | 514 | { |
432 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 515 | struct iucv_connection *conn = path->private; |
433 | struct iucv_event ev; | 516 | struct iucv_event ev; |
434 | 517 | ||
435 | ev.conn = conn; | 518 | ev.conn = conn; |
436 | ev.data = (void *)eib; | 519 | ev.data = msg; |
437 | |||
438 | fsm_event(conn->fsm, CONN_EVENT_RX, &ev); | 520 | fsm_event(conn->fsm, CONN_EVENT_RX, &ev); |
439 | } | 521 | } |
440 | 522 | ||
441 | static void | 523 | static void netiucv_callback_txdone(struct iucv_path *path, |
442 | netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data) | 524 | struct iucv_message *msg) |
443 | { | 525 | { |
444 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 526 | struct iucv_connection *conn = path->private; |
445 | struct iucv_event ev; | 527 | struct iucv_event ev; |
446 | 528 | ||
447 | ev.conn = conn; | 529 | ev.conn = conn; |
448 | ev.data = (void *)eib; | 530 | ev.data = msg; |
449 | fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev); | 531 | fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev); |
450 | } | 532 | } |
451 | 533 | ||
452 | static void | 534 | static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) |
453 | netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data) | ||
454 | { | 535 | { |
455 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 536 | struct iucv_connection *conn = path->private; |
456 | struct iucv_event ev; | ||
457 | 537 | ||
458 | ev.conn = conn; | 538 | fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn); |
459 | ev.data = (void *)eib; | ||
460 | fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev); | ||
461 | } | 539 | } |
462 | 540 | ||
463 | static void | 541 | static int netiucv_callback_connreq(struct iucv_path *path, |
464 | netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data) | 542 | u8 ipvmid[8], u8 ipuser[16]) |
465 | { | 543 | { |
466 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 544 | struct iucv_connection *conn = path->private; |
467 | struct iucv_event ev; | 545 | struct iucv_event ev; |
546 | int rc; | ||
468 | 547 | ||
469 | ev.conn = conn; | 548 | if (memcmp(iucvMagic, ipuser, sizeof(ipuser))) |
470 | ev.data = (void *)eib; | 549 | /* ipuser must match iucvMagic. */ |
471 | fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); | 550 | return -EINVAL; |
551 | rc = -EINVAL; | ||
552 | read_lock_bh(&iucv_connection_rwlock); | ||
553 | list_for_each_entry(conn, &iucv_connection_list, list) { | ||
554 | if (strncmp(ipvmid, conn->userid, 8)) | ||
555 | continue; | ||
556 | /* Found a matching connection for this path. */ | ||
557 | conn->path = path; | ||
558 | ev.conn = conn; | ||
559 | ev.data = path; | ||
560 | fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); | ||
561 | rc = 0; | ||
562 | } | ||
563 | read_unlock_bh(&iucv_connection_rwlock); | ||
564 | return rc; | ||
472 | } | 565 | } |
473 | 566 | ||
474 | static void | 567 | static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) |
475 | netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data) | ||
476 | { | 568 | { |
477 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 569 | struct iucv_connection *conn = path->private; |
478 | struct iucv_event ev; | ||
479 | 570 | ||
480 | ev.conn = conn; | 571 | fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn); |
481 | ev.data = (void *)eib; | ||
482 | fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev); | ||
483 | } | 572 | } |
484 | 573 | ||
485 | static void | 574 | static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16]) |
486 | netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data) | ||
487 | { | 575 | { |
488 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 576 | struct iucv_connection *conn = path->private; |
489 | struct iucv_event ev; | ||
490 | 577 | ||
491 | ev.conn = conn; | 578 | fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn); |
492 | ev.data = (void *)eib; | ||
493 | fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev); | ||
494 | } | 579 | } |
495 | 580 | ||
496 | static void | 581 | static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16]) |
497 | netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data) | ||
498 | { | 582 | { |
499 | struct iucv_connection *conn = (struct iucv_connection *)pgm_data; | 583 | struct iucv_connection *conn = path->private; |
500 | struct iucv_event ev; | ||
501 | 584 | ||
502 | ev.conn = conn; | 585 | fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn); |
503 | ev.data = (void *)eib; | 586 | } |
504 | fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev); | ||
505 | } | ||
506 | |||
507 | static iucv_interrupt_ops_t netiucv_ops = { | ||
508 | .ConnectionPending = netiucv_callback_connreq, | ||
509 | .ConnectionComplete = netiucv_callback_connack, | ||
510 | .ConnectionSevered = netiucv_callback_connrej, | ||
511 | .ConnectionQuiesced = netiucv_callback_connsusp, | ||
512 | .ConnectionResumed = netiucv_callback_connres, | ||
513 | .MessagePending = netiucv_callback_rx, | ||
514 | .MessageComplete = netiucv_callback_txdone | ||
515 | }; | ||
516 | 587 | ||
517 | /** | 588 | /** |
518 | * Dummy NOP action for all statemachines | 589 | * Dummy NOP action for all statemachines |
519 | */ | 590 | */ |
520 | static void | 591 | static void fsm_action_nop(fsm_instance *fi, int event, void *arg) |
521 | fsm_action_nop(fsm_instance *fi, int event, void *arg) | ||
522 | { | 592 | { |
523 | } | 593 | } |
524 | 594 | ||
525 | /** | 595 | /* |
526 | * Actions of the connection statemachine | 596 | * Actions of the connection statemachine |
527 | *****************************************************************************/ | 597 | */ |
528 | 598 | ||
529 | /** | 599 | /** |
530 | * Helper function for conn_action_rx() | 600 | * netiucv_unpack_skb |
531 | * Unpack a just received skb and hand it over to | 601 | * @conn: The connection where this skb has been received. |
532 | * upper layers. | 602 | * @pskb: The received skb. |
533 | * | 603 | * |
534 | * @param conn The connection where this skb has been received. | 604 | * Unpack a just received skb and hand it over to upper layers. |
535 | * @param pskb The received skb. | 605 | * Helper function for conn_action_rx. |
536 | */ | 606 | */ |
537 | //static __inline__ void | 607 | static void netiucv_unpack_skb(struct iucv_connection *conn, |
538 | static void | 608 | struct sk_buff *pskb) |
539 | netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb) | ||
540 | { | 609 | { |
541 | struct net_device *dev = conn->netdev; | 610 | struct net_device *dev = conn->netdev; |
542 | struct netiucv_priv *privptr = dev->priv; | 611 | struct netiucv_priv *privptr = netdev_priv(dev); |
543 | __u16 offset = 0; | 612 | u16 offset = 0; |
544 | 613 | ||
545 | skb_put(pskb, NETIUCV_HDRLEN); | 614 | skb_put(pskb, NETIUCV_HDRLEN); |
546 | pskb->dev = dev; | 615 | pskb->dev = dev; |
@@ -549,7 +618,7 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb) | |||
549 | 618 | ||
550 | while (1) { | 619 | while (1) { |
551 | struct sk_buff *skb; | 620 | struct sk_buff *skb; |
552 | ll_header *header = (ll_header *)pskb->data; | 621 | struct ll_header *header = (struct ll_header *) pskb->data; |
553 | 622 | ||
554 | if (!header->next) | 623 | if (!header->next) |
555 | break; | 624 | break; |
@@ -595,40 +664,37 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb) | |||
595 | } | 664 | } |
596 | } | 665 | } |
597 | 666 | ||
598 | static void | 667 | static void conn_action_rx(fsm_instance *fi, int event, void *arg) |
599 | conn_action_rx(fsm_instance *fi, int event, void *arg) | ||
600 | { | 668 | { |
601 | struct iucv_event *ev = (struct iucv_event *)arg; | 669 | struct iucv_event *ev = arg; |
602 | struct iucv_connection *conn = ev->conn; | 670 | struct iucv_connection *conn = ev->conn; |
603 | iucv_MessagePending *eib = (iucv_MessagePending *)ev->data; | 671 | struct iucv_message *msg = ev->data; |
604 | struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv; | 672 | struct netiucv_priv *privptr = netdev_priv(conn->netdev); |
605 | |||
606 | __u32 msglen = eib->ln1msg2.ipbfln1f; | ||
607 | int rc; | 673 | int rc; |
608 | 674 | ||
609 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); | 675 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); |
610 | 676 | ||
611 | if (!conn->netdev) { | 677 | if (!conn->netdev) { |
612 | /* FRITZ: How to tell iucv LL to drop the msg? */ | 678 | iucv_message_reject(conn->path, msg); |
613 | PRINT_WARN("Received data for unlinked connection\n"); | 679 | PRINT_WARN("Received data for unlinked connection\n"); |
614 | IUCV_DBF_TEXT(data, 2, | 680 | IUCV_DBF_TEXT(data, 2, |
615 | "Received data for unlinked connection\n"); | 681 | "Received data for unlinked connection\n"); |
616 | return; | 682 | return; |
617 | } | 683 | } |
618 | if (msglen > conn->max_buffsize) { | 684 | if (msg->length > conn->max_buffsize) { |
619 | /* FRITZ: How to tell iucv LL to drop the msg? */ | 685 | iucv_message_reject(conn->path, msg); |
620 | privptr->stats.rx_dropped++; | 686 | privptr->stats.rx_dropped++; |
621 | PRINT_WARN("msglen %d > max_buffsize %d\n", | 687 | PRINT_WARN("msglen %d > max_buffsize %d\n", |
622 | msglen, conn->max_buffsize); | 688 | msg->length, conn->max_buffsize); |
623 | IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", | 689 | IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", |
624 | msglen, conn->max_buffsize); | 690 | msg->length, conn->max_buffsize); |
625 | return; | 691 | return; |
626 | } | 692 | } |
627 | conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; | 693 | conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; |
628 | conn->rx_buff->len = 0; | 694 | conn->rx_buff->len = 0; |
629 | rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls, | 695 | rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, |
630 | conn->rx_buff->data, msglen, NULL, NULL, NULL); | 696 | msg->length, NULL); |
631 | if (rc || msglen < 5) { | 697 | if (rc || msg->length < 5) { |
632 | privptr->stats.rx_errors++; | 698 | privptr->stats.rx_errors++; |
633 | PRINT_WARN("iucv_receive returned %08x\n", rc); | 699 | PRINT_WARN("iucv_receive returned %08x\n", rc); |
634 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); | 700 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); |
@@ -637,26 +703,26 @@ conn_action_rx(fsm_instance *fi, int event, void *arg) | |||
637 | netiucv_unpack_skb(conn, conn->rx_buff); | 703 | netiucv_unpack_skb(conn, conn->rx_buff); |
638 | } | 704 | } |
639 | 705 | ||
640 | static void | 706 | static void conn_action_txdone(fsm_instance *fi, int event, void *arg) |
641 | conn_action_txdone(fsm_instance *fi, int event, void *arg) | ||
642 | { | 707 | { |
643 | struct iucv_event *ev = (struct iucv_event *)arg; | 708 | struct iucv_event *ev = arg; |
644 | struct iucv_connection *conn = ev->conn; | 709 | struct iucv_connection *conn = ev->conn; |
645 | iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data; | 710 | struct iucv_message *msg = ev->data; |
711 | struct iucv_message txmsg; | ||
646 | struct netiucv_priv *privptr = NULL; | 712 | struct netiucv_priv *privptr = NULL; |
647 | /* Shut up, gcc! skb is always below 2G. */ | 713 | u32 single_flag = msg->tag; |
648 | __u32 single_flag = eib->ipmsgtag; | 714 | u32 txbytes = 0; |
649 | __u32 txbytes = 0; | 715 | u32 txpackets = 0; |
650 | __u32 txpackets = 0; | 716 | u32 stat_maxcq = 0; |
651 | __u32 stat_maxcq = 0; | ||
652 | struct sk_buff *skb; | 717 | struct sk_buff *skb; |
653 | unsigned long saveflags; | 718 | unsigned long saveflags; |
654 | ll_header header; | 719 | struct ll_header header; |
720 | int rc; | ||
655 | 721 | ||
656 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); | 722 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); |
657 | 723 | ||
658 | if (conn && conn->netdev && conn->netdev->priv) | 724 | if (conn && conn->netdev) |
659 | privptr = (struct netiucv_priv *)conn->netdev->priv; | 725 | privptr = netdev_priv(conn->netdev); |
660 | conn->prof.tx_pending--; | 726 | conn->prof.tx_pending--; |
661 | if (single_flag) { | 727 | if (single_flag) { |
662 | if ((skb = skb_dequeue(&conn->commit_queue))) { | 728 | if ((skb = skb_dequeue(&conn->commit_queue))) { |
@@ -688,56 +754,55 @@ conn_action_txdone(fsm_instance *fi, int event, void *arg) | |||
688 | conn->prof.maxmulti = conn->collect_len; | 754 | conn->prof.maxmulti = conn->collect_len; |
689 | conn->collect_len = 0; | 755 | conn->collect_len = 0; |
690 | spin_unlock_irqrestore(&conn->collect_lock, saveflags); | 756 | spin_unlock_irqrestore(&conn->collect_lock, saveflags); |
691 | if (conn->tx_buff->len) { | 757 | if (conn->tx_buff->len == 0) { |
692 | int rc; | 758 | fsm_newstate(fi, CONN_STATE_IDLE); |
693 | 759 | return; | |
694 | header.next = 0; | 760 | } |
695 | memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, | ||
696 | NETIUCV_HDRLEN); | ||
697 | 761 | ||
698 | conn->prof.send_stamp = xtime; | 762 | header.next = 0; |
699 | rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0, | 763 | memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); |
764 | conn->prof.send_stamp = xtime; | ||
765 | txmsg.class = 0; | ||
766 | txmsg.tag = 0; | ||
767 | rc = iucv_message_send(conn->path, &txmsg, 0, 0, | ||
700 | conn->tx_buff->data, conn->tx_buff->len); | 768 | conn->tx_buff->data, conn->tx_buff->len); |
701 | conn->prof.doios_multi++; | 769 | conn->prof.doios_multi++; |
702 | conn->prof.txlen += conn->tx_buff->len; | 770 | conn->prof.txlen += conn->tx_buff->len; |
703 | conn->prof.tx_pending++; | 771 | conn->prof.tx_pending++; |
704 | if (conn->prof.tx_pending > conn->prof.tx_max_pending) | 772 | if (conn->prof.tx_pending > conn->prof.tx_max_pending) |
705 | conn->prof.tx_max_pending = conn->prof.tx_pending; | 773 | conn->prof.tx_max_pending = conn->prof.tx_pending; |
706 | if (rc) { | 774 | if (rc) { |
707 | conn->prof.tx_pending--; | 775 | conn->prof.tx_pending--; |
708 | fsm_newstate(fi, CONN_STATE_IDLE); | ||
709 | if (privptr) | ||
710 | privptr->stats.tx_errors += txpackets; | ||
711 | PRINT_WARN("iucv_send returned %08x\n", rc); | ||
712 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); | ||
713 | } else { | ||
714 | if (privptr) { | ||
715 | privptr->stats.tx_packets += txpackets; | ||
716 | privptr->stats.tx_bytes += txbytes; | ||
717 | } | ||
718 | if (stat_maxcq > conn->prof.maxcqueue) | ||
719 | conn->prof.maxcqueue = stat_maxcq; | ||
720 | } | ||
721 | } else | ||
722 | fsm_newstate(fi, CONN_STATE_IDLE); | 776 | fsm_newstate(fi, CONN_STATE_IDLE); |
777 | if (privptr) | ||
778 | privptr->stats.tx_errors += txpackets; | ||
779 | PRINT_WARN("iucv_send returned %08x\n", rc); | ||
780 | IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); | ||
781 | } else { | ||
782 | if (privptr) { | ||
783 | privptr->stats.tx_packets += txpackets; | ||
784 | privptr->stats.tx_bytes += txbytes; | ||
785 | } | ||
786 | if (stat_maxcq > conn->prof.maxcqueue) | ||
787 | conn->prof.maxcqueue = stat_maxcq; | ||
788 | } | ||
723 | } | 789 | } |
724 | 790 | ||
725 | static void | 791 | static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) |
726 | conn_action_connaccept(fsm_instance *fi, int event, void *arg) | ||
727 | { | 792 | { |
728 | struct iucv_event *ev = (struct iucv_event *)arg; | 793 | struct iucv_event *ev = arg; |
729 | struct iucv_connection *conn = ev->conn; | 794 | struct iucv_connection *conn = ev->conn; |
730 | iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data; | 795 | struct iucv_path *path = ev->data; |
731 | struct net_device *netdev = conn->netdev; | 796 | struct net_device *netdev = conn->netdev; |
732 | struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; | 797 | struct netiucv_priv *privptr = netdev_priv(netdev); |
733 | int rc; | 798 | int rc; |
734 | __u16 msglimit; | ||
735 | __u8 udata[16]; | ||
736 | 799 | ||
737 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 800 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
738 | 801 | ||
739 | rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0, | 802 | conn->path = path; |
740 | conn->handle, conn, NULL, &msglimit); | 803 | path->msglim = NETIUCV_QUEUELEN_DEFAULT; |
804 | path->flags = 0; | ||
805 | rc = iucv_path_accept(path, &netiucv_handler, NULL, conn); | ||
741 | if (rc) { | 806 | if (rc) { |
742 | PRINT_WARN("%s: IUCV accept failed with error %d\n", | 807 | PRINT_WARN("%s: IUCV accept failed with error %d\n", |
743 | netdev->name, rc); | 808 | netdev->name, rc); |
@@ -745,183 +810,126 @@ conn_action_connaccept(fsm_instance *fi, int event, void *arg) | |||
745 | return; | 810 | return; |
746 | } | 811 | } |
747 | fsm_newstate(fi, CONN_STATE_IDLE); | 812 | fsm_newstate(fi, CONN_STATE_IDLE); |
748 | conn->pathid = eib->ippathid; | 813 | netdev->tx_queue_len = conn->path->msglim; |
749 | netdev->tx_queue_len = msglimit; | ||
750 | fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); | 814 | fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); |
751 | } | 815 | } |
752 | 816 | ||
753 | static void | 817 | static void conn_action_connreject(fsm_instance *fi, int event, void *arg) |
754 | conn_action_connreject(fsm_instance *fi, int event, void *arg) | ||
755 | { | 818 | { |
756 | struct iucv_event *ev = (struct iucv_event *)arg; | 819 | struct iucv_event *ev = arg; |
757 | struct iucv_connection *conn = ev->conn; | 820 | struct iucv_path *path = ev->data; |
758 | struct net_device *netdev = conn->netdev; | ||
759 | iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data; | ||
760 | __u8 udata[16]; | ||
761 | 821 | ||
762 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 822 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
763 | 823 | iucv_path_sever(path, NULL); | |
764 | iucv_sever(eib->ippathid, udata); | ||
765 | if (eib->ippathid != conn->pathid) { | ||
766 | PRINT_INFO("%s: IR Connection Pending; " | ||
767 | "pathid %d does not match original pathid %d\n", | ||
768 | netdev->name, eib->ippathid, conn->pathid); | ||
769 | IUCV_DBF_TEXT_(data, 2, | ||
770 | "connreject: IR pathid %d, conn. pathid %d\n", | ||
771 | eib->ippathid, conn->pathid); | ||
772 | iucv_sever(conn->pathid, udata); | ||
773 | } | ||
774 | } | 824 | } |
775 | 825 | ||
776 | static void | 826 | static void conn_action_connack(fsm_instance *fi, int event, void *arg) |
777 | conn_action_connack(fsm_instance *fi, int event, void *arg) | ||
778 | { | 827 | { |
779 | struct iucv_event *ev = (struct iucv_event *)arg; | 828 | struct iucv_connection *conn = arg; |
780 | struct iucv_connection *conn = ev->conn; | ||
781 | iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data; | ||
782 | struct net_device *netdev = conn->netdev; | 829 | struct net_device *netdev = conn->netdev; |
783 | struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; | 830 | struct netiucv_priv *privptr = netdev_priv(netdev); |
784 | 831 | ||
785 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 832 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
786 | |||
787 | fsm_deltimer(&conn->timer); | 833 | fsm_deltimer(&conn->timer); |
788 | fsm_newstate(fi, CONN_STATE_IDLE); | 834 | fsm_newstate(fi, CONN_STATE_IDLE); |
789 | if (eib->ippathid != conn->pathid) { | 835 | netdev->tx_queue_len = conn->path->msglim; |
790 | PRINT_INFO("%s: IR Connection Complete; " | ||
791 | "pathid %d does not match original pathid %d\n", | ||
792 | netdev->name, eib->ippathid, conn->pathid); | ||
793 | IUCV_DBF_TEXT_(data, 2, | ||
794 | "connack: IR pathid %d, conn. pathid %d\n", | ||
795 | eib->ippathid, conn->pathid); | ||
796 | conn->pathid = eib->ippathid; | ||
797 | } | ||
798 | netdev->tx_queue_len = eib->ipmsglim; | ||
799 | fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); | 836 | fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); |
800 | } | 837 | } |
801 | 838 | ||
802 | static void | 839 | static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg) |
803 | conn_action_conntimsev(fsm_instance *fi, int event, void *arg) | ||
804 | { | 840 | { |
805 | struct iucv_connection *conn = (struct iucv_connection *)arg; | 841 | struct iucv_connection *conn = arg; |
806 | __u8 udata[16]; | ||
807 | 842 | ||
808 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 843 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
809 | |||
810 | fsm_deltimer(&conn->timer); | 844 | fsm_deltimer(&conn->timer); |
811 | iucv_sever(conn->pathid, udata); | 845 | iucv_path_sever(conn->path, NULL); |
812 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 846 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
813 | } | 847 | } |
814 | 848 | ||
815 | static void | 849 | static void conn_action_connsever(fsm_instance *fi, int event, void *arg) |
816 | conn_action_connsever(fsm_instance *fi, int event, void *arg) | ||
817 | { | 850 | { |
818 | struct iucv_event *ev = (struct iucv_event *)arg; | 851 | struct iucv_connection *conn = arg; |
819 | struct iucv_connection *conn = ev->conn; | ||
820 | struct net_device *netdev = conn->netdev; | 852 | struct net_device *netdev = conn->netdev; |
821 | struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; | 853 | struct netiucv_priv *privptr = netdev_priv(netdev); |
822 | __u8 udata[16]; | ||
823 | 854 | ||
824 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 855 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
825 | 856 | ||
826 | fsm_deltimer(&conn->timer); | 857 | fsm_deltimer(&conn->timer); |
827 | iucv_sever(conn->pathid, udata); | 858 | iucv_path_sever(conn->path, NULL); |
828 | PRINT_INFO("%s: Remote dropped connection\n", netdev->name); | 859 | PRINT_INFO("%s: Remote dropped connection\n", netdev->name); |
829 | IUCV_DBF_TEXT(data, 2, | 860 | IUCV_DBF_TEXT(data, 2, |
830 | "conn_action_connsever: Remote dropped connection\n"); | 861 | "conn_action_connsever: Remote dropped connection\n"); |
831 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 862 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
832 | fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); | 863 | fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); |
833 | } | 864 | } |
834 | 865 | ||
835 | static void | 866 | static void conn_action_start(fsm_instance *fi, int event, void *arg) |
836 | conn_action_start(fsm_instance *fi, int event, void *arg) | ||
837 | { | 867 | { |
838 | struct iucv_event *ev = (struct iucv_event *)arg; | 868 | struct iucv_connection *conn = arg; |
839 | struct iucv_connection *conn = ev->conn; | ||
840 | __u16 msglimit; | ||
841 | int rc; | 869 | int rc; |
842 | 870 | ||
843 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 871 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
844 | 872 | ||
845 | if (!conn->handle) { | 873 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
846 | IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n"); | ||
847 | conn->handle = | ||
848 | iucv_register_program(iucvMagic, conn->userid, | ||
849 | netiucv_mask, | ||
850 | &netiucv_ops, conn); | ||
851 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | ||
852 | if (!conn->handle) { | ||
853 | fsm_newstate(fi, CONN_STATE_REGERR); | ||
854 | conn->handle = NULL; | ||
855 | IUCV_DBF_TEXT(setup, 2, | ||
856 | "NULL from iucv_register_program\n"); | ||
857 | return; | ||
858 | } | ||
859 | |||
860 | PRINT_DEBUG("%s('%s'): registered successfully\n", | ||
861 | conn->netdev->name, conn->userid); | ||
862 | } | ||
863 | |||
864 | PRINT_DEBUG("%s('%s'): connecting ...\n", | 874 | PRINT_DEBUG("%s('%s'): connecting ...\n", |
865 | conn->netdev->name, conn->userid); | 875 | conn->netdev->name, conn->userid); |
866 | 876 | ||
867 | /* We must set the state before calling iucv_connect because the callback | 877 | /* |
868 | * handler could be called at any point after the connection request is | 878 | * We must set the state before calling iucv_connect because the |
869 | * sent */ | 879 | * callback handler could be called at any point after the connection |
880 | * request is sent | ||
881 | */ | ||
870 | 882 | ||
871 | fsm_newstate(fi, CONN_STATE_SETUPWAIT); | 883 | fsm_newstate(fi, CONN_STATE_SETUPWAIT); |
872 | rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic, | 884 | conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL); |
873 | conn->userid, iucv_host, 0, NULL, &msglimit, | 885 | rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid, |
874 | conn->handle, conn); | 886 | NULL, iucvMagic, conn); |
875 | switch (rc) { | 887 | switch (rc) { |
876 | case 0: | 888 | case 0: |
877 | conn->netdev->tx_queue_len = msglimit; | 889 | conn->netdev->tx_queue_len = conn->path->msglim; |
878 | fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, | 890 | fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, |
879 | CONN_EVENT_TIMER, conn); | 891 | CONN_EVENT_TIMER, conn); |
880 | return; | 892 | return; |
881 | case 11: | 893 | case 11: |
882 | PRINT_INFO("%s: User %s is currently not available.\n", | 894 | PRINT_INFO("%s: User %s is currently not available.\n", |
883 | conn->netdev->name, | 895 | conn->netdev->name, |
884 | netiucv_printname(conn->userid)); | 896 | netiucv_printname(conn->userid)); |
885 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 897 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
886 | return; | 898 | break; |
887 | case 12: | 899 | case 12: |
888 | PRINT_INFO("%s: User %s is currently not ready.\n", | 900 | PRINT_INFO("%s: User %s is currently not ready.\n", |
889 | conn->netdev->name, | 901 | conn->netdev->name, |
890 | netiucv_printname(conn->userid)); | 902 | netiucv_printname(conn->userid)); |
891 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 903 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
892 | return; | 904 | break; |
893 | case 13: | 905 | case 13: |
894 | PRINT_WARN("%s: Too many IUCV connections.\n", | 906 | PRINT_WARN("%s: Too many IUCV connections.\n", |
895 | conn->netdev->name); | 907 | conn->netdev->name); |
896 | fsm_newstate(fi, CONN_STATE_CONNERR); | 908 | fsm_newstate(fi, CONN_STATE_CONNERR); |
897 | break; | 909 | break; |
898 | case 14: | 910 | case 14: |
899 | PRINT_WARN( | 911 | PRINT_WARN("%s: User %s has too many IUCV connections.\n", |
900 | "%s: User %s has too many IUCV connections.\n", | 912 | conn->netdev->name, |
901 | conn->netdev->name, | 913 | netiucv_printname(conn->userid)); |
902 | netiucv_printname(conn->userid)); | 914 | fsm_newstate(fi, CONN_STATE_CONNERR); |
903 | fsm_newstate(fi, CONN_STATE_CONNERR); | 915 | break; |
904 | break; | 916 | case 15: |
905 | case 15: | 917 | PRINT_WARN("%s: No IUCV authorization in CP directory.\n", |
906 | PRINT_WARN( | 918 | conn->netdev->name); |
907 | "%s: No IUCV authorization in CP directory.\n", | 919 | fsm_newstate(fi, CONN_STATE_CONNERR); |
908 | conn->netdev->name); | 920 | break; |
909 | fsm_newstate(fi, CONN_STATE_CONNERR); | 921 | default: |
910 | break; | 922 | PRINT_WARN("%s: iucv_connect returned error %d\n", |
911 | default: | 923 | conn->netdev->name, rc); |
912 | PRINT_WARN("%s: iucv_connect returned error %d\n", | 924 | fsm_newstate(fi, CONN_STATE_CONNERR); |
913 | conn->netdev->name, rc); | 925 | break; |
914 | fsm_newstate(fi, CONN_STATE_CONNERR); | ||
915 | break; | ||
916 | } | 926 | } |
917 | IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); | 927 | IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); |
918 | IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n"); | 928 | kfree(conn->path); |
919 | iucv_unregister_program(conn->handle); | 929 | conn->path = NULL; |
920 | conn->handle = NULL; | ||
921 | } | 930 | } |
922 | 931 | ||
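The conn_action_start() rewrite above is the core of the conversion: instead of iucv_register_program()/iucv_connect() against a numeric pathid, the driver now allocates a struct iucv_path and connects it. A minimal sketch of that sequence, assuming NETIUCV_QUEUELEN_DEFAULT, iucvMagic and netiucv_handler as defined in this driver; the NULL check after iucv_path_alloc() is added here for illustration and is not in the hunk.

    #include <linux/slab.h>
    #include <net/iucv/iucv.h>

    static int sketch_connect(struct iucv_connection *conn)
    {
            int rc;

            conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
            if (!conn->path)
                    return -ENOMEM;
            rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
                                   NULL, iucvMagic, conn);
            if (rc) {
                    /* As in the hunk: a failed connect releases the path again. */
                    kfree(conn->path);
                    conn->path = NULL;
                    return rc;
            }
            /* The peer's message limit sizes the transmit queue. */
            conn->netdev->tx_queue_len = conn->path->msglim;
            return 0;
    }

The non-zero return codes (11 to 15) are still reported case by case; the practical difference is that the allocated path is now freed whenever the connect does not succeed.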
923 | static void | 932 | static void netiucv_purge_skb_queue(struct sk_buff_head *q) |
924 | netiucv_purge_skb_queue(struct sk_buff_head *q) | ||
925 | { | 933 | { |
926 | struct sk_buff *skb; | 934 | struct sk_buff *skb; |
927 | 935 | ||
@@ -931,36 +939,34 @@ netiucv_purge_skb_queue(struct sk_buff_head *q) | |||
931 | } | 939 | } |
932 | } | 940 | } |
933 | 941 | ||
934 | static void | 942 | static void conn_action_stop(fsm_instance *fi, int event, void *arg) |
935 | conn_action_stop(fsm_instance *fi, int event, void *arg) | ||
936 | { | 943 | { |
937 | struct iucv_event *ev = (struct iucv_event *)arg; | 944 | struct iucv_event *ev = arg; |
938 | struct iucv_connection *conn = ev->conn; | 945 | struct iucv_connection *conn = ev->conn; |
939 | struct net_device *netdev = conn->netdev; | 946 | struct net_device *netdev = conn->netdev; |
940 | struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; | 947 | struct netiucv_priv *privptr = netdev_priv(netdev); |
941 | 948 | ||
942 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 949 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
943 | 950 | ||
944 | fsm_deltimer(&conn->timer); | 951 | fsm_deltimer(&conn->timer); |
945 | fsm_newstate(fi, CONN_STATE_STOPPED); | 952 | fsm_newstate(fi, CONN_STATE_STOPPED); |
946 | netiucv_purge_skb_queue(&conn->collect_queue); | 953 | netiucv_purge_skb_queue(&conn->collect_queue); |
947 | if (conn->handle) | 954 | if (conn->path) { |
948 | IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n"); | 955 | IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n"); |
949 | iucv_unregister_program(conn->handle); | 956 | iucv_path_sever(conn->path, iucvMagic); |
950 | conn->handle = NULL; | 957 | kfree(conn->path); |
958 | conn->path = NULL; | ||
959 | } | ||
951 | netiucv_purge_skb_queue(&conn->commit_queue); | 960 | netiucv_purge_skb_queue(&conn->commit_queue); |
952 | fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); | 961 | fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); |
953 | } | 962 | } |
954 | 963 | ||
955 | static void | 964 | static void conn_action_inval(fsm_instance *fi, int event, void *arg) |
956 | conn_action_inval(fsm_instance *fi, int event, void *arg) | ||
957 | { | 965 | { |
958 | struct iucv_event *ev = (struct iucv_event *)arg; | 966 | struct iucv_connection *conn = arg; |
959 | struct iucv_connection *conn = ev->conn; | ||
960 | struct net_device *netdev = conn->netdev; | 967 | struct net_device *netdev = conn->netdev; |
961 | 968 | ||
962 | PRINT_WARN("%s: Cannot connect without username\n", | 969 | PRINT_WARN("%s: Cannot connect without username\n", netdev->name); |
963 | netdev->name); | ||
964 | IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); | 970 | IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n"); |
965 | } | 971 | } |
966 | 972 | ||
@@ -999,29 +1005,27 @@ static const fsm_node conn_fsm[] = { | |||
999 | static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); | 1005 | static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); |
1000 | 1006 | ||
1001 | 1007 | ||
1002 | /** | 1008 | /* |
1003 | * Actions for interface - statemachine. | 1009 | * Actions for interface - statemachine. |
1004 | *****************************************************************************/ | 1010 | */ |
1005 | 1011 | ||
1006 | /** | 1012 | /** |
1007 | * Startup connection by sending CONN_EVENT_START to it. | 1013 | * dev_action_start |
1014 | * @fi: An instance of an interface statemachine. | ||
1015 | * @event: The event, just happened. | ||
1016 | * @arg: Generic pointer, casted from struct net_device * upon call. | ||
1008 | * | 1017 | * |
1009 | * @param fi An instance of an interface statemachine. | 1018 | * Startup connection by sending CONN_EVENT_START to it. |
1010 | * @param event The event, just happened. | ||
1011 | * @param arg Generic pointer, casted from struct net_device * upon call. | ||
1012 | */ | 1019 | */ |
1013 | static void | 1020 | static void dev_action_start(fsm_instance *fi, int event, void *arg) |
1014 | dev_action_start(fsm_instance *fi, int event, void *arg) | ||
1015 | { | 1021 | { |
1016 | struct net_device *dev = (struct net_device *)arg; | 1022 | struct net_device *dev = arg; |
1017 | struct netiucv_priv *privptr = dev->priv; | 1023 | struct netiucv_priv *privptr = netdev_priv(dev); |
1018 | struct iucv_event ev; | ||
1019 | 1024 | ||
1020 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1025 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1021 | 1026 | ||
1022 | ev.conn = privptr->conn; | ||
1023 | fsm_newstate(fi, DEV_STATE_STARTWAIT); | 1027 | fsm_newstate(fi, DEV_STATE_STARTWAIT); |
1024 | fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev); | 1028 | fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn); |
1025 | } | 1029 | } |
1026 | 1030 | ||
1027 | /** | 1031 | /** |
@@ -1034,8 +1038,8 @@ dev_action_start(fsm_instance *fi, int event, void *arg) | |||
1034 | static void | 1038 | static void |
1035 | dev_action_stop(fsm_instance *fi, int event, void *arg) | 1039 | dev_action_stop(fsm_instance *fi, int event, void *arg) |
1036 | { | 1040 | { |
1037 | struct net_device *dev = (struct net_device *)arg; | 1041 | struct net_device *dev = arg; |
1038 | struct netiucv_priv *privptr = dev->priv; | 1042 | struct netiucv_priv *privptr = netdev_priv(dev); |
1039 | struct iucv_event ev; | 1043 | struct iucv_event ev; |
1040 | 1044 | ||
1041 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1045 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
@@ -1057,8 +1061,8 @@ dev_action_stop(fsm_instance *fi, int event, void *arg) | |||
1057 | static void | 1061 | static void |
1058 | dev_action_connup(fsm_instance *fi, int event, void *arg) | 1062 | dev_action_connup(fsm_instance *fi, int event, void *arg) |
1059 | { | 1063 | { |
1060 | struct net_device *dev = (struct net_device *)arg; | 1064 | struct net_device *dev = arg; |
1061 | struct netiucv_priv *privptr = dev->priv; | 1065 | struct netiucv_priv *privptr = netdev_priv(dev); |
1062 | 1066 | ||
1063 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1067 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1064 | 1068 | ||
@@ -1131,11 +1135,13 @@ static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node); | |||
1131 | * | 1135 | * |
1132 | * @return 0 on success, -ERRNO on failure. (Never fails.) | 1136 | * @return 0 on success, -ERRNO on failure. (Never fails.) |
1133 | */ | 1137 | */ |
1134 | static int | 1138 | static int netiucv_transmit_skb(struct iucv_connection *conn, |
1135 | netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | 1139 | struct sk_buff *skb) |
1140 | { | ||
1141 | struct iucv_message msg; | ||
1136 | unsigned long saveflags; | 1142 | unsigned long saveflags; |
1137 | ll_header header; | 1143 | struct ll_header header; |
1138 | int rc = 0; | 1144 | int rc; |
1139 | 1145 | ||
1140 | if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { | 1146 | if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { |
1141 | int l = skb->len + NETIUCV_HDRLEN; | 1147 | int l = skb->len + NETIUCV_HDRLEN; |
@@ -1145,11 +1151,12 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | |||
1145 | (conn->max_buffsize - NETIUCV_HDRLEN)) { | 1151 | (conn->max_buffsize - NETIUCV_HDRLEN)) { |
1146 | rc = -EBUSY; | 1152 | rc = -EBUSY; |
1147 | IUCV_DBF_TEXT(data, 2, | 1153 | IUCV_DBF_TEXT(data, 2, |
1148 | "EBUSY from netiucv_transmit_skb\n"); | 1154 | "EBUSY from netiucv_transmit_skb\n"); |
1149 | } else { | 1155 | } else { |
1150 | atomic_inc(&skb->users); | 1156 | atomic_inc(&skb->users); |
1151 | skb_queue_tail(&conn->collect_queue, skb); | 1157 | skb_queue_tail(&conn->collect_queue, skb); |
1152 | conn->collect_len += l; | 1158 | conn->collect_len += l; |
1159 | rc = 0; | ||
1153 | } | 1160 | } |
1154 | spin_unlock_irqrestore(&conn->collect_lock, saveflags); | 1161 | spin_unlock_irqrestore(&conn->collect_lock, saveflags); |
1155 | } else { | 1162 | } else { |
@@ -1188,9 +1195,10 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | |||
1188 | fsm_newstate(conn->fsm, CONN_STATE_TX); | 1195 | fsm_newstate(conn->fsm, CONN_STATE_TX); |
1189 | conn->prof.send_stamp = xtime; | 1196 | conn->prof.send_stamp = xtime; |
1190 | 1197 | ||
1191 | rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */, | 1198 | msg.tag = 1; |
1192 | 0, nskb->data, nskb->len); | 1199 | msg.class = 0; |
1193 | /* Shut up, gcc! nskb is always below 2G. */ | 1200 | rc = iucv_message_send(conn->path, &msg, 0, 0, |
1201 | nskb->data, nskb->len); | ||
1194 | conn->prof.doios_single++; | 1202 | conn->prof.doios_single++; |
1195 | conn->prof.txlen += skb->len; | 1203 | conn->prof.txlen += skb->len; |
1196 | conn->prof.tx_pending++; | 1204 | conn->prof.tx_pending++; |
@@ -1200,7 +1208,7 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | |||
1200 | struct netiucv_priv *privptr; | 1208 | struct netiucv_priv *privptr; |
1201 | fsm_newstate(conn->fsm, CONN_STATE_IDLE); | 1209 | fsm_newstate(conn->fsm, CONN_STATE_IDLE); |
1202 | conn->prof.tx_pending--; | 1210 | conn->prof.tx_pending--; |
1203 | privptr = (struct netiucv_priv *)conn->netdev->priv; | 1211 | privptr = netdev_priv(conn->netdev); |
1204 | if (privptr) | 1212 | if (privptr) |
1205 | privptr->stats.tx_errors++; | 1213 | privptr->stats.tx_errors++; |
1206 | if (copied) | 1214 | if (copied) |
@@ -1226,9 +1234,9 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | |||
1226 | return rc; | 1234 | return rc; |
1227 | } | 1235 | } |
1228 | 1236 | ||
1229 | /** | 1237 | /* |
1230 | * Interface API for upper network layers | 1238 | * Interface API for upper network layers |
1231 | *****************************************************************************/ | 1239 | */ |
1232 | 1240 | ||
1233 | /** | 1241 | /** |
1234 | * Open an interface. | 1242 | * Open an interface. |
@@ -1238,9 +1246,11 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { | |||
1238 | * | 1246 | * |
1239 | * @return 0 on success, -ERRNO on failure. (Never fails.) | 1247 | * @return 0 on success, -ERRNO on failure. (Never fails.) |
1240 | */ | 1248 | */ |
1241 | static int | 1249 | static int netiucv_open(struct net_device *dev) |
1242 | netiucv_open(struct net_device *dev) { | 1250 | { |
1243 | fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev); | 1251 | struct netiucv_priv *priv = netdev_priv(dev); |
1252 | |||
1253 | fsm_event(priv->fsm, DEV_EVENT_START, dev); | ||
1244 | return 0; | 1254 | return 0; |
1245 | } | 1255 | } |
1246 | 1256 | ||
@@ -1252,9 +1262,11 @@ netiucv_open(struct net_device *dev) { | |||
1252 | * | 1262 | * |
1253 | * @return 0 on success, -ERRNO on failure. (Never fails.) | 1263 | * @return 0 on success, -ERRNO on failure. (Never fails.) |
1254 | */ | 1264 | */ |
1255 | static int | 1265 | static int netiucv_close(struct net_device *dev) |
1256 | netiucv_close(struct net_device *dev) { | 1266 | { |
1257 | fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev); | 1267 | struct netiucv_priv *priv = netdev_priv(dev); |
1268 | |||
1269 | fsm_event(priv->fsm, DEV_EVENT_STOP, dev); | ||
1258 | return 0; | 1270 | return 0; |
1259 | } | 1271 | } |
1260 | 1272 | ||
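Most of the smaller hunks in this file follow a single pattern: direct dev->priv casts become netdev_priv() calls, which return the private area that alloc_netdev() reserved behind struct net_device. A one-line sketch of the accessor, assuming struct netiucv_priv from this driver:

    #include <linux/netdevice.h>

    static struct netiucv_priv *sketch_priv(struct net_device *dev)
    {
            /* Private data lives directly after struct net_device; no cast needed. */
            return netdev_priv(dev);
    }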
@@ -1271,8 +1283,8 @@ netiucv_close(struct net_device *dev) { | |||
1271 | */ | 1283 | */ |
1272 | static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) | 1284 | static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) |
1273 | { | 1285 | { |
1274 | int rc = 0; | 1286 | struct netiucv_priv *privptr = netdev_priv(dev); |
1275 | struct netiucv_priv *privptr = dev->priv; | 1287 | int rc; |
1276 | 1288 | ||
1277 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); | 1289 | IUCV_DBF_TEXT(trace, 4, __FUNCTION__); |
1278 | /** | 1290 | /** |
@@ -1312,40 +1324,41 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) | |||
1312 | return -EBUSY; | 1324 | return -EBUSY; |
1313 | } | 1325 | } |
1314 | dev->trans_start = jiffies; | 1326 | dev->trans_start = jiffies; |
1315 | if (netiucv_transmit_skb(privptr->conn, skb)) | 1327 | rc = netiucv_transmit_skb(privptr->conn, skb) != 0; |
1316 | rc = 1; | ||
1317 | netiucv_clear_busy(dev); | 1328 | netiucv_clear_busy(dev); |
1318 | return rc; | 1329 | return rc; |
1319 | } | 1330 | } |
1320 | 1331 | ||
1321 | /** | 1332 | /** |
1322 | * Returns interface statistics of a device. | 1333 | * netiucv_stats |
1334 | * @dev: Pointer to interface struct. | ||
1323 | * | 1335 | * |
1324 | * @param dev Pointer to interface struct. | 1336 | * Returns interface statistics of a device. |
1325 | * | 1337 | * |
1326 | * @return Pointer to stats struct of this interface. | 1338 | * Returns pointer to stats struct of this interface. |
1327 | */ | 1339 | */ |
1328 | static struct net_device_stats * | 1340 | static struct net_device_stats *netiucv_stats (struct net_device * dev) |
1329 | netiucv_stats (struct net_device * dev) | ||
1330 | { | 1341 | { |
1342 | struct netiucv_priv *priv = netdev_priv(dev); | ||
1343 | |||
1331 | IUCV_DBF_TEXT(trace, 5, __FUNCTION__); | 1344 | IUCV_DBF_TEXT(trace, 5, __FUNCTION__); |
1332 | return &((struct netiucv_priv *)dev->priv)->stats; | 1345 | return &priv->stats; |
1333 | } | 1346 | } |
1334 | 1347 | ||
1335 | /** | 1348 | /** |
1336 | * Sets MTU of an interface. | 1349 | * netiucv_change_mtu |
1350 | * @dev: Pointer to interface struct. | ||
1351 | * @new_mtu: The new MTU to use for this interface. | ||
1337 | * | 1352 | * |
1338 | * @param dev Pointer to interface struct. | 1353 | * Sets MTU of an interface. |
1339 | * @param new_mtu The new MTU to use for this interface. | ||
1340 | * | 1354 | * |
1341 | * @return 0 on success, -EINVAL if MTU is out of valid range. | 1355 | * Returns 0 on success, -EINVAL if MTU is out of valid range. |
1342 | * (valid range is 576 .. NETIUCV_MTU_MAX). | 1356 | * (valid range is 576 .. NETIUCV_MTU_MAX). |
1343 | */ | 1357 | */ |
1344 | static int | 1358 | static int netiucv_change_mtu(struct net_device * dev, int new_mtu) |
1345 | netiucv_change_mtu (struct net_device * dev, int new_mtu) | ||
1346 | { | 1359 | { |
1347 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1360 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1348 | if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) { | 1361 | if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) { |
1349 | IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); | 1362 | IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); |
1350 | return -EINVAL; | 1363 | return -EINVAL; |
1351 | } | 1364 | } |
@@ -1353,12 +1366,12 @@ netiucv_change_mtu (struct net_device * dev, int new_mtu) | |||
1353 | return 0; | 1366 | return 0; |
1354 | } | 1367 | } |
1355 | 1368 | ||
1356 | /** | 1369 | /* |
1357 | * attributes in sysfs | 1370 | * attributes in sysfs |
1358 | *****************************************************************************/ | 1371 | */ |
1359 | 1372 | ||
1360 | static ssize_t | 1373 | static ssize_t user_show(struct device *dev, struct device_attribute *attr, |
1361 | user_show (struct device *dev, struct device_attribute *attr, char *buf) | 1374 | char *buf) |
1362 | { | 1375 | { |
1363 | struct netiucv_priv *priv = dev->driver_data; | 1376 | struct netiucv_priv *priv = dev->driver_data; |
1364 | 1377 | ||
@@ -1366,8 +1379,8 @@ user_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1366 | return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); | 1379 | return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); |
1367 | } | 1380 | } |
1368 | 1381 | ||
1369 | static ssize_t | 1382 | static ssize_t user_write(struct device *dev, struct device_attribute *attr, |
1370 | user_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1383 | const char *buf, size_t count) |
1371 | { | 1384 | { |
1372 | struct netiucv_priv *priv = dev->driver_data; | 1385 | struct netiucv_priv *priv = dev->driver_data; |
1373 | struct net_device *ndev = priv->conn->netdev; | 1386 | struct net_device *ndev = priv->conn->netdev; |
@@ -1375,80 +1388,70 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1375 | char *tmp; | 1388 | char *tmp; |
1376 | char username[9]; | 1389 | char username[9]; |
1377 | int i; | 1390 | int i; |
1378 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | 1391 | struct iucv_connection *cp; |
1379 | unsigned long flags; | ||
1380 | 1392 | ||
1381 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1393 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1382 | if (count>9) { | 1394 | if (count > 9) { |
1383 | PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); | 1395 | PRINT_WARN("netiucv: username too long (%d)!\n", (int) count); |
1384 | IUCV_DBF_TEXT_(setup, 2, | 1396 | IUCV_DBF_TEXT_(setup, 2, |
1385 | "%d is length of username\n", (int)count); | 1397 | "%d is length of username\n", (int) count); |
1386 | return -EINVAL; | 1398 | return -EINVAL; |
1387 | } | 1399 | } |
1388 | 1400 | ||
1389 | tmp = strsep((char **) &buf, "\n"); | 1401 | tmp = strsep((char **) &buf, "\n"); |
1390 | for (i=0, p=tmp; i<8 && *p; i++, p++) { | 1402 | for (i = 0, p = tmp; i < 8 && *p; i++, p++) { |
1391 | if (isalnum(*p) || (*p == '$')) | 1403 | if (isalnum(*p) || (*p == '$')) { |
1392 | username[i]= toupper(*p); | 1404 | username[i]= toupper(*p); |
1393 | else if (*p == '\n') { | 1405 | continue; |
1406 | } | ||
1407 | if (*p == '\n') { | ||
1394 | /* trailing lf, grr */ | 1408 | /* trailing lf, grr */ |
1395 | break; | 1409 | break; |
1396 | } else { | ||
1397 | PRINT_WARN("netiucv: Invalid char %c in username!\n", | ||
1398 | *p); | ||
1399 | IUCV_DBF_TEXT_(setup, 2, | ||
1400 | "username: invalid character %c\n", | ||
1401 | *p); | ||
1402 | return -EINVAL; | ||
1403 | } | 1410 | } |
1411 | PRINT_WARN("netiucv: Invalid char %c in username!\n", *p); | ||
1412 | IUCV_DBF_TEXT_(setup, 2, | ||
1413 | "username: invalid character %c\n", *p); | ||
1414 | return -EINVAL; | ||
1404 | } | 1415 | } |
1405 | while (i<8) | 1416 | while (i < 8) |
1406 | username[i++] = ' '; | 1417 | username[i++] = ' '; |
1407 | username[8] = '\0'; | 1418 | username[8] = '\0'; |
1408 | 1419 | ||
1409 | if (memcmp(username, priv->conn->userid, 9)) { | 1420 | if (memcmp(username, priv->conn->userid, 9) && |
1410 | /* username changed */ | 1421 | (ndev->flags & (IFF_UP | IFF_RUNNING))) { |
1411 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { | 1422 | /* username changed while the interface is active. */ |
1412 | PRINT_WARN( | 1423 | PRINT_WARN("netiucv: device %s active, connected to %s\n", |
1413 | "netiucv: device %s active, connected to %s\n", | 1424 | dev->bus_id, priv->conn->userid); |
1414 | dev->bus_id, priv->conn->userid); | 1425 | PRINT_WARN("netiucv: user cannot be updated\n"); |
1415 | PRINT_WARN("netiucv: user cannot be updated\n"); | 1426 | IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); |
1416 | IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); | 1427 | return -EBUSY; |
1417 | return -EBUSY; | 1428 | } |
1429 | read_lock_bh(&iucv_connection_rwlock); | ||
1430 | list_for_each_entry(cp, &iucv_connection_list, list) { | ||
1431 | if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) { | ||
1432 | read_unlock_bh(&iucv_connection_rwlock); | ||
1433 | PRINT_WARN("netiucv: Connection to %s already " | ||
1434 | "exists\n", username); | ||
1435 | return -EEXIST; | ||
1418 | } | 1436 | } |
1419 | } | 1437 | } |
1420 | read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | 1438 | read_unlock_bh(&iucv_connection_rwlock); |
1421 | while (*clist) { | ||
1422 | if (!strncmp(username, (*clist)->userid, 9) || | ||
1423 | ((*clist)->netdev != ndev)) | ||
1424 | break; | ||
1425 | clist = &((*clist)->next); | ||
1426 | } | ||
1427 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | ||
1428 | if (*clist) { | ||
1429 | PRINT_WARN("netiucv: Connection to %s already exists\n", | ||
1430 | username); | ||
1431 | return -EEXIST; | ||
1432 | } | ||
1433 | memcpy(priv->conn->userid, username, 9); | 1439 | memcpy(priv->conn->userid, username, 9); |
1434 | |||
1435 | return count; | 1440 | return count; |
1436 | |||
1437 | } | 1441 | } |
1438 | 1442 | ||
1439 | static DEVICE_ATTR(user, 0644, user_show, user_write); | 1443 | static DEVICE_ATTR(user, 0644, user_show, user_write); |
1440 | 1444 | ||
1441 | static ssize_t | 1445 | static ssize_t buffer_show (struct device *dev, struct device_attribute *attr, |
1442 | buffer_show (struct device *dev, struct device_attribute *attr, char *buf) | 1446 | char *buf) |
1443 | { | 1447 | { struct netiucv_priv *priv = dev->driver_data; |
1444 | struct netiucv_priv *priv = dev->driver_data; | ||
1445 | 1448 | ||
1446 | IUCV_DBF_TEXT(trace, 5, __FUNCTION__); | 1449 | IUCV_DBF_TEXT(trace, 5, __FUNCTION__); |
1447 | return sprintf(buf, "%d\n", priv->conn->max_buffsize); | 1450 | return sprintf(buf, "%d\n", priv->conn->max_buffsize); |
1448 | } | 1451 | } |
1449 | 1452 | ||
1450 | static ssize_t | 1453 | static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, |
1451 | buffer_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1454 | const char *buf, size_t count) |
1452 | { | 1455 | { |
1453 | struct netiucv_priv *priv = dev->driver_data; | 1456 | struct netiucv_priv *priv = dev->driver_data; |
1454 | struct net_device *ndev = priv->conn->netdev; | 1457 | struct net_device *ndev = priv->conn->netdev; |
@@ -1502,8 +1505,8 @@ buffer_write (struct device *dev, struct device_attribute *attr, const char *buf | |||
1502 | 1505 | ||
1503 | static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); | 1506 | static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); |
1504 | 1507 | ||
1505 | static ssize_t | 1508 | static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr, |
1506 | dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) | 1509 | char *buf) |
1507 | { | 1510 | { |
1508 | struct netiucv_priv *priv = dev->driver_data; | 1511 | struct netiucv_priv *priv = dev->driver_data; |
1509 | 1512 | ||
@@ -1513,8 +1516,8 @@ dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1513 | 1516 | ||
1514 | static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); | 1517 | static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); |
1515 | 1518 | ||
1516 | static ssize_t | 1519 | static ssize_t conn_fsm_show (struct device *dev, |
1517 | conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) | 1520 | struct device_attribute *attr, char *buf) |
1518 | { | 1521 | { |
1519 | struct netiucv_priv *priv = dev->driver_data; | 1522 | struct netiucv_priv *priv = dev->driver_data; |
1520 | 1523 | ||
@@ -1524,8 +1527,8 @@ conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1524 | 1527 | ||
1525 | static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); | 1528 | static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); |
1526 | 1529 | ||
1527 | static ssize_t | 1530 | static ssize_t maxmulti_show (struct device *dev, |
1528 | maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf) | 1531 | struct device_attribute *attr, char *buf) |
1529 | { | 1532 | { |
1530 | struct netiucv_priv *priv = dev->driver_data; | 1533 | struct netiucv_priv *priv = dev->driver_data; |
1531 | 1534 | ||
@@ -1533,8 +1536,9 @@ maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1533 | return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); | 1536 | return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); |
1534 | } | 1537 | } |
1535 | 1538 | ||
1536 | static ssize_t | 1539 | static ssize_t maxmulti_write (struct device *dev, |
1537 | maxmulti_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1540 | struct device_attribute *attr, |
1541 | const char *buf, size_t count) | ||
1538 | { | 1542 | { |
1539 | struct netiucv_priv *priv = dev->driver_data; | 1543 | struct netiucv_priv *priv = dev->driver_data; |
1540 | 1544 | ||
@@ -1545,8 +1549,8 @@ maxmulti_write (struct device *dev, struct device_attribute *attr, const char *b | |||
1545 | 1549 | ||
1546 | static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); | 1550 | static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); |
1547 | 1551 | ||
1548 | static ssize_t | 1552 | static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr, |
1549 | maxcq_show (struct device *dev, struct device_attribute *attr, char *buf) | 1553 | char *buf) |
1550 | { | 1554 | { |
1551 | struct netiucv_priv *priv = dev->driver_data; | 1555 | struct netiucv_priv *priv = dev->driver_data; |
1552 | 1556 | ||
@@ -1554,8 +1558,8 @@ maxcq_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1554 | return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); | 1558 | return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); |
1555 | } | 1559 | } |
1556 | 1560 | ||
1557 | static ssize_t | 1561 | static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr, |
1558 | maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1562 | const char *buf, size_t count) |
1559 | { | 1563 | { |
1560 | struct netiucv_priv *priv = dev->driver_data; | 1564 | struct netiucv_priv *priv = dev->driver_data; |
1561 | 1565 | ||
@@ -1566,8 +1570,8 @@ maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1566 | 1570 | ||
1567 | static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); | 1571 | static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); |
1568 | 1572 | ||
1569 | static ssize_t | 1573 | static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr, |
1570 | sdoio_show (struct device *dev, struct device_attribute *attr, char *buf) | 1574 | char *buf) |
1571 | { | 1575 | { |
1572 | struct netiucv_priv *priv = dev->driver_data; | 1576 | struct netiucv_priv *priv = dev->driver_data; |
1573 | 1577 | ||
@@ -1575,8 +1579,8 @@ sdoio_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1575 | return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); | 1579 | return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); |
1576 | } | 1580 | } |
1577 | 1581 | ||
1578 | static ssize_t | 1582 | static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr, |
1579 | sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1583 | const char *buf, size_t count) |
1580 | { | 1584 | { |
1581 | struct netiucv_priv *priv = dev->driver_data; | 1585 | struct netiucv_priv *priv = dev->driver_data; |
1582 | 1586 | ||
@@ -1587,8 +1591,8 @@ sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1587 | 1591 | ||
1588 | static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); | 1592 | static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); |
1589 | 1593 | ||
1590 | static ssize_t | 1594 | static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr, |
1591 | mdoio_show (struct device *dev, struct device_attribute *attr, char *buf) | 1595 | char *buf) |
1592 | { | 1596 | { |
1593 | struct netiucv_priv *priv = dev->driver_data; | 1597 | struct netiucv_priv *priv = dev->driver_data; |
1594 | 1598 | ||
@@ -1596,8 +1600,8 @@ mdoio_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1596 | return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); | 1600 | return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); |
1597 | } | 1601 | } |
1598 | 1602 | ||
1599 | static ssize_t | 1603 | static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr, |
1600 | mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1604 | const char *buf, size_t count) |
1601 | { | 1605 | { |
1602 | struct netiucv_priv *priv = dev->driver_data; | 1606 | struct netiucv_priv *priv = dev->driver_data; |
1603 | 1607 | ||
@@ -1608,8 +1612,8 @@ mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1608 | 1612 | ||
1609 | static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); | 1613 | static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); |
1610 | 1614 | ||
1611 | static ssize_t | 1615 | static ssize_t txlen_show (struct device *dev, struct device_attribute *attr, |
1612 | txlen_show (struct device *dev, struct device_attribute *attr, char *buf) | 1616 | char *buf) |
1613 | { | 1617 | { |
1614 | struct netiucv_priv *priv = dev->driver_data; | 1618 | struct netiucv_priv *priv = dev->driver_data; |
1615 | 1619 | ||
@@ -1617,8 +1621,8 @@ txlen_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1617 | return sprintf(buf, "%ld\n", priv->conn->prof.txlen); | 1621 | return sprintf(buf, "%ld\n", priv->conn->prof.txlen); |
1618 | } | 1622 | } |
1619 | 1623 | ||
1620 | static ssize_t | 1624 | static ssize_t txlen_write (struct device *dev, struct device_attribute *attr, |
1621 | txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1625 | const char *buf, size_t count) |
1622 | { | 1626 | { |
1623 | struct netiucv_priv *priv = dev->driver_data; | 1627 | struct netiucv_priv *priv = dev->driver_data; |
1624 | 1628 | ||
@@ -1629,8 +1633,8 @@ txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1629 | 1633 | ||
1630 | static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); | 1634 | static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); |
1631 | 1635 | ||
1632 | static ssize_t | 1636 | static ssize_t txtime_show (struct device *dev, struct device_attribute *attr, |
1633 | txtime_show (struct device *dev, struct device_attribute *attr, char *buf) | 1637 | char *buf) |
1634 | { | 1638 | { |
1635 | struct netiucv_priv *priv = dev->driver_data; | 1639 | struct netiucv_priv *priv = dev->driver_data; |
1636 | 1640 | ||
@@ -1638,8 +1642,8 @@ txtime_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1638 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); | 1642 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); |
1639 | } | 1643 | } |
1640 | 1644 | ||
1641 | static ssize_t | 1645 | static ssize_t txtime_write (struct device *dev, struct device_attribute *attr, |
1642 | txtime_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1646 | const char *buf, size_t count) |
1643 | { | 1647 | { |
1644 | struct netiucv_priv *priv = dev->driver_data; | 1648 | struct netiucv_priv *priv = dev->driver_data; |
1645 | 1649 | ||
@@ -1650,8 +1654,8 @@ txtime_write (struct device *dev, struct device_attribute *attr, const char *buf | |||
1650 | 1654 | ||
1651 | static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); | 1655 | static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); |
1652 | 1656 | ||
1653 | static ssize_t | 1657 | static ssize_t txpend_show (struct device *dev, struct device_attribute *attr, |
1654 | txpend_show (struct device *dev, struct device_attribute *attr, char *buf) | 1658 | char *buf) |
1655 | { | 1659 | { |
1656 | struct netiucv_priv *priv = dev->driver_data; | 1660 | struct netiucv_priv *priv = dev->driver_data; |
1657 | 1661 | ||
@@ -1659,8 +1663,8 @@ txpend_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1659 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); | 1663 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); |
1660 | } | 1664 | } |
1661 | 1665 | ||
1662 | static ssize_t | 1666 | static ssize_t txpend_write (struct device *dev, struct device_attribute *attr, |
1663 | txpend_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1667 | const char *buf, size_t count) |
1664 | { | 1668 | { |
1665 | struct netiucv_priv *priv = dev->driver_data; | 1669 | struct netiucv_priv *priv = dev->driver_data; |
1666 | 1670 | ||
@@ -1671,8 +1675,8 @@ txpend_write (struct device *dev, struct device_attribute *attr, const char *buf | |||
1671 | 1675 | ||
1672 | static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); | 1676 | static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); |
1673 | 1677 | ||
1674 | static ssize_t | 1678 | static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr, |
1675 | txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf) | 1679 | char *buf) |
1676 | { | 1680 | { |
1677 | struct netiucv_priv *priv = dev->driver_data; | 1681 | struct netiucv_priv *priv = dev->driver_data; |
1678 | 1682 | ||
@@ -1680,8 +1684,8 @@ txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf) | |||
1680 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); | 1684 | return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); |
1681 | } | 1685 | } |
1682 | 1686 | ||
1683 | static ssize_t | 1687 | static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr, |
1684 | txmpnd_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | 1688 | const char *buf, size_t count) |
1685 | { | 1689 | { |
1686 | struct netiucv_priv *priv = dev->driver_data; | 1690 | struct netiucv_priv *priv = dev->driver_data; |
1687 | 1691 | ||
@@ -1721,8 +1725,7 @@ static struct attribute_group netiucv_stat_attr_group = { | |||
1721 | .attrs = netiucv_stat_attrs, | 1725 | .attrs = netiucv_stat_attrs, |
1722 | }; | 1726 | }; |
1723 | 1727 | ||
1724 | static inline int | 1728 | static inline int netiucv_add_files(struct device *dev) |
1725 | netiucv_add_files(struct device *dev) | ||
1726 | { | 1729 | { |
1727 | int ret; | 1730 | int ret; |
1728 | 1731 | ||
@@ -1736,18 +1739,16 @@ netiucv_add_files(struct device *dev) | |||
1736 | return ret; | 1739 | return ret; |
1737 | } | 1740 | } |
1738 | 1741 | ||
1739 | static inline void | 1742 | static inline void netiucv_remove_files(struct device *dev) |
1740 | netiucv_remove_files(struct device *dev) | ||
1741 | { | 1743 | { |
1742 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1744 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1743 | sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); | 1745 | sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); |
1744 | sysfs_remove_group(&dev->kobj, &netiucv_attr_group); | 1746 | sysfs_remove_group(&dev->kobj, &netiucv_attr_group); |
1745 | } | 1747 | } |
1746 | 1748 | ||
1747 | static int | 1749 | static int netiucv_register_device(struct net_device *ndev) |
1748 | netiucv_register_device(struct net_device *ndev) | ||
1749 | { | 1750 | { |
1750 | struct netiucv_priv *priv = ndev->priv; | 1751 | struct netiucv_priv *priv = netdev_priv(ndev); |
1751 | struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); | 1752 | struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); |
1752 | int ret; | 1753 | int ret; |
1753 | 1754 | ||
@@ -1786,8 +1787,7 @@ out_unreg: | |||
1786 | return ret; | 1787 | return ret; |
1787 | } | 1788 | } |
1788 | 1789 | ||
1789 | static void | 1790 | static void netiucv_unregister_device(struct device *dev) |
1790 | netiucv_unregister_device(struct device *dev) | ||
1791 | { | 1791 | { |
1792 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1792 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1793 | netiucv_remove_files(dev); | 1793 | netiucv_remove_files(dev); |
@@ -1798,107 +1798,89 @@ netiucv_unregister_device(struct device *dev) | |||
1798 | * Allocate and initialize a new connection structure. | 1798 | * Allocate and initialize a new connection structure. |
1799 | * Add it to the list of netiucv connections; | 1799 | * Add it to the list of netiucv connections; |
1800 | */ | 1800 | */ |
1801 | static struct iucv_connection * | 1801 | static struct iucv_connection *netiucv_new_connection(struct net_device *dev, |
1802 | netiucv_new_connection(struct net_device *dev, char *username) | 1802 | char *username) |
1803 | { | 1803 | { |
1804 | unsigned long flags; | 1804 | struct iucv_connection *conn; |
1805 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | ||
1806 | struct iucv_connection *conn = | ||
1807 | kzalloc(sizeof(struct iucv_connection), GFP_KERNEL); | ||
1808 | |||
1809 | if (conn) { | ||
1810 | skb_queue_head_init(&conn->collect_queue); | ||
1811 | skb_queue_head_init(&conn->commit_queue); | ||
1812 | spin_lock_init(&conn->collect_lock); | ||
1813 | conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; | ||
1814 | conn->netdev = dev; | ||
1815 | |||
1816 | conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT, | ||
1817 | GFP_KERNEL | GFP_DMA); | ||
1818 | if (!conn->rx_buff) { | ||
1819 | kfree(conn); | ||
1820 | return NULL; | ||
1821 | } | ||
1822 | conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT, | ||
1823 | GFP_KERNEL | GFP_DMA); | ||
1824 | if (!conn->tx_buff) { | ||
1825 | kfree_skb(conn->rx_buff); | ||
1826 | kfree(conn); | ||
1827 | return NULL; | ||
1828 | } | ||
1829 | conn->fsm = init_fsm("netiucvconn", conn_state_names, | ||
1830 | conn_event_names, NR_CONN_STATES, | ||
1831 | NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, | ||
1832 | GFP_KERNEL); | ||
1833 | if (!conn->fsm) { | ||
1834 | kfree_skb(conn->tx_buff); | ||
1835 | kfree_skb(conn->rx_buff); | ||
1836 | kfree(conn); | ||
1837 | return NULL; | ||
1838 | } | ||
1839 | fsm_settimer(conn->fsm, &conn->timer); | ||
1840 | fsm_newstate(conn->fsm, CONN_STATE_INVALID); | ||
1841 | |||
1842 | if (username) { | ||
1843 | memcpy(conn->userid, username, 9); | ||
1844 | fsm_newstate(conn->fsm, CONN_STATE_STOPPED); | ||
1845 | } | ||
1846 | 1805 | ||
1847 | write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | 1806 | conn = kzalloc(sizeof(*conn), GFP_KERNEL); |
1848 | conn->next = *clist; | 1807 | if (!conn) |
1849 | *clist = conn; | 1808 | goto out; |
1850 | write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | 1809 | skb_queue_head_init(&conn->collect_queue); |
1810 | skb_queue_head_init(&conn->commit_queue); | ||
1811 | spin_lock_init(&conn->collect_lock); | ||
1812 | conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; | ||
1813 | conn->netdev = dev; | ||
1814 | |||
1815 | conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); | ||
1816 | if (!conn->rx_buff) | ||
1817 | goto out_conn; | ||
1818 | conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); | ||
1819 | if (!conn->tx_buff) | ||
1820 | goto out_rx; | ||
1821 | conn->fsm = init_fsm("netiucvconn", conn_state_names, | ||
1822 | conn_event_names, NR_CONN_STATES, | ||
1823 | NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, | ||
1824 | GFP_KERNEL); | ||
1825 | if (!conn->fsm) | ||
1826 | goto out_tx; | ||
1827 | |||
1828 | fsm_settimer(conn->fsm, &conn->timer); | ||
1829 | fsm_newstate(conn->fsm, CONN_STATE_INVALID); | ||
1830 | |||
1831 | if (username) { | ||
1832 | memcpy(conn->userid, username, 9); | ||
1833 | fsm_newstate(conn->fsm, CONN_STATE_STOPPED); | ||
1851 | } | 1834 | } |
1835 | |||
1836 | write_lock_bh(&iucv_connection_rwlock); | ||
1837 | list_add_tail(&conn->list, &iucv_connection_list); | ||
1838 | write_unlock_bh(&iucv_connection_rwlock); | ||
1852 | return conn; | 1839 | return conn; |
1840 | |||
1841 | out_tx: | ||
1842 | kfree_skb(conn->tx_buff); | ||
1843 | out_rx: | ||
1844 | kfree_skb(conn->rx_buff); | ||
1845 | out_conn: | ||
1846 | kfree(conn); | ||
1847 | out: | ||
1848 | return NULL; | ||
1853 | } | 1849 | } |
1854 | 1850 | ||
1855 | /** | 1851 | /** |
1856 | * Release a connection structure and remove it from the | 1852 | * Release a connection structure and remove it from the |
1857 | * list of netiucv connections. | 1853 | * list of netiucv connections. |
1858 | */ | 1854 | */ |
1859 | static void | 1855 | static void netiucv_remove_connection(struct iucv_connection *conn) |
1860 | netiucv_remove_connection(struct iucv_connection *conn) | ||
1861 | { | 1856 | { |
1862 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | ||
1863 | unsigned long flags; | ||
1864 | |||
1865 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1857 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1866 | if (conn == NULL) | 1858 | write_lock_bh(&iucv_connection_rwlock); |
1867 | return; | 1859 | list_del_init(&conn->list); |
1868 | write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | 1860 | write_unlock_bh(&iucv_connection_rwlock); |
1869 | while (*clist) { | 1861 | if (conn->path) { |
1870 | if (*clist == conn) { | 1862 | iucv_path_sever(conn->path, iucvMagic); |
1871 | *clist = conn->next; | 1863 | kfree(conn->path); |
1872 | write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | 1864 | conn->path = NULL; |
1873 | if (conn->handle) { | ||
1874 | iucv_unregister_program(conn->handle); | ||
1875 | conn->handle = NULL; | ||
1876 | } | ||
1877 | fsm_deltimer(&conn->timer); | ||
1878 | kfree_fsm(conn->fsm); | ||
1879 | kfree_skb(conn->rx_buff); | ||
1880 | kfree_skb(conn->tx_buff); | ||
1881 | return; | ||
1882 | } | ||
1883 | clist = &((*clist)->next); | ||
1884 | } | 1865 | } |
1885 | write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | 1866 | fsm_deltimer(&conn->timer); |
1867 | kfree_fsm(conn->fsm); | ||
1868 | kfree_skb(conn->rx_buff); | ||
1869 | kfree_skb(conn->tx_buff); | ||
1886 | } | 1870 | } |
1887 | 1871 | ||
1888 | /** | 1872 | /** |
1889 | * Release everything of a net device. | 1873 | * Release everything of a net device. |
1890 | */ | 1874 | */ |
1891 | static void | 1875 | static void netiucv_free_netdevice(struct net_device *dev) |
1892 | netiucv_free_netdevice(struct net_device *dev) | ||
1893 | { | 1876 | { |
1894 | struct netiucv_priv *privptr; | 1877 | struct netiucv_priv *privptr = netdev_priv(dev); |
1895 | 1878 | ||
1896 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1879 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1897 | 1880 | ||
1898 | if (!dev) | 1881 | if (!dev) |
1899 | return; | 1882 | return; |
1900 | 1883 | ||
1901 | privptr = (struct netiucv_priv *)dev->priv; | ||
1902 | if (privptr) { | 1884 | if (privptr) { |
1903 | if (privptr->conn) | 1885 | if (privptr->conn) |
1904 | netiucv_remove_connection(privptr->conn); | 1886 | netiucv_remove_connection(privptr->conn); |
@@ -1913,11 +1895,8 @@ netiucv_free_netdevice(struct net_device *dev) | |||
1913 | /** | 1895 | /** |
1914 | * Initialize a net device. (Called from kernel in alloc_netdev()) | 1896 | * Initialize a net device. (Called from kernel in alloc_netdev()) |
1915 | */ | 1897 | */ |
1916 | static void | 1898 | static void netiucv_setup_netdevice(struct net_device *dev) |
1917 | netiucv_setup_netdevice(struct net_device *dev) | ||
1918 | { | 1899 | { |
1919 | memset(dev->priv, 0, sizeof(struct netiucv_priv)); | ||
1920 | |||
1921 | dev->mtu = NETIUCV_MTU_DEFAULT; | 1900 | dev->mtu = NETIUCV_MTU_DEFAULT; |
1922 | dev->hard_start_xmit = netiucv_tx; | 1901 | dev->hard_start_xmit = netiucv_tx; |
1923 | dev->open = netiucv_open; | 1902 | dev->open = netiucv_open; |
@@ -1936,8 +1915,7 @@ netiucv_setup_netdevice(struct net_device *dev) | |||
1936 | /** | 1915 | /** |
1937 | * Allocate and initialize everything of a net device. | 1916 | * Allocate and initialize everything of a net device. |
1938 | */ | 1917 | */ |
1939 | static struct net_device * | 1918 | static struct net_device *netiucv_init_netdevice(char *username) |
1940 | netiucv_init_netdevice(char *username) | ||
1941 | { | 1919 | { |
1942 | struct netiucv_priv *privptr; | 1920 | struct netiucv_priv *privptr; |
1943 | struct net_device *dev; | 1921 | struct net_device *dev; |
@@ -1946,40 +1924,40 @@ netiucv_init_netdevice(char *username) | |||
1946 | netiucv_setup_netdevice); | 1924 | netiucv_setup_netdevice); |
1947 | if (!dev) | 1925 | if (!dev) |
1948 | return NULL; | 1926 | return NULL; |
1949 | if (dev_alloc_name(dev, dev->name) < 0) { | 1927 | if (dev_alloc_name(dev, dev->name) < 0) |
1950 | free_netdev(dev); | 1928 | goto out_netdev; |
1951 | return NULL; | ||
1952 | } | ||
1953 | 1929 | ||
1954 | privptr = (struct netiucv_priv *)dev->priv; | 1930 | privptr = netdev_priv(dev); |
1955 | privptr->fsm = init_fsm("netiucvdev", dev_state_names, | 1931 | privptr->fsm = init_fsm("netiucvdev", dev_state_names, |
1956 | dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, | 1932 | dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, |
1957 | dev_fsm, DEV_FSM_LEN, GFP_KERNEL); | 1933 | dev_fsm, DEV_FSM_LEN, GFP_KERNEL); |
1958 | if (!privptr->fsm) { | 1934 | if (!privptr->fsm) |
1959 | free_netdev(dev); | 1935 | goto out_netdev; |
1960 | return NULL; | 1936 | |
1961 | } | ||
1962 | privptr->conn = netiucv_new_connection(dev, username); | 1937 | privptr->conn = netiucv_new_connection(dev, username); |
1963 | if (!privptr->conn) { | 1938 | if (!privptr->conn) { |
1964 | kfree_fsm(privptr->fsm); | ||
1965 | free_netdev(dev); | ||
1966 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); | 1939 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); |
1967 | return NULL; | 1940 | goto out_fsm; |
1968 | } | 1941 | } |
1969 | fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); | 1942 | fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); |
1970 | |||
1971 | return dev; | 1943 | return dev; |
1944 | |||
1945 | out_fsm: | ||
1946 | kfree_fsm(privptr->fsm); | ||
1947 | out_netdev: | ||
1948 | free_netdev(dev); | ||
1949 | return NULL; | ||
1972 | } | 1950 | } |
1973 | 1951 | ||
1974 | static ssize_t | 1952 | static ssize_t conn_write(struct device_driver *drv, |
1975 | conn_write(struct device_driver *drv, const char *buf, size_t count) | 1953 | const char *buf, size_t count) |
1976 | { | 1954 | { |
1977 | char *p; | 1955 | const char *p; |
1978 | char username[9]; | 1956 | char username[9]; |
1979 | int i, ret; | 1957 | int i, rc; |
1980 | struct net_device *dev; | 1958 | struct net_device *dev; |
1981 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | 1959 | struct netiucv_priv *priv; |
1982 | unsigned long flags; | 1960 | struct iucv_connection *cp; |
1983 | 1961 | ||
1984 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1962 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1985 | if (count>9) { | 1963 | if (count>9) { |
@@ -1988,83 +1966,82 @@ conn_write(struct device_driver *drv, const char *buf, size_t count) | |||
1988 | return -EINVAL; | 1966 | return -EINVAL; |
1989 | } | 1967 | } |
1990 | 1968 | ||
1991 | for (i=0, p=(char *)buf; i<8 && *p; i++, p++) { | 1969 | for (i = 0, p = buf; i < 8 && *p; i++, p++) { |
1992 | if (isalnum(*p) || (*p == '$')) | 1970 | if (isalnum(*p) || *p == '$') { |
1993 | username[i]= toupper(*p); | 1971 | username[i] = toupper(*p); |
1994 | else if (*p == '\n') { | 1972 | continue; |
1973 | } | ||
1974 | if (*p == '\n') | ||
1995 | /* trailing lf, grr */ | 1975 | /* trailing lf, grr */ |
1996 | break; | 1976 | break; |
1997 | } else { | 1977 | PRINT_WARN("netiucv: Invalid character in username!\n"); |
1998 | PRINT_WARN("netiucv: Invalid character in username!\n"); | 1978 | IUCV_DBF_TEXT_(setup, 2, |
1999 | IUCV_DBF_TEXT_(setup, 2, | 1979 | "conn_write: invalid character %c\n", *p); |
2000 | "conn_write: invalid character %c\n", *p); | 1980 | return -EINVAL; |
2001 | return -EINVAL; | ||
2002 | } | ||
2003 | } | 1981 | } |
2004 | while (i<8) | 1982 | while (i < 8) |
2005 | username[i++] = ' '; | 1983 | username[i++] = ' '; |
2006 | username[8] = '\0'; | 1984 | username[8] = '\0'; |
2007 | 1985 | ||
2008 | read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | 1986 | read_lock_bh(&iucv_connection_rwlock); |
2009 | while (*clist) { | 1987 | list_for_each_entry(cp, &iucv_connection_list, list) { |
2010 | if (!strncmp(username, (*clist)->userid, 9)) | 1988 | if (!strncmp(username, cp->userid, 9)) { |
2011 | break; | 1989 | read_unlock_bh(&iucv_connection_rwlock); |
2012 | clist = &((*clist)->next); | 1990 | PRINT_WARN("netiucv: Connection to %s already " |
2013 | } | 1991 | "exists\n", username); |
2014 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | 1992 | return -EEXIST; |
2015 | if (*clist) { | 1993 | } |
2016 | PRINT_WARN("netiucv: Connection to %s already exists\n", | ||
2017 | username); | ||
2018 | return -EEXIST; | ||
2019 | } | 1994 | } |
1995 | read_unlock_bh(&iucv_connection_rwlock); | ||
1996 | |||
2020 | dev = netiucv_init_netdevice(username); | 1997 | dev = netiucv_init_netdevice(username); |
2021 | if (!dev) { | 1998 | if (!dev) { |
2022 | PRINT_WARN( | 1999 | PRINT_WARN("netiucv: Could not allocate network device " |
2023 | "netiucv: Could not allocate network device structure " | 2000 | "structure for user '%s'\n", |
2024 | "for user '%s'\n", netiucv_printname(username)); | 2001 | netiucv_printname(username)); |
2025 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); | 2002 | IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); |
2026 | return -ENODEV; | 2003 | return -ENODEV; |
2027 | } | 2004 | } |
2028 | 2005 | ||
2029 | if ((ret = netiucv_register_device(dev))) { | 2006 | rc = netiucv_register_device(dev); |
2007 | if (rc) { | ||
2030 | IUCV_DBF_TEXT_(setup, 2, | 2008 | IUCV_DBF_TEXT_(setup, 2, |
2031 | "ret %d from netiucv_register_device\n", ret); | 2009 | "ret %d from netiucv_register_device\n", rc); |
2032 | goto out_free_ndev; | 2010 | goto out_free_ndev; |
2033 | } | 2011 | } |
2034 | 2012 | ||
2035 | /* sysfs magic */ | 2013 | /* sysfs magic */ |
2036 | SET_NETDEV_DEV(dev, | 2014 | priv = netdev_priv(dev); |
2037 | (struct device*)((struct netiucv_priv*)dev->priv)->dev); | 2015 | SET_NETDEV_DEV(dev, priv->dev); |
2038 | 2016 | ||
2039 | if ((ret = register_netdev(dev))) { | 2017 | rc = register_netdev(dev); |
2040 | netiucv_unregister_device((struct device*) | 2018 | if (rc) |
2041 | ((struct netiucv_priv*)dev->priv)->dev); | 2019 | goto out_unreg; |
2042 | goto out_free_ndev; | ||
2043 | } | ||
2044 | 2020 | ||
2045 | PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); | 2021 | PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); |
2046 | 2022 | ||
2047 | return count; | 2023 | return count; |
2048 | 2024 | ||
2025 | out_unreg: | ||
2026 | netiucv_unregister_device(priv->dev); | ||
2049 | out_free_ndev: | 2027 | out_free_ndev: |
2050 | PRINT_WARN("netiucv: Could not register '%s'\n", dev->name); | 2028 | PRINT_WARN("netiucv: Could not register '%s'\n", dev->name); |
2051 | IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n"); | 2029 | IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n"); |
2052 | netiucv_free_netdevice(dev); | 2030 | netiucv_free_netdevice(dev); |
2053 | return ret; | 2031 | return rc; |
2054 | } | 2032 | } |
2055 | 2033 | ||
2056 | static DRIVER_ATTR(connection, 0200, NULL, conn_write); | 2034 | static DRIVER_ATTR(connection, 0200, NULL, conn_write); |
2057 | 2035 | ||
2058 | static ssize_t | 2036 | static ssize_t remove_write (struct device_driver *drv, |
2059 | remove_write (struct device_driver *drv, const char *buf, size_t count) | 2037 | const char *buf, size_t count) |
2060 | { | 2038 | { |
2061 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | 2039 | struct iucv_connection *cp; |
2062 | unsigned long flags; | ||
2063 | struct net_device *ndev; | 2040 | struct net_device *ndev; |
2064 | struct netiucv_priv *priv; | 2041 | struct netiucv_priv *priv; |
2065 | struct device *dev; | 2042 | struct device *dev; |
2066 | char name[IFNAMSIZ]; | 2043 | char name[IFNAMSIZ]; |
2067 | char *p; | 2044 | const char *p; |
2068 | int i; | 2045 | int i; |
2069 | 2046 | ||
2070 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 2047 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
@@ -2072,33 +2049,27 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) | |||
2072 | if (count >= IFNAMSIZ) | 2049 | if (count >= IFNAMSIZ) |
2073 | count = IFNAMSIZ - 1; | 2050 | count = IFNAMSIZ - 1; |
2074 | 2051 | ||
2075 | for (i=0, p=(char *)buf; i<count && *p; i++, p++) { | 2052 | for (i = 0, p = buf; i < count && *p; i++, p++) { |
2076 | if ((*p == '\n') || (*p == ' ')) { | 2053 | if (*p == '\n' || *p == ' ') |
2077 | /* trailing lf, grr */ | 2054 | /* trailing lf, grr */ |
2078 | break; | 2055 | break; |
2079 | } else { | 2056 | name[i] = *p; |
2080 | name[i]=*p; | ||
2081 | } | ||
2082 | } | 2057 | } |
2083 | name[i] = '\0'; | 2058 | name[i] = '\0'; |
2084 | 2059 | ||
2085 | read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | 2060 | read_lock_bh(&iucv_connection_rwlock); |
2086 | while (*clist) { | 2061 | list_for_each_entry(cp, &iucv_connection_list, list) { |
2087 | ndev = (*clist)->netdev; | 2062 | ndev = cp->netdev; |
2088 | priv = (struct netiucv_priv*)ndev->priv; | 2063 | priv = netdev_priv(ndev); |
2089 | dev = priv->dev; | 2064 | dev = priv->dev; |
2090 | 2065 | if (strncmp(name, ndev->name, count)) | |
2091 | if (strncmp(name, ndev->name, count)) { | 2066 | continue; |
2092 | clist = &((*clist)->next); | 2067 | read_unlock_bh(&iucv_connection_rwlock); |
2093 | continue; | ||
2094 | } | ||
2095 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | ||
2096 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { | 2068 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { |
2097 | PRINT_WARN( | 2069 | PRINT_WARN("netiucv: net device %s active with peer " |
2098 | "netiucv: net device %s active with peer %s\n", | 2070 | "%s\n", ndev->name, priv->conn->userid); |
2099 | ndev->name, priv->conn->userid); | ||
2100 | PRINT_WARN("netiucv: %s cannot be removed\n", | 2071 | PRINT_WARN("netiucv: %s cannot be removed\n", |
2101 | ndev->name); | 2072 | ndev->name); |
2102 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); | 2073 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); |
2103 | return -EBUSY; | 2074 | return -EBUSY; |
2104 | } | 2075 | } |
@@ -2106,7 +2077,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) | |||
2106 | netiucv_unregister_device(dev); | 2077 | netiucv_unregister_device(dev); |
2107 | return count; | 2078 | return count; |
2108 | } | 2079 | } |
2109 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | 2080 | read_unlock_bh(&iucv_connection_rwlock); |
2110 | PRINT_WARN("netiucv: net device %s unknown\n", name); | 2081 | PRINT_WARN("netiucv: net device %s unknown\n", name); |
2111 | IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); | 2082 | IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); |
2112 | return -EINVAL; | 2083 | return -EINVAL; |
@@ -2114,67 +2085,86 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) | |||
2114 | 2085 | ||
2115 | static DRIVER_ATTR(remove, 0200, NULL, remove_write); | 2086 | static DRIVER_ATTR(remove, 0200, NULL, remove_write); |
2116 | 2087 | ||
2117 | static void | 2088 | static struct attribute * netiucv_drv_attrs[] = { |
2118 | netiucv_banner(void) | 2089 | &driver_attr_connection.attr, |
2090 | &driver_attr_remove.attr, | ||
2091 | NULL, | ||
2092 | }; | ||
2093 | |||
2094 | static struct attribute_group netiucv_drv_attr_group = { | ||
2095 | .attrs = netiucv_drv_attrs, | ||
2096 | }; | ||
2097 | |||
2098 | static void netiucv_banner(void) | ||
2119 | { | 2099 | { |
2120 | PRINT_INFO("NETIUCV driver initialized\n"); | 2100 | PRINT_INFO("NETIUCV driver initialized\n"); |
2121 | } | 2101 | } |
2122 | 2102 | ||
2123 | static void __exit | 2103 | static void __exit netiucv_exit(void) |
2124 | netiucv_exit(void) | ||
2125 | { | 2104 | { |
2105 | struct iucv_connection *cp; | ||
2106 | struct net_device *ndev; | ||
2107 | struct netiucv_priv *priv; | ||
2108 | struct device *dev; | ||
2109 | |||
2126 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 2110 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
2127 | while (iucv_conns.iucv_connections) { | 2111 | while (!list_empty(&iucv_connection_list)) { |
2128 | struct net_device *ndev = iucv_conns.iucv_connections->netdev; | 2112 | cp = list_entry(iucv_connection_list.next, |
2129 | struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv; | 2113 | struct iucv_connection, list); |
2130 | struct device *dev = priv->dev; | 2114 | list_del(&cp->list); |
2115 | ndev = cp->netdev; | ||
2116 | priv = netdev_priv(ndev); | ||
2117 | dev = priv->dev; | ||
2131 | 2118 | ||
2132 | unregister_netdev(ndev); | 2119 | unregister_netdev(ndev); |
2133 | netiucv_unregister_device(dev); | 2120 | netiucv_unregister_device(dev); |
2134 | } | 2121 | } |
2135 | 2122 | ||
2136 | driver_remove_file(&netiucv_driver, &driver_attr_connection); | 2123 | sysfs_remove_group(&netiucv_driver.kobj, &netiucv_drv_attr_group); |
2137 | driver_remove_file(&netiucv_driver, &driver_attr_remove); | ||
2138 | driver_unregister(&netiucv_driver); | 2124 | driver_unregister(&netiucv_driver); |
2125 | iucv_unregister(&netiucv_handler, 1); | ||
2139 | iucv_unregister_dbf_views(); | 2126 | iucv_unregister_dbf_views(); |
2140 | 2127 | ||
2141 | PRINT_INFO("NETIUCV driver unloaded\n"); | 2128 | PRINT_INFO("NETIUCV driver unloaded\n"); |
2142 | return; | 2129 | return; |
2143 | } | 2130 | } |
2144 | 2131 | ||
2145 | static int __init | 2132 | static int __init netiucv_init(void) |
2146 | netiucv_init(void) | ||
2147 | { | 2133 | { |
2148 | int ret; | 2134 | int rc; |
2149 | 2135 | ||
2150 | ret = iucv_register_dbf_views(); | 2136 | rc = iucv_register_dbf_views(); |
2151 | if (ret) { | 2137 | if (rc) |
2152 | PRINT_WARN("netiucv_init failed, " | 2138 | goto out; |
2153 | "iucv_register_dbf_views rc = %d\n", ret); | 2139 | rc = iucv_register(&netiucv_handler, 1); |
2154 | return ret; | 2140 | if (rc) |
2155 | } | 2141 | goto out_dbf; |
2156 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 2142 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
2157 | ret = driver_register(&netiucv_driver); | 2143 | rc = driver_register(&netiucv_driver); |
2158 | if (ret) { | 2144 | if (rc) { |
2159 | PRINT_ERR("NETIUCV: failed to register driver.\n"); | 2145 | PRINT_ERR("NETIUCV: failed to register driver.\n"); |
2160 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret); | 2146 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); |
2161 | iucv_unregister_dbf_views(); | 2147 | goto out_iucv; |
2162 | return ret; | ||
2163 | } | 2148 | } |
2164 | 2149 | ||
2165 | /* Add entry for specifying connections. */ | 2150 | rc = sysfs_create_group(&netiucv_driver.kobj, &netiucv_drv_attr_group); |
2166 | ret = driver_create_file(&netiucv_driver, &driver_attr_connection); | 2151 | if (rc) { |
2167 | if (!ret) { | 2152 | PRINT_ERR("NETIUCV: failed to add driver attributes.\n"); |
2168 | ret = driver_create_file(&netiucv_driver, &driver_attr_remove); | 2153 | IUCV_DBF_TEXT_(setup, 2, |
2169 | netiucv_banner(); | 2154 | "ret %d - netiucv_drv_attr_group\n", rc); |
2170 | rwlock_init(&iucv_conns.iucv_rwlock); | 2155 | goto out_driver; |
2171 | } else { | ||
2172 | PRINT_ERR("NETIUCV: failed to add driver attribute.\n"); | ||
2173 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret); | ||
2174 | driver_unregister(&netiucv_driver); | ||
2175 | iucv_unregister_dbf_views(); | ||
2176 | } | 2156 | } |
2177 | return ret; | 2157 | netiucv_banner(); |
2158 | return rc; | ||
2159 | |||
2160 | out_driver: | ||
2161 | driver_unregister(&netiucv_driver); | ||
2162 | out_iucv: | ||
2163 | iucv_unregister(&netiucv_handler, 1); | ||
2164 | out_dbf: | ||
2165 | iucv_unregister_dbf_views(); | ||
2166 | out: | ||
2167 | return rc; | ||
2178 | } | 2168 | } |
2179 | 2169 | ||
2180 | module_init(netiucv_init); | 2170 | module_init(netiucv_init); |
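Note: the netiucv hunks above replace the driver's hand-rolled singly linked connection list (iucv_conns.iucv_connections with manual pointer chasing) with a standard list_head protected by a reader/writer lock taken with bottom halves disabled. The fragment below is only a condensed sketch of that idiom, assuming the list and lock are declared as in the patch; the struct is abbreviated and the helper name find_connection is hypothetical.

    /* Sketch of the list_head + rwlock idiom netiucv converts to.
     * Only the members used here are shown. */
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>

    struct iucv_connection {
            struct list_head list;
            char userid[9];
    };

    static LIST_HEAD(iucv_connection_list);
    static DEFINE_RWLOCK(iucv_connection_rwlock);

    /* Look up a connection by userid under the read lock (softirqs off). */
    static struct iucv_connection *find_connection(const char *userid)
    {
            struct iucv_connection *cp;

            read_lock_bh(&iucv_connection_rwlock);
            list_for_each_entry(cp, &iucv_connection_list, list) {
                    if (!strncmp(userid, cp->userid, 9)) {
                            read_unlock_bh(&iucv_connection_rwlock);
                            return cp;
                    }
            }
            read_unlock_bh(&iucv_connection_rwlock);
            return NULL;
    }

Additions take write_lock_bh() around list_add_tail()/list_del_init(), as conn_write() and netiucv_remove_connection() do above.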
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index b8179c27ceb6..3ccca5871fdf 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * IUCV special message driver | 2 | * IUCV special message driver |
3 | * | 3 | * |
4 | * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Copyright 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation |
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | 5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -23,10 +23,10 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/device.h> | 25 | #include <linux/device.h> |
26 | #include <net/iucv/iucv.h> | ||
26 | #include <asm/cpcmd.h> | 27 | #include <asm/cpcmd.h> |
27 | #include <asm/ebcdic.h> | 28 | #include <asm/ebcdic.h> |
28 | 29 | #include "smsgiucv.h" | |
29 | #include "iucv.h" | ||
30 | 30 | ||
31 | struct smsg_callback { | 31 | struct smsg_callback { |
32 | struct list_head list; | 32 | struct list_head list; |
@@ -39,38 +39,46 @@ MODULE_AUTHOR | |||
39 | ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)"); | 39 | ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)"); |
40 | MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); | 40 | MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); |
41 | 41 | ||
42 | static iucv_handle_t smsg_handle; | 42 | static struct iucv_path *smsg_path; |
43 | static unsigned short smsg_pathid; | 43 | |
44 | static DEFINE_SPINLOCK(smsg_list_lock); | 44 | static DEFINE_SPINLOCK(smsg_list_lock); |
45 | static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list); | 45 | static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list); |
46 | 46 | ||
47 | static void | 47 | static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]); |
48 | smsg_connection_complete(iucv_ConnectionComplete *eib, void *pgm_data) | 48 | static void smsg_message_pending(struct iucv_path *, struct iucv_message *); |
49 | |||
50 | static struct iucv_handler smsg_handler = { | ||
51 | .path_pending = smsg_path_pending, | ||
52 | .message_pending = smsg_message_pending, | ||
53 | }; | ||
54 | |||
55 | static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8], | ||
56 | u8 ipuser[16]) | ||
49 | { | 57 | { |
58 | if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0) | ||
59 | return -EINVAL; | ||
60 | /* Path pending from *MSG. */ | ||
61 | return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL); | ||
50 | } | 62 | } |
51 | 63 | ||
52 | 64 | static void smsg_message_pending(struct iucv_path *path, | |
53 | static void | 65 | struct iucv_message *msg) |
54 | smsg_message_pending(iucv_MessagePending *eib, void *pgm_data) | ||
55 | { | 66 | { |
56 | struct smsg_callback *cb; | 67 | struct smsg_callback *cb; |
57 | unsigned char *msg; | 68 | unsigned char *buffer; |
58 | unsigned char sender[9]; | 69 | unsigned char sender[9]; |
59 | unsigned short len; | ||
60 | int rc, i; | 70 | int rc, i; |
61 | 71 | ||
62 | len = eib->ln1msg2.ipbfln1f; | 72 | buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA); |
63 | msg = kmalloc(len + 1, GFP_ATOMIC|GFP_DMA); | 73 | if (!buffer) { |
64 | if (!msg) { | 74 | iucv_message_reject(path, msg); |
65 | iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls); | ||
66 | return; | 75 | return; |
67 | } | 76 | } |
68 | rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls, | 77 | rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL); |
69 | msg, len, NULL, NULL, NULL); | ||
70 | if (rc == 0) { | 78 | if (rc == 0) { |
71 | msg[len] = 0; | 79 | buffer[msg->length] = 0; |
72 | EBCASC(msg, len); | 80 | EBCASC(buffer, msg->length); |
73 | memcpy(sender, msg, 8); | 81 | memcpy(sender, buffer, 8); |
74 | sender[8] = 0; | 82 | sender[8] = 0; |
75 | /* Remove trailing whitespace from the sender name. */ | 83 | /* Remove trailing whitespace from the sender name. */ |
76 | for (i = 7; i >= 0; i--) { | 84 | for (i = 7; i >= 0; i--) { |
@@ -80,27 +88,17 @@ smsg_message_pending(iucv_MessagePending *eib, void *pgm_data) | |||
80 | } | 88 | } |
81 | spin_lock(&smsg_list_lock); | 89 | spin_lock(&smsg_list_lock); |
82 | list_for_each_entry(cb, &smsg_list, list) | 90 | list_for_each_entry(cb, &smsg_list, list) |
83 | if (strncmp(msg + 8, cb->prefix, cb->len) == 0) { | 91 | if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) { |
84 | cb->callback(sender, msg + 8); | 92 | cb->callback(sender, buffer + 8); |
85 | break; | 93 | break; |
86 | } | 94 | } |
87 | spin_unlock(&smsg_list_lock); | 95 | spin_unlock(&smsg_list_lock); |
88 | } | 96 | } |
89 | kfree(msg); | 97 | kfree(buffer); |
90 | } | 98 | } |
91 | 99 | ||
92 | static iucv_interrupt_ops_t smsg_ops = { | 100 | int smsg_register_callback(char *prefix, |
93 | .ConnectionComplete = smsg_connection_complete, | 101 | void (*callback)(char *from, char *str)) |
94 | .MessagePending = smsg_message_pending, | ||
95 | }; | ||
96 | |||
97 | static struct device_driver smsg_driver = { | ||
98 | .name = "SMSGIUCV", | ||
99 | .bus = &iucv_bus, | ||
100 | }; | ||
101 | |||
102 | int | ||
103 | smsg_register_callback(char *prefix, void (*callback)(char *from, char *str)) | ||
104 | { | 102 | { |
105 | struct smsg_callback *cb; | 103 | struct smsg_callback *cb; |
106 | 104 | ||
@@ -110,18 +108,18 @@ smsg_register_callback(char *prefix, void (*callback)(char *from, char *str)) | |||
110 | cb->prefix = prefix; | 108 | cb->prefix = prefix; |
111 | cb->len = strlen(prefix); | 109 | cb->len = strlen(prefix); |
112 | cb->callback = callback; | 110 | cb->callback = callback; |
113 | spin_lock(&smsg_list_lock); | 111 | spin_lock_bh(&smsg_list_lock); |
114 | list_add_tail(&cb->list, &smsg_list); | 112 | list_add_tail(&cb->list, &smsg_list); |
115 | spin_unlock(&smsg_list_lock); | 113 | spin_unlock_bh(&smsg_list_lock); |
116 | return 0; | 114 | return 0; |
117 | } | 115 | } |
118 | 116 | ||
119 | void | 117 | void smsg_unregister_callback(char *prefix, |
120 | smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str)) | 118 | void (*callback)(char *from, char *str)) |
121 | { | 119 | { |
122 | struct smsg_callback *cb, *tmp; | 120 | struct smsg_callback *cb, *tmp; |
123 | 121 | ||
124 | spin_lock(&smsg_list_lock); | 122 | spin_lock_bh(&smsg_list_lock); |
125 | cb = NULL; | 123 | cb = NULL; |
126 | list_for_each_entry(tmp, &smsg_list, list) | 124 | list_for_each_entry(tmp, &smsg_list, list) |
127 | if (tmp->callback == callback && | 125 | if (tmp->callback == callback && |
@@ -130,55 +128,58 @@ smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str)) | |||
130 | list_del(&cb->list); | 128 | list_del(&cb->list); |
131 | break; | 129 | break; |
132 | } | 130 | } |
133 | spin_unlock(&smsg_list_lock); | 131 | spin_unlock_bh(&smsg_list_lock); |
134 | kfree(cb); | 132 | kfree(cb); |
135 | } | 133 | } |
136 | 134 | ||
137 | static void __exit | 135 | static struct device_driver smsg_driver = { |
138 | smsg_exit(void) | 136 | .name = "SMSGIUCV", |
137 | .bus = &iucv_bus, | ||
138 | }; | ||
139 | |||
140 | static void __exit smsg_exit(void) | ||
139 | { | 141 | { |
140 | if (smsg_handle > 0) { | 142 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); |
141 | cpcmd("SET SMSG OFF", NULL, 0, NULL); | 143 | iucv_unregister(&smsg_handler, 1); |
142 | iucv_sever(smsg_pathid, NULL); | 144 | driver_unregister(&smsg_driver); |
143 | iucv_unregister_program(smsg_handle); | ||
144 | driver_unregister(&smsg_driver); | ||
145 | } | ||
146 | return; | ||
147 | } | 145 | } |
148 | 146 | ||
149 | static int __init | 147 | static int __init smsg_init(void) |
150 | smsg_init(void) | ||
151 | { | 148 | { |
152 | static unsigned char pgmmask[24] = { | ||
153 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
154 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | ||
155 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff | ||
156 | }; | ||
157 | int rc; | 149 | int rc; |
158 | 150 | ||
159 | rc = driver_register(&smsg_driver); | 151 | rc = driver_register(&smsg_driver); |
160 | if (rc != 0) { | 152 | if (rc != 0) |
161 | printk(KERN_ERR "SMSGIUCV: failed to register driver.\n"); | 153 | goto out; |
162 | return rc; | 154 | rc = iucv_register(&smsg_handler, 1); |
163 | } | 155 | if (rc) { |
164 | smsg_handle = iucv_register_program("SMSGIUCV ", "*MSG ", | ||
165 | pgmmask, &smsg_ops, NULL); | ||
166 | if (!smsg_handle) { | ||
167 | printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); | 156 | printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); |
168 | driver_unregister(&smsg_driver); | 157 | rc = -EIO; /* better errno ? */ |
169 | return -EIO; /* better errno ? */ | 158 | goto out_driver; |
159 | } | ||
160 | smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); | ||
161 | if (!smsg_path) { | ||
162 | rc = -ENOMEM; | ||
163 | goto out_register; | ||
170 | } | 164 | } |
171 | rc = iucv_connect (&smsg_pathid, 255, NULL, "*MSG ", NULL, 0, | 165 | rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", |
172 | NULL, NULL, smsg_handle, NULL); | 166 | NULL, NULL, NULL); |
173 | if (rc) { | 167 | if (rc) { |
174 | printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); | 168 | printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); |
175 | iucv_unregister_program(smsg_handle); | 169 | rc = -EIO; /* better errno ? */ |
176 | driver_unregister(&smsg_driver); | 170 | goto out_free; |
177 | smsg_handle = NULL; | ||
178 | return -EIO; | ||
179 | } | 171 | } |
180 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | 172 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); |
181 | return 0; | 173 | return 0; |
174 | |||
175 | out_free: | ||
176 | iucv_path_free(smsg_path); | ||
177 | out_register: | ||
178 | iucv_unregister(&smsg_handler, 1); | ||
179 | out_driver: | ||
180 | driver_unregister(&smsg_driver); | ||
181 | out: | ||
182 | return rc; | ||
182 | } | 183 | } |
183 | 184 | ||
184 | module_init(smsg_init); | 185 | module_init(smsg_init); |
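Note: smsgiucv moves from the old iucv_register_program()/iucv_connect() interface to the new callback-based API (iucv_register() with a struct iucv_handler, iucv_path_accept(), iucv_message_receive()/iucv_message_reject()), with goto labels unwinding each setup step in reverse. A condensed registration skeleton is sketched below; the demo_* names and callback bodies are placeholders, not part of the patch, and the second argument to iucv_register()/iucv_unregister() simply mirrors the value the patch passes.

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/types.h>
    #include <net/iucv/iucv.h>

    static int demo_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
    static void demo_message_pending(struct iucv_path *, struct iucv_message *);

    static struct iucv_handler demo_handler = {
            .path_pending    = demo_path_pending,
            .message_pending = demo_message_pending,
    };

    static int demo_path_pending(struct iucv_path *path, u8 ipvmid[8],
                                 u8 ipuser[16])
    {
            /* Accept every pending path for this demo handler. */
            return iucv_path_accept(path, &demo_handler, NULL, NULL);
    }

    static void demo_message_pending(struct iucv_path *path,
                                     struct iucv_message *msg)
    {
            /* Reject messages; a real handler would iucv_message_receive() here. */
            iucv_message_reject(path, msg);
    }

    static int __init demo_init(void)
    {
            return iucv_register(&demo_handler, 1);
    }

    static void __exit demo_exit(void)
    {
            iucv_unregister(&demo_handler, 1);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");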
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c index 3c912ee29da0..8b5334c56f0a 100644 --- a/drivers/scsi/NCR53C9x.c +++ b/drivers/scsi/NCR53C9x.c | |||
@@ -528,12 +528,16 @@ void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs) | |||
528 | /* Allocate structure and insert basic data such as SCSI chip frequency | 528 | /* Allocate structure and insert basic data such as SCSI chip frequency |
529 | * data and a pointer to the device | 529 | * data and a pointer to the device |
530 | */ | 530 | */ |
531 | struct NCR_ESP* esp_allocate(struct scsi_host_template *tpnt, void *esp_dev) | 531 | struct NCR_ESP* esp_allocate(struct scsi_host_template *tpnt, void *esp_dev, |
532 | int hotplug) | ||
532 | { | 533 | { |
533 | struct NCR_ESP *esp, *elink; | 534 | struct NCR_ESP *esp, *elink; |
534 | struct Scsi_Host *esp_host; | 535 | struct Scsi_Host *esp_host; |
535 | 536 | ||
536 | esp_host = scsi_register(tpnt, sizeof(struct NCR_ESP)); | 537 | if (hotplug) |
538 | esp_host = scsi_host_alloc(tpnt, sizeof(struct NCR_ESP)); | ||
539 | else | ||
540 | esp_host = scsi_register(tpnt, sizeof(struct NCR_ESP)); | ||
537 | if(!esp_host) | 541 | if(!esp_host) |
538 | panic("Cannot register ESP SCSI host"); | 542 | panic("Cannot register ESP SCSI host"); |
539 | esp = (struct NCR_ESP *) esp_host->hostdata; | 543 | esp = (struct NCR_ESP *) esp_host->hostdata; |
diff --git a/drivers/scsi/NCR53C9x.h b/drivers/scsi/NCR53C9x.h index 521e3f842cfd..d85cb73a9f69 100644 --- a/drivers/scsi/NCR53C9x.h +++ b/drivers/scsi/NCR53C9x.h | |||
@@ -652,7 +652,7 @@ extern int nesps, esps_in_use, esps_running; | |||
652 | 652 | ||
653 | /* External functions */ | 653 | /* External functions */ |
654 | extern void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs); | 654 | extern void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs); |
655 | extern struct NCR_ESP *esp_allocate(struct scsi_host_template *, void *); | 655 | extern struct NCR_ESP *esp_allocate(struct scsi_host_template *, void *, int); |
656 | extern void esp_deallocate(struct NCR_ESP *); | 656 | extern void esp_deallocate(struct NCR_ESP *); |
657 | extern void esp_release(void); | 657 | extern void esp_release(void); |
658 | extern void esp_initialize(struct NCR_ESP *); | 658 | extern void esp_initialize(struct NCR_ESP *); |
diff --git a/drivers/scsi/blz1230.c b/drivers/scsi/blz1230.c index 329a8f297b31..23f7c24ab809 100644 --- a/drivers/scsi/blz1230.c +++ b/drivers/scsi/blz1230.c | |||
@@ -121,7 +121,8 @@ int __init blz1230_esp_detect(struct scsi_host_template *tpnt) | |||
121 | */ | 121 | */ |
122 | address = ZTWO_VADDR(board); | 122 | address = ZTWO_VADDR(board); |
123 | eregs = (struct ESP_regs *)(address + REAL_BLZ1230_ESP_ADDR); | 123 | eregs = (struct ESP_regs *)(address + REAL_BLZ1230_ESP_ADDR); |
124 | esp = esp_allocate(tpnt, (void *)board+REAL_BLZ1230_ESP_ADDR); | 124 | esp = esp_allocate(tpnt, (void *)board + REAL_BLZ1230_ESP_ADDR, |
125 | 0); | ||
125 | 126 | ||
126 | esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7)); | 127 | esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7)); |
127 | udelay(5); | 128 | udelay(5); |
diff --git a/drivers/scsi/blz2060.c b/drivers/scsi/blz2060.c index b6c137b97350..b6203ec00961 100644 --- a/drivers/scsi/blz2060.c +++ b/drivers/scsi/blz2060.c | |||
@@ -100,7 +100,7 @@ int __init blz2060_esp_detect(struct scsi_host_template *tpnt) | |||
100 | unsigned long board = z->resource.start; | 100 | unsigned long board = z->resource.start; |
101 | if (request_mem_region(board+BLZ2060_ESP_ADDR, | 101 | if (request_mem_region(board+BLZ2060_ESP_ADDR, |
102 | sizeof(struct ESP_regs), "NCR53C9x")) { | 102 | sizeof(struct ESP_regs), "NCR53C9x")) { |
103 | esp = esp_allocate(tpnt, (void *)board+BLZ2060_ESP_ADDR); | 103 | esp = esp_allocate(tpnt, (void *)board + BLZ2060_ESP_ADDR, 0); |
104 | 104 | ||
105 | /* Do command transfer with programmed I/O */ | 105 | /* Do command transfer with programmed I/O */ |
106 | esp->do_pio_cmds = 1; | 106 | esp->do_pio_cmds = 1; |
diff --git a/drivers/scsi/cyberstorm.c b/drivers/scsi/cyberstorm.c index 7c7cfb54e897..c6b98a42e89d 100644 --- a/drivers/scsi/cyberstorm.c +++ b/drivers/scsi/cyberstorm.c | |||
@@ -126,7 +126,7 @@ int __init cyber_esp_detect(struct scsi_host_template *tpnt) | |||
126 | sizeof(struct ESP_regs)); | 126 | sizeof(struct ESP_regs)); |
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | esp = esp_allocate(tpnt, (void *)board+CYBER_ESP_ADDR); | 129 | esp = esp_allocate(tpnt, (void *)board + CYBER_ESP_ADDR, 0); |
130 | 130 | ||
131 | /* Do command transfer with programmed I/O */ | 131 | /* Do command transfer with programmed I/O */ |
132 | esp->do_pio_cmds = 1; | 132 | esp->do_pio_cmds = 1; |
diff --git a/drivers/scsi/cyberstormII.c b/drivers/scsi/cyberstormII.c index d88cb9cf091e..e336e853e66f 100644 --- a/drivers/scsi/cyberstormII.c +++ b/drivers/scsi/cyberstormII.c | |||
@@ -98,7 +98,7 @@ int __init cyberII_esp_detect(struct scsi_host_template *tpnt) | |||
98 | address = (unsigned long)ZTWO_VADDR(board); | 98 | address = (unsigned long)ZTWO_VADDR(board); |
99 | eregs = (struct ESP_regs *)(address + CYBERII_ESP_ADDR); | 99 | eregs = (struct ESP_regs *)(address + CYBERII_ESP_ADDR); |
100 | 100 | ||
101 | esp = esp_allocate(tpnt, (void *)board+CYBERII_ESP_ADDR); | 101 | esp = esp_allocate(tpnt, (void *)board + CYBERII_ESP_ADDR, 0); |
102 | 102 | ||
103 | esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7)); | 103 | esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7)); |
104 | udelay(5); | 104 | udelay(5); |
diff --git a/drivers/scsi/dec_esp.c b/drivers/scsi/dec_esp.c index c29ccbc44693..d42ad663ffee 100644 --- a/drivers/scsi/dec_esp.c +++ b/drivers/scsi/dec_esp.c | |||
@@ -18,7 +18,7 @@ | |||
18 | * 20001005 - Initialization fixes for 2.4.0-test9 | 18 | * 20001005 - Initialization fixes for 2.4.0-test9 |
19 | * Florian Lohoff <flo@rfc822.org> | 19 | * Florian Lohoff <flo@rfc822.org> |
20 | * | 20 | * |
21 | * Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki | 21 | * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/proc_fs.h> | 30 | #include <linux/proc_fs.h> |
31 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
32 | #include <linux/stat.h> | 32 | #include <linux/stat.h> |
33 | #include <linux/tc.h> | ||
33 | 34 | ||
34 | #include <asm/dma.h> | 35 | #include <asm/dma.h> |
35 | #include <asm/irq.h> | 36 | #include <asm/irq.h> |
@@ -42,7 +43,6 @@ | |||
42 | #include <asm/dec/ioasic_ints.h> | 43 | #include <asm/dec/ioasic_ints.h> |
43 | #include <asm/dec/machtype.h> | 44 | #include <asm/dec/machtype.h> |
44 | #include <asm/dec/system.h> | 45 | #include <asm/dec/system.h> |
45 | #include <asm/dec/tc.h> | ||
46 | 46 | ||
47 | #define DEC_SCSI_SREG 0 | 47 | #define DEC_SCSI_SREG 0 |
48 | #define DEC_SCSI_DMAREG 0x40000 | 48 | #define DEC_SCSI_DMAREG 0x40000 |
@@ -98,51 +98,33 @@ static irqreturn_t scsi_dma_merr_int(int, void *); | |||
98 | static irqreturn_t scsi_dma_err_int(int, void *); | 98 | static irqreturn_t scsi_dma_err_int(int, void *); |
99 | static irqreturn_t scsi_dma_int(int, void *); | 99 | static irqreturn_t scsi_dma_int(int, void *); |
100 | 100 | ||
101 | static int dec_esp_detect(struct scsi_host_template * tpnt); | 101 | static struct scsi_host_template dec_esp_template = { |
102 | 102 | .module = THIS_MODULE, | |
103 | static int dec_esp_release(struct Scsi_Host *shost) | ||
104 | { | ||
105 | if (shost->irq) | ||
106 | free_irq(shost->irq, NULL); | ||
107 | if (shost->io_port && shost->n_io_port) | ||
108 | release_region(shost->io_port, shost->n_io_port); | ||
109 | scsi_unregister(shost); | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static struct scsi_host_template driver_template = { | ||
114 | .proc_name = "dec_esp", | ||
115 | .proc_info = esp_proc_info, | ||
116 | .name = "NCR53C94", | 103 | .name = "NCR53C94", |
117 | .detect = dec_esp_detect, | ||
118 | .slave_alloc = esp_slave_alloc, | ||
119 | .slave_destroy = esp_slave_destroy, | ||
120 | .release = dec_esp_release, | ||
121 | .info = esp_info, | 104 | .info = esp_info, |
122 | .queuecommand = esp_queue, | 105 | .queuecommand = esp_queue, |
123 | .eh_abort_handler = esp_abort, | 106 | .eh_abort_handler = esp_abort, |
124 | .eh_bus_reset_handler = esp_reset, | 107 | .eh_bus_reset_handler = esp_reset, |
108 | .slave_alloc = esp_slave_alloc, | ||
109 | .slave_destroy = esp_slave_destroy, | ||
110 | .proc_info = esp_proc_info, | ||
111 | .proc_name = "dec_esp", | ||
125 | .can_queue = 7, | 112 | .can_queue = 7, |
126 | .this_id = 7, | ||
127 | .sg_tablesize = SG_ALL, | 113 | .sg_tablesize = SG_ALL, |
128 | .cmd_per_lun = 1, | 114 | .cmd_per_lun = 1, |
129 | .use_clustering = DISABLE_CLUSTERING, | 115 | .use_clustering = DISABLE_CLUSTERING, |
130 | }; | 116 | }; |
131 | 117 | ||
132 | 118 | static struct NCR_ESP *dec_esp_platform; | |
133 | #include "scsi_module.c" | ||
134 | 119 | ||
135 | /***************************************************************** Detection */ | 120 | /***************************************************************** Detection */ |
136 | static int dec_esp_detect(struct scsi_host_template * tpnt) | 121 | static int dec_esp_platform_probe(void) |
137 | { | 122 | { |
138 | struct NCR_ESP *esp; | 123 | struct NCR_ESP *esp; |
139 | struct ConfigDev *esp_dev; | 124 | int err = 0; |
140 | int slot; | ||
141 | unsigned long mem_start; | ||
142 | 125 | ||
143 | if (IOASIC) { | 126 | if (IOASIC) { |
144 | esp_dev = 0; | 127 | esp = esp_allocate(&dec_esp_template, NULL, 1); |
145 | esp = esp_allocate(tpnt, (void *) esp_dev); | ||
146 | 128 | ||
147 | /* Do command transfer with programmed I/O */ | 129 | /* Do command transfer with programmed I/O */ |
148 | esp->do_pio_cmds = 1; | 130 | esp->do_pio_cmds = 1; |
@@ -200,112 +182,175 @@ static int dec_esp_detect(struct scsi_host_template * tpnt) | |||
200 | /* Check for differential SCSI-bus */ | 182 | /* Check for differential SCSI-bus */ |
201 | esp->diff = 0; | 183 | esp->diff = 0; |
202 | 184 | ||
185 | err = request_irq(esp->irq, esp_intr, IRQF_DISABLED, | ||
186 | "ncr53c94", esp->ehost); | ||
187 | if (err) | ||
188 | goto err_alloc; | ||
189 | err = request_irq(dec_interrupt[DEC_IRQ_ASC_MERR], | ||
190 | scsi_dma_merr_int, IRQF_DISABLED, | ||
191 | "ncr53c94 error", esp->ehost); | ||
192 | if (err) | ||
193 | goto err_irq; | ||
194 | err = request_irq(dec_interrupt[DEC_IRQ_ASC_ERR], | ||
195 | scsi_dma_err_int, IRQF_DISABLED, | ||
196 | "ncr53c94 overrun", esp->ehost); | ||
197 | if (err) | ||
198 | goto err_irq_merr; | ||
199 | err = request_irq(dec_interrupt[DEC_IRQ_ASC_DMA], scsi_dma_int, | ||
200 | IRQF_DISABLED, "ncr53c94 dma", esp->ehost); | ||
201 | if (err) | ||
202 | goto err_irq_err; | ||
203 | |||
203 | esp_initialize(esp); | 204 | esp_initialize(esp); |
204 | 205 | ||
205 | if (request_irq(esp->irq, esp_intr, IRQF_DISABLED, | 206 | err = scsi_add_host(esp->ehost, NULL); |
206 | "ncr53c94", esp->ehost)) | 207 | if (err) { |
207 | goto err_dealloc; | 208 | printk(KERN_ERR "ESP: Unable to register adapter\n"); |
208 | if (request_irq(dec_interrupt[DEC_IRQ_ASC_MERR], | 209 | goto err_irq_dma; |
209 | scsi_dma_merr_int, IRQF_DISABLED, | 210 | } |
210 | "ncr53c94 error", esp->ehost)) | 211 | |
211 | goto err_free_irq; | 212 | scsi_scan_host(esp->ehost); |
212 | if (request_irq(dec_interrupt[DEC_IRQ_ASC_ERR], | ||
213 | scsi_dma_err_int, IRQF_DISABLED, | ||
214 | "ncr53c94 overrun", esp->ehost)) | ||
215 | goto err_free_irq_merr; | ||
216 | if (request_irq(dec_interrupt[DEC_IRQ_ASC_DMA], | ||
217 | scsi_dma_int, IRQF_DISABLED, | ||
218 | "ncr53c94 dma", esp->ehost)) | ||
219 | goto err_free_irq_err; | ||
220 | 213 | ||
214 | dec_esp_platform = esp; | ||
221 | } | 215 | } |
222 | 216 | ||
223 | if (TURBOCHANNEL) { | 217 | return 0; |
224 | while ((slot = search_tc_card("PMAZ-AA")) >= 0) { | 218 | |
225 | claim_tc_card(slot); | 219 | err_irq_dma: |
226 | 220 | free_irq(dec_interrupt[DEC_IRQ_ASC_DMA], esp->ehost); | |
227 | esp_dev = 0; | 221 | err_irq_err: |
228 | esp = esp_allocate(tpnt, (void *) esp_dev); | 222 | free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], esp->ehost); |
229 | 223 | err_irq_merr: | |
230 | mem_start = get_tc_base_addr(slot); | 224 | free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], esp->ehost); |
231 | 225 | err_irq: | |
232 | /* Store base addr into esp struct */ | 226 | free_irq(esp->irq, esp->ehost); |
233 | esp->slot = CPHYSADDR(mem_start); | 227 | err_alloc: |
234 | 228 | esp_deallocate(esp); | |
235 | esp->dregs = 0; | 229 | scsi_host_put(esp->ehost); |
236 | esp->eregs = (void *)CKSEG1ADDR(mem_start + | 230 | return err; |
237 | DEC_SCSI_SREG); | 231 | } |
238 | esp->do_pio_cmds = 1; | 232 | |
239 | 233 | static int __init dec_esp_probe(struct device *dev) | |
240 | /* Set the command buffer */ | 234 | { |
241 | esp->esp_command = (volatile unsigned char *) pmaz_cmd_buffer; | 235 | struct NCR_ESP *esp; |
242 | 236 | resource_size_t start, len; | |
243 | /* get virtual dma address for command buffer */ | 237 | int err; |
244 | esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer); | 238 | |
245 | 239 | esp = esp_allocate(&dec_esp_template, NULL, 1); | |
246 | esp->cfreq = get_tc_speed(); | 240 | |
247 | 241 | dev_set_drvdata(dev, esp); | |
248 | esp->irq = get_tc_irq_nr(slot); | 242 | |
249 | 243 | start = to_tc_dev(dev)->resource.start; | |
250 | /* Required functions */ | 244 | len = to_tc_dev(dev)->resource.end - start + 1; |
251 | esp->dma_bytes_sent = &dma_bytes_sent; | 245 | |
252 | esp->dma_can_transfer = &dma_can_transfer; | 246 | if (!request_mem_region(start, len, dev->bus_id)) { |
253 | esp->dma_dump_state = &dma_dump_state; | 247 | printk(KERN_ERR "%s: Unable to reserve MMIO resource\n", |
254 | esp->dma_init_read = &pmaz_dma_init_read; | 248 | dev->bus_id); |
255 | esp->dma_init_write = &pmaz_dma_init_write; | 249 | err = -EBUSY; |
256 | esp->dma_ints_off = &pmaz_dma_ints_off; | 250 | goto err_alloc; |
257 | esp->dma_ints_on = &pmaz_dma_ints_on; | ||
258 | esp->dma_irq_p = &dma_irq_p; | ||
259 | esp->dma_ports_p = &dma_ports_p; | ||
260 | esp->dma_setup = &pmaz_dma_setup; | ||
261 | |||
262 | /* Optional functions */ | ||
263 | esp->dma_barrier = 0; | ||
264 | esp->dma_drain = &pmaz_dma_drain; | ||
265 | esp->dma_invalidate = 0; | ||
266 | esp->dma_irq_entry = 0; | ||
267 | esp->dma_irq_exit = 0; | ||
268 | esp->dma_poll = 0; | ||
269 | esp->dma_reset = 0; | ||
270 | esp->dma_led_off = 0; | ||
271 | esp->dma_led_on = 0; | ||
272 | |||
273 | esp->dma_mmu_get_scsi_one = pmaz_dma_mmu_get_scsi_one; | ||
274 | esp->dma_mmu_get_scsi_sgl = 0; | ||
275 | esp->dma_mmu_release_scsi_one = 0; | ||
276 | esp->dma_mmu_release_scsi_sgl = 0; | ||
277 | esp->dma_advance_sg = 0; | ||
278 | |||
279 | if (request_irq(esp->irq, esp_intr, IRQF_DISABLED, | ||
280 | "PMAZ_AA", esp->ehost)) { | ||
281 | esp_deallocate(esp); | ||
282 | release_tc_card(slot); | ||
283 | continue; | ||
284 | } | ||
285 | esp->scsi_id = 7; | ||
286 | esp->diff = 0; | ||
287 | esp_initialize(esp); | ||
288 | } | ||
289 | } | 251 | } |
290 | 252 | ||
291 | if(nesps) { | 253 | /* Store base addr into esp struct. */ |
292 | printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use); | 254 | esp->slot = start; |
293 | esps_running = esps_in_use; | 255 | |
294 | return esps_in_use; | 256 | esp->dregs = 0; |
257 | esp->eregs = (void *)CKSEG1ADDR(start + DEC_SCSI_SREG); | ||
258 | esp->do_pio_cmds = 1; | ||
259 | |||
260 | /* Set the command buffer. */ | ||
261 | esp->esp_command = (volatile unsigned char *)pmaz_cmd_buffer; | ||
262 | |||
263 | /* Get virtual dma address for command buffer. */ | ||
264 | esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer); | ||
265 | |||
266 | esp->cfreq = tc_get_speed(to_tc_dev(dev)->bus); | ||
267 | |||
268 | esp->irq = to_tc_dev(dev)->interrupt; | ||
269 | |||
270 | /* Required functions. */ | ||
271 | esp->dma_bytes_sent = &dma_bytes_sent; | ||
272 | esp->dma_can_transfer = &dma_can_transfer; | ||
273 | esp->dma_dump_state = &dma_dump_state; | ||
274 | esp->dma_init_read = &pmaz_dma_init_read; | ||
275 | esp->dma_init_write = &pmaz_dma_init_write; | ||
276 | esp->dma_ints_off = &pmaz_dma_ints_off; | ||
277 | esp->dma_ints_on = &pmaz_dma_ints_on; | ||
278 | esp->dma_irq_p = &dma_irq_p; | ||
279 | esp->dma_ports_p = &dma_ports_p; | ||
280 | esp->dma_setup = &pmaz_dma_setup; | ||
281 | |||
282 | /* Optional functions. */ | ||
283 | esp->dma_barrier = 0; | ||
284 | esp->dma_drain = &pmaz_dma_drain; | ||
285 | esp->dma_invalidate = 0; | ||
286 | esp->dma_irq_entry = 0; | ||
287 | esp->dma_irq_exit = 0; | ||
288 | esp->dma_poll = 0; | ||
289 | esp->dma_reset = 0; | ||
290 | esp->dma_led_off = 0; | ||
291 | esp->dma_led_on = 0; | ||
292 | |||
293 | esp->dma_mmu_get_scsi_one = pmaz_dma_mmu_get_scsi_one; | ||
294 | esp->dma_mmu_get_scsi_sgl = 0; | ||
295 | esp->dma_mmu_release_scsi_one = 0; | ||
296 | esp->dma_mmu_release_scsi_sgl = 0; | ||
297 | esp->dma_advance_sg = 0; | ||
298 | |||
299 | err = request_irq(esp->irq, esp_intr, IRQF_DISABLED, "PMAZ_AA", | ||
300 | esp->ehost); | ||
301 | if (err) { | ||
302 | printk(KERN_ERR "%s: Unable to get IRQ %d\n", | ||
303 | dev->bus_id, esp->irq); | ||
304 | goto err_resource; | ||
305 | } | ||
306 | |||
307 | esp->scsi_id = 7; | ||
308 | esp->diff = 0; | ||
309 | esp_initialize(esp); | ||
310 | |||
311 | err = scsi_add_host(esp->ehost, dev); | ||
312 | if (err) { | ||
313 | printk(KERN_ERR "%s: Unable to register adapter\n", | ||
314 | dev->bus_id); | ||
315 | goto err_irq; | ||
295 | } | 316 | } |
317 | |||
318 | scsi_scan_host(esp->ehost); | ||
319 | |||
296 | return 0; | 320 | return 0; |
297 | 321 | ||
298 | err_free_irq_err: | 322 | err_irq: |
299 | free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], scsi_dma_err_int); | 323 | free_irq(esp->irq, esp->ehost); |
300 | err_free_irq_merr: | 324 | |
301 | free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], scsi_dma_merr_int); | 325 | err_resource: |
302 | err_free_irq: | 326 | release_mem_region(start, len); |
303 | free_irq(esp->irq, esp_intr); | 327 | |
304 | err_dealloc: | 328 | err_alloc: |
305 | esp_deallocate(esp); | 329 | esp_deallocate(esp); |
306 | return 0; | 330 | scsi_host_put(esp->ehost); |
331 | return err; | ||
332 | } | ||
333 | |||
334 | static void __exit dec_esp_platform_remove(void) | ||
335 | { | ||
336 | struct NCR_ESP *esp = dec_esp_platform; | ||
337 | |||
338 | free_irq(esp->irq, esp->ehost); | ||
339 | esp_deallocate(esp); | ||
340 | scsi_host_put(esp->ehost); | ||
341 | dec_esp_platform = NULL; | ||
307 | } | 342 | } |
308 | 343 | ||
344 | static void __exit dec_esp_remove(struct device *dev) | ||
345 | { | ||
346 | struct NCR_ESP *esp = dev_get_drvdata(dev); | ||
347 | |||
348 | free_irq(esp->irq, esp->ehost); | ||
349 | esp_deallocate(esp); | ||
350 | scsi_host_put(esp->ehost); | ||
351 | } | ||
352 | |||
353 | |||
309 | /************************************************************* DMA Functions */ | 354 | /************************************************************* DMA Functions */ |
310 | static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id) | 355 | static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id) |
311 | { | 356 | { |
@@ -576,3 +621,67 @@ static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp | |||
576 | { | 621 | { |
577 | sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer); | 622 | sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer); |
578 | } | 623 | } |
624 | |||
625 | |||
626 | #ifdef CONFIG_TC | ||
627 | static int __init dec_esp_tc_probe(struct device *dev); | ||
628 | static int __exit dec_esp_tc_remove(struct device *dev); | ||
629 | |||
630 | static const struct tc_device_id dec_esp_tc_table[] = { | ||
631 | { "DEC ", "PMAZ-AA " }, | ||
632 | { } | ||
633 | }; | ||
634 | MODULE_DEVICE_TABLE(tc, dec_esp_tc_table); | ||
635 | |||
636 | static struct tc_driver dec_esp_tc_driver = { | ||
637 | .id_table = dec_esp_tc_table, | ||
638 | .driver = { | ||
639 | .name = "dec_esp", | ||
640 | .bus = &tc_bus_type, | ||
641 | .probe = dec_esp_tc_probe, | ||
642 | .remove = __exit_p(dec_esp_tc_remove), | ||
643 | }, | ||
644 | }; | ||
645 | |||
646 | static int __init dec_esp_tc_probe(struct device *dev) | ||
647 | { | ||
648 | int status = dec_esp_probe(dev); | ||
649 | if (!status) | ||
650 | get_device(dev); | ||
651 | return status; | ||
652 | } | ||
653 | |||
654 | static int __exit dec_esp_tc_remove(struct device *dev) | ||
655 | { | ||
656 | put_device(dev); | ||
657 | dec_esp_remove(dev); | ||
658 | return 0; | ||
659 | } | ||
660 | #endif | ||
661 | |||
662 | static int __init dec_esp_init(void) | ||
663 | { | ||
664 | int status; | ||
665 | |||
666 | status = tc_register_driver(&dec_esp_tc_driver); | ||
667 | if (!status) | ||
668 | dec_esp_platform_probe(); | ||
669 | |||
670 | if (nesps) { | ||
671 | pr_info("ESP: Total of %d ESP hosts found, " | ||
672 | "%d actually in use.\n", nesps, esps_in_use); | ||
673 | esps_running = esps_in_use; | ||
674 | } | ||
675 | |||
676 | return status; | ||
677 | } | ||
678 | |||
679 | static void __exit dec_esp_exit(void) | ||
680 | { | ||
681 | dec_esp_platform_remove(); | ||
682 | tc_unregister_driver(&dec_esp_tc_driver); | ||
683 | } | ||
684 | |||
685 | |||
686 | module_init(dec_esp_init); | ||
687 | module_exit(dec_esp_exit); | ||
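Note: the dec_esp changes retire the search_tc_card()/claim_tc_card() probe loop in favour of the generic TURBOchannel bus driver model: a tc_device_id table, a tc_driver wrapping probe/remove callbacks, and tc_register_driver() at module init. The skeleton below is a minimal illustration of that pattern; the demo_* names are placeholders, the id strings are assumed to be space padded as in the patch, and the probe body is elided.

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/tc.h>

    static int __init demo_tc_probe(struct device *dev)
    {
            /* Map resources via to_tc_dev(dev)->resource and register a host. */
            return 0;
    }

    static int __exit demo_tc_remove(struct device *dev)
    {
            /* Undo everything done in probe. */
            return 0;
    }

    static const struct tc_device_id demo_tc_table[] = {
            { "DEC     ", "PMAZ-AA " },     /* vendor/name, space padded */
            { }
    };
    MODULE_DEVICE_TABLE(tc, demo_tc_table);

    static struct tc_driver demo_tc_driver = {
            .id_table = demo_tc_table,
            .driver = {
                    .name   = "demo_esp",
                    .bus    = &tc_bus_type,
                    .probe  = demo_tc_probe,
                    .remove = __exit_p(demo_tc_remove),
            },
    };

    static int __init demo_init(void)
    {
            return tc_register_driver(&demo_tc_driver);
    }

    static void __exit demo_exit(void)
    {
            tc_unregister_driver(&demo_tc_driver);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");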
diff --git a/drivers/scsi/fastlane.c b/drivers/scsi/fastlane.c index 2a1c5c22b9e0..4266a2139b5f 100644 --- a/drivers/scsi/fastlane.c +++ b/drivers/scsi/fastlane.c | |||
@@ -142,7 +142,7 @@ int __init fastlane_esp_detect(struct scsi_host_template *tpnt) | |||
142 | if (board < 0x1000000) { | 142 | if (board < 0x1000000) { |
143 | goto err_release; | 143 | goto err_release; |
144 | } | 144 | } |
145 | esp = esp_allocate(tpnt, (void *)board+FASTLANE_ESP_ADDR); | 145 | esp = esp_allocate(tpnt, (void *)board + FASTLANE_ESP_ADDR, 0); |
146 | 146 | ||
147 | /* Do command transfer with programmed I/O */ | 147 | /* Do command transfer with programmed I/O */ |
148 | esp->do_pio_cmds = 1; | 148 | esp->do_pio_cmds = 1; |
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 437684084377..8f55e1431433 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -1375,7 +1375,7 @@ iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask) | |||
1375 | } | 1375 | } |
1376 | 1376 | ||
1377 | BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE); | 1377 | BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE); |
1378 | if (mtask->hdr->itt == cpu_to_be32(ISCSI_RESERVED_TAG)) { | 1378 | if (mtask->hdr->itt == RESERVED_ITT) { |
1379 | struct iscsi_session *session = conn->session; | 1379 | struct iscsi_session *session = conn->session; |
1380 | 1380 | ||
1381 | spin_lock_bh(&session->lock); | 1381 | spin_lock_bh(&session->lock); |
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c index bfac4441d89f..19dd4b962e18 100644 --- a/drivers/scsi/jazz_esp.c +++ b/drivers/scsi/jazz_esp.c | |||
@@ -75,7 +75,7 @@ static int jazz_esp_detect(struct scsi_host_template *tpnt) | |||
75 | */ | 75 | */ |
76 | if (1) { | 76 | if (1) { |
77 | esp_dev = NULL; | 77 | esp_dev = NULL; |
78 | esp = esp_allocate(tpnt, (void *) esp_dev); | 78 | esp = esp_allocate(tpnt, esp_dev, 0); |
79 | 79 | ||
80 | /* Do command transfer with programmed I/O */ | 80 | /* Do command transfer with programmed I/O */ |
81 | esp->do_pio_cmds = 1; | 81 | esp->do_pio_cmds = 1; |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index d37048c96eab..7c75771c77ff 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -113,8 +113,7 @@ static void iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask) | |||
113 | hdr->opcode = ISCSI_OP_SCSI_CMD; | 113 | hdr->opcode = ISCSI_OP_SCSI_CMD; |
114 | hdr->flags = ISCSI_ATTR_SIMPLE; | 114 | hdr->flags = ISCSI_ATTR_SIMPLE; |
115 | int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); | 115 | int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun); |
116 | hdr->itt = ctask->itt | (conn->id << ISCSI_CID_SHIFT) | | 116 | hdr->itt = build_itt(ctask->itt, conn->id, session->age); |
117 | (session->age << ISCSI_AGE_SHIFT); | ||
118 | hdr->data_length = cpu_to_be32(sc->request_bufflen); | 117 | hdr->data_length = cpu_to_be32(sc->request_bufflen); |
119 | hdr->cmdsn = cpu_to_be32(session->cmdsn); | 118 | hdr->cmdsn = cpu_to_be32(session->cmdsn); |
120 | session->cmdsn++; | 119 | session->cmdsn++; |
@@ -270,7 +269,7 @@ invalid_datalen: | |||
270 | goto out; | 269 | goto out; |
271 | } | 270 | } |
272 | 271 | ||
273 | senselen = be16_to_cpu(*(uint16_t *)data); | 272 | senselen = be16_to_cpu(*(__be16 *)data); |
274 | if (datalen < senselen) | 273 | if (datalen < senselen) |
275 | goto invalid_datalen; | 274 | goto invalid_datalen; |
276 | 275 | ||
@@ -338,7 +337,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
338 | 337 | ||
339 | if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { | 338 | if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) { |
340 | memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); | 339 | memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr)); |
341 | itt = rejected_pdu.itt & ISCSI_ITT_MASK; | 340 | itt = get_itt(rejected_pdu.itt); |
342 | printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected " | 341 | printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected " |
343 | "due to DataDigest error.\n", itt, | 342 | "due to DataDigest error.\n", itt, |
344 | rejected_pdu.opcode); | 343 | rejected_pdu.opcode); |
@@ -367,10 +366,10 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
367 | struct iscsi_mgmt_task *mtask; | 366 | struct iscsi_mgmt_task *mtask; |
368 | uint32_t itt; | 367 | uint32_t itt; |
369 | 368 | ||
370 | if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) | 369 | if (hdr->itt != RESERVED_ITT) |
371 | itt = hdr->itt & ISCSI_ITT_MASK; | 370 | itt = get_itt(hdr->itt); |
372 | else | 371 | else |
373 | itt = hdr->itt; | 372 | itt = ~0U; |
374 | 373 | ||
375 | if (itt < session->cmds_max) { | 374 | if (itt < session->cmds_max) { |
376 | ctask = session->cmds[itt]; | 375 | ctask = session->cmds[itt]; |
@@ -440,7 +439,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
440 | iscsi_tmf_rsp(conn, hdr); | 439 | iscsi_tmf_rsp(conn, hdr); |
441 | break; | 440 | break; |
442 | case ISCSI_OP_NOOP_IN: | 441 | case ISCSI_OP_NOOP_IN: |
443 | if (hdr->ttt != ISCSI_RESERVED_TAG || datalen) { | 442 | if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) || datalen) { |
444 | rc = ISCSI_ERR_PROTO; | 443 | rc = ISCSI_ERR_PROTO; |
445 | break; | 444 | break; |
446 | } | 445 | } |
@@ -457,7 +456,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
457 | rc = ISCSI_ERR_BAD_OPCODE; | 456 | rc = ISCSI_ERR_BAD_OPCODE; |
458 | break; | 457 | break; |
459 | } | 458 | } |
460 | } else if (itt == ISCSI_RESERVED_TAG) { | 459 | } else if (itt == ~0U) { |
461 | rc = iscsi_check_assign_cmdsn(session, | 460 | rc = iscsi_check_assign_cmdsn(session, |
462 | (struct iscsi_nopin*)hdr); | 461 | (struct iscsi_nopin*)hdr); |
463 | if (rc) | 462 | if (rc) |
@@ -470,7 +469,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
470 | break; | 469 | break; |
471 | } | 470 | } |
472 | 471 | ||
473 | if (hdr->ttt == ISCSI_RESERVED_TAG) | 472 | if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG)) |
474 | break; | 473 | break; |
475 | 474 | ||
476 | if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0)) | 475 | if (iscsi_recv_pdu(conn->cls_conn, hdr, NULL, 0)) |
@@ -516,24 +515,24 @@ int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
516 | struct iscsi_cmd_task *ctask; | 515 | struct iscsi_cmd_task *ctask; |
517 | uint32_t itt; | 516 | uint32_t itt; |
518 | 517 | ||
519 | if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) { | 518 | if (hdr->itt != RESERVED_ITT) { |
520 | if ((hdr->itt & ISCSI_AGE_MASK) != | 519 | if (((__force u32)hdr->itt & ISCSI_AGE_MASK) != |
521 | (session->age << ISCSI_AGE_SHIFT)) { | 520 | (session->age << ISCSI_AGE_SHIFT)) { |
522 | printk(KERN_ERR "iscsi: received itt %x expected " | 521 | printk(KERN_ERR "iscsi: received itt %x expected " |
523 | "session age (%x)\n", hdr->itt, | 522 | "session age (%x)\n", (__force u32)hdr->itt, |
524 | session->age & ISCSI_AGE_MASK); | 523 | session->age & ISCSI_AGE_MASK); |
525 | return ISCSI_ERR_BAD_ITT; | 524 | return ISCSI_ERR_BAD_ITT; |
526 | } | 525 | } |
527 | 526 | ||
528 | if ((hdr->itt & ISCSI_CID_MASK) != | 527 | if (((__force u32)hdr->itt & ISCSI_CID_MASK) != |
529 | (conn->id << ISCSI_CID_SHIFT)) { | 528 | (conn->id << ISCSI_CID_SHIFT)) { |
530 | printk(KERN_ERR "iscsi: received itt %x, expected " | 529 | printk(KERN_ERR "iscsi: received itt %x, expected " |
531 | "CID (%x)\n", hdr->itt, conn->id); | 530 | "CID (%x)\n", (__force u32)hdr->itt, conn->id); |
532 | return ISCSI_ERR_BAD_ITT; | 531 | return ISCSI_ERR_BAD_ITT; |
533 | } | 532 | } |
534 | itt = hdr->itt & ISCSI_ITT_MASK; | 533 | itt = get_itt(hdr->itt); |
535 | } else | 534 | } else |
536 | itt = hdr->itt; | 535 | itt = ~0U; |
537 | 536 | ||
538 | if (itt < session->cmds_max) { | 537 | if (itt < session->cmds_max) { |
539 | ctask = session->cmds[itt]; | 538 | ctask = session->cmds[itt]; |
@@ -896,9 +895,8 @@ iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
896 | /* | 895 | /* |
897 | * pre-format CmdSN for outgoing PDU. | 896 | * pre-format CmdSN for outgoing PDU. |
898 | */ | 897 | */ |
899 | if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) { | 898 | if (hdr->itt != RESERVED_ITT) { |
900 | hdr->itt = mtask->itt | (conn->id << ISCSI_CID_SHIFT) | | 899 | hdr->itt = build_itt(mtask->itt, conn->id, session->age); |
901 | (session->age << ISCSI_AGE_SHIFT); | ||
902 | nop->cmdsn = cpu_to_be32(session->cmdsn); | 900 | nop->cmdsn = cpu_to_be32(session->cmdsn); |
903 | if (conn->c_stage == ISCSI_CONN_STARTED && | 901 | if (conn->c_stage == ISCSI_CONN_STARTED && |
904 | !(hdr->opcode & ISCSI_OP_IMMEDIATE)) | 902 | !(hdr->opcode & ISCSI_OP_IMMEDIATE)) |
@@ -1064,7 +1062,7 @@ static int iscsi_exec_abort_task(struct scsi_cmnd *sc, | |||
1064 | 1062 | ||
1065 | spin_lock_bh(&session->lock); | 1063 | spin_lock_bh(&session->lock); |
1066 | ctask->mtask = (struct iscsi_mgmt_task *) | 1064 | ctask->mtask = (struct iscsi_mgmt_task *) |
1067 | session->mgmt_cmds[(hdr->itt & ISCSI_ITT_MASK) - | 1065 | session->mgmt_cmds[get_itt(hdr->itt) - |
1068 | ISCSI_MGMT_ITT_OFFSET]; | 1066 | ISCSI_MGMT_ITT_OFFSET]; |
1069 | 1067 | ||
1070 | if (conn->tmabort_state == TMABORT_INITIAL) { | 1068 | if (conn->tmabort_state == TMABORT_INITIAL) { |
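Editor's note on the libiscsi hunks above: they replace the open-coded ITT mask/shift arithmetic with the RESERVED_ITT, get_itt() and build_itt() helpers, and add (__force u32) casts where the now-bitwise tag gets printed. As a rough sketch of the packing such helpers perform — the mask and shift values below are assumptions for illustration only; the real ISCSI_ITT_MASK, ISCSI_CID_SHIFT and ISCSI_AGE_SHIFT live in the iSCSI headers and may differ:

    /* Hypothetical layout: task index in the low bits, connection id and
     * session age in the upper bits. */
    #include <stdint.h>

    #define DEMO_ITT_MASK   0x00000fffu
    #define DEMO_CID_SHIFT  12
    #define DEMO_AGE_SHIFT  28

    uint32_t demo_build_itt(uint32_t task, uint32_t cid, uint32_t age)
    {
            return (task & DEMO_ITT_MASK) | (cid << DEMO_CID_SHIFT) |
                   (age << DEMO_AGE_SHIFT);
    }

    uint32_t demo_get_itt(uint32_t itt)
    {
            return itt & DEMO_ITT_MASK;     /* back to the per-session index */
    }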
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index 3586fac9be9a..bcb49021b7e2 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c | |||
@@ -351,7 +351,7 @@ int mac_esp_detect(struct scsi_host_template * tpnt) | |||
351 | for (chipnum = 0; chipnum < chipspresent; chipnum ++) { | 351 | for (chipnum = 0; chipnum < chipspresent; chipnum ++) { |
352 | struct NCR_ESP * esp; | 352 | struct NCR_ESP * esp; |
353 | 353 | ||
354 | esp = esp_allocate(tpnt, (void *) NULL); | 354 | esp = esp_allocate(tpnt, NULL, 0); |
355 | esp->eregs = (struct ESP_regs *) get_base(chipnum); | 355 | esp->eregs = (struct ESP_regs *) get_base(chipnum); |
356 | 356 | ||
357 | esp->dma_irq_p = &esp_dafb_dma_irq_p; | 357 | esp->dma_irq_p = &esp_dafb_dma_irq_p; |
diff --git a/drivers/scsi/mca_53c9x.c b/drivers/scsi/mca_53c9x.c index 998a8bbc1a4b..d693d0f21395 100644 --- a/drivers/scsi/mca_53c9x.c +++ b/drivers/scsi/mca_53c9x.c | |||
@@ -122,7 +122,7 @@ static int mca_esp_detect(struct scsi_host_template *tpnt) | |||
122 | if ((slot = mca_find_adapter(*id_to_check, 0)) != | 122 | if ((slot = mca_find_adapter(*id_to_check, 0)) != |
123 | MCA_NOTFOUND) | 123 | MCA_NOTFOUND) |
124 | { | 124 | { |
125 | esp = esp_allocate(tpnt, (void *) NULL); | 125 | esp = esp_allocate(tpnt, NULL, 0); |
126 | 126 | ||
127 | pos[0] = mca_read_stored_pos(slot, 2); | 127 | pos[0] = mca_read_stored_pos(slot, 2); |
128 | pos[1] = mca_read_stored_pos(slot, 3); | 128 | pos[1] = mca_read_stored_pos(slot, 3); |
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c index c116a6ae3c54..26a6d55faf3e 100644 --- a/drivers/scsi/oktagon_esp.c +++ b/drivers/scsi/oktagon_esp.c | |||
@@ -133,7 +133,7 @@ int oktagon_esp_detect(struct scsi_host_template *tpnt) | |||
133 | eregs = (struct ESP_regs *)(address + OKTAGON_ESP_ADDR); | 133 | eregs = (struct ESP_regs *)(address + OKTAGON_ESP_ADDR); |
134 | 134 | ||
135 | /* This line was 5 lines lower */ | 135 | /* This line was 5 lines lower */ |
136 | esp = esp_allocate(tpnt, (void *)board+OKTAGON_ESP_ADDR); | 136 | esp = esp_allocate(tpnt, (void *)board + OKTAGON_ESP_ADDR, 0); |
137 | 137 | ||
138 | /* we have to shift the registers only one bit for oktagon */ | 138 | /* we have to shift the registers only one bit for oktagon */ |
139 | esp->shift = 1; | 139 | esp->shift = 1; |
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 7d2311067903..bd6bbf61adb8 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c | |||
@@ -521,10 +521,10 @@ static void osst_init_aux(struct osst_tape * STp, int frame_type, int frame_seq_ | |||
521 | break; | 521 | break; |
522 | default: ; /* probably FILL */ | 522 | default: ; /* probably FILL */ |
523 | } | 523 | } |
524 | aux->filemark_cnt = ntohl(STp->filemark_cnt); | 524 | aux->filemark_cnt = htonl(STp->filemark_cnt); |
525 | aux->phys_fm = ntohl(0xffffffff); | 525 | aux->phys_fm = htonl(0xffffffff); |
526 | aux->last_mark_ppos = ntohl(STp->last_mark_ppos); | 526 | aux->last_mark_ppos = htonl(STp->last_mark_ppos); |
527 | aux->last_mark_lbn = ntohl(STp->last_mark_lbn); | 527 | aux->last_mark_lbn = htonl(STp->last_mark_lbn); |
528 | } | 528 | } |
529 | 529 | ||
530 | /* | 530 | /* |
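A note on the osst.c hunk above: the values being stored are CPU-order counters headed for big-endian on-tape fields, so the conversion direction is cpu-to-big-endian — htonl(), not ntohl(). The two are the same byte swap on every architecture, but the corrected direction matches the __be32 typing added to osst.h below and keeps sparse quiet. A minimal user-space illustration (the field name is invented for the demo):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    struct demo_aux {
            uint32_t filemark_cnt_be;       /* stored big-endian on the tape */
    };

    int main(void)
    {
            uint32_t cpu_count = 42;        /* counter as the driver keeps it */
            struct demo_aux aux;

            aux.filemark_cnt_be = htonl(cpu_count);         /* CPU -> BE */
            printf("raw on-media word: 0x%08x\n", (unsigned)aux.filemark_cnt_be);
            printf("read back: %u\n", (unsigned)ntohl(aux.filemark_cnt_be));
            return 0;
    }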
diff --git a/drivers/scsi/osst.h b/drivers/scsi/osst.h index 1e426f5d0ed8..2cc7b5a1606a 100644 --- a/drivers/scsi/osst.h +++ b/drivers/scsi/osst.h | |||
@@ -288,11 +288,11 @@ typedef struct { | |||
288 | #else | 288 | #else |
289 | #error "Please fix <asm/byteorder.h>" | 289 | #error "Please fix <asm/byteorder.h>" |
290 | #endif | 290 | #endif |
291 | u16 max_speed; /* Maximum speed supported in KBps */ | 291 | __be16 max_speed; /* Maximum speed supported in KBps */ |
292 | u8 reserved10, reserved11; | 292 | u8 reserved10, reserved11; |
293 | u16 ctl; /* Continuous Transfer Limit in blocks */ | 293 | __be16 ctl; /* Continuous Transfer Limit in blocks */ |
294 | u16 speed; /* Current Speed, in KBps */ | 294 | __be16 speed; /* Current Speed, in KBps */ |
295 | u16 buffer_size; /* Buffer Size, in 512 bytes */ | 295 | __be16 buffer_size; /* Buffer Size, in 512 bytes */ |
296 | u8 reserved18, reserved19; | 296 | u8 reserved18, reserved19; |
297 | } osst_capabilities_page_t; | 297 | } osst_capabilities_page_t; |
298 | 298 | ||
@@ -352,8 +352,8 @@ typedef struct { | |||
352 | u8 reserved2; | 352 | u8 reserved2; |
353 | u8 density; | 353 | u8 density; |
354 | u8 reserved3,reserved4; | 354 | u8 reserved3,reserved4; |
355 | u16 segtrk; | 355 | __be16 segtrk; |
356 | u16 trks; | 356 | __be16 trks; |
357 | u8 reserved5,reserved6,reserved7,reserved8,reserved9,reserved10; | 357 | u8 reserved5,reserved6,reserved7,reserved8,reserved9,reserved10; |
358 | } osst_tape_paramtr_page_t; | 358 | } osst_tape_paramtr_page_t; |
359 | 359 | ||
@@ -369,18 +369,18 @@ typedef struct { | |||
369 | typedef struct os_partition_s { | 369 | typedef struct os_partition_s { |
370 | __u8 partition_num; | 370 | __u8 partition_num; |
371 | __u8 par_desc_ver; | 371 | __u8 par_desc_ver; |
372 | __u16 wrt_pass_cntr; | 372 | __be16 wrt_pass_cntr; |
373 | __u32 first_frame_ppos; | 373 | __be32 first_frame_ppos; |
374 | __u32 last_frame_ppos; | 374 | __be32 last_frame_ppos; |
375 | __u32 eod_frame_ppos; | 375 | __be32 eod_frame_ppos; |
376 | } os_partition_t; | 376 | } os_partition_t; |
377 | 377 | ||
378 | /* | 378 | /* |
379 | * DAT entry | 379 | * DAT entry |
380 | */ | 380 | */ |
381 | typedef struct os_dat_entry_s { | 381 | typedef struct os_dat_entry_s { |
382 | __u32 blk_sz; | 382 | __be32 blk_sz; |
383 | __u16 blk_cnt; | 383 | __be16 blk_cnt; |
384 | __u8 flags; | 384 | __u8 flags; |
385 | __u8 reserved; | 385 | __u8 reserved; |
386 | } os_dat_entry_t; | 386 | } os_dat_entry_t; |
@@ -412,23 +412,23 @@ typedef struct os_dat_s { | |||
412 | * AUX | 412 | * AUX |
413 | */ | 413 | */ |
414 | typedef struct os_aux_s { | 414 | typedef struct os_aux_s { |
415 | __u32 format_id; /* hardware compability AUX is based on */ | 415 | __be32 format_id; /* hardware compability AUX is based on */ |
416 | char application_sig[4]; /* driver used to write this media */ | 416 | char application_sig[4]; /* driver used to write this media */ |
417 | __u32 hdwr; /* reserved */ | 417 | __be32 hdwr; /* reserved */ |
418 | __u32 update_frame_cntr; /* for configuration frame */ | 418 | __be32 update_frame_cntr; /* for configuration frame */ |
419 | __u8 frame_type; | 419 | __u8 frame_type; |
420 | __u8 frame_type_reserved; | 420 | __u8 frame_type_reserved; |
421 | __u8 reserved_18_19[2]; | 421 | __u8 reserved_18_19[2]; |
422 | os_partition_t partition; | 422 | os_partition_t partition; |
423 | __u8 reserved_36_43[8]; | 423 | __u8 reserved_36_43[8]; |
424 | __u32 frame_seq_num; | 424 | __be32 frame_seq_num; |
425 | __u32 logical_blk_num_high; | 425 | __be32 logical_blk_num_high; |
426 | __u32 logical_blk_num; | 426 | __be32 logical_blk_num; |
427 | os_dat_t dat; | 427 | os_dat_t dat; |
428 | __u8 reserved188_191[4]; | 428 | __u8 reserved188_191[4]; |
429 | __u32 filemark_cnt; | 429 | __be32 filemark_cnt; |
430 | __u32 phys_fm; | 430 | __be32 phys_fm; |
431 | __u32 last_mark_ppos; | 431 | __be32 last_mark_ppos; |
432 | __u8 reserved204_223[20]; | 432 | __u8 reserved204_223[20]; |
433 | 433 | ||
434 | /* | 434 | /* |
@@ -436,8 +436,8 @@ typedef struct os_aux_s { | |||
436 | * | 436 | * |
437 | * Linux specific fields: | 437 | * Linux specific fields: |
438 | */ | 438 | */ |
439 | __u32 next_mark_ppos; /* when known, points to next marker */ | 439 | __be32 next_mark_ppos; /* when known, points to next marker */ |
440 | __u32 last_mark_lbn; /* storing log_blk_num of last mark is extends ADR spec */ | 440 | __be32 last_mark_lbn; /* storing log_blk_num of last mark is extends ADR spec */ |
441 | __u8 linux_specific[24]; | 441 | __u8 linux_specific[24]; |
442 | 442 | ||
443 | __u8 reserved_256_511[256]; | 443 | __u8 reserved_256_511[256]; |
@@ -450,19 +450,19 @@ typedef struct os_fm_tab_s { | |||
450 | __u8 reserved_1; | 450 | __u8 reserved_1; |
451 | __u8 fm_tab_ent_sz; | 451 | __u8 fm_tab_ent_sz; |
452 | __u8 reserved_3; | 452 | __u8 reserved_3; |
453 | __u16 fm_tab_ent_cnt; | 453 | __be16 fm_tab_ent_cnt; |
454 | __u8 reserved6_15[10]; | 454 | __u8 reserved6_15[10]; |
455 | __u32 fm_tab_ent[OS_FM_TAB_MAX]; | 455 | __be32 fm_tab_ent[OS_FM_TAB_MAX]; |
456 | } os_fm_tab_t; | 456 | } os_fm_tab_t; |
457 | 457 | ||
458 | typedef struct os_ext_trk_ey_s { | 458 | typedef struct os_ext_trk_ey_s { |
459 | __u8 et_part_num; | 459 | __u8 et_part_num; |
460 | __u8 fmt; | 460 | __u8 fmt; |
461 | __u16 fm_tab_off; | 461 | __be16 fm_tab_off; |
462 | __u8 reserved4_7[4]; | 462 | __u8 reserved4_7[4]; |
463 | __u32 last_hlb_hi; | 463 | __be32 last_hlb_hi; |
464 | __u32 last_hlb; | 464 | __be32 last_hlb; |
465 | __u32 last_pp; | 465 | __be32 last_pp; |
466 | __u8 reserved20_31[12]; | 466 | __u8 reserved20_31[12]; |
467 | } os_ext_trk_ey_t; | 467 | } os_ext_trk_ey_t; |
468 | 468 | ||
@@ -479,17 +479,17 @@ typedef struct os_header_s { | |||
479 | char ident_str[8]; | 479 | char ident_str[8]; |
480 | __u8 major_rev; | 480 | __u8 major_rev; |
481 | __u8 minor_rev; | 481 | __u8 minor_rev; |
482 | __u16 ext_trk_tb_off; | 482 | __be16 ext_trk_tb_off; |
483 | __u8 reserved12_15[4]; | 483 | __u8 reserved12_15[4]; |
484 | __u8 pt_par_num; | 484 | __u8 pt_par_num; |
485 | __u8 pt_reserved1_3[3]; | 485 | __u8 pt_reserved1_3[3]; |
486 | os_partition_t partition[16]; | 486 | os_partition_t partition[16]; |
487 | __u32 cfg_col_width; | 487 | __be32 cfg_col_width; |
488 | __u32 dat_col_width; | 488 | __be32 dat_col_width; |
489 | __u32 qfa_col_width; | 489 | __be32 qfa_col_width; |
490 | __u8 cartridge[16]; | 490 | __u8 cartridge[16]; |
491 | __u8 reserved304_511[208]; | 491 | __u8 reserved304_511[208]; |
492 | __u32 old_filemark_list[16680/4]; /* in ADR 1.4 __u8 track_table[16680] */ | 492 | __be32 old_filemark_list[16680/4]; /* in ADR 1.4 __u8 track_table[16680] */ |
493 | os_ext_trk_tb_t ext_track_tb; | 493 | os_ext_trk_tb_t ext_track_tb; |
494 | __u8 reserved17272_17735[464]; | 494 | __u8 reserved17272_17735[464]; |
495 | os_fm_tab_t dat_fm_tab; | 495 | os_fm_tab_t dat_fm_tab; |
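The osst.h retyping above is purely an annotation change: these structures describe on-tape data that is always big-endian, and giving the fields __be16/__be32 types lets sparse flag any access that skips the byte-order conversion. A short kernel-style sketch of how such a field is meant to be consumed (the struct and helper names here are made up):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_caps {
            __be16 max_speed;       /* big-endian, straight from MODE SENSE data */
            __be16 buffer_size;
    };

    unsigned int demo_speed_kbps(const struct demo_caps *cp)
    {
            /* be16_to_cpu() is mandatory; returning cp->max_speed directly
             * would now be reported by sparse as a bitwise-type misuse. */
            return be16_to_cpu(cp->max_speed);
    }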
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index 6b60536ac92b..80fb3f88af2e 100644 --- a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c | |||
@@ -53,7 +53,7 @@ int sun3x_esp_detect(struct scsi_host_template *tpnt) | |||
53 | struct ConfigDev *esp_dev; | 53 | struct ConfigDev *esp_dev; |
54 | 54 | ||
55 | esp_dev = 0; | 55 | esp_dev = 0; |
56 | esp = esp_allocate(tpnt, (void *) esp_dev); | 56 | esp = esp_allocate(tpnt, esp_dev, 0); |
57 | 57 | ||
58 | /* Do command transfer with DMA */ | 58 | /* Do command transfer with DMA */ |
59 | esp->do_pio_cmds = 0; | 59 | esp->do_pio_cmds = 0; |
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c index db8607e3d531..f5051cf1a0c8 100644 --- a/drivers/serial/uartlite.c +++ b/drivers/serial/uartlite.c | |||
@@ -256,7 +256,7 @@ static void ulite_release_port(struct uart_port *port) | |||
256 | { | 256 | { |
257 | release_mem_region(port->mapbase, ULITE_REGION); | 257 | release_mem_region(port->mapbase, ULITE_REGION); |
258 | iounmap(port->membase); | 258 | iounmap(port->membase); |
259 | port->membase = 0; | 259 | port->membase = NULL; |
260 | } | 260 | } |
261 | 261 | ||
262 | static int ulite_request_port(struct uart_port *port) | 262 | static int ulite_request_port(struct uart_port *port) |
@@ -438,7 +438,7 @@ static int __devinit ulite_probe(struct platform_device *pdev) | |||
438 | port->iotype = UPIO_MEM; | 438 | port->iotype = UPIO_MEM; |
439 | port->iobase = 1; /* mark port in use */ | 439 | port->iobase = 1; /* mark port in use */ |
440 | port->mapbase = res->start; | 440 | port->mapbase = res->start; |
441 | port->membase = 0; | 441 | port->membase = NULL; |
442 | port->ops = &ulite_ops; | 442 | port->ops = &ulite_ops; |
443 | port->irq = res2->start; | 443 | port->irq = res2->start; |
444 | port->flags = UPF_BOOT_AUTOCONF; | 444 | port->flags = UPF_BOOT_AUTOCONF; |
@@ -462,7 +462,7 @@ static int ulite_remove(struct platform_device *pdev) | |||
462 | uart_remove_one_port(&ulite_uart_driver, port); | 462 | uart_remove_one_port(&ulite_uart_driver, port); |
463 | 463 | ||
464 | /* mark port as free */ | 464 | /* mark port as free */ |
465 | port->membase = 0; | 465 | port->membase = NULL; |
466 | 466 | ||
467 | return 0; | 467 | return 0; |
468 | } | 468 | } |
diff --git a/drivers/tc/Makefile b/drivers/tc/Makefile index 83b5bd75ce26..967342692211 100644 --- a/drivers/tc/Makefile +++ b/drivers/tc/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | # Object file lists. | 5 | # Object file lists. |
6 | 6 | ||
7 | obj-$(CONFIG_TC) += tc.o | 7 | obj-$(CONFIG_TC) += tc.o tc-driver.o |
8 | obj-$(CONFIG_ZS) += zs.o | 8 | obj-$(CONFIG_ZS) += zs.o |
9 | obj-$(CONFIG_VT) += lk201.o lk201-map.o lk201-remap.o | 9 | obj-$(CONFIG_VT) += lk201.o lk201-map.o lk201-remap.o |
10 | 10 | ||
diff --git a/drivers/tc/tc-driver.c b/drivers/tc/tc-driver.c new file mode 100644 index 000000000000..16b5bae63c74 --- /dev/null +++ b/drivers/tc/tc-driver.c | |||
@@ -0,0 +1,110 @@ | |||
1 | /* | ||
2 | * TURBOchannel driver services. | ||
3 | * | ||
4 | * Copyright (c) 2005 James Simmons | ||
5 | * Copyright (c) 2006 Maciej W. Rozycki | ||
6 | * | ||
7 | * Loosely based on drivers/dio/dio-driver.c and | ||
8 | * drivers/pci/pci-driver.c. | ||
9 | * | ||
10 | * This file is subject to the terms and conditions of the GNU | ||
11 | * General Public License. See the file "COPYING" in the main | ||
12 | * directory of this archive for more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/init.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/tc.h> | ||
18 | |||
19 | /** | ||
20 | * tc_register_driver - register a new TC driver | ||
21 | * @drv: the driver structure to register | ||
22 | * | ||
23 | * Adds the driver structure to the list of registered drivers | ||
24 | * Returns a negative value on error, otherwise 0. | ||
25 | * If no error occurred, the driver remains registered even if | ||
26 | * no device was claimed during registration. | ||
27 | */ | ||
28 | int tc_register_driver(struct tc_driver *tdrv) | ||
29 | { | ||
30 | return driver_register(&tdrv->driver); | ||
31 | } | ||
32 | EXPORT_SYMBOL(tc_register_driver); | ||
33 | |||
34 | /** | ||
35 | * tc_unregister_driver - unregister a TC driver | ||
36 | * @drv: the driver structure to unregister | ||
37 | * | ||
38 | * Deletes the driver structure from the list of registered TC drivers, | ||
39 | * gives it a chance to clean up by calling its remove() function for | ||
40 | * each device it was responsible for, and marks those devices as | ||
41 | * driverless. | ||
42 | */ | ||
43 | void tc_unregister_driver(struct tc_driver *tdrv) | ||
44 | { | ||
45 | driver_unregister(&tdrv->driver); | ||
46 | } | ||
47 | EXPORT_SYMBOL(tc_unregister_driver); | ||
48 | |||
49 | /** | ||
50 | * tc_match_device - tell if a TC device structure has a matching | ||
51 | * TC device ID structure | ||
52 | * @tdrv: the TC driver to search for matching TC device ID strings | ||
53 | * @tdev: the TC device structure to match against | ||
54 | * | ||
55 | * Used by a driver to check whether a TC device present in the | ||
56 | * system is in its list of supported devices. Returns the matching | ||
57 | * tc_device_id structure or %NULL if there is no match. | ||
58 | */ | ||
59 | const struct tc_device_id *tc_match_device(struct tc_driver *tdrv, | ||
60 | struct tc_dev *tdev) | ||
61 | { | ||
62 | const struct tc_device_id *id = tdrv->id_table; | ||
63 | |||
64 | if (id) { | ||
65 | while (id->name[0] || id->vendor[0]) { | ||
66 | if (strcmp(tdev->name, id->name) == 0 && | ||
67 | strcmp(tdev->vendor, id->vendor) == 0) | ||
68 | return id; | ||
69 | id++; | ||
70 | } | ||
71 | } | ||
72 | return NULL; | ||
73 | } | ||
74 | EXPORT_SYMBOL(tc_match_device); | ||
75 | |||
76 | /** | ||
77 | * tc_bus_match - Tell if a device structure has a matching | ||
78 | * TC device ID structure | ||
79 | * @dev: the device structure to match against | ||
80 | * @drv: the device driver to search for matching TC device ID strings | ||
81 | * | ||
82 | * Used by a driver to check whether a TC device present in the | ||
83 | * system is in its list of supported devices. Returns 1 if there | ||
84 | * is a match or 0 otherwise. | ||
85 | */ | ||
86 | static int tc_bus_match(struct device *dev, struct device_driver *drv) | ||
87 | { | ||
88 | struct tc_dev *tdev = to_tc_dev(dev); | ||
89 | struct tc_driver *tdrv = to_tc_driver(drv); | ||
90 | const struct tc_device_id *id; | ||
91 | |||
92 | id = tc_match_device(tdrv, tdev); | ||
93 | if (id) | ||
94 | return 1; | ||
95 | |||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | struct bus_type tc_bus_type = { | ||
100 | .name = "tc", | ||
101 | .match = tc_bus_match, | ||
102 | }; | ||
103 | EXPORT_SYMBOL(tc_bus_type); | ||
104 | |||
105 | static int __init tc_driver_init(void) | ||
106 | { | ||
107 | return bus_register(&tc_bus_type); | ||
108 | } | ||
109 | |||
110 | postcore_initcall(tc_driver_init); | ||
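For context, a client of the interface added above would look roughly like the sketch below — the same pattern the pmag-ba-fb.c conversion further down follows. The driver name, the board strings in the ID table and the probe body are placeholders, not a real module:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/tc.h>

    static const struct tc_device_id demo_tc_table[] = {
            { "DEC     ", "PMAD-AA " },     /* hypothetical vendor/module pair */
            { }
    };
    MODULE_DEVICE_TABLE(tc, demo_tc_table);

    static int demo_probe(struct device *dev)
    {
            struct tc_dev *tdev = to_tc_dev(dev);

            pr_info("tcdemo: claiming %s in slot %d\n", tdev->name, tdev->slot);
            return 0;
    }

    static struct tc_driver demo_tc_driver = {
            .id_table = demo_tc_table,
            .driver = {
                    .name  = "tcdemo",
                    .bus   = &tc_bus_type,
                    .probe = demo_probe,
            },
    };

    static int __init demo_init(void)
    {
            return tc_register_driver(&demo_tc_driver);
    }

    static void __exit demo_exit(void)
    {
            tc_unregister_driver(&demo_tc_driver);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");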
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c index 4a51e56f85b6..f77f62a4b325 100644 --- a/drivers/tc/tc.c +++ b/drivers/tc/tc.c | |||
@@ -1,254 +1,193 @@ | |||
1 | /* | 1 | /* |
2 | * tc-init: We assume the TURBOchannel to be up and running so | 2 | * TURBOchannel bus services. |
3 | * just probe for Modules and fill in the global data structure | ||
4 | * tc_bus. | ||
5 | * | 3 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | 4 | * Copyright (c) Harald Koerfgen, 1998 |
7 | * License. See the file "COPYING" in the main directory of this archive | 5 | * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki |
8 | * for more details. | 6 | * Copyright (c) 2005 James Simmons |
9 | * | 7 | * |
10 | * Copyright (c) Harald Koerfgen, 1998 | 8 | * This file is subject to the terms and conditions of the GNU |
11 | * Copyright (c) 2001, 2003, 2005 Maciej W. Rozycki | 9 | * General Public License. See the file "COPYING" in the main |
10 | * directory of this archive for more details. | ||
12 | */ | 11 | */ |
12 | #include <linux/compiler.h> | ||
13 | #include <linux/errno.h> | ||
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/ioport.h> | ||
14 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/list.h> | ||
15 | #include <linux/module.h> | 18 | #include <linux/module.h> |
16 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | #include <linux/tc.h> | ||
17 | #include <linux/types.h> | 21 | #include <linux/types.h> |
18 | 22 | ||
19 | #include <asm/addrspace.h> | ||
20 | #include <asm/errno.h> | ||
21 | #include <asm/io.h> | 23 | #include <asm/io.h> |
22 | #include <asm/paccess.h> | ||
23 | 24 | ||
24 | #include <asm/dec/machtype.h> | 25 | static struct tc_bus tc_bus = { |
25 | #include <asm/dec/prom.h> | 26 | .name = "TURBOchannel", |
26 | #include <asm/dec/tcinfo.h> | 27 | }; |
27 | #include <asm/dec/tcmodule.h> | ||
28 | #include <asm/dec/interrupts.h> | ||
29 | |||
30 | MODULE_LICENSE("GPL"); | ||
31 | slot_info tc_bus[MAX_SLOT]; | ||
32 | static int num_tcslots; | ||
33 | static tcinfo *info; | ||
34 | 28 | ||
35 | /* | 29 | /* |
36 | * Interface to the world. Read comment in include/asm-mips/tc.h. | 30 | * Probing for TURBOchannel modules. |
37 | */ | 31 | */ |
38 | 32 | static void __init tc_bus_add_devices(struct tc_bus *tbus) | |
39 | int search_tc_card(const char *name) | ||
40 | { | ||
41 | int slot; | ||
42 | slot_info *sip; | ||
43 | |||
44 | for (slot = 0; slot < num_tcslots; slot++) { | ||
45 | sip = &tc_bus[slot]; | ||
46 | if ((sip->flags & FREE) && | ||
47 | (strncmp(sip->name, name, strlen(name)) == 0)) { | ||
48 | return slot; | ||
49 | } | ||
50 | } | ||
51 | |||
52 | return -ENODEV; | ||
53 | } | ||
54 | |||
55 | void claim_tc_card(int slot) | ||
56 | { | ||
57 | if (tc_bus[slot].flags & IN_USE) { | ||
58 | printk("claim_tc_card: attempting to claim a card already in use\n"); | ||
59 | return; | ||
60 | } | ||
61 | tc_bus[slot].flags &= ~FREE; | ||
62 | tc_bus[slot].flags |= IN_USE; | ||
63 | } | ||
64 | |||
65 | void release_tc_card(int slot) | ||
66 | { | 33 | { |
67 | if (tc_bus[slot].flags & FREE) { | 34 | resource_size_t slotsize = tbus->info.slot_size << 20; |
68 | printk("release_tc_card: " | 35 | resource_size_t extslotsize = tbus->ext_slot_size; |
69 | "attempting to release a card already free\n"); | 36 | resource_size_t slotaddr; |
70 | return; | 37 | resource_size_t extslotaddr; |
71 | } | 38 | resource_size_t devsize; |
72 | tc_bus[slot].flags &= ~IN_USE; | 39 | void __iomem *module; |
73 | tc_bus[slot].flags |= FREE; | 40 | struct tc_dev *tdev; |
74 | } | ||
75 | |||
76 | unsigned long get_tc_base_addr(int slot) | ||
77 | { | ||
78 | return tc_bus[slot].base_addr; | ||
79 | } | ||
80 | |||
81 | unsigned long get_tc_irq_nr(int slot) | ||
82 | { | ||
83 | return tc_bus[slot].interrupt; | ||
84 | } | ||
85 | |||
86 | unsigned long get_tc_speed(void) | ||
87 | { | ||
88 | return 100000 * (10000 / (unsigned long)info->clk_period); | ||
89 | } | ||
90 | |||
91 | /* | ||
92 | * Probing for TURBOchannel modules | ||
93 | */ | ||
94 | static void __init tc_probe(unsigned long startaddr, unsigned long size, | ||
95 | int slots) | ||
96 | { | ||
97 | unsigned long slotaddr; | ||
98 | int i, slot, err; | 41 | int i, slot, err; |
99 | long offset; | ||
100 | u8 pattern[4]; | 42 | u8 pattern[4]; |
101 | volatile u8 *module; | 43 | long offset; |
102 | 44 | ||
103 | for (slot = 0; slot < slots; slot++) { | 45 | for (slot = 0; slot < tbus->num_tcslots; slot++) { |
104 | slotaddr = startaddr + slot * size; | 46 | slotaddr = tbus->slot_base + slot * slotsize; |
105 | module = ioremap_nocache(slotaddr, size); | 47 | extslotaddr = tbus->ext_slot_base + slot * extslotsize; |
48 | module = ioremap_nocache(slotaddr, slotsize); | ||
106 | BUG_ON(!module); | 49 | BUG_ON(!module); |
107 | 50 | ||
108 | offset = OLDCARD; | 51 | offset = TC_OLDCARD; |
109 | 52 | ||
110 | err = 0; | 53 | err = 0; |
111 | err |= get_dbe(pattern[0], module + OLDCARD + TC_PATTERN0); | 54 | err |= tc_preadb(pattern + 0, module + offset + TC_PATTERN0); |
112 | err |= get_dbe(pattern[1], module + OLDCARD + TC_PATTERN1); | 55 | err |= tc_preadb(pattern + 1, module + offset + TC_PATTERN1); |
113 | err |= get_dbe(pattern[2], module + OLDCARD + TC_PATTERN2); | 56 | err |= tc_preadb(pattern + 2, module + offset + TC_PATTERN2); |
114 | err |= get_dbe(pattern[3], module + OLDCARD + TC_PATTERN3); | 57 | err |= tc_preadb(pattern + 3, module + offset + TC_PATTERN3); |
115 | if (err) { | 58 | if (err) |
116 | iounmap(module); | 59 | goto out_err; |
117 | continue; | ||
118 | } | ||
119 | 60 | ||
120 | if (pattern[0] != 0x55 || pattern[1] != 0x00 || | 61 | if (pattern[0] != 0x55 || pattern[1] != 0x00 || |
121 | pattern[2] != 0xaa || pattern[3] != 0xff) { | 62 | pattern[2] != 0xaa || pattern[3] != 0xff) { |
122 | offset = NEWCARD; | 63 | offset = TC_NEWCARD; |
123 | 64 | ||
124 | err = 0; | 65 | err = 0; |
125 | err |= get_dbe(pattern[0], module + TC_PATTERN0); | 66 | err |= tc_preadb(pattern + 0, |
126 | err |= get_dbe(pattern[1], module + TC_PATTERN1); | 67 | module + offset + TC_PATTERN0); |
127 | err |= get_dbe(pattern[2], module + TC_PATTERN2); | 68 | err |= tc_preadb(pattern + 1, |
128 | err |= get_dbe(pattern[3], module + TC_PATTERN3); | 69 | module + offset + TC_PATTERN1); |
129 | if (err) { | 70 | err |= tc_preadb(pattern + 2, |
130 | iounmap(module); | 71 | module + offset + TC_PATTERN2); |
131 | continue; | 72 | err |= tc_preadb(pattern + 3, |
132 | } | 73 | module + offset + TC_PATTERN3); |
74 | if (err) | ||
75 | goto out_err; | ||
133 | } | 76 | } |
134 | 77 | ||
135 | if (pattern[0] != 0x55 || pattern[1] != 0x00 || | 78 | if (pattern[0] != 0x55 || pattern[1] != 0x00 || |
136 | pattern[2] != 0xaa || pattern[3] != 0xff) { | 79 | pattern[2] != 0xaa || pattern[3] != 0xff) |
137 | iounmap(module); | 80 | goto out_err; |
138 | continue; | 81 | |
82 | /* Found a board, allocate it an entry in the list */ | ||
83 | tdev = kzalloc(sizeof(*tdev), GFP_KERNEL); | ||
84 | if (!tdev) { | ||
85 | printk(KERN_ERR "tc%x: unable to allocate tc_dev\n", | ||
86 | slot); | ||
87 | goto out_err; | ||
139 | } | 88 | } |
89 | sprintf(tdev->dev.bus_id, "tc%x", slot); | ||
90 | tdev->bus = tbus; | ||
91 | tdev->dev.parent = &tbus->dev; | ||
92 | tdev->dev.bus = &tc_bus_type; | ||
93 | tdev->slot = slot; | ||
140 | 94 | ||
141 | tc_bus[slot].base_addr = slotaddr; | ||
142 | for (i = 0; i < 8; i++) { | 95 | for (i = 0; i < 8; i++) { |
143 | tc_bus[slot].firmware[i] = | 96 | tdev->firmware[i] = |
144 | module[TC_FIRM_VER + offset + 4 * i]; | 97 | readb(module + offset + TC_FIRM_VER + 4 * i); |
145 | tc_bus[slot].vendor[i] = | 98 | tdev->vendor[i] = |
146 | module[TC_VENDOR + offset + 4 * i]; | 99 | readb(module + offset + TC_VENDOR + 4 * i); |
147 | tc_bus[slot].name[i] = | 100 | tdev->name[i] = |
148 | module[TC_MODULE + offset + 4 * i]; | 101 | readb(module + offset + TC_MODULE + 4 * i); |
149 | } | 102 | } |
150 | tc_bus[slot].firmware[8] = 0; | 103 | tdev->firmware[8] = 0; |
151 | tc_bus[slot].vendor[8] = 0; | 104 | tdev->vendor[8] = 0; |
152 | tc_bus[slot].name[8] = 0; | 105 | tdev->name[8] = 0; |
153 | /* | 106 | |
154 | * Looks unneccesary, but we may change | 107 | pr_info("%s: %s %s %s\n", tdev->dev.bus_id, tdev->vendor, |
155 | * TC? in the future | 108 | tdev->name, tdev->firmware); |
156 | */ | 109 | |
157 | switch (slot) { | 110 | devsize = readb(module + offset + TC_SLOT_SIZE); |
158 | case 0: | 111 | devsize <<= 22; |
159 | tc_bus[slot].interrupt = dec_interrupt[DEC_IRQ_TC0]; | 112 | if (devsize <= slotsize) { |
160 | break; | 113 | tdev->resource.start = slotaddr; |
161 | case 1: | 114 | tdev->resource.end = slotaddr + devsize - 1; |
162 | tc_bus[slot].interrupt = dec_interrupt[DEC_IRQ_TC1]; | 115 | } else if (devsize <= extslotsize) { |
163 | break; | 116 | tdev->resource.start = extslotaddr; |
164 | case 2: | 117 | tdev->resource.end = extslotaddr + devsize - 1; |
165 | tc_bus[slot].interrupt = dec_interrupt[DEC_IRQ_TC2]; | 118 | } else { |
166 | break; | 119 | printk(KERN_ERR "%s: Cannot provide slot space " |
167 | /* | 120 | "(%dMiB required, up to %dMiB supported)\n", |
168 | * Yuck! DS5000/200 onboard devices | 121 | tdev->dev.bus_id, devsize >> 20, |
169 | */ | 122 | max(slotsize, extslotsize) >> 20); |
170 | case 5: | 123 | kfree(tdev); |
171 | tc_bus[slot].interrupt = dec_interrupt[DEC_IRQ_TC5]; | 124 | goto out_err; |
172 | break; | ||
173 | case 6: | ||
174 | tc_bus[slot].interrupt = dec_interrupt[DEC_IRQ_TC6]; | ||
175 | break; | ||
176 | default: | ||
177 | tc_bus[slot].interrupt = -1; | ||
178 | break; | ||
179 | } | 125 | } |
126 | tdev->resource.name = tdev->name; | ||
127 | tdev->resource.flags = IORESOURCE_MEM; | ||
128 | |||
129 | tc_device_get_irq(tdev); | ||
180 | 130 | ||
131 | device_register(&tdev->dev); | ||
132 | list_add_tail(&tdev->node, &tbus->devices); | ||
133 | |||
134 | out_err: | ||
181 | iounmap(module); | 135 | iounmap(module); |
182 | } | 136 | } |
183 | } | 137 | } |
184 | 138 | ||
185 | /* | 139 | /* |
186 | * the main entry | 140 | * The main entry. |
187 | */ | 141 | */ |
188 | static int __init tc_init(void) | 142 | static int __init tc_init(void) |
189 | { | 143 | { |
190 | int tc_clock; | 144 | /* Initialize the TURBOchannel bus */ |
191 | int i; | 145 | if (tc_bus_get_info(&tc_bus)) |
192 | unsigned long slot0addr; | ||
193 | unsigned long slot_size; | ||
194 | |||
195 | if (!TURBOCHANNEL) | ||
196 | return 0; | 146 | return 0; |
197 | 147 | ||
198 | for (i = 0; i < MAX_SLOT; i++) { | 148 | INIT_LIST_HEAD(&tc_bus.devices); |
199 | tc_bus[i].base_addr = 0; | 149 | strcpy(tc_bus.dev.bus_id, "tc"); |
200 | tc_bus[i].name[0] = 0; | 150 | device_register(&tc_bus.dev); |
201 | tc_bus[i].vendor[0] = 0; | 151 | |
202 | tc_bus[i].firmware[0] = 0; | 152 | if (tc_bus.info.slot_size) { |
203 | tc_bus[i].interrupt = -1; | 153 | unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000; |
204 | tc_bus[i].flags = FREE; | 154 | |
205 | } | 155 | pr_info("tc: TURBOchannel rev. %d at %d.%d MHz " |
206 | 156 | "(with%s parity)\n", tc_bus.info.revision, | |
207 | info = rex_gettcinfo(); | 157 | tc_clock / 10, tc_clock % 10, |
208 | slot0addr = CPHYSADDR((long)rex_slot_address(0)); | 158 | tc_bus.info.parity ? "" : "out"); |
209 | 159 | ||
210 | switch (mips_machtype) { | 160 | tc_bus.resource[0].start = tc_bus.slot_base; |
211 | case MACH_DS5000_200: | 161 | tc_bus.resource[0].end = tc_bus.slot_base + |
212 | num_tcslots = 7; | 162 | (tc_bus.info.slot_size << 20) * |
213 | break; | 163 | tc_bus.num_tcslots - 1; |
214 | case MACH_DS5000_1XX: | 164 | tc_bus.resource[0].name = tc_bus.name; |
215 | case MACH_DS5000_2X0: | 165 | tc_bus.resource[0].flags = IORESOURCE_MEM; |
216 | case MACH_DS5900: | 166 | if (request_resource(&iomem_resource, |
217 | num_tcslots = 3; | 167 | &tc_bus.resource[0]) < 0) { |
218 | break; | 168 | printk(KERN_ERR "tc: Cannot reserve resource\n"); |
219 | case MACH_DS5000_XX: | 169 | return 0; |
220 | default: | 170 | } |
221 | num_tcslots = 2; | 171 | if (tc_bus.ext_slot_size) { |
222 | break; | 172 | tc_bus.resource[1].start = tc_bus.ext_slot_base; |
223 | } | 173 | tc_bus.resource[1].end = tc_bus.ext_slot_base + |
224 | 174 | tc_bus.ext_slot_size * | |
225 | tc_clock = 10000 / info->clk_period; | 175 | tc_bus.num_tcslots - 1; |
226 | 176 | tc_bus.resource[1].name = tc_bus.name; | |
227 | if (info->slot_size && slot0addr) { | 177 | tc_bus.resource[1].flags = IORESOURCE_MEM; |
228 | pr_info("TURBOchannel rev. %d at %d.%d MHz (with%s parity)\n", | 178 | if (request_resource(&iomem_resource, |
229 | info->revision, tc_clock / 10, tc_clock % 10, | 179 | &tc_bus.resource[1]) < 0) { |
230 | info->parity ? "" : "out"); | 180 | printk(KERN_ERR |
231 | 181 | "tc: Cannot reserve resource\n"); | |
232 | slot_size = info->slot_size << 20; | 182 | release_resource(&tc_bus.resource[0]); |
233 | 183 | return 0; | |
234 | tc_probe(slot0addr, slot_size, num_tcslots); | 184 | } |
235 | |||
236 | for (i = 0; i < num_tcslots; i++) { | ||
237 | if (!tc_bus[i].base_addr) | ||
238 | continue; | ||
239 | pr_info(" slot %d: %s %s %s\n", i, tc_bus[i].vendor, | ||
240 | tc_bus[i].name, tc_bus[i].firmware); | ||
241 | } | 185 | } |
186 | |||
187 | tc_bus_add_devices(&tc_bus); | ||
242 | } | 188 | } |
243 | 189 | ||
244 | return 0; | 190 | return 0; |
245 | } | 191 | } |
246 | 192 | ||
247 | subsys_initcall(tc_init); | 193 | subsys_initcall(tc_init); |
248 | |||
249 | EXPORT_SYMBOL(search_tc_card); | ||
250 | EXPORT_SYMBOL(claim_tc_card); | ||
251 | EXPORT_SYMBOL(release_tc_card); | ||
252 | EXPORT_SYMBOL(get_tc_base_addr); | ||
253 | EXPORT_SYMBOL(get_tc_irq_nr); | ||
254 | EXPORT_SYMBOL(get_tc_speed); | ||
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c index 371f194a9d39..4d781a2a9807 100644 --- a/drivers/usb/host/ehci-ps3.c +++ b/drivers/usb/host/ehci-ps3.c | |||
@@ -104,7 +104,7 @@ static int ps3_ehci_sb_probe(struct ps3_system_bus_device *dev) | |||
104 | dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__, | 104 | dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__, |
105 | __LINE__, dev->m_region->lpar_addr); | 105 | __LINE__, dev->m_region->lpar_addr); |
106 | 106 | ||
107 | result = ps3_alloc_io_irq(dev->interrupt_id, &virq); | 107 | result = ps3_alloc_io_irq(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq); |
108 | 108 | ||
109 | if (result) { | 109 | if (result) { |
110 | dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n", | 110 | dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n", |
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index ec0da0343be4..46fa57a520d0 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h | |||
@@ -677,10 +677,10 @@ static inline unsigned int ehci_readl (const struct ehci_hcd *ehci, | |||
677 | { | 677 | { |
678 | #ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO | 678 | #ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO |
679 | return ehci_big_endian_mmio(ehci) ? | 679 | return ehci_big_endian_mmio(ehci) ? |
680 | readl_be((__force u32 *)regs) : | 680 | readl_be(regs) : |
681 | readl((__force u32 *)regs); | 681 | readl(regs); |
682 | #else | 682 | #else |
683 | return readl((__force u32 *)regs); | 683 | return readl(regs); |
684 | #endif | 684 | #endif |
685 | } | 685 | } |
686 | 686 | ||
@@ -689,10 +689,10 @@ static inline void ehci_writel (const struct ehci_hcd *ehci, | |||
689 | { | 689 | { |
690 | #ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO | 690 | #ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO |
691 | ehci_big_endian_mmio(ehci) ? | 691 | ehci_big_endian_mmio(ehci) ? |
692 | writel_be(val, (__force u32 *)regs) : | 692 | writel_be(val, regs) : |
693 | writel(val, (__force u32 *)regs); | 693 | writel(val, regs); |
694 | #else | 694 | #else |
695 | writel(val, (__force u32 *)regs); | 695 | writel(val, regs); |
696 | #endif | 696 | #endif |
697 | } | 697 | } |
698 | 698 | ||
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c index 69d948b4a701..62283a3926de 100644 --- a/drivers/usb/host/ohci-ps3.c +++ b/drivers/usb/host/ohci-ps3.c | |||
@@ -107,7 +107,7 @@ static int ps3_ohci_sb_probe(struct ps3_system_bus_device *dev) | |||
107 | dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__, | 107 | dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__, |
108 | __LINE__, dev->m_region->lpar_addr); | 108 | __LINE__, dev->m_region->lpar_addr); |
109 | 109 | ||
110 | result = ps3_alloc_io_irq(dev->interrupt_id, &virq); | 110 | result = ps3_alloc_io_irq(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq); |
111 | 111 | ||
112 | if (result) { | 112 | if (result) { |
113 | dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n", | 113 | dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n", |
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h index 0dafcda37291..c2b5ecfe5e9f 100644 --- a/drivers/usb/host/ohci.h +++ b/drivers/usb/host/ohci.h | |||
@@ -507,10 +507,10 @@ static inline unsigned int _ohci_readl (const struct ohci_hcd *ohci, | |||
507 | { | 507 | { |
508 | #ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO | 508 | #ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO |
509 | return big_endian_mmio(ohci) ? | 509 | return big_endian_mmio(ohci) ? |
510 | readl_be ((__force u32 *)regs) : | 510 | readl_be (regs) : |
511 | readl ((__force u32 *)regs); | 511 | readl (regs); |
512 | #else | 512 | #else |
513 | return readl ((__force u32 *)regs); | 513 | return readl (regs); |
514 | #endif | 514 | #endif |
515 | } | 515 | } |
516 | 516 | ||
@@ -519,10 +519,10 @@ static inline void _ohci_writel (const struct ohci_hcd *ohci, | |||
519 | { | 519 | { |
520 | #ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO | 520 | #ifdef CONFIG_USB_OHCI_BIG_ENDIAN_MMIO |
521 | big_endian_mmio(ohci) ? | 521 | big_endian_mmio(ohci) ? |
522 | writel_be (val, (__force u32 *)regs) : | 522 | writel_be (val, regs) : |
523 | writel (val, (__force u32 *)regs); | 523 | writel (val, regs); |
524 | #else | 524 | #else |
525 | writel (val, (__force u32 *)regs); | 525 | writel (val, regs); |
526 | #endif | 526 | #endif |
527 | } | 527 | } |
528 | 528 | ||
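The EHCI and OHCI accessor cleanups above rely on readl()/writel() (and the readl_be()/writel_be() variants) already taking a void __iomem * cookie; once the regs pointers carry the __iomem annotation themselves, the (__force u32 *) casts only masked sparse's address-space checking and can simply go. A minimal sketch of the resulting pattern, with hypothetical wrapper names:

    #include <linux/types.h>
    #include <linux/io.h>

    static inline u32 demo_mmio_read(void __iomem *regs)
    {
            return readl(regs);     /* __iomem in, CPU-order u32 out, no casts */
    }

    static inline void demo_mmio_write(u32 val, void __iomem *regs)
    {
            writel(val, regs);
    }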
diff --git a/drivers/usb/net/gl620a.c b/drivers/usb/net/gl620a.c index a6f0f4d934df..31e5fe363fdc 100644 --- a/drivers/usb/net/gl620a.c +++ b/drivers/usb/net/gl620a.c | |||
@@ -70,12 +70,12 @@ | |||
70 | (((GL_MAX_PACKET_LEN + 4) * GL_MAX_TRANSMIT_PACKETS) + 4) | 70 | (((GL_MAX_PACKET_LEN + 4) * GL_MAX_TRANSMIT_PACKETS) + 4) |
71 | 71 | ||
72 | struct gl_packet { | 72 | struct gl_packet { |
73 | u32 packet_length; | 73 | __le32 packet_length; |
74 | char packet_data [1]; | 74 | char packet_data [1]; |
75 | }; | 75 | }; |
76 | 76 | ||
77 | struct gl_header { | 77 | struct gl_header { |
78 | u32 packet_count; | 78 | __le32 packet_count; |
79 | struct gl_packet packets; | 79 | struct gl_packet packets; |
80 | }; | 80 | }; |
81 | 81 | ||
@@ -85,15 +85,14 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
85 | struct gl_packet *packet; | 85 | struct gl_packet *packet; |
86 | struct sk_buff *gl_skb; | 86 | struct sk_buff *gl_skb; |
87 | u32 size; | 87 | u32 size; |
88 | u32 count; | ||
88 | 89 | ||
89 | header = (struct gl_header *) skb->data; | 90 | header = (struct gl_header *) skb->data; |
90 | 91 | ||
91 | // get the packet count of the received skb | 92 | // get the packet count of the received skb |
92 | le32_to_cpus(&header->packet_count); | 93 | count = le32_to_cpu(header->packet_count); |
93 | if ((header->packet_count > GL_MAX_TRANSMIT_PACKETS) | 94 | if (count > GL_MAX_TRANSMIT_PACKETS) { |
94 | || (header->packet_count < 0)) { | 95 | dbg("genelink: invalid received packet count %u", count); |
95 | dbg("genelink: invalid received packet count %d", | ||
96 | header->packet_count); | ||
97 | return 0; | 96 | return 0; |
98 | } | 97 | } |
99 | 98 | ||
@@ -103,7 +102,7 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
103 | // decrement the length for the packet count size 4 bytes | 102 | // decrement the length for the packet count size 4 bytes |
104 | skb_pull(skb, 4); | 103 | skb_pull(skb, 4); |
105 | 104 | ||
106 | while (header->packet_count > 1) { | 105 | while (count > 1) { |
107 | // get the packet length | 106 | // get the packet length |
108 | size = le32_to_cpu(packet->packet_length); | 107 | size = le32_to_cpu(packet->packet_length); |
109 | 108 | ||
@@ -124,9 +123,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
124 | } | 123 | } |
125 | 124 | ||
126 | // advance to the next packet | 125 | // advance to the next packet |
127 | packet = (struct gl_packet *) | 126 | packet = (struct gl_packet *)&packet->packet_data[size]; |
128 | &packet->packet_data [size]; | 127 | count--; |
129 | header->packet_count--; | ||
130 | 128 | ||
131 | // shift the data pointer to the next gl_packet | 129 | // shift the data pointer to the next gl_packet |
132 | skb_pull(skb, size + 4); | 130 | skb_pull(skb, size + 4); |
@@ -149,8 +147,8 @@ genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | |||
149 | int length = skb->len; | 147 | int length = skb->len; |
150 | int headroom = skb_headroom(skb); | 148 | int headroom = skb_headroom(skb); |
151 | int tailroom = skb_tailroom(skb); | 149 | int tailroom = skb_tailroom(skb); |
152 | u32 *packet_count; | 150 | __le32 *packet_count; |
153 | u32 *packet_len; | 151 | __le32 *packet_len; |
154 | 152 | ||
155 | // FIXME: magic numbers, bleech | 153 | // FIXME: magic numbers, bleech |
156 | padlen = ((skb->len + (4 + 4*1)) % 64) ? 0 : 1; | 154 | padlen = ((skb->len + (4 + 4*1)) % 64) ? 0 : 1; |
@@ -172,7 +170,7 @@ genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) | |||
172 | } | 170 | } |
173 | 171 | ||
174 | // attach the packet count to the header | 172 | // attach the packet count to the header |
175 | packet_count = (u32 *) skb_push(skb, (4 + 4*1)); | 173 | packet_count = (__le32 *) skb_push(skb, (4 + 4*1)); |
176 | packet_len = packet_count + 1; | 174 | packet_len = packet_count + 1; |
177 | 175 | ||
178 | *packet_count = cpu_to_le32(1); | 176 | *packet_count = cpu_to_le32(1); |
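Two things happen in the gl620a.c rewrite above: the on-wire header fields become __le32, and the packet count is converted once into a CPU-order local instead of being byte-swapped in place and then reused from the receive buffer (the old '< 0' test on an unsigned count could also never fire). A sketch of the same pattern with a made-up header struct:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_hdr {
            __le32 packet_count;    /* little-endian, as received from the device */
            /* packet data follows */
    };

    u32 demo_packet_count(const struct demo_hdr *hdr, u32 max_packets)
    {
            u32 count = le32_to_cpu(hdr->packet_count);

            /* an unsigned count cannot be negative; only the upper bound matters */
            return count > max_packets ? 0 : count;
    }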
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c index 06b4fffc189c..3ec24870bca9 100644 --- a/drivers/usb/serial/cp2101.c +++ b/drivers/usb/serial/cp2101.c | |||
@@ -170,13 +170,13 @@ static int cp2101_get_config(struct usb_serial_port* port, u8 request, | |||
170 | unsigned int *data, int size) | 170 | unsigned int *data, int size) |
171 | { | 171 | { |
172 | struct usb_serial *serial = port->serial; | 172 | struct usb_serial *serial = port->serial; |
173 | u32 *buf; | 173 | __le32 *buf; |
174 | int result, i, length; | 174 | int result, i, length; |
175 | 175 | ||
176 | /* Number of integers required to contain the array */ | 176 | /* Number of integers required to contain the array */ |
177 | length = (((size - 1) | 3) + 1)/4; | 177 | length = (((size - 1) | 3) + 1)/4; |
178 | 178 | ||
179 | buf = kcalloc(length, sizeof(u32), GFP_KERNEL); | 179 | buf = kcalloc(length, sizeof(__le32), GFP_KERNEL); |
180 | if (!buf) { | 180 | if (!buf) { |
181 | dev_err(&port->dev, "%s - out of memory.\n", __FUNCTION__); | 181 | dev_err(&port->dev, "%s - out of memory.\n", __FUNCTION__); |
182 | return -ENOMEM; | 182 | return -ENOMEM; |
@@ -216,13 +216,13 @@ static int cp2101_set_config(struct usb_serial_port* port, u8 request, | |||
216 | unsigned int *data, int size) | 216 | unsigned int *data, int size) |
217 | { | 217 | { |
218 | struct usb_serial *serial = port->serial; | 218 | struct usb_serial *serial = port->serial; |
219 | u32 *buf; | 219 | __le32 *buf; |
220 | int result, i, length; | 220 | int result, i, length; |
221 | 221 | ||
222 | /* Number of integers required to contain the array */ | 222 | /* Number of integers required to contain the array */ |
223 | length = (((size - 1) | 3) + 1)/4; | 223 | length = (((size - 1) | 3) + 1)/4; |
224 | 224 | ||
225 | buf = kmalloc(length * sizeof(u32), GFP_KERNEL); | 225 | buf = kmalloc(length * sizeof(__le32), GFP_KERNEL); |
226 | if (!buf) { | 226 | if (!buf) { |
227 | dev_err(&port->dev, "%s - out of memory.\n", | 227 | dev_err(&port->dev, "%s - out of memory.\n", |
228 | __FUNCTION__); | 228 | __FUNCTION__); |
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 4e83f01e894e..45fe65d8d7a0 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
@@ -1444,8 +1444,8 @@ config FB_PMAG_AA | |||
1444 | used mainly in the MIPS-based DECstation series. | 1444 | used mainly in the MIPS-based DECstation series. |
1445 | 1445 | ||
1446 | config FB_PMAG_BA | 1446 | config FB_PMAG_BA |
1447 | bool "PMAG-BA TURBOchannel framebuffer support" | 1447 | tristate "PMAG-BA TURBOchannel framebuffer support" |
1448 | depends on (FB = y) && TC | 1448 | depends on FB && TC |
1449 | select FB_CFB_FILLRECT | 1449 | select FB_CFB_FILLRECT |
1450 | select FB_CFB_COPYAREA | 1450 | select FB_CFB_COPYAREA |
1451 | select FB_CFB_IMAGEBLIT | 1451 | select FB_CFB_IMAGEBLIT |
@@ -1454,8 +1454,8 @@ config FB_PMAG_BA | |||
1454 | used mainly in the MIPS-based DECstation series. | 1454 | used mainly in the MIPS-based DECstation series. |
1455 | 1455 | ||
1456 | config FB_PMAGB_B | 1456 | config FB_PMAGB_B |
1457 | bool "PMAGB-B TURBOchannel framebuffer support" | 1457 | tristate "PMAGB-B TURBOchannel framebuffer support" |
1458 | depends on (FB = y) && TC | 1458 | depends on TC |
1459 | select FB_CFB_FILLRECT | 1459 | select FB_CFB_FILLRECT |
1460 | select FB_CFB_COPYAREA | 1460 | select FB_CFB_COPYAREA |
1461 | select FB_CFB_IMAGEBLIT | 1461 | select FB_CFB_IMAGEBLIT |
diff --git a/drivers/video/pmag-ba-fb.c b/drivers/video/pmag-ba-fb.c index f5361cd8ccce..264d37243fad 100644 --- a/drivers/video/pmag-ba-fb.c +++ b/drivers/video/pmag-ba-fb.c | |||
@@ -15,7 +15,8 @@ | |||
15 | * Michael Engel <engel@unix-ag.org>, | 15 | * Michael Engel <engel@unix-ag.org>, |
16 | * Karsten Merker <merker@linuxtag.org> and | 16 | * Karsten Merker <merker@linuxtag.org> and |
17 | * Harald Koerfgen. | 17 | * Harald Koerfgen. |
18 | * Copyright (c) 2005 Maciej W. Rozycki | 18 | * Copyright (c) 2005, 2006 Maciej W. Rozycki |
19 | * Copyright (c) 2005 James Simmons | ||
19 | * | 20 | * |
20 | * This file is subject to the terms and conditions of the GNU General | 21 | * This file is subject to the terms and conditions of the GNU General |
21 | * Public License. See the file COPYING in the main directory of this | 22 | * Public License. See the file COPYING in the main directory of this |
@@ -28,26 +29,21 @@ | |||
28 | #include <linux/init.h> | 29 | #include <linux/init.h> |
29 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
30 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/tc.h> | ||
31 | #include <linux/types.h> | 33 | #include <linux/types.h> |
32 | 34 | ||
33 | #include <asm/io.h> | 35 | #include <asm/io.h> |
34 | #include <asm/system.h> | 36 | #include <asm/system.h> |
35 | 37 | ||
36 | #include <asm/dec/tc.h> | ||
37 | |||
38 | #include <video/pmag-ba-fb.h> | 38 | #include <video/pmag-ba-fb.h> |
39 | 39 | ||
40 | 40 | ||
41 | struct pmagbafb_par { | 41 | struct pmagbafb_par { |
42 | struct fb_info *next; | ||
43 | volatile void __iomem *mmio; | 42 | volatile void __iomem *mmio; |
44 | volatile u32 __iomem *dac; | 43 | volatile u32 __iomem *dac; |
45 | int slot; | ||
46 | }; | 44 | }; |
47 | 45 | ||
48 | 46 | ||
49 | static struct fb_info *root_pmagbafb_dev; | ||
50 | |||
51 | static struct fb_var_screeninfo pmagbafb_defined __initdata = { | 47 | static struct fb_var_screeninfo pmagbafb_defined __initdata = { |
52 | .xres = 1024, | 48 | .xres = 1024, |
53 | .yres = 864, | 49 | .yres = 864, |
@@ -145,24 +141,19 @@ static void __init pmagbafb_erase_cursor(struct fb_info *info) | |||
145 | } | 141 | } |
146 | 142 | ||
147 | 143 | ||
148 | static int __init pmagbafb_init_one(int slot) | 144 | static int __init pmagbafb_probe(struct device *dev) |
149 | { | 145 | { |
146 | struct tc_dev *tdev = to_tc_dev(dev); | ||
147 | resource_size_t start, len; | ||
150 | struct fb_info *info; | 148 | struct fb_info *info; |
151 | struct pmagbafb_par *par; | 149 | struct pmagbafb_par *par; |
152 | unsigned long base_addr; | ||
153 | 150 | ||
154 | info = framebuffer_alloc(sizeof(struct pmagbafb_par), NULL); | 151 | info = framebuffer_alloc(sizeof(struct pmagbafb_par), dev); |
155 | if (!info) | 152 | if (!info) |
156 | return -ENOMEM; | 153 | return -ENOMEM; |
157 | 154 | ||
158 | par = info->par; | 155 | par = info->par; |
159 | par->slot = slot; | 156 | dev_set_drvdata(dev, info); |
160 | claim_tc_card(par->slot); | ||
161 | |||
162 | base_addr = get_tc_base_addr(par->slot); | ||
163 | |||
164 | par->next = root_pmagbafb_dev; | ||
165 | root_pmagbafb_dev = info; | ||
166 | 157 | ||
167 | if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) | 158 | if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) |
168 | goto err_alloc; | 159 | goto err_alloc; |
@@ -172,15 +163,21 @@ static int __init pmagbafb_init_one(int slot) | |||
172 | info->var = pmagbafb_defined; | 163 | info->var = pmagbafb_defined; |
173 | info->flags = FBINFO_DEFAULT; | 164 | info->flags = FBINFO_DEFAULT; |
174 | 165 | ||
166 | /* Request the I/O MEM resource. */ | ||
167 | start = tdev->resource.start; | ||
168 | len = tdev->resource.end - start + 1; | ||
169 | if (!request_mem_region(start, len, dev->bus_id)) | ||
170 | goto err_cmap; | ||
171 | |||
175 | /* MMIO mapping setup. */ | 172 | /* MMIO mapping setup. */ |
176 | info->fix.mmio_start = base_addr; | 173 | info->fix.mmio_start = start; |
177 | par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); | 174 | par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); |
178 | if (!par->mmio) | 175 | if (!par->mmio) |
179 | goto err_cmap; | 176 | goto err_resource; |
180 | par->dac = par->mmio + PMAG_BA_BT459; | 177 | par->dac = par->mmio + PMAG_BA_BT459; |
181 | 178 | ||
182 | /* Frame buffer mapping setup. */ | 179 | /* Frame buffer mapping setup. */ |
183 | info->fix.smem_start = base_addr + PMAG_BA_FBMEM; | 180 | info->fix.smem_start = start + PMAG_BA_FBMEM; |
184 | info->screen_base = ioremap_nocache(info->fix.smem_start, | 181 | info->screen_base = ioremap_nocache(info->fix.smem_start, |
185 | info->fix.smem_len); | 182 | info->fix.smem_len); |
186 | if (!info->screen_base) | 183 | if (!info->screen_base) |
@@ -192,8 +189,10 @@ static int __init pmagbafb_init_one(int slot) | |||
192 | if (register_framebuffer(info) < 0) | 189 | if (register_framebuffer(info) < 0) |
193 | goto err_smem_map; | 190 | goto err_smem_map; |
194 | 191 | ||
195 | pr_info("fb%d: %s frame buffer device in slot %d\n", | 192 | get_device(dev); |
196 | info->node, info->fix.id, par->slot); | 193 | |
194 | pr_info("fb%d: %s frame buffer device at %s\n", | ||
195 | info->node, info->fix.id, dev->bus_id); | ||
197 | 196 | ||
198 | return 0; | 197 | return 0; |
199 | 198 | ||
@@ -204,54 +203,68 @@ err_smem_map: | |||
204 | err_mmio_map: | 203 | err_mmio_map: |
205 | iounmap(par->mmio); | 204 | iounmap(par->mmio); |
206 | 205 | ||
206 | err_resource: | ||
207 | release_mem_region(start, len); | ||
208 | |||
207 | err_cmap: | 209 | err_cmap: |
208 | fb_dealloc_cmap(&info->cmap); | 210 | fb_dealloc_cmap(&info->cmap); |
209 | 211 | ||
210 | err_alloc: | 212 | err_alloc: |
211 | root_pmagbafb_dev = par->next; | ||
212 | release_tc_card(par->slot); | ||
213 | framebuffer_release(info); | 213 | framebuffer_release(info); |
214 | return -ENXIO; | 214 | return -ENXIO; |
215 | } | 215 | } |
216 | 216 | ||
217 | static void __exit pmagbafb_exit_one(void) | 217 | static int __exit pmagbafb_remove(struct device *dev) |
218 | { | 218 | { |
219 | struct fb_info *info = root_pmagbafb_dev; | 219 | struct tc_dev *tdev = to_tc_dev(dev); |
220 | struct fb_info *info = dev_get_drvdata(dev); | ||
220 | struct pmagbafb_par *par = info->par; | 221 | struct pmagbafb_par *par = info->par; |
222 | resource_size_t start, len; | ||
221 | 223 | ||
224 | put_device(dev); | ||
222 | unregister_framebuffer(info); | 225 | unregister_framebuffer(info); |
223 | iounmap(info->screen_base); | 226 | iounmap(info->screen_base); |
224 | iounmap(par->mmio); | 227 | iounmap(par->mmio); |
228 | start = tdev->resource.start; | ||
229 | len = tdev->resource.end - start + 1; | ||
230 | release_mem_region(start, len); | ||
225 | fb_dealloc_cmap(&info->cmap); | 231 | fb_dealloc_cmap(&info->cmap); |
226 | root_pmagbafb_dev = par->next; | ||
227 | release_tc_card(par->slot); | ||
228 | framebuffer_release(info); | 232 | framebuffer_release(info); |
233 | return 0; | ||
229 | } | 234 | } |
230 | 235 | ||
231 | 236 | ||
232 | /* | 237 | /* |
233 | * Initialise the framebuffer. | 238 | * Initialize the framebuffer. |
234 | */ | 239 | */ |
240 | static const struct tc_device_id pmagbafb_tc_table[] = { | ||
241 | { "DEC ", "PMAG-BA " }, | ||
242 | { } | ||
243 | }; | ||
244 | MODULE_DEVICE_TABLE(tc, pmagbafb_tc_table); | ||
245 | |||
246 | static struct tc_driver pmagbafb_driver = { | ||
247 | .id_table = pmagbafb_tc_table, | ||
248 | .driver = { | ||
249 | .name = "pmagbafb", | ||
250 | .bus = &tc_bus_type, | ||
251 | .probe = pmagbafb_probe, | ||
252 | .remove = __exit_p(pmagbafb_remove), | ||
253 | }, | ||
254 | }; | ||
255 | |||
235 | static int __init pmagbafb_init(void) | 256 | static int __init pmagbafb_init(void) |
236 | { | 257 | { |
237 | int count = 0; | 258 | #ifndef MODULE |
238 | int slot; | ||
239 | |||
240 | if (fb_get_options("pmagbafb", NULL)) | 259 | if (fb_get_options("pmagbafb", NULL)) |
241 | return -ENXIO; | 260 | return -ENXIO; |
242 | 261 | #endif | |
243 | while ((slot = search_tc_card("PMAG-BA")) >= 0) { | 262 | return tc_register_driver(&pmagbafb_driver); |
244 | if (pmagbafb_init_one(slot) < 0) | ||
245 | break; | ||
246 | count++; | ||
247 | } | ||
248 | return (count > 0) ? 0 : -ENXIO; | ||
249 | } | 263 | } |
250 | 264 | ||
251 | static void __exit pmagbafb_exit(void) | 265 | static void __exit pmagbafb_exit(void) |
252 | { | 266 | { |
253 | while (root_pmagbafb_dev) | 267 | tc_unregister_driver(&pmagbafb_driver); |
254 | pmagbafb_exit_one(); | ||
255 | } | 268 | } |
256 | 269 | ||
257 | 270 | ||
diff --git a/drivers/video/pmagb-b-fb.c b/drivers/video/pmagb-b-fb.c index a06a064ad757..7a0ce7d5af6b 100644 --- a/drivers/video/pmagb-b-fb.c +++ b/drivers/video/pmagb-b-fb.c | |||
@@ -11,7 +11,7 @@ | |||
11 | * Michael Engel <engel@unix-ag.org>, | 11 | * Michael Engel <engel@unix-ag.org>, |
12 | * Karsten Merker <merker@linuxtag.org> and | 12 | * Karsten Merker <merker@linuxtag.org> and |
13 | * Harald Koerfgen. | 13 | * Harald Koerfgen. |
14 | * Copyright (c) 2005 Maciej W. Rozycki | 14 | * Copyright (c) 2005, 2006 Maciej W. Rozycki |
15 | * | 15 | * |
16 | * This file is subject to the terms and conditions of the GNU General | 16 | * This file is subject to the terms and conditions of the GNU General |
17 | * Public License. See the file COPYING in the main directory of this | 17 | * Public License. See the file COPYING in the main directory of this |
@@ -25,18 +25,16 @@ | |||
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/tc.h> | ||
28 | #include <linux/types.h> | 29 | #include <linux/types.h> |
29 | 30 | ||
30 | #include <asm/io.h> | 31 | #include <asm/io.h> |
31 | #include <asm/system.h> | 32 | #include <asm/system.h> |
32 | 33 | ||
33 | #include <asm/dec/tc.h> | ||
34 | |||
35 | #include <video/pmagb-b-fb.h> | 34 | #include <video/pmagb-b-fb.h> |
36 | 35 | ||
37 | 36 | ||
38 | struct pmagbbfb_par { | 37 | struct pmagbbfb_par { |
39 | struct fb_info *next; | ||
40 | volatile void __iomem *mmio; | 38 | volatile void __iomem *mmio; |
41 | volatile void __iomem *smem; | 39 | volatile void __iomem *smem; |
42 | volatile u32 __iomem *sfb; | 40 | volatile u32 __iomem *sfb; |
@@ -47,8 +45,6 @@ struct pmagbbfb_par { | |||
47 | }; | 45 | }; |
48 | 46 | ||
49 | 47 | ||
50 | static struct fb_info *root_pmagbbfb_dev; | ||
51 | |||
52 | static struct fb_var_screeninfo pmagbbfb_defined __initdata = { | 48 | static struct fb_var_screeninfo pmagbbfb_defined __initdata = { |
53 | .bits_per_pixel = 8, | 49 | .bits_per_pixel = 8, |
54 | .red.length = 8, | 50 | .red.length = 8, |
@@ -190,8 +186,9 @@ static void __init pmagbbfb_osc_setup(struct fb_info *info) | |||
190 | 69197, 66000, 65000, 50350, 36000, 32000, 25175 | 186 | 69197, 66000, 65000, 50350, 36000, 32000, 25175 |
191 | }; | 187 | }; |
192 | struct pmagbbfb_par *par = info->par; | 188 | struct pmagbbfb_par *par = info->par; |
189 | struct tc_bus *tbus = to_tc_dev(info->device)->bus; | ||
193 | u32 count0 = 8, count1 = 8, counttc = 16 * 256 + 8; | 190 | u32 count0 = 8, count1 = 8, counttc = 16 * 256 + 8; |
194 | u32 freq0, freq1, freqtc = get_tc_speed() / 250; | 191 | u32 freq0, freq1, freqtc = tc_get_speed(tbus) / 250; |
195 | int i, j; | 192 | int i, j; |
196 | 193 | ||
197 | gp0_write(par, 0); /* select Osc0 */ | 194 | gp0_write(par, 0); /* select Osc0 */ |
@@ -249,26 +246,21 @@ static void __init pmagbbfb_osc_setup(struct fb_info *info) | |||
249 | }; | 246 | }; |
250 | 247 | ||
251 | 248 | ||
252 | static int __init pmagbbfb_init_one(int slot) | 249 | static int __init pmagbbfb_probe(struct device *dev) |
253 | { | 250 | { |
254 | char freq0[12], freq1[12]; | 251 | struct tc_dev *tdev = to_tc_dev(dev); |
252 | resource_size_t start, len; | ||
255 | struct fb_info *info; | 253 | struct fb_info *info; |
256 | struct pmagbbfb_par *par; | 254 | struct pmagbbfb_par *par; |
257 | unsigned long base_addr; | 255 | char freq0[12], freq1[12]; |
258 | u32 vid_base; | 256 | u32 vid_base; |
259 | 257 | ||
260 | info = framebuffer_alloc(sizeof(struct pmagbbfb_par), NULL); | 258 | info = framebuffer_alloc(sizeof(struct pmagbbfb_par), dev); |
261 | if (!info) | 259 | if (!info) |
262 | return -ENOMEM; | 260 | return -ENOMEM; |
263 | 261 | ||
264 | par = info->par; | 262 | par = info->par; |
265 | par->slot = slot; | 263 | dev_set_drvdata(dev, info); |
266 | claim_tc_card(par->slot); | ||
267 | |||
268 | base_addr = get_tc_base_addr(par->slot); | ||
269 | |||
270 | par->next = root_pmagbbfb_dev; | ||
271 | root_pmagbbfb_dev = info; | ||
272 | 264 | ||
273 | if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) | 265 | if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) |
274 | goto err_alloc; | 266 | goto err_alloc; |
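
Note on the probe conversion above: pmagbbfb_init_one(slot) becomes a driver-model probe hook, the slot bookkeeping and the root_pmagbbfb_dev list dropped in the first hunk are replaced by per-device drvdata, and framebuffer_alloc() is now passed the device so info->device points at the TURBOchannel card. A hedged sketch of that entry pattern, with invented "example_" names, a placeholder private struct, and error handling and later steps trimmed:

#include <linux/device.h>
#include <linux/fb.h>
#include <linux/tc.h>

struct example_par {
        void __iomem *mmio;                     /* placeholder private state */
};

static int example_probe(struct device *dev)
{
        struct tc_dev *tdev = to_tc_dev(dev);   /* container_of() downcast */
        struct fb_info *info;

        /* Passing dev fills info->device, which the oscillator setup
         * earlier in the patch uses to reach the TURBOchannel bus. */
        info = framebuffer_alloc(sizeof(struct example_par), dev);
        if (!info)
                return -ENOMEM;

        dev_set_drvdata(dev, info);             /* looked up again in remove() */
        info->fix.mmio_start = tdev->resource.start;
        return 0;
}
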
@@ -278,16 +270,22 @@ static int __init pmagbbfb_init_one(int slot) | |||
278 | info->var = pmagbbfb_defined; | 270 | info->var = pmagbbfb_defined; |
279 | info->flags = FBINFO_DEFAULT; | 271 | info->flags = FBINFO_DEFAULT; |
280 | 272 | ||
273 | /* Request the I/O MEM resource. */ | ||
274 | start = tdev->resource.start; | ||
275 | len = tdev->resource.end - start + 1; | ||
276 | if (!request_mem_region(start, len, dev->bus_id)) | ||
277 | goto err_cmap; | ||
278 | |||
281 | /* MMIO mapping setup. */ | 279 | /* MMIO mapping setup. */ |
282 | info->fix.mmio_start = base_addr; | 280 | info->fix.mmio_start = start; |
283 | par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); | 281 | par->mmio = ioremap_nocache(info->fix.mmio_start, info->fix.mmio_len); |
284 | if (!par->mmio) | 282 | if (!par->mmio) |
285 | goto err_cmap; | 283 | goto err_resource; |
286 | par->sfb = par->mmio + PMAGB_B_SFB; | 284 | par->sfb = par->mmio + PMAGB_B_SFB; |
287 | par->dac = par->mmio + PMAGB_B_BT459; | 285 | par->dac = par->mmio + PMAGB_B_BT459; |
288 | 286 | ||
289 | /* Frame buffer mapping setup. */ | 287 | /* Frame buffer mapping setup. */ |
290 | info->fix.smem_start = base_addr + PMAGB_B_FBMEM; | 288 | info->fix.smem_start = start + PMAGB_B_FBMEM; |
291 | par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len); | 289 | par->smem = ioremap_nocache(info->fix.smem_start, info->fix.smem_len); |
292 | if (!par->smem) | 290 | if (!par->smem) |
293 | goto err_mmio_map; | 291 | goto err_mmio_map; |
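
The hunk above adds the missing I/O memory claim before mapping: request_mem_region() reserves the card's resource range, ioremap_nocache() (plain ioremap() in later kernels) maps it, and failures unwind in reverse order through the new err_resource label. A small sketch of the same claim-then-map ordering, with invented names; dev_name() stands in for the bus_id field the patch itself uses:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/tc.h>

static void __iomem *example_map(struct tc_dev *tdev)
{
        resource_size_t start = tdev->resource.start;
        resource_size_t len = tdev->resource.end - start + 1;
        void __iomem *base;

        if (!request_mem_region(start, len, dev_name(&tdev->dev)))
                return NULL;                    /* range already claimed */

        base = ioremap_nocache(start, len);
        if (!base)
                release_mem_region(start, len); /* undo in reverse order */
        return base;
}
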
@@ -302,13 +300,15 @@ static int __init pmagbbfb_init_one(int slot) | |||
302 | if (register_framebuffer(info) < 0) | 300 | if (register_framebuffer(info) < 0) |
303 | goto err_smem_map; | 301 | goto err_smem_map; |
304 | 302 | ||
303 | get_device(dev); | ||
304 | |||
305 | snprintf(freq0, sizeof(freq0), "%u.%03uMHz", | 305 | snprintf(freq0, sizeof(freq0), "%u.%03uMHz", |
306 | par->osc0 / 1000, par->osc0 % 1000); | 306 | par->osc0 / 1000, par->osc0 % 1000); |
307 | snprintf(freq1, sizeof(freq1), "%u.%03uMHz", | 307 | snprintf(freq1, sizeof(freq1), "%u.%03uMHz", |
308 | par->osc1 / 1000, par->osc1 % 1000); | 308 | par->osc1 / 1000, par->osc1 % 1000); |
309 | 309 | ||
310 | pr_info("fb%d: %s frame buffer device in slot %d\n", | 310 | pr_info("fb%d: %s frame buffer device at %s\n", |
311 | info->node, info->fix.id, par->slot); | 311 | info->node, info->fix.id, dev->bus_id); |
312 | pr_info("fb%d: Osc0: %s, Osc1: %s, Osc%u selected\n", | 312 | pr_info("fb%d: Osc0: %s, Osc1: %s, Osc%u selected\n", |
313 | info->node, freq0, par->osc1 ? freq1 : "disabled", | 313 | info->node, freq0, par->osc1 ? freq1 : "disabled", |
314 | par->osc1 != 0); | 314 | par->osc1 != 0); |
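
get_device() after a successful register_framebuffer() takes a reference on the TC device for as long as the fbN node exists; the matching put_device() sits at the top of pmagbbfb_remove() below. Illustrative pairing only, the real calls live in the probe and remove hooks shown in this patch:

#include <linux/device.h>

static void example_pin(struct device *dev)
{
        get_device(dev);        /* held while the framebuffer is registered */
}

static void example_unpin(struct device *dev)
{
        put_device(dev);        /* dropped once the framebuffer is torn down */
}
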
@@ -322,54 +322,68 @@ err_smem_map: | |||
322 | err_mmio_map: | 322 | err_mmio_map: |
323 | iounmap(par->mmio); | 323 | iounmap(par->mmio); |
324 | 324 | ||
325 | err_resource: | ||
326 | release_mem_region(start, len); | ||
327 | |||
325 | err_cmap: | 328 | err_cmap: |
326 | fb_dealloc_cmap(&info->cmap); | 329 | fb_dealloc_cmap(&info->cmap); |
327 | 330 | ||
328 | err_alloc: | 331 | err_alloc: |
329 | root_pmagbbfb_dev = par->next; | ||
330 | release_tc_card(par->slot); | ||
331 | framebuffer_release(info); | 332 | framebuffer_release(info); |
332 | return -ENXIO; | 333 | return -ENXIO; |
333 | } | 334 | } |
334 | 335 | ||
335 | static void __exit pmagbbfb_exit_one(void) | 336 | static int __exit pmagbbfb_remove(struct device *dev) |
336 | { | 337 | { |
337 | struct fb_info *info = root_pmagbbfb_dev; | 338 | struct tc_dev *tdev = to_tc_dev(dev); |
339 | struct fb_info *info = dev_get_drvdata(dev); | ||
338 | struct pmagbbfb_par *par = info->par; | 340 | struct pmagbbfb_par *par = info->par; |
341 | resource_size_t start, len; | ||
339 | 342 | ||
343 | put_device(dev); | ||
340 | unregister_framebuffer(info); | 344 | unregister_framebuffer(info); |
341 | iounmap(par->smem); | 345 | iounmap(par->smem); |
342 | iounmap(par->mmio); | 346 | iounmap(par->mmio); |
347 | start = tdev->resource.start; | ||
348 | len = tdev->resource.end - start + 1; | ||
349 | release_mem_region(start, len); | ||
343 | fb_dealloc_cmap(&info->cmap); | 350 | fb_dealloc_cmap(&info->cmap); |
344 | root_pmagbbfb_dev = par->next; | ||
345 | release_tc_card(par->slot); | ||
346 | framebuffer_release(info); | 351 | framebuffer_release(info); |
352 | return 0; | ||
347 | } | 353 | } |
348 | 354 | ||
349 | 355 | ||
350 | /* | 356 | /* |
351 | * Initialise the framebuffer. | 357 | * Initialize the framebuffer. |
352 | */ | 358 | */ |
359 | static const struct tc_device_id pmagbbfb_tc_table[] = { | ||
360 | { "DEC ", "PMAGB-BA" }, | ||
361 | { } | ||
362 | }; | ||
363 | MODULE_DEVICE_TABLE(tc, pmagbbfb_tc_table); | ||
364 | |||
365 | static struct tc_driver pmagbbfb_driver = { | ||
366 | .id_table = pmagbbfb_tc_table, | ||
367 | .driver = { | ||
368 | .name = "pmagbbfb", | ||
369 | .bus = &tc_bus_type, | ||
370 | .probe = pmagbbfb_probe, | ||
371 | .remove = __exit_p(pmagbbfb_remove), | ||
372 | }, | ||
373 | }; | ||
374 | |||
353 | static int __init pmagbbfb_init(void) | 375 | static int __init pmagbbfb_init(void) |
354 | { | 376 | { |
355 | int count = 0; | 377 | #ifndef MODULE |
356 | int slot; | ||
357 | |||
358 | if (fb_get_options("pmagbbfb", NULL)) | 378 | if (fb_get_options("pmagbbfb", NULL)) |
359 | return -ENXIO; | 379 | return -ENXIO; |
360 | 380 | #endif | |
361 | while ((slot = search_tc_card("PMAGB-BA")) >= 0) { | 381 | return tc_register_driver(&pmagbbfb_driver); |
362 | if (pmagbbfb_init_one(slot) < 0) | ||
363 | break; | ||
364 | count++; | ||
365 | } | ||
366 | return (count > 0) ? 0 : -ENXIO; | ||
367 | } | 382 | } |
368 | 383 | ||
369 | static void __exit pmagbbfb_exit(void) | 384 | static void __exit pmagbbfb_exit(void) |
370 | { | 385 | { |
371 | while (root_pmagbbfb_dev) | 386 | tc_unregister_driver(&pmagbbfb_driver); |
372 | pmagbbfb_exit_one(); | ||
373 | } | 387 | } |
374 | 388 | ||
375 | 389 | ||
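
Taken together, the tail of the patch replaces the old slot-scanning loop with standard TURBOchannel driver registration: an ID table used for bus matching and module autoloading via MODULE_DEVICE_TABLE(), a struct tc_driver wrapping the probe/remove hooks, and init/exit reduced to tc_register_driver()/tc_unregister_driver(). The #ifndef MODULE guard keeps the fb_get_options() command-line check for built-in configurations only, presumably because loading the module is already an explicit opt-in. Below is a self-contained skeleton of that pattern, assuming the <linux/tc.h> API behaves as shown above; the board name in the ID table is a placeholder, not a real device:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/tc.h>

static int example_probe(struct device *dev)
{
        return 0;                               /* claim and map resources here */
}

static int __exit example_remove(struct device *dev)
{
        return 0;                               /* undo probe in reverse order */
}

static const struct tc_device_id example_tc_table[] = {
        { "DEC     ", "PMAG-XX " },             /* vendor/name, space padded; placeholder */
        { }
};
MODULE_DEVICE_TABLE(tc, example_tc_table);

static struct tc_driver example_driver = {
        .id_table       = example_tc_table,
        .driver         = {
                .name   = "example",
                .bus    = &tc_bus_type,
                .probe  = example_probe,
                .remove = __exit_p(example_remove),
        },
};

static int __init example_init(void)
{
        return tc_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
        tc_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
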